repo_name
string
path
string
copies
string
size
string
content
string
license
string
TREX-ROM/android_kernel_lge_mako
lib/ts_fsm.c
13885
10866
/* * lib/ts_fsm.c A naive finite state machine text search approach * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Thomas Graf <tgraf@suug.ch> * * ========================================================================== * * A finite state machine consists of n states (struct ts_fsm_token) * representing the pattern as a finite automation. The data is read * sequentially on an octet basis. Every state token specifies the number * of recurrences and the type of value accepted which can be either a * specific character or ctype based set of characters. The available * type of recurrences include 1, (0|1), [0 n], and [1 n]. * * The algorithm differs between strict/non-strict mode specifying * whether the pattern has to start at the first octet. Strict mode * is enabled by default and can be disabled by inserting * TS_FSM_HEAD_IGNORE as the first token in the chain. * * The runtime performance of the algorithm should be around O(n), * however while in strict mode the average runtime can be better. 
*/ #include <linux/module.h> #include <linux/types.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/textsearch.h> #include <linux/textsearch_fsm.h> struct ts_fsm { unsigned int ntokens; struct ts_fsm_token tokens[0]; }; /* other values derived from ctype.h */ #define _A 0x100 /* ascii */ #define _W 0x200 /* wildcard */ /* Map to _ctype flags and some magic numbers */ static const u16 token_map[TS_FSM_TYPE_MAX+1] = { [TS_FSM_SPECIFIC] = 0, [TS_FSM_WILDCARD] = _W, [TS_FSM_CNTRL] = _C, [TS_FSM_LOWER] = _L, [TS_FSM_UPPER] = _U, [TS_FSM_PUNCT] = _P, [TS_FSM_SPACE] = _S, [TS_FSM_DIGIT] = _D, [TS_FSM_XDIGIT] = _D | _X, [TS_FSM_ALPHA] = _U | _L, [TS_FSM_ALNUM] = _U | _L | _D, [TS_FSM_PRINT] = _P | _U | _L | _D | _SP, [TS_FSM_GRAPH] = _P | _U | _L | _D, [TS_FSM_ASCII] = _A, }; static const u16 token_lookup_tbl[256] = { _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 0- 3 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 4- 7 */ _W|_A|_C, _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C|_S, /* 8- 11 */ _W|_A|_C|_S, _W|_A|_C|_S, _W|_A|_C, _W|_A|_C, /* 12- 15 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 16- 19 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 20- 23 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 24- 27 */ _W|_A|_C, _W|_A|_C, _W|_A|_C, _W|_A|_C, /* 28- 31 */ _W|_A|_S|_SP, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 32- 35 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 36- 39 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 40- 43 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 44- 47 */ _W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 48- 51 */ _W|_A|_D, _W|_A|_D, _W|_A|_D, _W|_A|_D, /* 52- 55 */ _W|_A|_D, _W|_A|_D, _W|_A|_P, _W|_A|_P, /* 56- 59 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 60- 63 */ _W|_A|_P, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, /* 64- 67 */ _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U|_X, _W|_A|_U, /* 68- 71 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 72- 75 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 76- 79 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_U, /* 80- 83 */ _W|_A|_U, 
_W|_A|_U, _W|_A|_U, _W|_A|_U, /* 84- 87 */ _W|_A|_U, _W|_A|_U, _W|_A|_U, _W|_A|_P, /* 88- 91 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_P, /* 92- 95 */ _W|_A|_P, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, /* 96- 99 */ _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L|_X, _W|_A|_L, /* 100-103 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 104-107 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 108-111 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 112-115 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_L, /* 116-119 */ _W|_A|_L, _W|_A|_L, _W|_A|_L, _W|_A|_P, /* 120-123 */ _W|_A|_P, _W|_A|_P, _W|_A|_P, _W|_A|_C, /* 124-127 */ _W, _W, _W, _W, /* 128-131 */ _W, _W, _W, _W, /* 132-135 */ _W, _W, _W, _W, /* 136-139 */ _W, _W, _W, _W, /* 140-143 */ _W, _W, _W, _W, /* 144-147 */ _W, _W, _W, _W, /* 148-151 */ _W, _W, _W, _W, /* 152-155 */ _W, _W, _W, _W, /* 156-159 */ _W|_S|_SP, _W|_P, _W|_P, _W|_P, /* 160-163 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 164-167 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 168-171 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 172-175 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 176-179 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 180-183 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 184-187 */ _W|_P, _W|_P, _W|_P, _W|_P, /* 188-191 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 192-195 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 196-199 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 200-203 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 204-207 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 208-211 */ _W|_U, _W|_U, _W|_U, _W|_P, /* 212-215 */ _W|_U, _W|_U, _W|_U, _W|_U, /* 216-219 */ _W|_U, _W|_U, _W|_U, _W|_L, /* 220-223 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 224-227 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 228-231 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 232-235 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 236-239 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 240-243 */ _W|_L, _W|_L, _W|_L, _W|_P, /* 244-247 */ _W|_L, _W|_L, _W|_L, _W|_L, /* 248-251 */ _W|_L, _W|_L, _W|_L, _W|_L}; /* 252-255 */ static inline int match_token(struct ts_fsm_token *t, u8 d) { if (t->type) return (token_lookup_tbl[d] & t->type) != 0; else return t->value == d; } 
static unsigned int fsm_find(struct ts_config *conf, struct ts_state *state) { struct ts_fsm *fsm = ts_config_priv(conf); struct ts_fsm_token *cur = NULL, *next; unsigned int match_start, block_idx = 0, tok_idx; unsigned block_len = 0, strict, consumed = state->offset; const u8 *data; #define GET_NEXT_BLOCK() \ ({ consumed += block_idx; \ block_idx = 0; \ block_len = conf->get_next_block(consumed, &data, conf, state); }) #define TOKEN_MISMATCH() \ do { \ if (strict) \ goto no_match; \ block_idx++; \ goto startover; \ } while(0) #define end_of_data() unlikely(block_idx >= block_len && !GET_NEXT_BLOCK()) if (end_of_data()) goto no_match; strict = fsm->tokens[0].recur != TS_FSM_HEAD_IGNORE; startover: match_start = consumed + block_idx; for (tok_idx = 0; tok_idx < fsm->ntokens; tok_idx++) { cur = &fsm->tokens[tok_idx]; if (likely(tok_idx < (fsm->ntokens - 1))) next = &fsm->tokens[tok_idx + 1]; else next = NULL; switch (cur->recur) { case TS_FSM_SINGLE: if (end_of_data()) goto no_match; if (!match_token(cur, data[block_idx])) TOKEN_MISMATCH(); break; case TS_FSM_PERHAPS: if (end_of_data() || !match_token(cur, data[block_idx])) continue; break; case TS_FSM_MULTI: if (end_of_data()) goto no_match; if (!match_token(cur, data[block_idx])) TOKEN_MISMATCH(); block_idx++; /* fall through */ case TS_FSM_ANY: if (next == NULL) goto found_match; if (end_of_data()) continue; while (!match_token(next, data[block_idx])) { if (!match_token(cur, data[block_idx])) TOKEN_MISMATCH(); block_idx++; if (end_of_data()) goto no_match; } continue; /* * Optimization: Prefer small local loop over jumping * back and forth until garbage at head is munched. */ case TS_FSM_HEAD_IGNORE: if (end_of_data()) continue; while (!match_token(next, data[block_idx])) { /* * Special case, don't start over upon * a mismatch, give the user the * chance to specify the type of data * allowed to be ignored. 
*/ if (!match_token(cur, data[block_idx])) goto no_match; block_idx++; if (end_of_data()) goto no_match; } match_start = consumed + block_idx; continue; } block_idx++; } if (end_of_data()) goto found_match; no_match: return UINT_MAX; found_match: state->offset = consumed + block_idx; return match_start; } static struct ts_config *fsm_init(const void *pattern, unsigned int len, gfp_t gfp_mask, int flags) { int i, err = -EINVAL; struct ts_config *conf; struct ts_fsm *fsm; struct ts_fsm_token *tokens = (struct ts_fsm_token *) pattern; unsigned int ntokens = len / sizeof(*tokens); size_t priv_size = sizeof(*fsm) + len; if (len % sizeof(struct ts_fsm_token) || ntokens < 1) goto errout; if (flags & TS_IGNORECASE) goto errout; for (i = 0; i < ntokens; i++) { struct ts_fsm_token *t = &tokens[i]; if (t->type > TS_FSM_TYPE_MAX || t->recur > TS_FSM_RECUR_MAX) goto errout; if (t->recur == TS_FSM_HEAD_IGNORE && (i != 0 || i == (ntokens - 1))) goto errout; } conf = alloc_ts_config(priv_size, gfp_mask); if (IS_ERR(conf)) return conf; conf->flags = flags; fsm = ts_config_priv(conf); fsm->ntokens = ntokens; memcpy(fsm->tokens, pattern, len); for (i = 0; i < fsm->ntokens; i++) { struct ts_fsm_token *t = &fsm->tokens[i]; t->type = token_map[t->type]; } return conf; errout: return ERR_PTR(err); } static void *fsm_get_pattern(struct ts_config *conf) { struct ts_fsm *fsm = ts_config_priv(conf); return fsm->tokens; } static unsigned int fsm_get_pattern_len(struct ts_config *conf) { struct ts_fsm *fsm = ts_config_priv(conf); return fsm->ntokens * sizeof(struct ts_fsm_token); } static struct ts_ops fsm_ops = { .name = "fsm", .find = fsm_find, .init = fsm_init, .get_pattern = fsm_get_pattern, .get_pattern_len = fsm_get_pattern_len, .owner = THIS_MODULE, .list = LIST_HEAD_INIT(fsm_ops.list) }; static int __init init_fsm(void) { return textsearch_register(&fsm_ops); } static void __exit exit_fsm(void) { textsearch_unregister(&fsm_ops); } MODULE_LICENSE("GPL"); module_init(init_fsm); 
module_exit(exit_fsm);
gpl-2.0
varunfsl/fsl_pamu
drivers/tty/pty.c
62
22496
/* * Copyright (C) 1991, 1992 Linus Torvalds * * Added support for a Unix98-style ptmx device. * -- C. Scott Ananian <cananian@alumni.princeton.edu>, 14-Jan-1998 * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/fcntl.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/major.h> #include <linux/mm.h> #include <linux/init.h> #include <linux/device.h> #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/devpts_fs.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/poll.h> #ifdef CONFIG_UNIX98_PTYS static struct tty_driver *ptm_driver; static struct tty_driver *pts_driver; static DEFINE_MUTEX(devpts_mutex); #endif static void pty_close(struct tty_struct *tty, struct file *filp) { BUG_ON(!tty); if (tty->driver->subtype == PTY_TYPE_MASTER) WARN_ON(tty->count > 1); else { if (test_bit(TTY_IO_ERROR, &tty->flags)) return; if (tty->count > 2) return; } set_bit(TTY_IO_ERROR, &tty->flags); wake_up_interruptible(&tty->read_wait); wake_up_interruptible(&tty->write_wait); spin_lock_irq(&tty->ctrl_lock); tty->packet = 0; spin_unlock_irq(&tty->ctrl_lock); /* Review - krefs on tty_link ?? */ if (!tty->link) return; tty_flush_to_ldisc(tty->link); set_bit(TTY_OTHER_CLOSED, &tty->link->flags); wake_up_interruptible(&tty->link->read_wait); wake_up_interruptible(&tty->link->write_wait); if (tty->driver->subtype == PTY_TYPE_MASTER) { set_bit(TTY_OTHER_CLOSED, &tty->flags); #ifdef CONFIG_UNIX98_PTYS if (tty->driver == ptm_driver) { mutex_lock(&devpts_mutex); if (tty->link->driver_data) devpts_pty_kill(tty->link->driver_data); mutex_unlock(&devpts_mutex); } #endif tty_vhangup(tty->link); } } /* * The unthrottle routine is called by the line discipline to signal * that it can receive more characters. 
For PTY's, the TTY_THROTTLED * flag is always set, to force the line discipline to always call the * unthrottle routine when there are fewer than TTY_THRESHOLD_UNTHROTTLE * characters in the queue. This is necessary since each time this * happens, we need to wake up any sleeping processes that could be * (1) trying to send data to the pty, or (2) waiting in wait_until_sent() * for the pty buffer to be drained. */ static void pty_unthrottle(struct tty_struct *tty) { tty_wakeup(tty->link); set_bit(TTY_THROTTLED, &tty->flags); } /** * pty_space - report space left for writing * @to: tty we are writing into * * Limit the buffer space used by ptys to 8k. */ static int pty_space(struct tty_struct *to) { int n = tty_buffer_space_avail(to->port); return min(n, 8192); } /** * pty_write - write to a pty * @tty: the tty we write from * @buf: kernel buffer of data * @count: bytes to write * * Our "hardware" write method. Data is coming from the ldisc which * may be in a non sleeping state. We simply throw this at the other * end of the link as if we were an IRQ handler receiving stuff for * the other side of the pty/tty pair. */ static int pty_write(struct tty_struct *tty, const unsigned char *buf, int c) { struct tty_struct *to = tty->link; if (tty->stopped) return 0; if (c > 0) { /* Stuff the data into the input queue of the other end */ c = tty_insert_flip_string(to->port, buf, c); /* And shovel */ if (c) tty_flip_buffer_push(to->port); } return c; } /** * pty_write_room - write space * @tty: tty we are writing from * * Report how many bytes the ldisc can send into the queue for * the other device. */ static int pty_write_room(struct tty_struct *tty) { if (tty->stopped) return 0; return pty_space(tty->link); } /** * pty_chars_in_buffer - characters currently in our tx queue * @tty: our tty * * Report how much we have in the transmit queue. As everything is * instantly at the other end this is easy to implement. 
*/ static int pty_chars_in_buffer(struct tty_struct *tty) { return 0; } /* Set the lock flag on a pty */ static int pty_set_lock(struct tty_struct *tty, int __user *arg) { int val; if (get_user(val, arg)) return -EFAULT; if (val) set_bit(TTY_PTY_LOCK, &tty->flags); else clear_bit(TTY_PTY_LOCK, &tty->flags); return 0; } static int pty_get_lock(struct tty_struct *tty, int __user *arg) { int locked = test_bit(TTY_PTY_LOCK, &tty->flags); return put_user(locked, arg); } /* Set the packet mode on a pty */ static int pty_set_pktmode(struct tty_struct *tty, int __user *arg) { int pktmode; if (get_user(pktmode, arg)) return -EFAULT; spin_lock_irq(&tty->ctrl_lock); if (pktmode) { if (!tty->packet) { tty->link->ctrl_status = 0; smp_mb(); tty->packet = 1; } } else tty->packet = 0; spin_unlock_irq(&tty->ctrl_lock); return 0; } /* Get the packet mode of a pty */ static int pty_get_pktmode(struct tty_struct *tty, int __user *arg) { int pktmode = tty->packet; return put_user(pktmode, arg); } /* Send a signal to the slave */ static int pty_signal(struct tty_struct *tty, int sig) { struct pid *pgrp; if (tty->link) { pgrp = tty_get_pgrp(tty->link); if (pgrp) kill_pgrp(pgrp, sig, 1); put_pid(pgrp); } return 0; } static void pty_flush_buffer(struct tty_struct *tty) { struct tty_struct *to = tty->link; if (!to) return; /* tty_buffer_flush(to); FIXME */ if (to->packet) { spin_lock_irq(&tty->ctrl_lock); tty->ctrl_status |= TIOCPKT_FLUSHWRITE; wake_up_interruptible(&to->read_wait); spin_unlock_irq(&tty->ctrl_lock); } } static int pty_open(struct tty_struct *tty, struct file *filp) { if (!tty || !tty->link) return -ENODEV; if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) goto out; if (test_bit(TTY_PTY_LOCK, &tty->link->flags)) goto out; if (tty->driver->subtype == PTY_TYPE_SLAVE && tty->link->count != 1) goto out; clear_bit(TTY_IO_ERROR, &tty->flags); clear_bit(TTY_OTHER_CLOSED, &tty->link->flags); set_bit(TTY_THROTTLED, &tty->flags); return 0; out: set_bit(TTY_IO_ERROR, &tty->flags); return 
-EIO; } static void pty_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { /* See if packet mode change of state. */ if (tty->link && tty->link->packet) { int extproc = (old_termios->c_lflag & EXTPROC) | (tty->termios.c_lflag & EXTPROC); int old_flow = ((old_termios->c_iflag & IXON) && (old_termios->c_cc[VSTOP] == '\023') && (old_termios->c_cc[VSTART] == '\021')); int new_flow = (I_IXON(tty) && STOP_CHAR(tty) == '\023' && START_CHAR(tty) == '\021'); if ((old_flow != new_flow) || extproc) { spin_lock_irq(&tty->ctrl_lock); if (old_flow != new_flow) { tty->ctrl_status &= ~(TIOCPKT_DOSTOP | TIOCPKT_NOSTOP); if (new_flow) tty->ctrl_status |= TIOCPKT_DOSTOP; else tty->ctrl_status |= TIOCPKT_NOSTOP; } if (extproc) tty->ctrl_status |= TIOCPKT_IOCTL; spin_unlock_irq(&tty->ctrl_lock); wake_up_interruptible(&tty->link->read_wait); } } tty->termios.c_cflag &= ~(CSIZE | PARENB); tty->termios.c_cflag |= (CS8 | CREAD); } /** * pty_do_resize - resize event * @tty: tty being resized * @ws: window size being set. * * Update the termios variables and send the necessary signals to * peform a terminal resize correctly */ static int pty_resize(struct tty_struct *tty, struct winsize *ws) { struct pid *pgrp, *rpgrp; struct tty_struct *pty = tty->link; /* For a PTY we need to lock the tty side */ mutex_lock(&tty->winsize_mutex); if (!memcmp(ws, &tty->winsize, sizeof(*ws))) goto done; /* Signal the foreground process group of both ptys */ pgrp = tty_get_pgrp(tty); rpgrp = tty_get_pgrp(pty); if (pgrp) kill_pgrp(pgrp, SIGWINCH, 1); if (rpgrp != pgrp && rpgrp) kill_pgrp(rpgrp, SIGWINCH, 1); put_pid(pgrp); put_pid(rpgrp); tty->winsize = *ws; pty->winsize = *ws; /* Never used so will go away soon */ done: mutex_unlock(&tty->winsize_mutex); return 0; } /** * pty_start - start() handler * pty_stop - stop() handler * @tty: tty being flow-controlled * * Propagates the TIOCPKT status to the master pty. 
* * NB: only the master pty can be in packet mode so only the slave * needs start()/stop() handlers */ static void pty_start(struct tty_struct *tty) { unsigned long flags; if (tty->link && tty->link->packet) { spin_lock_irqsave(&tty->ctrl_lock, flags); tty->ctrl_status &= ~TIOCPKT_STOP; tty->ctrl_status |= TIOCPKT_START; spin_unlock_irqrestore(&tty->ctrl_lock, flags); wake_up_interruptible_poll(&tty->link->read_wait, POLLIN); } } static void pty_stop(struct tty_struct *tty) { unsigned long flags; if (tty->link && tty->link->packet) { spin_lock_irqsave(&tty->ctrl_lock, flags); tty->ctrl_status &= ~TIOCPKT_START; tty->ctrl_status |= TIOCPKT_STOP; spin_unlock_irqrestore(&tty->ctrl_lock, flags); wake_up_interruptible_poll(&tty->link->read_wait, POLLIN); } } /** * pty_common_install - set up the pty pair * @driver: the pty driver * @tty: the tty being instantiated * @legacy: true if this is BSD style * * Perform the initial set up for the tty/pty pair. Called from the * tty layer when the port is first opened. * * Locking: the caller must hold the tty_mutex */ static int pty_common_install(struct tty_driver *driver, struct tty_struct *tty, bool legacy) { struct tty_struct *o_tty; struct tty_port *ports[2]; int idx = tty->index; int retval = -ENOMEM; /* Opening the slave first has always returned -EIO */ if (driver->subtype != PTY_TYPE_MASTER) return -EIO; ports[0] = kmalloc(sizeof **ports, GFP_KERNEL); ports[1] = kmalloc(sizeof **ports, GFP_KERNEL); if (!ports[0] || !ports[1]) goto err; if (!try_module_get(driver->other->owner)) { /* This cannot in fact currently happen */ goto err; } o_tty = alloc_tty_struct(driver->other, idx); if (!o_tty) goto err_put_module; tty_set_lock_subclass(o_tty); if (legacy) { /* We always use new tty termios data so we can do this the easy way .. 
*/ retval = tty_init_termios(tty); if (retval) goto err_deinit_tty; retval = tty_init_termios(o_tty); if (retval) goto err_free_termios; driver->other->ttys[idx] = o_tty; driver->ttys[idx] = tty; } else { memset(&tty->termios_locked, 0, sizeof(tty->termios_locked)); tty->termios = driver->init_termios; memset(&o_tty->termios_locked, 0, sizeof(tty->termios_locked)); o_tty->termios = driver->other->init_termios; } /* * Everything allocated ... set up the o_tty structure. */ tty_driver_kref_get(driver->other); /* Establish the links in both directions */ tty->link = o_tty; o_tty->link = tty; tty_port_init(ports[0]); tty_port_init(ports[1]); o_tty->port = ports[0]; tty->port = ports[1]; o_tty->port->itty = o_tty; tty_driver_kref_get(driver); tty->count++; o_tty->count++; return 0; err_free_termios: if (legacy) tty_free_termios(tty); err_deinit_tty: deinitialize_tty_struct(o_tty); free_tty_struct(o_tty); err_put_module: module_put(driver->other->owner); err: kfree(ports[0]); kfree(ports[1]); return retval; } static void pty_cleanup(struct tty_struct *tty) { tty_port_put(tty->port); } /* Traditional BSD devices */ #ifdef CONFIG_LEGACY_PTYS static int pty_install(struct tty_driver *driver, struct tty_struct *tty) { return pty_common_install(driver, tty, true); } static void pty_remove(struct tty_driver *driver, struct tty_struct *tty) { struct tty_struct *pair = tty->link; driver->ttys[tty->index] = NULL; if (pair) pair->driver->ttys[pair->index] = NULL; } static int pty_bsd_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { switch (cmd) { case TIOCSPTLCK: /* Set PT Lock (disallow slave open) */ return pty_set_lock(tty, (int __user *) arg); case TIOCGPTLCK: /* Get PT Lock status */ return pty_get_lock(tty, (int __user *)arg); case TIOCPKT: /* Set PT packet mode */ return pty_set_pktmode(tty, (int __user *)arg); case TIOCGPKT: /* Get PT packet mode */ return pty_get_pktmode(tty, (int __user *)arg); case TIOCSIG: /* Send signal to other side of pty */ 
return pty_signal(tty, (int) arg); case TIOCGPTN: /* TTY returns ENOTTY, but glibc expects EINVAL here */ return -EINVAL; } return -ENOIOCTLCMD; } static int legacy_count = CONFIG_LEGACY_PTY_COUNT; module_param(legacy_count, int, 0); /* * The master side of a pty can do TIOCSPTLCK and thus * has pty_bsd_ioctl. */ static const struct tty_operations master_pty_ops_bsd = { .install = pty_install, .open = pty_open, .close = pty_close, .write = pty_write, .write_room = pty_write_room, .flush_buffer = pty_flush_buffer, .chars_in_buffer = pty_chars_in_buffer, .unthrottle = pty_unthrottle, .ioctl = pty_bsd_ioctl, .cleanup = pty_cleanup, .resize = pty_resize, .remove = pty_remove }; static const struct tty_operations slave_pty_ops_bsd = { .install = pty_install, .open = pty_open, .close = pty_close, .write = pty_write, .write_room = pty_write_room, .flush_buffer = pty_flush_buffer, .chars_in_buffer = pty_chars_in_buffer, .unthrottle = pty_unthrottle, .set_termios = pty_set_termios, .cleanup = pty_cleanup, .resize = pty_resize, .start = pty_start, .stop = pty_stop, .remove = pty_remove }; static void __init legacy_pty_init(void) { struct tty_driver *pty_driver, *pty_slave_driver; if (legacy_count <= 0) return; pty_driver = tty_alloc_driver(legacy_count, TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_ALLOC); if (IS_ERR(pty_driver)) panic("Couldn't allocate pty driver"); pty_slave_driver = tty_alloc_driver(legacy_count, TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_ALLOC); if (IS_ERR(pty_slave_driver)) panic("Couldn't allocate pty slave driver"); pty_driver->driver_name = "pty_master"; pty_driver->name = "pty"; pty_driver->major = PTY_MASTER_MAJOR; pty_driver->minor_start = 0; pty_driver->type = TTY_DRIVER_TYPE_PTY; pty_driver->subtype = PTY_TYPE_MASTER; pty_driver->init_termios = tty_std_termios; pty_driver->init_termios.c_iflag = 0; pty_driver->init_termios.c_oflag = 0; pty_driver->init_termios.c_cflag = B38400 | CS8 | CREAD; 
pty_driver->init_termios.c_lflag = 0; pty_driver->init_termios.c_ispeed = 38400; pty_driver->init_termios.c_ospeed = 38400; pty_driver->other = pty_slave_driver; tty_set_operations(pty_driver, &master_pty_ops_bsd); pty_slave_driver->driver_name = "pty_slave"; pty_slave_driver->name = "ttyp"; pty_slave_driver->major = PTY_SLAVE_MAJOR; pty_slave_driver->minor_start = 0; pty_slave_driver->type = TTY_DRIVER_TYPE_PTY; pty_slave_driver->subtype = PTY_TYPE_SLAVE; pty_slave_driver->init_termios = tty_std_termios; pty_slave_driver->init_termios.c_cflag = B38400 | CS8 | CREAD; pty_slave_driver->init_termios.c_ispeed = 38400; pty_slave_driver->init_termios.c_ospeed = 38400; pty_slave_driver->other = pty_driver; tty_set_operations(pty_slave_driver, &slave_pty_ops_bsd); if (tty_register_driver(pty_driver)) panic("Couldn't register pty driver"); if (tty_register_driver(pty_slave_driver)) panic("Couldn't register pty slave driver"); } #else static inline void legacy_pty_init(void) { } #endif /* Unix98 devices */ #ifdef CONFIG_UNIX98_PTYS static struct cdev ptmx_cdev; static int pty_unix98_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { switch (cmd) { case TIOCSPTLCK: /* Set PT Lock (disallow slave open) */ return pty_set_lock(tty, (int __user *)arg); case TIOCGPTLCK: /* Get PT Lock status */ return pty_get_lock(tty, (int __user *)arg); case TIOCPKT: /* Set PT packet mode */ return pty_set_pktmode(tty, (int __user *)arg); case TIOCGPKT: /* Get PT packet mode */ return pty_get_pktmode(tty, (int __user *)arg); case TIOCGPTN: /* Get PT Number */ return put_user(tty->index, (unsigned int __user *)arg); case TIOCSIG: /* Send signal to other side of pty */ return pty_signal(tty, (int) arg); } return -ENOIOCTLCMD; } /** * ptm_unix98_lookup - find a pty master * @driver: ptm driver * @idx: tty index * * Look up a pty master device. Called under the tty_mutex for now. * This provides our locking. 
*/ static struct tty_struct *ptm_unix98_lookup(struct tty_driver *driver, struct inode *ptm_inode, int idx) { /* Master must be open via /dev/ptmx */ return ERR_PTR(-EIO); } /** * pts_unix98_lookup - find a pty slave * @driver: pts driver * @idx: tty index * * Look up a pty master device. Called under the tty_mutex for now. * This provides our locking for the tty pointer. */ static struct tty_struct *pts_unix98_lookup(struct tty_driver *driver, struct inode *pts_inode, int idx) { struct tty_struct *tty; mutex_lock(&devpts_mutex); tty = devpts_get_priv(pts_inode); mutex_unlock(&devpts_mutex); /* Master must be open before slave */ if (!tty) return ERR_PTR(-EIO); return tty; } /* We have no need to install and remove our tty objects as devpts does all the work for us */ static int pty_unix98_install(struct tty_driver *driver, struct tty_struct *tty) { return pty_common_install(driver, tty, false); } static void pty_unix98_remove(struct tty_driver *driver, struct tty_struct *tty) { } /* this is called once with whichever end is closed last */ static void pty_unix98_shutdown(struct tty_struct *tty) { devpts_kill_index(tty->driver_data, tty->index); } static const struct tty_operations ptm_unix98_ops = { .lookup = ptm_unix98_lookup, .install = pty_unix98_install, .remove = pty_unix98_remove, .open = pty_open, .close = pty_close, .write = pty_write, .write_room = pty_write_room, .flush_buffer = pty_flush_buffer, .chars_in_buffer = pty_chars_in_buffer, .unthrottle = pty_unthrottle, .ioctl = pty_unix98_ioctl, .resize = pty_resize, .shutdown = pty_unix98_shutdown, .cleanup = pty_cleanup }; static const struct tty_operations pty_unix98_ops = { .lookup = pts_unix98_lookup, .install = pty_unix98_install, .remove = pty_unix98_remove, .open = pty_open, .close = pty_close, .write = pty_write, .write_room = pty_write_room, .flush_buffer = pty_flush_buffer, .chars_in_buffer = pty_chars_in_buffer, .unthrottle = pty_unthrottle, .set_termios = pty_set_termios, .start = pty_start, 
.stop = pty_stop, .shutdown = pty_unix98_shutdown, .cleanup = pty_cleanup, }; /** * ptmx_open - open a unix 98 pty master * @inode: inode of device file * @filp: file pointer to tty * * Allocate a unix98 pty master device from the ptmx driver. * * Locking: tty_mutex protects the init_dev work. tty->count should * protect the rest. * allocated_ptys_lock handles the list of free pty numbers */ static int ptmx_open(struct inode *inode, struct file *filp) { struct tty_struct *tty; struct inode *slave_inode; int retval; int index; nonseekable_open(inode, filp); /* We refuse fsnotify events on ptmx, since it's a shared resource */ filp->f_mode |= FMODE_NONOTIFY; retval = tty_alloc_file(filp); if (retval) return retval; /* find a device that is not in use. */ mutex_lock(&devpts_mutex); index = devpts_new_index(inode); if (index < 0) { retval = index; mutex_unlock(&devpts_mutex); goto err_file; } mutex_unlock(&devpts_mutex); mutex_lock(&tty_mutex); tty = tty_init_dev(ptm_driver, index); if (IS_ERR(tty)) { retval = PTR_ERR(tty); goto out; } /* The tty returned here is locked so we can safely drop the mutex */ mutex_unlock(&tty_mutex); set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ tty->driver_data = inode; tty_add_file(tty, filp); slave_inode = devpts_pty_new(inode, MKDEV(UNIX98_PTY_SLAVE_MAJOR, index), index, tty->link); if (IS_ERR(slave_inode)) { retval = PTR_ERR(slave_inode); goto err_release; } tty->link->driver_data = slave_inode; retval = ptm_driver->ops->open(tty, filp); if (retval) goto err_release; tty_unlock(tty); return 0; err_release: tty_unlock(tty); tty_release(inode, filp); return retval; out: mutex_unlock(&tty_mutex); devpts_kill_index(inode, index); err_file: tty_free_file(filp); return retval; } static struct file_operations ptmx_fops; static void __init unix98_pty_init(void) { ptm_driver = tty_alloc_driver(NR_UNIX98_PTY_MAX, TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_DEVPTS_MEM | 
TTY_DRIVER_DYNAMIC_ALLOC); if (IS_ERR(ptm_driver)) panic("Couldn't allocate Unix98 ptm driver"); pts_driver = tty_alloc_driver(NR_UNIX98_PTY_MAX, TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV | TTY_DRIVER_DEVPTS_MEM | TTY_DRIVER_DYNAMIC_ALLOC); if (IS_ERR(pts_driver)) panic("Couldn't allocate Unix98 pts driver"); ptm_driver->driver_name = "pty_master"; ptm_driver->name = "ptm"; ptm_driver->major = UNIX98_PTY_MASTER_MAJOR; ptm_driver->minor_start = 0; ptm_driver->type = TTY_DRIVER_TYPE_PTY; ptm_driver->subtype = PTY_TYPE_MASTER; ptm_driver->init_termios = tty_std_termios; ptm_driver->init_termios.c_iflag = 0; ptm_driver->init_termios.c_oflag = 0; ptm_driver->init_termios.c_cflag = B38400 | CS8 | CREAD; ptm_driver->init_termios.c_lflag = 0; ptm_driver->init_termios.c_ispeed = 38400; ptm_driver->init_termios.c_ospeed = 38400; ptm_driver->other = pts_driver; tty_set_operations(ptm_driver, &ptm_unix98_ops); pts_driver->driver_name = "pty_slave"; pts_driver->name = "pts"; pts_driver->major = UNIX98_PTY_SLAVE_MAJOR; pts_driver->minor_start = 0; pts_driver->type = TTY_DRIVER_TYPE_PTY; pts_driver->subtype = PTY_TYPE_SLAVE; pts_driver->init_termios = tty_std_termios; pts_driver->init_termios.c_cflag = B38400 | CS8 | CREAD; pts_driver->init_termios.c_ispeed = 38400; pts_driver->init_termios.c_ospeed = 38400; pts_driver->other = ptm_driver; tty_set_operations(pts_driver, &pty_unix98_ops); if (tty_register_driver(ptm_driver)) panic("Couldn't register Unix98 ptm driver"); if (tty_register_driver(pts_driver)) panic("Couldn't register Unix98 pts driver"); /* Now create the /dev/ptmx special device */ tty_default_fops(&ptmx_fops); ptmx_fops.open = ptmx_open; cdev_init(&ptmx_cdev, &ptmx_fops); if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) || register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0) panic("Couldn't register /dev/ptmx driver"); device_create(tty_class, NULL, MKDEV(TTYAUX_MAJOR, 2), NULL, "ptmx"); } #else static inline void 
unix98_pty_init(void) { } #endif static int __init pty_init(void) { legacy_pty_init(); unix98_pty_init(); return 0; } module_init(pty_init);
gpl-2.0
gazoo74/linux
drivers/net/ethernet/freescale/fman/mac.c
62
23512
/* Copyright 2008-2015 Freescale Semiconductor, Inc. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of Freescale Semiconductor nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * * ALTERNATIVELY, this software may be distributed under the terms of the * GNU General Public License ("GPL") as published by the Free Software * Foundation, either version 2 of that License or (at your option) any * later version. * * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/of_net.h>
#include <linux/of_mdio.h>
#include <linux/device.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
#include <linux/phy_fixed.h>
#include <linux/etherdevice.h>
#include <linux/libfdt_env.h>

#include "mac.h"
#include "fman_mac.h"
#include "fman_dtsec.h"
#include "fman_tgec.h"
#include "fman_memac.h"

MODULE_LICENSE("Dual BSD/GPL");

MODULE_DESCRIPTION("FSL FMan MAC API based driver");

/* Driver-private state hung off mac_device->priv; one instance per MAC. */
struct mac_priv_s {
	struct device			*dev;
	void __iomem			*vaddr;	/* mapped MAC register block */
	u8				cell_index;
	struct fman			*fman;
	struct device_node		*internal_phy_node;
	/* List of multicast addresses */
	struct list_head		mc_addr_list;
	struct platform_device		*eth_dev;
	struct fixed_phy_status		*fixed_link;
	u16				speed;
	u16				max_speed;

	/* Filled in by setup_{dtsec,tgec,memac}() with the HW-specific ops */
	int (*enable)(struct fman_mac *mac_dev, enum comm_mode mode);
	int (*disable)(struct fman_mac *mac_dev, enum comm_mode mode);
};

/* Node type for the per-MAC multicast address list (mc_addr_list). */
struct mac_address {
	u8 addr[ETH_ALEN];
	struct list_head list;
};

/*
 * Exception/event callback registered with the FMan MAC layer (both
 * exception_cb and event_cb point here).  @handle is the mac_device.
 */
static void mac_exception(void *handle, enum fman_mac_exceptions ex)
{
	struct mac_device	*mac_dev;
	struct mac_priv_s	*priv;

	mac_dev = handle;
	priv = mac_dev->priv;

	if (ex == FM_MAC_EX_10G_RX_FIFO_OVFL) {
		/* don't flag RX FIFO after the first */
		mac_dev->set_exception(mac_dev->fman_mac,
				       FM_MAC_EX_10G_RX_FIFO_OVFL, false);
		dev_err(priv->dev, "10G MAC got RX FIFO Error = %x\n", ex);
	}

	dev_dbg(priv->dev, "%s:%s() -> %d\n", KBUILD_BASENAME ".c",
		__func__, ex);
}

/*
 * Populate the common fman_mac_params from the mac_device before calling
 * the per-MAC *_config() routine.  Also maps the MAC register resource.
 */
static void set_fman_mac_params(struct mac_device *mac_dev,
				struct fman_mac_params *params)
{
	struct mac_priv_s *priv = mac_dev->priv;

	params->base_addr = (typeof(params->base_addr))
		devm_ioremap(priv->dev, mac_dev->res->start,
			     resource_size(mac_dev->res));
	memcpy(&params->addr, mac_dev->addr, sizeof(mac_dev->addr));
	params->max_speed = priv->max_speed;
	params->phy_if = mac_dev->phy_if;
	params->basex_if = false;
	params->mac_id = priv->cell_index;
	params->fm = (void *)priv->fman;
	/* same callback serves both exceptions and events */
	params->exception_cb = mac_exception;
	params->event_cb = mac_exception;
	params->dev_id = mac_dev;
	params->internal_phy_node = priv->internal_phy_node;
}

/* Configure and init a 10G (TGEC) MAC; mac_dev->init for XGEC nodes. */
static int tgec_initialization(struct mac_device *mac_dev)
{
	int err;
	struct mac_priv_s	*priv;
	struct fman_mac_params	params;
	u32			version;

	priv = mac_dev->priv;

	set_fman_mac_params(mac_dev, &params);

	mac_dev->fman_mac = tgec_config(&params);
	if (!mac_dev->fman_mac) {
		err = -EINVAL;
		goto _return;
	}

	err = tgec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
	if (err < 0)
		goto _return_fm_mac_free;

	err = tgec_init(mac_dev->fman_mac);
	if (err < 0)
		goto _return_fm_mac_free;

	/* For 10G MAC, disable Tx ECC exception */
	err = mac_dev->set_exception(mac_dev->fman_mac,
				     FM_MAC_EX_10G_TX_ECC_ER, false);
	if (err < 0)
		goto _return_fm_mac_free;

	err = tgec_get_version(mac_dev->fman_mac, &version);
	if (err < 0)
		goto _return_fm_mac_free;

	dev_info(priv->dev, "FMan XGEC version: 0x%08x\n", version);

	goto _return;

_return_fm_mac_free:
	tgec_free(mac_dev->fman_mac);

_return:
	return err;
}

/* Configure and init a 1G (dTSEC) MAC; mac_dev->init for dtsec nodes. */
static int dtsec_initialization(struct mac_device *mac_dev)
{
	int err;
	struct mac_priv_s	*priv;
	struct fman_mac_params	params;
	u32			version;

	priv = mac_dev->priv;

	set_fman_mac_params(mac_dev, &params);

	mac_dev->fman_mac = dtsec_config(&params);
	if (!mac_dev->fman_mac) {
		err = -EINVAL;
		goto _return;
	}

	err = dtsec_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
	if (err < 0)
		goto _return_fm_mac_free;

	err = dtsec_cfg_pad_and_crc(mac_dev->fman_mac, true);
	if (err < 0)
		goto _return_fm_mac_free;

	err = dtsec_init(mac_dev->fman_mac);
	if (err < 0)
		goto _return_fm_mac_free;

	/* For 1G MAC, disable by default the MIB counters overflow interrupt */
	err = mac_dev->set_exception(mac_dev->fman_mac,
				     FM_MAC_EX_1G_RX_MIB_CNT_OVFL, false);
	if (err < 0)
		goto _return_fm_mac_free;

	err = dtsec_get_version(mac_dev->fman_mac, &version);
	if (err < 0)
		goto _return_fm_mac_free;

	dev_info(priv->dev, "FMan dTSEC version: 0x%08x\n", version);

	goto _return;

_return_fm_mac_free:
	dtsec_free(mac_dev->fman_mac);

_return:
	return err;
}

/* Configure and init a multirate (mEMAC) MAC; mac_dev->init for memac nodes. */
static int memac_initialization(struct mac_device *mac_dev)
{
	int			 err;
	struct mac_priv_s	*priv;
	struct fman_mac_params	 params;

	priv = mac_dev->priv;

	set_fman_mac_params(mac_dev, &params);

	/* a 10G-capable mEMAC is driven through the XGMII interface mode */
	if (priv->max_speed == SPEED_10000)
		params.phy_if = PHY_INTERFACE_MODE_XGMII;

	mac_dev->fman_mac = memac_config(&params);
	if (!mac_dev->fman_mac) {
		err = -EINVAL;
		goto _return;
	}

	err = memac_cfg_max_frame_len(mac_dev->fman_mac, fman_get_max_frm());
	if (err < 0)
		goto _return_fm_mac_free;

	err = memac_cfg_reset_on_init(mac_dev->fman_mac, true);
	if (err < 0)
		goto _return_fm_mac_free;

	err = memac_cfg_fixed_link(mac_dev->fman_mac, priv->fixed_link);
	if (err < 0)
		goto _return_fm_mac_free;

	err = memac_init(mac_dev->fman_mac);
	if (err < 0)
		goto _return_fm_mac_free;

	dev_info(priv->dev, "FMan MEMAC\n");

	goto _return;

_return_fm_mac_free:
	memac_free(mac_dev->fman_mac);

_return:
	return err;
}

/* mac_dev->start: enable RX+TX on the MAC, then kick the attached PHY. */
static int start(struct mac_device *mac_dev)
{
	int	 err;
	struct phy_device *phy_dev = mac_dev->phy_dev;
	struct mac_priv_s *priv = mac_dev->priv;

	err = priv->enable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
	if (!err && phy_dev)
		phy_start(phy_dev);

	return err;
}

/* mac_dev->stop: stop the PHY first, then disable RX+TX on the MAC. */
static int stop(struct mac_device *mac_dev)
{
	struct mac_priv_s *priv = mac_dev->priv;

	if (mac_dev->phy_dev)
		phy_stop(mac_dev->phy_dev);

	return priv->disable(mac_dev->fman_mac, COMM_MODE_RX_AND_TX);
}

/*
 * mac_dev->set_multi: resync the HW multicast hash with the netdev's
 * current multicast list.  The previous list is removed entry by entry,
 * then the new list is added and mirrored into priv->mc_addr_list.
 */
static int set_multi(struct net_device *net_dev, struct mac_device *mac_dev)
{
	struct mac_priv_s	*priv;
	struct mac_address	*old_addr, *tmp;
	struct netdev_hw_addr	*ha;
	int			 err;
	enet_addr_t		*addr;

	priv = mac_dev->priv;

	/* Clear previous address list */
	list_for_each_entry_safe(old_addr, tmp, &priv->mc_addr_list, list) {
		addr = (enet_addr_t *)old_addr->addr;
		err = mac_dev->remove_hash_mac_addr(mac_dev->fman_mac, addr);
		if (err < 0)
			return err;

		list_del(&old_addr->list);
		kfree(old_addr);
	}

	/* Add all the addresses from the new list */
	netdev_for_each_mc_addr(ha, net_dev) {
		addr = (enet_addr_t *)ha->addr;
		err = mac_dev->add_hash_mac_addr(mac_dev->fman_mac, addr);
		if (err < 0)
			return err;

		/* GFP_ATOMIC: may be called from the netdev addr-list path */
		tmp = kmalloc(sizeof(*tmp), GFP_ATOMIC);
		if (!tmp)
			return -ENOMEM;

		ether_addr_copy(tmp->addr, ha->addr);
		list_add(&tmp->list, &priv->mc_addr_list);
	}
	return 0;
}

/**
 * fman_set_mac_active_pause
 * @mac_dev: A pointer to the MAC device
 * @rx: Pause frame setting for RX
 * @tx: Pause frame setting for TX
 *
 * Set the MAC RX/TX PAUSE frames settings
 *
 * Avoid redundant calls to FMD, if the MAC driver already contains the desired
 * active PAUSE settings. Otherwise, the new active settings should be reflected
 * in FMan.
 *
 * Return: 0 on success; Error code otherwise.
 */
int fman_set_mac_active_pause(struct mac_device *mac_dev, bool rx, bool tx)
{
	struct fman_mac *fman_mac = mac_dev->fman_mac;
	int err = 0;

	if (rx != mac_dev->rx_pause_active) {
		err = mac_dev->set_rx_pause(fman_mac, rx);
		if (likely(err == 0))
			mac_dev->rx_pause_active = rx;
	}

	if (tx != mac_dev->tx_pause_active) {
		u16 pause_time = (tx ? FSL_FM_PAUSE_TIME_ENABLE :
					 FSL_FM_PAUSE_TIME_DISABLE);

		err = mac_dev->set_tx_pause(fman_mac, 0, pause_time, 0);

		if (likely(err == 0))
			mac_dev->tx_pause_active = tx;
	}

	return err;
}
EXPORT_SYMBOL(fman_set_mac_active_pause);

/**
 * fman_get_pause_cfg
 * @mac_dev: A pointer to the MAC device
 * @rx_pause: Return value for the RX PAUSE setting
 * @tx_pause: Return value for the TX PAUSE setting
 *
 * Determine the MAC RX/TX PAUSE frames settings based on PHY
 * autonegotiation or values set by ethtool.
 *
 * Both outputs default to false; half-duplex links never enable PAUSE.
 */
void fman_get_pause_cfg(struct mac_device *mac_dev, bool *rx_pause,
			bool *tx_pause)
{
	struct phy_device *phy_dev = mac_dev->phy_dev;
	u16 lcl_adv, rmt_adv;
	u8 flowctrl;

	*rx_pause = *tx_pause = false;

	if (!phy_dev->duplex)
		return;

	/* If PAUSE autonegotiation is disabled, the TX/RX PAUSE settings
	 * are those set by ethtool.
	 */
	if (!mac_dev->autoneg_pause) {
		*rx_pause = mac_dev->rx_pause_req;
		*tx_pause = mac_dev->tx_pause_req;
		return;
	}

	/* Else if PAUSE autonegotiation is enabled, the TX/RX PAUSE
	 * settings depend on the result of the link negotiation.
	 */

	/* get local capabilities */
	lcl_adv = linkmode_adv_to_lcl_adv_t(phy_dev->advertising);

	/* get link partner capabilities */
	rmt_adv = 0;
	if (phy_dev->pause)
		rmt_adv |= LPA_PAUSE_CAP;
	if (phy_dev->asym_pause)
		rmt_adv |= LPA_PAUSE_ASYM;

	/* Calculate TX/RX settings based on local and peer advertised
	 * symmetric/asymmetric PAUSE capabilities.
	 */
	flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
	if (flowctrl & FLOW_CTRL_RX)
		*rx_pause = true;
	if (flowctrl & FLOW_CTRL_TX)
		*tx_pause = true;
}
EXPORT_SYMBOL(fman_get_pause_cfg);

/* No-op adjust_link for MACs with nothing to do on link change (TGEC). */
static void adjust_link_void(struct mac_device *mac_dev)
{
}

/* phylib adjust_link for dTSEC: track speed and renegotiate PAUSE. */
static void adjust_link_dtsec(struct mac_device *mac_dev)
{
	struct phy_device *phy_dev = mac_dev->phy_dev;
	struct fman_mac *fman_mac;
	bool rx_pause, tx_pause;
	int err;

	fman_mac = mac_dev->fman_mac;
	if (!phy_dev->link) {
		/* link lost: restart autonegotiation and wait */
		dtsec_restart_autoneg(fman_mac);

		return;
	}

	dtsec_adjust_link(fman_mac, phy_dev->speed);
	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
			err);
}

/* phylib adjust_link for mEMAC: track speed and renegotiate PAUSE. */
static void adjust_link_memac(struct mac_device *mac_dev)
{
	struct phy_device *phy_dev = mac_dev->phy_dev;
	struct fman_mac *fman_mac;
	bool rx_pause, tx_pause;
	int err;

	fman_mac = mac_dev->fman_mac;
	memac_adjust_link(fman_mac, phy_dev->speed);

	fman_get_pause_cfg(mac_dev, &rx_pause, &tx_pause);
	err = fman_set_mac_active_pause(mac_dev, rx_pause, tx_pause);
	if (err < 0)
		dev_err(mac_dev->priv->dev, "fman_set_mac_active_pause() = %d\n",
			err);
}

/* Wire up the dTSEC-specific ops on the mac_device. */
static void setup_dtsec(struct mac_device *mac_dev)
{
	mac_dev->init			= dtsec_initialization;
	mac_dev->set_promisc		= dtsec_set_promiscuous;
	mac_dev->change_addr		= dtsec_modify_mac_address;
	mac_dev->add_hash_mac_addr	= dtsec_add_hash_mac_address;
	mac_dev->remove_hash_mac_addr	= dtsec_del_hash_mac_address;
	mac_dev->set_tx_pause		= dtsec_set_tx_pause_frames;
	mac_dev->set_rx_pause		= dtsec_accept_rx_pause_frames;
	mac_dev->set_exception		= dtsec_set_exception;
	mac_dev->set_allmulti		= dtsec_set_allmulti;
	mac_dev->set_tstamp		= dtsec_set_tstamp;
	mac_dev->set_multi		= set_multi;
	mac_dev->start			= start;
	mac_dev->stop			= stop;
	mac_dev->adjust_link            = adjust_link_dtsec;
	mac_dev->priv->enable		= dtsec_enable;
	mac_dev->priv->disable		= dtsec_disable;
}

/* Wire up the TGEC-specific ops on the mac_device. */
static void setup_tgec(struct mac_device *mac_dev)
{
	mac_dev->init			= tgec_initialization;
	mac_dev->set_promisc		= tgec_set_promiscuous;
	mac_dev->change_addr		= tgec_modify_mac_address;
	mac_dev->add_hash_mac_addr	= tgec_add_hash_mac_address;
	mac_dev->remove_hash_mac_addr	= tgec_del_hash_mac_address;
	mac_dev->set_tx_pause		= tgec_set_tx_pause_frames;
	mac_dev->set_rx_pause		= tgec_accept_rx_pause_frames;
	mac_dev->set_exception		= tgec_set_exception;
	mac_dev->set_allmulti		= tgec_set_allmulti;
	mac_dev->set_tstamp		= tgec_set_tstamp;
	mac_dev->set_multi		= set_multi;
	mac_dev->start			= start;
	mac_dev->stop			= stop;
	mac_dev->adjust_link            = adjust_link_void;
	mac_dev->priv->enable		= tgec_enable;
	mac_dev->priv->disable		= tgec_disable;
}

/* Wire up the mEMAC-specific ops on the mac_device. */
static void setup_memac(struct mac_device *mac_dev)
{
	mac_dev->init			= memac_initialization;
	mac_dev->set_promisc		= memac_set_promiscuous;
	mac_dev->change_addr		= memac_modify_mac_address;
	mac_dev->add_hash_mac_addr	= memac_add_hash_mac_address;
	mac_dev->remove_hash_mac_addr	= memac_del_hash_mac_address;
	mac_dev->set_tx_pause		= memac_set_tx_pause_frames;
	mac_dev->set_rx_pause		= memac_accept_rx_pause_frames;
	mac_dev->set_exception		= memac_set_exception;
	mac_dev->set_allmulti		= memac_set_allmulti;
	mac_dev->set_tstamp		= memac_set_tstamp;
	mac_dev->set_multi		= set_multi;
	mac_dev->start			= start;
	mac_dev->stop			= stop;
	mac_dev->adjust_link            = adjust_link_memac;
	mac_dev->priv->enable		= memac_enable;
	mac_dev->priv->disable		= memac_disable;
}

/* ethtool SUPPORTED_* baseline advertised for dTSEC-class links */
#define DTSEC_SUPPORTED \
	(SUPPORTED_10baseT_Half \
	| SUPPORTED_10baseT_Full \
	| SUPPORTED_100baseT_Half \
	| SUPPORTED_100baseT_Full \
	| SUPPORTED_Autoneg \
	| SUPPORTED_Pause \
	| SUPPORTED_Asym_Pause \
	| SUPPORTED_MII)

/* serializes dpaa_eth_add_device() (protects dpaa_eth_dev_cnt) */
static DEFINE_MUTEX(eth_lock);

/* Map a phy_interface_t to its nominal link speed (entries not listed are 0). */
static const u16 phy2speed[] = {
	[PHY_INTERFACE_MODE_MII]		= SPEED_100,
	[PHY_INTERFACE_MODE_GMII]		= SPEED_1000,
	[PHY_INTERFACE_MODE_SGMII]		= SPEED_1000,
	[PHY_INTERFACE_MODE_TBI]		= SPEED_1000,
	[PHY_INTERFACE_MODE_RMII]		= SPEED_100,
	[PHY_INTERFACE_MODE_RGMII]		= SPEED_1000,
	[PHY_INTERFACE_MODE_RGMII_ID]		= SPEED_1000,
	[PHY_INTERFACE_MODE_RGMII_RXID]	= SPEED_1000,
	[PHY_INTERFACE_MODE_RGMII_TXID]	= SPEED_1000,
	[PHY_INTERFACE_MODE_RTBI]		= SPEED_1000,
	[PHY_INTERFACE_MODE_QSGMII]		= SPEED_1000,
	[PHY_INTERFACE_MODE_XGMII]		= SPEED_10000
};

/*
 * Register a "dpaa-ethernet" platform device child for this MAC so the
 * DPAA Ethernet driver can bind to it.  Returns the new platform device
 * or an ERR_PTR on failure.
 */
static struct platform_device *dpaa_eth_add_device(int fman_id,
						   struct mac_device *mac_dev)
{
	struct platform_device *pdev;
	struct dpaa_eth_data data;
	struct mac_priv_s	*priv;
	static int dpaa_eth_dev_cnt;
	int ret;

	priv = mac_dev->priv;

	data.mac_dev = mac_dev;
	data.mac_hw_id = priv->cell_index;
	data.fman_hw_id = fman_id;

	mutex_lock(&eth_lock);
	pdev = platform_device_alloc("dpaa-ethernet", dpaa_eth_dev_cnt);
	if (!pdev) {
		ret = -ENOMEM;
		goto no_mem;
	}

	pdev->dev.parent = priv->dev;

	ret = platform_device_add_data(pdev, &data, sizeof(data));
	if (ret)
		goto err;

	ret = platform_device_add(pdev);
	if (ret)
		goto err;

	dpaa_eth_dev_cnt++;
	mutex_unlock(&eth_lock);

	return pdev;

err:
	platform_device_put(pdev);
no_mem:
	mutex_unlock(&eth_lock);

	return ERR_PTR(ret);
}

static const struct of_device_id mac_match[] = {
	{ .compatible	= "fsl,fman-dtsec" },
	{ .compatible	= "fsl,fman-xgec" },
	{ .compatible	= "fsl,fman-memac" },
	{}
};
MODULE_DEVICE_TABLE(of, mac_match);

/*
 * Probe one FMan MAC node: parse the device tree (FMan parent, register
 * resource, cell-index, MAC address, port handles, PHY mode/handle or
 * fixed-link), select the HW-specific ops, run the MAC init, and finally
 * register the child "dpaa-ethernet" platform device.
 */
static int mac_probe(struct platform_device *_of_dev)
{
	int			 err, i, nph;
	struct device		*dev;
	struct device_node	*mac_node, *dev_node;
	struct mac_device	*mac_dev;
	struct platform_device	*of_dev;
	struct resource		 res;
	struct mac_priv_s	*priv;
	const u8		*mac_addr;
	u32			 val;
	u8			fman_id;
	int			phy_if;

	dev = &_of_dev->dev;
	mac_node = dev->of_node;

	mac_dev = devm_kzalloc(dev, sizeof(*mac_dev), GFP_KERNEL);
	if (!mac_dev) {
		err = -ENOMEM;
		goto _return;
	}
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		err = -ENOMEM;
		goto _return;
	}

	/* Save private information */
	mac_dev->priv = priv;
	priv->dev = dev;

	/* Select the ops table from the compatible string */
	if (of_device_is_compatible(mac_node, "fsl,fman-dtsec")) {
		setup_dtsec(mac_dev);
		priv->internal_phy_node = of_parse_phandle(mac_node,
							   "tbi-handle", 0);
	} else if (of_device_is_compatible(mac_node, "fsl,fman-xgec")) {
		setup_tgec(mac_dev);
	} else if (of_device_is_compatible(mac_node, "fsl,fman-memac")) {
		setup_memac(mac_dev);
		priv->internal_phy_node = of_parse_phandle(mac_node,
							   "pcsphy-handle", 0);
	} else {
		dev_err(dev, "MAC node (%pOF) contains unsupported MAC\n",
			mac_node);
		err = -EINVAL;
		goto _return;
	}

	INIT_LIST_HEAD(&priv->mc_addr_list);

	/* Get the FM node */
	dev_node = of_get_parent(mac_node);
	if (!dev_node) {
		dev_err(dev, "of_get_parent(%pOF) failed\n", mac_node);
		err = -EINVAL;
		goto _return_of_get_parent;
	}

	of_dev = of_find_device_by_node(dev_node);
	if (!of_dev) {
		dev_err(dev, "of_find_device_by_node(%pOF) failed\n", dev_node);
		err = -EINVAL;
		goto _return_of_node_put;
	}

	/* Get the FMan cell-index */
	err = of_property_read_u32(dev_node, "cell-index", &val);
	if (err) {
		dev_err(dev, "failed to read cell-index for %pOF\n", dev_node);
		err = -EINVAL;
		goto _return_of_node_put;
	}
	/* cell-index 0 => FMan id 1 */
	fman_id = (u8)(val + 1);

	priv->fman = fman_bind(&of_dev->dev);
	if (!priv->fman) {
		dev_err(dev, "fman_bind(%pOF) failed\n", dev_node);
		err = -ENODEV;
		goto _return_of_node_put;
	}

	of_node_put(dev_node);

	/* Get the address of the memory mapped registers */
	err = of_address_to_resource(mac_node, 0, &res);
	if (err < 0) {
		dev_err(dev, "of_address_to_resource(%pOF) = %d\n",
			mac_node, err);
		goto _return_of_get_parent;
	}

	mac_dev->res = __devm_request_region(dev,
					     fman_get_mem_region(priv->fman),
					     res.start, res.end + 1 - res.start,
					     "mac");
	if (!mac_dev->res) {
		dev_err(dev, "__devm_request_mem_region(mac) failed\n");
		err = -EBUSY;
		goto _return_of_get_parent;
	}

	priv->vaddr = devm_ioremap(dev, mac_dev->res->start,
				   mac_dev->res->end + 1 - mac_dev->res->start);
	if (!priv->vaddr) {
		dev_err(dev, "devm_ioremap() failed\n");
		err = -EIO;
		goto _return_of_get_parent;
	}

	if (!of_device_is_available(mac_node)) {
		err = -ENODEV;
		goto _return_of_get_parent;
	}

	/* Get the cell-index */
	err = of_property_read_u32(mac_node, "cell-index", &val);
	if (err) {
		dev_err(dev, "failed to read cell-index for %pOF\n", mac_node);
		err = -EINVAL;
		goto _return_of_get_parent;
	}
	priv->cell_index = (u8)val;

	/* Get the MAC address */
	mac_addr = of_get_mac_address(mac_node);
	if (IS_ERR(mac_addr)) {
		dev_err(dev, "of_get_mac_address(%pOF) failed\n", mac_node);
		err = -EINVAL;
		goto _return_of_get_parent;
	}
	ether_addr_copy(mac_dev->addr, mac_addr);

	/* Get the port handles */
	nph = of_count_phandle_with_args(mac_node, "fsl,fman-ports", NULL);
	if (unlikely(nph < 0)) {
		dev_err(dev, "of_count_phandle_with_args(%pOF, fsl,fman-ports) failed\n",
			mac_node);
		err = nph;
		goto _return_of_get_parent;
	}

	if (nph != ARRAY_SIZE(mac_dev->port)) {
		dev_err(dev, "Not supported number of fman-ports handles of mac node %pOF from device tree\n",
			mac_node);
		err = -EINVAL;
		goto _return_of_get_parent;
	}

	for (i = 0; i < ARRAY_SIZE(mac_dev->port); i++) {
		/* Find the port node */
		dev_node = of_parse_phandle(mac_node, "fsl,fman-ports", i);
		if (!dev_node) {
			dev_err(dev, "of_parse_phandle(%pOF, fsl,fman-ports) failed\n",
				mac_node);
			err = -EINVAL;
			goto _return_of_node_put;
		}

		of_dev = of_find_device_by_node(dev_node);
		if (!of_dev) {
			dev_err(dev, "of_find_device_by_node(%pOF) failed\n",
				dev_node);
			err = -EINVAL;
			goto _return_of_node_put;
		}

		mac_dev->port[i] = fman_port_bind(&of_dev->dev);
		if (!mac_dev->port[i]) {
			dev_err(dev, "dev_get_drvdata(%pOF) failed\n",
				dev_node);
			err = -EINVAL;
			goto _return_of_node_put;
		}
		of_node_put(dev_node);
	}

	/* Get the PHY connection type */
	phy_if = of_get_phy_mode(mac_node);
	if (phy_if < 0) {
		dev_warn(dev,
			 "of_get_phy_mode() for %pOF failed. Defaulting to SGMII\n",
			 mac_node);
		phy_if = PHY_INTERFACE_MODE_SGMII;
	}
	mac_dev->phy_if = phy_if;

	/* NOTE(review): modes absent from phy2speed index as 0 here */
	priv->speed		= phy2speed[mac_dev->phy_if];
	priv->max_speed		= priv->speed;
	mac_dev->if_support	= DTSEC_SUPPORTED;
	/* We don't support half-duplex in SGMII mode */
	if (mac_dev->phy_if == PHY_INTERFACE_MODE_SGMII)
		mac_dev->if_support &= ~(SUPPORTED_10baseT_Half |
					SUPPORTED_100baseT_Half);

	/* Gigabit support (no half-duplex) */
	if (priv->max_speed == 1000)
		mac_dev->if_support |= SUPPORTED_1000baseT_Full;

	/* The 10G interface only supports one mode */
	if (mac_dev->phy_if == PHY_INTERFACE_MODE_XGMII)
		mac_dev->if_support = SUPPORTED_10000baseT_Full;

	/* Get the rest of the PHY information */
	mac_dev->phy_node = of_parse_phandle(mac_node, "phy-handle", 0);
	if (!mac_dev->phy_node && of_phy_is_fixed_link(mac_node)) {
		/* No PHY handle: snapshot the fixed-link parameters instead */
		struct phy_device *phy;

		err = of_phy_register_fixed_link(mac_node);
		if (err)
			goto _return_of_get_parent;

		priv->fixed_link = kzalloc(sizeof(*priv->fixed_link),
					   GFP_KERNEL);
		if (!priv->fixed_link) {
			err = -ENOMEM;
			goto _return_of_get_parent;
		}

		mac_dev->phy_node = of_node_get(mac_node);
		phy = of_phy_find_device(mac_dev->phy_node);
		if (!phy) {
			err = -EINVAL;
			of_node_put(mac_dev->phy_node);
			goto _return_of_get_parent;
		}

		priv->fixed_link->link = phy->link;
		priv->fixed_link->speed = phy->speed;
		priv->fixed_link->duplex = phy->duplex;
		priv->fixed_link->pause = phy->pause;
		priv->fixed_link->asym_pause = phy->asym_pause;

		/* of_phy_find_device() took a reference; drop it */
		put_device(&phy->mdio.dev);
	}

	err = mac_dev->init(mac_dev);
	if (err < 0) {
		dev_err(dev, "mac_dev->init() = %d\n", err);
		of_node_put(mac_dev->phy_node);
		goto _return_of_get_parent;
	}

	/* pause frame autonegotiation enabled */
	mac_dev->autoneg_pause = true;

	/* By initializing the active values to false while requesting true,
	 * force FMD to enable PAUSE frames on RX and TX
	 */
	mac_dev->rx_pause_req = true;
	mac_dev->tx_pause_req = true;
	mac_dev->rx_pause_active = false;
	mac_dev->tx_pause_active = false;
	err = fman_set_mac_active_pause(mac_dev, true, true);
	if (err < 0)
		dev_err(dev, "fman_set_mac_active_pause() = %d\n", err);

	dev_info(dev, "FMan MAC address: %pM\n", mac_dev->addr);

	priv->eth_dev = dpaa_eth_add_device(fman_id, mac_dev);
	if (IS_ERR(priv->eth_dev)) {
		/* probe still succeeds; only the child device is missing */
		dev_err(dev, "failed to add Ethernet platform device for MAC %d\n",
			priv->cell_index);
		priv->eth_dev = NULL;
	}

	goto _return;

_return_of_node_put:
	of_node_put(dev_node);
_return_of_get_parent:
	kfree(priv->fixed_link);
_return:
	return err;
}

static struct platform_driver mac_driver = {
	.driver = {
		.name		= KBUILD_MODNAME,
		.of_match_table	= mac_match,
	},
	.probe		= mac_probe,
};

builtin_platform_driver(mac_driver);
gpl-2.0
croniccorey/OnePlus2-Kernel
drivers/hv/vmbus_drv.c
1342
21891
/* * Copyright (c) 2009, Microsoft Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Authors: * Haiyang Zhang <haiyangz@microsoft.com> * Hank Janssen <hjanssen@microsoft.com> * K. Y. Srinivasan <kys@microsoft.com> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/sysctl.h> #include <linux/slab.h> #include <linux/acpi.h> #include <acpi/acpi_bus.h> #include <linux/completion.h> #include <linux/hyperv.h> #include <linux/kernel_stat.h> #include <asm/hyperv.h> #include <asm/hypervisor.h> #include <asm/mshyperv.h> #include "hyperv_vmbus.h" static struct acpi_device *hv_acpi_dev; static struct tasklet_struct msg_dpc; static struct completion probe_event; static int irq; struct hv_device_info { u32 chn_id; u32 chn_state; uuid_le chn_type; uuid_le chn_instance; u32 monitor_id; u32 server_monitor_pending; u32 server_monitor_latency; u32 server_monitor_conn_id; u32 client_monitor_pending; u32 client_monitor_latency; u32 client_monitor_conn_id; struct hv_dev_port_info inbound; struct hv_dev_port_info outbound; }; static int vmbus_exists(void) { if (hv_acpi_dev == NULL) return -ENODEV; return 0; } static void get_channel_info(struct hv_device *device, struct hv_device_info *info) { struct vmbus_channel_debug_info debug_info; if 
(!device->channel) return; vmbus_get_debug_info(device->channel, &debug_info); info->chn_id = debug_info.relid; info->chn_state = debug_info.state; memcpy(&info->chn_type, &debug_info.interfacetype, sizeof(uuid_le)); memcpy(&info->chn_instance, &debug_info.interface_instance, sizeof(uuid_le)); info->monitor_id = debug_info.monitorid; info->server_monitor_pending = debug_info.servermonitor_pending; info->server_monitor_latency = debug_info.servermonitor_latency; info->server_monitor_conn_id = debug_info.servermonitor_connectionid; info->client_monitor_pending = debug_info.clientmonitor_pending; info->client_monitor_latency = debug_info.clientmonitor_latency; info->client_monitor_conn_id = debug_info.clientmonitor_connectionid; info->inbound.int_mask = debug_info.inbound.current_interrupt_mask; info->inbound.read_idx = debug_info.inbound.current_read_index; info->inbound.write_idx = debug_info.inbound.current_write_index; info->inbound.bytes_avail_toread = debug_info.inbound.bytes_avail_toread; info->inbound.bytes_avail_towrite = debug_info.inbound.bytes_avail_towrite; info->outbound.int_mask = debug_info.outbound.current_interrupt_mask; info->outbound.read_idx = debug_info.outbound.current_read_index; info->outbound.write_idx = debug_info.outbound.current_write_index; info->outbound.bytes_avail_toread = debug_info.outbound.bytes_avail_toread; info->outbound.bytes_avail_towrite = debug_info.outbound.bytes_avail_towrite; } #define VMBUS_ALIAS_LEN ((sizeof((struct hv_vmbus_device_id *)0)->guid) * 2) static void print_alias_name(struct hv_device *hv_dev, char *alias_name) { int i; for (i = 0; i < VMBUS_ALIAS_LEN; i += 2) sprintf(&alias_name[i], "%02x", hv_dev->dev_type.b[i/2]); } /* * vmbus_show_device_attr - Show the device attribute in sysfs. 
* * This is invoked when user does a * "cat /sys/bus/vmbus/devices/<busdevice>/<attr name>" */ static ssize_t vmbus_show_device_attr(struct device *dev, struct device_attribute *dev_attr, char *buf) { struct hv_device *hv_dev = device_to_hv_device(dev); struct hv_device_info *device_info; char alias_name[VMBUS_ALIAS_LEN + 1]; int ret = 0; device_info = kzalloc(sizeof(struct hv_device_info), GFP_KERNEL); if (!device_info) return ret; get_channel_info(hv_dev, device_info); if (!strcmp(dev_attr->attr.name, "class_id")) { ret = sprintf(buf, "{%pUl}\n", device_info->chn_type.b); } else if (!strcmp(dev_attr->attr.name, "device_id")) { ret = sprintf(buf, "{%pUl}\n", device_info->chn_instance.b); } else if (!strcmp(dev_attr->attr.name, "modalias")) { print_alias_name(hv_dev, alias_name); ret = sprintf(buf, "vmbus:%s\n", alias_name); } else if (!strcmp(dev_attr->attr.name, "state")) { ret = sprintf(buf, "%d\n", device_info->chn_state); } else if (!strcmp(dev_attr->attr.name, "id")) { ret = sprintf(buf, "%d\n", device_info->chn_id); } else if (!strcmp(dev_attr->attr.name, "out_intr_mask")) { ret = sprintf(buf, "%d\n", device_info->outbound.int_mask); } else if (!strcmp(dev_attr->attr.name, "out_read_index")) { ret = sprintf(buf, "%d\n", device_info->outbound.read_idx); } else if (!strcmp(dev_attr->attr.name, "out_write_index")) { ret = sprintf(buf, "%d\n", device_info->outbound.write_idx); } else if (!strcmp(dev_attr->attr.name, "out_read_bytes_avail")) { ret = sprintf(buf, "%d\n", device_info->outbound.bytes_avail_toread); } else if (!strcmp(dev_attr->attr.name, "out_write_bytes_avail")) { ret = sprintf(buf, "%d\n", device_info->outbound.bytes_avail_towrite); } else if (!strcmp(dev_attr->attr.name, "in_intr_mask")) { ret = sprintf(buf, "%d\n", device_info->inbound.int_mask); } else if (!strcmp(dev_attr->attr.name, "in_read_index")) { ret = sprintf(buf, "%d\n", device_info->inbound.read_idx); } else if (!strcmp(dev_attr->attr.name, "in_write_index")) { ret = sprintf(buf, 
"%d\n", device_info->inbound.write_idx); } else if (!strcmp(dev_attr->attr.name, "in_read_bytes_avail")) { ret = sprintf(buf, "%d\n", device_info->inbound.bytes_avail_toread); } else if (!strcmp(dev_attr->attr.name, "in_write_bytes_avail")) { ret = sprintf(buf, "%d\n", device_info->inbound.bytes_avail_towrite); } else if (!strcmp(dev_attr->attr.name, "monitor_id")) { ret = sprintf(buf, "%d\n", device_info->monitor_id); } else if (!strcmp(dev_attr->attr.name, "server_monitor_pending")) { ret = sprintf(buf, "%d\n", device_info->server_monitor_pending); } else if (!strcmp(dev_attr->attr.name, "server_monitor_latency")) { ret = sprintf(buf, "%d\n", device_info->server_monitor_latency); } else if (!strcmp(dev_attr->attr.name, "server_monitor_conn_id")) { ret = sprintf(buf, "%d\n", device_info->server_monitor_conn_id); } else if (!strcmp(dev_attr->attr.name, "client_monitor_pending")) { ret = sprintf(buf, "%d\n", device_info->client_monitor_pending); } else if (!strcmp(dev_attr->attr.name, "client_monitor_latency")) { ret = sprintf(buf, "%d\n", device_info->client_monitor_latency); } else if (!strcmp(dev_attr->attr.name, "client_monitor_conn_id")) { ret = sprintf(buf, "%d\n", device_info->client_monitor_conn_id); } kfree(device_info); return ret; } /* Set up per device attributes in /sys/bus/vmbus/devices/<bus device> */ static struct device_attribute vmbus_device_attrs[] = { __ATTR(id, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(state, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(class_id, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(device_id, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(monitor_id, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(modalias, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(server_monitor_pending, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(server_monitor_latency, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(server_monitor_conn_id, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(client_monitor_pending, S_IRUGO, 
vmbus_show_device_attr, NULL), __ATTR(client_monitor_latency, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(client_monitor_conn_id, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(out_intr_mask, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(out_read_index, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(out_write_index, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(out_read_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(out_write_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(in_intr_mask, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(in_read_index, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(in_write_index, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(in_read_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR(in_write_bytes_avail, S_IRUGO, vmbus_show_device_attr, NULL), __ATTR_NULL }; /* * vmbus_uevent - add uevent for our device * * This routine is invoked when a device is added or removed on the vmbus to * generate a uevent to udev in the userspace. The udev will then look at its * rule and the uevent generated here to load the appropriate driver * * The alias string will be of the form vmbus:guid where guid is the string * representation of the device guid (each byte of the guid will be * represented with two hex characters. */ static int vmbus_uevent(struct device *device, struct kobj_uevent_env *env) { struct hv_device *dev = device_to_hv_device(device); int ret; char alias_name[VMBUS_ALIAS_LEN + 1]; print_alias_name(dev, alias_name); ret = add_uevent_var(env, "MODALIAS=vmbus:%s", alias_name); return ret; } static uuid_le null_guid; static inline bool is_null_guid(const __u8 *guid) { if (memcmp(guid, &null_guid, sizeof(uuid_le))) return false; return true; } /* * Return a matching hv_vmbus_device_id pointer. * If there is no match, return NULL. 
*/ static const struct hv_vmbus_device_id *hv_vmbus_get_id( const struct hv_vmbus_device_id *id, __u8 *guid) { for (; !is_null_guid(id->guid); id++) if (!memcmp(&id->guid, guid, sizeof(uuid_le))) return id; return NULL; } /* * vmbus_match - Attempt to match the specified device to the specified driver */ static int vmbus_match(struct device *device, struct device_driver *driver) { struct hv_driver *drv = drv_to_hv_drv(driver); struct hv_device *hv_dev = device_to_hv_device(device); if (hv_vmbus_get_id(drv->id_table, hv_dev->dev_type.b)) return 1; return 0; } /* * vmbus_probe - Add the new vmbus's child device */ static int vmbus_probe(struct device *child_device) { int ret = 0; struct hv_driver *drv = drv_to_hv_drv(child_device->driver); struct hv_device *dev = device_to_hv_device(child_device); const struct hv_vmbus_device_id *dev_id; dev_id = hv_vmbus_get_id(drv->id_table, dev->dev_type.b); if (drv->probe) { ret = drv->probe(dev, dev_id); if (ret != 0) pr_err("probe failed for device %s (%d)\n", dev_name(child_device), ret); } else { pr_err("probe not set for driver %s\n", dev_name(child_device)); ret = -ENODEV; } return ret; } /* * vmbus_remove - Remove a vmbus device */ static int vmbus_remove(struct device *child_device) { struct hv_driver *drv = drv_to_hv_drv(child_device->driver); struct hv_device *dev = device_to_hv_device(child_device); if (drv->remove) drv->remove(dev); else pr_err("remove not set for driver %s\n", dev_name(child_device)); return 0; } /* * vmbus_shutdown - Shutdown a vmbus device */ static void vmbus_shutdown(struct device *child_device) { struct hv_driver *drv; struct hv_device *dev = device_to_hv_device(child_device); /* The device may not be attached yet */ if (!child_device->driver) return; drv = drv_to_hv_drv(child_device->driver); if (drv->shutdown) drv->shutdown(dev); return; } /* * vmbus_device_release - Final callback release of the vmbus child device */ static void vmbus_device_release(struct device *device) { struct hv_device 
*hv_dev = device_to_hv_device(device); kfree(hv_dev); } /* The one and only one */ static struct bus_type hv_bus = { .name = "vmbus", .match = vmbus_match, .shutdown = vmbus_shutdown, .remove = vmbus_remove, .probe = vmbus_probe, .uevent = vmbus_uevent, .dev_attrs = vmbus_device_attrs, }; static const char *driver_name = "hyperv"; struct onmessage_work_context { struct work_struct work; struct hv_message msg; }; static void vmbus_onmessage_work(struct work_struct *work) { struct onmessage_work_context *ctx; ctx = container_of(work, struct onmessage_work_context, work); vmbus_onmessage(&ctx->msg); kfree(ctx); } static void vmbus_on_msg_dpc(unsigned long data) { int cpu = smp_processor_id(); void *page_addr = hv_context.synic_message_page[cpu]; struct hv_message *msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; struct onmessage_work_context *ctx; while (1) { if (msg->header.message_type == HVMSG_NONE) { /* no msg */ break; } else { ctx = kmalloc(sizeof(*ctx), GFP_ATOMIC); if (ctx == NULL) continue; INIT_WORK(&ctx->work, vmbus_onmessage_work); memcpy(&ctx->msg, msg, sizeof(*msg)); queue_work(vmbus_connection.work_queue, &ctx->work); } msg->header.message_type = HVMSG_NONE; /* * Make sure the write to MessageType (ie set to * HVMSG_NONE) happens before we read the * MessagePending and EOMing. Otherwise, the EOMing * will not deliver any more messages since there is * no empty slot */ mb(); if (msg->header.message_flags.msg_pending) { /* * This will cause message queue rescan to * possibly deliver another msg from the * hypervisor */ wrmsrl(HV_X64_MSR_EOM, 0); } } } static irqreturn_t vmbus_isr(int irq, void *dev_id) { int cpu = smp_processor_id(); void *page_addr; struct hv_message *msg; union hv_synic_event_flags *event; bool handled = false; page_addr = hv_context.synic_event_page[cpu]; if (page_addr == NULL) return IRQ_NONE; event = (union hv_synic_event_flags *)page_addr + VMBUS_MESSAGE_SINT; /* * Check for events before checking for messages. 
This is the order * in which events and messages are checked in Windows guests on * Hyper-V, and the Windows team suggested we do the same. */ if ((vmbus_proto_version == VERSION_WS2008) || (vmbus_proto_version == VERSION_WIN7)) { /* Since we are a child, we only need to check bit 0 */ if (sync_test_and_clear_bit(0, (unsigned long *) &event->flags32[0])) { handled = true; } } else { /* * Our host is win8 or above. The signaling mechanism * has changed and we can directly look at the event page. * If bit n is set then we have an interrup on the channel * whose id is n. */ handled = true; } if (handled) tasklet_schedule(hv_context.event_dpc[cpu]); page_addr = hv_context.synic_message_page[cpu]; msg = (struct hv_message *)page_addr + VMBUS_MESSAGE_SINT; /* Check if there are actual msgs to be processed */ if (msg->header.message_type != HVMSG_NONE) { handled = true; tasklet_schedule(&msg_dpc); } if (handled) return IRQ_HANDLED; else return IRQ_NONE; } /* * vmbus interrupt flow handler: * vmbus interrupts can concurrently occur on multiple CPUs and * can be handled concurrently. */ static void vmbus_flow_handler(unsigned int irq, struct irq_desc *desc) { kstat_incr_irqs_this_cpu(irq, desc); desc->action->handler(irq, desc->action->dev_id); } /* * vmbus_bus_init -Main vmbus driver initialization routine. 
* * Here, we * - initialize the vmbus driver context * - invoke the vmbus hv main init routine * - get the irq resource * - retrieve the channel offers */ static int vmbus_bus_init(int irq) { int ret; /* Hypervisor initialization...setup hypercall page..etc */ ret = hv_init(); if (ret != 0) { pr_err("Unable to initialize the hypervisor - 0x%x\n", ret); return ret; } tasklet_init(&msg_dpc, vmbus_on_msg_dpc, 0); ret = bus_register(&hv_bus); if (ret) goto err_cleanup; ret = request_irq(irq, vmbus_isr, 0, driver_name, hv_acpi_dev); if (ret != 0) { pr_err("Unable to request IRQ %d\n", irq); goto err_unregister; } /* * Vmbus interrupts can be handled concurrently on * different CPUs. Establish an appropriate interrupt flow * handler that can support this model. */ irq_set_handler(irq, vmbus_flow_handler); /* * Register our interrupt handler. */ hv_register_vmbus_handler(irq, vmbus_isr); /* * Initialize the per-cpu interrupt state and * connect to the host. */ on_each_cpu(hv_synic_init, NULL, 1); ret = vmbus_connect(); if (ret) goto err_irq; vmbus_request_offers(); return 0; err_irq: free_irq(irq, hv_acpi_dev); err_unregister: bus_unregister(&hv_bus); err_cleanup: hv_cleanup(); return ret; } /** * __vmbus_child_driver_register - Register a vmbus's driver * @drv: Pointer to driver structure you want to register * @owner: owner module of the drv * @mod_name: module name string * * Registers the given driver with Linux through the 'driver_register()' call * and sets up the hyper-v vmbus handling for this driver. * It will return the state of the 'driver_register()' call. 
* */ int __vmbus_driver_register(struct hv_driver *hv_driver, struct module *owner, const char *mod_name) { int ret; pr_info("registering driver %s\n", hv_driver->name); ret = vmbus_exists(); if (ret < 0) return ret; hv_driver->driver.name = hv_driver->name; hv_driver->driver.owner = owner; hv_driver->driver.mod_name = mod_name; hv_driver->driver.bus = &hv_bus; ret = driver_register(&hv_driver->driver); return ret; } EXPORT_SYMBOL_GPL(__vmbus_driver_register); /** * vmbus_driver_unregister() - Unregister a vmbus's driver * @drv: Pointer to driver structure you want to un-register * * Un-register the given driver that was previous registered with a call to * vmbus_driver_register() */ void vmbus_driver_unregister(struct hv_driver *hv_driver) { pr_info("unregistering driver %s\n", hv_driver->name); if (!vmbus_exists()) driver_unregister(&hv_driver->driver); } EXPORT_SYMBOL_GPL(vmbus_driver_unregister); /* * vmbus_device_create - Creates and registers a new child device * on the vmbus. */ struct hv_device *vmbus_device_create(uuid_le *type, uuid_le *instance, struct vmbus_channel *channel) { struct hv_device *child_device_obj; child_device_obj = kzalloc(sizeof(struct hv_device), GFP_KERNEL); if (!child_device_obj) { pr_err("Unable to allocate device object for child device\n"); return NULL; } child_device_obj->channel = channel; memcpy(&child_device_obj->dev_type, type, sizeof(uuid_le)); memcpy(&child_device_obj->dev_instance, instance, sizeof(uuid_le)); return child_device_obj; } /* * vmbus_device_register - Register the child device */ int vmbus_device_register(struct hv_device *child_device_obj) { int ret = 0; static atomic_t device_num = ATOMIC_INIT(0); dev_set_name(&child_device_obj->device, "vmbus_0_%d", atomic_inc_return(&device_num)); child_device_obj->device.bus = &hv_bus; child_device_obj->device.parent = &hv_acpi_dev->dev; child_device_obj->device.release = vmbus_device_release; /* * Register with the LDM. 
This will kick off the driver/device * binding...which will eventually call vmbus_match() and vmbus_probe() */ ret = device_register(&child_device_obj->device); if (ret) pr_err("Unable to register child device\n"); else pr_info("child device %s registered\n", dev_name(&child_device_obj->device)); return ret; } /* * vmbus_device_unregister - Remove the specified child device * from the vmbus. */ void vmbus_device_unregister(struct hv_device *device_obj) { /* * Kick off the process of unregistering the device. * This will call vmbus_remove() and eventually vmbus_device_release() */ device_unregister(&device_obj->device); pr_info("child device %s unregistered\n", dev_name(&device_obj->device)); } /* * VMBUS is an acpi enumerated device. Get the the IRQ information * from DSDT. */ static acpi_status vmbus_walk_resources(struct acpi_resource *res, void *irq) { if (res->type == ACPI_RESOURCE_TYPE_IRQ) { struct acpi_resource_irq *irqp; irqp = &res->data.irq; *((unsigned int *)irq) = irqp->interrupts[0]; } return AE_OK; } static int vmbus_acpi_add(struct acpi_device *device) { acpi_status result; hv_acpi_dev = device; result = acpi_walk_resources(device->handle, METHOD_NAME__CRS, vmbus_walk_resources, &irq); if (ACPI_FAILURE(result)) { complete(&probe_event); return -ENODEV; } complete(&probe_event); return 0; } static const struct acpi_device_id vmbus_acpi_device_ids[] = { {"VMBUS", 0}, {"VMBus", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, vmbus_acpi_device_ids); static struct acpi_driver vmbus_acpi_driver = { .name = "vmbus", .ids = vmbus_acpi_device_ids, .ops = { .add = vmbus_acpi_add, }, }; static int __init hv_acpi_init(void) { int ret, t; if (x86_hyper != &x86_hyper_ms_hyperv) return -ENODEV; init_completion(&probe_event); /* * Get irq resources first. 
*/ ret = acpi_bus_register_driver(&vmbus_acpi_driver); if (ret) return ret; t = wait_for_completion_timeout(&probe_event, 5*HZ); if (t == 0) { ret = -ETIMEDOUT; goto cleanup; } if (irq <= 0) { ret = -ENODEV; goto cleanup; } ret = vmbus_bus_init(irq); if (ret) goto cleanup; return 0; cleanup: acpi_bus_unregister_driver(&vmbus_acpi_driver); hv_acpi_dev = NULL; return ret; } static void __exit vmbus_exit(void) { free_irq(irq, hv_acpi_dev); vmbus_free_channels(); bus_unregister(&hv_bus); hv_cleanup(); acpi_bus_unregister_driver(&vmbus_acpi_driver); } MODULE_LICENSE("GPL"); MODULE_VERSION(HV_DRV_VERSION); subsys_initcall(hv_acpi_init); module_exit(vmbus_exit);
gpl-2.0
jamison904/N920T
drivers/scsi/qla2xxx/qla_nx.c
2110
117151
/* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2013 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include <linux/delay.h> #include <linux/pci.h> #include <linux/ratelimit.h> #include <linux/vmalloc.h> #include <scsi/scsi_tcq.h> #define MASK(n) ((1ULL<<(n))-1) #define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | \ ((addr >> 25) & 0x3ff)) #define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | \ ((addr >> 25) & 0x3ff)) #define MS_WIN(addr) (addr & 0x0ffc0000) #define QLA82XX_PCI_MN_2M (0) #define QLA82XX_PCI_MS_2M (0x80000) #define QLA82XX_PCI_OCM0_2M (0xc0000) #define VALID_OCM_ADDR(addr) (((addr) & 0x3f800) != 0x3f800) #define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) #define BLOCK_PROTECT_BITS 0x0F /* CRB window related */ #define CRB_BLK(off) ((off >> 20) & 0x3f) #define CRB_SUBBLK(off) ((off >> 16) & 0xf) #define CRB_WINDOW_2M (0x130060) #define QLA82XX_PCI_CAMQM_2M_END (0x04800800UL) #define CRB_HI(off) ((qla82xx_crb_hub_agt[CRB_BLK(off)] << 20) | \ ((off) & 0xf0000)) #define QLA82XX_PCI_CAMQM_2M_BASE (0x000ff800UL) #define CRB_INDIRECT_2M (0x1e0000UL) #define MAX_CRB_XFORM 60 static unsigned long crb_addr_xform[MAX_CRB_XFORM]; static int qla82xx_crb_table_initialized; #define qla82xx_crb_addr_transform(name) \ (crb_addr_xform[QLA82XX_HW_PX_MAP_CRB_##name] = \ QLA82XX_HW_CRB_HUB_AGT_ADR_##name << 20) static void qla82xx_crb_addr_transform_setup(void) { qla82xx_crb_addr_transform(XDMA); qla82xx_crb_addr_transform(TIMR); qla82xx_crb_addr_transform(SRE); qla82xx_crb_addr_transform(SQN3); qla82xx_crb_addr_transform(SQN2); qla82xx_crb_addr_transform(SQN1); qla82xx_crb_addr_transform(SQN0); qla82xx_crb_addr_transform(SQS3); qla82xx_crb_addr_transform(SQS2); qla82xx_crb_addr_transform(SQS1); qla82xx_crb_addr_transform(SQS0); qla82xx_crb_addr_transform(RPMX7); qla82xx_crb_addr_transform(RPMX6); qla82xx_crb_addr_transform(RPMX5); qla82xx_crb_addr_transform(RPMX4); qla82xx_crb_addr_transform(RPMX3); 
qla82xx_crb_addr_transform(RPMX2); qla82xx_crb_addr_transform(RPMX1); qla82xx_crb_addr_transform(RPMX0); qla82xx_crb_addr_transform(ROMUSB); qla82xx_crb_addr_transform(SN); qla82xx_crb_addr_transform(QMN); qla82xx_crb_addr_transform(QMS); qla82xx_crb_addr_transform(PGNI); qla82xx_crb_addr_transform(PGND); qla82xx_crb_addr_transform(PGN3); qla82xx_crb_addr_transform(PGN2); qla82xx_crb_addr_transform(PGN1); qla82xx_crb_addr_transform(PGN0); qla82xx_crb_addr_transform(PGSI); qla82xx_crb_addr_transform(PGSD); qla82xx_crb_addr_transform(PGS3); qla82xx_crb_addr_transform(PGS2); qla82xx_crb_addr_transform(PGS1); qla82xx_crb_addr_transform(PGS0); qla82xx_crb_addr_transform(PS); qla82xx_crb_addr_transform(PH); qla82xx_crb_addr_transform(NIU); qla82xx_crb_addr_transform(I2Q); qla82xx_crb_addr_transform(EG); qla82xx_crb_addr_transform(MN); qla82xx_crb_addr_transform(MS); qla82xx_crb_addr_transform(CAS2); qla82xx_crb_addr_transform(CAS1); qla82xx_crb_addr_transform(CAS0); qla82xx_crb_addr_transform(CAM); qla82xx_crb_addr_transform(C2C1); qla82xx_crb_addr_transform(C2C0); qla82xx_crb_addr_transform(SMB); qla82xx_crb_addr_transform(OCM0); /* * Used only in P3 just define it for P2 also. 
*/ qla82xx_crb_addr_transform(I2C0); qla82xx_crb_table_initialized = 1; } static struct crb_128M_2M_block_map crb_128M_2M_map[64] = { {{{0, 0, 0, 0} } }, {{{1, 0x0100000, 0x0102000, 0x120000}, {1, 0x0110000, 0x0120000, 0x130000}, {1, 0x0120000, 0x0122000, 0x124000}, {1, 0x0130000, 0x0132000, 0x126000}, {1, 0x0140000, 0x0142000, 0x128000}, {1, 0x0150000, 0x0152000, 0x12a000}, {1, 0x0160000, 0x0170000, 0x110000}, {1, 0x0170000, 0x0172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x01e0000, 0x01e0800, 0x122000}, {0, 0x0000000, 0x0000000, 0x000000} } } , {{{1, 0x0200000, 0x0210000, 0x180000} } }, {{{0, 0, 0, 0} } }, {{{1, 0x0400000, 0x0401000, 0x169000} } }, {{{1, 0x0500000, 0x0510000, 0x140000} } }, {{{1, 0x0600000, 0x0610000, 0x1c0000} } }, {{{1, 0x0700000, 0x0704000, 0x1b8000} } }, {{{1, 0x0800000, 0x0802000, 0x170000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x08f0000, 0x08f2000, 0x172000} } }, {{{1, 0x0900000, 0x0902000, 0x174000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 
0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x09f0000, 0x09f2000, 0x176000} } }, {{{0, 0x0a00000, 0x0a02000, 0x178000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0af0000, 0x0af2000, 0x17a000} } }, {{{0, 0x0b00000, 0x0b02000, 0x17c000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {1, 0x0bf0000, 0x0bf2000, 0x17e000} } }, {{{1, 0x0c00000, 0x0c04000, 0x1d4000} } }, {{{1, 0x0d00000, 0x0d04000, 0x1a4000} } }, {{{1, 0x0e00000, 0x0e04000, 0x1a0000} } }, {{{1, 0x0f00000, 0x0f01000, 0x164000} } }, {{{0, 0x1000000, 0x1004000, 0x1a8000} } }, {{{1, 0x1100000, 0x1101000, 0x160000} } }, {{{1, 0x1200000, 0x1201000, 0x161000} } }, {{{1, 0x1300000, 0x1301000, 0x162000} } }, {{{1, 0x1400000, 0x1401000, 0x163000} } }, {{{1, 0x1500000, 0x1501000, 0x165000} } }, {{{1, 0x1600000, 0x1601000, 0x166000} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{0, 0, 0, 0} } }, {{{1, 0x1d00000, 
0x1d10000, 0x190000} } }, {{{1, 0x1e00000, 0x1e01000, 0x16a000} } }, {{{1, 0x1f00000, 0x1f10000, 0x150000} } }, {{{0} } }, {{{1, 0x2100000, 0x2102000, 0x120000}, {1, 0x2110000, 0x2120000, 0x130000}, {1, 0x2120000, 0x2122000, 0x124000}, {1, 0x2130000, 0x2132000, 0x126000}, {1, 0x2140000, 0x2142000, 0x128000}, {1, 0x2150000, 0x2152000, 0x12a000}, {1, 0x2160000, 0x2170000, 0x110000}, {1, 0x2170000, 0x2172000, 0x12e000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000}, {0, 0x0000000, 0x0000000, 0x000000} } }, {{{1, 0x2200000, 0x2204000, 0x1b0000} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{0} } }, {{{1, 0x2800000, 0x2804000, 0x1a4000} } }, {{{1, 0x2900000, 0x2901000, 0x16b000} } }, {{{1, 0x2a00000, 0x2a00400, 0x1ac400} } }, {{{1, 0x2b00000, 0x2b00400, 0x1ac800} } }, {{{1, 0x2c00000, 0x2c00400, 0x1acc00} } }, {{{1, 0x2d00000, 0x2d00400, 0x1ad000} } }, {{{1, 0x2e00000, 0x2e00400, 0x1ad400} } }, {{{1, 0x2f00000, 0x2f00400, 0x1ad800} } }, {{{1, 0x3000000, 0x3000400, 0x1adc00} } }, {{{0, 0x3100000, 0x3104000, 0x1a8000} } }, {{{1, 0x3200000, 0x3204000, 0x1d4000} } }, {{{1, 0x3300000, 0x3304000, 0x1a0000} } }, {{{0} } }, {{{1, 0x3500000, 0x3500400, 0x1ac000} } }, {{{1, 0x3600000, 0x3600400, 0x1ae000} } }, {{{1, 0x3700000, 0x3700400, 0x1ae400} } }, {{{1, 0x3800000, 0x3804000, 0x1d0000} } }, {{{1, 0x3900000, 0x3904000, 0x1b4000} } }, {{{1, 0x3a00000, 0x3a04000, 0x1d8000} } }, {{{0} } }, {{{0} } }, {{{1, 0x3d00000, 0x3d04000, 0x1dc000} } }, {{{1, 0x3e00000, 0x3e01000, 0x167000} } }, {{{1, 0x3f00000, 0x3f01000, 0x168000} } } }; /* * top 12 bits of crb internal address (hub, agent) */ static unsigned qla82xx_crb_hub_agt[64] = { 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PS, QLA82XX_HW_CRB_HUB_AGT_ADR_MN, QLA82XX_HW_CRB_HUB_AGT_ADR_MS, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_SRE, 
QLA82XX_HW_CRB_HUB_AGT_ADR_NIU, QLA82XX_HW_CRB_HUB_AGT_ADR_QMN, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN0, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN1, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN2, QLA82XX_HW_CRB_HUB_AGT_ADR_SQN3, QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN4, QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN1, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN2, QLA82XX_HW_CRB_HUB_AGT_ADR_PGN3, QLA82XX_HW_CRB_HUB_AGT_ADR_PGND, QLA82XX_HW_CRB_HUB_AGT_ADR_PGNI, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS1, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS2, QLA82XX_HW_CRB_HUB_AGT_ADR_PGS3, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGSI, QLA82XX_HW_CRB_HUB_AGT_ADR_SN, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_EG, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PS, QLA82XX_HW_CRB_HUB_AGT_ADR_CAM, 0, 0, 0, 0, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_TIMR, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX1, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX2, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX3, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX4, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX5, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX6, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX7, QLA82XX_HW_CRB_HUB_AGT_ADR_XDMA, QLA82XX_HW_CRB_HUB_AGT_ADR_I2Q, QLA82XX_HW_CRB_HUB_AGT_ADR_ROMUSB, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX0, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX8, QLA82XX_HW_CRB_HUB_AGT_ADR_RPMX9, QLA82XX_HW_CRB_HUB_AGT_ADR_OCM0, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_SMB, QLA82XX_HW_CRB_HUB_AGT_ADR_I2C0, QLA82XX_HW_CRB_HUB_AGT_ADR_I2C1, 0, QLA82XX_HW_CRB_HUB_AGT_ADR_PGNC, 0, }; /* Device states */ static char *q_dev_state[] = { "Unknown", "Cold", "Initializing", "Ready", "Need Reset", "Need Quiescent", "Failed", "Quiescent", }; char *qdev_state(uint32_t dev_state) { return q_dev_state[dev_state]; } /* * In: 'off' is offset from CRB space in 128M pci map * Out: 'off' is 2M pci map addr * side effect: lock crb window */ static void qla82xx_pci_set_crbwindow_2M(struct qla_hw_data *ha, ulong *off) { u32 win_read; scsi_qla_host_t *vha = 
pci_get_drvdata(ha->pdev); ha->crb_win = CRB_HI(*off); writel(ha->crb_win, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase)); /* Read back value to make sure write has gone through before trying * to use it. */ win_read = RD_REG_DWORD((void __iomem *) (CRB_WINDOW_2M + ha->nx_pcibase)); if (win_read != ha->crb_win) { ql_dbg(ql_dbg_p3p, vha, 0xb000, "%s: Written crbwin (0x%x) " "!= Read crbwin (0x%x), off=0x%lx.\n", __func__, ha->crb_win, win_read, *off); } *off = (*off & MASK(16)) + CRB_INDIRECT_2M + ha->nx_pcibase; } static inline unsigned long qla82xx_pci_set_crbwindow(struct qla_hw_data *ha, u64 off) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); /* See if we are currently pointing to the region we want to use next */ if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_DDR_NET)) { /* No need to change window. PCIX and PCIEregs are in both * regs are in both windows. */ return off; } if ((off >= QLA82XX_CRB_PCIX_HOST) && (off < QLA82XX_CRB_PCIX_HOST2)) { /* We are in first CRB window */ if (ha->curr_window != 0) WARN_ON(1); return off; } if ((off > QLA82XX_CRB_PCIX_HOST2) && (off < QLA82XX_CRB_MAX)) { /* We are in second CRB window */ off = off - QLA82XX_CRB_PCIX_HOST2 + QLA82XX_CRB_PCIX_HOST; if (ha->curr_window != 1) return off; /* We are in the QM or direct access * register region - do nothing */ if ((off >= QLA82XX_PCI_DIRECT_CRB) && (off < QLA82XX_PCI_CAMQM_MAX)) return off; } /* strange address given */ ql_dbg(ql_dbg_p3p, vha, 0xb001, "%s: Warning: unm_nic_pci_set_crbwindow " "called with an unknown address(%llx).\n", QLA2XXX_DRIVER_NAME, off); return off; } static int qla82xx_pci_get_crb_addr_2M(struct qla_hw_data *ha, ulong *off) { struct crb_128M_2M_sub_block_map *m; if (*off >= QLA82XX_CRB_MAX) return -1; if (*off >= QLA82XX_PCI_CAMQM && (*off < QLA82XX_PCI_CAMQM_2M_END)) { *off = (*off - QLA82XX_PCI_CAMQM) + QLA82XX_PCI_CAMQM_2M_BASE + ha->nx_pcibase; return 0; } if (*off < QLA82XX_PCI_CRBSPACE) return -1; *off -= QLA82XX_PCI_CRBSPACE; /* Try 
direct map */ m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)]; if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) { *off = *off + m->start_2M - m->start_128M + ha->nx_pcibase; return 0; } /* Not in direct map, use crb window */ return 1; } #define CRB_WIN_LOCK_TIMEOUT 100000000 static int qla82xx_crb_win_lock(struct qla_hw_data *ha) { int done = 0, timeout = 0; while (!done) { /* acquire semaphore3 from PCI HW block */ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_LOCK)); if (done == 1) break; if (timeout >= CRB_WIN_LOCK_TIMEOUT) return -1; timeout++; } qla82xx_wr_32(ha, QLA82XX_CRB_WIN_LOCK_ID, ha->portnum); return 0; } int qla82xx_wr_32(struct qla_hw_data *ha, ulong off, u32 data) { unsigned long flags = 0; int rv; rv = qla82xx_pci_get_crb_addr_2M(ha, &off); BUG_ON(rv == -1); if (rv == 1) { write_lock_irqsave(&ha->hw_lock, flags); qla82xx_crb_win_lock(ha); qla82xx_pci_set_crbwindow_2M(ha, &off); } writel(data, (void __iomem *)off); if (rv == 1) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); write_unlock_irqrestore(&ha->hw_lock, flags); } return 0; } int qla82xx_rd_32(struct qla_hw_data *ha, ulong off) { unsigned long flags = 0; int rv; u32 data; rv = qla82xx_pci_get_crb_addr_2M(ha, &off); BUG_ON(rv == -1); if (rv == 1) { write_lock_irqsave(&ha->hw_lock, flags); qla82xx_crb_win_lock(ha); qla82xx_pci_set_crbwindow_2M(ha, &off); } data = RD_REG_DWORD((void __iomem *)off); if (rv == 1) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM7_UNLOCK)); write_unlock_irqrestore(&ha->hw_lock, flags); } return data; } #define IDC_LOCK_TIMEOUT 100000000 int qla82xx_idc_lock(struct qla_hw_data *ha) { int i; int done = 0, timeout = 0; while (!done) { /* acquire semaphore5 from PCI HW block */ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_LOCK)); if (done == 1) break; if (timeout >= IDC_LOCK_TIMEOUT) return -1; timeout++; /* Yield CPU */ if (!in_interrupt()) schedule(); else { for (i = 0; i < 20; i++) cpu_relax(); } } return 0; } void 
qla82xx_idc_unlock(struct qla_hw_data *ha) { qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM5_UNLOCK)); } /* PCI Windowing for DDR regions. */ #define QLA82XX_ADDR_IN_RANGE(addr, low, high) \ (((addr) <= (high)) && ((addr) >= (low))) /* * check memory access boundary. * used by test agent. support ddr access only for now */ static unsigned long qla82xx_pci_mem_bound_check(struct qla_hw_data *ha, unsigned long long addr, int size) { if (!QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || !QLA82XX_ADDR_IN_RANGE(addr + size - 1, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX) || ((size != 1) && (size != 2) && (size != 4) && (size != 8))) return 0; else return 1; } static int qla82xx_pci_set_window_warning_count; static unsigned long qla82xx_pci_set_window(struct qla_hw_data *ha, unsigned long long addr) { int window; u32 win_read; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX)) { /* DDR network side */ window = MN_WIN(addr); ha->ddr_mn_window = window; qla82xx_wr_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); if ((win_read << 17) != window) { ql_dbg(ql_dbg_p3p, vha, 0xb003, "%s: Written MNwin (0x%x) != Read MNwin (0x%x).\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_DDR_NET; } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0, QLA82XX_ADDR_OCM0_MAX)) { unsigned int temp1; if ((addr & 0x00ff800) == 0xff800) { ql_log(ql_log_warn, vha, 0xb004, "%s: QM access not handled.\n", __func__); addr = -1UL; } window = OCM_WIN(addr); ha->ddr_mn_window = window; qla82xx_wr_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->mn_win_crb | QLA82XX_PCI_CRBSPACE); temp1 = ((window & 0x1FF) << 7) | ((window & 0x0FFFE0000) >> 17); if (win_read != temp1) { ql_log(ql_log_warn, vha, 0xb005, "%s: Written OCMwin (0x%x) != Read OCMwin 
(0x%x).\n", __func__, temp1, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_OCM0_2M; } else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, QLA82XX_P3_ADDR_QDR_NET_MAX)) { /* QDR network side */ window = MS_WIN(addr); ha->qdr_sn_window = window; qla82xx_wr_32(ha, ha->ms_win_crb | QLA82XX_PCI_CRBSPACE, window); win_read = qla82xx_rd_32(ha, ha->ms_win_crb | QLA82XX_PCI_CRBSPACE); if (win_read != window) { ql_log(ql_log_warn, vha, 0xb006, "%s: Written MSwin (0x%x) != Read MSwin (0x%x).\n", __func__, window, win_read); } addr = GET_MEM_OFFS_2M(addr) + QLA82XX_PCI_QDR_NET; } else { /* * peg gdb frequently accesses memory that doesn't exist, * this limits the chit chat so debugging isn't slowed down. */ if ((qla82xx_pci_set_window_warning_count++ < 8) || (qla82xx_pci_set_window_warning_count%64 == 0)) { ql_log(ql_log_warn, vha, 0xb007, "%s: Warning:%s Unknown address range!.\n", __func__, QLA2XXX_DRIVER_NAME); } addr = -1UL; } return addr; } /* check if address is in the same windows as the previous access */ static int qla82xx_pci_is_same_window(struct qla_hw_data *ha, unsigned long long addr) { int window; unsigned long long qdr_max; qdr_max = QLA82XX_P3_ADDR_QDR_NET_MAX; /* DDR network side */ if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_DDR_NET, QLA82XX_ADDR_DDR_NET_MAX)) BUG(); else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM0, QLA82XX_ADDR_OCM0_MAX)) return 1; else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_OCM1, QLA82XX_ADDR_OCM1_MAX)) return 1; else if (QLA82XX_ADDR_IN_RANGE(addr, QLA82XX_ADDR_QDR_NET, qdr_max)) { /* QDR network side */ window = ((addr - QLA82XX_ADDR_QDR_NET) >> 22) & 0x3f; if (ha->qdr_sn_window == window) return 1; } return 0; } static int qla82xx_pci_mem_read_direct(struct qla_hw_data *ha, u64 off, void *data, int size) { unsigned long flags; void __iomem *addr = NULL; int ret = 0; u64 start; uint8_t __iomem *mem_ptr = NULL; unsigned long mem_base; unsigned long mem_page; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 
write_lock_irqsave(&ha->hw_lock, flags); /* * If attempting to access unknown address or straddle hw windows, * do not access. */ start = qla82xx_pci_set_window(ha, off); if ((start == -1UL) || (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); ql_log(ql_log_fatal, vha, 0xb008, "%s out of bound pci memory " "access, offset is 0x%llx.\n", QLA2XXX_DRIVER_NAME, off); return -1; } write_unlock_irqrestore(&ha->hw_lock, flags); mem_base = pci_resource_start(ha->pdev, 0); mem_page = start & PAGE_MASK; /* Map two pages whenever user tries to access addresses in two * consecutive pages. */ if (mem_page != ((start + size - 1) & PAGE_MASK)) mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE * 2); else mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); if (mem_ptr == NULL) { *(u8 *)data = 0; return -1; } addr = mem_ptr; addr += start & (PAGE_SIZE - 1); write_lock_irqsave(&ha->hw_lock, flags); switch (size) { case 1: *(u8 *)data = readb(addr); break; case 2: *(u16 *)data = readw(addr); break; case 4: *(u32 *)data = readl(addr); break; case 8: *(u64 *)data = readq(addr); break; default: ret = -1; break; } write_unlock_irqrestore(&ha->hw_lock, flags); if (mem_ptr) iounmap(mem_ptr); return ret; } static int qla82xx_pci_mem_write_direct(struct qla_hw_data *ha, u64 off, void *data, int size) { unsigned long flags; void __iomem *addr = NULL; int ret = 0; u64 start; uint8_t __iomem *mem_ptr = NULL; unsigned long mem_base; unsigned long mem_page; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); write_lock_irqsave(&ha->hw_lock, flags); /* * If attempting to access unknown address or straddle hw windows, * do not access. 
*/ start = qla82xx_pci_set_window(ha, off); if ((start == -1UL) || (qla82xx_pci_is_same_window(ha, off + size - 1) == 0)) { write_unlock_irqrestore(&ha->hw_lock, flags); ql_log(ql_log_fatal, vha, 0xb009, "%s out of bount memory " "access, offset is 0x%llx.\n", QLA2XXX_DRIVER_NAME, off); return -1; } write_unlock_irqrestore(&ha->hw_lock, flags); mem_base = pci_resource_start(ha->pdev, 0); mem_page = start & PAGE_MASK; /* Map two pages whenever user tries to access addresses in two * consecutive pages. */ if (mem_page != ((start + size - 1) & PAGE_MASK)) mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE*2); else mem_ptr = ioremap(mem_base + mem_page, PAGE_SIZE); if (mem_ptr == NULL) return -1; addr = mem_ptr; addr += start & (PAGE_SIZE - 1); write_lock_irqsave(&ha->hw_lock, flags); switch (size) { case 1: writeb(*(u8 *)data, addr); break; case 2: writew(*(u16 *)data, addr); break; case 4: writel(*(u32 *)data, addr); break; case 8: writeq(*(u64 *)data, addr); break; default: ret = -1; break; } write_unlock_irqrestore(&ha->hw_lock, flags); if (mem_ptr) iounmap(mem_ptr); return ret; } #define MTU_FUDGE_FACTOR 100 static unsigned long qla82xx_decode_crb_addr(unsigned long addr) { int i; unsigned long base_addr, offset, pci_base; if (!qla82xx_crb_table_initialized) qla82xx_crb_addr_transform_setup(); pci_base = ADDR_ERROR; base_addr = addr & 0xfff00000; offset = addr & 0x000fffff; for (i = 0; i < MAX_CRB_XFORM; i++) { if (crb_addr_xform[i] == base_addr) { pci_base = i << 20; break; } } if (pci_base == ADDR_ERROR) return pci_base; return pci_base + offset; } static long rom_max_timeout = 100; static long qla82xx_rom_lock_timeout = 100; static int qla82xx_rom_lock(struct qla_hw_data *ha) { int done = 0, timeout = 0; uint32_t lock_owner = 0; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); while (!done) { /* acquire semaphore2 from PCI HW block */ done = qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK)); if (done == 1) break; if (timeout >= qla82xx_rom_lock_timeout) { 
			lock_owner = qla82xx_rd_32(ha,
			    QLA82XX_ROM_LOCK_ID);
			ql_dbg(ql_dbg_p3p, vha, 0xb085,
			    "Failed to acquire rom lock, acquired by %d.\n",
			    lock_owner);
			return -1;
		}
		timeout++;
	}
	/* Record ourselves as the current ROM lock holder. */
	qla82xx_wr_32(ha, QLA82XX_ROM_LOCK_ID, ROM_LOCK_DRIVER);
	return 0;
}

/*
 * qla82xx_rom_unlock() - Release the SEM2 ROM semaphore.  The read of the
 * unlock register performs the release; its value is discarded.
 */
static void
qla82xx_rom_unlock(struct qla_hw_data *ha)
{
	qla82xx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_UNLOCK));
}

/*
 * qla82xx_wait_rom_busy() - Poll ROMUSB_GLB_STATUS until bit 2 (mask 0x4)
 * is set, up to rom_max_timeout iterations.
 * Returns 0 on success, -1 on timeout.
 */
static int
qla82xx_wait_rom_busy(struct qla_hw_data *ha)
{
	long timeout = 0;
	long done = 0;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	while (done == 0) {
		done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
		done &= 4;
		timeout++;
		if (timeout >= rom_max_timeout) {
			ql_dbg(ql_dbg_p3p, vha, 0xb00a,
			    "%s: Timeout reached waiting for rom busy.\n",
			    QLA2XXX_DRIVER_NAME);
			return -1;
		}
	}
	return 0;
}

/*
 * qla82xx_wait_rom_done() - Poll ROMUSB_GLB_STATUS until bit 1 (mask 0x2)
 * is set, up to rom_max_timeout iterations.
 * Returns 0 on success, -1 on timeout.
 */
static int
qla82xx_wait_rom_done(struct qla_hw_data *ha)
{
	long timeout = 0;
	long done = 0;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	while (done == 0) {
		done = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_STATUS);
		done &= 2;
		timeout++;
		if (timeout >= rom_max_timeout) {
			ql_dbg(ql_dbg_p3p, vha, 0xb00b,
			    "%s: Timeout reached waiting for rom done.\n",
			    QLA2XXX_DRIVER_NAME);
			return -1;
		}
	}
	return 0;
}

/*
 * qla82xx_md_rw_32() - Indirect CRB access through the 2M window.
 * The upper 16 bits of @off select the window (written to CRB_WINDOW_2M
 * and read back to flush the write); the lower 16 bits index into
 * CRB_INDIRECT_2M.  @flag nonzero writes @data, zero reads and returns
 * the register value (0 on a write).
 */
static int
qla82xx_md_rw_32(struct qla_hw_data *ha, uint32_t off, u32 data, uint8_t flag)
{
	uint32_t off_value, rval = 0;

	WRT_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase),
	    (off & 0xFFFF0000));

	/* Read back value to make sure write has gone through */
	RD_REG_DWORD((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
	off_value = (off & 0x0000FFFF);

	if (flag)
		WRT_REG_DWORD((void __iomem *)
		    (off_value + CRB_INDIRECT_2M + ha->nx_pcibase),
		    data);
	else
		rval = RD_REG_DWORD((void __iomem *)
		    (off_value + CRB_INDIRECT_2M + ha->nx_pcibase));

	return rval;
}

static int
qla82xx_do_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
	/* Dword reads to flash.
	 */
	qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (addr & 0xFFFF0000), 1);
	*valp = qla82xx_md_rw_32(ha,
	    MD_DIRECT_ROM_READ_BASE + (addr & 0x0000FFFF), 0, 0);
	return 0;
}

/*
 * qla82xx_rom_fast_read() - Read one flash dword at @addr into *@valp
 * while holding the SEM2 ROM lock.  Retries the lock up to 50000 times
 * (100us apart) before giving up.
 * Returns 0 on success, -1 on lock timeout or read failure.
 */
static int
qla82xx_rom_fast_read(struct qla_hw_data *ha, int addr, int *valp)
{
	int ret, loops = 0;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
		udelay(100);
		schedule();
		loops++;
	}
	if (loops >= 50000) {
		ql_log(ql_log_fatal, vha, 0x00b9,
		    "Failed to acquire SEM2 lock.\n");
		return -1;
	}
	ret = qla82xx_do_rom_fast_read(ha, addr, valp);
	qla82xx_rom_unlock(ha);
	return ret;
}

/*
 * qla82xx_read_status_reg() - Issue the flash RDSR opcode and return the
 * status register contents in *@val.
 * Returns 0 on success, -1 if the ROM done wait times out.
 */
static int
qla82xx_read_status_reg(struct qla_hw_data *ha, uint32_t *val)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_RDSR);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha)) {
		ql_log(ql_log_warn, vha, 0xb00c,
		    "Error waiting for rom done.\n");
		return -1;
	}
	*val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_ROM_RDATA);
	return 0;
}

/*
 * qla82xx_flash_wait_write_finish() - Poll the flash status register
 * until the write-in-progress bit (bit 0) clears, up to 50000 polls
 * of 10us each.
 * Returns 0 on completion, -1 on timeout or status-read error.
 */
static int
qla82xx_flash_wait_write_finish(struct qla_hw_data *ha)
{
	long timeout = 0;
	uint32_t done = 1;
	uint32_t val;
	int ret = 0;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	while ((done != 0) && (ret == 0)) {
		ret = qla82xx_read_status_reg(ha, &val);
		done = val & 1;
		timeout++;
		udelay(10);
		cond_resched();
		if (timeout >= 50000) {
			ql_log(ql_log_warn, vha, 0xb00d,
			    "Timeout reached waiting for write finish.\n");
			return -1;
		}
	}
	return ret;
}

/*
 * qla82xx_flash_set_write_enable() - Issue the flash WREN opcode and
 * verify that the write-enable-latch bit (bit 1) is set in the status
 * register.
 * Returns 0 on success, -1 on any failure.
 */
static int
qla82xx_flash_set_write_enable(struct qla_hw_data *ha)
{
	uint32_t val;

	qla82xx_wait_rom_busy(ha);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 0);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WREN);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha))
		return -1;
	if (qla82xx_read_status_reg(ha, &val) != 0)
		return -1;
	if ((val & 2) != 2)
		return -1;
	return 0;
}

static int
qla82xx_write_status_reg(struct qla_hw_data *ha, uint32_t val)
{
	scsi_qla_host_t *vha =
	    pci_get_drvdata(ha->pdev);

	if (qla82xx_flash_set_write_enable(ha))
		return -1;
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, val);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, 0x1);
	if (qla82xx_wait_rom_done(ha)) {
		ql_log(ql_log_warn, vha, 0xb00e,
		    "Error waiting for rom done.\n");
		return -1;
	}
	return qla82xx_flash_wait_write_finish(ha);
}

/*
 * qla82xx_write_disable_flash() - Issue the flash WRDI opcode to clear
 * the write-enable latch.
 * Returns 0 on success, -1 if the ROM done wait times out.
 */
static int
qla82xx_write_disable_flash(struct qla_hw_data *ha)
{
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_WRDI);
	if (qla82xx_wait_rom_done(ha)) {
		ql_log(ql_log_warn, vha, 0xb00f,
		    "Error waiting for rom done.\n");
		return -1;
	}
	return 0;
}

/*
 * ql82xx_rom_lock_d() - Acquire the ROM lock with up to 50000 retries,
 * 100us apart, yielding via cond_resched() between attempts.
 * Returns 0 on success, -1 on timeout.
 */
static int
ql82xx_rom_lock_d(struct qla_hw_data *ha)
{
	int loops = 0;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	while ((qla82xx_rom_lock(ha) != 0) && (loops < 50000)) {
		udelay(100);
		cond_resched();
		loops++;
	}
	if (loops >= 50000) {
		ql_log(ql_log_warn, vha, 0xb010,
		    "ROM lock failed.\n");
		return -1;
	}
	return 0;
}

/*
 * qla82xx_write_flash_dword() - Program one dword @data at flash address
 * @flashaddr: take the ROM lock, enable writes, load data/address/byte
 * count, issue the page-program opcode, and wait for completion.  The
 * lock is always released on exit.
 * Returns 0 on success, negative on failure.
 */
static int
qla82xx_write_flash_dword(struct qla_hw_data *ha, uint32_t flashaddr,
	uint32_t data)
{
	int ret = 0;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	ret = ql82xx_rom_lock_d(ha);
	if (ret < 0) {
		ql_log(ql_log_warn, vha, 0xb011,
		    "ROM lock failed.\n");
		return ret;
	}

	if (qla82xx_flash_set_write_enable(ha))
		goto done_write;

	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_WDATA, data);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, flashaddr);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
	qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_PP);
	qla82xx_wait_rom_busy(ha);
	if (qla82xx_wait_rom_done(ha)) {
		ql_log(ql_log_warn, vha, 0xb012,
		    "Error waiting for rom done.\n");
		ret = -1;
		goto done_write;
	}

	ret = qla82xx_flash_wait_write_finish(ha);

done_write:
	qla82xx_rom_unlock(ha);
	return ret;
}

/* This routine does CRB initialize sequence
 * to put the ISP into operational state
 */
static int
qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
{
	int addr, val;
	int i;
	struct crb_addr_pair *buf;
	unsigned long off;
unsigned offset, n; struct qla_hw_data *ha = vha->hw; struct crb_addr_pair { long addr; long data; }; /* Halt all the individual PEGs and other blocks of the ISP */ qla82xx_rom_lock(ha); /* disable all I2Q */ qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x10, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x14, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x18, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x1c, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x20, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_I2Q + 0x24, 0x0); /* disable all niu interrupts */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x40, 0xff); /* disable xge rx/tx */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x70000, 0x00); /* disable xg1 rx/tx */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x80000, 0x00); /* disable sideband mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x90000, 0x00); /* disable ap0 mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xa0000, 0x00); /* disable ap1 mac */ qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0xb0000, 0x00); /* halt sre */ val = qla82xx_rd_32(ha, QLA82XX_CRB_SRE + 0x1000); qla82xx_wr_32(ha, QLA82XX_CRB_SRE + 0x1000, val & (~(0x1))); /* halt epg */ qla82xx_wr_32(ha, QLA82XX_CRB_EPG + 0x1300, 0x1); /* halt timers */ qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x0, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x8, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x10, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x18, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x100, 0x0); qla82xx_wr_32(ha, QLA82XX_CRB_TIMER + 0x200, 0x0); /* halt pegs */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3 + 0x3c, 1); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_4 + 0x3c, 1); msleep(20); /* big hammer */ if (test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) /* don't reset CAM block on reset */ qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xfeffffff); else qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0xffffffff); 
qla82xx_rom_unlock(ha); /* Read the signature value from the flash. * Offset 0: Contain signature (0xcafecafe) * Offset 4: Offset and number of addr/value pairs * that present in CRB initialize sequence */ if (qla82xx_rom_fast_read(ha, 0, &n) != 0 || n != 0xcafecafeUL || qla82xx_rom_fast_read(ha, 4, &n) != 0) { ql_log(ql_log_fatal, vha, 0x006e, "Error Reading crb_init area: n: %08x.\n", n); return -1; } /* Offset in flash = lower 16 bits * Number of entries = upper 16 bits */ offset = n & 0xffffU; n = (n >> 16) & 0xffffU; /* number of addr/value pair should not exceed 1024 entries */ if (n >= 1024) { ql_log(ql_log_fatal, vha, 0x0071, "Card flash not initialized:n=0x%x.\n", n); return -1; } ql_log(ql_log_info, vha, 0x0072, "%d CRB init values found in ROM.\n", n); buf = kmalloc(n * sizeof(struct crb_addr_pair), GFP_KERNEL); if (buf == NULL) { ql_log(ql_log_fatal, vha, 0x010c, "Unable to allocate memory.\n"); return -1; } for (i = 0; i < n; i++) { if (qla82xx_rom_fast_read(ha, 8*i + 4*offset, &val) != 0 || qla82xx_rom_fast_read(ha, 8*i + 4*offset + 4, &addr) != 0) { kfree(buf); return -1; } buf[i].addr = addr; buf[i].data = val; } for (i = 0; i < n; i++) { /* Translate internal CRB initialization * address to PCI bus address */ off = qla82xx_decode_crb_addr((unsigned long)buf[i].addr) + QLA82XX_PCI_CRBSPACE; /* Not all CRB addr/value pair to be written, * some of them are skipped */ /* skipping cold reboot MAGIC */ if (off == QLA82XX_CAM_RAM(0x1fc)) continue; /* do not reset PCI */ if (off == (ROMUSB_GLB + 0xbc)) continue; /* skip core clock, so that firmware can increase the clock */ if (off == (ROMUSB_GLB + 0xc8)) continue; /* skip the function enable register */ if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION)) continue; if (off == QLA82XX_PCIE_REG(PCIE_SETUP_FUNCTION2)) continue; if ((off & 0x0ff00000) == QLA82XX_CRB_SMB) continue; if ((off & 0x0ff00000) == QLA82XX_CRB_DDR_NET) continue; if (off == ADDR_ERROR) { ql_log(ql_log_fatal, vha, 0x0116, "Unknow addr: 
0x%08lx.\n", buf[i].addr); continue; } qla82xx_wr_32(ha, off, buf[i].data); /* ISP requires much bigger delay to settle down, * else crb_window returns 0xffffffff */ if (off == QLA82XX_ROMUSB_GLB_SW_RESET) msleep(1000); /* ISP requires millisec delay between * successive CRB register updation */ msleep(1); } kfree(buf); /* Resetting the data and instruction cache */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0xec, 0x1e); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_D+0x4c, 8); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_I+0x4c, 8); /* Clear all protocol processing engines */ qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_1+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_2+0xc, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0x8, 0); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_3+0xc, 0); return 0; } static int qla82xx_pci_mem_write_2M(struct qla_hw_data *ha, u64 off, void *data, int size) { int i, j, ret = 0, loop, sz[2], off0; int scale, shift_amount, startword; uint32_t temp; uint64_t off8, mem_crb, tmpw, word[2] = {0, 0}; /* * If not MN, go check for MS or invalid. */ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) mem_crb = QLA82XX_CRB_QDR_NET; else { mem_crb = QLA82XX_CRB_DDR_NET; if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) return qla82xx_pci_mem_write_direct(ha, off, data, size); } off0 = off & 0x7; sz[0] = (size < (8 - off0)) ? 
size : (8 - off0); sz[1] = size - sz[0]; off8 = off & 0xfffffff0; loop = (((off & 0xf) + size - 1) >> 4) + 1; shift_amount = 4; scale = 2; startword = (off & 0xf)/8; for (i = 0; i < loop; i++) { if (qla82xx_pci_mem_read_2M(ha, off8 + (i << shift_amount), &word[i * scale], 8)) return -1; } switch (size) { case 1: tmpw = *((uint8_t *)data); break; case 2: tmpw = *((uint16_t *)data); break; case 4: tmpw = *((uint32_t *)data); break; case 8: default: tmpw = *((uint64_t *)data); break; } if (sz[0] == 8) { word[startword] = tmpw; } else { word[startword] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); word[startword] |= tmpw << (off0 * 8); } if (sz[1] != 0) { word[startword+1] &= ~(~0ULL << (sz[1] * 8)); word[startword+1] |= tmpw >> (sz[0] * 8); } for (i = 0; i < loop; i++) { temp = off8 + (i << shift_amount); qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_LO, temp); temp = 0; qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_ADDR_HI, temp); temp = word[i * scale] & 0xffffffff; qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_LO, temp); temp = (word[i * scale] >> 32) & 0xffffffff; qla82xx_wr_32(ha, mem_crb+MIU_TEST_AGT_WRDATA_HI, temp); temp = word[i*scale + 1] & 0xffffffff; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_LO, temp); temp = (word[i*scale + 1] >> 32) & 0xffffffff; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_WRDATA_UPPER_HI, temp); temp = MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); if ((temp & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&ha->pdev->dev, "failed to write through agent.\n"); ret = -1; break; } } return ret; } static int qla82xx_fw_load_from_flash(struct qla_hw_data *ha) { int i; long size = 0; long flashaddr = ha->flt_region_bootload << 2; long memaddr = 
BOOTLD_START; u64 data; u32 high, low; size = (IMAGE_START - BOOTLD_START) / 8; for (i = 0; i < size; i++) { if ((qla82xx_rom_fast_read(ha, flashaddr, (int *)&low)) || (qla82xx_rom_fast_read(ha, flashaddr + 4, (int *)&high))) { return -1; } data = ((u64)high << 32) | low ; qla82xx_pci_mem_write_2M(ha, memaddr, &data, 8); flashaddr += 8; memaddr += 8; if (i % 0x1000 == 0) msleep(1); } udelay(100); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); read_unlock(&ha->hw_lock); return 0; } int qla82xx_pci_mem_read_2M(struct qla_hw_data *ha, u64 off, void *data, int size) { int i, j = 0, k, start, end, loop, sz[2], off0[2]; int shift_amount; uint32_t temp; uint64_t off8, val, mem_crb, word[2] = {0, 0}; /* * If not MN, go check for MS or invalid. */ if (off >= QLA82XX_ADDR_QDR_NET && off <= QLA82XX_P3_ADDR_QDR_NET_MAX) mem_crb = QLA82XX_CRB_QDR_NET; else { mem_crb = QLA82XX_CRB_DDR_NET; if (qla82xx_pci_mem_bound_check(ha, off, size) == 0) return qla82xx_pci_mem_read_direct(ha, off, data, size); } off8 = off & 0xfffffff0; off0[0] = off & 0xf; sz[0] = (size < (16 - off0[0])) ? 
size : (16 - off0[0]); shift_amount = 4; loop = ((off0[0] + size - 1) >> shift_amount) + 1; off0[1] = 0; sz[1] = size - sz[0]; for (i = 0; i < loop; i++) { temp = off8 + (i << shift_amount); qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_LO, temp); temp = 0; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_ADDR_HI, temp); temp = MIU_TA_CTL_ENABLE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); temp = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; qla82xx_wr_32(ha, mem_crb + MIU_TEST_AGT_CTRL, temp); for (j = 0; j < MAX_CTL_CHECK; j++) { temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_CTRL); if ((temp & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { if (printk_ratelimit()) dev_err(&ha->pdev->dev, "failed to read through agent.\n"); break; } start = off0[i] >> 2; end = (off0[i] + sz[i] - 1) >> 2; for (k = start; k <= end; k++) { temp = qla82xx_rd_32(ha, mem_crb + MIU_TEST_AGT_RDDATA(k)); word[i] |= ((uint64_t)temp << (32 * (k & 1))); } } if (j >= MAX_CTL_CHECK) return -1; if ((off0[0] & 7) == 0) { val = word[0]; } else { val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) | ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8)); } switch (size) { case 1: *(uint8_t *)data = val; break; case 2: *(uint16_t *)data = val; break; case 4: *(uint32_t *)data = val; break; case 8: *(uint64_t *)data = val; break; } return 0; } static struct qla82xx_uri_table_desc * qla82xx_get_table_desc(const u8 *unirom, int section) { uint32_t i; struct qla82xx_uri_table_desc *directory = (struct qla82xx_uri_table_desc *)&unirom[0]; __le32 offset; __le32 tab_type; __le32 entries = cpu_to_le32(directory->num_entries); for (i = 0; i < entries; i++) { offset = cpu_to_le32(directory->findex) + (i * cpu_to_le32(directory->entry_size)); tab_type = cpu_to_le32(*((u32 *)&unirom[offset] + 8)); if (tab_type == section) return (struct qla82xx_uri_table_desc *)&unirom[offset]; } return NULL; } static struct qla82xx_uri_data_desc * qla82xx_get_data_desc(struct qla_hw_data *ha, u32 section, u32 
	idx_offset)
{
	const u8 *unirom = ha->hablob->fw->data;
	/* Index into the product table selecting which data entry to use. */
	int idx = cpu_to_le32(*((int *)&unirom[ha->file_prd_off] + idx_offset));
	struct qla82xx_uri_table_desc *tab_desc = NULL;
	__le32 offset;

	tab_desc = qla82xx_get_table_desc(unirom, section);
	if (!tab_desc)
		return NULL;

	offset = cpu_to_le32(tab_desc->findex) +
	    (cpu_to_le32(tab_desc->entry_size) * idx);

	return (struct qla82xx_uri_data_desc *)&unirom[offset];
}

/*
 * qla82xx_get_bootld_offset() - Return a pointer to the bootloader image
 * inside the firmware blob.  For unified ROM images the offset comes from
 * the URI bootld descriptor; otherwise the fixed BOOTLD_START is used.
 */
static u8 *
qla82xx_get_bootld_offset(struct qla_hw_data *ha)
{
	u32 offset = BOOTLD_START;
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha,
		    QLA82XX_URI_DIR_SECT_BOOTLD, QLA82XX_URI_BOOTLD_IDX_OFF);
		if (uri_desc)
			offset = cpu_to_le32(uri_desc->findex);
	}

	return (u8 *)&ha->hablob->fw->data[offset];
}

/*
 * qla82xx_get_fw_size() - Return the firmware image size.  For unified
 * ROM images the size comes from the URI firmware descriptor; otherwise
 * it is read from FW_SIZE_OFFSET in the blob.
 */
static __le32
qla82xx_get_fw_size(struct qla_hw_data *ha)
{
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
		    QLA82XX_URI_FIRMWARE_IDX_OFF);
		if (uri_desc)
			return cpu_to_le32(uri_desc->size);
	}

	return cpu_to_le32(*(u32 *)&ha->hablob->fw->data[FW_SIZE_OFFSET]);
}

/*
 * qla82xx_get_fw_offs() - Return a pointer to the firmware image inside
 * the blob.  For unified ROM images the offset comes from the URI
 * firmware descriptor; otherwise the fixed IMAGE_START is used.
 */
static u8 *
qla82xx_get_fw_offs(struct qla_hw_data *ha)
{
	u32 offset = IMAGE_START;
	struct qla82xx_uri_data_desc *uri_desc = NULL;

	if (ha->fw_type == QLA82XX_UNIFIED_ROMIMAGE) {
		uri_desc = qla82xx_get_data_desc(ha, QLA82XX_URI_DIR_SECT_FW,
		    QLA82XX_URI_FIRMWARE_IDX_OFF);
		if (uri_desc)
			offset = cpu_to_le32(uri_desc->findex);
	}

	return (u8 *)&ha->hablob->fw->data[offset];
}

/* PCI related functions */

/*
 * qla82xx_pci_region_offset() - Return the device-internal offset of PCI
 * BAR region @region.  Region 0 starts at 0; region 1 starts just past
 * the MSI-X table (base read from config space).
 */
int
qla82xx_pci_region_offset(struct pci_dev *pdev, int region)
{
	unsigned long val = 0;
	u32 control;

	switch (region) {
	case 0:
		val = 0;
		break;
	case 1:
		pci_read_config_dword(pdev, QLA82XX_PCI_REG_MSIX_TBL,
		    &control);
		val = control + QLA82XX_MSIX_TBL_SPACE;
		break;
	}
	return val;
}

int
qla82xx_iospace_config(struct qla_hw_data *ha)
{
	uint32_t len = 0;

	if (pci_request_regions(ha->pdev, QLA2XXX_DRIVER_NAME)) {
		ql_log_pci(ql_log_fatal,
ha->pdev, 0x000c, "Failed to reserver selected regions.\n"); goto iospace_error_exit; } /* Use MMIO operations for all accesses. */ if (!(pci_resource_flags(ha->pdev, 0) & IORESOURCE_MEM)) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000d, "Region #0 not an MMIO resource, aborting.\n"); goto iospace_error_exit; } len = pci_resource_len(ha->pdev, 0); ha->nx_pcibase = (unsigned long)ioremap(pci_resource_start(ha->pdev, 0), len); if (!ha->nx_pcibase) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000e, "Cannot remap pcibase MMIO, aborting.\n"); goto iospace_error_exit; } /* Mapping of IO base pointer */ ha->iobase = (device_reg_t __iomem *)((uint8_t *)ha->nx_pcibase + 0xbc000 + (ha->pdev->devfn << 11)); if (!ql2xdbwr) { ha->nxdb_wr_ptr = (unsigned long)ioremap((pci_resource_start(ha->pdev, 4) + (ha->pdev->devfn << 12)), 4); if (!ha->nxdb_wr_ptr) { ql_log_pci(ql_log_fatal, ha->pdev, 0x000f, "Cannot remap MMIO, aborting.\n"); goto iospace_error_exit; } /* Mapping of IO base pointer, * door bell read and write pointer */ ha->nxdb_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) + (ha->pdev->devfn * 8); } else { ha->nxdb_wr_ptr = (ha->pdev->devfn == 6 ? QLA82XX_CAMRAM_DB1 : QLA82XX_CAMRAM_DB2); } ha->max_req_queues = ha->max_rsp_queues = 1; ha->msix_count = ha->max_rsp_queues + 1; ql_dbg_pci(ql_dbg_multiq, ha->pdev, 0xc006, "nx_pci_base=%p iobase=%p " "max_req_queues=%d msix_count=%d.\n", (void *)ha->nx_pcibase, ha->iobase, ha->max_req_queues, ha->msix_count); ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0010, "nx_pci_base=%p iobase=%p " "max_req_queues=%d msix_count=%d.\n", (void *)ha->nx_pcibase, ha->iobase, ha->max_req_queues, ha->msix_count); return 0; iospace_error_exit: return -ENOMEM; } /* GS related functions */ /* Initialization related functions */ /** * qla82xx_pci_config() - Setup ISP82xx PCI configuration registers. * @ha: HA context * * Returns 0 on success. 
*/ int qla82xx_pci_config(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int ret; pci_set_master(ha->pdev); ret = pci_set_mwi(ha->pdev); ha->chip_revision = ha->pdev->revision; ql_dbg(ql_dbg_init, vha, 0x0043, "Chip revision:%d.\n", ha->chip_revision); return 0; } /** * qla82xx_reset_chip() - Setup ISP82xx PCI configuration registers. * @ha: HA context * * Returns 0 on success. */ void qla82xx_reset_chip(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; ha->isp_ops->disable_intrs(ha); } void qla82xx_config_rings(struct scsi_qla_host *vha) { struct qla_hw_data *ha = vha->hw; struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; struct init_cb_81xx *icb; struct req_que *req = ha->req_q_map[0]; struct rsp_que *rsp = ha->rsp_q_map[0]; /* Setup ring parameters in initialization control block. */ icb = (struct init_cb_81xx *)ha->init_cb; icb->request_q_outpointer = __constant_cpu_to_le16(0); icb->response_q_inpointer = __constant_cpu_to_le16(0); icb->request_q_length = cpu_to_le16(req->length); icb->response_q_length = cpu_to_le16(rsp->length); icb->request_q_address[0] = cpu_to_le32(LSD(req->dma)); icb->request_q_address[1] = cpu_to_le32(MSD(req->dma)); icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); WRT_REG_DWORD((unsigned long __iomem *)&reg->req_q_out[0], 0); WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_in[0], 0); WRT_REG_DWORD((unsigned long __iomem *)&reg->rsp_q_out[0], 0); } static int qla82xx_fw_load_from_blob(struct qla_hw_data *ha) { u64 *ptr64; u32 i, flashaddr, size; __le64 data; size = (IMAGE_START - BOOTLD_START) / 8; ptr64 = (u64 *)qla82xx_get_bootld_offset(ha); flashaddr = BOOTLD_START; for (i = 0; i < size; i++) { data = cpu_to_le64(ptr64[i]); if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8)) return -EIO; flashaddr += 8; } flashaddr = FLASH_ADDR_START; size = (__force u32)qla82xx_get_fw_size(ha) / 8; ptr64 = (u64 *)qla82xx_get_fw_offs(ha); for (i = 0; i < 
size; i++) { data = cpu_to_le64(ptr64[i]); if (qla82xx_pci_mem_write_2M(ha, flashaddr, &data, 8)) return -EIO; flashaddr += 8; } udelay(100); /* Write a magic value to CAMRAM register * at a specified offset to indicate * that all data is written and * ready for firmware to initialize. */ qla82xx_wr_32(ha, QLA82XX_CAM_RAM(0x1fc), QLA82XX_BDINFO_MAGIC); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, QLA82XX_CRB_PEG_NET_0 + 0x18, 0x1020); qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, 0x80001e); read_unlock(&ha->hw_lock); return 0; } static int qla82xx_set_product_offset(struct qla_hw_data *ha) { struct qla82xx_uri_table_desc *ptab_desc = NULL; const uint8_t *unirom = ha->hablob->fw->data; uint32_t i; __le32 entries; __le32 flags, file_chiprev, offset; uint8_t chiprev = ha->chip_revision; /* Hardcoding mn_present flag for P3P */ int mn_present = 0; uint32_t flagbit; ptab_desc = qla82xx_get_table_desc(unirom, QLA82XX_URI_DIR_SECT_PRODUCT_TBL); if (!ptab_desc) return -1; entries = cpu_to_le32(ptab_desc->num_entries); for (i = 0; i < entries; i++) { offset = cpu_to_le32(ptab_desc->findex) + (i * cpu_to_le32(ptab_desc->entry_size)); flags = cpu_to_le32(*((int *)&unirom[offset] + QLA82XX_URI_FLAGS_OFF)); file_chiprev = cpu_to_le32(*((int *)&unirom[offset] + QLA82XX_URI_CHIP_REV_OFF)); flagbit = mn_present ? 
1 : 2; if ((chiprev == file_chiprev) && ((1ULL << flagbit) & flags)) { ha->file_prd_off = offset; return 0; } } return -1; } static int qla82xx_validate_firmware_blob(scsi_qla_host_t *vha, uint8_t fw_type) { __le32 val; uint32_t min_size; struct qla_hw_data *ha = vha->hw; const struct firmware *fw = ha->hablob->fw; ha->fw_type = fw_type; if (fw_type == QLA82XX_UNIFIED_ROMIMAGE) { if (qla82xx_set_product_offset(ha)) return -EINVAL; min_size = QLA82XX_URI_FW_MIN_SIZE; } else { val = cpu_to_le32(*(u32 *)&fw->data[QLA82XX_FW_MAGIC_OFFSET]); if ((__force u32)val != QLA82XX_BDINFO_MAGIC) return -EINVAL; min_size = QLA82XX_FW_MIN_SIZE; } if (fw->size < min_size) return -EINVAL; return 0; } static int qla82xx_check_cmdpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); do { read_lock(&ha->hw_lock); val = qla82xx_rd_32(ha, CRB_CMDPEG_STATE); read_unlock(&ha->hw_lock); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return QLA_SUCCESS; case PHAN_INITIALIZE_FAILED: break; default: break; } ql_log(ql_log_info, vha, 0x00a8, "CRB_CMDPEG_STATE: 0x%x and retries:0x%x.\n", val, retries); msleep(500); } while (--retries); ql_log(ql_log_fatal, vha, 0x00a9, "Cmd Peg initialization failed: 0x%x.\n", val); val = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_PEGTUNE_DONE); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, CRB_CMDPEG_STATE, PHAN_INITIALIZE_FAILED); read_unlock(&ha->hw_lock); return QLA_FUNCTION_FAILED; } static int qla82xx_check_rcvpeg_state(struct qla_hw_data *ha) { u32 val = 0; int retries = 60; scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); do { read_lock(&ha->hw_lock); val = qla82xx_rd_32(ha, CRB_RCVPEG_STATE); read_unlock(&ha->hw_lock); switch (val) { case PHAN_INITIALIZE_COMPLETE: case PHAN_INITIALIZE_ACK: return QLA_SUCCESS; case PHAN_INITIALIZE_FAILED: break; default: break; } ql_log(ql_log_info, vha, 0x00ab, "CRB_RCVPEG_STATE: 0x%x and retries: 0x%x.\n", val, retries); msleep(500); } 
while (--retries); ql_log(ql_log_fatal, vha, 0x00ac, "Rcv Peg initializatin failed: 0x%x.\n", val); read_lock(&ha->hw_lock); qla82xx_wr_32(ha, CRB_RCVPEG_STATE, PHAN_INITIALIZE_FAILED); read_unlock(&ha->hw_lock); return QLA_FUNCTION_FAILED; } /* ISR related functions */ static struct qla82xx_legacy_intr_set legacy_intr[] = \ QLA82XX_LEGACY_INTR_CONFIG; /* * qla82xx_mbx_completion() - Process mailbox command completions. * @ha: SCSI driver HA context * @mb0: Mailbox0 register */ static void qla82xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0) { uint16_t cnt; uint16_t __iomem *wptr; struct qla_hw_data *ha = vha->hw; struct device_reg_82xx __iomem *reg = &ha->iobase->isp82; wptr = (uint16_t __iomem *)&reg->mailbox_out[1]; /* Load return mailbox registers. */ ha->flags.mbox_int = 1; ha->mailbox_out[0] = mb0; for (cnt = 1; cnt < ha->mbx_count; cnt++) { ha->mailbox_out[cnt] = RD_REG_WORD(wptr); wptr++; } if (!ha->mcp) ql_dbg(ql_dbg_async, vha, 0x5053, "MBX pointer ERROR.\n"); } /* * qla82xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx. * @irq: * @dev_id: SCSI driver HA context * @regs: * * Called by system whenever the host adapter generates an interrupt. * * Returns handled flag. 
*/ irqreturn_t qla82xx_intr_handler(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; int status = 0, status1 = 0; unsigned long flags; unsigned long iter; uint32_t stat = 0; uint16_t mb[4]; rsp = (struct rsp_que *) dev_id; if (!rsp) { ql_log(ql_log_info, NULL, 0xb053, "%s: NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; if (!ha->flags.msi_enabled) { status = qla82xx_rd_32(ha, ISR_INT_VECTOR); if (!(status & ha->nx_legacy_intr.int_vec_bit)) return IRQ_NONE; status1 = qla82xx_rd_32(ha, ISR_INT_STATE_REG); if (!ISR_IS_LEGACY_INTR_TRIGGERED(status1)) return IRQ_NONE; } /* clear the interrupt */ qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_status_reg, 0xffffffff); /* read twice to ensure write is flushed */ qla82xx_rd_32(ha, ISR_INT_VECTOR); qla82xx_rd_32(ha, ISR_INT_VECTOR); reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); for (iter = 1; iter--; ) { if (RD_REG_DWORD(&reg->host_int)) { stat = RD_REG_DWORD(&reg->host_status); switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_REG_WORD(&reg->mailbox_out[1]); mb[2] = RD_REG_WORD(&reg->mailbox_out[2]); mb[3] = RD_REG_WORD(&reg->mailbox_out[3]); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_async, vha, 0x5054, "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } } WRT_REG_DWORD(&reg->host_int, 0); } #ifdef QL_DEBUG_LEVEL_17 if (!irq && ha->flags.eeh_busy) ql_log(ql_log_warn, vha, 0x503d, "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n", status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); #endif qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (!ha->flags.msi_enabled) qla82xx_wr_32(ha, 
ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); return IRQ_HANDLED; } irqreturn_t qla82xx_msix_default(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; int status = 0; unsigned long flags; uint32_t stat = 0; uint16_t mb[4]; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); do { if (RD_REG_DWORD(&reg->host_int)) { stat = RD_REG_DWORD(&reg->host_status); switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_REG_WORD(&reg->mailbox_out[1]); mb[2] = RD_REG_WORD(&reg->mailbox_out[2]); mb[3] = RD_REG_WORD(&reg->mailbox_out[3]); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_async, vha, 0x5041, "Unrecognized interrupt type (%d).\n", stat & 0xff); break; } } WRT_REG_DWORD(&reg->host_int, 0); } while (0); #ifdef QL_DEBUG_LEVEL_17 if (!irq && ha->flags.eeh_busy) ql_log(ql_log_warn, vha, 0x5044, "isr:status %x, cmd_flags %lx, mbox_int %x, stat %x.\n", status, ha->mbx_cmd_flags, ha->flags.mbox_int, stat); #endif qla2x00_handle_mbx_completion(ha, status); spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } irqreturn_t qla82xx_msix_rsp_q(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer.\n", __func__); return IRQ_NONE; } ha = rsp->hw; reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); 
WRT_REG_DWORD(&reg->host_int, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); return IRQ_HANDLED; } void qla82xx_poll(int irq, void *dev_id) { scsi_qla_host_t *vha; struct qla_hw_data *ha; struct rsp_que *rsp; struct device_reg_82xx __iomem *reg; int status = 0; uint32_t stat; uint16_t mb[4]; unsigned long flags; rsp = (struct rsp_que *) dev_id; if (!rsp) { printk(KERN_INFO "%s(): NULL response queue pointer.\n", __func__); return; } ha = rsp->hw; reg = &ha->iobase->isp82; spin_lock_irqsave(&ha->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); if (RD_REG_DWORD(&reg->host_int)) { stat = RD_REG_DWORD(&reg->host_status); switch (stat & 0xff) { case 0x1: case 0x2: case 0x10: case 0x11: qla82xx_mbx_completion(vha, MSW(stat)); status |= MBX_INTERRUPT; break; case 0x12: mb[0] = MSW(stat); mb[1] = RD_REG_WORD(&reg->mailbox_out[1]); mb[2] = RD_REG_WORD(&reg->mailbox_out[2]); mb[3] = RD_REG_WORD(&reg->mailbox_out[3]); qla2x00_async_event(vha, rsp, mb); break; case 0x13: qla24xx_process_response_queue(vha, rsp); break; default: ql_dbg(ql_dbg_p3p, vha, 0xb013, "Unrecognized interrupt type (%d).\n", stat * 0xff); break; } } WRT_REG_DWORD(&reg->host_int, 0); spin_unlock_irqrestore(&ha->hardware_lock, flags); } void qla82xx_enable_intrs(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_mbx_intr_enable(vha); spin_lock_irq(&ha->hardware_lock); qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0xfbff); spin_unlock_irq(&ha->hardware_lock); ha->interrupts_on = 1; } void qla82xx_disable_intrs(struct qla_hw_data *ha) { scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); qla82xx_mbx_intr_disable(vha); spin_lock_irq(&ha->hardware_lock); qla82xx_wr_32(ha, ha->nx_legacy_intr.tgt_mask_reg, 0x0400); spin_unlock_irq(&ha->hardware_lock); ha->interrupts_on = 0; } void qla82xx_init_flags(struct qla_hw_data *ha) { struct qla82xx_legacy_intr_set *nx_legacy_intr; /* ISP 8021 initializations */ rwlock_init(&ha->hw_lock); ha->qdr_sn_window = -1; 
/* Remainder of qla82xx_init_flags() (definition starts above this chunk):
 * cache the per-port legacy interrupt register set for this function. */
    ha->ddr_mn_window = -1;
    ha->curr_window = 255;
    ha->portnum = PCI_FUNC(ha->pdev->devfn);
    nx_legacy_intr = &legacy_intr[ha->portnum];
    ha->nx_legacy_intr.int_vec_bit = nx_legacy_intr->int_vec_bit;
    ha->nx_legacy_intr.tgt_status_reg = nx_legacy_intr->tgt_status_reg;
    ha->nx_legacy_intr.tgt_mask_reg = nx_legacy_intr->tgt_mask_reg;
    ha->nx_legacy_intr.pci_int_reg = nx_legacy_intr->pci_int_reg;
}

/* Publish this driver's IDC version when we are the only active function;
 * otherwise warn if another function registered an incompatible one. */
inline void
qla82xx_set_idc_version(scsi_qla_host_t *vha)
{
    int idc_ver;
    uint32_t drv_active;
    struct qla_hw_data *ha = vha->hw;

    drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
    if (drv_active == (QLA82XX_DRV_ACTIVE << (ha->portnum * 4))) {
        /* We own DRV_ACTIVE alone: safe to write the version word. */
        qla82xx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION,
            QLA82XX_IDC_VERSION);
        ql_log(ql_log_info, vha, 0xb082,
            "IDC version updated to %d\n", QLA82XX_IDC_VERSION);
    } else {
        idc_ver = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_IDC_VERSION);
        if (idc_ver != QLA82XX_IDC_VERSION)
            ql_log(ql_log_info, vha, 0xb083,
                "qla2xxx driver IDC version %d is not compatible "
                "with IDC version %d of the other drivers\n",
                QLA82XX_IDC_VERSION, idc_ver);
    }
}

/* Set this port's active bit in the shared DRV_ACTIVE CRB register
 * (each port owns a 4-bit field at portnum*4). */
inline void
qla82xx_set_drv_active(scsi_qla_host_t *vha)
{
    uint32_t drv_active;
    struct qla_hw_data *ha = vha->hw;

    drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);

    /* If reset value is all FF's, initialize DRV_ACTIVE */
    if (drv_active == 0xffffffff) {
        qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE,
            QLA82XX_DRV_NOT_ACTIVE);
        drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
    }
    drv_active |= (QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
    qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
}

/* Clear this port's active bit in DRV_ACTIVE. */
inline void
qla82xx_clear_drv_active(struct qla_hw_data *ha)
{
    uint32_t drv_active;

    drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
    drv_active &= ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
    qla82xx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
}

/* Nonzero when this function must take part in an adapter reset: either
 * it owns the reset, or its reset-ready bit is set in DRV_STATE. */
static inline int
qla82xx_need_reset(struct qla_hw_data *ha)
{
    uint32_t drv_state;
    int rval;

    if (ha->flags.nic_core_reset_owner)
        return 1;
    else {
        drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
        rval = drv_state & (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
        return rval;
    }
}

/* Set this port's reset-ready bit in DRV_STATE, initializing the register
 * first if it reads back as all FF's. */
static inline void
qla82xx_set_rst_ready(struct qla_hw_data *ha)
{
    uint32_t drv_state;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

    drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);

    /* If reset value is all FF's, initialize DRV_STATE */
    if (drv_state == 0xffffffff) {
        qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE,
            QLA82XX_DRVST_NOT_RDY);
        drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
    }
    drv_state |= (QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
    ql_dbg(ql_dbg_init, vha, 0x00bb,
        "drv_state = 0x%08x.\n", drv_state);
    qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}

/* Clear this port's reset-ready bit in DRV_STATE. */
static inline void
qla82xx_clear_rst_ready(struct qla_hw_data *ha)
{
    uint32_t drv_state;

    drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
    drv_state &= ~(QLA82XX_DRVST_RST_RDY << (ha->portnum * 4));
    qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
}

/* Set this port's quiescence-ready bit in DRV_STATE. */
static inline void
qla82xx_set_qsnt_ready(struct qla_hw_data *ha)
{
    uint32_t qsnt_state;

    qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
    qsnt_state |= (QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
    qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
}

/* Clear this port's quiescence-ready bit in DRV_STATE. */
void
qla82xx_clear_qsnt_ready(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    uint32_t qsnt_state;

    qsnt_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
    qsnt_state &= ~(QLA82XX_DRVST_QSNT_RDY << (ha->portnum * 4));
    qla82xx_wr_32(ha, QLA82XX_CRB_DRV_STATE, qsnt_state);
}

/* Load ISP82xx firmware: CRB init from ROM, release QM/CAMRAM from reset,
 * then load from flash or request-firmware blob (continues below chunk). */
static int
qla82xx_load_fw(scsi_qla_host_t *vha)
{
    int rst;
    struct fw_blob *blob;
    struct qla_hw_data *ha = vha->hw;

    if (qla82xx_pinit_from_rom(vha) != QLA_SUCCESS) {
        ql_log(ql_log_fatal, vha, 0x009f,
            "Error during CRB initialization.\n");
        return QLA_FUNCTION_FAILED;
    }
    udelay(500);

    /* Bring QM and CAMRAM out of reset */
    rst = qla82xx_rd_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET);
    rst &= ~((1 << 28) | (1 << 24));
    qla82xx_wr_32(ha, QLA82XX_ROMUSB_GLB_SW_RESET, rst);

    /*
     * FW Load priority:
     * 1) Operational firmware
* residing in flash.
     * 2) Firmware via request-firmware interface (.bin file).
     */
    if (ql2xfwloadbin == 2)
        goto try_blob_fw;

    ql_log(ql_log_info, vha, 0x00a0,
        "Attempting to load firmware from flash.\n");

    if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
        ql_log(ql_log_info, vha, 0x00a1,
            "Firmware loaded successfully from flash.\n");
        return QLA_SUCCESS;
    } else {
        ql_log(ql_log_warn, vha, 0x0108,
            "Firmware load from flash failed.\n");
    }

try_blob_fw:
    ql_log(ql_log_info, vha, 0x00a2,
        "Attempting to load firmware from blob.\n");

    /* Load firmware blob. */
    blob = ha->hablob = qla2x00_request_firmware(vha);
    if (!blob) {
        ql_log(ql_log_fatal, vha, 0x00a3,
            "Firmware image not present.\n");
        goto fw_load_failed;
    }

    /* Validating firmware blob */
    if (qla82xx_validate_firmware_blob(vha,
        QLA82XX_FLASH_ROMIMAGE)) {
        /* Fallback to URI format */
        if (qla82xx_validate_firmware_blob(vha,
            QLA82XX_UNIFIED_ROMIMAGE)) {
            ql_log(ql_log_fatal, vha, 0x00a4,
                "No valid firmware image found.\n");
            return QLA_FUNCTION_FAILED;
        }
    }

    if (qla82xx_fw_load_from_blob(ha) == QLA_SUCCESS) {
        ql_log(ql_log_info, vha, 0x00a5,
            "Firmware loaded successfully from binary blob.\n");
        return QLA_SUCCESS;
    } else {
        ql_log(ql_log_fatal, vha, 0x00a6,
            "Firmware load failed for binary blob.\n");
        blob->fw = NULL;
        blob = NULL;
        goto fw_load_failed;
    }
    /* NOTE(review): unreachable - both branches above return or goto. */
    return QLA_SUCCESS;

fw_load_failed:
    return QLA_FUNCTION_FAILED;
}

/* Bring ISP82xx firmware up: scrub DMA mask register, zero CMD/RCV peg
 * state and stale halt-status, load firmware, handshake with the card,
 * record negotiated PCIe link width, then sync with the receive peg. */
int
qla82xx_start_firmware(scsi_qla_host_t *vha)
{
    uint16_t lnk;
    struct qla_hw_data *ha = vha->hw;

    /* scrub dma mask expansion register */
    qla82xx_wr_32(ha, CRB_DMA_SHIFT, QLA82XX_DMA_SHIFT_VALUE);

    /* Put both the PEG CMD and RCV PEG to default state
     * of 0 before resetting the hardware
     */
    qla82xx_wr_32(ha, CRB_CMDPEG_STATE, 0);
    qla82xx_wr_32(ha, CRB_RCVPEG_STATE, 0);

    /* Overwrite stale initialization register values */
    qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS1, 0);
    qla82xx_wr_32(ha, QLA82XX_PEG_HALT_STATUS2, 0);

    if (qla82xx_load_fw(vha) != QLA_SUCCESS) {
        ql_log(ql_log_fatal, vha, 0x00a7,
            "Error trying to start fw.\n");
        return QLA_FUNCTION_FAILED;
    }

    /* Handshake with the card before we register the devices. */
    if (qla82xx_check_cmdpeg_state(ha) != QLA_SUCCESS) {
        ql_log(ql_log_fatal, vha, 0x00aa,
            "Error during card handshake.\n");
        return QLA_FUNCTION_FAILED;
    }

    /* Negotiated Link width */
    pcie_capability_read_word(ha->pdev, PCI_EXP_LNKSTA, &lnk);
    ha->link_width = (lnk >> 4) & 0x3f;

    /* Synchronize with Receive peg */
    return qla82xx_check_rcvpeg_state(ha);
}

/* Read `length` bytes of flash as little-endian dwords into dwptr.
 * Stops early (partial buffer) if a ROM fast read fails. */
static uint32_t *
qla82xx_read_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
    uint32_t length)
{
    uint32_t i;
    uint32_t val;
    struct qla_hw_data *ha = vha->hw;

    /* Dword reads to flash. */
    for (i = 0; i < length/4; i++, faddr += 4) {
        if (qla82xx_rom_fast_read(ha, faddr, &val)) {
            ql_log(ql_log_warn, vha, 0x0106,
                "Do ROM fast read failed.\n");
            goto done_read;
        }
        dwptr[i] = __constant_cpu_to_le32(val);
    }
done_read:
    return dwptr;
}

/* Clear the flash block-protect bits so sectors can be written.
 * Takes/releases the ROM hardware lock; restores protection on failure. */
static int
qla82xx_unprotect_flash(struct qla_hw_data *ha)
{
    int ret;
    uint32_t val;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

    ret = ql82xx_rom_lock_d(ha);
    if (ret < 0) {
        ql_log(ql_log_warn, vha, 0xb014,
            "ROM Lock failed.\n");
        return ret;
    }

    ret = qla82xx_read_status_reg(ha, &val);
    if (ret < 0)
        goto done_unprotect;

    val &= ~(BLOCK_PROTECT_BITS << 2);
    ret = qla82xx_write_status_reg(ha, val);
    if (ret < 0) {
        /* Write failed: put the protect bits back. */
        val |= (BLOCK_PROTECT_BITS << 2);
        qla82xx_write_status_reg(ha, val);
    }

    if (qla82xx_write_disable_flash(ha) != 0)
        ql_log(ql_log_warn, vha, 0xb015,
            "Write disable failed.\n");

done_unprotect:
    qla82xx_rom_unlock(ha);
    return ret;
}

/* Re-assert the flash block-protect bits after an update
 * (the call continues below this chunk). */
static int
qla82xx_protect_flash(struct qla_hw_data *ha)
{
    int ret;
    uint32_t val;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

    ret = ql82xx_rom_lock_d(ha);
    if (ret < 0) {
        ql_log(ql_log_warn, vha, 0xb016,
            "ROM Lock failed.\n");
        return ret;
    }

    ret = qla82xx_read_status_reg(ha, &val);
    if (ret < 0)
        goto done_protect;

    val |= (BLOCK_PROTECT_BITS << 2);
    /* LOCK all sectors */
    ret = qla82xx_write_status_reg(ha, val);
    if (ret < 0)
        ql_log(ql_log_warn, vha,
0xb017,
            "Write status register failed.\n");

    if (qla82xx_write_disable_flash(ha) != 0)
        ql_log(ql_log_warn, vha, 0xb018,
            "Write disable failed.\n");
done_protect:
    qla82xx_rom_unlock(ha);
    return ret;
}

/* Erase one flash sector at `addr` (M25P sector-erase opcode) under the
 * ROM hardware lock; waits for both ROM and flash write completion. */
static int
qla82xx_erase_sector(struct qla_hw_data *ha, int addr)
{
    int ret = 0;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

    ret = ql82xx_rom_lock_d(ha);
    if (ret < 0) {
        ql_log(ql_log_warn, vha, 0xb019,
            "ROM Lock failed.\n");
        return ret;
    }

    qla82xx_flash_set_write_enable(ha);
    qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ADDRESS, addr);
    qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_ABYTE_CNT, 3);
    qla82xx_wr_32(ha, QLA82XX_ROMUSB_ROM_INSTR_OPCODE, M25P_INSTR_SE);

    if (qla82xx_wait_rom_done(ha)) {
        ql_log(ql_log_warn, vha, 0xb01a,
            "Error waiting for rom done.\n");
        ret = -1;
        goto done;
    }
    ret = qla82xx_flash_wait_write_finish(ha);
done:
    qla82xx_rom_unlock(ha);
    return ret;
}

/*
 * Address and length are byte address
 */
uint8_t *
qla82xx_read_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
    uint32_t offset, uint32_t length)
{
    /* Quiesce SCSI traffic while the flash is being read. */
    scsi_block_requests(vha->host);
    qla82xx_read_flash_data(vha, (uint32_t *)buf, offset, length);
    scsi_unblock_requests(vha->host);
    return buf;
}

/* Write `dwords` dwords to flash at byte address `faddr`: unprotect,
 * erase sectors on sector boundaries, burst-write via DMA when possible
 * (falling back to slow dword writes), then re-protect. */
static int
qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
    uint32_t faddr, uint32_t dwords)
{
    int ret;
    uint32_t liter;
    uint32_t sec_mask, rest_addr;
    dma_addr_t optrom_dma;
    void *optrom = NULL;
    int page_mode = 0;
    struct qla_hw_data *ha = vha->hw;

    ret = -1;

    /* Prepare burst-capable write on supported ISPs.
     */
    if (page_mode && !(faddr & 0xfff) &&
        dwords > OPTROM_BURST_DWORDS) {
        optrom = dma_alloc_coherent(&ha->pdev->dev, OPTROM_BURST_SIZE,
            &optrom_dma, GFP_KERNEL);
        if (!optrom) {
            /* Non-fatal: slow path below still works. */
            ql_log(ql_log_warn, vha, 0xb01b,
                "Unable to allocate memory "
                "for optrom burst write (%x KB).\n",
                OPTROM_BURST_SIZE / 1024);
        }
    }

    rest_addr = ha->fdt_block_size - 1;
    sec_mask = ~rest_addr;

    ret = qla82xx_unprotect_flash(ha);
    if (ret) {
        ql_log(ql_log_warn, vha, 0xb01c,
            "Unable to unprotect flash for update.\n");
        goto write_done;
    }

    for (liter = 0; liter < dwords; liter++, faddr += 4, dwptr++) {
        /* Are we at the beginning of a sector? */
        if ((faddr & rest_addr) == 0) {
            ret = qla82xx_erase_sector(ha, faddr);
            if (ret) {
                ql_log(ql_log_warn, vha, 0xb01d,
                    "Unable to erase sector: address=%x.\n",
                    faddr);
                break;
            }
        }

        /* Go with burst-write. */
        if (optrom && (liter + OPTROM_BURST_DWORDS) <= dwords) {
            /* Copy data to DMA'ble buffer. */
            memcpy(optrom, dwptr, OPTROM_BURST_SIZE);

            ret = qla2x00_load_ram(vha, optrom_dma,
                (ha->flash_data_off | faddr),
                OPTROM_BURST_DWORDS);
            if (ret != QLA_SUCCESS) {
                ql_log(ql_log_warn, vha, 0xb01e,
                    "Unable to burst-write optrom segment "
                    "(%x/%x/%llx).\n", ret,
                    (ha->flash_data_off | faddr),
                    (unsigned long long)optrom_dma);
                ql_log(ql_log_warn, vha, 0xb01f,
                    "Reverting to slow-write.\n");

                dma_free_coherent(&ha->pdev->dev,
                    OPTROM_BURST_SIZE, optrom, optrom_dma);
                optrom = NULL;
            } else {
                /* Skip the dwords the burst already wrote. */
                liter += OPTROM_BURST_DWORDS - 1;
                faddr += OPTROM_BURST_DWORDS - 1;
                dwptr += OPTROM_BURST_DWORDS - 1;
                continue;
            }
        }

        ret = qla82xx_write_flash_dword(ha, faddr,
            cpu_to_le32(*dwptr));
        if (ret) {
            ql_dbg(ql_dbg_p3p, vha, 0xb020,
                "Unable to program flash address=%x data=%x.\n",
                faddr, *dwptr);
            break;
        }
    }

    ret = qla82xx_protect_flash(ha);
    if (ret)
        ql_log(ql_log_warn, vha, 0xb021,
            "Unable to protect flash after update.\n");
write_done:
    if (optrom)
        dma_free_coherent(&ha->pdev->dev,
            OPTROM_BURST_SIZE, optrom, optrom_dma);
    return ret;
}

/* Byte-oriented public wrapper over qla82xx_write_flash_data()
 * (signature continues below this chunk). */
int
qla82xx_write_optrom_data(struct scsi_qla_host *vha, uint8_t *buf,
uint32_t offset, uint32_t length)
{
    int rval;

    /* Suspend HBA. */
    scsi_block_requests(vha->host);
    rval = qla82xx_write_flash_data(vha, (uint32_t *)buf, offset,
        length >> 2);
    scsi_unblock_requests(vha->host);

    /* Convert return ISP82xx to generic */
    if (rval)
        rval = QLA_FUNCTION_FAILED;
    else
        rval = QLA_SUCCESS;
    return rval;
}

/* Ring the request-queue doorbell for ISP82xx: advance the ring index,
 * then write the doorbell value (with read-back verification when not
 * using the ql2xdbwr CRB write path). */
void
qla82xx_start_iocbs(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];
    struct device_reg_82xx __iomem *reg;
    uint32_t dbval;

    /* Adjust ring index. */
    req->ring_index++;
    if (req->ring_index == req->length) {
        req->ring_index = 0;
        req->ring_ptr = req->ring;
    } else
        req->ring_ptr++;

    reg = &ha->iobase->isp82;
    dbval = 0x04 | (ha->portnum << 5);

    dbval = dbval | (req->id << 8) | (req->ring_index << 16);
    if (ql2xdbwr)
        qla82xx_wr_32(ha, ha->nxdb_wr_ptr, dbval);
    else {
        WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr, dbval);
        wmb();
        /* Re-post until the hardware reflects the doorbell value. */
        while (RD_REG_DWORD((void __iomem *)ha->nxdb_rd_ptr) != dbval) {
            WRT_REG_DWORD((unsigned long __iomem *)ha->nxdb_wr_ptr,
                dbval);
            wmb();
        }
    }
}

/* Recover a ROM lock abandoned by a dead owner by acquiring and
 * unconditionally releasing it. */
static void
qla82xx_rom_lock_recovery(struct qla_hw_data *ha)
{
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

    if (qla82xx_rom_lock(ha))
        /* Someone else is holding the lock. */
        ql_log(ql_log_info, vha, 0xb022, "Resetting rom_lock.\n");

    /*
     * Either we got the lock, or someone
     * else died while holding it.
     * In either case, unlock.
     */
    qla82xx_rom_unlock(ha);
}

/*
 * qla82xx_device_bootstrap
 *    Initialize device, set DEV_READY, start fw
 *
 * Note:
 *      IDC lock must be held upon entry
 *
 * Return:
 *    Success : 0
 *    Failed  : 1
 */
static int
qla82xx_device_bootstrap(scsi_qla_host_t *vha)
{
    int rval = QLA_SUCCESS;
    int i, timeout;
    uint32_t old_count, count;
    struct qla_hw_data *ha = vha->hw;
    int need_reset = 0, peg_stuck = 1;

    need_reset = qla82xx_need_reset(ha);
    old_count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);

    /* Sample the peg-alive counter for ~2s; if it never moves,
     * the firmware peg is stuck. */
    for (i = 0; i < 10; i++) {
        timeout = msleep_interruptible(200);
        if (timeout) {
            /* Interrupted: mark the device failed and bail. */
            qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
                QLA8XXX_DEV_FAILED);
            return QLA_FUNCTION_FAILED;
        }

        count = qla82xx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
        if (count != old_count)
            peg_stuck = 0;
    }

    if (need_reset) {
        /* We are trying to perform a recovery here. */
        if (peg_stuck)
            qla82xx_rom_lock_recovery(ha);
        goto dev_initialize;
    } else  {
        /* Start of day for this ha context. */
        if (peg_stuck) {
            /* Either we are the first or recovery in progress. */
            qla82xx_rom_lock_recovery(ha);
            goto dev_initialize;
        } else
            /* Firmware already running.
*/
            goto dev_ready;
    }

    return rval;

dev_initialize:
    /* set to DEV_INITIALIZING */
    ql_log(ql_log_info, vha, 0x009e,
        "HW State: INITIALIZING.\n");
    qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_INITIALIZING);

    /* Drop the IDC lock around the (slow) firmware start. */
    qla82xx_idc_unlock(ha);
    rval = qla82xx_start_firmware(vha);
    qla82xx_idc_lock(ha);

    if (rval != QLA_SUCCESS) {
        ql_log(ql_log_fatal, vha, 0x00ad,
            "HW State: FAILED.\n");
        qla82xx_clear_drv_active(ha);
        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_FAILED);
        return rval;
    }

dev_ready:
    ql_log(ql_log_info, vha, 0x00ae,
        "HW State: READY.\n");
    qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_READY);

    return QLA_SUCCESS;
}

/*
* qla82xx_need_qsnt_handler
*    Code to start quiescence sequence
*
* Note:
*      IDC lock must be held upon entry
*
* Return: void
*/
static void
qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    uint32_t dev_state, drv_state, drv_active;
    unsigned long reset_timeout;

    if (vha->flags.online) {
        /*Block any further I/O and wait for pending cmnds to complete*/
        qla2x00_quiesce_io(vha);
    }

    /* Set the quiescence ready bit */
    qla82xx_set_qsnt_ready(ha);

    /*wait for 30 secs for other functions to ack */
    reset_timeout = jiffies + (30 * HZ);
    drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
    drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
    /* Its 2 that is written when qsnt is acked, moving one bit */
    drv_active = drv_active << 0x01;

    while (drv_state != drv_active) {
        if (time_after_eq(jiffies, reset_timeout)) {
            /* quiescence timeout, other functions didn't ack
             * changing the state to DEV_READY
             */
            ql_log(ql_log_info, vha, 0xb023,
                "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
                "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
                drv_active, drv_state);
            qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
                QLA8XXX_DEV_READY);
            ql_log(ql_log_info, vha, 0xb025,
                "HW State: DEV_READY.\n");
            qla82xx_idc_unlock(ha);
            qla2x00_perform_loop_resync(vha);
            qla82xx_idc_lock(ha);

            qla82xx_clear_qsnt_ready(vha);
            return;
        }

        /* Drop the IDC lock while sleeping between polls. */
        qla82xx_idc_unlock(ha);
        msleep(1000);
        qla82xx_idc_lock(ha);

        drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
        drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
        drv_active = drv_active << 0x01;
    }
    dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
    /* everyone acked so set the state to DEV_QUIESCENCE */
    if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT) {
        ql_log(ql_log_info, vha, 0xb026,
            "HW State: DEV_QUIESCENT.\n");
        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_QUIESCENT);
    }
}

/*
* qla82xx_wait_for_state_change
*    Wait for device state to change from given current state
*
* Note:
*     IDC lock must not be held upon entry
*
* Return:
*    Changed device state.
*/
uint32_t
qla82xx_wait_for_state_change(scsi_qla_host_t *vha, uint32_t curr_state)
{
    struct qla_hw_data *ha = vha->hw;
    uint32_t dev_state;

    /* Poll DEV_STATE once per second (under the IDC lock) until it
     * differs from curr_state. */
    do {
        msleep(1000);
        qla82xx_idc_lock(ha);
        dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
        qla82xx_idc_unlock(ha);
    } while (dev_state == curr_state);

    return dev_state;
}

/* Permanently disable the board: clear DRV_ACTIVE, abort all commands,
 * mark all devices lost, and flag the host offline. */
void
qla8xxx_dev_failed_handler(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;

    /* Disable the board */
    ql_log(ql_log_fatal, vha, 0x00b8,
        "Disabling the board.\n");

    if (IS_QLA82XX(ha)) {
        qla82xx_clear_drv_active(ha);
        qla82xx_idc_unlock(ha);
    }

    /* Set DEV_FAILED flag to disable timer */
    vha->device_flags |= DFLG_DEV_FAILED;
    qla2x00_abort_all_cmds(vha, DID_NO_CONNECT << 16);
    qla2x00_mark_all_devices_lost(vha, 0);
    vha->flags.online = 0;
    vha->flags.init_done = 0;
}

/*
 * qla82xx_need_reset_handler
 *    Code to start reset sequence
 *
 * Note:
 *      IDC lock must be held upon entry
 *
 * Return:
 *    Success : 0
 *    Failed  : 1
 */
static void
qla82xx_need_reset_handler(scsi_qla_host_t *vha)
{
    uint32_t dev_state, drv_state, drv_active;
    uint32_t active_mask = 0;
    unsigned long reset_timeout;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = ha->req_q_map[0];

    if (vha->flags.online) {
        /* Clean up outside the IDC lock; these calls can sleep. */
        qla82xx_idc_unlock(ha);
        qla2x00_abort_isp_cleanup(vha);
        ha->isp_ops->get_flash_version(vha, req->ring);
        ha->isp_ops->nvram_config(vha);
        qla82xx_idc_lock(ha);
    }
/* Continuation of qla82xx_need_reset_handler(): acknowledge the reset
 * (non-owner) or exclude our own bit from the ack mask (owner). */
    drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
    if (!ha->flags.nic_core_reset_owner) {
        ql_dbg(ql_dbg_p3p, vha, 0xb028,
            "reset_acknowledged by 0x%x\n", ha->portnum);
        qla82xx_set_rst_ready(ha);
    } else {
        active_mask = ~(QLA82XX_DRV_ACTIVE << (ha->portnum * 4));
        drv_active &= active_mask;
        ql_dbg(ql_dbg_p3p, vha, 0xb029,
            "active_mask: 0x%08x\n", active_mask);
    }

    /* wait for 10 seconds for reset ack from all functions */
    reset_timeout = jiffies + (ha->fcoe_reset_timeout * HZ);

    drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
    drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
    dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);

    ql_dbg(ql_dbg_p3p, vha, 0xb02a,
        "drv_state: 0x%08x, drv_active: 0x%08x, "
        "dev_state: 0x%08x, active_mask: 0x%08x\n",
        drv_state, drv_active, dev_state, active_mask);

    while (drv_state != drv_active &&
        dev_state != QLA8XXX_DEV_INITIALIZING) {
        if (time_after_eq(jiffies, reset_timeout)) {
            ql_log(ql_log_warn, vha, 0x00b5,
                "Reset timeout.\n");
            break;
        }
        qla82xx_idc_unlock(ha);
        msleep(1000);
        qla82xx_idc_lock(ha);
        drv_state = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
        drv_active = qla82xx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
        if (ha->flags.nic_core_reset_owner)
            drv_active &= active_mask;
        dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
    }

    ql_dbg(ql_dbg_p3p, vha, 0xb02b,
        "drv_state: 0x%08x, drv_active: 0x%08x, "
        "dev_state: 0x%08x, active_mask: 0x%08x\n",
        drv_state, drv_active, dev_state, active_mask);

    ql_log(ql_log_info, vha, 0x00b6,
        "Device state is 0x%x = %s.\n",
        dev_state,
        dev_state < MAX_STATES ?
        qdev_state(dev_state) : "Unknown");

    /* Force to DEV_COLD unless someone else is starting a reset */
    if (dev_state != QLA8XXX_DEV_INITIALIZING &&
        dev_state != QLA8XXX_DEV_COLD) {
        ql_log(ql_log_info, vha, 0x00b7,
            "HW State: COLD/RE-INIT.\n");
        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA8XXX_DEV_COLD);
        qla82xx_set_rst_ready(ha);
        if (ql2xmdenable) {
            if (qla82xx_md_collect(vha))
                ql_log(ql_log_warn, vha, 0xb02c,
                    "Minidump not collected.\n");
        } else
            ql_log(ql_log_warn, vha, 0xb04f,
                "Minidump disabled.\n");
    }
}

/* Refresh the firmware version; if it changed and no dump is pending,
 * re-allocate minidump resources sized for the new firmware. */
int
qla82xx_check_md_needed(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    uint16_t fw_major_version, fw_minor_version, fw_subminor_version;
    int rval = QLA_SUCCESS;

    /* Remember the version in effect before the query. */
    fw_major_version = ha->fw_major_version;
    fw_minor_version = ha->fw_minor_version;
    fw_subminor_version = ha->fw_subminor_version;

    rval = qla2x00_get_fw_version(vha);
    if (rval != QLA_SUCCESS)
        return rval;

    if (ql2xmdenable) {
        if (!ha->fw_dumped) {
            if (fw_major_version != ha->fw_major_version ||
                fw_minor_version != ha->fw_minor_version ||
                fw_subminor_version != ha->fw_subminor_version) {
                ql_log(ql_log_info, vha, 0xb02d,
                    "Firmware version differs "
                    "Previous version: %d:%d:%d - "
                    "New version: %d:%d:%d\n",
                    fw_major_version, fw_minor_version,
                    fw_subminor_version,
                    ha->fw_major_version,
                    ha->fw_minor_version,
                    ha->fw_subminor_version);
                /* Release MiniDump resources */
                qla82xx_md_free(vha);
                /* ALlocate MiniDump resources */
                qla82xx_md_prep(vha);
            }
        } else
            ql_log(ql_log_info, vha, 0xb02e,
                "Firmware dump available to retrieve\n");
    }
    return rval;
}

/* Heartbeat check: returns 1 when the firmware peg-alive counter has not
 * moved for two consecutive calls, 0 otherwise (continues below chunk). */
static int
qla82xx_check_fw_alive(scsi_qla_host_t *vha)
{
    uint32_t fw_heartbeat_counter;
    int status = 0;

    fw_heartbeat_counter = qla82xx_rd_32(vha->hw,
        QLA82XX_PEG_ALIVE_COUNTER);
    /* all 0xff, assume AER/EEH in progress, ignore */
    if (fw_heartbeat_counter == 0xffffffff) {
        ql_dbg(ql_dbg_timer, vha, 0x6003,
            "FW heartbeat counter is 0xffffffff, "
            "returning status=%d.\n", status);
        return status;
    }
    if (vha->fw_heartbeat_counter == fw_heartbeat_counter) {
/* Continuation of qla82xx_check_fw_alive(): counter unchanged, count
 * seconds; two stalled samples in a row mean the firmware is hung. */
        vha->seconds_since_last_heartbeat++;
        /* FW not alive after 2 seconds */
        if (vha->seconds_since_last_heartbeat == 2) {
            vha->seconds_since_last_heartbeat = 0;
            status = 1;
        }
    } else
        vha->seconds_since_last_heartbeat = 0;
    vha->fw_heartbeat_counter = fw_heartbeat_counter;
    if (status)
        ql_dbg(ql_dbg_timer, vha, 0x6004,
            "Returning status=%d.\n", status);
    return status;
}

/*
 * qla82xx_device_state_handler
 *    Main state handler
 *
 * Note:
 *      IDC lock must be held upon entry
 *
 * Return:
 *    Success : 0
 *    Failed  : 1
 */
int
qla82xx_device_state_handler(scsi_qla_host_t *vha)
{
    uint32_t dev_state;
    uint32_t old_dev_state;
    int rval = QLA_SUCCESS;
    unsigned long dev_init_timeout;
    struct qla_hw_data *ha = vha->hw;
    int loopcount = 0;

    qla82xx_idc_lock(ha);
    if (!vha->flags.init_done) {
        qla82xx_set_drv_active(vha);
        qla82xx_set_idc_version(vha);
    }

    dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
    old_dev_state = dev_state;
    ql_log(ql_log_info, vha, 0x009b,
        "Device state is 0x%x = %s.\n",
        dev_state,
        dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");

    /* wait for 30 seconds for device to go ready */
    dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout * HZ);

    while (1) {
        if (time_after_eq(jiffies, dev_init_timeout)) {
            ql_log(ql_log_fatal, vha, 0x009c,
                "Device init failed.\n");
            rval = QLA_FUNCTION_FAILED;
            break;
        }
        dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
        if (old_dev_state != dev_state) {
            loopcount = 0;
            old_dev_state = dev_state;
        }
        /* Rate-limit the state log to the first 5 unchanged polls. */
        if (loopcount < 5) {
            ql_log(ql_log_info, vha, 0x009d,
                "Device state is 0x%x = %s.\n",
                dev_state,
                dev_state < MAX_STATES ?
                qdev_state(dev_state) : "Unknown");
        }

        switch (dev_state) {
        case QLA8XXX_DEV_READY:
            ha->flags.nic_core_reset_owner = 0;
            goto rel_lock;
        case QLA8XXX_DEV_COLD:
            rval = qla82xx_device_bootstrap(vha);
            break;
        case QLA8XXX_DEV_INITIALIZING:
            qla82xx_idc_unlock(ha);
            msleep(1000);
            qla82xx_idc_lock(ha);
            break;
        case QLA8XXX_DEV_NEED_RESET:
            if (!ql2xdontresethba)
                qla82xx_need_reset_handler(vha);
            else {
                qla82xx_idc_unlock(ha);
                msleep(1000);
                qla82xx_idc_lock(ha);
            }
            dev_init_timeout = jiffies +
                (ha->fcoe_dev_init_timeout * HZ);
            break;
        case QLA8XXX_DEV_NEED_QUIESCENT:
            qla82xx_need_qsnt_handler(vha);
            /* Reset timeout value after quiescence handler */
            dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
                * HZ);
            break;
        case QLA8XXX_DEV_QUIESCENT:
            /* Owner will exit and other will wait for the state
             * to get changed
             */
            if (ha->flags.quiesce_owner)
                goto rel_lock;

            qla82xx_idc_unlock(ha);
            msleep(1000);
            qla82xx_idc_lock(ha);

            /* Reset timeout value after quiescence handler */
            dev_init_timeout = jiffies + (ha->fcoe_dev_init_timeout\
                * HZ);
            break;
        case QLA8XXX_DEV_FAILED:
            qla8xxx_dev_failed_handler(vha);
            rval = QLA_FUNCTION_FAILED;
            goto exit;
        default:
            qla82xx_idc_unlock(ha);
            msleep(1000);
            qla82xx_idc_lock(ha);
        }
        loopcount++;
    }
rel_lock:
    qla82xx_idc_unlock(ha);
exit:
    return rval;
}

/* Read the temperature CRB register: returns 1 (shut down) on PANIC,
 * logs a warning on WARN, else 0 (call continues below this chunk). */
static int qla82xx_check_temp(scsi_qla_host_t *vha)
{
    uint32_t temp, temp_state, temp_val;
    struct qla_hw_data *ha = vha->hw;

    temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
    temp_state = qla82xx_get_temp_state(temp);
    temp_val = qla82xx_get_temp_val(temp);

    if (temp_state == QLA82XX_TEMP_PANIC) {
        ql_log(ql_log_warn, vha, 0x600e,
            "Device temperature %d degrees C exceeds "
            " maximum allowed. Hardware has been shut down.\n",
            temp_val);
        return 1;
    } else if (temp_state == QLA82XX_TEMP_WARN) {
        ql_log(ql_log_warn, vha, 0x600f,
            "Device temperature %d degrees C exceeds "
            "operating range. 
Immediate action needed.\n",
            temp_val);
    }
    return 0;
}

/* Force premature completion of an in-flight mailbox command so its
 * waiter wakes up (used when the firmware is hung or being reset). */
void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;

    if (ha->flags.mbox_busy) {
        ha->flags.mbox_int = 1;
        ha->flags.mbox_busy = 0;
        ql_log(ql_log_warn, vha, 0x6010,
            "Doing premature completion of mbx command.\n");
        if (test_and_clear_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags))
            complete(&ha->mbx_intr_comp);
    }
}

/* Periodic watchdog: checks temperature, IDC device state, and the
 * firmware heartbeat; sets the appropriate dpc flags when recovery
 * (reset/quiesce/offline) is needed and dumps peg registers on hang. */
void qla82xx_watchdog(scsi_qla_host_t *vha)
{
    uint32_t dev_state, halt_status;
    struct qla_hw_data *ha = vha->hw;

    /* don't poll if reset is going on */
    if (!ha->flags.nic_core_reset_hdlr_active) {
        dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
        if (qla82xx_check_temp(vha)) {
            set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
            ha->flags.isp82xx_fw_hung = 1;
            qla82xx_clear_pending_mbx(vha);
        } else if (dev_state == QLA8XXX_DEV_NEED_RESET &&
            !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
            ql_log(ql_log_warn, vha, 0x6001,
                "Adapter reset needed.\n");
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        } else if (dev_state == QLA8XXX_DEV_NEED_QUIESCENT &&
            !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
            ql_log(ql_log_warn, vha, 0x6002,
                "Quiescent needed.\n");
            set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
        } else if (dev_state == QLA8XXX_DEV_FAILED &&
            !test_bit(ISP_UNRECOVERABLE, &vha->dpc_flags) &&
            vha->flags.online == 1) {
            ql_log(ql_log_warn, vha, 0xb055,
                "Adapter state is failed. Offlining.\n");
            set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
            ha->flags.isp82xx_fw_hung = 1;
            qla82xx_clear_pending_mbx(vha);
        } else {
            if (qla82xx_check_fw_alive(vha)) {
                ql_dbg(ql_dbg_timer, vha, 0x6011,
                    "disabling pause transmit on port 0 & 1.\n");
                qla82xx_wr_32(ha, QLA82XX_CRB_NIU + 0x98,
                    CRB_NIU_XG_PAUSE_CTL_P0|CRB_NIU_XG_PAUSE_CTL_P1);
                halt_status = qla82xx_rd_32(ha,
                    QLA82XX_PEG_HALT_STATUS1);
                ql_log(ql_log_info, vha, 0x6005,
                    "dumping hw/fw registers:.\n "
                    " PEG_HALT_STATUS1: 0x%x, PEG_HALT_STATUS2: 0x%x,.\n "
                    " PEG_NET_0_PC: 0x%x, PEG_NET_1_PC: 0x%x,.\n "
                    " PEG_NET_2_PC: 0x%x, PEG_NET_3_PC: 0x%x,.\n "
                    " PEG_NET_4_PC: 0x%x.\n", halt_status,
                    qla82xx_rd_32(ha, QLA82XX_PEG_HALT_STATUS2),
                    qla82xx_rd_32(ha,
                        QLA82XX_CRB_PEG_NET_0 + 0x3c),
                    qla82xx_rd_32(ha,
                        QLA82XX_CRB_PEG_NET_1 + 0x3c),
                    qla82xx_rd_32(ha,
                        QLA82XX_CRB_PEG_NET_2 + 0x3c),
                    qla82xx_rd_32(ha,
                        QLA82XX_CRB_PEG_NET_3 + 0x3c),
                    qla82xx_rd_32(ha,
                        QLA82XX_CRB_PEG_NET_4 + 0x3c));
                if (((halt_status & 0x1fffff00) >> 8) == 0x67)
                    ql_log(ql_log_warn, vha, 0xb052,
                        "Firmware aborted with "
                        "error code 0x00006700. Device is "
                        "being reset.\n");
                if (halt_status & HALT_STATUS_UNRECOVERABLE) {
                    set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
                } else {
                    ql_log(ql_log_info, vha, 0x6006,
                        "Detect abort needed.\n");
                    set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
                }
                ha->flags.isp82xx_fw_hung = 1;
                ql_log(ql_log_warn, vha, 0x6007,
                    "Firmware hung.\n");
                qla82xx_clear_pending_mbx(vha);
            }
        }
    }
}

/* For ISP82xx "load risc" is just running the IDC state machine. */
int qla82xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
{
    int rval;
    rval = qla82xx_device_state_handler(vha);
    return rval;
}

/* Claim ownership of a NIC core reset: move DEV_STATE from READY to
 * NEED_RESET and flag this function as the reset owner. */
void
qla82xx_set_reset_owner(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    uint32_t dev_state;

    dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
    if (dev_state == QLA8XXX_DEV_READY) {
        ql_log(ql_log_info, vha, 0xb02f,
            "HW State: NEED RESET\n");
        qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
            QLA8XXX_DEV_NEED_RESET);
        ha->flags.nic_core_reset_owner = 1;
        ql_dbg(ql_dbg_p3p, vha, 0xb030,
            "reset_owner is 0x%x\n", ha->portnum);
    } else
        ql_log(ql_log_info, vha, 0xb031,
            "Device state is 0x%x = %s.\n",
            dev_state,
            dev_state < MAX_STATES ? qdev_state(dev_state) : "Unknown");
}

/*
 *  qla82xx_abort_isp
 *      Resets ISP and aborts all outstanding commands.
 *
 *  Input:
 *      ha           = adapter block pointer.
 *
 *  Returns:
 *      0 = success
 */
int
qla82xx_abort_isp(scsi_qla_host_t *vha)
{
    int rval;
    struct qla_hw_data *ha = vha->hw;

    if (vha->device_flags & DFLG_DEV_FAILED) {
        ql_log(ql_log_warn, vha, 0x8024,
            "Device in failed state, exiting.\n");
        return QLA_SUCCESS;
    }
    ha->flags.nic_core_reset_hdlr_active = 1;

    qla82xx_idc_lock(ha);
    qla82xx_set_reset_owner(vha);
    qla82xx_idc_unlock(ha);

    rval = qla82xx_device_state_handler(vha);

    qla82xx_idc_lock(ha);
    qla82xx_clear_rst_ready(ha);
    qla82xx_idc_unlock(ha);

    if (rval == QLA_SUCCESS) {
        ha->flags.isp82xx_fw_hung = 0;
        ha->flags.nic_core_reset_hdlr_active = 0;
        qla82xx_restart_isp(vha);
    }

    if (rval) {
        vha->flags.online = 1;
        /* Retry bookkeeping: give up (disable board) only after
         * MAX_RETRIES_OF_ISP_ABORT consecutive failures. */
        if (test_bit(ISP_ABORT_RETRY, &vha->dpc_flags)) {
            if (ha->isp_abort_cnt == 0) {
                ql_log(ql_log_warn, vha, 0x8027,
                    "ISP error recover failed - board "
                    "disabled.\n");
                /*
                 * The next call disables the board
                 * completely.
                 */
                ha->isp_ops->reset_adapter(vha);
                vha->flags.online = 0;
                clear_bit(ISP_ABORT_RETRY,
                    &vha->dpc_flags);
                rval = QLA_SUCCESS;
            } else { /* schedule another ISP abort */
                ha->isp_abort_cnt--;
                ql_log(ql_log_warn, vha, 0x8036,
                    "ISP abort - retry remaining %d.\n",
                    ha->isp_abort_cnt);
                rval = QLA_FUNCTION_FAILED;
            }
        } else {
            ha->isp_abort_cnt = MAX_RETRIES_OF_ISP_ABORT;
            ql_dbg(ql_dbg_taskm, vha, 0x8029,
                "ISP error recovery - retrying (%d) more times.\n",
                ha->isp_abort_cnt);
            set_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
            rval = QLA_FUNCTION_FAILED;
        }
    }
    return rval;
}

/*
 *  qla82xx_fcoe_ctx_reset
 *      Perform a quick reset and aborts all outstanding commands.
 *      This will only perform an FCoE context reset and avoids a full blown
 *      chip reset.
 *
 *  Input:
 *      ha = adapter block pointer.
 *      is_reset_path = flag for identifying the reset path.
 *
 *  Returns:
 *      0 = success
 */
int qla82xx_fcoe_ctx_reset(scsi_qla_host_t *vha)
{
    int rval = QLA_FUNCTION_FAILED;

    if (vha->flags.online) {
        /* Abort all outstanding commands, so as to be requeued later */
        qla2x00_abort_isp_cleanup(vha);
    }

    /* Stop currently executing firmware.
* This will destroy existing FCoE context at the F/W end. */ qla2x00_try_to_stop_firmware(vha); /* Restart. Creates a new FCoE context on INIT_FIRMWARE. */ rval = qla82xx_restart_isp(vha); return rval; } /* * qla2x00_wait_for_fcoe_ctx_reset * Wait till the FCoE context is reset. * * Note: * Does context switching here. * Release SPIN_LOCK (if any) before calling this routine. * * Return: * Success (fcoe_ctx reset is done) : 0 * Failed (fcoe_ctx reset not completed within max loop timout ) : 1 */ int qla2x00_wait_for_fcoe_ctx_reset(scsi_qla_host_t *vha) { int status = QLA_FUNCTION_FAILED; unsigned long wait_reset; wait_reset = jiffies + (MAX_LOOP_TIMEOUT * HZ); while ((test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) || test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) && time_before(jiffies, wait_reset)) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(HZ); if (!test_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags) && !test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) { status = QLA_SUCCESS; break; } } ql_dbg(ql_dbg_p3p, vha, 0xb027, "%s: status=%d.\n", __func__, status); return status; } void qla82xx_chip_reset_cleanup(scsi_qla_host_t *vha) { int i; unsigned long flags; struct qla_hw_data *ha = vha->hw; /* Check if 82XX firmware is alive or not * We may have arrived here from NEED_RESET * detection only */ if (!ha->flags.isp82xx_fw_hung) { for (i = 0; i < 2; i++) { msleep(1000); if (qla82xx_check_fw_alive(vha)) { ha->flags.isp82xx_fw_hung = 1; qla82xx_clear_pending_mbx(vha); break; } } } ql_dbg(ql_dbg_init, vha, 0x00b0, "Entered %s fw_hung=%d.\n", __func__, ha->flags.isp82xx_fw_hung); /* Abort all commands gracefully if fw NOT hung */ if (!ha->flags.isp82xx_fw_hung) { int cnt, que; srb_t *sp; struct req_que *req; spin_lock_irqsave(&ha->hardware_lock, flags); for (que = 0; que < ha->max_req_queues; que++) { req = ha->req_q_map[que]; if (!req) continue; for (cnt = 1; cnt < req->num_outstanding_cmds; cnt++) { sp = req->outstanding_cmds[cnt]; if (sp) { if (!sp->u.scmd.ctx 
|| (sp->flags & SRB_FCP_CMND_DMA_VALID)) { spin_unlock_irqrestore( &ha->hardware_lock, flags); if (ha->isp_ops->abort_command(sp)) { ql_log(ql_log_info, vha, 0x00b1, "mbx abort failed.\n"); } else { ql_log(ql_log_info, vha, 0x00b2, "mbx abort success.\n"); } spin_lock_irqsave(&ha->hardware_lock, flags); } } } } spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait for pending cmds (physical and virtual) to complete */ if (!qla2x00_eh_wait_for_pending_commands(vha, 0, 0, WAIT_HOST) == QLA_SUCCESS) { ql_dbg(ql_dbg_init, vha, 0x00b3, "Done wait for " "pending commands.\n"); } } } /* Minidump related functions */ static int qla82xx_minidump_process_control(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; struct qla82xx_md_entry_crb *crb_entry; uint32_t read_value, opcode, poll_time; uint32_t addr, index, crb_addr; unsigned long wtime; struct qla82xx_md_template_hdr *tmplt_hdr; uint32_t rval = QLA_SUCCESS; int i; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; crb_entry = (struct qla82xx_md_entry_crb *)entry_hdr; crb_addr = crb_entry->addr; for (i = 0; i < crb_entry->op_count; i++) { opcode = crb_entry->crb_ctrl.opcode; if (opcode & QLA82XX_DBG_OPCODE_WR) { qla82xx_md_rw_32(ha, crb_addr, crb_entry->value_1, 1); opcode &= ~QLA82XX_DBG_OPCODE_WR; } if (opcode & QLA82XX_DBG_OPCODE_RW) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); qla82xx_md_rw_32(ha, crb_addr, read_value, 1); opcode &= ~QLA82XX_DBG_OPCODE_RW; } if (opcode & QLA82XX_DBG_OPCODE_AND) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); read_value &= crb_entry->value_2; opcode &= ~QLA82XX_DBG_OPCODE_AND; if (opcode & QLA82XX_DBG_OPCODE_OR) { read_value |= crb_entry->value_3; opcode &= ~QLA82XX_DBG_OPCODE_OR; } qla82xx_md_rw_32(ha, crb_addr, read_value, 1); } if (opcode & QLA82XX_DBG_OPCODE_OR) { read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); read_value |= crb_entry->value_3; qla82xx_md_rw_32(ha, crb_addr, read_value, 
1); opcode &= ~QLA82XX_DBG_OPCODE_OR; } if (opcode & QLA82XX_DBG_OPCODE_POLL) { poll_time = crb_entry->crb_strd.poll_timeout; wtime = jiffies + poll_time; read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); do { if ((read_value & crb_entry->value_2) == crb_entry->value_1) break; else if (time_after_eq(jiffies, wtime)) { /* capturing dump failed */ rval = QLA_FUNCTION_FAILED; break; } else read_value = qla82xx_md_rw_32(ha, crb_addr, 0, 0); } while (1); opcode &= ~QLA82XX_DBG_OPCODE_POLL; } if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else addr = crb_addr; read_value = qla82xx_md_rw_32(ha, addr, 0, 0); index = crb_entry->crb_ctrl.state_index_v; tmplt_hdr->saved_state_array[index] = read_value; opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE; } if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) { if (crb_entry->crb_strd.state_index_a) { index = crb_entry->crb_strd.state_index_a; addr = tmplt_hdr->saved_state_array[index]; } else addr = crb_addr; if (crb_entry->crb_ctrl.state_index_v) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; } else read_value = crb_entry->value_1; qla82xx_md_rw_32(ha, addr, read_value, 1); opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE; } if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) { index = crb_entry->crb_ctrl.state_index_v; read_value = tmplt_hdr->saved_state_array[index]; read_value <<= crb_entry->crb_ctrl.shl; read_value >>= crb_entry->crb_ctrl.shr; if (crb_entry->value_2) read_value &= crb_entry->value_2; read_value |= crb_entry->value_3; read_value += crb_entry->value_1; tmplt_hdr->saved_state_array[index] = read_value; opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE; } crb_addr += crb_entry->crb_strd.addr_stride; } return rval; } static void qla82xx_minidump_process_rdocm(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_stride, 
loop_cnt, i, r_value; struct qla82xx_md_entry_rdocm *ocm_hdr; uint32_t *data_ptr = *d_ptr; ocm_hdr = (struct qla82xx_md_entry_rdocm *)entry_hdr; r_addr = ocm_hdr->read_addr; r_stride = ocm_hdr->read_addr_stride; loop_cnt = ocm_hdr->op_count; for (i = 0; i < loop_cnt; i++) { r_value = RD_REG_DWORD((void __iomem *) (r_addr + ha->nx_pcibase)); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdmux(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value; struct qla82xx_md_entry_mux *mux_hdr; uint32_t *data_ptr = *d_ptr; mux_hdr = (struct qla82xx_md_entry_mux *)entry_hdr; r_addr = mux_hdr->read_addr; s_addr = mux_hdr->select_addr; s_stride = mux_hdr->select_value_stride; s_value = mux_hdr->select_value; loop_cnt = mux_hdr->op_count; for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, s_addr, s_value, 1); r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(s_value); *data_ptr++ = cpu_to_le32(r_value); s_value += s_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdcrb(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_stride, loop_cnt, i, r_value; struct qla82xx_md_entry_crb *crb_hdr; uint32_t *data_ptr = *d_ptr; crb_hdr = (struct qla82xx_md_entry_crb *)entry_hdr; r_addr = crb_hdr->addr; r_stride = crb_hdr->crb_strd.addr_stride; loop_cnt = crb_hdr->op_count; for (i = 0; i < loop_cnt; i++) { r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(r_addr); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } *d_ptr = data_ptr; } static int qla82xx_minidump_process_l2tag(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, 
t_value, r_cnt, r_value; unsigned long p_wait, w_time, p_mask; uint32_t c_value_w, c_value_r; struct qla82xx_md_entry_cache *cache_hdr; int rval = QLA_FUNCTION_FAILED; uint32_t *data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = cache_hdr->read_ctrl.read_addr_cnt; p_wait = cache_hdr->cache_ctrl.poll_wait; p_mask = cache_hdr->cache_ctrl.poll_mask; for (i = 0; i < loop_count; i++) { qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); if (c_value_w) qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); if (p_mask) { w_time = jiffies + p_wait; do { c_value_r = qla82xx_md_rw_32(ha, c_addr, 0, 0); if ((c_value_r & p_mask) == 0) break; else if (time_after_eq(jiffies, w_time)) { /* capturing dump failed */ ql_dbg(ql_dbg_p3p, vha, 0xb032, "c_value_r: 0x%x, poll_mask: 0x%lx, " "w_time: 0x%lx\n", c_value_r, p_mask, w_time); return rval; } } while (1); } addr = r_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; return QLA_SUCCESS; } static void qla82xx_minidump_process_l1cache(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t addr, r_addr, c_addr, t_r_addr; uint32_t i, k, loop_count, t_value, r_cnt, r_value; uint32_t c_value_w; struct qla82xx_md_entry_cache *cache_hdr; uint32_t *data_ptr = *d_ptr; cache_hdr = (struct qla82xx_md_entry_cache *)entry_hdr; loop_count = cache_hdr->op_count; r_addr = cache_hdr->read_addr; c_addr = cache_hdr->control_addr; c_value_w = cache_hdr->cache_ctrl.write_value; t_r_addr = cache_hdr->tag_reg_addr; t_value = cache_hdr->addr_ctrl.init_tag_value; r_cnt = 
cache_hdr->read_ctrl.read_addr_cnt; for (i = 0; i < loop_count; i++) { qla82xx_md_rw_32(ha, t_r_addr, t_value, 1); qla82xx_md_rw_32(ha, c_addr, c_value_w, 1); addr = r_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); addr += cache_hdr->read_ctrl.read_addr_stride; } t_value += cache_hdr->addr_ctrl.tag_value_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_queue(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t s_addr, r_addr; uint32_t r_stride, r_value, r_cnt, qid = 0; uint32_t i, k, loop_cnt; struct qla82xx_md_entry_queue *q_hdr; uint32_t *data_ptr = *d_ptr; q_hdr = (struct qla82xx_md_entry_queue *)entry_hdr; s_addr = q_hdr->select_addr; r_cnt = q_hdr->rd_strd.read_addr_cnt; r_stride = q_hdr->rd_strd.read_addr_stride; loop_cnt = q_hdr->op_count; for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, s_addr, qid, 1); r_addr = q_hdr->read_addr; for (k = 0; k < r_cnt; k++) { r_value = qla82xx_md_rw_32(ha, r_addr, 0, 0); *data_ptr++ = cpu_to_le32(r_value); r_addr += r_stride; } qid += q_hdr->q_strd.queue_id_stride; } *d_ptr = data_ptr; } static void qla82xx_minidump_process_rdrom(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t **d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_value; uint32_t i, loop_cnt; struct qla82xx_md_entry_rdrom *rom_hdr; uint32_t *data_ptr = *d_ptr; rom_hdr = (struct qla82xx_md_entry_rdrom *)entry_hdr; r_addr = rom_hdr->read_addr; loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t); for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW, (r_addr & 0xFFFF0000), 1); r_value = qla82xx_md_rw_32(ha, MD_DIRECT_ROM_READ_BASE + (r_addr & 0x0000FFFF), 0, 0); *data_ptr++ = cpu_to_le32(r_value); r_addr += sizeof(uint32_t); } *d_ptr = data_ptr; } static int qla82xx_minidump_process_rdmem(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, uint32_t 
**d_ptr) { struct qla_hw_data *ha = vha->hw; uint32_t r_addr, r_value, r_data; uint32_t i, j, loop_cnt; struct qla82xx_md_entry_rdmem *m_hdr; unsigned long flags; int rval = QLA_FUNCTION_FAILED; uint32_t *data_ptr = *d_ptr; m_hdr = (struct qla82xx_md_entry_rdmem *)entry_hdr; r_addr = m_hdr->read_addr; loop_cnt = m_hdr->read_data_size/16; if (r_addr & 0xf) { ql_log(ql_log_warn, vha, 0xb033, "Read addr 0x%x not 16 bytes aligned\n", r_addr); return rval; } if (m_hdr->read_data_size % 16) { ql_log(ql_log_warn, vha, 0xb034, "Read data[0x%x] not multiple of 16 bytes\n", m_hdr->read_data_size); return rval; } ql_dbg(ql_dbg_p3p, vha, 0xb035, "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n", __func__, r_addr, m_hdr->read_data_size, loop_cnt); write_lock_irqsave(&ha->hw_lock, flags); for (i = 0; i < loop_cnt; i++) { qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1); r_value = 0; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1); r_value = MIU_TA_CTL_ENABLE; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE; qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1); for (j = 0; j < MAX_CTL_CHECK; j++) { r_value = qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, 0, 0); if ((r_value & MIU_TA_CTL_BUSY) == 0) break; } if (j >= MAX_CTL_CHECK) { printk_ratelimited(KERN_ERR "failed to read through agent\n"); write_unlock_irqrestore(&ha->hw_lock, flags); return rval; } for (j = 0; j < 4; j++) { r_data = qla82xx_md_rw_32(ha, MD_MIU_TEST_AGT_RDDATA[j], 0, 0); *data_ptr++ = cpu_to_le32(r_data); } r_addr += 16; } write_unlock_irqrestore(&ha->hw_lock, flags); *d_ptr = data_ptr; return QLA_SUCCESS; } static int qla82xx_validate_template_chksum(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; uint64_t chksum = 0; uint32_t *d_ptr = (uint32_t *)ha->md_tmplt_hdr; int count = ha->md_template_size/sizeof(uint32_t); while (count-- > 0) chksum += *d_ptr++; while (chksum >> 32) chksum = (chksum & 0xFFFFFFFF) + 
(chksum >> 32); return ~chksum; } static void qla82xx_mark_entry_skipped(scsi_qla_host_t *vha, qla82xx_md_entry_hdr_t *entry_hdr, int index) { entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; ql_dbg(ql_dbg_p3p, vha, 0xb036, "Skipping entry[%d]: " "ETYPE[0x%x]-ELEVEL[0x%x]\n", index, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); } int qla82xx_md_collect(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int no_entry_hdr = 0; qla82xx_md_entry_hdr_t *entry_hdr; struct qla82xx_md_template_hdr *tmplt_hdr; uint32_t *data_ptr; uint32_t total_data_size = 0, f_capture_mask, data_collected = 0; int i = 0, rval = QLA_FUNCTION_FAILED; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; data_ptr = (uint32_t *)ha->md_dump; if (ha->fw_dumped) { ql_log(ql_log_warn, vha, 0xb037, "Firmware has been previously dumped (%p) " "-- ignoring request.\n", ha->fw_dump); goto md_failed; } ha->fw_dumped = 0; if (!ha->md_tmplt_hdr || !ha->md_dump) { ql_log(ql_log_warn, vha, 0xb038, "Memory not allocated for minidump capture\n"); goto md_failed; } if (ha->flags.isp82xx_no_md_cap) { ql_log(ql_log_warn, vha, 0xb054, "Forced reset from application, " "ignore minidump capture\n"); ha->flags.isp82xx_no_md_cap = 0; goto md_failed; } if (qla82xx_validate_template_chksum(vha)) { ql_log(ql_log_info, vha, 0xb039, "Template checksum validation error\n"); goto md_failed; } no_entry_hdr = tmplt_hdr->num_of_entries; ql_dbg(ql_dbg_p3p, vha, 0xb03a, "No of entry headers in Template: 0x%x\n", no_entry_hdr); ql_dbg(ql_dbg_p3p, vha, 0xb03b, "Capture Mask obtained: 0x%x\n", tmplt_hdr->capture_debug_level); f_capture_mask = tmplt_hdr->capture_debug_level & 0xFF; /* Validate whether required debug level is set */ if ((f_capture_mask & 0x3) != 0x3) { ql_log(ql_log_warn, vha, 0xb03c, "Minimum required capture mask[0x%x] level not set\n", f_capture_mask); goto md_failed; } tmplt_hdr->driver_capture_mask = ql2xmdcapmask; tmplt_hdr->driver_info[0] = vha->host_no; 
tmplt_hdr->driver_info[1] = (QLA_DRIVER_MAJOR_VER << 24) | (QLA_DRIVER_MINOR_VER << 16) | (QLA_DRIVER_PATCH_VER << 8) | QLA_DRIVER_BETA_VER; total_data_size = ha->md_dump_size; ql_dbg(ql_dbg_p3p, vha, 0xb03d, "Total minidump data_size 0x%x to be captured\n", total_data_size); /* Check whether template obtained is valid */ if (tmplt_hdr->entry_type != QLA82XX_TLHDR) { ql_log(ql_log_warn, vha, 0xb04e, "Bad template header entry type: 0x%x obtained\n", tmplt_hdr->entry_type); goto md_failed; } entry_hdr = (qla82xx_md_entry_hdr_t *) \ (((uint8_t *)ha->md_tmplt_hdr) + tmplt_hdr->first_entry_offset); /* Walk through the entry headers */ for (i = 0; i < no_entry_hdr; i++) { if (data_collected > total_data_size) { ql_log(ql_log_warn, vha, 0xb03e, "More MiniDump data collected: [0x%x]\n", data_collected); goto md_failed; } if (!(entry_hdr->d_ctrl.entry_capture_mask & ql2xmdcapmask)) { entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG; ql_dbg(ql_dbg_p3p, vha, 0xb03f, "Skipping entry[%d]: " "ETYPE[0x%x]-ELEVEL[0x%x]\n", i, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); goto skip_nxt_entry; } ql_dbg(ql_dbg_p3p, vha, 0xb040, "[%s]: data ptr[%d]: %p, entry_hdr: %p\n" "entry_type: 0x%x, captrue_mask: 0x%x\n", __func__, i, data_ptr, entry_hdr, entry_hdr->entry_type, entry_hdr->d_ctrl.entry_capture_mask); ql_dbg(ql_dbg_p3p, vha, 0xb041, "Data collected: [0x%x], Dump size left:[0x%x]\n", data_collected, (ha->md_dump_size - data_collected)); /* Decode the entry type and take * required action to capture debug data */ switch (entry_hdr->entry_type) { case QLA82XX_RDEND: qla82xx_mark_entry_skipped(vha, entry_hdr, i); break; case QLA82XX_CNTRL: rval = qla82xx_minidump_process_control(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_RDCRB: qla82xx_minidump_process_rdcrb(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDMEM: rval = qla82xx_minidump_process_rdmem(vha, 
entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_BOARD: case QLA82XX_RDROM: qla82xx_minidump_process_rdrom(vha, entry_hdr, &data_ptr); break; case QLA82XX_L2DTG: case QLA82XX_L2ITG: case QLA82XX_L2DAT: case QLA82XX_L2INS: rval = qla82xx_minidump_process_l2tag(vha, entry_hdr, &data_ptr); if (rval != QLA_SUCCESS) { qla82xx_mark_entry_skipped(vha, entry_hdr, i); goto md_failed; } break; case QLA82XX_L1DAT: case QLA82XX_L1INS: qla82xx_minidump_process_l1cache(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDOCM: qla82xx_minidump_process_rdocm(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDMUX: qla82xx_minidump_process_rdmux(vha, entry_hdr, &data_ptr); break; case QLA82XX_QUEUE: qla82xx_minidump_process_queue(vha, entry_hdr, &data_ptr); break; case QLA82XX_RDNOP: default: qla82xx_mark_entry_skipped(vha, entry_hdr, i); break; } ql_dbg(ql_dbg_p3p, vha, 0xb042, "[%s]: data ptr[%d]: %p\n", __func__, i, data_ptr); data_collected = (uint8_t *)data_ptr - (uint8_t *)ha->md_dump; skip_nxt_entry: entry_hdr = (qla82xx_md_entry_hdr_t *) \ (((uint8_t *)entry_hdr) + entry_hdr->entry_size); } if (data_collected != total_data_size) { ql_dbg(ql_dbg_p3p, vha, 0xb043, "MiniDump data mismatch: Data collected: [0x%x]," "total_data_size:[0x%x]\n", data_collected, total_data_size); goto md_failed; } ql_log(ql_log_info, vha, 0xb044, "Firmware dump saved to temp buffer (%ld/%p %ld/%p).\n", vha->host_no, ha->md_tmplt_hdr, vha->host_no, ha->md_dump); ha->fw_dumped = 1; qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP); md_failed: return rval; } int qla82xx_md_alloc(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int i, k; struct qla82xx_md_template_hdr *tmplt_hdr; tmplt_hdr = (struct qla82xx_md_template_hdr *)ha->md_tmplt_hdr; if (ql2xmdcapmask < 0x3 || ql2xmdcapmask > 0x7F) { ql2xmdcapmask = tmplt_hdr->capture_debug_level & 0xFF; ql_log(ql_log_info, vha, 0xb045, "Forcing driver capture mask 
to firmware default capture mask: 0x%x.\n", ql2xmdcapmask); } for (i = 0x2, k = 1; (i & QLA82XX_DEFAULT_CAP_MASK); i <<= 1, k++) { if (i & ql2xmdcapmask) ha->md_dump_size += tmplt_hdr->capture_size_array[k]; } if (ha->md_dump) { ql_log(ql_log_warn, vha, 0xb046, "Firmware dump previously allocated.\n"); return 1; } ha->md_dump = vmalloc(ha->md_dump_size); if (ha->md_dump == NULL) { ql_log(ql_log_warn, vha, 0xb047, "Unable to allocate memory for Minidump size " "(0x%x).\n", ha->md_dump_size); return 1; } return 0; } void qla82xx_md_free(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; /* Release the template header allocated */ if (ha->md_tmplt_hdr) { ql_log(ql_log_info, vha, 0xb048, "Free MiniDump template: %p, size (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024); dma_free_coherent(&ha->pdev->dev, ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); ha->md_tmplt_hdr = NULL; } /* Release the template data buffer allocated */ if (ha->md_dump) { ql_log(ql_log_info, vha, 0xb049, "Free MiniDump memory: %p, size (%d KB)\n", ha->md_dump, ha->md_dump_size / 1024); vfree(ha->md_dump); ha->md_dump_size = 0; ha->md_dump = NULL; } } void qla82xx_md_prep(scsi_qla_host_t *vha) { struct qla_hw_data *ha = vha->hw; int rval; /* Get Minidump template size */ rval = qla82xx_md_get_template_size(vha); if (rval == QLA_SUCCESS) { ql_log(ql_log_info, vha, 0xb04a, "MiniDump Template size obtained (%d KB)\n", ha->md_template_size / 1024); /* Get Minidump template */ rval = qla82xx_md_get_template(vha); if (rval == QLA_SUCCESS) { ql_dbg(ql_dbg_p3p, vha, 0xb04b, "MiniDump Template obtained\n"); /* Allocate memory for minidump */ rval = qla82xx_md_alloc(vha); if (rval == QLA_SUCCESS) ql_log(ql_log_info, vha, 0xb04c, "MiniDump memory allocated (%d KB)\n", ha->md_dump_size / 1024); else { ql_log(ql_log_info, vha, 0xb04d, "Free MiniDump template: %p, size: (%d KB)\n", ha->md_tmplt_hdr, ha->md_template_size / 1024); dma_free_coherent(&ha->pdev->dev, 
ha->md_template_size, ha->md_tmplt_hdr, ha->md_tmplt_hdr_dma); ha->md_tmplt_hdr = NULL; } } } } int qla82xx_beacon_on(struct scsi_qla_host *vha) { int rval; struct qla_hw_data *ha = vha->hw; qla82xx_idc_lock(ha); rval = qla82xx_mbx_beacon_ctl(vha, 1); if (rval) { ql_log(ql_log_warn, vha, 0xb050, "mbx set led config failed in %s\n", __func__); goto exit; } ha->beacon_blink_led = 1; exit: qla82xx_idc_unlock(ha); return rval; } int qla82xx_beacon_off(struct scsi_qla_host *vha) { int rval; struct qla_hw_data *ha = vha->hw; qla82xx_idc_lock(ha); rval = qla82xx_mbx_beacon_ctl(vha, 0); if (rval) { ql_log(ql_log_warn, vha, 0xb051, "mbx set led config failed in %s\n", __func__); goto exit; } ha->beacon_blink_led = 0; exit: qla82xx_idc_unlock(ha); return rval; }
gpl-2.0
corphish/zapdos_mint
net/sched/sch_red.c
2622
8044
/* * net/sched/sch_red.c Random Early Detection queue. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru> * * Changes: * J Hadi Salim 980914: computation fixes * Alexey Makarenko <makar@phoenix.kharkov.ua> 990814: qave on idle link was calculated incorrectly. * J Hadi Salim 980816: ECN support */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/skbuff.h> #include <net/pkt_sched.h> #include <net/inet_ecn.h> #include <net/red.h> /* Parameters, settable by user: ----------------------------- limit - bytes (must be > qth_max + burst) Hard limit on queue length, should be chosen >qth_max to allow packet bursts. This parameter does not affect the algorithms behaviour and can be chosen arbitrarily high (well, less than ram size) Really, this limit will never be reached if RED works correctly. 
*/ struct red_sched_data { u32 limit; /* HARD maximal queue length */ unsigned char flags; struct red_parms parms; struct red_stats stats; struct Qdisc *qdisc; }; static inline int red_use_ecn(struct red_sched_data *q) { return q->flags & TC_RED_ECN; } static inline int red_use_harddrop(struct red_sched_data *q) { return q->flags & TC_RED_HARDDROP; } static int red_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; int ret; q->parms.qavg = red_calc_qavg(&q->parms, child->qstats.backlog); if (red_is_idling(&q->parms)) red_end_of_idle_period(&q->parms); switch (red_action(&q->parms, q->parms.qavg)) { case RED_DONT_MARK: break; case RED_PROB_MARK: sch->qstats.overlimits++; if (!red_use_ecn(q) || !INET_ECN_set_ce(skb)) { q->stats.prob_drop++; goto congestion_drop; } q->stats.prob_mark++; break; case RED_HARD_MARK: sch->qstats.overlimits++; if (red_use_harddrop(q) || !red_use_ecn(q) || !INET_ECN_set_ce(skb)) { q->stats.forced_drop++; goto congestion_drop; } q->stats.forced_mark++; break; } ret = qdisc_enqueue(skb, child); if (likely(ret == NET_XMIT_SUCCESS)) { sch->q.qlen++; } else if (net_xmit_drop_count(ret)) { q->stats.pdrop++; sch->qstats.drops++; } return ret; congestion_drop: qdisc_drop(skb, sch); return NET_XMIT_CN; } static struct sk_buff *red_dequeue(struct Qdisc *sch) { struct sk_buff *skb; struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; skb = child->dequeue(child); if (skb) { qdisc_bstats_update(sch, skb); sch->q.qlen--; } else { if (!red_is_idling(&q->parms)) red_start_of_idle_period(&q->parms); } return skb; } static struct sk_buff *red_peek(struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; return child->ops->peek(child); } static unsigned int red_drop(struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); struct Qdisc *child = q->qdisc; unsigned int len; if (child->ops->drop && (len = 
child->ops->drop(child)) > 0) { q->stats.other++; sch->qstats.drops++; sch->q.qlen--; return len; } if (!red_is_idling(&q->parms)) red_start_of_idle_period(&q->parms); return 0; } static void red_reset(struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); qdisc_reset(q->qdisc); sch->q.qlen = 0; red_restart(&q->parms); } static void red_destroy(struct Qdisc *sch) { struct red_sched_data *q = qdisc_priv(sch); qdisc_destroy(q->qdisc); } static const struct nla_policy red_policy[TCA_RED_MAX + 1] = { [TCA_RED_PARMS] = { .len = sizeof(struct tc_red_qopt) }, [TCA_RED_STAB] = { .len = RED_STAB_SIZE }, }; static int red_change(struct Qdisc *sch, struct nlattr *opt) { struct red_sched_data *q = qdisc_priv(sch); struct nlattr *tb[TCA_RED_MAX + 1]; struct tc_red_qopt *ctl; struct Qdisc *child = NULL; int err; if (opt == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_RED_MAX, opt, red_policy); if (err < 0) return err; if (tb[TCA_RED_PARMS] == NULL || tb[TCA_RED_STAB] == NULL) return -EINVAL; ctl = nla_data(tb[TCA_RED_PARMS]); if (ctl->limit > 0) { child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit); if (IS_ERR(child)) return PTR_ERR(child); } sch_tree_lock(sch); q->flags = ctl->flags; q->limit = ctl->limit; if (child) { qdisc_tree_decrease_qlen(q->qdisc, q->qdisc->q.qlen); qdisc_destroy(q->qdisc); q->qdisc = child; } red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog, ctl->Plog, ctl->Scell_log, nla_data(tb[TCA_RED_STAB])); if (skb_queue_empty(&sch->q)) red_end_of_idle_period(&q->parms); sch_tree_unlock(sch); return 0; } static int red_init(struct Qdisc *sch, struct nlattr *opt) { struct red_sched_data *q = qdisc_priv(sch); q->qdisc = &noop_qdisc; return red_change(sch, opt); } static int red_dump(struct Qdisc *sch, struct sk_buff *skb) { struct red_sched_data *q = qdisc_priv(sch); struct nlattr *opts = NULL; struct tc_red_qopt opt = { .limit = q->limit, .flags = q->flags, .qth_min = q->parms.qth_min >> q->parms.Wlog, .qth_max = 
q->parms.qth_max >> q->parms.Wlog, .Wlog = q->parms.Wlog, .Plog = q->parms.Plog, .Scell_log = q->parms.Scell_log, }; sch->qstats.backlog = q->qdisc->qstats.backlog; opts = nla_nest_start(skb, TCA_OPTIONS); if (opts == NULL) goto nla_put_failure; NLA_PUT(skb, TCA_RED_PARMS, sizeof(opt), &opt); return nla_nest_end(skb, opts); nla_put_failure: nla_nest_cancel(skb, opts); return -EMSGSIZE; } static int red_dump_stats(struct Qdisc *sch, struct gnet_dump *d) { struct red_sched_data *q = qdisc_priv(sch); struct tc_red_xstats st = { .early = q->stats.prob_drop + q->stats.forced_drop, .pdrop = q->stats.pdrop, .other = q->stats.other, .marked = q->stats.prob_mark + q->stats.forced_mark, }; return gnet_stats_copy_app(d, &st, sizeof(st)); } static int red_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct red_sched_data *q = qdisc_priv(sch); tcm->tcm_handle |= TC_H_MIN(1); tcm->tcm_info = q->qdisc->handle; return 0; } static int red_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { struct red_sched_data *q = qdisc_priv(sch); if (new == NULL) new = &noop_qdisc; sch_tree_lock(sch); *old = q->qdisc; q->qdisc = new; qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); qdisc_reset(*old); sch_tree_unlock(sch); return 0; } static struct Qdisc *red_leaf(struct Qdisc *sch, unsigned long arg) { struct red_sched_data *q = qdisc_priv(sch); return q->qdisc; } static unsigned long red_get(struct Qdisc *sch, u32 classid) { return 1; } static void red_put(struct Qdisc *sch, unsigned long arg) { } static void red_walk(struct Qdisc *sch, struct qdisc_walker *walker) { if (!walker->stop) { if (walker->count >= walker->skip) if (walker->fn(sch, 1, walker) < 0) { walker->stop = 1; return; } walker->count++; } } static const struct Qdisc_class_ops red_class_ops = { .graft = red_graft, .leaf = red_leaf, .get = red_get, .put = red_put, .walk = red_walk, .dump = red_dump_class, }; static struct Qdisc_ops red_qdisc_ops 
__read_mostly = { .id = "red", .priv_size = sizeof(struct red_sched_data), .cl_ops = &red_class_ops, .enqueue = red_enqueue, .dequeue = red_dequeue, .peek = red_peek, .drop = red_drop, .init = red_init, .reset = red_reset, .destroy = red_destroy, .change = red_change, .dump = red_dump, .dump_stats = red_dump_stats, .owner = THIS_MODULE, }; static int __init red_module_init(void) { return register_qdisc(&red_qdisc_ops); } static void __exit red_module_exit(void) { unregister_qdisc(&red_qdisc_ops); } module_init(red_module_init) module_exit(red_module_exit) MODULE_LICENSE("GPL");
gpl-2.0
TRKP/android_kernel_samsung_i9300
drivers/connector/cn_proc.c
2622
7797
/* * cn_proc.c - process events connector * * Copyright (C) Matt Helsley, IBM Corp. 2005 * Based on cn_fork.c by Guillaume Thouvenin <guillaume.thouvenin@bull.net> * Original copyright notice follows: * Copyright (C) 2005 BULL SA. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/init.h> #include <linux/connector.h> #include <linux/gfp.h> #include <asm/atomic.h> #include <asm/unaligned.h> #include <linux/cn_proc.h> #define CN_PROC_MSG_SIZE (sizeof(struct cn_msg) + sizeof(struct proc_event)) static atomic_t proc_event_num_listeners = ATOMIC_INIT(0); static struct cb_id cn_proc_event_id = { CN_IDX_PROC, CN_VAL_PROC }; /* proc_event_counts is used as the sequence number of the netlink message */ static DEFINE_PER_CPU(__u32, proc_event_counts) = { 0 }; static inline void get_seq(__u32 *ts, int *cpu) { preempt_disable(); *ts = __this_cpu_inc_return(proc_event_counts) -1; *cpu = smp_processor_id(); preempt_enable(); } void proc_fork_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; __u8 buffer[CN_PROC_MSG_SIZE]; struct timespec ts; if (atomic_read(&proc_event_num_listeners) < 1) return; msg = (struct cn_msg*)buffer; ev = (struct proc_event*)msg->data; get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get 
high res monotonic timestamp */ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); ev->what = PROC_EVENT_FORK; ev->event_data.fork.parent_pid = task->real_parent->pid; ev->event_data.fork.parent_tgid = task->real_parent->tgid; ev->event_data.fork.child_pid = task->pid; ev->event_data.fork.child_tgid = task->tgid; memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ msg->len = sizeof(*ev); /* If cn_netlink_send() failed, the data is not sent */ cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } void proc_exec_connector(struct task_struct *task) { struct cn_msg *msg; struct proc_event *ev; struct timespec ts; __u8 buffer[CN_PROC_MSG_SIZE]; if (atomic_read(&proc_event_num_listeners) < 1) return; msg = (struct cn_msg*)buffer; ev = (struct proc_event*)msg->data; get_seq(&msg->seq, &ev->cpu); ktime_get_ts(&ts); /* get high res monotonic timestamp */ put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns); ev->what = PROC_EVENT_EXEC; ev->event_data.exec.process_pid = task->pid; ev->event_data.exec.process_tgid = task->tgid; memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id)); msg->ack = 0; /* not used */ msg->len = sizeof(*ev); cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL); } void proc_id_connector(struct task_struct *task, int which_id) { struct cn_msg *msg; struct proc_event *ev; __u8 buffer[CN_PROC_MSG_SIZE]; struct timespec ts; const struct cred *cred; if (atomic_read(&proc_event_num_listeners) < 1) return; msg = (struct cn_msg*)buffer; ev = (struct proc_event*)msg->data; ev->what = which_id; ev->event_data.id.process_pid = task->pid; ev->event_data.id.process_tgid = task->tgid; rcu_read_lock(); cred = __task_cred(task); if (which_id == PROC_EVENT_UID) { ev->event_data.id.r.ruid = cred->uid; ev->event_data.id.e.euid = cred->euid; } else if (which_id == PROC_EVENT_GID) { ev->event_data.id.r.rgid = cred->gid; ev->event_data.id.e.egid = cred->egid; } else { rcu_read_unlock(); return; } rcu_read_unlock(); get_seq(&msg->seq, 
			&ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

/*
 * proc_sid_connector - broadcast a PROC_EVENT_SID message for @task.
 *
 * Built and sent the same way as the other proc events: on-stack
 * buffer, early exit when there are no listeners, best-effort send.
 */
void proc_sid_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	struct timespec ts;
	__u8 buffer[CN_PROC_MSG_SIZE];	/* cn_msg header + proc_event payload */

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg *)buffer;
	ev = (struct proc_event *)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_SID;
	ev->event_data.sid.process_pid = task->pid;
	ev->event_data.sid.process_tgid = task->tgid;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

/*
 * proc_exit_connector - broadcast a PROC_EVENT_EXIT message carrying
 * the exit code and exit signal of @task.
 */
void proc_exit_connector(struct task_struct *task)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg*)buffer;
	ev = (struct proc_event*)msg->data;
	get_seq(&msg->seq, &ev->cpu);
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->what = PROC_EVENT_EXIT;
	ev->event_data.exit.process_pid = task->pid;
	ev->event_data.exit.process_tgid = task->tgid;
	ev->event_data.exit.exit_code = task->exit_code;
	ev->event_data.exit.exit_signal = task->exit_signal;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	msg->ack = 0; /* not used */
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

/*
 * Send an acknowledgement message to userspace
 *
 * Use 0 for success, EFOO otherwise.
 * Note: this is the negative of conventional kernel error
 * values because it's not being returned via syscall return
 * mechanisms.
 */
static void cn_proc_ack(int err, int rcvd_seq, int rcvd_ack)
{
	struct cn_msg *msg;
	struct proc_event *ev;
	__u8 buffer[CN_PROC_MSG_SIZE];	/* cn_msg header + proc_event payload */
	struct timespec ts;

	if (atomic_read(&proc_event_num_listeners) < 1)
		return;

	msg = (struct cn_msg*)buffer;
	ev = (struct proc_event*)msg->data;
	/* Echo the sequence number of the request being acknowledged. */
	msg->seq = rcvd_seq;
	ktime_get_ts(&ts); /* get high res monotonic timestamp */
	put_unaligned(timespec_to_ns(&ts), (__u64 *)&ev->timestamp_ns);
	ev->cpu = -1;	/* not tied to any cpu, unlike the real events */
	ev->what = PROC_EVENT_NONE;
	ev->event_data.ack.err = err;
	memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
	/* Connector ack convention: received ack number plus one. */
	msg->ack = rcvd_ack + 1;
	msg->len = sizeof(*ev);
	cn_netlink_send(msg, CN_IDX_PROC, GFP_KERNEL);
}

/**
 * cn_proc_mcast_ctl - handle a listen/ignore request from userspace
 * @msg: connector message; payload is a single enum proc_cn_mcast_op
 * @nsp: netlink socket parameters of the sender (currently unused)
 *
 * Adjusts the listener refcount and always replies with an ack whose
 * error field is 0 on success or (positive) EINVAL for an unknown op.
 * Messages with an unexpected payload size are dropped without an ack.
 */
static void cn_proc_mcast_ctl(struct cn_msg *msg,
			      struct netlink_skb_parms *nsp)
{
	enum proc_cn_mcast_op *mc_op = NULL;
	int err = 0;

	if (msg->len != sizeof(*mc_op))
		return;

	mc_op = (enum proc_cn_mcast_op*)msg->data;
	switch (*mc_op) {
	case PROC_CN_MCAST_LISTEN:
		atomic_inc(&proc_event_num_listeners);
		break;
	case PROC_CN_MCAST_IGNORE:
		atomic_dec(&proc_event_num_listeners);
		break;
	default:
		err = EINVAL;
		break;
	}
	cn_proc_ack(err, msg->seq, msg->ack);
}

/*
 * cn_proc_init - initialization entry point
 *
 * Adds the connector callback to the connector driver.
 */
static int __init cn_proc_init(void)
{
	int err;

	if ((err = cn_add_callback(&cn_proc_event_id, "cn_proc",
				   &cn_proc_mcast_ctl))) {
		printk(KERN_WARNING "cn_proc failed to register\n");
		return err;
	}
	return 0;
}

module_init(cn_proc_init);
gpl-2.0
grancier/linux-3.14.3-c720
fs/ntfs/namei.c
3390
14444
/* * namei.c - NTFS kernel directory inode operations. Part of the Linux-NTFS * project. * * Copyright (c) 2001-2006 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/dcache.h> #include <linux/exportfs.h> #include <linux/security.h> #include <linux/slab.h> #include "attrib.h" #include "debug.h" #include "dir.h" #include "mft.h" #include "ntfs.h" /** * ntfs_lookup - find the inode represented by a dentry in a directory inode * @dir_ino: directory inode in which to look for the inode * @dent: dentry representing the inode to look for * @nd: lookup nameidata * * In short, ntfs_lookup() looks for the inode represented by the dentry @dent * in the directory inode @dir_ino and if found attaches the inode to the * dentry @dent. * * In more detail, the dentry @dent specifies which inode to look for by * supplying the name of the inode in @dent->d_name.name. ntfs_lookup() * converts the name to Unicode and walks the contents of the directory inode * @dir_ino looking for the converted Unicode name. If the name is found in the * directory, the corresponding inode is loaded by calling ntfs_iget() on its * inode number and the inode is associated with the dentry @dent via a call to * d_splice_alias(). 
* * If the name is not found in the directory, a NULL inode is inserted into the * dentry @dent via a call to d_add(). The dentry is then termed a negative * dentry. * * Only if an actual error occurs, do we return an error via ERR_PTR(). * * In order to handle the case insensitivity issues of NTFS with regards to the * dcache and the dcache requiring only one dentry per directory, we deal with * dentry aliases that only differ in case in ->ntfs_lookup() while maintaining * a case sensitive dcache. This means that we get the full benefit of dcache * speed when the file/directory is looked up with the same case as returned by * ->ntfs_readdir() but that a lookup for any other case (or for the short file * name) will not find anything in dcache and will enter ->ntfs_lookup() * instead, where we search the directory for a fully matching file name * (including case) and if that is not found, we search for a file name that * matches with different case and if that has non-POSIX semantics we return * that. We actually do only one search (case sensitive) and keep tabs on * whether we have found a case insensitive match in the process. * * To simplify matters for us, we do not treat the short vs long filenames as * two hard links but instead if the lookup matches a short filename, we * return the dentry for the corresponding long filename instead. * * There are three cases we need to distinguish here: * * 1) @dent perfectly matches (i.e. including case) a directory entry with a * file name in the WIN32 or POSIX namespaces. In this case * ntfs_lookup_inode_by_name() will return with name set to NULL and we * just d_splice_alias() @dent. * 2) @dent matches (not including case) a directory entry with a file name in * the WIN32 namespace. In this case ntfs_lookup_inode_by_name() will return * with name set to point to a kmalloc()ed ntfs_name structure containing * the properly cased little endian Unicode name. 
We convert the name to the * current NLS code page, search if a dentry with this name already exists * and if so return that instead of @dent. At this point things are * complicated by the possibility of 'disconnected' dentries due to NFS * which we deal with appropriately (see the code comments). The VFS will * then destroy the old @dent and use the one we returned. If a dentry is * not found, we allocate a new one, d_splice_alias() it, and return it as * above. * 3) @dent matches either perfectly or not (i.e. we don't care about case) a * directory entry with a file name in the DOS namespace. In this case * ntfs_lookup_inode_by_name() will return with name set to point to a * kmalloc()ed ntfs_name structure containing the mft reference (cpu endian) * of the inode. We use the mft reference to read the inode and to find the * file name in the WIN32 namespace corresponding to the matched short file * name. We then convert the name to the current NLS code page, and proceed * searching for a dentry with this name, etc, as in case 2), above. * * Locking: Caller must hold i_mutex on the directory. */ static struct dentry *ntfs_lookup(struct inode *dir_ino, struct dentry *dent, unsigned int flags) { ntfs_volume *vol = NTFS_SB(dir_ino->i_sb); struct inode *dent_inode; ntfschar *uname; ntfs_name *name = NULL; MFT_REF mref; unsigned long dent_ino; int uname_len; ntfs_debug("Looking up %s in directory inode 0x%lx.", dent->d_name.name, dir_ino->i_ino); /* Convert the name of the dentry to Unicode. */ uname_len = ntfs_nlstoucs(vol, dent->d_name.name, dent->d_name.len, &uname); if (uname_len < 0) { if (uname_len != -ENAMETOOLONG) ntfs_error(vol->sb, "Failed to convert name to " "Unicode."); return ERR_PTR(uname_len); } mref = ntfs_lookup_inode_by_name(NTFS_I(dir_ino), uname, uname_len, &name); kmem_cache_free(ntfs_name_cache, uname); if (!IS_ERR_MREF(mref)) { dent_ino = MREF(mref); ntfs_debug("Found inode 0x%lx. 
Calling ntfs_iget.", dent_ino); dent_inode = ntfs_iget(vol->sb, dent_ino); if (likely(!IS_ERR(dent_inode))) { /* Consistency check. */ if (is_bad_inode(dent_inode) || MSEQNO(mref) == NTFS_I(dent_inode)->seq_no || dent_ino == FILE_MFT) { /* Perfect WIN32/POSIX match. -- Case 1. */ if (!name) { ntfs_debug("Done. (Case 1.)"); return d_splice_alias(dent_inode, dent); } /* * We are too indented. Handle imperfect * matches and short file names further below. */ goto handle_name; } ntfs_error(vol->sb, "Found stale reference to inode " "0x%lx (reference sequence number = " "0x%x, inode sequence number = 0x%x), " "returning -EIO. Run chkdsk.", dent_ino, MSEQNO(mref), NTFS_I(dent_inode)->seq_no); iput(dent_inode); dent_inode = ERR_PTR(-EIO); } else ntfs_error(vol->sb, "ntfs_iget(0x%lx) failed with " "error code %li.", dent_ino, PTR_ERR(dent_inode)); kfree(name); /* Return the error code. */ return (struct dentry *)dent_inode; } /* It is guaranteed that @name is no longer allocated at this point. */ if (MREF_ERR(mref) == -ENOENT) { ntfs_debug("Entry was not found, adding negative dentry."); /* The dcache will handle negative entries. */ d_add(dent, NULL); ntfs_debug("Done."); return NULL; } ntfs_error(vol->sb, "ntfs_lookup_ino_by_name() failed with error " "code %i.", -MREF_ERR(mref)); return ERR_PTR(MREF_ERR(mref)); // TODO: Consider moving this lot to a separate function! (AIA) handle_name: { MFT_RECORD *m; ntfs_attr_search_ctx *ctx; ntfs_inode *ni = NTFS_I(dent_inode); int err; struct qstr nls_name; nls_name.name = NULL; if (name->type != FILE_NAME_DOS) { /* Case 2. */ ntfs_debug("Case 2."); nls_name.len = (unsigned)ntfs_ucstonls(vol, (ntfschar*)&name->name, name->len, (unsigned char**)&nls_name.name, 0); kfree(name); } else /* if (name->type == FILE_NAME_DOS) */ { /* Case 3. */ FILE_NAME_ATTR *fn; ntfs_debug("Case 3."); kfree(name); /* Find the WIN32 name corresponding to the matched DOS name. 
*/ ni = NTFS_I(dent_inode); m = map_mft_record(ni); if (IS_ERR(m)) { err = PTR_ERR(m); m = NULL; ctx = NULL; goto err_out; } ctx = ntfs_attr_get_search_ctx(ni, m); if (unlikely(!ctx)) { err = -ENOMEM; goto err_out; } do { ATTR_RECORD *a; u32 val_len; err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, 0, 0, NULL, 0, ctx); if (unlikely(err)) { ntfs_error(vol->sb, "Inode corrupt: No WIN32 " "namespace counterpart to DOS " "file name. Run chkdsk."); if (err == -ENOENT) err = -EIO; goto err_out; } /* Consistency checks. */ a = ctx->attr; if (a->non_resident || a->flags) goto eio_err_out; val_len = le32_to_cpu(a->data.resident.value_length); if (le16_to_cpu(a->data.resident.value_offset) + val_len > le32_to_cpu(a->length)) goto eio_err_out; fn = (FILE_NAME_ATTR*)((u8*)ctx->attr + le16_to_cpu( ctx->attr->data.resident.value_offset)); if ((u32)(fn->file_name_length * sizeof(ntfschar) + sizeof(FILE_NAME_ATTR)) > val_len) goto eio_err_out; } while (fn->file_name_type != FILE_NAME_WIN32); /* Convert the found WIN32 name to current NLS code page. */ nls_name.len = (unsigned)ntfs_ucstonls(vol, (ntfschar*)&fn->file_name, fn->file_name_length, (unsigned char**)&nls_name.name, 0); ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); } m = NULL; ctx = NULL; /* Check if a conversion error occurred. */ if ((signed)nls_name.len < 0) { err = (signed)nls_name.len; goto err_out; } nls_name.hash = full_name_hash(nls_name.name, nls_name.len); dent = d_add_ci(dent, dent_inode, &nls_name); kfree(nls_name.name); return dent; eio_err_out: ntfs_error(vol->sb, "Illegal file name attribute. Run chkdsk."); err = -EIO; err_out: if (ctx) ntfs_attr_put_search_ctx(ctx); if (m) unmap_mft_record(ni); iput(dent_inode); ntfs_error(vol->sb, "Failed, returning error code %i.", err); return ERR_PTR(err); } } /** * Inode operations for directories. */ const struct inode_operations ntfs_dir_inode_ops = { .lookup = ntfs_lookup, /* VFS: Lookup directory. 
*/ }; /** * ntfs_get_parent - find the dentry of the parent of a given directory dentry * @child_dent: dentry of the directory whose parent directory to find * * Find the dentry for the parent directory of the directory specified by the * dentry @child_dent. This function is called from * fs/exportfs/expfs.c::find_exported_dentry() which in turn is called from the * default ->decode_fh() which is export_decode_fh() in the same file. * * The code is based on the ext3 ->get_parent() implementation found in * fs/ext3/namei.c::ext3_get_parent(). * * Note: ntfs_get_parent() is called with @child_dent->d_inode->i_mutex down. * * Return the dentry of the parent directory on success or the error code on * error (IS_ERR() is true). */ static struct dentry *ntfs_get_parent(struct dentry *child_dent) { struct inode *vi = child_dent->d_inode; ntfs_inode *ni = NTFS_I(vi); MFT_RECORD *mrec; ntfs_attr_search_ctx *ctx; ATTR_RECORD *attr; FILE_NAME_ATTR *fn; unsigned long parent_ino; int err; ntfs_debug("Entering for inode 0x%lx.", vi->i_ino); /* Get the mft record of the inode belonging to the child dentry. */ mrec = map_mft_record(ni); if (IS_ERR(mrec)) return (struct dentry *)mrec; /* Find the first file name attribute in the mft record. */ ctx = ntfs_attr_get_search_ctx(ni, mrec); if (unlikely(!ctx)) { unmap_mft_record(ni); return ERR_PTR(-ENOMEM); } try_next: err = ntfs_attr_lookup(AT_FILE_NAME, NULL, 0, CASE_SENSITIVE, 0, NULL, 0, ctx); if (unlikely(err)) { ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); if (err == -ENOENT) ntfs_error(vi->i_sb, "Inode 0x%lx does not have a " "file name attribute. Run chkdsk.", vi->i_ino); return ERR_PTR(err); } attr = ctx->attr; if (unlikely(attr->non_resident)) goto try_next; fn = (FILE_NAME_ATTR *)((u8 *)attr + le16_to_cpu(attr->data.resident.value_offset)); if (unlikely((u8 *)fn + le32_to_cpu(attr->data.resident.value_length) > (u8*)attr + le32_to_cpu(attr->length))) goto try_next; /* Get the inode number of the parent directory. 
*/ parent_ino = MREF_LE(fn->parent_directory); /* Release the search context and the mft record of the child. */ ntfs_attr_put_search_ctx(ctx); unmap_mft_record(ni); return d_obtain_alias(ntfs_iget(vi->i_sb, parent_ino)); } static struct inode *ntfs_nfs_get_inode(struct super_block *sb, u64 ino, u32 generation) { struct inode *inode; inode = ntfs_iget(sb, ino); if (!IS_ERR(inode)) { if (is_bad_inode(inode) || inode->i_generation != generation) { iput(inode); inode = ERR_PTR(-ESTALE); } } return inode; } static struct dentry *ntfs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_dentry(sb, fid, fh_len, fh_type, ntfs_nfs_get_inode); } static struct dentry *ntfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { return generic_fh_to_parent(sb, fid, fh_len, fh_type, ntfs_nfs_get_inode); } /** * Export operations allowing NFS exporting of mounted NTFS partitions. * * We use the default ->encode_fh() for now. Note that they * use 32 bits to store the inode number which is an unsigned long so on 64-bit * architectures is usually 64 bits so it would all fail horribly on huge * volumes. I guess we need to define our own encode and decode fh functions * that store 64-bit inode numbers at some point but for now we will ignore the * problem... * * We also use the default ->get_name() helper (used by ->decode_fh() via * fs/exportfs/expfs.c::find_exported_dentry()) as that is completely fs * independent. * * The default ->get_parent() just returns -EACCES so we have to provide our * own and the default ->get_dentry() is incompatible with NTFS due to not * allowing the inode number 0 which is used in NTFS for the system file $MFT * and due to using iget() whereas NTFS needs ntfs_iget(). */ const struct export_operations ntfs_export_ops = { .get_parent = ntfs_get_parent, /* Find the parent of a given directory. */ .fh_to_dentry = ntfs_fh_to_dentry, .fh_to_parent = ntfs_fh_to_parent, };
gpl-2.0
TeamWin/android_kernel_oneplus_msm8974
arch/x86/kernel/process_64.c
4158
13959
/* * Copyright (C) 1995 Linus Torvalds * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 * * X86-64 port * Andi Kleen. * * CPU hotplug support - ashok.raj@intel.com */ /* * This file handles the architecture-dependent parts of process handling.. */ #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/elfcore.h> #include <linux/smp.h> #include <linux/slab.h> #include <linux/user.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/ptrace.h> #include <linux/notifier.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/prctl.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/ftrace.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/i387.h> #include <asm/fpu-internal.h> #include <asm/mmu_context.h> #include <asm/prctl.h> #include <asm/desc.h> #include <asm/proto.h> #include <asm/ia32.h> #include <asm/idle.h> #include <asm/syscalls.h> #include <asm/debugreg.h> #include <asm/switch_to.h> asmlinkage extern void ret_from_fork(void); DEFINE_PER_CPU(unsigned long, old_rsp); /* Prints also some state that isn't saved in the pt_regs */ void __show_regs(struct pt_regs *regs, int all) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs; unsigned long d0, d1, d2, d3, d6, d7; unsigned int fsindex, gsindex; unsigned int ds, cs, es; show_regs_common(); printk(KERN_DEFAULT "RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->ip); printk_address(regs->ip, 1); printk(KERN_DEFAULT "RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->sp, regs->flags); printk(KERN_DEFAULT "RAX: %016lx RBX: %016lx RCX: %016lx\n", regs->ax, regs->bx, regs->cx); printk(KERN_DEFAULT "RDX: %016lx RSI: %016lx RDI: %016lx\n", regs->dx, regs->si, regs->di); printk(KERN_DEFAULT "RBP: %016lx R08: %016lx R09: %016lx\n", regs->bp, regs->r8, regs->r9); 
printk(KERN_DEFAULT "R10: %016lx R11: %016lx R12: %016lx\n", regs->r10, regs->r11, regs->r12); printk(KERN_DEFAULT "R13: %016lx R14: %016lx R15: %016lx\n", regs->r13, regs->r14, regs->r15); asm("movl %%ds,%0" : "=r" (ds)); asm("movl %%cs,%0" : "=r" (cs)); asm("movl %%es,%0" : "=r" (es)); asm("movl %%fs,%0" : "=r" (fsindex)); asm("movl %%gs,%0" : "=r" (gsindex)); rdmsrl(MSR_FS_BASE, fs); rdmsrl(MSR_GS_BASE, gs); rdmsrl(MSR_KERNEL_GS_BASE, shadowgs); if (!all) return; cr0 = read_cr0(); cr2 = read_cr2(); cr3 = read_cr3(); cr4 = read_cr4(); printk(KERN_DEFAULT "FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n", fs, fsindex, gs, gsindex, shadowgs); printk(KERN_DEFAULT "CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0); printk(KERN_DEFAULT "CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4); get_debugreg(d0, 0); get_debugreg(d1, 1); get_debugreg(d2, 2); printk(KERN_DEFAULT "DR0: %016lx DR1: %016lx DR2: %016lx\n", d0, d1, d2); get_debugreg(d3, 3); get_debugreg(d6, 6); get_debugreg(d7, 7); printk(KERN_DEFAULT "DR3: %016lx DR6: %016lx DR7: %016lx\n", d3, d6, d7); } void release_thread(struct task_struct *dead_task) { if (dead_task->mm) { if (dead_task->mm->context.size) { printk("WARNING: dead process %8s still has LDT? <%p/%d>\n", dead_task->comm, dead_task->mm->context.ldt, dead_task->mm->context.size); BUG(); } } } static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr) { struct user_desc ud = { .base_addr = addr, .limit = 0xfffff, .seg_32bit = 1, .limit_in_pages = 1, .useable = 1, }; struct desc_struct *desc = t->thread.tls_array; desc += tls; fill_ldt(desc, &ud); } static inline u32 read_32bit_tls(struct task_struct *t, int tls) { return get_desc_base(&t->thread.tls_array[tls]); } /* * This gets called before we allocate a new thread and copy * the current task into it. 
*/ void prepare_to_copy(struct task_struct *tsk) { unlazy_fpu(tsk); } int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { int err; struct pt_regs *childregs; struct task_struct *me = current; childregs = ((struct pt_regs *) (THREAD_SIZE + task_stack_page(p))) - 1; *childregs = *regs; childregs->ax = 0; if (user_mode(regs)) childregs->sp = sp; else childregs->sp = (unsigned long)childregs; p->thread.sp = (unsigned long) childregs; p->thread.sp0 = (unsigned long) (childregs+1); p->thread.usersp = me->thread.usersp; set_tsk_thread_flag(p, TIF_FORK); p->fpu_counter = 0; p->thread.io_bitmap_ptr = NULL; savesegment(gs, p->thread.gsindex); p->thread.gs = p->thread.gsindex ? 0 : me->thread.gs; savesegment(fs, p->thread.fsindex); p->thread.fs = p->thread.fsindex ? 0 : me->thread.fs; savesegment(es, p->thread.es); savesegment(ds, p->thread.ds); err = -ENOMEM; memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); if (unlikely(test_tsk_thread_flag(me, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmemdup(me->thread.io_bitmap_ptr, IO_BITMAP_BYTES, GFP_KERNEL); if (!p->thread.io_bitmap_ptr) { p->thread.io_bitmap_max = 0; return -ENOMEM; } set_tsk_thread_flag(p, TIF_IO_BITMAP); } /* * Set a new TLS for the child thread? 
*/ if (clone_flags & CLONE_SETTLS) { #ifdef CONFIG_IA32_EMULATION if (test_thread_flag(TIF_IA32)) err = do_set_thread_area(p, -1, (struct user_desc __user *)childregs->si, 0); else #endif err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8); if (err) goto out; } err = 0; out: if (err && p->thread.io_bitmap_ptr) { kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } return err; } static void start_thread_common(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp, unsigned int _cs, unsigned int _ss, unsigned int _ds) { loadsegment(fs, 0); loadsegment(es, _ds); loadsegment(ds, _ds); load_gs_index(0); current->thread.usersp = new_sp; regs->ip = new_ip; regs->sp = new_sp; percpu_write(old_rsp, new_sp); regs->cs = _cs; regs->ss = _ss; regs->flags = X86_EFLAGS_IF; /* * Free the old FP and other extended state */ free_thread_xstate(current); } void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { start_thread_common(regs, new_ip, new_sp, __USER_CS, __USER_DS, 0); } #ifdef CONFIG_IA32_EMULATION void start_thread_ia32(struct pt_regs *regs, u32 new_ip, u32 new_sp) { start_thread_common(regs, new_ip, new_sp, test_thread_flag(TIF_X32) ? __USER_CS : __USER32_CS, __USER_DS, __USER_DS); } #endif /* * switch_to(x,y) should switch tasks from x to y. * * This could still be optimized: * - fold all the options into a flag word and test it with a single test. * - could test fs/gs bitsliced * * Kprobes not supported here. Set the probe on schedule instead. * Function graph tracer not supported too. 
*/ __notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread; struct thread_struct *next = &next_p->thread; int cpu = smp_processor_id(); struct tss_struct *tss = &per_cpu(init_tss, cpu); unsigned fsindex, gsindex; fpu_switch_t fpu; fpu = switch_fpu_prepare(prev_p, next_p, cpu); /* * Reload esp0, LDT and the page table pointer: */ load_sp0(tss, next); /* * Switch DS and ES. * This won't pick up thread selector changes, but I guess that is ok. */ savesegment(es, prev->es); if (unlikely(next->es | prev->es)) loadsegment(es, next->es); savesegment(ds, prev->ds); if (unlikely(next->ds | prev->ds)) loadsegment(ds, next->ds); /* We must save %fs and %gs before load_TLS() because * %fs and %gs may be cleared by load_TLS(). * * (e.g. xen_load_tls()) */ savesegment(fs, fsindex); savesegment(gs, gsindex); load_TLS(next, cpu); /* * Leave lazy mode, flushing any hypercalls made here. * This must be done before restoring TLS segments so * the GDT and LDT are properly updated, and must be * done before math_state_restore, so the TS bit is up * to date. */ arch_end_context_switch(next_p); /* * Switch FS and GS. * * Segment register != 0 always requires a reload. Also * reload when it has changed. When prev process used 64bit * base always reload to avoid an information leak. 
*/ if (unlikely(fsindex | next->fsindex | prev->fs)) { loadsegment(fs, next->fsindex); /* * Check if the user used a selector != 0; if yes * clear 64bit base, since overloaded base is always * mapped to the Null selector */ if (fsindex) prev->fs = 0; } /* when next process has a 64bit base use it */ if (next->fs) wrmsrl(MSR_FS_BASE, next->fs); prev->fsindex = fsindex; if (unlikely(gsindex | next->gsindex | prev->gs)) { load_gs_index(next->gsindex); if (gsindex) prev->gs = 0; } if (next->gs) wrmsrl(MSR_KERNEL_GS_BASE, next->gs); prev->gsindex = gsindex; switch_fpu_finish(next_p, fpu); /* * Switch the PDA and FPU contexts. */ prev->usersp = percpu_read(old_rsp); percpu_write(old_rsp, next->usersp); percpu_write(current_task, next_p); percpu_write(kernel_stack, (unsigned long)task_stack_page(next_p) + THREAD_SIZE - KERNEL_STACK_OFFSET); /* * Now maybe reload the debug registers and handle I/O bitmaps */ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT || task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV)) __switch_to_xtra(prev_p, next_p, tss); return prev_p; } void set_personality_64bit(void) { /* inherit personality from parent */ /* Make sure to be in 64bit mode */ clear_thread_flag(TIF_IA32); clear_thread_flag(TIF_ADDR32); clear_thread_flag(TIF_X32); /* Ensure the corresponding mm is not marked. */ if (current->mm) current->mm->context.ia32_compat = 0; /* TBD: overwrites user setup. Should have two bits. But 64bit processes have always behaved this way, so it's not too bad. The main problem is just that 32bit childs are affected again. */ current->personality &= ~READ_IMPLIES_EXEC; } void set_personality_ia32(bool x32) { /* inherit personality from parent */ /* Make sure to be in 32bit mode */ set_thread_flag(TIF_ADDR32); /* Mark the associated mm as containing 32-bit tasks. 
*/ if (current->mm) current->mm->context.ia32_compat = 1; if (x32) { clear_thread_flag(TIF_IA32); set_thread_flag(TIF_X32); current->personality &= ~READ_IMPLIES_EXEC; /* is_compat_task() uses the presence of the x32 syscall bit flag to determine compat status */ current_thread_info()->status &= ~TS_COMPAT; } else { set_thread_flag(TIF_IA32); clear_thread_flag(TIF_X32); current->personality |= force_personality32; /* Prepare the first "return" to user space */ current_thread_info()->status |= TS_COMPAT; } } EXPORT_SYMBOL_GPL(set_personality_ia32); unsigned long get_wchan(struct task_struct *p) { unsigned long stack; u64 fp, ip; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; stack = (unsigned long)task_stack_page(p); if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) return 0; fp = *(u64 *)(p->thread.sp); do { if (fp < (unsigned long)stack || fp >= (unsigned long)stack+THREAD_SIZE) return 0; ip = *(u64 *)(fp+8); if (!in_sched_functions(ip)) return ip; fp = *(u64 *)fp; } while (count++ < 16); return 0; } long do_arch_prctl(struct task_struct *task, int code, unsigned long addr) { int ret = 0; int doit = task == current; int cpu; switch (code) { case ARCH_SET_GS: if (addr >= TASK_SIZE_OF(task)) return -EPERM; cpu = get_cpu(); /* handle small bases via the GDT because that's faster to switch. */ if (addr <= 0xffffffff) { set_32bit_tls(task, GS_TLS, addr); if (doit) { load_TLS(&task->thread, cpu); load_gs_index(GS_TLS_SEL); } task->thread.gsindex = GS_TLS_SEL; task->thread.gs = 0; } else { task->thread.gsindex = 0; task->thread.gs = addr; if (doit) { load_gs_index(0); ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr); } } put_cpu(); break; case ARCH_SET_FS: /* Not strictly needed for fs, but do it for symmetry with gs */ if (addr >= TASK_SIZE_OF(task)) return -EPERM; cpu = get_cpu(); /* handle small bases via the GDT because that's faster to switch. 
*/ if (addr <= 0xffffffff) { set_32bit_tls(task, FS_TLS, addr); if (doit) { load_TLS(&task->thread, cpu); loadsegment(fs, FS_TLS_SEL); } task->thread.fsindex = FS_TLS_SEL; task->thread.fs = 0; } else { task->thread.fsindex = 0; task->thread.fs = addr; if (doit) { /* set the selector to 0 to not confuse __switch_to */ loadsegment(fs, 0); ret = checking_wrmsrl(MSR_FS_BASE, addr); } } put_cpu(); break; case ARCH_GET_FS: { unsigned long base; if (task->thread.fsindex == FS_TLS_SEL) base = read_32bit_tls(task, FS_TLS); else if (doit) rdmsrl(MSR_FS_BASE, base); else base = task->thread.fs; ret = put_user(base, (unsigned long __user *)addr); break; } case ARCH_GET_GS: { unsigned long base; unsigned gsindex; if (task->thread.gsindex == GS_TLS_SEL) base = read_32bit_tls(task, GS_TLS); else if (doit) { savesegment(gs, gsindex); if (gsindex) rdmsrl(MSR_KERNEL_GS_BASE, base); else base = task->thread.gs; } else base = task->thread.gs; ret = put_user(base, (unsigned long __user *)addr); break; } default: ret = -EINVAL; break; } return ret; } long sys_arch_prctl(int code, unsigned long addr) { return do_arch_prctl(current, code, addr); } unsigned long KSTK_ESP(struct task_struct *task) { return (test_tsk_thread_flag(task, TIF_IA32)) ? (task_pt_regs(task)->sp) : ((task)->thread.usersp); }
gpl-2.0
kaneawk/android_kernel_motorola_msm8996
arch/arm/mach-ixp4xx/nslu2-pci.c
4926
1650
/* * arch/arm/mach-ixp4xx/nslu2-pci.c * * NSLU2 board-level PCI initialization * * based on ixdp425-pci.c: * Copyright (C) 2002 Intel Corporation. * Copyright (C) 2003-2004 MontaVista Software, Inc. * * Maintainer: http://www.nslu2-linux.org/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/pci.h> #include <linux/init.h> #include <linux/irq.h> #include <asm/mach/pci.h> #include <asm/mach-types.h> #define MAX_DEV 3 #define IRQ_LINES 3 /* PCI controller GPIO to IRQ pin mappings */ #define INTA 11 #define INTB 10 #define INTC 9 #define INTD 8 void __init nslu2_pci_preinit(void) { irq_set_irq_type(IXP4XX_GPIO_IRQ(INTA), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(INTB), IRQ_TYPE_LEVEL_LOW); irq_set_irq_type(IXP4XX_GPIO_IRQ(INTC), IRQ_TYPE_LEVEL_LOW); ixp4xx_pci_preinit(); } static int __init nslu2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { static int pci_irq_table[IRQ_LINES] = { IXP4XX_GPIO_IRQ(INTA), IXP4XX_GPIO_IRQ(INTB), IXP4XX_GPIO_IRQ(INTC), }; if (slot >= 1 && slot <= MAX_DEV && pin >= 1 && pin <= IRQ_LINES) return pci_irq_table[(slot + pin - 2) % IRQ_LINES]; return -1; } struct hw_pci __initdata nslu2_pci = { .nr_controllers = 1, .ops = &ixp4xx_ops, .preinit = nslu2_pci_preinit, .setup = ixp4xx_setup, .map_irq = nslu2_map_irq, }; int __init nslu2_pci_init(void) /* monkey see, monkey do */ { if (machine_is_nslu2()) pci_common_init(&nslu2_pci); return 0; } subsys_initcall(nslu2_pci_init);
gpl-2.0
PaoloW8/kernel_ZOPO
net/atm/svc.c
7998
16833
/* net/atm/svc.c - ATM SVC sockets */ /* Written 1995-2000 by Werner Almesberger, EPFL LRC/ICA */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__ #include <linux/string.h> #include <linux/net.h> /* struct socket, struct proto_ops */ #include <linux/errno.h> /* error codes */ #include <linux/kernel.h> /* printk */ #include <linux/skbuff.h> #include <linux/wait.h> #include <linux/sched.h> /* jiffies and HZ */ #include <linux/fcntl.h> /* O_NONBLOCK */ #include <linux/init.h> #include <linux/atm.h> /* ATM stuff */ #include <linux/atmsap.h> #include <linux/atmsvc.h> #include <linux/atmdev.h> #include <linux/bitops.h> #include <net/sock.h> /* for sock_no_* */ #include <linux/uaccess.h> #include <linux/export.h> #include "resources.h" #include "common.h" /* common for PVCs and SVCs */ #include "signaling.h" #include "addr.h" static int svc_create(struct net *net, struct socket *sock, int protocol, int kern); /* * Note: since all this is still nicely synchronized with the signaling demon, * there's no need to protect sleep loops with clis. If signaling is * moved into the kernel, that would change. */ static int svc_shutdown(struct socket *sock, int how) { return 0; } static void svc_disconnect(struct atm_vcc *vcc) { DEFINE_WAIT(wait); struct sk_buff *skb; struct sock *sk = sk_atm(vcc); pr_debug("%p\n", vcc); if (test_bit(ATM_VF_REGIS, &vcc->flags)) { prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); sigd_enq(vcc, as_close, NULL, NULL, NULL); while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); } /* beware - socket is still in use by atmsigd until the last as_indicate has been answered */ while ((skb = skb_dequeue(&sk->sk_receive_queue)) != NULL) { atm_return(vcc, skb->truesize); pr_debug("LISTEN REL\n"); sigd_enq2(NULL, as_reject, vcc, NULL, NULL, &vcc->qos, 0); dev_kfree_skb(skb); } clear_bit(ATM_VF_REGIS, &vcc->flags); /* ... 
may retry later */ } static int svc_release(struct socket *sock) { struct sock *sk = sock->sk; struct atm_vcc *vcc; if (sk) { vcc = ATM_SD(sock); pr_debug("%p\n", vcc); clear_bit(ATM_VF_READY, &vcc->flags); /* * VCC pointer is used as a reference, * so we must not free it (thereby subjecting it to re-use) * before all pending connections are closed */ svc_disconnect(vcc); vcc_release(sock); } return 0; } static int svc_bind(struct socket *sock, struct sockaddr *sockaddr, int sockaddr_len) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct sockaddr_atmsvc *addr; struct atm_vcc *vcc; int error; if (sockaddr_len != sizeof(struct sockaddr_atmsvc)) return -EINVAL; lock_sock(sk); if (sock->state == SS_CONNECTED) { error = -EISCONN; goto out; } if (sock->state != SS_UNCONNECTED) { error = -EINVAL; goto out; } vcc = ATM_SD(sock); addr = (struct sockaddr_atmsvc *) sockaddr; if (addr->sas_family != AF_ATMSVC) { error = -EAFNOSUPPORT; goto out; } clear_bit(ATM_VF_BOUND, &vcc->flags); /* failing rebind will kill old binding */ /* @@@ check memory (de)allocation on rebind */ if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) { error = -EBADFD; goto out; } vcc->local = *addr; set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); sigd_enq(vcc, as_bind, NULL, NULL, &vcc->local); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); clear_bit(ATM_VF_REGIS, &vcc->flags); /* doesn't count */ if (!sigd) { error = -EUNATCH; goto out; } if (!sk->sk_err) set_bit(ATM_VF_BOUND, &vcc->flags); error = -sk->sk_err; out: release_sock(sk); return error; } static int svc_connect(struct socket *sock, struct sockaddr *sockaddr, int sockaddr_len, int flags) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct sockaddr_atmsvc *addr; struct atm_vcc *vcc = ATM_SD(sock); int error; pr_debug("%p\n", vcc); lock_sock(sk); if (sockaddr_len != 
sizeof(struct sockaddr_atmsvc)) { error = -EINVAL; goto out; } switch (sock->state) { default: error = -EINVAL; goto out; case SS_CONNECTED: error = -EISCONN; goto out; case SS_CONNECTING: if (test_bit(ATM_VF_WAITING, &vcc->flags)) { error = -EALREADY; goto out; } sock->state = SS_UNCONNECTED; if (sk->sk_err) { error = -sk->sk_err; goto out; } break; case SS_UNCONNECTED: addr = (struct sockaddr_atmsvc *) sockaddr; if (addr->sas_family != AF_ATMSVC) { error = -EAFNOSUPPORT; goto out; } if (!test_bit(ATM_VF_HASQOS, &vcc->flags)) { error = -EBADFD; goto out; } if (vcc->qos.txtp.traffic_class == ATM_ANYCLASS || vcc->qos.rxtp.traffic_class == ATM_ANYCLASS) { error = -EINVAL; goto out; } if (!vcc->qos.txtp.traffic_class && !vcc->qos.rxtp.traffic_class) { error = -EINVAL; goto out; } vcc->remote = *addr; set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sigd_enq(vcc, as_connect, NULL, NULL, &vcc->remote); if (flags & O_NONBLOCK) { finish_wait(sk_sleep(sk), &wait); sock->state = SS_CONNECTING; error = -EINPROGRESS; goto out; } error = 0; while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); if (!signal_pending(current)) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); continue; } pr_debug("*ABORT*\n"); /* * This is tricky: * Kernel ---close--> Demon * Kernel <--close--- Demon * or * Kernel ---close--> Demon * Kernel <--error--- Demon * or * Kernel ---close--> Demon * Kernel <--okay---- Demon * Kernel <--close--- Demon */ sigd_enq(vcc, as_close, NULL, NULL, NULL); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); schedule(); } if (!sk->sk_err) while (!test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); schedule(); } clear_bit(ATM_VF_REGIS, &vcc->flags); clear_bit(ATM_VF_RELEASED, &vcc->flags); clear_bit(ATM_VF_CLOSE, &vcc->flags); /* we're gone now but may connect later */ error = 
-EINTR; break; } finish_wait(sk_sleep(sk), &wait); if (error) goto out; if (!sigd) { error = -EUNATCH; goto out; } if (sk->sk_err) { error = -sk->sk_err; goto out; } } /* * Not supported yet * * #ifndef CONFIG_SINGLE_SIGITF */ vcc->qos.txtp.max_pcr = SELECT_TOP_PCR(vcc->qos.txtp); vcc->qos.txtp.pcr = 0; vcc->qos.txtp.min_pcr = 0; /* * #endif */ error = vcc_connect(sock, vcc->itf, vcc->vpi, vcc->vci); if (!error) sock->state = SS_CONNECTED; else (void)svc_disconnect(vcc); out: release_sock(sk); return error; } static int svc_listen(struct socket *sock, int backlog) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int error; pr_debug("%p\n", vcc); lock_sock(sk); /* let server handle listen on unbound sockets */ if (test_bit(ATM_VF_SESSION, &vcc->flags)) { error = -EINVAL; goto out; } if (test_bit(ATM_VF_LISTEN, &vcc->flags)) { error = -EADDRINUSE; goto out; } set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); sigd_enq(vcc, as_listen, NULL, NULL, &vcc->local); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); if (!sigd) { error = -EUNATCH; goto out; } set_bit(ATM_VF_LISTEN, &vcc->flags); vcc_insert_socket(sk); sk->sk_max_ack_backlog = backlog > 0 ? 
backlog : ATM_BACKLOG_DEFAULT; error = -sk->sk_err; out: release_sock(sk); return error; } static int svc_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; struct atmsvc_msg *msg; struct atm_vcc *old_vcc = ATM_SD(sock); struct atm_vcc *new_vcc; int error; lock_sock(sk); error = svc_create(sock_net(sk), newsock, 0, 0); if (error) goto out; new_vcc = ATM_SD(newsock); pr_debug("%p -> %p\n", old_vcc, new_vcc); while (1) { DEFINE_WAIT(wait); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); while (!(skb = skb_dequeue(&sk->sk_receive_queue)) && sigd) { if (test_bit(ATM_VF_RELEASED, &old_vcc->flags)) break; if (test_bit(ATM_VF_CLOSE, &old_vcc->flags)) { error = -sk->sk_err; break; } if (flags & O_NONBLOCK) { error = -EAGAIN; break; } release_sock(sk); schedule(); lock_sock(sk); if (signal_pending(current)) { error = -ERESTARTSYS; break; } prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); if (error) goto out; if (!skb) { error = -EUNATCH; goto out; } msg = (struct atmsvc_msg *)skb->data; new_vcc->qos = msg->qos; set_bit(ATM_VF_HASQOS, &new_vcc->flags); new_vcc->remote = msg->svc; new_vcc->local = msg->local; new_vcc->sap = msg->sap; error = vcc_connect(newsock, msg->pvc.sap_addr.itf, msg->pvc.sap_addr.vpi, msg->pvc.sap_addr.vci); dev_kfree_skb(skb); sk->sk_ack_backlog--; if (error) { sigd_enq2(NULL, as_reject, old_vcc, NULL, NULL, &old_vcc->qos, error); error = error == -EAGAIN ? 
-EBUSY : error; goto out; } /* wait should be short, so we ignore the non-blocking flag */ set_bit(ATM_VF_WAITING, &new_vcc->flags); prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait, TASK_UNINTERRUPTIBLE); sigd_enq(new_vcc, as_accept, old_vcc, NULL, NULL); while (test_bit(ATM_VF_WAITING, &new_vcc->flags) && sigd) { release_sock(sk); schedule(); lock_sock(sk); prepare_to_wait(sk_sleep(sk_atm(new_vcc)), &wait, TASK_UNINTERRUPTIBLE); } finish_wait(sk_sleep(sk_atm(new_vcc)), &wait); if (!sigd) { error = -EUNATCH; goto out; } if (!sk_atm(new_vcc)->sk_err) break; if (sk_atm(new_vcc)->sk_err != ERESTARTSYS) { error = -sk_atm(new_vcc)->sk_err; goto out; } } newsock->state = SS_CONNECTED; out: release_sock(sk); return error; } static int svc_getname(struct socket *sock, struct sockaddr *sockaddr, int *sockaddr_len, int peer) { struct sockaddr_atmsvc *addr; *sockaddr_len = sizeof(struct sockaddr_atmsvc); addr = (struct sockaddr_atmsvc *) sockaddr; memcpy(addr, peer ? &ATM_SD(sock)->remote : &ATM_SD(sock)->local, sizeof(struct sockaddr_atmsvc)); return 0; } int svc_change_qos(struct atm_vcc *vcc, struct atm_qos *qos) { struct sock *sk = sk_atm(vcc); DEFINE_WAIT(wait); set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); sigd_enq2(vcc, as_modify, NULL, NULL, &vcc->local, qos, 0); while (test_bit(ATM_VF_WAITING, &vcc->flags) && !test_bit(ATM_VF_RELEASED, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_UNINTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); if (!sigd) return -EUNATCH; return -sk->sk_err; } static int svc_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int value, error = 0; lock_sock(sk); switch (optname) { case SO_ATMSAP: if (level != SOL_ATM || optlen != sizeof(struct atm_sap)) { error = -EINVAL; goto out; } if (copy_from_user(&vcc->sap, optval, optlen)) { error = -EFAULT; 
goto out; } set_bit(ATM_VF_HASSAP, &vcc->flags); break; case SO_MULTIPOINT: if (level != SOL_ATM || optlen != sizeof(int)) { error = -EINVAL; goto out; } if (get_user(value, (int __user *)optval)) { error = -EFAULT; goto out; } if (value == 1) set_bit(ATM_VF_SESSION, &vcc->flags); else if (value == 0) clear_bit(ATM_VF_SESSION, &vcc->flags); else error = -EINVAL; break; default: error = vcc_setsockopt(sock, level, optname, optval, optlen); } out: release_sock(sk); return error; } static int svc_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; int error = 0, len; lock_sock(sk); if (!__SO_LEVEL_MATCH(optname, level) || optname != SO_ATMSAP) { error = vcc_getsockopt(sock, level, optname, optval, optlen); goto out; } if (get_user(len, optlen)) { error = -EFAULT; goto out; } if (len != sizeof(struct atm_sap)) { error = -EINVAL; goto out; } if (copy_to_user(optval, &ATM_SD(sock)->sap, sizeof(struct atm_sap))) { error = -EFAULT; goto out; } out: release_sock(sk); return error; } static int svc_addparty(struct socket *sock, struct sockaddr *sockaddr, int sockaddr_len, int flags) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int error; lock_sock(sk); set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sigd_enq(vcc, as_addparty, NULL, NULL, (struct sockaddr_atmsvc *) sockaddr); if (flags & O_NONBLOCK) { finish_wait(sk_sleep(sk), &wait); error = -EINPROGRESS; goto out; } pr_debug("added wait queue\n"); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); error = xchg(&sk->sk_err_soft, 0); out: release_sock(sk); return error; } static int svc_dropparty(struct socket *sock, int ep_ref) { DEFINE_WAIT(wait); struct sock *sk = sock->sk; struct atm_vcc *vcc = ATM_SD(sock); int error; lock_sock(sk); 
set_bit(ATM_VF_WAITING, &vcc->flags); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); sigd_enq2(vcc, as_dropparty, NULL, NULL, NULL, NULL, ep_ref); while (test_bit(ATM_VF_WAITING, &vcc->flags) && sigd) { schedule(); prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); } finish_wait(sk_sleep(sk), &wait); if (!sigd) { error = -EUNATCH; goto out; } error = xchg(&sk->sk_err_soft, 0); out: release_sock(sk); return error; } static int svc_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { int error, ep_ref; struct sockaddr_atmsvc sa; struct atm_vcc *vcc = ATM_SD(sock); switch (cmd) { case ATM_ADDPARTY: if (!test_bit(ATM_VF_SESSION, &vcc->flags)) return -EINVAL; if (copy_from_user(&sa, (void __user *) arg, sizeof(sa))) return -EFAULT; error = svc_addparty(sock, (struct sockaddr *)&sa, sizeof(sa), 0); break; case ATM_DROPPARTY: if (!test_bit(ATM_VF_SESSION, &vcc->flags)) return -EINVAL; if (copy_from_user(&ep_ref, (void __user *) arg, sizeof(int))) return -EFAULT; error = svc_dropparty(sock, ep_ref); break; default: error = vcc_ioctl(sock, cmd, arg); } return error; } #ifdef CONFIG_COMPAT static int svc_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { /* The definition of ATM_ADDPARTY uses the size of struct atm_iobuf. But actually it takes a struct sockaddr_atmsvc, which doesn't need compat handling. So all we have to do is fix up cmd... 
*/ if (cmd == COMPAT_ATM_ADDPARTY) cmd = ATM_ADDPARTY; if (cmd == ATM_ADDPARTY || cmd == ATM_DROPPARTY) return svc_ioctl(sock, cmd, arg); else return vcc_compat_ioctl(sock, cmd, arg); } #endif /* CONFIG_COMPAT */ static const struct proto_ops svc_proto_ops = { .family = PF_ATMSVC, .owner = THIS_MODULE, .release = svc_release, .bind = svc_bind, .connect = svc_connect, .socketpair = sock_no_socketpair, .accept = svc_accept, .getname = svc_getname, .poll = vcc_poll, .ioctl = svc_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = svc_compat_ioctl, #endif .listen = svc_listen, .shutdown = svc_shutdown, .setsockopt = svc_setsockopt, .getsockopt = svc_getsockopt, .sendmsg = vcc_sendmsg, .recvmsg = vcc_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static int svc_create(struct net *net, struct socket *sock, int protocol, int kern) { int error; if (!net_eq(net, &init_net)) return -EAFNOSUPPORT; sock->ops = &svc_proto_ops; error = vcc_create(net, sock, protocol, AF_ATMSVC); if (error) return error; ATM_SD(sock)->local.sas_family = AF_ATMSVC; ATM_SD(sock)->remote.sas_family = AF_ATMSVC; return 0; } static const struct net_proto_family svc_family_ops = { .family = PF_ATMSVC, .create = svc_create, .owner = THIS_MODULE, }; /* * Initialize the ATM SVC protocol family */ int __init atmsvc_init(void) { return sock_register(&svc_family_ops); } void atmsvc_exit(void) { sock_unregister(PF_ATMSVC); }
gpl-2.0
embeddedarm/linux-3.14-pxa16x
sound/core/ctljack.c
8766
1577
/* * Helper functions for jack-detection kcontrols * * Copyright (c) 2011 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/kernel.h> #include <linux/export.h> #include <sound/core.h> #include <sound/control.h> #define jack_detect_kctl_info snd_ctl_boolean_mono_info static int jack_detect_kctl_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = kcontrol->private_value; return 0; } static struct snd_kcontrol_new jack_detect_kctl = { /* name is filled later */ .iface = SNDRV_CTL_ELEM_IFACE_CARD, .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = jack_detect_kctl_info, .get = jack_detect_kctl_get, }; struct snd_kcontrol * snd_kctl_jack_new(const char *name, int idx, void *private_data) { struct snd_kcontrol *kctl; kctl = snd_ctl_new1(&jack_detect_kctl, private_data); if (!kctl) return NULL; snprintf(kctl->id.name, sizeof(kctl->id.name), "%s Jack", name); kctl->id.index = idx; kctl->private_value = 0; return kctl; } EXPORT_SYMBOL_GPL(snd_kctl_jack_new); void snd_kctl_jack_report(struct snd_card *card, struct snd_kcontrol *kctl, bool status) { if (kctl->private_value == status) return; kctl->private_value = status; snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &kctl->id); } EXPORT_SYMBOL_GPL(snd_kctl_jack_report);
gpl-2.0
invisiblek/android_kernel_lge_g3
arch/mips/pci/fixup-sni.c
9534
5102
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * SNI specific PCI support for RM200/RM300. * * Copyright (C) 1997 - 2000, 2003, 04 Ralf Baechle (ralf@linux-mips.org) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/pci.h> #include <asm/mipsregs.h> #include <asm/sni.h> #include <irq.h> /* * PCIMT Shortcuts ... */ #define SCSI PCIMT_IRQ_SCSI #define ETH PCIMT_IRQ_ETHERNET #define INTA PCIMT_IRQ_INTA #define INTB PCIMT_IRQ_INTB #define INTC PCIMT_IRQ_INTC #define INTD PCIMT_IRQ_INTD /* * Device 0: PCI EISA Bridge (directly routed) * Device 1: NCR53c810 SCSI (directly routed) * Device 2: PCnet32 Ethernet (directly routed) * Device 3: VGA (routed to INTB) * Device 4: Unused * Device 5: Slot 2 * Device 6: Slot 3 * Device 7: Slot 4 * * Documentation says the VGA is device 5 and device 3 is unused but that * seem to be a documentation error. At least on my RM200C the Cirrus * Logic CL-GD5434 VGA is device 3. */ static char irq_tab_rm200[8][5] __initdata = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* EISA bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */ { INTB, INTB, INTB, INTB, INTB }, /* VGA */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */ { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */ { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ }; /* * In Revision D of the RM300 Device 2 has become a normal purpose Slot 1 * * The VGA card is optional for RM300 systems. 
*/ static char irq_tab_rm300d[8][5] __initdata = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* EISA bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ { 0, INTC, INTD, INTA, INTB }, /* Slot 1 */ { INTB, INTB, INTB, INTB, INTB }, /* VGA */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */ { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */ { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ }; static char irq_tab_rm300e[5][5] __initdata = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { SCSI, SCSI, SCSI, SCSI, SCSI }, /* SCSI */ { 0, INTC, INTD, INTA, INTB }, /* Bridge/i960 */ { 0, INTD, INTA, INTB, INTC }, /* Slot 1 */ { 0, INTA, INTB, INTC, INTD }, /* Slot 2 */ }; #undef SCSI #undef ETH #undef INTA #undef INTB #undef INTC #undef INTD /* * PCIT Shortcuts ... */ #define SCSI0 PCIT_IRQ_SCSI0 #define SCSI1 PCIT_IRQ_SCSI1 #define ETH PCIT_IRQ_ETHERNET #define INTA PCIT_IRQ_INTA #define INTB PCIT_IRQ_INTB #define INTC PCIT_IRQ_INTC #define INTD PCIT_IRQ_INTD static char irq_tab_pcit[13][5] __initdata = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { SCSI0, SCSI0, SCSI0, SCSI0, SCSI0 }, /* SCSI */ { SCSI1, SCSI1, SCSI1, SCSI1, SCSI1 }, /* SCSI */ { ETH, ETH, ETH, ETH, ETH }, /* Ethernet */ { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, INTA, INTB, INTC, INTD }, /* Slot 1 */ { 0, INTB, INTC, INTD, INTA }, /* Slot 2 */ { 0, INTC, INTD, INTA, INTB }, /* Slot 3 */ { 0, INTD, INTA, INTB, INTC }, /* Slot 4 */ { 0, INTA, INTB, INTC, INTD }, /* Slot 5 */ }; static char irq_tab_pcit_cplus[13][5] __initdata = { /* INTA INTB INTC INTD */ { 0, 0, 0, 0, 0 }, /* HOST bridge */ { 0, INTB, INTC, INTD, INTA }, /* PCI Slot 9 */ { 0, 0, 0, 0, 0 }, /* PCI-EISA */ { 0, 0, 0, 0, 0 }, /* Unused */ { 0, INTA, INTB, INTC, INTD }, /* PCI-PCI bridge */ { 0, INTB, INTC, INTD, INTA }, /* fixup */ }; static inline int is_rm300_revd(void) { 
unsigned char csmsr = *(volatile unsigned char *)PCIMT_CSMSR; return (csmsr & 0xa0) == 0x20; } int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { switch (sni_brd_type) { case SNI_BRD_PCI_TOWER_CPLUS: if (slot == 4) { /* * SNI messed up interrupt wiring for onboard * PCI bus 1; we need to fix this up here */ while (dev && dev->bus->number != 1) dev = dev->bus->self; if (dev && dev->devfn >= PCI_DEVFN(4, 0)) slot = 5; } return irq_tab_pcit_cplus[slot][pin]; case SNI_BRD_PCI_TOWER: return irq_tab_pcit[slot][pin]; case SNI_BRD_PCI_MTOWER: if (is_rm300_revd()) return irq_tab_rm300d[slot][pin]; /* fall through */ case SNI_BRD_PCI_DESKTOP: return irq_tab_rm200[slot][pin]; case SNI_BRD_PCI_MTOWER_CPLUS: return irq_tab_rm300e[slot][pin]; } return 0; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; }
gpl-2.0
FreeOptimusProject/android_kernel_lge_p970
arch/mips/pci/fixup-pmcmsp.c
9534
10453
/* * PMC-Sierra MSP board specific pci fixups. * * Copyright 2001 MontaVista Software Inc. * Copyright 2005-2007 PMC-Sierra, Inc * * Author: MontaVista Software, Inc. * ppopov@mvista.com or source@mvista.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #ifdef CONFIG_PCI #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <asm/byteorder.h> #include <msp_pci.h> #include <msp_cic_int.h> /* PCI interrupt pins */ #define IRQ4 MSP_INT_EXT4 #define IRQ5 MSP_INT_EXT5 #define IRQ6 MSP_INT_EXT6 #if defined(CONFIG_PMC_MSP7120_GW) /* Garibaldi Board IRQ wiring to PCI slots */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, IRQ4, IRQ4, 0, 0 }, /* 18 (AD[28]): slot 0 */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, IRQ5, IRQ5, 0, 0 }, /* 20 (AD[30]): slot 1 */ {0, IRQ6, IRQ6, 0, 0 } /* 21 (AD[31]): slot 2 */ }; #elif defined(CONFIG_PMC_MSP7120_EVAL) /* MSP7120 Eval Board IRQ wiring to PCI slots */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* 
(AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused */ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, IRQ6, IRQ6, 0, 0 }, /* 6 (AD[16]): slot 3 (mini) */ {0, IRQ5, IRQ5, 0, 0 }, /* 7 (AD[17]): slot 2 (mini) */ {0, IRQ4, IRQ4, IRQ4, IRQ4}, /* 8 (AD[18]): slot 0 (PCI) */ {0, IRQ5, IRQ5, IRQ5, IRQ5}, /* 9 (AD[19]): slot 1 (PCI) */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */ }; #else /* Unknown board -- don't assign any IRQs */ static char irq_tab[][5] __initdata = { /* INTA INTB INTC INTD */ {0, 0, 0, 0, 0 }, /* (AD[0]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[1]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[2]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[3]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[4]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[5]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[6]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[7]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[8]): Unused */ {0, 0, 0, 0, 0 }, /* (AD[9]): Unused */ {0, 0, 0, 0, 0 }, /* 0 (AD[10]): Unused */ {0, 0, 0, 0, 0 }, /* 1 (AD[11]): Unused 
*/ {0, 0, 0, 0, 0 }, /* 2 (AD[12]): Unused */ {0, 0, 0, 0, 0 }, /* 3 (AD[13]): Unused */ {0, 0, 0, 0, 0 }, /* 4 (AD[14]): Unused */ {0, 0, 0, 0, 0 }, /* 5 (AD[15]): Unused */ {0, 0, 0, 0, 0 }, /* 6 (AD[16]): Unused */ {0, 0, 0, 0, 0 }, /* 7 (AD[17]): Unused */ {0, 0, 0, 0, 0 }, /* 8 (AD[18]): Unused */ {0, 0, 0, 0, 0 }, /* 9 (AD[19]): Unused */ {0, 0, 0, 0, 0 }, /* 10 (AD[20]): Unused */ {0, 0, 0, 0, 0 }, /* 11 (AD[21]): Unused */ {0, 0, 0, 0, 0 }, /* 12 (AD[22]): Unused */ {0, 0, 0, 0, 0 }, /* 13 (AD[23]): Unused */ {0, 0, 0, 0, 0 }, /* 14 (AD[24]): Unused */ {0, 0, 0, 0, 0 }, /* 15 (AD[25]): Unused */ {0, 0, 0, 0, 0 }, /* 16 (AD[26]): Unused */ {0, 0, 0, 0, 0 }, /* 17 (AD[27]): Unused */ {0, 0, 0, 0, 0 }, /* 18 (AD[28]): Unused */ {0, 0, 0, 0, 0 }, /* 19 (AD[29]): Unused */ {0, 0, 0, 0, 0 }, /* 20 (AD[30]): Unused */ {0, 0, 0, 0, 0 } /* 21 (AD[31]): Unused */ }; #endif /***************************************************************************** * * FUNCTION: pcibios_plat_dev_init * _________________________________________________________________________ * * DESCRIPTION: Perform platform specific device initialization at * pci_enable_device() time. * None are needed for the MSP7120 PCI Controller. * * INPUTS: dev - structure describing the PCI device * * OUTPUTS: none * * RETURNS: PCIBIOS_SUCCESSFUL * ****************************************************************************/ int pcibios_plat_dev_init(struct pci_dev *dev) { return PCIBIOS_SUCCESSFUL; } /***************************************************************************** * * FUNCTION: pcibios_map_irq * _________________________________________________________________________ * * DESCRIPTION: Perform board supplied PCI IRQ mapping routine. * * INPUTS: dev - unused * slot - PCI slot. Identified by which bit of the AD[] bus * drives the IDSEL line. AD[10] is 0, AD[31] is * slot 21. * pin - numbered using the scheme of the PCI_INTERRUPT_PIN * field of the config header. 
* * OUTPUTS: none * * RETURNS: IRQ number * ****************************************************************************/ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { #if !defined(CONFIG_PMC_MSP7120_GW) && !defined(CONFIG_PMC_MSP7120_EVAL) printk(KERN_WARNING "PCI: unknown board, no PCI IRQs assigned.\n"); #endif printk(KERN_WARNING "PCI: irq_tab returned %d for slot=%d pin=%d\n", irq_tab[slot][pin], slot, pin); return irq_tab[slot][pin]; } #endif /* CONFIG_PCI */
gpl-2.0
NeverLEX/linux
arch/sh/boot/compressed/misc.c
11326
2699
/* * arch/sh/boot/compressed/misc.c * * This is a collection of several routines from gzip-1.0.3 * adapted for Linux. * * malloc by Hannu Savolainen 1993 and Matthias Urlichs 1994 * * Adapted for SH by Stuart Menefy, Aug 1999 * * Modified to use standard LinuxSH BIOS by Greg Banks 7Jul2000 */ #include <asm/uaccess.h> #include <asm/addrspace.h> #include <asm/page.h> /* * gzip declarations */ #define STATIC static #undef memset #undef memcpy #define memzero(s, n) memset ((s), 0, (n)) /* cache.c */ #define CACHE_ENABLE 0 #define CACHE_DISABLE 1 int cache_control(unsigned int command); extern char input_data[]; extern int input_len; static unsigned char *output; static void error(char *m); int puts(const char *); extern int _text; /* Defined in vmlinux.lds.S */ extern int _end; static unsigned long free_mem_ptr; static unsigned long free_mem_end_ptr; #ifdef CONFIG_HAVE_KERNEL_BZIP2 #define HEAP_SIZE 0x400000 #else #define HEAP_SIZE 0x10000 #endif #ifdef CONFIG_KERNEL_GZIP #include "../../../../lib/decompress_inflate.c" #endif #ifdef CONFIG_KERNEL_BZIP2 #include "../../../../lib/decompress_bunzip2.c" #endif #ifdef CONFIG_KERNEL_LZMA #include "../../../../lib/decompress_unlzma.c" #endif #ifdef CONFIG_KERNEL_XZ #include "../../../../lib/decompress_unxz.c" #endif #ifdef CONFIG_KERNEL_LZO #include "../../../../lib/decompress_unlzo.c" #endif int puts(const char *s) { /* This should be updated to use the sh-sci routines */ return 0; } void* memset(void* s, int c, size_t n) { int i; char *ss = (char*)s; for (i=0;i<n;i++) ss[i] = c; return s; } void* memcpy(void* __dest, __const void* __src, size_t __n) { int i; char *d = (char *)__dest, *s = (char *)__src; for (i=0;i<__n;i++) d[i] = s[i]; return __dest; } static void error(char *x) { puts("\n\n"); puts(x); puts("\n\n -- System halted"); while(1); /* Halt */ } #ifdef CONFIG_SUPERH64 #define stackalign 8 #else #define stackalign 4 #endif #define STACK_SIZE (4096) long __attribute__ ((aligned(stackalign))) user_stack[STACK_SIZE]; 
long *stack_start = &user_stack[STACK_SIZE]; void decompress_kernel(void) { unsigned long output_addr; #ifdef CONFIG_SUPERH64 output_addr = (CONFIG_MEMORY_START + 0x2000); #else output_addr = __pa((unsigned long)&_text+PAGE_SIZE); #if defined(CONFIG_29BIT) output_addr |= P2SEG; #endif #endif output = (unsigned char *)output_addr; free_mem_ptr = (unsigned long)&_end; free_mem_end_ptr = free_mem_ptr + HEAP_SIZE; puts("Uncompressing Linux... "); cache_control(CACHE_ENABLE); decompress(input_data, input_len, NULL, NULL, output, NULL, error); cache_control(CACHE_DISABLE); puts("Ok, booting the kernel.\n"); }
gpl-2.0
BORETS24/Zenfone-2-500CL
linux/kernel/drivers/external_drivers/intel_media/otm_hdmi/pil/specific/mfld/ps_hdmi.c
63
15387
/* This file is provided under a dual BSD/GPLv2 license. When using or redistributing this file, you may do so under either license. GPL LICENSE SUMMARY Copyright(c) 2011 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called LICENSE.GPL. Contact Information: Intel Corporation 2200 Mission College Blvd. Santa Clara, CA 95054 BSD LICENSE Copyright(c) 2011 Intel Corporation. All rights reserved. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "otm_hdmi_types.h" #include <asm/io.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/delay.h> #include "otm_hdmi.h" #include "ipil_hdmi.h" #include "ps_hdmi.h" #include "psb_drv.h" #include <asm/intel_scu_pmic.h> #include "psb_powermgmt.h" /* Implementation of the Medfield specific PCI driver for receiving * Hotplug and other device status signals. * In Medfield platform, the HPD and OCP signals are delivered to the * display sub-system from the MSIC chip. 
*/ /* Constants */ #define PS_HDMI_HPD_PCI_DRIVER_NAME "Medfield HDMI MSIC Driver" /* Globals */ static hdmi_context_t *g_context; #define PS_HDMI_MMIO_RESOURCE 0 #define PS_VDC_OFFSET 0x00000000 #define PS_VDC_SIZE 0x000080000 #define PS_MSIC_PCI_DEVICE_ID 0x0831 #define PS_MSIC_VRINT_ADDR 0xFFFF7FCB #define PS_MSIC_VRINT_IOADDR_LEN 0x02 #define PS_HDMI_OCP_STATUS (1 << 2) #define PS_HDMI_HPD_STATUS_BIT (1 << 3) #define PS_MSIC_VCC330CNT 0xd3 #define PS_VCC330_OFF 0x24 #define PS_VCC330_ON 0x37 #define PS_MSIC_VHDMICNT 0xde #define PS_VHDMI_OFF 0x24 #define PS_VHDMI_ON 0xa4 #define PS_VHDMI_DB_30MS 0x60 #define PS_MSIC_HDMI_STATUS_CMD 0x281 #define PS_MSIC_HDMI_STATUS (1 << 0) #define PS_MSIC_IRQLVL1_MASK 0x21 #define PS_VREG_MASK (1 << 5) otm_hdmi_ret_t ps_hdmi_pci_dev_init(void *context, struct pci_dev *pdev) { otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS; int result = 0; unsigned int vdc_start; uint32_t pci_address = 0; uint8_t pci_dev_revision = 0; hdmi_context_t *ctx = NULL; if (pdev == NULL || context == NULL) { rc = OTM_HDMI_ERR_INTERNAL; goto exit; } ctx = (hdmi_context_t *)context; pr_debug("get resource start\n"); result = pci_read_config_dword(pdev, 16, &vdc_start); if (result != 0) { rc = OTM_HDMI_ERR_FAILED; goto exit; } pci_address = vdc_start + PS_VDC_OFFSET; pr_debug("map IO region\n"); /* Map IO region and save its length */ ctx->io_length = PS_VDC_SIZE; ctx->io_address = ioremap_cache(pci_address, ctx->io_length); if (!ctx->io_address) { rc = OTM_HDMI_ERR_FAILED; goto exit; } pr_debug("get PCI dev revision\n"); result = pci_read_config_byte(pdev, 8, &pci_dev_revision); if (result != 0) { rc = OTM_HDMI_ERR_FAILED; goto exit; } ctx->dev.id = pci_dev_revision; /* Store this context for use by MSIC PCI driver */ g_context = ctx; exit: return rc; } otm_hdmi_ret_t ps_hdmi_pci_dev_deinit(void *context) { otm_hdmi_ret_t rc = OTM_HDMI_SUCCESS; hdmi_context_t *ctx = NULL; if (context == NULL) { rc = OTM_HDMI_ERR_INTERNAL; goto exit; } ctx = (hdmi_context_t 
*)context; /* unmap IO region */ iounmap(ctx->io_address) ; exit: return rc; } otm_hdmi_ret_t ps_hdmi_i2c_edid_read(void *ctx, unsigned int sp, unsigned int offset, void *buffer, unsigned int size) { hdmi_context_t *context = (hdmi_context_t *)ctx; char *src = context->edid_raw + sp * SEGMENT_SIZE + offset; memcpy(buffer, src, size); return OTM_HDMI_SUCCESS; } static unsigned char vrint_data; bool ps_hdmi_power_rails_on(void) { int ret = 0; pr_debug("Entered %s\n", __func__); if (vrint_data == 0) { /* If it is not invoked in response to hot plug event, * then simply a NOP as power rails are never turned off. */ pr_debug("%s: NOP as there is no HPD.\n", __func__); return true; } /* Turn on HDMI power rails. These will be on in all non-S0iX * states so that HPD and connection status will work. VCC330 * will have ~1.7mW usage during idle states when the display * is active */ ret = intel_scu_ipc_iowrite8(PS_MSIC_VCC330CNT, PS_VCC330_ON); if (ret) { pr_debug("%s: Failed to power on VCC330.\n", __func__); return false; } if (vrint_data & PS_HDMI_OCP_STATUS) { /* When there occurs overcurrent in MSIC HDMI HDP, * need to reset VHDMIEN by clearing to 0 then set to 1 */ ret = intel_scu_ipc_iowrite8(PS_MSIC_VHDMICNT, PS_VHDMI_OFF); if (ret) { pr_debug("%s: Failed to power off VHDMI.\n", __func__); goto err; } vrint_data = 0; } /* MSIC documentation requires that there be a 500us * delay after enabling VCC330 before you can enable * VHDMI */ usleep_range(500, 1000); /* Extend VHDMI switch de-bounce time, to avoid * redundant MSIC VREG/HDMI interrupt during HDMI * cable plugged in/out */ ret = intel_scu_ipc_iowrite8(PS_MSIC_VHDMICNT, PS_VHDMI_ON | PS_VHDMI_DB_30MS); if (ret) { pr_debug("%s: Failed to power on VHDMI.\n", __func__); goto err; } return true; err: ret = intel_scu_ipc_iowrite8(PS_MSIC_VCC330CNT, PS_VCC330_OFF); if (ret) { pr_debug("%s: Failed to power off VCC330 during clean up.\n", __func__); /* Fall through */ } return false; } bool ps_hdmi_power_rails_off(void) 
{ /* VCC330 must stay on always for HPD. */ return true; } /* enable/disable HPD */ bool ps_hdmi_enable_hpd(bool enable) { } bool ps_hdmi_power_islands_on() { /* * If pmu_nc_set_power_state fails then accessing HW * reg would result in a crash - IERR/Fabric error. */ if (pmu_nc_set_power_state(OSPM_DISPLAY_B_ISLAND, OSPM_ISLAND_UP, OSPM_REG_TYPE)) BUG(); return true; } void ps_hdmi_power_islands_off() { } void ps_hdmi_vblank_control(struct drm_device *dev, bool on) { if (on) psb_enable_vblank(dev, 1); else psb_disable_vblank(dev, 1); } /* * ps_hdmi_get_cable_status - Get HDMI cable connection status * @context: hdmi device context * * Returns - boolean state. * true - HDMI cable connected * false - HDMI cable disconnected */ bool ps_hdmi_get_cable_status(void *context) { hdmi_context_t *ctx = (hdmi_context_t *)context; u8 data = 0; if (ctx == NULL) return false; /* Read HDMI cable status from MSIC chip */ intel_scu_ipc_ioread8(PS_MSIC_HDMI_STATUS_CMD, &data); if (data & PS_MSIC_HDMI_STATUS) ctx->is_connected = true; else ctx->is_connected = false; return ctx->is_connected; } /** * hdmi interrupt handler (upper half). * @irq: irq number * @data: data for the interrupt handler * * Returns: IRQ_HANDLED on NULL input arguments, and if the * interrupt is not HDMI HPD interrupts. * IRQ_WAKE_THREAD if this is a HDMI HPD interrupt. * hdmi interrupt handler (upper half). handles the interrupts * by reading hdmi status register and waking up bottom half if needed. */ irqreturn_t ps_hdmi_irq_handler(int irq, void *data) { /* Read interrupt status register */ if (g_context != NULL) { vrint_data = readb(g_context->dev.irq_io_address); /* handle HDMI HPD interrupts. */ if (vrint_data & (PS_HDMI_HPD_STATUS_BIT|PS_HDMI_OCP_STATUS)) return IRQ_WAKE_THREAD; } return IRQ_HANDLED; } /* Power management functions */ /* * Platform specific resume function after deep-sleep * This function is used to carry out any specific actviity * to aid HDMI IP resume in the context of system resume. 
* This function will always be scheduled to execute after * the system has finished resuming. */ void ps_post_resume_wq(struct work_struct *work) { hdmi_context_t *ctx = container_of(work, hdmi_context_t, post_resume_work); int ret = 0; pr_debug("Entered %s\n", __func__); if (ctx == NULL) { pr_err("%s: NULL context!\n", __func__); return; } /* While going to suspend state, the HPD interrupts from MSIC * were masked. During the resume, we do not immediately unmask * the interrupt to avoid race between the resultant hotplug * handlers and system resume activity. Instead, we simply turn * on the HDMI MSIC power rails and schedule this function to be * called after the system finishes a complete resume. At this * time, it is safe to re-enable HPD interrupts. */ ret = intel_scu_ipc_update_register(PS_MSIC_IRQLVL1_MASK, 0x0, PS_VREG_MASK); if (ret) { pr_debug("%s: Failed to unmask VREG IRQ.\n", __func__); goto exit; } exit: pr_debug("Exiting %s\n", __func__); } static int ps_hdmi_hpd_suspend(struct device *dev) { int ret = 0; pr_debug("Entered %s\n", __func__); /* suspend process is irreversible */ ret = intel_scu_ipc_update_register(PS_MSIC_IRQLVL1_MASK, 0xff, PS_VREG_MASK); if (ret) { pr_debug("%s: Failed to mask VREG IRQ.\n", __func__); } ret = intel_scu_ipc_iowrite8(PS_MSIC_VHDMICNT, PS_VHDMI_OFF); if (ret) { pr_debug("%s: Failed to power off VHDMI.\n", __func__); } ret = intel_scu_ipc_iowrite8(PS_MSIC_VCC330CNT, PS_VCC330_OFF); if (ret) { pr_debug("%s: Failed to power off VCC330.\n", __func__); } pr_debug("Exiting %s\n", __func__); return ret; } static int ps_hdmi_hpd_resume(struct device *dev) { int ret = 0; pr_debug("Entered %s\n", __func__); ret = intel_scu_ipc_iowrite8(PS_MSIC_VCC330CNT, PS_VCC330_ON); if (ret) { pr_debug("%s: Failed to power on VCC330.\n", __func__); goto err; } /* MSIC documentation requires that there be a 500us delay after enabling VCC330 before you can enable VHDMI */ usleep_range(500, 1000); ret = intel_scu_ipc_iowrite8(PS_MSIC_VHDMICNT, 
PS_VHDMI_ON | PS_VHDMI_DB_30MS); if (ret) { pr_debug("%s: Failed to power on VHDMI.\n", __func__); goto err; } /* We schedule a delayed wok item to be executed only after the * the full system has resumed. */ queue_work(g_context->post_resume_wq, &g_context->post_resume_work); pr_debug("Exiting %s\n", __func__); return ret; err: pr_debug("Exiting %s\n", __func__); return ret; } int ps_hdmi_get_hpd_pin(void) { return 0; } void ps_hdmi_override_cable_status(bool state, bool auto_state) { return; } /* PCI probe function */ static int ps_hdmi_hpd_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int result = 0; hdmi_context_t *ctx = g_context; if (pdev == NULL || ctx == NULL) { pr_err("%s: called with NULL device or context\n", __func__); result = -EINVAL; return result; } /* Verify probe is called for the intended device */ if (pdev->device != PS_MSIC_PCI_DEVICE_ID) { pr_err("%s: called for wrong device id = 0x%x\n", __func__, pdev->device); result = -EINVAL; goto exit; } pr_debug("pci_enable_device for 0x%x\n", PS_MSIC_PCI_DEVICE_ID); result = pci_enable_device(pdev); if (result) { pr_err("%s: Failed to enable MSIC PCI device = 0x%x\n", __func__, PS_MSIC_PCI_DEVICE_ID); goto exit; } /* Map IO region for IRQ registers */ ctx->dev.irq_io_address = ioremap_nocache(PS_MSIC_VRINT_ADDR, PS_MSIC_VRINT_IOADDR_LEN); if (!ctx->dev.irq_io_address) { pr_err("%s: Failed to map IO region for MSIC IRQ\n", __func__); result = -ENOMEM; goto exit2; } ctx->irq_number = pdev->irq; pr_debug("%s: IRQ number assigned = %d\n", __func__, pdev->irq); result = request_threaded_irq(ctx->irq_number, ps_hdmi_irq_handler, ctx->hpd_callback, IRQF_SHARED, PS_HDMI_HPD_PCI_DRIVER_NAME, ctx->hpd_data); if (result) { pr_debug("%s: Register irq interrupt %d failed\n", __func__, ctx->irq_number); goto exit3; } /* Create Freezable workqueue for post resume HPD operations */ ctx->post_resume_wq = create_freezable_workqueue("MFLD Post-Resume WQ"); if (!ctx->post_resume_wq) { pr_debug("%s: Failed 
to create post-resume workqueue\n", __func__); goto exit3; } INIT_WORK(&ctx->post_resume_work, ps_post_resume_wq); return result; exit3: iounmap(ctx->dev.irq_io_address); exit2: pci_disable_device(pdev); exit: pci_dev_put(pdev); return result; } /* PCI driver related structures */ static DEFINE_PCI_DEVICE_TABLE(ps_hdmi_hpd_pci_id) = { { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PS_MSIC_PCI_DEVICE_ID) }, { 0 } }; static const struct dev_pm_ops ps_hdmi_hpd_pm_ops = { .suspend = ps_hdmi_hpd_suspend, .resume = ps_hdmi_hpd_resume, }; static struct pci_driver ps_hdmi_hpd_driver = { .name = PS_HDMI_HPD_PCI_DRIVER_NAME, .id_table = ps_hdmi_hpd_pci_id, .probe = ps_hdmi_hpd_probe, .driver.pm = &ps_hdmi_hpd_pm_ops, }; /* PCI Driver registration function */ int ps_hdmi_hpd_register_driver(void) { pr_debug("%s: Registering PCI driver for HDMI HPD\n", __func__); return pci_register_driver(&ps_hdmi_hpd_driver); } /* PCI Driver Cleanup function */ int ps_hdmi_hpd_unregister_driver(void) { /* unmap IO region */ iounmap((void *)g_context->dev.irq_io_address); pci_unregister_driver(&ps_hdmi_hpd_driver); return 0; } /** * notify security component of hdcp and hdmi cable status * * @hdcp HDCP status: true if phase1 is enabled * @cable HDMI connection status: true if connected * * Returns: none */ void ps_hdmi_update_security_hdmi_hdcp_status(bool hdcp, bool cable) { /* Note: do nothing since not clear if mfld needs this or not */ return; }
gpl-2.0
dajuke/AK-OnePone
arch/arm/mm/dma-mapping.c
63
54214
/* * linux/arch/arm/mm/dma-mapping.c * * Copyright (C) 2000-2004 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * DMA uncached mapping support. */ #include <linux/bootmem.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/gfp.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/init.h> #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/dma-contiguous.h> #include <linux/highmem.h> #include <linux/memblock.h> #include <linux/slab.h> #include <linux/iommu.h> #include <linux/io.h> #include <linux/vmalloc.h> #include <asm/memory.h> #include <asm/highmem.h> #include <asm/cacheflush.h> #include <asm/tlbflush.h> #include <asm/sizes.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/system_info.h> #include <asm/dma-contiguous.h> #include <asm/dma-iommu.h> #include "mm.h" /* * The DMA API is built upon the notion of "buffer ownership". A buffer * is either exclusively owned by the CPU (and therefore may be accessed * by it) or exclusively owned by the DMA device. These helper functions * represent the transitions between these two ownership states. * * Note, however, that on later ARMs, this notion does not work due to * speculative prefetches. We model our approach on the assumption that * the CPU does do speculative prefetches, which means we clean caches * before transfers and delay cache invalidation until transfer completion. 
 *
 */

/* Cache maintenance for CPU->device and device->CPU ownership handoff. */
static void __dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);
static void __dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

/**
 * arm_dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/* Coherent variant: no cache maintenance needed, just compute the handle. */
static dma_addr_t arm_coherent_dma_map_page(struct device *dev,
	     struct page *page, unsigned long offset, size_t size,
	     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

/**
 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
				      handle & ~PAGE_MASK, size, dir);
}

static void arm_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	unsigned int offset = handle & (PAGE_SIZE - 1);
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
	__dma_page_cpu_to_dev(page, offset, size, dir);
}

static int arm_dma_set_mask(struct device *dev, u64 dma_mask);

/* Default (non-coherent) DMA ops: full cache maintenance on map/sync. */
struct dma_map_ops arm_dma_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_dma_map_page,
	.unmap_page		= arm_dma_unmap_page,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_single_for_cpu	= arm_dma_sync_single_for_cpu,
	.sync_single_for_device	= arm_dma_sync_single_for_device,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_dma_ops);

static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs);
static void arm_coherent_dma_free(struct device *dev, size_t size,
	void *cpu_addr, dma_addr_t handle, struct dma_attrs *attrs);

/* Ops for hardware-coherent devices: no sync callbacks installed. */
struct dma_map_ops arm_coherent_dma_ops = {
	.alloc			= arm_coherent_dma_alloc,
	.free			= arm_coherent_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= arm_coherent_dma_map_page,
	.map_sg			= arm_dma_map_sg,
	.set_dma_mask		= arm_dma_set_mask,
};
EXPORT_SYMBOL(arm_coherent_dma_ops);

/*
 * Check whether @mask is usable for DMA on this system; returns 1 if so,
 * 0 otherwise (optionally warning via dev_warn when @warn is set).
 */
static int __dma_supported(struct device *dev, u64 mask, bool warn)
{
	unsigned long max_dma_pfn;

	/*
	 * If the mask allows for more memory than we can address,
	 * and we actually have that much memory, then we must
	 * indicate that DMA to this device is not supported.
	 */
	if (sizeof(mask) != sizeof(dma_addr_t) &&
	    mask > (dma_addr_t)~0 &&
	    dma_to_pfn(dev, ~0) < max_pfn) {
		if (warn) {
			dev_warn(dev, "Coherent DMA mask %#llx is larger than dma_addr_t allows\n",
				 mask);
			dev_warn(dev, "Driver did not use or check the return value from dma_set_coherent_mask()?\n");
		}
		return 0;
	}

	max_dma_pfn = min(max_pfn, arm_dma_pfn_limit);

	/*
	 * Translate the device's DMA mask to a PFN limit.  This
	 * PFN number includes the page which we can DMA to.
	 */
	if (dma_to_pfn(dev, mask) < max_dma_pfn) {
		if (warn)
			dev_warn(dev, "Coherent DMA mask %#llx (pfn %#lx-%#lx) covers a smaller range of system memory than the DMA zone pfn 0x0-%#lx\n",
				 mask,
				 dma_to_pfn(dev, 0), dma_to_pfn(dev, mask) + 1,
				 max_dma_pfn + 1);
		return 0;
	}

	return 1;
}

/* Return the device's validated coherent DMA mask, or 0 if unusable. */
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = (u64)arm_dma_limit;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if (!__dma_supported(dev, mask, true))
			return 0;
	}

	return mask;
}

/*
 * Zero (unless DMA_ATTR_SKIP_ZEROING) and cache-flush a freshly
 * allocated buffer, page-by-page via kmap_atomic for highmem pages.
 */
static void __dma_clear_buffer(struct page *page, size_t size,
					struct dma_attrs *attrs)
{
	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	if (PageHighMem(page)) {
		phys_addr_t base = __pfn_to_phys(page_to_pfn(page));
		phys_addr_t end = base + size;
		while (size > 0) {
			void *ptr = kmap_atomic(page);
			if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
				memset(ptr, 0, PAGE_SIZE);
			dmac_flush_range(ptr, ptr + PAGE_SIZE);
			kunmap_atomic(ptr);
			page++;
			size -= PAGE_SIZE;
		}
		outer_flush_range(base, end);
	} else {
		void *ptr = page_address(page);
		if (!dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs))
			memset(ptr, 0, size);
		dmac_flush_range(ptr, ptr + size);
		outer_flush_range(__pa(ptr), __pa(ptr) + size);
	}
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size,
				       gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order);
	     p < e; p++)
		__free_page(p);

	__dma_clear_buffer(page, size, NULL);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller,
				     struct dma_attrs *attrs);

static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				  pgprot_t prot, struct page **ret_page,
				  const void *caller);

/*
 * Remap an allocated page range into vmalloc space with the given
 * protection; returns the new virtual address or NULL.
 */
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
	const void *caller)
{
	struct vm_struct *area;
	unsigned long addr;

	/*
	 * DMA allocation can be mapped to user space, so lets
	 * set VM_USERMAP flags too.
	 */
	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = __pfn_to_phys(page_to_pfn(page));

	if (ioremap_page_range(addr, addr + size, area->phys_addr, prot)) {
		vunmap((void *)addr);
		return NULL;
	}
	return (void *)addr;
}

/* Undo __dma_alloc_remap(); @no_warn suppresses the bad-area WARN. */
static void __dma_free_remap(void *cpu_addr, size_t size, bool no_warn)
{
	unsigned int flags = VM_ARM_DMA_CONSISTENT | VM_USERMAP;
	struct vm_struct *area = find_vm_area(cpu_addr);
	if (!area || (area->flags & flags) != flags) {
		WARN(!no_warn, "trying to free invalid coherent area: %p\n",
		     cpu_addr);
		return;
	}
	unmap_kernel_range((unsigned long)cpu_addr, size);
	vunmap(cpu_addr);
}

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K

/* Pre-mapped pool used to satisfy atomic (non-blocking) allocations. */
struct dma_pool {
	size_t size;
	spinlock_t lock;
	unsigned long *bitmap;		/* one bit per page; set = in use */
	unsigned long nr_pages;
	void *vaddr;
	struct page **pages;
};

static struct dma_pool atomic_pool = {
	.size = DEFAULT_DMA_COHERENT_POOL_SIZE,
};

/* "coherent_pool=" command-line override for the atomic pool size. */
static int __init early_coherent_pool(char *p)
{
	atomic_pool.size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

void __init init_dma_coherent_pool_size(unsigned long size)
{
	/*
	 * Catch any attempt to set the pool size too late.
	 */
	BUG_ON(atomic_pool.vaddr);

	/*
	 * Set architecture specific coherent pool size only if
	 * it has not been changed by kernel command line parameter.
	 */
	if (atomic_pool.size == DEFAULT_DMA_COHERENT_POOL_SIZE)
		atomic_pool.size = size;
}

/*
 * Initialise the coherent pool for atomic allocations.
 */
static int __init atomic_pool_init(void)
{
	struct dma_pool *pool = &atomic_pool;
	pgprot_t prot = pgprot_dmacoherent(PAGE_KERNEL);
	gfp_t gfp = GFP_KERNEL | GFP_DMA;
	unsigned long nr_pages = pool->size >> PAGE_SHIFT;
	unsigned long *bitmap;
	struct page *page;
	struct page **pages;
	void *ptr;
	int bitmap_size = BITS_TO_LONGS(nr_pages) * sizeof(long);

	bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!bitmap)
		goto no_bitmap;

	pages = kzalloc(nr_pages * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		goto no_pages;

	/* Back the pool from CMA when available, else plain pages. */
	if (IS_ENABLED(CONFIG_CMA))
		ptr = __alloc_from_contiguous(NULL, pool->size, prot, &page,
					      atomic_pool_init, NULL);
	else
		ptr = __alloc_remap_buffer(NULL, pool->size, gfp, prot, &page,
					   atomic_pool_init);
	if (ptr) {
		int i;

		for (i = 0; i < nr_pages; i++)
			pages[i] = page + i;

		spin_lock_init(&pool->lock);
		pool->vaddr = ptr;
		pool->pages = pages;
		pool->bitmap = bitmap;
		pool->nr_pages = nr_pages;
		pr_info("DMA: preallocated %u KiB pool for atomic coherent allocations\n",
		       (unsigned)pool->size / 1024);
		return 0;
	}

	kfree(pages);
no_pages:
	kfree(bitmap);
no_bitmap:
	pr_err("DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
	       (unsigned)pool->size / 1024);
	return -ENOMEM;
}
/*
 * CMA is activated by core_initcall, so we must be called after it.
 */
postcore_initcall(atomic_pool_init);

/* Early-boot record of CMA regions whose linear mapping must be redone. */
struct dma_contig_early_reserve {
	phys_addr_t base;
	unsigned long size;
};

static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS]
	__initdata;

static int dma_mmu_remap_num __initdata;

void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
{
	dma_mmu_remap[dma_mmu_remap_num].base = base;
	dma_mmu_remap[dma_mmu_remap_num].size = size;
	dma_mmu_remap_num++;
}

/* Remap recorded CMA areas as MT_MEMORY_DMA_READY (page-granular). */
void __init dma_contiguous_remap(void)
{
	int i;
	for (i = 0; i < dma_mmu_remap_num; i++) {
		phys_addr_t start = dma_mmu_remap[i].base;
		phys_addr_t end = start + dma_mmu_remap[i].size;
		struct map_desc map;
		unsigned long addr;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			continue;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY_DMA_READY;

		/*
		 * Clear previous low-memory mapping
		 */
		for (addr = __phys_to_virt(start);
		     addr < __phys_to_virt(end);
		     addr += PMD_SIZE)
			pmd_clear(pmd_off_k(addr));

		iotable_init(&map, 1);
	}
}

/* apply_to_page_range() callback: rewrite a PTE with the new protection. */
static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	struct page *page = virt_to_page(addr);
	pgprot_t prot = *(pgprot_t *)data;

	set_pte_ext(pte, mk_pte(page, prot), 0);
	return 0;
}

/* apply_to_page_range() callback: remove the kernel mapping entirely. */
static int __dma_clear_pte(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data)
{
	pte_clear(&init_mm, addr, pte);
	return 0;
}

/*
 * Change (or clear, when @no_kernel_map) the kernel linear-map
 * protection for a lowmem page range, then flush the TLB.
 */
static void __dma_remap(struct page *page, size_t size, pgprot_t prot,
			bool no_kernel_map)
{
	unsigned long start = (unsigned long) page_address(page);
	/* NOTE(review): 'unsigned' (not 'unsigned long') — harmless on
	 * 32-bit ARM where both are 32 bits, but worth confirming. */
	unsigned end = start + size;
	int (*func)(pte_t *pte, pgtable_t token, unsigned long addr,
			    void *data);

	if (no_kernel_map)
		func = __dma_clear_pte;
	else
		func = __dma_update_pte;

	apply_to_page_range(&init_mm, start, size, func, &prot);
	dsb();
	flush_tlb_kernel_range(start, end);
}

/* Allocate pages and remap them into vmalloc space with @prot. */
static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
				 pgprot_t prot, struct page **ret_page,
				 const void *caller)
{
	struct page *page;
	void *ptr;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
	if (!ptr) {
		__dma_free_buffer(page, size);
		return NULL;
	}

	*ret_page = page;
	return ptr;
}

/* Carve an allocation out of the pre-mapped atomic pool (IRQ-safe). */
static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned int pageno;
	unsigned long flags;
	void *ptr = NULL;
	unsigned long align_mask;

	if (!pool->vaddr) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	/*
	 * Align the region allocation - allocations from pool are rather
	 * small, so align them to their order in pages, minimum is a page
	 * size. This helps reduce fragmentation of the DMA space.
	 */
	align_mask = (1 << get_order(size)) - 1;

	spin_lock_irqsave(&pool->lock, flags);
	pageno = bitmap_find_next_zero_area(pool->bitmap, pool->nr_pages,
					    0, count, align_mask);
	if (pageno < pool->nr_pages) {
		bitmap_set(pool->bitmap, pageno, count);
		ptr = pool->vaddr + PAGE_SIZE * pageno;
		*ret_page = pool->pages[pageno];
	} else {
		pr_err_once("ERROR: %u KiB atomic DMA coherent pool is too small!\n"
			    "Please increase it with coherent_pool= kernel parameter!\n",
			    (unsigned)pool->size / 1024);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	return ptr;
}

/* True if [start, start+size) lies wholly within the atomic pool. */
static bool __in_atomic_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	void *end = start + size;
	void *pool_start = pool->vaddr;
	void *pool_end = pool->vaddr + pool->size;

	if (start < pool_start || start >= pool_end)
		return false;

	if (end <= pool_end)
		return true;

	WARN(1, "Wrong coherent size(%p-%p) from atomic pool(%p-%p)\n",
	     start, end - 1, pool_start, pool_end - 1);

	return false;
}

/* Return 1 and release the range if it came from the atomic pool. */
static int __free_from_pool(void *start, size_t size)
{
	struct dma_pool *pool = &atomic_pool;
	unsigned long pageno, count;
	unsigned long flags;

	if (!__in_atomic_pool(start, size))
		return 0;

	pageno = (start - pool->vaddr) >> PAGE_SHIFT;
	count = size >> PAGE_SHIFT;

	spin_lock_irqsave(&pool->lock, flags);
	bitmap_clear(pool->bitmap, pageno, count);
	spin_unlock_irqrestore(&pool->lock, flags);

	return 1;
}

#define NO_KERNEL_MAPPING_DUMMY	0x2222

/*
 * Allocate from the CMA region; remap (or skip mapping) the result
 * depending on highmem status and DMA_ATTR_NO_KERNEL_MAPPING.
 */
static void *__alloc_from_contiguous(struct device *dev, size_t size,
				     pgprot_t prot, struct page **ret_page,
				     const void *caller,
				     struct dma_attrs *attrs)
{
	unsigned long order = get_order(size);
	size_t count = size >> PAGE_SHIFT;
	unsigned long pfn;
	struct page *page;
	void *ptr;
	bool no_kernel_mapping = dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING,
							attrs);

	pfn = dma_alloc_from_contiguous(dev, count, order);
	if (!pfn)
		return NULL;

	page = pfn_to_page(pfn);

	/*
	 * skip completely if we neither need to zero nor sync.
	 */
	if (!(dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs) &&
	      dma_get_attr(DMA_ATTR_SKIP_ZEROING, attrs)))
		__dma_clear_buffer(page, size, attrs);

	if (PageHighMem(page)) {
		if (no_kernel_mapping) {
			/*
			 * Something non-NULL needs to be returned here. Give
			 * back a dummy address that is unmapped to catch
			 * clients trying to use the address incorrectly
			 */
			ptr = (void *)NO_KERNEL_MAPPING_DUMMY;
		} else {
			ptr = __dma_alloc_remap(page, size, GFP_KERNEL, prot,
						caller);
			if (!ptr) {
				dma_release_from_contiguous(dev, pfn, count);
				return NULL;
			}
		}
	} else {
		__dma_remap(page, size, prot, no_kernel_mapping);
		ptr = page_address(page);
	}
	*ret_page = page;
	return ptr;
}

/* Release a CMA allocation, restoring the kernel linear mapping first. */
static void __free_from_contiguous(struct device *dev, struct page *page,
				   void *cpu_addr, size_t size)
{
	if (PageHighMem(page))
		__dma_free_remap(cpu_addr, size, true);
	else
		__dma_remap(page, size, PAGE_KERNEL, false);
	dma_release_from_contiguous(dev, page_to_pfn(page),
				    size >> PAGE_SHIFT);
}

/* Translate DMA attrs into the page protection used for the mapping. */
static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs,
					pgprot_t prot)
{
	if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		prot = pgprot_writecombine(prot);
	else if (dma_get_attr(DMA_ATTR_STRONGLY_ORDERED, attrs))
		prot = pgprot_stronglyordered(prot);
	/* if non-consistent just pass back what was given */
	else if (!dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
		prot = pgprot_dmacoherent(prot);
	return prot;
}

#define nommu() 0

#else	/* !CONFIG_MMU */

#define nommu() 1

#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c)	NULL
#define __alloc_from_pool(dev, size, ret_page, c)		NULL
#define __alloc_from_contiguous(dev, size, prot, ret, c, w)	NULL
#define __free_from_pool(cpu_addr, size)			0
#define __free_from_contiguous(dev, page, cpu_addr, size)	do { } while (0)
#define __dma_free_remap(cpu_addr, size, w)			do { } while (0)
#define __get_dma_pgprot(attrs, prot)				__pgprot(0)

#endif	/* CONFIG_MMU */

/* Plain page allocation using the kernel linear mapping (no remap). */
static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
				   struct page **ret_page)
{
	struct page *page;
	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*ret_page = page;
	return page_address(page);
}

/*
 * Core allocator: validates the mask, then dispatches to the simple,
 * atomic-pool, remap, or CMA backend depending on coherency, gfp flags
 * and kernel configuration. Sets *handle to the bus address on success.
 */
static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			 gfp_t gfp, pgprot_t prot, bool is_coherent,
			 const void *caller, struct dma_attrs *attrs)
{
	u64 mask = get_coherent_dma_mask(dev);
	struct page *page = NULL;
	void *addr;

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	/*
	 * Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on ARM as it is not supported on this
	 * platform; see CONFIG_HUGETLBFS.
	 */
	gfp &= ~(__GFP_COMP);

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (is_coherent || nommu())
		addr = __alloc_simple_buffer(dev, size, gfp, &page);
	else if (!(gfp & __GFP_WAIT))
		addr = __alloc_from_pool(size, &page);
	else if (!IS_ENABLED(CONFIG_CMA))
		addr = __alloc_remap_buffer(dev, size, gfp, prot, &page,
					    caller);
	else
		addr = __alloc_from_contiguous(dev, size, prot, &page,
					       caller, attrs);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
		    gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, false,
			   __builtin_return_address(0), attrs);
}

/* As arm_dma_alloc(), but for hardware-coherent devices. */
static void *arm_coherent_dma_alloc(struct device *dev, size_t size,
	dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp, prot, true,
			   __builtin_return_address(0), attrs);
}

/*
 * Create userspace mapping for the DMA-coherent memory.
 */
int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
		 void *cpu_addr, dma_addr_t dma_addr, size_t size,
		 struct dma_attrs *attrs)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_pfn(dev, dma_addr);
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	/* Refuse VMAs that would extend past the end of the buffer. */
	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}
#endif	/* CONFIG_MMU */

	return ret;
}

/*
 * Free a buffer as defined by the above mapping.
 */
static void __arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			   dma_addr_t handle, struct dma_attrs *attrs,
			   bool is_coherent)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	/* Mirror the back-end selection made in __dma_alloc(). */
	if (is_coherent || nommu()) {
		__dma_free_buffer(page, size);
	} else if (__free_from_pool(cpu_addr, size)) {
		return;
	} else if (!IS_ENABLED(CONFIG_CMA)) {
		__dma_free_remap(cpu_addr, size, false);
		__dma_free_buffer(page, size);
	} else {
		/*
		 * Non-atomic allocations cannot be freed with IRQs disabled
		 */
		WARN_ON(irqs_disabled());
		__free_from_contiguous(dev, page, cpu_addr, size);
	}
}

void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, false);
}

static void arm_coherent_dma_free(struct device *dev, size_t size, void *cpu_addr,
				  dma_addr_t handle, struct dma_attrs *attrs)
{
	__arm_dma_free(dev, size, cpu_addr, handle, attrs, true);
}

/* Describe a coherent buffer as a single-entry scatter-gather table. */
int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
			void *cpu_addr, dma_addr_t handle, size_t size,
			struct dma_attrs *attrs)
{
	struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
	int ret;

	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
	if (unlikely(ret))
		return ret;

	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	return 0;
}

/*
 * Apply a cache maintenance operation 'op' to a (possibly highmem)
 * physically contiguous page range, one page at a time.
 */
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	unsigned long pfn;
	size_t left = size;

	pfn = page_to_pfn(page) + offset / PAGE_SIZE;
	offset %= PAGE_SIZE;

	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages. But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	do {
		size_t len = left;
		void *vaddr;

		page = pfn_to_page(pfn);

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE)
				len = PAGE_SIZE - offset;

			if (cache_is_vipt_nonaliasing()) {
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			} else {
				/* only maintain pages with an existing kmap */
				vaddr = kmap_high_get(page);
				if (vaddr) {
					op(vaddr + offset, len, dir);
					kunmap_high(page);
				}
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		pfn++;
		left -= len;
	} while (left);
}

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings?
	 */
}

/* Inverse of __dma_page_cpu_to_dev(): hand ownership back to the CPU. */
static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}

/**
 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		s->dma_length = s->length;
#endif
		s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
						s->length, dir, attrs);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	/* Unwind the i entries already mapped before reporting failure. */
	for_each_sg(sg, s, i, j)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
	return 0;
}

/**
 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;

	int i;

	for_each_sg(sg, s, nents, i)
		ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
}

/**
 * arm_dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
					 dir);
}

/**
 * arm_dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
					    dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 */
int dma_supported(struct device *dev, u64 mask)
{
	return __dma_supported(dev, mask, false);
}
EXPORT_SYMBOL(dma_supported);

/* dma_map_ops .set_dma_mask hook: validate, then store the new mask. */
static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_ARM_DMA_USE_IOMMU

/* IOMMU */

/*
 * Carve an order-aligned IO virtual address range out of the mapping's
 * allocation bitmap.  Returns DMA_ERROR_CODE when the space is exhausted.
 */
static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
				      size_t size)
{
	unsigned int order = get_order(size);
	unsigned int align = 0;
	unsigned int count, start;
	unsigned long flags;

	if (order > CONFIG_ARM_DMA_IOMMU_ALIGNMENT)
		order = CONFIG_ARM_DMA_IOMMU_ALIGNMENT;

	/* one bitmap bit covers (1 << mapping->order) pages */
	count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
		 (1 << mapping->order) - 1) >> mapping->order;

	if (order > mapping->order)
		align = (1 << (order - mapping->order)) - 1;

	spin_lock_irqsave(&mapping->lock, flags);
	start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
					   count, align);
	/*
	 * NOTE(review): bitmap_find_next_zero_area() signals failure with any
	 * value >= mapping->bits, so this '>' test appears to miss the
	 * start == mapping->bits case (compare the '<' test used in
	 * __alloc_from_pool() above) - TODO confirm.
	 */
	if (start > mapping->bits) {
		spin_unlock_irqrestore(&mapping->lock, flags);
		return DMA_ERROR_CODE;
	}

	bitmap_set(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);

	return mapping->base + (start << (mapping->order + PAGE_SHIFT));
}

/* Return an IO virtual address range to the mapping's bitmap. */
static inline void __free_iova(struct dma_iommu_mapping *mapping,
			       dma_addr_t addr, size_t size)
{
	unsigned int start = (addr - mapping->base) >>
			     (mapping->order + PAGE_SHIFT);
	unsigned int count = ((size >> PAGE_SHIFT) +
			      (1 << mapping->order) - 1) >> mapping->order;
	unsigned long flags;

	spin_lock_irqsave(&mapping->lock, flags);
	bitmap_clear(mapping->bitmap, start, count);
	spin_unlock_irqrestore(&mapping->lock, flags);
}

/*
 * Allocate the backing pages for an IOMMU buffer.  The pages need not be
 * physically contiguous unless DMA_ATTR_FORCE_CONTIGUOUS is set (in which
 * case they come from CMA).
 */
static struct page **__iommu_alloc_buffer(struct device *dev, size_t size,
					  gfp_t gfp, struct dma_attrs *attrs)
{
	struct page **pages;
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i = 0;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, gfp);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		unsigned long order = get_order(size);
		struct page *page;
		unsigned long pfn;

		pfn = dma_alloc_from_contiguous(dev, count, order);
		if (!pfn)
			goto error;

		page = pfn_to_page(pfn);

		__dma_clear_buffer(page, size, NULL);

		for (i = 0; i < count; i++)
			pages[i] = page + i;

		return pages;
	}

	/*
	 * IOMMU can map any pages, so himem can also be used here
	 */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		int j, order = __fls(count);

		/* try the largest remaining order first, then back off */
		pages[i] = alloc_pages(gfp, order);
		while (!pages[i] && order)
			pages[i] = alloc_pages(gfp, --order);
		if (!pages[i])
			goto error;

		if (order) {
			split_page(pages[i], order);
			j = 1 << order;
			while (--j)
				pages[i + j] = pages[i] + j;
		}

		__dma_clear_buffer(pages[i], PAGE_SIZE << order, NULL);
		i += 1 << order;
		count -= 1 << order;
	}

	return pages;
error:
	while (i--)
		if (pages[i])
			__free_pages(pages[i], 0);
	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return NULL;
}

/* Free pages previously obtained from __iommu_alloc_buffer(). */
static int __iommu_free_buffer(struct device *dev, struct page **pages,
			       size_t size, struct dma_attrs *attrs)
{
	int count = size >> PAGE_SHIFT;
	int array_size = count * sizeof(struct page *);
	int i;

	if (dma_get_attr(DMA_ATTR_FORCE_CONTIGUOUS, attrs)) {
		dma_release_from_contiguous(dev, page_to_pfn(pages[0]),
					    count);
	} else {
		for (i = 0; i < count; i++)
			if (pages[i])
				__free_pages(pages[i], 0);
	}

	if (array_size <= PAGE_SIZE)
		kfree(pages);
	else
		vfree(pages);
	return 0;
}

/*
 * Create a CPU mapping for a specified pages
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
		    const void *caller)
{
	unsigned int i, nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area;
	unsigned long p;

	area = get_vm_area_caller(size, VM_ARM_DMA_CONSISTENT | VM_USERMAP,
				  caller);
	if (!area)
		return NULL;

	area->pages = pages;
	area->nr_pages = nr_pages;
	p = (unsigned long)area->addr;

	for (i = 0; i < nr_pages; i++) {
		phys_addr_t phys = __pfn_to_phys(page_to_pfn(pages[i]));
		if (ioremap_page_range(p, p + PAGE_SIZE, phys, prot))
			goto err;
		p += PAGE_SIZE;
	}
	return area->addr;
err:
	unmap_kernel_range((unsigned long)area->addr, size);
	vunmap(area->addr);
	return NULL;
}

/*
 * Create a mapping in device IO address space for specified pages
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		/* coalesce runs of physically contiguous pages into one map */
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}

/* Tear down the IO address space mapping created above. */
static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;

	/*
	 * add optional in-page offset from iova to size and align
	 * result to page size
	 */
	size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
	iova &= PAGE_MASK;

	iommu_unmap(mapping->domain, iova, size);
	__free_iova(mapping, iova, size);
	return 0;
}

/* Page array for a buffer that lives in the atomic pool. */
static struct page **__atomic_get_pages(void *addr)
{
	struct dma_pool *pool = &atomic_pool;
	struct page **pages = pool->pages;
	int offs = (addr - pool->vaddr) >> PAGE_SHIFT;

	return pages + offs;
}

/* Recover the page array backing an IOMMU allocation from its CPU address. */
static struct page **__iommu_get_pages(void *cpu_addr, struct dma_attrs *attrs)
{
	struct vm_struct *area;

	if (__in_atomic_pool(cpu_addr, PAGE_SIZE))
		return __atomic_get_pages(cpu_addr);

	if
(dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return cpu_addr;

	area = find_vm_area(cpu_addr);
	if (area && (area->flags & VM_ARM_DMA_CONSISTENT))
		return area->pages;
	return NULL;
}

/* Atomic-context IOMMU allocation: back the buffer with the atomic pool. */
static void *__iommu_alloc_atomic(struct device *dev, size_t size,
				  dma_addr_t *handle)
{
	struct page *page;
	void *addr;

	addr = __alloc_from_pool(size, &page);
	if (!addr)
		return NULL;

	*handle = __iommu_create_mapping(dev, &page, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_mapping;

	return addr;

err_mapping:
	__free_from_pool(addr, size);
	return NULL;
}

/* Undo __iommu_alloc_atomic(). */
static void __iommu_free_atomic(struct device *dev, void *cpu_addr,
				dma_addr_t handle, size_t size)
{
	__iommu_remove_mapping(dev, handle, size);
	__free_from_pool(cpu_addr, size);
}

/* dma_map_ops .alloc for IOMMU-attached devices. */
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	if (!(gfp & __GFP_WAIT))
		return __iommu_alloc_atomic(dev, size, handle);

	pages = __iommu_alloc_buffer(dev, size, gfp, attrs);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	/* no kernel mapping wanted: the page array doubles as the cookie */
	if (dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs))
		return pages;

	addr = __iommu_alloc_remap(pages, size, gfp, prot,
				   __builtin_return_address(0));
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size, attrs);
	return NULL;
}

/* Map an IOMMU allocation into userspace, one page at a time. */
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

	if (!pages)
		return -ENXIO;

	do {
		int ret = vm_insert_page(vma, uaddr, *pages++);
		if (ret) {
			pr_err("Remapping memory failed: %d\n", ret);
			return ret;
		}
		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	return 0;
}

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);
	size = PAGE_ALIGN(size);

	if (!pages) {
		WARN(1, "trying to free invalid coherent area: %p\n", cpu_addr);
		return;
	}

	if (__in_atomic_pool(cpu_addr, size)) {
		__iommu_free_atomic(dev, cpu_addr, handle, size);
		return;
	}

	if (!dma_get_attr(DMA_ATTR_NO_KERNEL_MAPPING, attrs)) {
		unmap_kernel_range((unsigned long)cpu_addr, size);
		vunmap(cpu_addr);
	}

	__iommu_remove_mapping(dev, handle, size);
	__iommu_free_buffer(dev, pages, size, attrs);
}

static int arm_iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages = __iommu_get_pages(cpu_addr, attrs);

	if (!pages)
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, pages, count, 0, size,
					 GFP_KERNEL);
}

/*
 * Map a part of the scatter-gather list into contiguous io address space
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir, struct dma_attrs *attrs,
			  bool is_coherent)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!is_coherent &&
			!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}

/*
 * Walk the scatterlist, merging adjacent entries into maximal chunks that
 * can be mapped IO-virtually contiguous, and map each chunk via
 * __map_sg_chunk().  Returns the number of resulting dma segments.
 */
static int __iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs,
		     bool is_coherent)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir, attrs, is_coherent) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir, attrs,
		is_coherent) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}

/**
 * arm_coherent_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of i/o coherent buffers described by scatterlist in streaming
 * mode for DMA. The scatter gather list elements are merged together (if
 * possible) and tagged with the appropriate dma address and length. They are
 * obtained via sg_dma_{address,length}.
 */
int arm_coherent_iommu_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, struct dma_attrs *attrs)
{
	return __iommu_map_sg(dev, sg, nents, dir, attrs, false);
}

/* Common unmap path for the coherent and non-coherent sg unmap entry points. */
static void __iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs,
		bool is_coherent)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_len(s))
			__iommu_remove_mapping(dev, sg_dma_address(s),
					       sg_dma_len(s));
		if (!is_coherent &&
		    !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
			__dma_page_dev_to_cpu(sg_page(s), s->offset,
					      s->length, dir);
	}
}

/**
 * arm_coherent_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_coherent_iommu_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, true);
}

/**
 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations. Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
			enum dma_data_direction dir, struct dma_attrs *attrs)
{
	__iommu_unmap_sg(dev, sg, nents, dir, attrs, false);
}

/**
 * arm_iommu_sync_sg_for_cpu
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);

}

/**
 * arm_iommu_sync_sg_for_device
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
}

/**
 * arm_coherent_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Coherent IOMMU aware version of
arm_dma_map_page()
 */
static dma_addr_t arm_coherent_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, len = PAGE_ALIGN(size + offset);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
	if (ret < 0)
		goto fail;

	/* the in-page offset is preserved in the returned device address */
	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}

/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_cpu_to_dev(page, offset, size, dir);

	return arm_coherent_iommu_map_page(dev, page, offset, size, dir, attrs);
}

/**
 * arm_coherent_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Coherent IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_coherent_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size:
size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	if (!iova)
		return;

	if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}

static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_dev_to_cpu(page, offset, size, dir);
}

static void arm_iommu_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	__dma_page_cpu_to_dev(page, offset, size, dir);
}

/* dma_map_ops for non-coherent devices behind an IOMMU. */
struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,

	.set_dma_mask		= arm_dma_set_mask,
};

/*
 * dma_map_ops for cache-coherent devices behind an IOMMU: no CPU cache
 * maintenance, hence no sync hooks.
 */
struct dma_map_ops iommu_coherent_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,
	.get_sgtable	= arm_iommu_get_sgtable,

	.map_page	= arm_coherent_iommu_map_page,
	.unmap_page	= arm_coherent_iommu_unmap_page,

	.map_sg		= arm_coherent_iommu_map_sg,
	.unmap_sg	= arm_coherent_iommu_unmap_sg,

	.set_dma_mask	= arm_dma_set_mask,
};

/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: size of the valid IO address space
 * @order: accuracy of the IO addresses allocations
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device need to be attached to the mapping with
 * arm_iommu_attach_device function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
			 int order)
{
	unsigned int count = size >> (PAGE_SHIFT + order);
	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int err = -ENOMEM;

	if (!count)
		return ERR_PTR(-EINVAL);

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmap)
		goto err2;

	mapping->base = base;
	mapping->bits = BITS_PER_BYTE * bitmap_size;
	mapping->order = order;
	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err3;

	kref_init(&mapping->kref);
	return mapping;
err3:
	kfree(mapping->bitmap);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(arm_iommu_create_mapping);

/* kref release callback: tear down the domain and free the bookkeeping. */
static void release_iommu_mapping(struct kref *kref)
{
	struct dma_iommu_mapping *mapping =
		container_of(kref, struct dma_iommu_mapping, kref);

	iommu_domain_free(mapping->domain);
	kfree(mapping->bitmap);
	kfree(mapping);
}

void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
{
	if (mapping)
		kref_put(&mapping->kref, release_iommu_mapping);
}
EXPORT_SYMBOL_GPL(arm_iommu_release_mapping);

/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. More than one client might be attached to
 * the same io address space mapping.
*/ int arm_iommu_attach_device(struct device *dev, struct dma_iommu_mapping *mapping) { int err; err = iommu_attach_device(mapping->domain, dev); if (err) return err; kref_get(&mapping->kref); dev->archdata.mapping = mapping; set_dma_ops(dev, &iommu_ops); pr_debug("Attached IOMMU controller to %s device.\n", dev_name(dev)); return 0; } EXPORT_SYMBOL_GPL(arm_iommu_attach_device); /** * arm_iommu_detach_device * @dev: valid struct device pointer * * Detaches the provided device from a previously attached map. * This voids the dma operations (dma_map_ops pointer) */ void arm_iommu_detach_device(struct device *dev) { struct dma_iommu_mapping *mapping; mapping = to_dma_iommu_mapping(dev); if (!mapping) { dev_warn(dev, "Not attached\n"); return; } iommu_detach_device(mapping->domain, dev); kref_put(&mapping->kref, release_iommu_mapping); mapping = NULL; set_dma_ops(dev, NULL); pr_debug("Detached IOMMU controller from %s device.\n", dev_name(dev)); } EXPORT_SYMBOL_GPL(arm_iommu_detach_device); #endif
gpl-2.0
ArthySundaram/chromeos-3.8
arch/arm/mach-omap2/devices.c
63
17053
/* * linux/arch/arm/mach-omap2/devices.c * * OMAP2 platform device setup/initialization * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/gpio.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/pinctrl/machine.h> #include <linux/platform_data/omap4-keypad.h> #include <linux/platform_data/omap_ocp2scp.h> #include <asm/mach-types.h> #include <asm/mach/map.h> #include <linux/omap-dma.h> #include "iomap.h" #include "omap_hwmod.h" #include "omap_device.h" #include "omap4-keypad.h" #include "soc.h" #include "common.h" #include "mux.h" #include "control.h" #include "devices.h" #include "dma.h" #define L3_MODULES_MAX_LEN 12 #define L3_MODULES 3 static int __init omap3_l3_init(void) { struct omap_hwmod *oh; struct platform_device *pdev; char oh_name[L3_MODULES_MAX_LEN]; /* * To avoid code running on other OMAPs in * multi-omap builds */ if (!(cpu_is_omap34xx())) return -ENODEV; snprintf(oh_name, L3_MODULES_MAX_LEN, "l3_main"); oh = omap_hwmod_lookup(oh_name); if (!oh) pr_err("could not look up %s\n", oh_name); pdev = omap_device_build("omap_l3_smx", 0, oh, NULL, 0, NULL, 0, 0); WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); return IS_ERR(pdev) ? 
PTR_ERR(pdev) : 0; } postcore_initcall(omap3_l3_init); static int __init omap4_l3_init(void) { int i; struct omap_hwmod *oh[3]; struct platform_device *pdev; char oh_name[L3_MODULES_MAX_LEN]; /* If dtb is there, the devices will be created dynamically */ if (of_have_populated_dt()) return -ENODEV; /* * To avoid code running on other OMAPs in * multi-omap builds */ if (!cpu_is_omap44xx() && !soc_is_omap54xx()) return -ENODEV; for (i = 0; i < L3_MODULES; i++) { snprintf(oh_name, L3_MODULES_MAX_LEN, "l3_main_%d", i+1); oh[i] = omap_hwmod_lookup(oh_name); if (!(oh[i])) pr_err("could not look up %s\n", oh_name); } pdev = omap_device_build_ss("omap_l3_noc", 0, oh, 3, NULL, 0, NULL, 0, 0); WARN(IS_ERR(pdev), "could not build omap_device for %s\n", oh_name); return IS_ERR(pdev) ? PTR_ERR(pdev) : 0; } postcore_initcall(omap4_l3_init); #if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE) static struct resource omap2cam_resources[] = { { .start = OMAP24XX_CAMERA_BASE, .end = OMAP24XX_CAMERA_BASE + 0xfff, .flags = IORESOURCE_MEM, }, { .start = 24 + OMAP_INTC_START, .flags = IORESOURCE_IRQ, } }; static struct platform_device omap2cam_device = { .name = "omap24xxcam", .id = -1, .num_resources = ARRAY_SIZE(omap2cam_resources), .resource = omap2cam_resources, }; #endif #if defined(CONFIG_IOMMU_API) #include <linux/platform_data/iommu-omap.h> static struct resource omap3isp_resources[] = { { .start = OMAP3430_ISP_BASE, .end = OMAP3430_ISP_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3430_ISP_CCP2_BASE, .end = OMAP3430_ISP_CCP2_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3430_ISP_CCDC_BASE, .end = OMAP3430_ISP_CCDC_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3430_ISP_HIST_BASE, .end = OMAP3430_ISP_HIST_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3430_ISP_H3A_BASE, .end = OMAP3430_ISP_H3A_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3430_ISP_PREV_BASE, .end = OMAP3430_ISP_PREV_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3430_ISP_RESZ_BASE, .end 
= OMAP3430_ISP_RESZ_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3430_ISP_SBL_BASE, .end = OMAP3430_ISP_SBL_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3430_ISP_CSI2A_REGS1_BASE, .end = OMAP3430_ISP_CSI2A_REGS1_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3430_ISP_CSIPHY2_BASE, .end = OMAP3430_ISP_CSIPHY2_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3630_ISP_CSI2A_REGS2_BASE, .end = OMAP3630_ISP_CSI2A_REGS2_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3630_ISP_CSI2C_REGS1_BASE, .end = OMAP3630_ISP_CSI2C_REGS1_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3630_ISP_CSIPHY1_BASE, .end = OMAP3630_ISP_CSIPHY1_END, .flags = IORESOURCE_MEM, }, { .start = OMAP3630_ISP_CSI2C_REGS2_BASE, .end = OMAP3630_ISP_CSI2C_REGS2_END, .flags = IORESOURCE_MEM, }, { .start = OMAP343X_CTRL_BASE + OMAP343X_CONTROL_CSIRXFE, .end = OMAP343X_CTRL_BASE + OMAP343X_CONTROL_CSIRXFE + 3, .flags = IORESOURCE_MEM, }, { .start = OMAP343X_CTRL_BASE + OMAP3630_CONTROL_CAMERA_PHY_CTRL, .end = OMAP343X_CTRL_BASE + OMAP3630_CONTROL_CAMERA_PHY_CTRL + 3, .flags = IORESOURCE_MEM, }, { .start = 24 + OMAP_INTC_START, .flags = IORESOURCE_IRQ, } }; static struct platform_device omap3isp_device = { .name = "omap3isp", .id = -1, .num_resources = ARRAY_SIZE(omap3isp_resources), .resource = omap3isp_resources, }; static struct omap_iommu_arch_data omap3_isp_iommu = { .name = "mmu_isp", }; int omap3_init_camera(struct isp_platform_data *pdata) { omap3isp_device.dev.platform_data = pdata; omap3isp_device.dev.archdata.iommu = &omap3_isp_iommu; return platform_device_register(&omap3isp_device); } #else /* !CONFIG_IOMMU_API */ int omap3_init_camera(struct isp_platform_data *pdata) { return 0; } #endif static inline void omap_init_camera(void) { #if defined(CONFIG_VIDEO_OMAP2) || defined(CONFIG_VIDEO_OMAP2_MODULE) if (cpu_is_omap24xx()) platform_device_register(&omap2cam_device); #endif } int __init omap4_keyboard_init(struct omap4_keypad_platform_data *sdp4430_keypad_data, struct omap_board_data *bdata) 
{ struct platform_device *pdev; struct omap_hwmod *oh; struct omap4_keypad_platform_data *keypad_data; unsigned int id = -1; char *oh_name = "kbd"; char *name = "omap4-keypad"; oh = omap_hwmod_lookup(oh_name); if (!oh) { pr_err("Could not look up %s\n", oh_name); return -ENODEV; } keypad_data = sdp4430_keypad_data; pdev = omap_device_build(name, id, oh, keypad_data, sizeof(struct omap4_keypad_platform_data), NULL, 0, 0); if (IS_ERR(pdev)) { WARN(1, "Can't build omap_device for %s:%s.\n", name, oh->name); return PTR_ERR(pdev); } oh->mux = omap_hwmod_mux_init(bdata->pads, bdata->pads_cnt); return 0; } #if defined(CONFIG_OMAP_MBOX_FWK) || defined(CONFIG_OMAP_MBOX_FWK_MODULE) static inline void __init omap_init_mbox(void) { struct omap_hwmod *oh; struct platform_device *pdev; oh = omap_hwmod_lookup("mailbox"); if (!oh) { pr_err("%s: unable to find hwmod\n", __func__); return; } pdev = omap_device_build("omap-mailbox", -1, oh, NULL, 0, NULL, 0, 0); WARN(IS_ERR(pdev), "%s: could not build device, err %ld\n", __func__, PTR_ERR(pdev)); } #else static inline void omap_init_mbox(void) { } #endif /* CONFIG_OMAP_MBOX_FWK */ static inline void omap_init_sti(void) {} #if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE) static struct platform_device omap_pcm = { .name = "omap-pcm-audio", .id = -1, }; static void omap_init_audio(void) { platform_device_register(&omap_pcm); } #else static inline void omap_init_audio(void) {} #endif #if defined(CONFIG_SND_OMAP_SOC_MCPDM) || \ defined(CONFIG_SND_OMAP_SOC_MCPDM_MODULE) static void __init omap_init_mcpdm(void) { struct omap_hwmod *oh; struct platform_device *pdev; oh = omap_hwmod_lookup("mcpdm"); if (!oh) { printk(KERN_ERR "Could not look up mcpdm hw_mod\n"); return; } pdev = omap_device_build("omap-mcpdm", -1, oh, NULL, 0, NULL, 0, 0); WARN(IS_ERR(pdev), "Can't build omap_device for omap-mcpdm.\n"); } #else static inline void omap_init_mcpdm(void) {} #endif #if defined(CONFIG_SND_OMAP_SOC_DMIC) || \ 
defined(CONFIG_SND_OMAP_SOC_DMIC_MODULE) static void __init omap_init_dmic(void) { struct omap_hwmod *oh; struct platform_device *pdev; oh = omap_hwmod_lookup("dmic"); if (!oh) { pr_err("Could not look up dmic hw_mod\n"); return; } pdev = omap_device_build("omap-dmic", -1, oh, NULL, 0, NULL, 0, 0); WARN(IS_ERR(pdev), "Can't build omap_device for omap-dmic.\n"); } #else static inline void omap_init_dmic(void) {} #endif #if defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI) || \ defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI_MODULE) static struct platform_device omap_hdmi_audio = { .name = "omap-hdmi-audio", .id = -1, }; static void __init omap_init_hdmi_audio(void) { struct omap_hwmod *oh; struct platform_device *pdev; oh = omap_hwmod_lookup("dss_hdmi"); if (!oh) { printk(KERN_ERR "Could not look up dss_hdmi hw_mod\n"); return; } pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0, NULL, 0, 0); WARN(IS_ERR(pdev), "Can't build omap_device for omap-hdmi-audio-dai.\n"); platform_device_register(&omap_hdmi_audio); } #else static inline void omap_init_hdmi_audio(void) {} #endif #if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE) #include <linux/platform_data/spi-omap2-mcspi.h> static int __init omap_mcspi_init(struct omap_hwmod *oh, void *unused) { struct platform_device *pdev; char *name = "omap2_mcspi"; struct omap2_mcspi_platform_config *pdata; static int spi_num; struct omap2_mcspi_dev_attr *mcspi_attrib = oh->dev_attr; pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) { pr_err("Memory allocation for McSPI device failed\n"); return -ENOMEM; } pdata->num_cs = mcspi_attrib->num_chipselect; switch (oh->class->rev) { case OMAP2_MCSPI_REV: case OMAP3_MCSPI_REV: pdata->regs_offset = 0; break; case OMAP4_MCSPI_REV: pdata->regs_offset = OMAP4_MCSPI_REG_OFFSET; break; default: pr_err("Invalid McSPI Revision value\n"); kfree(pdata); return -EINVAL; } spi_num++; pdev = omap_device_build(name, spi_num, oh, pdata, sizeof(*pdata), NULL, 0, 0); WARN(IS_ERR(pdev), 
"Can't build omap_device for %s:%s\n", name, oh->name); kfree(pdata); return 0; } static void omap_init_mcspi(void) { omap_hwmod_for_each_by_class("mcspi", omap_mcspi_init, NULL); } #else static inline void omap_init_mcspi(void) {} #endif /** * omap_init_rng - bind the RNG hwmod to the RNG omap_device * * Bind the RNG hwmod to the RNG omap_device. No return value. */ static void omap_init_rng(void) { struct omap_hwmod *oh; struct platform_device *pdev; oh = omap_hwmod_lookup("rng"); if (!oh) return; pdev = omap_device_build("omap_rng", -1, oh, NULL, 0, NULL, 0, 0); WARN(IS_ERR(pdev), "Can't build omap_device for omap_rng\n"); } #if defined(CONFIG_CRYPTO_DEV_OMAP_SHAM) || defined(CONFIG_CRYPTO_DEV_OMAP_SHAM_MODULE) #ifdef CONFIG_ARCH_OMAP2 static struct resource omap2_sham_resources[] = { { .start = OMAP24XX_SEC_SHA1MD5_BASE, .end = OMAP24XX_SEC_SHA1MD5_BASE + 0x64, .flags = IORESOURCE_MEM, }, { .start = 51 + OMAP_INTC_START, .flags = IORESOURCE_IRQ, } }; static int omap2_sham_resources_sz = ARRAY_SIZE(omap2_sham_resources); #else #define omap2_sham_resources NULL #define omap2_sham_resources_sz 0 #endif #ifdef CONFIG_ARCH_OMAP3 static struct resource omap3_sham_resources[] = { { .start = OMAP34XX_SEC_SHA1MD5_BASE, .end = OMAP34XX_SEC_SHA1MD5_BASE + 0x64, .flags = IORESOURCE_MEM, }, { .start = 49 + OMAP_INTC_START, .flags = IORESOURCE_IRQ, }, { .start = OMAP34XX_DMA_SHA1MD5_RX, .flags = IORESOURCE_DMA, } }; static int omap3_sham_resources_sz = ARRAY_SIZE(omap3_sham_resources); #else #define omap3_sham_resources NULL #define omap3_sham_resources_sz 0 #endif static struct platform_device sham_device = { .name = "omap-sham", .id = -1, }; static void omap_init_sham(void) { if (cpu_is_omap24xx()) { sham_device.resource = omap2_sham_resources; sham_device.num_resources = omap2_sham_resources_sz; } else if (cpu_is_omap34xx()) { sham_device.resource = omap3_sham_resources; sham_device.num_resources = omap3_sham_resources_sz; } else { pr_err("%s: platform not supported\n", 
__func__); return; } platform_device_register(&sham_device); } #else static inline void omap_init_sham(void) { } #endif #if defined(CONFIG_CRYPTO_DEV_OMAP_AES) || defined(CONFIG_CRYPTO_DEV_OMAP_AES_MODULE) #ifdef CONFIG_ARCH_OMAP2 static struct resource omap2_aes_resources[] = { { .start = OMAP24XX_SEC_AES_BASE, .end = OMAP24XX_SEC_AES_BASE + 0x4C, .flags = IORESOURCE_MEM, }, { .start = OMAP24XX_DMA_AES_TX, .flags = IORESOURCE_DMA, }, { .start = OMAP24XX_DMA_AES_RX, .flags = IORESOURCE_DMA, } }; static int omap2_aes_resources_sz = ARRAY_SIZE(omap2_aes_resources); #else #define omap2_aes_resources NULL #define omap2_aes_resources_sz 0 #endif #ifdef CONFIG_ARCH_OMAP3 static struct resource omap3_aes_resources[] = { { .start = OMAP34XX_SEC_AES_BASE, .end = OMAP34XX_SEC_AES_BASE + 0x4C, .flags = IORESOURCE_MEM, }, { .start = OMAP34XX_DMA_AES2_TX, .flags = IORESOURCE_DMA, }, { .start = OMAP34XX_DMA_AES2_RX, .flags = IORESOURCE_DMA, } }; static int omap3_aes_resources_sz = ARRAY_SIZE(omap3_aes_resources); #else #define omap3_aes_resources NULL #define omap3_aes_resources_sz 0 #endif static struct platform_device aes_device = { .name = "omap-aes", .id = -1, }; static void omap_init_aes(void) { if (cpu_is_omap24xx()) { aes_device.resource = omap2_aes_resources; aes_device.num_resources = omap2_aes_resources_sz; } else if (cpu_is_omap34xx()) { aes_device.resource = omap3_aes_resources; aes_device.num_resources = omap3_aes_resources_sz; } else { pr_err("%s: platform not supported\n", __func__); return; } platform_device_register(&aes_device); } #else static inline void omap_init_aes(void) { } #endif /*-------------------------------------------------------------------------*/ #if defined(CONFIG_VIDEO_OMAP2_VOUT) || \ defined(CONFIG_VIDEO_OMAP2_VOUT_MODULE) #if defined(CONFIG_FB_OMAP2) || defined(CONFIG_FB_OMAP2_MODULE) static struct resource omap_vout_resource[3 - CONFIG_FB_OMAP2_NUM_FBS] = { }; #else static struct resource omap_vout_resource[2] = { }; #endif static struct 
platform_device omap_vout_device = { .name = "omap_vout", .num_resources = ARRAY_SIZE(omap_vout_resource), .resource = &omap_vout_resource[0], .id = -1, }; static void omap_init_vout(void) { if (platform_device_register(&omap_vout_device) < 0) printk(KERN_ERR "Unable to register OMAP-VOUT device\n"); } #else static inline void omap_init_vout(void) {} #endif #if defined(CONFIG_OMAP_OCP2SCP) || defined(CONFIG_OMAP_OCP2SCP_MODULE) static int count_ocp2scp_devices(struct omap_ocp2scp_dev *ocp2scp_dev) { int cnt = 0; while (ocp2scp_dev->drv_name != NULL) { cnt++; ocp2scp_dev++; } return cnt; } static void __init omap_init_ocp2scp(void) { struct omap_hwmod *oh; struct platform_device *pdev; int bus_id = -1, dev_cnt = 0, i; struct omap_ocp2scp_dev *ocp2scp_dev; const char *oh_name, *name; struct omap_ocp2scp_platform_data *pdata; if (!cpu_is_omap44xx()) return; oh_name = "ocp2scp_usb_phy"; name = "omap-ocp2scp"; oh = omap_hwmod_lookup(oh_name); if (!oh) { pr_err("%s: could not find omap_hwmod for %s\n", __func__, oh_name); return; } pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) { pr_err("%s: No memory for ocp2scp pdata\n", __func__); return; } ocp2scp_dev = oh->dev_attr; dev_cnt = count_ocp2scp_devices(ocp2scp_dev); if (!dev_cnt) { pr_err("%s: No devices connected to ocp2scp\n", __func__); kfree(pdata); return; } pdata->devices = kzalloc(sizeof(struct omap_ocp2scp_dev *) * dev_cnt, GFP_KERNEL); if (!pdata->devices) { pr_err("%s: No memory for ocp2scp pdata devices\n", __func__); kfree(pdata); return; } for (i = 0; i < dev_cnt; i++, ocp2scp_dev++) pdata->devices[i] = ocp2scp_dev; pdata->dev_cnt = dev_cnt; pdev = omap_device_build(name, bus_id, oh, pdata, sizeof(*pdata), NULL, 0, false); if (IS_ERR(pdev)) { pr_err("Could not build omap_device for %s %s\n", name, oh_name); kfree(pdata->devices); kfree(pdata); return; } } #else static inline void omap_init_ocp2scp(void) { } #endif /*-------------------------------------------------------------------------*/ static 
int __init omap2_init_devices(void) { /* Enable dummy states for those platforms without pinctrl support */ if (!of_have_populated_dt()) pinctrl_provide_dummies(); /* * please keep these calls, and their implementations above, * in alphabetical order so they're easier to sort through. */ omap_init_audio(); omap_init_camera(); omap_init_hdmi_audio(); omap_init_mbox(); /* If dtb is there, the devices will be created dynamically */ if (!of_have_populated_dt()) { omap_init_dmic(); omap_init_mcpdm(); omap_init_mcspi(); } omap_init_sti(); omap_init_rng(); omap_init_sham(); omap_init_aes(); omap_init_vout(); omap_init_ocp2scp(); return 0; } arch_initcall(omap2_init_devices);
gpl-2.0
chosener/PopStar
cocos2d/plugin/protocols/platform/android/ProtocolAds.cpp
63
5549
/**************************************************************************** Copyright (c) 2012-2013 cocos2d-x.org http://www.cocos2d-x.org Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
****************************************************************************/ #include "ProtocolAds.h" #include "PluginJniHelper.h" #include <android/log.h> #include "PluginUtils.h" #include "PluginJavaData.h" namespace cocos2d { namespace plugin { extern "C" { JNIEXPORT void JNICALL Java_org_cocos2dx_plugin_AdsWrapper_nativeOnAdsResult(JNIEnv* env, jobject thiz, jstring className, jint ret, jstring msg) { std::string strMsg = PluginJniHelper::jstring2string(msg); std::string strClassName = PluginJniHelper::jstring2string(className); PluginProtocol* pPlugin = PluginUtils::getPluginPtr(strClassName); PluginUtils::outputLog("ProtocolAds", "nativeOnAdsResult(), Get plugin ptr : %p", pPlugin); if (pPlugin != NULL) { PluginUtils::outputLog("ProtocolAds", "nativeOnAdsResult(), Get plugin name : %s", pPlugin->getPluginName()); ProtocolAds* pAds = dynamic_cast<ProtocolAds*>(pPlugin); if (pAds != NULL) { AdsListener* listener = pAds->getAdsListener(); if (listener) { listener->onAdsResult((AdsResultCode) ret, strMsg.c_str()); } } } } JNIEXPORT void JNICALL Java_org_cocos2dx_plugin_AdsWrapper_nativeOnPlayerGetPoints(JNIEnv* env, jobject thiz, jstring className, jint points) { std::string strClassName = PluginJniHelper::jstring2string(className); PluginProtocol* pPlugin = PluginUtils::getPluginPtr(strClassName); PluginUtils::outputLog("ProtocolAds", "nativeOnPlayerGetPoints(), Get plugin ptr : %p", pPlugin); if (pPlugin != NULL) { PluginUtils::outputLog("ProtocolAds", "nativeOnPlayerGetPoints(), Get plugin name : %s", pPlugin->getPluginName()); ProtocolAds* pAds = dynamic_cast<ProtocolAds*>(pPlugin); if (pAds != NULL) { AdsListener* listener = pAds->getAdsListener(); if (listener) { listener->onPlayerGetPoints(pAds, points); } } } } } ProtocolAds::ProtocolAds() : _listener(NULL) { } ProtocolAds::~ProtocolAds() { } void ProtocolAds::configDeveloperInfo(TAdsDeveloperInfo devInfo) { if (devInfo.empty()) { PluginUtils::outputLog("ProtocolAds", "The application info is empty!"); 
return; } else { PluginJavaData* pData = PluginUtils::getPluginJavaData(this); PluginJniMethodInfo t; if (PluginJniHelper::getMethodInfo(t , pData->jclassName.c_str() , "configDeveloperInfo" , "(Ljava/util/Hashtable;)V")) { // generate the hashtable from map jobject obj_Map = PluginUtils::createJavaMapObject(&devInfo); // invoke java method t.env->CallVoidMethod(pData->jobj, t.methodID, obj_Map); t.env->DeleteLocalRef(obj_Map); t.env->DeleteLocalRef(t.classID); } } } void ProtocolAds::showAds(TAdsInfo info, AdsPos pos) { PluginJavaData* pData = PluginUtils::getPluginJavaData(this); PluginJniMethodInfo t; PluginUtils::outputLog("ProtocolAds", "Class name : %s", pData->jclassName.c_str()); if (PluginJniHelper::getMethodInfo(t , pData->jclassName.c_str() , "showAds" , "(Ljava/util/Hashtable;I)V")) { jobject obj_Map = PluginUtils::createJavaMapObject(&info); t.env->CallVoidMethod(pData->jobj, t.methodID, obj_Map, pos); t.env->DeleteLocalRef(obj_Map); t.env->DeleteLocalRef(t.classID); } } void ProtocolAds::hideAds(TAdsInfo info) { PluginJavaData* pData = PluginUtils::getPluginJavaData(this); PluginJniMethodInfo t; PluginUtils::outputLog("ProtocolAds", "Class name : %s", pData->jclassName.c_str()); if (PluginJniHelper::getMethodInfo(t , pData->jclassName.c_str() , "hideAds" , "(Ljava/util/Hashtable;)V")) { jobject obj_Map = PluginUtils::createJavaMapObject(&info); t.env->CallVoidMethod(pData->jobj, t.methodID, obj_Map); t.env->DeleteLocalRef(obj_Map); t.env->DeleteLocalRef(t.classID); } } void ProtocolAds::queryPoints() { PluginUtils::callJavaFunctionWithName(this, "queryPoints"); } void ProtocolAds::spendPoints(int points) { PluginUtils::callJavaFunctionWithName_oneParam(this, "spendPoints", "(I)V", points); } }} // namespace cocos2d { namespace plugin {
gpl-2.0
steev/luna-kernel
sound/firewire/isight.c
319
18237
/* * Apple iSight audio driver * * Copyright (c) Clemens Ladisch <clemens@ladisch.de> * Licensed under the terms of the GNU General Public License, version 2. */ #include <asm/byteorder.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/firewire.h> #include <linux/firewire-constants.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/mutex.h> #include <linux/string.h> #include <sound/control.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/tlv.h> #include "lib.h" #include "iso-resources.h" #include "packets-buffer.h" #define OUI_APPLE 0x000a27 #define MODEL_APPLE_ISIGHT 0x000008 #define SW_ISIGHT_AUDIO 0x000010 #define REG_AUDIO_ENABLE 0x000 #define AUDIO_ENABLE 0x80000000 #define REG_DEF_AUDIO_GAIN 0x204 #define REG_GAIN_RAW_START 0x210 #define REG_GAIN_RAW_END 0x214 #define REG_GAIN_DB_START 0x218 #define REG_GAIN_DB_END 0x21c #define REG_SAMPLE_RATE_INQUIRY 0x280 #define REG_ISO_TX_CONFIG 0x300 #define SPEED_SHIFT 16 #define REG_SAMPLE_RATE 0x400 #define RATE_48000 0x80000000 #define REG_GAIN 0x500 #define REG_MUTE 0x504 #define MAX_FRAMES_PER_PACKET 475 #define QUEUE_LENGTH 20 struct isight { struct snd_card *card; struct fw_unit *unit; struct fw_device *device; u64 audio_base; struct snd_pcm_substream *pcm; struct mutex mutex; struct iso_packets_buffer buffer; struct fw_iso_resources resources; struct fw_iso_context *context; bool pcm_active; bool pcm_running; bool first_packet; int packet_index; u32 total_samples; unsigned int buffer_pointer; unsigned int period_counter; s32 gain_min, gain_max; unsigned int gain_tlv[4]; }; struct audio_payload { __be32 sample_count; __be32 signature; __be32 sample_total; __be32 reserved; __be16 samples[2 * MAX_FRAMES_PER_PACKET]; }; MODULE_DESCRIPTION("iSight audio driver"); MODULE_AUTHOR("Clemens Ladisch <clemens@ladisch.de>"); MODULE_LICENSE("GPL v2"); static struct fw_iso_packet audio_packet = { .payload_length = sizeof(struct 
audio_payload), .interrupt = 1, .header_length = 4, }; static void isight_update_pointers(struct isight *isight, unsigned int count) { struct snd_pcm_runtime *runtime = isight->pcm->runtime; unsigned int ptr; smp_wmb(); /* update buffer data before buffer pointer */ ptr = isight->buffer_pointer; ptr += count; if (ptr >= runtime->buffer_size) ptr -= runtime->buffer_size; ACCESS_ONCE(isight->buffer_pointer) = ptr; isight->period_counter += count; if (isight->period_counter >= runtime->period_size) { isight->period_counter -= runtime->period_size; snd_pcm_period_elapsed(isight->pcm); } } static void isight_samples(struct isight *isight, const __be16 *samples, unsigned int count) { struct snd_pcm_runtime *runtime; unsigned int count1; if (!ACCESS_ONCE(isight->pcm_running)) return; runtime = isight->pcm->runtime; if (isight->buffer_pointer + count <= runtime->buffer_size) { memcpy(runtime->dma_area + isight->buffer_pointer * 4, samples, count * 4); } else { count1 = runtime->buffer_size - isight->buffer_pointer; memcpy(runtime->dma_area + isight->buffer_pointer * 4, samples, count1 * 4); samples += count1 * 2; memcpy(runtime->dma_area, samples, (count - count1) * 4); } isight_update_pointers(isight, count); } static void isight_pcm_abort(struct isight *isight) { unsigned long flags; if (ACCESS_ONCE(isight->pcm_active)) { snd_pcm_stream_lock_irqsave(isight->pcm, flags); if (snd_pcm_running(isight->pcm)) snd_pcm_stop(isight->pcm, SNDRV_PCM_STATE_XRUN); snd_pcm_stream_unlock_irqrestore(isight->pcm, flags); } } static void isight_dropped_samples(struct isight *isight, unsigned int total) { struct snd_pcm_runtime *runtime; u32 dropped; unsigned int count1; if (!ACCESS_ONCE(isight->pcm_running)) return; runtime = isight->pcm->runtime; dropped = total - isight->total_samples; if (dropped < runtime->buffer_size) { if (isight->buffer_pointer + dropped <= runtime->buffer_size) { memset(runtime->dma_area + isight->buffer_pointer * 4, 0, dropped * 4); } else { count1 = 
runtime->buffer_size - isight->buffer_pointer; memset(runtime->dma_area + isight->buffer_pointer * 4, 0, count1 * 4); memset(runtime->dma_area, 0, (dropped - count1) * 4); } isight_update_pointers(isight, dropped); } else { isight_pcm_abort(isight); } } static void isight_packet(struct fw_iso_context *context, u32 cycle, size_t header_length, void *header, void *data) { struct isight *isight = data; const struct audio_payload *payload; unsigned int index, length, count, total; int err; if (isight->packet_index < 0) return; index = isight->packet_index; payload = isight->buffer.packets[index].buffer; length = be32_to_cpup(header) >> 16; if (likely(length >= 16 && payload->signature == cpu_to_be32(0x73676874/*"sght"*/))) { count = be32_to_cpu(payload->sample_count); if (likely(count <= (length - 16) / 4)) { total = be32_to_cpu(payload->sample_total); if (unlikely(total != isight->total_samples)) { if (!isight->first_packet) isight_dropped_samples(isight, total); isight->first_packet = false; isight->total_samples = total; } isight_samples(isight, payload->samples, count); isight->total_samples += count; } } err = fw_iso_context_queue(isight->context, &audio_packet, &isight->buffer.iso_buffer, isight->buffer.packets[index].offset); if (err < 0) { dev_err(&isight->unit->device, "queueing error: %d\n", err); isight_pcm_abort(isight); isight->packet_index = -1; return; } fw_iso_context_queue_flush(isight->context); if (++index >= QUEUE_LENGTH) index = 0; isight->packet_index = index; } static int isight_connect(struct isight *isight) { int ch, err; __be32 value; retry_after_bus_reset: ch = fw_iso_resources_allocate(&isight->resources, sizeof(struct audio_payload), isight->device->max_speed); if (ch < 0) { err = ch; goto error; } value = cpu_to_be32(ch | (isight->device->max_speed << SPEED_SHIFT)); err = snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST, isight->audio_base + REG_ISO_TX_CONFIG, &value, 4, FW_FIXED_GENERATION | isight->resources.generation); if 
(err == -EAGAIN) { fw_iso_resources_free(&isight->resources); goto retry_after_bus_reset; } else if (err < 0) { goto err_resources; } return 0; err_resources: fw_iso_resources_free(&isight->resources); error: return err; } static int isight_open(struct snd_pcm_substream *substream) { static const struct snd_pcm_hardware hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_BATCH | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER, .formats = SNDRV_PCM_FMTBIT_S16_BE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = 4 * 1024 * 1024, .period_bytes_min = MAX_FRAMES_PER_PACKET * 4, .period_bytes_max = 1024 * 1024, .periods_min = 2, .periods_max = UINT_MAX, }; struct isight *isight = substream->private_data; substream->runtime->hw = hardware; return iso_packets_buffer_init(&isight->buffer, isight->unit, QUEUE_LENGTH, sizeof(struct audio_payload), DMA_FROM_DEVICE); } static int isight_close(struct snd_pcm_substream *substream) { struct isight *isight = substream->private_data; iso_packets_buffer_destroy(&isight->buffer, isight->unit); return 0; } static int isight_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct isight *isight = substream->private_data; int err; err = snd_pcm_lib_alloc_vmalloc_buffer(substream, params_buffer_bytes(hw_params)); if (err < 0) return err; ACCESS_ONCE(isight->pcm_active) = true; return 0; } static int reg_read(struct isight *isight, int offset, __be32 *value) { return snd_fw_transaction(isight->unit, TCODE_READ_QUADLET_REQUEST, isight->audio_base + offset, value, 4, 0); } static int reg_write(struct isight *isight, int offset, __be32 value) { return snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST, isight->audio_base + offset, &value, 4, 0); } static void isight_stop_streaming(struct isight *isight) { __be32 value; if (!isight->context) return; 
fw_iso_context_stop(isight->context); fw_iso_context_destroy(isight->context); isight->context = NULL; fw_iso_resources_free(&isight->resources); value = 0; snd_fw_transaction(isight->unit, TCODE_WRITE_QUADLET_REQUEST, isight->audio_base + REG_AUDIO_ENABLE, &value, 4, FW_QUIET); } static int isight_hw_free(struct snd_pcm_substream *substream) { struct isight *isight = substream->private_data; ACCESS_ONCE(isight->pcm_active) = false; mutex_lock(&isight->mutex); isight_stop_streaming(isight); mutex_unlock(&isight->mutex); return snd_pcm_lib_free_vmalloc_buffer(substream); } static int isight_start_streaming(struct isight *isight) { unsigned int i; int err; if (isight->context) { if (isight->packet_index < 0) isight_stop_streaming(isight); else return 0; } err = reg_write(isight, REG_SAMPLE_RATE, cpu_to_be32(RATE_48000)); if (err < 0) goto error; err = isight_connect(isight); if (err < 0) goto error; err = reg_write(isight, REG_AUDIO_ENABLE, cpu_to_be32(AUDIO_ENABLE)); if (err < 0) goto err_resources; isight->context = fw_iso_context_create(isight->device->card, FW_ISO_CONTEXT_RECEIVE, isight->resources.channel, isight->device->max_speed, 4, isight_packet, isight); if (IS_ERR(isight->context)) { err = PTR_ERR(isight->context); isight->context = NULL; goto err_resources; } for (i = 0; i < QUEUE_LENGTH; ++i) { err = fw_iso_context_queue(isight->context, &audio_packet, &isight->buffer.iso_buffer, isight->buffer.packets[i].offset); if (err < 0) goto err_context; } isight->first_packet = true; isight->packet_index = 0; err = fw_iso_context_start(isight->context, -1, 0, FW_ISO_CONTEXT_MATCH_ALL_TAGS/*?*/); if (err < 0) goto err_context; return 0; err_context: fw_iso_context_destroy(isight->context); isight->context = NULL; err_resources: fw_iso_resources_free(&isight->resources); reg_write(isight, REG_AUDIO_ENABLE, 0); error: return err; } static int isight_prepare(struct snd_pcm_substream *substream) { struct isight *isight = substream->private_data; int err; 
isight->buffer_pointer = 0; isight->period_counter = 0; mutex_lock(&isight->mutex); err = isight_start_streaming(isight); mutex_unlock(&isight->mutex); return err; } static int isight_trigger(struct snd_pcm_substream *substream, int cmd) { struct isight *isight = substream->private_data; switch (cmd) { case SNDRV_PCM_TRIGGER_START: ACCESS_ONCE(isight->pcm_running) = true; break; case SNDRV_PCM_TRIGGER_STOP: ACCESS_ONCE(isight->pcm_running) = false; break; default: return -EINVAL; } return 0; } static snd_pcm_uframes_t isight_pointer(struct snd_pcm_substream *substream) { struct isight *isight = substream->private_data; return ACCESS_ONCE(isight->buffer_pointer); } static int isight_create_pcm(struct isight *isight) { static struct snd_pcm_ops ops = { .open = isight_open, .close = isight_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = isight_hw_params, .hw_free = isight_hw_free, .prepare = isight_prepare, .trigger = isight_trigger, .pointer = isight_pointer, .page = snd_pcm_lib_get_vmalloc_page, .mmap = snd_pcm_lib_mmap_vmalloc, }; struct snd_pcm *pcm; int err; err = snd_pcm_new(isight->card, "iSight", 0, 0, 1, &pcm); if (err < 0) return err; pcm->private_data = isight; strcpy(pcm->name, "iSight"); isight->pcm = pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream; isight->pcm->ops = &ops; return 0; } static int isight_gain_info(struct snd_kcontrol *ctl, struct snd_ctl_elem_info *info) { struct isight *isight = ctl->private_data; info->type = SNDRV_CTL_ELEM_TYPE_INTEGER; info->count = 1; info->value.integer.min = isight->gain_min; info->value.integer.max = isight->gain_max; return 0; } static int isight_gain_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct isight *isight = ctl->private_data; __be32 gain; int err; err = reg_read(isight, REG_GAIN, &gain); if (err < 0) return err; value->value.integer.value[0] = (s32)be32_to_cpu(gain); return 0; } static int isight_gain_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct isight 
*isight = ctl->private_data; if (value->value.integer.value[0] < isight->gain_min || value->value.integer.value[0] > isight->gain_max) return -EINVAL; return reg_write(isight, REG_GAIN, cpu_to_be32(value->value.integer.value[0])); } static int isight_mute_get(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct isight *isight = ctl->private_data; __be32 mute; int err; err = reg_read(isight, REG_MUTE, &mute); if (err < 0) return err; value->value.integer.value[0] = !mute; return 0; } static int isight_mute_put(struct snd_kcontrol *ctl, struct snd_ctl_elem_value *value) { struct isight *isight = ctl->private_data; return reg_write(isight, REG_MUTE, (__force __be32)!value->value.integer.value[0]); } static int isight_create_mixer(struct isight *isight) { static const struct snd_kcontrol_new gain_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Mic Capture Volume", .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, .info = isight_gain_info, .get = isight_gain_get, .put = isight_gain_put, }; static const struct snd_kcontrol_new mute_control = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Mic Capture Switch", .info = snd_ctl_boolean_mono_info, .get = isight_mute_get, .put = isight_mute_put, }; __be32 value; struct snd_kcontrol *ctl; int err; err = reg_read(isight, REG_GAIN_RAW_START, &value); if (err < 0) return err; isight->gain_min = be32_to_cpu(value); err = reg_read(isight, REG_GAIN_RAW_END, &value); if (err < 0) return err; isight->gain_max = be32_to_cpu(value); isight->gain_tlv[0] = SNDRV_CTL_TLVT_DB_MINMAX; isight->gain_tlv[1] = 2 * sizeof(unsigned int); err = reg_read(isight, REG_GAIN_DB_START, &value); if (err < 0) return err; isight->gain_tlv[2] = (s32)be32_to_cpu(value) * 100; err = reg_read(isight, REG_GAIN_DB_END, &value); if (err < 0) return err; isight->gain_tlv[3] = (s32)be32_to_cpu(value) * 100; ctl = snd_ctl_new1(&gain_control, isight); if (ctl) ctl->tlv.p = isight->gain_tlv; err = 
snd_ctl_add(isight->card, ctl); if (err < 0) return err; err = snd_ctl_add(isight->card, snd_ctl_new1(&mute_control, isight)); if (err < 0) return err; return 0; } static void isight_card_free(struct snd_card *card) { struct isight *isight = card->private_data; fw_iso_resources_destroy(&isight->resources); fw_unit_put(isight->unit); mutex_destroy(&isight->mutex); } static u64 get_unit_base(struct fw_unit *unit) { struct fw_csr_iterator i; int key, value; fw_csr_iterator_init(&i, unit->directory); while (fw_csr_iterator_next(&i, &key, &value)) if (key == CSR_OFFSET) return CSR_REGISTER_BASE + value * 4; return 0; } static int isight_probe(struct fw_unit *unit, const struct ieee1394_device_id *id) { struct fw_device *fw_dev = fw_parent_device(unit); struct snd_card *card; struct isight *isight; int err; err = snd_card_create(-1, NULL, THIS_MODULE, sizeof(*isight), &card); if (err < 0) return err; snd_card_set_dev(card, &unit->device); isight = card->private_data; isight->card = card; mutex_init(&isight->mutex); isight->unit = fw_unit_get(unit); isight->device = fw_dev; isight->audio_base = get_unit_base(unit); if (!isight->audio_base) { dev_err(&unit->device, "audio unit base not found\n"); err = -ENXIO; goto err_unit; } fw_iso_resources_init(&isight->resources, unit); card->private_free = isight_card_free; strcpy(card->driver, "iSight"); strcpy(card->shortname, "Apple iSight"); snprintf(card->longname, sizeof(card->longname), "Apple iSight (GUID %08x%08x) at %s, S%d", fw_dev->config_rom[3], fw_dev->config_rom[4], dev_name(&unit->device), 100 << fw_dev->max_speed); strcpy(card->mixername, "iSight"); err = isight_create_pcm(isight); if (err < 0) goto error; err = isight_create_mixer(isight); if (err < 0) goto error; err = snd_card_register(card); if (err < 0) goto error; dev_set_drvdata(&unit->device, isight); return 0; err_unit: fw_unit_put(isight->unit); mutex_destroy(&isight->mutex); error: snd_card_free(card); return err; } static void isight_bus_reset(struct 
fw_unit *unit) { struct isight *isight = dev_get_drvdata(&unit->device); if (fw_iso_resources_update(&isight->resources) < 0) { isight_pcm_abort(isight); mutex_lock(&isight->mutex); isight_stop_streaming(isight); mutex_unlock(&isight->mutex); } } static void isight_remove(struct fw_unit *unit) { struct isight *isight = dev_get_drvdata(&unit->device); isight_pcm_abort(isight); snd_card_disconnect(isight->card); mutex_lock(&isight->mutex); isight_stop_streaming(isight); mutex_unlock(&isight->mutex); snd_card_free_when_closed(isight->card); } static const struct ieee1394_device_id isight_id_table[] = { { .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, .specifier_id = OUI_APPLE, .version = SW_ISIGHT_AUDIO, }, { } }; MODULE_DEVICE_TABLE(ieee1394, isight_id_table); static struct fw_driver isight_driver = { .driver = { .owner = THIS_MODULE, .name = KBUILD_MODNAME, .bus = &fw_bus_type, }, .probe = isight_probe, .update = isight_bus_reset, .remove = isight_remove, .id_table = isight_id_table, }; static int __init alsa_isight_init(void) { return driver_register(&isight_driver.driver); } static void __exit alsa_isight_exit(void) { driver_unregister(&isight_driver.driver); } module_init(alsa_isight_init); module_exit(alsa_isight_exit);
gpl-2.0
davidel/linux
security/apparmor/file.c
831
13066
/* * AppArmor security module * * This file contains AppArmor mediation of files * * Copyright (C) 1998-2008 Novell/SUSE * Copyright 2009-2010 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2 of the * License. */ #include "include/apparmor.h" #include "include/audit.h" #include "include/file.h" #include "include/match.h" #include "include/path.h" #include "include/policy.h" struct file_perms nullperms; /** * audit_file_mask - convert mask to permission string * @buffer: buffer to write string to (NOT NULL) * @mask: permission mask to convert */ static void audit_file_mask(struct audit_buffer *ab, u32 mask) { char str[10]; char *m = str; if (mask & AA_EXEC_MMAP) *m++ = 'm'; if (mask & (MAY_READ | AA_MAY_META_READ)) *m++ = 'r'; if (mask & (MAY_WRITE | AA_MAY_META_WRITE | AA_MAY_CHMOD | AA_MAY_CHOWN)) *m++ = 'w'; else if (mask & MAY_APPEND) *m++ = 'a'; if (mask & AA_MAY_CREATE) *m++ = 'c'; if (mask & AA_MAY_DELETE) *m++ = 'd'; if (mask & AA_MAY_LINK) *m++ = 'l'; if (mask & AA_MAY_LOCK) *m++ = 'k'; if (mask & MAY_EXEC) *m++ = 'x'; *m = '\0'; audit_log_string(ab, str); } /** * file_audit_cb - call back for file specific audit fields * @ab: audit_buffer (NOT NULL) * @va: audit struct to audit values of (NOT NULL) */ static void file_audit_cb(struct audit_buffer *ab, void *va) { struct common_audit_data *sa = va; kuid_t fsuid = current_fsuid(); if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) { audit_log_format(ab, " requested_mask="); audit_file_mask(ab, sa->aad->fs.request); } if (sa->aad->fs.denied & AA_AUDIT_FILE_MASK) { audit_log_format(ab, " denied_mask="); audit_file_mask(ab, sa->aad->fs.denied); } if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) { audit_log_format(ab, " fsuid=%d", from_kuid(&init_user_ns, fsuid)); audit_log_format(ab, " ouid=%d", from_kuid(&init_user_ns, sa->aad->fs.ouid)); } if (sa->aad->fs.target) { 
audit_log_format(ab, " target="); audit_log_untrustedstring(ab, sa->aad->fs.target); } } /** * aa_audit_file - handle the auditing of file operations * @profile: the profile being enforced (NOT NULL) * @perms: the permissions computed for the request (NOT NULL) * @gfp: allocation flags * @op: operation being mediated * @request: permissions requested * @name: name of object being mediated (MAYBE NULL) * @target: name of target (MAYBE NULL) * @ouid: object uid * @info: extra information message (MAYBE NULL) * @error: 0 if operation allowed else failure error code * * Returns: %0 or error on failure */ int aa_audit_file(struct aa_profile *profile, struct file_perms *perms, gfp_t gfp, int op, u32 request, const char *name, const char *target, kuid_t ouid, const char *info, int error) { int type = AUDIT_APPARMOR_AUTO; struct common_audit_data sa; struct apparmor_audit_data aad = {0,}; sa.type = LSM_AUDIT_DATA_NONE; sa.aad = &aad; aad.op = op, aad.fs.request = request; aad.name = name; aad.fs.target = target; aad.fs.ouid = ouid; aad.info = info; aad.error = error; if (likely(!sa.aad->error)) { u32 mask = perms->audit; if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL)) mask = 0xffff; /* mask off perms that are not being force audited */ sa.aad->fs.request &= mask; if (likely(!sa.aad->fs.request)) return 0; type = AUDIT_APPARMOR_AUDIT; } else { /* only report permissions that were denied */ sa.aad->fs.request = sa.aad->fs.request & ~perms->allow; if (sa.aad->fs.request & perms->kill) type = AUDIT_APPARMOR_KILL; /* quiet known rejects, assumes quiet and kill do not overlap */ if ((sa.aad->fs.request & perms->quiet) && AUDIT_MODE(profile) != AUDIT_NOQUIET && AUDIT_MODE(profile) != AUDIT_ALL) sa.aad->fs.request &= ~perms->quiet; if (!sa.aad->fs.request) return COMPLAIN_MODE(profile) ? 
0 : sa.aad->error; } sa.aad->fs.denied = sa.aad->fs.request & ~perms->allow; return aa_audit(type, profile, gfp, &sa, file_audit_cb); } /** * map_old_perms - map old file perms layout to the new layout * @old: permission set in old mapping * * Returns: new permission mapping */ static u32 map_old_perms(u32 old) { u32 new = old & 0xf; if (old & MAY_READ) new |= AA_MAY_META_READ; if (old & MAY_WRITE) new |= AA_MAY_META_WRITE | AA_MAY_CREATE | AA_MAY_DELETE | AA_MAY_CHMOD | AA_MAY_CHOWN; if (old & 0x10) new |= AA_MAY_LINK; /* the old mapping lock and link_subset flags where overlaid * and use was determined by part of a pair that they were in */ if (old & 0x20) new |= AA_MAY_LOCK | AA_LINK_SUBSET; if (old & 0x40) /* AA_EXEC_MMAP */ new |= AA_EXEC_MMAP; return new; } /** * compute_perms - convert dfa compressed perms to internal perms * @dfa: dfa to compute perms for (NOT NULL) * @state: state in dfa * @cond: conditions to consider (NOT NULL) * * TODO: convert from dfa + state to permission entry, do computation conversion * at load time. * * Returns: computed permission set */ static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state, struct path_cond *cond) { struct file_perms perms; /* FIXME: change over to new dfa format * currently file perms are encoded in the dfa, new format * splits the permissions from the dfa. 
This mapping can be * done at profile load */ perms.kill = 0; if (uid_eq(current_fsuid(), cond->uid)) { perms.allow = map_old_perms(dfa_user_allow(dfa, state)); perms.audit = map_old_perms(dfa_user_audit(dfa, state)); perms.quiet = map_old_perms(dfa_user_quiet(dfa, state)); perms.xindex = dfa_user_xindex(dfa, state); } else { perms.allow = map_old_perms(dfa_other_allow(dfa, state)); perms.audit = map_old_perms(dfa_other_audit(dfa, state)); perms.quiet = map_old_perms(dfa_other_quiet(dfa, state)); perms.xindex = dfa_other_xindex(dfa, state); } perms.allow |= AA_MAY_META_READ; /* change_profile wasn't determined by ownership in old mapping */ if (ACCEPT_TABLE(dfa)[state] & 0x80000000) perms.allow |= AA_MAY_CHANGE_PROFILE; if (ACCEPT_TABLE(dfa)[state] & 0x40000000) perms.allow |= AA_MAY_ONEXEC; return perms; } /** * aa_str_perms - find permission that match @name * @dfa: to match against (MAYBE NULL) * @state: state to start matching in * @name: string to match against dfa (NOT NULL) * @cond: conditions to consider for permission set computation (NOT NULL) * @perms: Returns - the permissions found when matching @name * * Returns: the final state in @dfa when beginning @start and walking @name */ unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start, const char *name, struct path_cond *cond, struct file_perms *perms) { unsigned int state; if (!dfa) { *perms = nullperms; return DFA_NOMATCH; } state = aa_dfa_match(dfa, start, name); *perms = compute_perms(dfa, state, cond); return state; } /** * is_deleted - test if a file has been completely unlinked * @dentry: dentry of file to test for deletion (NOT NULL) * * Returns: %1 if deleted else %0 */ static inline bool is_deleted(struct dentry *dentry) { if (d_unlinked(dentry) && d_backing_inode(dentry)->i_nlink == 0) return 1; return 0; } /** * aa_path_perm - do permissions check & audit for @path * @op: operation being checked * @profile: profile being enforced (NOT NULL) * @path: path to check permissions of 
(NOT NULL) * @flags: any additional path flags beyond what the profile specifies * @request: requested permissions * @cond: conditional info for this request (NOT NULL) * * Returns: %0 else error if access denied or other error */ int aa_path_perm(int op, struct aa_profile *profile, struct path *path, int flags, u32 request, struct path_cond *cond) { char *buffer = NULL; struct file_perms perms = {}; const char *name, *info = NULL; int error; flags |= profile->path_flags | (S_ISDIR(cond->mode) ? PATH_IS_DIR : 0); error = aa_path_name(path, flags, &buffer, &name, &info); if (error) { if (error == -ENOENT && is_deleted(path->dentry)) { /* Access to open files that are deleted are * give a pass (implicit delegation) */ error = 0; info = NULL; perms.allow = request; } } else { aa_str_perms(profile->file.dfa, profile->file.start, name, cond, &perms); if (request & ~perms.allow) error = -EACCES; } error = aa_audit_file(profile, &perms, GFP_KERNEL, op, request, name, NULL, cond->uid, info, error); kfree(buffer); return error; } /** * xindex_is_subset - helper for aa_path_link * @link: link permission set * @target: target permission set * * test target x permissions are equal OR a subset of link x permissions * this is done as part of the subset test, where a hardlink must have * a subset of permissions that the target has. * * Returns: %1 if subset else %0 */ static inline bool xindex_is_subset(u32 link, u32 target) { if (((link & ~AA_X_UNSAFE) != (target & ~AA_X_UNSAFE)) || ((link & AA_X_UNSAFE) && !(target & AA_X_UNSAFE))) return 0; return 1; } /** * aa_path_link - Handle hard link permission check * @profile: the profile being enforced (NOT NULL) * @old_dentry: the target dentry (NOT NULL) * @new_dir: directory the new link will be created in (NOT NULL) * @new_dentry: the link being created (NOT NULL) * * Handle the permission test for a link & target pair. 
Permission * is encoded as a pair where the link permission is determined * first, and if allowed, the target is tested. The target test * is done from the point of the link match (not start of DFA) * making the target permission dependent on the link permission match. * * The subset test if required forces that permissions granted * on link are a subset of the permission granted to target. * * Returns: %0 if allowed else error */ int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry) { struct path link = { new_dir->mnt, new_dentry }; struct path target = { new_dir->mnt, old_dentry }; struct path_cond cond = { d_backing_inode(old_dentry)->i_uid, d_backing_inode(old_dentry)->i_mode }; char *buffer = NULL, *buffer2 = NULL; const char *lname, *tname = NULL, *info = NULL; struct file_perms lperms, perms; u32 request = AA_MAY_LINK; unsigned int state; int error; lperms = nullperms; /* buffer freed below, lname is pointer in buffer */ error = aa_path_name(&link, profile->path_flags, &buffer, &lname, &info); if (error) goto audit; /* buffer2 freed below, tname is pointer in buffer2 */ error = aa_path_name(&target, profile->path_flags, &buffer2, &tname, &info); if (error) goto audit; error = -EACCES; /* aa_str_perms - handles the case of the dfa being NULL */ state = aa_str_perms(profile->file.dfa, profile->file.start, lname, &cond, &lperms); if (!(lperms.allow & AA_MAY_LINK)) goto audit; /* test to see if target can be paired with link */ state = aa_dfa_null_transition(profile->file.dfa, state); aa_str_perms(profile->file.dfa, state, tname, &cond, &perms); /* force audit/quiet masks for link are stored in the second entry * in the link pair. 
*/ lperms.audit = perms.audit; lperms.quiet = perms.quiet; lperms.kill = perms.kill; if (!(perms.allow & AA_MAY_LINK)) { info = "target restricted"; goto audit; } /* done if link subset test is not required */ if (!(perms.allow & AA_LINK_SUBSET)) goto done_tests; /* Do link perm subset test requiring allowed permission on link are a * subset of the allowed permissions on target. */ aa_str_perms(profile->file.dfa, profile->file.start, tname, &cond, &perms); /* AA_MAY_LINK is not considered in the subset test */ request = lperms.allow & ~AA_MAY_LINK; lperms.allow &= perms.allow | AA_MAY_LINK; request |= AA_AUDIT_FILE_MASK & (lperms.allow & ~perms.allow); if (request & ~lperms.allow) { goto audit; } else if ((lperms.allow & MAY_EXEC) && !xindex_is_subset(lperms.xindex, perms.xindex)) { lperms.allow &= ~MAY_EXEC; request |= MAY_EXEC; info = "link not subset of target"; goto audit; } done_tests: error = 0; audit: error = aa_audit_file(profile, &lperms, GFP_KERNEL, OP_LINK, request, lname, tname, cond.uid, info, error); kfree(buffer); kfree(buffer2); return error; } /** * aa_file_perm - do permission revalidation check & audit for @file * @op: operation being checked * @profile: profile being enforced (NOT NULL) * @file: file to revalidate access permissions on (NOT NULL) * @request: requested permissions * * Returns: %0 if access allowed else error */ int aa_file_perm(int op, struct aa_profile *profile, struct file *file, u32 request) { struct path_cond cond = { .uid = file_inode(file)->i_uid, .mode = file_inode(file)->i_mode }; return aa_path_perm(op, profile, &file->f_path, PATH_DELEGATE_DELETED, request, &cond); }
gpl-2.0
VanirAOSP/kernel_oppo_n1
drivers/misc/slimport_anx7808/slimport_tx_drv.c
831
84548
/* * Copyright(c) 2012, Analogix Semiconductor. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/delay.h> #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/slimport.h> #include "slimport_tx_drv.h" #include "slimport_tx_reg.h" static unchar bytebuf[MAX_BUF_CNT]; /* EDID access break */ unchar bedid_break; static unchar bedid_checksum; unchar bedid_extblock[128] = {0}; unchar bedid_firstblock[128] = {0}; static ulong pclk; static ulong m_val, n_val; enum SP_LINK_BW sp_tx_bw; unchar sp_tx_link_config_done; unchar sp_tx_hw_lt_done; bool sp_tx_hw_lt_enable; static bool sp_tx_test_lt; static unchar sp_tx_test_bw; static bool sp_tx_test_edid; static unchar sp_tx_ds_vid_stb_cntr; /* for HDCP */ static unchar sp_tx_hdcp_auth_pass; static unchar sp_tx_hdcp_auth_fail_counter; static unchar sp_tx_hdcp_capable_chk; static unchar sp_tx_hw_hdcp_en; static unchar sp_tx_hdcp_auth_done; bool anx7808_ver_ba; unchar sp_tx_pd_mode; unchar sp_tx_rx_anx7730; unchar sp_tx_rx_mydp; static struct AudiInfoframe sp_tx_audioinfoframe; static struct Packet_AVI sp_tx_packet_avi; static struct Packet_SPD sp_tx_packet_spd; static struct Packet_MPEG sp_tx_packet_mpeg; enum SP_TX_System_State sp_tx_system_state; /* ***************************************************************** */ /* GLOBAL VARIABLES DEFINITION FOR HDMI START */ /* ***************************************************************** */ static unchar g_hdmi_dvi_status; static unchar g_cur_pix_clk; static unchar 
g_video_stable_cntr; static unchar g_audio_stable_cntr; static unchar g_sync_expire_cntr; static unchar g_hdcp_err_cnt; static ulong g_cur_h_res; static ulong g_cur_v_res; static unchar g_video_muted; static unchar g_audio_muted; static unchar g_cts_got; static unchar g_audio_got; static unchar g_vsi_got; static unchar g_no_vsi_counter; static enum HDMI_RX_System_State hdmi_system_state; /* ***************************************************************** */ /* GLOBAL VARIABLES DEFINITION FOR HDMI END */ /* ***************************************************************** */ void sp_tx_variable_init(void) { sp_tx_hdcp_auth_fail_counter = 0; sp_tx_hdcp_auth_pass = 0; sp_tx_hw_hdcp_en = 0; sp_tx_hdcp_capable_chk = 0; sp_tx_hdcp_auth_done = 0; sp_tx_pd_mode = 1; sp_tx_rx_anx7730 = 0; sp_tx_rx_mydp = 0; sp_tx_hw_lt_done = 0; sp_tx_hw_lt_enable = 0; sp_tx_link_config_done = 0; sp_tx_ds_vid_stb_cntr = 0; bedid_break = 0; bedid_checksum = 0; sp_tx_test_edid = 0; sp_tx_test_bw = 0; sp_tx_test_lt = 0; sp_tx_bw = BW_54G; } static void sp_tx_api_m_gen_clk_select(unchar bspreading) { unchar c; sp_read_reg(TX_P0, SP_TX_M_CALCU_CTRL, &c); if (bspreading) { c |= M_GEN_CLK_SEL; sp_write_reg(TX_P0, SP_TX_M_CALCU_CTRL, c); } else { c &= ~M_GEN_CLK_SEL; sp_write_reg(TX_P0, SP_TX_M_CALCU_CTRL, c); } } static void sp_tx_link_phy_initialization(void) { /* PHY parameter for cts */ sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG4, 0x1b); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG7, 0x22); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG9, 0x23); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG14, 0x09); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG17, 0x16); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG19, 0x1F); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG1, 0x26); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG5, 0x28); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG8, 0x2F); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG15, 0x10); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG18, 0x1F); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG2, 0x36); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG6, 0x3c); if 
(anx7808_ver_ba) { sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG0, 0x19); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG16, 0x18); } else { sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG0, 0x16); sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG16, 0x10); } sp_write_reg(TX_P1, SP_TX_LT_CTRL_REG3, 0x3F); } void sp_tx_initialization(void) { unchar c; sp_read_reg(TX_P0, SP_TX_EXTRA_ADDR_REG, &c); c |= I2C_EXTRA_ADDR | I2C_STRETCH_DISABLE; sp_write_reg(TX_P0, SP_TX_EXTRA_ADDR_REG, c); sp_read_reg(TX_P0, SP_TX_HDCP_CTRL , &c); c |= LINK_POLLING; c &= ~AUTO_START; c &= ~AUTO_EN; sp_write_reg(TX_P0, SP_TX_HDCP_CTRL, c); sp_read_reg(TX_P0, SP_TX_LINK_DEBUG_REG , &c); c |= M_VID_DEBUG; sp_write_reg(TX_P0, SP_TX_LINK_DEBUG_REG, c); sp_read_reg(TX_P0, SP_TX_DEBUG_REG1, &c); c |= FORCE_HPD | FORCE_PLL_LOCK | POLLING_EN; sp_write_reg(TX_P0, SP_TX_DEBUG_REG1, c); sp_read_reg(TX_P2, SP_TX_PLL_FILTER_CTRL11, &c); c |= AUX_TERM_50OHM; sp_write_reg(TX_P2, SP_TX_PLL_FILTER_CTRL11, c); sp_read_reg(TX_P2, SP_TX_PLL_FILTER_CTRL6, &c); c &= ~P5V_PROTECT_PD; c &= ~SHORT_PROTECT_PD; sp_write_reg(TX_P2, SP_TX_PLL_FILTER_CTRL6, c); sp_read_reg(TX_P2, SP_TX_ANALOG_DEBUG_REG2, &c); c |= POWERON_TIME_1P5MS; sp_write_reg(TX_P2, SP_TX_ANALOG_DEBUG_REG2, c); sp_read_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, &c); c |= BKSV_SRM_PASS; c |= KSVLIST_VLD; sp_write_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, c); if (anx7808_ver_ba) { sp_write_reg(TX_P2, SP_TX_ANALOG_CTRL, 0xC5); sp_write_reg(TX_P0, I2C_GEN_10US_TIMER0, 0x0E); sp_write_reg(TX_P0, I2C_GEN_10US_TIMER1, 0x01); } c = AUTO_POLLING_DISABLE; sp_write_reg(TX_P0, SP_TX_DP_POLLING_CTRL_REG, c); /*Short the link check timer for HDCP CTS item1a-07*/ sp_write_reg(TX_P0, SP_TX_LINK_CHK_TIMER, 0x1D); sp_read_reg(TX_P0, SP_TX_MISC_CTRL_REG, &c); c |= EQ_TRAINING_LOOP; sp_write_reg(TX_P0, SP_TX_MISC_CTRL_REG, c); sp_write_reg(TX_P0, 0x20, 0xa2); sp_write_reg(TX_P0, 0x21, 0x7e); sp_write_reg(TX_P0, 0x1f, 0x04); sp_tx_link_phy_initialization(); sp_tx_api_m_gen_clk_select(1); } void sp_tx_power_down(enum 
SP_TX_POWER_BLOCK sp_tx_pd_block) { unchar c; sp_read_reg(TX_P2, SP_POWERD_CTRL_REG, &c); if (sp_tx_pd_block == SP_TX_PWR_REG) c |= REGISTER_PD; else if (sp_tx_pd_block == SP_TX_PWR_HDCP) c |= HDCP_PD; else if (sp_tx_pd_block == SP_TX_PWR_AUDIO) c |= AUDIO_PD; else if (sp_tx_pd_block == SP_TX_PWR_VIDEO) c |= VIDEO_PD; else if (sp_tx_pd_block == SP_TX_PWR_LINK) c |= LINK_PD; else if (sp_tx_pd_block == SP_TX_PWR_TOTAL) c |= TOTAL_PD; sp_write_reg(TX_P2, SP_POWERD_CTRL_REG, c); pr_info("sp_tx_power_down"); } void sp_tx_power_on(enum SP_TX_POWER_BLOCK sp_tx_pd_block) { unchar c; sp_read_reg(TX_P2, SP_POWERD_CTRL_REG, &c); if (sp_tx_pd_block == SP_TX_PWR_REG) c &= ~REGISTER_PD; else if (sp_tx_pd_block == SP_TX_PWR_HDCP) c &= ~HDCP_PD; else if (sp_tx_pd_block == SP_TX_PWR_AUDIO) c &= ~AUDIO_PD; else if (sp_tx_pd_block == SP_TX_PWR_VIDEO) c &= ~VIDEO_PD; else if (sp_tx_pd_block == SP_TX_PWR_LINK) c &= ~LINK_PD; else if (sp_tx_pd_block == SP_TX_PWR_TOTAL) c &= ~TOTAL_PD; sp_write_reg(TX_P2, SP_POWERD_CTRL_REG, c); pr_info("sp_tx_power_on"); } void sp_tx_rst_aux(void) { unchar c, c1; sp_read_reg(TX_P0, SP_TX_DEBUG_REG1, &c1); c = c1; c1 &= ~HPD_POLLING_EN; c1 &= ~POLLING_EN; sp_write_reg(TX_P0, SP_TX_DEBUG_REG1, c1); sp_read_reg(TX_P2, SP_TX_RST_CTRL2_REG, &c1); c1 |= AUX_RST; sp_write_reg(TX_P2, SP_TX_RST_CTRL2_REG, c1); msleep(1); c1 &= ~AUX_RST; sp_write_reg(TX_P2, SP_TX_RST_CTRL2_REG, c1); /* enable polling after reset AUX-ANX.Fei-2011.9.19 */ sp_write_reg(TX_P0, SP_TX_DEBUG_REG1, c); } static unchar sp_tx_wait_aux_finished(void) { unchar c; unchar cCnt; cCnt = 0; sp_read_reg(TX_P0, SP_TX_AUX_STATUS, &c); while (c & AUX_BUSY) { cCnt++; sp_read_reg(TX_P0, SP_TX_AUX_STATUS, &c); if (cCnt > 100) { pr_err("AUX Operaton does not finished, and time out.\n"); break; } } if (c & 0x0F) { pr_err("aux operation failed %.2x\n", (uint)c); return 0; } else return 1; } static unchar sp_tx_aux_dpcdread_bytes(unchar addrh, unchar addrm, unchar addrl, unchar cCount, unchar *pBuf) { 
unchar c, i; unchar bOK; sp_write_reg(TX_P0, SP_TX_BUF_DATA_COUNT_REG, 0x80); c = ((cCount - 1) << 4) | 0x09; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG, c); sp_write_reg(TX_P0, SP_TX_AUX_ADDR_7_0_REG, addrl); sp_write_reg(TX_P0, SP_TX_AUX_ADDR_15_8_REG, addrm); sp_read_reg(TX_P0, SP_TX_AUX_ADDR_19_16_REG, &c); c = (c & 0xf0) | addrh; sp_write_reg(TX_P0, SP_TX_AUX_ADDR_19_16_REG, c); sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); c |= AUX_OP_EN; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); msleep(2); bOK = sp_tx_wait_aux_finished(); if (!bOK) { pr_err("aux read failed\n"); sp_tx_rst_aux(); return AUX_ERR; } for (i = 0; i < cCount; i++) { sp_read_reg(TX_P0, SP_TX_BUF_DATA_0_REG + i, &c); *(pBuf + i) = c; if (i >= MAX_BUF_CNT) break; } return AUX_OK; } static unchar sp_tx_aux_dpcdwrite_bytes(unchar addrh, unchar addrm, unchar addrl, unchar cCount, unchar *pBuf) { unchar c, i; unchar bOK; sp_write_reg(TX_P0, SP_TX_BUF_DATA_COUNT_REG, 0x80); c = ((cCount - 1) << 4) | 0x08; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG, c); sp_write_reg(TX_P0, SP_TX_AUX_ADDR_7_0_REG, addrl); sp_write_reg(TX_P0, SP_TX_AUX_ADDR_15_8_REG, addrm); sp_read_reg(TX_P0, SP_TX_AUX_ADDR_19_16_REG, &c); c = (c & 0xf0) | addrh; sp_write_reg(TX_P0, SP_TX_AUX_ADDR_19_16_REG, c); for (i = 0; i < cCount; i++) { c = *pBuf; pBuf++; sp_write_reg(TX_P0, SP_TX_BUF_DATA_0_REG + i, c); if (i >= MAX_BUF_CNT) break; } sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); c |= 0x01; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); bOK = sp_tx_wait_aux_finished(); if (bOK) return AUX_OK; else return AUX_ERR; } static void sp_tx_aux_dpcdwrite_byte(unchar addrh, unchar addrm, unchar addrl, unchar data1) { unchar c; sp_write_reg(TX_P0, SP_TX_BUF_DATA_COUNT_REG, 0x80); c = (0 << 4) | 0x08; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG, c); sp_write_reg(TX_P0, SP_TX_AUX_ADDR_7_0_REG, addrl); sp_write_reg(TX_P0, SP_TX_AUX_ADDR_15_8_REG, addrm); sp_read_reg(TX_P0, SP_TX_AUX_ADDR_19_16_REG, &c); c = (c & 0xf0) | addrh; sp_write_reg(TX_P0, 
SP_TX_AUX_ADDR_19_16_REG, c); sp_write_reg(TX_P0, SP_TX_BUF_DATA_0_REG, data1); sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); c |= AUX_OP_EN; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); sp_tx_wait_aux_finished(); return; } void sp_tx_set_colorspace(void) { unchar c; unchar color_space; sp_read_reg(RX_P1, HDMI_RX_AVI_DATA00_REG, &color_space); color_space &= 0x60; sp_read_reg(TX_P2, SP_TX_VID_CTRL2_REG, &c); c = (c & 0xfc) | color_space >> 5; sp_write_reg(TX_P2, SP_TX_VID_CTRL2_REG, c); } static void sp_tx_vsi_setup(void) { unchar c; int i; for (i = 0; i < 10; i++) { sp_read_reg(RX_P1, (HDMI_RX_MPEG_DATA00_REG + i), &c); sp_tx_packet_mpeg.MPEG_data[i] = c; } } static void sp_tx_mpeg_setup(void) { unchar c; int i; for (i = 0; i < 10; i++) { sp_read_reg(RX_P1, (HDMI_RX_MPEG_DATA00_REG + i), &c); sp_tx_packet_mpeg.MPEG_data[i] = c; } } static void sp_tx_get_int_status(enum INTStatus IntIndex, unchar *cStatus) { unchar c; sp_read_reg(TX_P2, SP_COMMON_INT_STATUS1 + IntIndex, &c); sp_write_reg(TX_P2, SP_COMMON_INT_STATUS1 + IntIndex, c); *cStatus = c; } static unchar sp_tx_get_pll_lock_status(void) { unchar c; sp_read_reg(TX_P0, SP_TX_DEBUG_REG1, &c); if (c & DEBUG_PLL_LOCK) return 1; else return 0; } void sp_tx_set_3d_packets(void) { unchar c; unchar hdmi_video_format, vsi_header, v3d_structure; if (g_vsi_got) { sp_read_reg(TX_P0, SP_TX_3D_VSC_CTRL, &c); if (!(c & 0x01)) { sp_read_reg(RX_P1, HDMI_RX_MPEG_TYPE_REG, &vsi_header); sp_read_reg(RX_P1, HDMI_RX_MPEG_DATA03_REG, &hdmi_video_format); if ((vsi_header == 0x81) && ((hdmi_video_format & 0xe0) == 0x40)) { pr_info("3D VSI packet is detected. 
Config VSI and VSC packet"); sp_tx_vsi_setup(); sp_tx_config_packets(VSI_PACKETS); sp_read_reg(RX_P1, HDMI_RX_MPEG_DATA05_REG, &v3d_structure); switch (v3d_structure & 0xf0) { case 0x00:/* frame packing */ v3d_structure = 0x02; break; case 0x20:/* Line alternative */ v3d_structure = 0x03; break; case 0x30: /* Side-by-side(full) */ v3d_structure = 0x04; break; default: v3d_structure = 0x00; pr_warn("3D format is not supported"); break; } sp_write_reg(TX_P0, SP_TX_VSC_DB1, v3d_structure); sp_read_reg(TX_P0, SP_TX_3D_VSC_CTRL, &c); c = c|0x01; sp_write_reg(TX_P0, SP_TX_3D_VSC_CTRL, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c = c & 0xfe; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c = c|0x10; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c = c|0x01; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); } } g_no_vsi_counter = 0; g_vsi_got = 0; } else { g_no_vsi_counter++; if (g_no_vsi_counter > 5) { sp_read_reg(TX_P0, 0xea, &c); if (c & 0x01) { pr_info("No new VSI is received, disable VSC packet"); /* disable VSC */ sp_write_reg(TX_P0, 0xea, 0x00); sp_tx_mpeg_setup(); sp_tx_config_packets(MPEG_PACKETS); } g_no_vsi_counter = 0; } } } static void sp_tx_lvttl_bit_mapping(void) { enum HDMI_color_depth hdmi_input_color_depth = Hdmi_legacy; unchar c, c1; unchar vid_bit, value; sp_read_reg(RX_P0, HDMI_RX_VIDEO_STATUS_REG1, &c1); c1 &= COLOR_DEPTH; if (c1 == 0x00) hdmi_input_color_depth = Hdmi_legacy; else if (c1 == 0x40) hdmi_input_color_depth = Hdmi_24bit; else if (c1 == 0x50) hdmi_input_color_depth = Hdmi_30bit; else if (c1 == 0x60) hdmi_input_color_depth = Hdmi_36bit; else pr_warn("HDMI input color depth is not supported .\n"); switch (hdmi_input_color_depth) { case Hdmi_legacy: case Hdmi_24bit: sp_read_reg(TX_P2, SP_TX_VID_CTRL1_REG, &c); c = c & ~IN_BIT_SEl; sp_write_reg(TX_P2, SP_TX_VID_CTRL1_REG, c); sp_read_reg(TX_P2, SP_TX_VID_CTRL2_REG, &c); c = (c & 0x8f) | IN_BPC_8BIT; sp_write_reg(TX_P2, 
SP_TX_VID_CTRL2_REG, c); for (c = 0; c < 24; c++) { vid_bit = SP_TX_VID_BIT_CTRL0_REG + c; value = c; sp_write_reg(TX_P2, vid_bit, value); } break; case Hdmi_30bit: sp_read_reg(TX_P2, SP_TX_VID_CTRL1_REG, &c); c = c & ~IN_BIT_SEl; sp_write_reg(TX_P2, SP_TX_VID_CTRL1_REG, c); sp_read_reg(TX_P2, SP_TX_VID_CTRL2_REG, &c); c = (c & 0x8f) | IN_BPC_10BIT; sp_write_reg(TX_P2, SP_TX_VID_CTRL2_REG, c); for (c = 0; c < 10; c++) { vid_bit = SP_TX_VID_BIT_CTRL0_REG + c; value = 0x02 + c; sp_write_reg(TX_P2, vid_bit, value); } for (c = 0; c < 10; c++) { vid_bit = SP_TX_VID_BIT_CTRL10_REG + c; value = 0x0E + c; sp_write_reg(TX_P2, vid_bit, value); } for (c = 0; c < 10; c++) { vid_bit = SP_TX_VID_BIT_CTRL20_REG + c; value = 0x1A + c; sp_write_reg(TX_P2, vid_bit, value); } break; case Hdmi_36bit: sp_read_reg(TX_P2, SP_TX_VID_CTRL1_REG, &c); c = c & ~IN_BIT_SEl; sp_write_reg(TX_P2, SP_TX_VID_CTRL1_REG, c); sp_read_reg(TX_P2, SP_TX_VID_CTRL2_REG, &c); c = ((c & 0x8f) | IN_BPC_12BIT); sp_write_reg(TX_P2, SP_TX_VID_CTRL2_REG, c); for (c = 0; c < 36; c++) { vid_bit = SP_TX_VID_BIT_CTRL0_REG + c; value = c; sp_write_reg(TX_P2, vid_bit, value); } break; default: break; } /* config blank with YUV color space video */ sp_read_reg(RX_P1, HDMI_RX_AVI_DATA00_REG, &c); if (c & 0x60) { sp_write_reg(TX_P0, SP_TX_VID_BLANK_SET1, 0x80); sp_write_reg(TX_P0, SP_TX_VID_BLANK_SET1, 0x00); sp_write_reg(TX_P0, SP_TX_VID_BLANK_SET1, 0x80); } } void sp_tx_enable_video_input(unchar enable) { unchar c; if (enable) { sp_read_reg(TX_P2, SP_TX_VID_CTRL1_REG, &c); c = (c & 0xf7) | VIDEO_EN; sp_write_reg(TX_P2, SP_TX_VID_CTRL1_REG, c); sp_write_reg(TX_P2, SP_COMMON_INT_MASK1, 0xf5); sp_write_reg(TX_P2, SP_COMMON_INT_STATUS1, 0x0a); pr_info("Slimport Video is enabled!\n"); } else { sp_read_reg(TX_P2, SP_TX_VID_CTRL1_REG, &c); c &= ~VIDEO_EN; sp_write_reg(TX_P2, SP_TX_VID_CTRL1_REG, c); pr_info("Slimport Video is disabled!\n"); } } static void sp_tx_enhancemode_set(void) { unchar c; sp_tx_aux_dpcdread_bytes(0x00, 
0x00, DPCD_MAX_LANE_COUNT, 1, &c); if (c & ENHANCED_FRAME_CAP) { sp_read_reg(TX_P0, SP_TX_SYS_CTRL4_REG, &c); c |= ENHANCED_MODE; sp_write_reg(TX_P0, SP_TX_SYS_CTRL4_REG, c); sp_tx_aux_dpcdread_bytes(0x00, 0x01, DPCD_LANE_COUNT_SET, 1, &c); c |= ENHANCED_FRAME_EN; sp_tx_aux_dpcdwrite_byte(0x00, 0x01, DPCD_LANE_COUNT_SET, c); pr_info("Enhance mode enabled\n"); } else { sp_read_reg(TX_P0, SP_TX_SYS_CTRL4_REG, &c); c &= ~ENHANCED_MODE; sp_write_reg(TX_P0, SP_TX_SYS_CTRL4_REG, c); sp_tx_aux_dpcdread_bytes(0x00, 0x01, DPCD_LANE_COUNT_SET, 1, &c); c &= ~ENHANCED_FRAME_EN; sp_tx_aux_dpcdwrite_byte(0x00, 0x01, DPCD_LANE_COUNT_SET, c); pr_info("Enhance mode disabled\n"); } } static void sp_tx_hdcp_reauth(void) { unchar c; sp_read_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, &c); c |= RE_AUTH; sp_write_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, c); c &= ~RE_AUTH; sp_write_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, c); } static void sp_tx_clean_hdcp_status(void) { sp_write_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, 0x00); sp_tx_hdcp_reauth(); } static void sp_tx_hdcp_encryption_disable(void) { unchar c; sp_read_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, &c); c &= ~ENC_EN; sp_write_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, c); } static void sp_tx_hdcp_encryption_enable(void) { unchar c; sp_read_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, &c); c |= ENC_EN; sp_write_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, c); } static void sp_tx_hw_hdcp_enable(void) { unchar c; sp_read_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, &c); c &= ~ENC_EN; c &= ~HARD_AUTH_EN; sp_write_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, c); sp_read_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, &c); c |= HARD_AUTH_EN; c |= BKSV_SRM_PASS; c |= KSVLIST_VLD; c |= ENC_EN; sp_write_reg(TX_P0, SP_TX_HDCP_CTRL0_REG, c); sp_write_reg(TX_P0, SP_TX_WAIT_R0_TIME, 0xb0); sp_write_reg(TX_P0, SP_TX_WAIT_KSVR_TIME, 0xc8); sp_write_reg(TX_P2, SP_COMMON_INT_MASK2, 0xfc); pr_info("Hardware HDCP is enabled."); } void sp_tx_clean_hdcp(void) { sp_tx_hdcp_auth_fail_counter = 0; sp_tx_hdcp_auth_pass = 0; sp_tx_hw_hdcp_en = 0; sp_tx_hdcp_capable_chk 
= 0; sp_tx_hdcp_auth_done = 0; sp_tx_clean_hdcp_status(); pr_info("HDCP Clean!\n"); } static void sp_tx_pclk_calc(unchar *hbr_rbr) { ulong str_clk = 0; unchar c; unchar link_bw_current = *hbr_rbr; switch (link_bw_current) { case 0x14: str_clk = 540; break; case 0x0a: str_clk = 270; break; case 0x06: str_clk = 162; break; default: break; } sp_read_reg(TX_P0, M_VID_2, &c); m_val = c * 0x10000; sp_read_reg(TX_P0, M_VID_1, &c); m_val = m_val + c * 0x100; sp_read_reg(TX_P0, M_VID_0, &c); m_val = m_val + c; sp_read_reg(TX_P0, N_VID_2, &c); n_val = c * 0x10000; sp_read_reg(TX_P0, N_VID_1, &c); n_val = n_val + c * 0x100; sp_read_reg(TX_P0, N_VID_0, &c); n_val = n_val + c; str_clk = str_clk * m_val; pclk = str_clk; pclk = pclk / n_val; } void sp_tx_show_infomation(void) { unchar c, c1; uint h_res, h_act, v_res, v_act; uint h_fp, h_sw, h_bp, v_fp, v_sw, v_bp; ulong fresh_rate; pr_info("\n*******SP Video Information*******\n"); sp_read_reg(TX_P0, SP_TX_LINK_BW_SET_REG, &c); if (c == 0x06) { pr_info("BW = 1.62G\n"); sp_tx_pclk_calc(&c); } else if (c == 0x0a) { pr_info("BW = 2.7G\n"); sp_tx_pclk_calc(&c); } else if (c == 0x14) { pr_info("BW = 5.4G\n"); sp_tx_pclk_calc(&c); } #ifdef SSC_EN pr_info(" SSC On"); #else pr_info(" SSC Off"); #endif pr_info(" M = %lu, N = %lu, PCLK = %ld MHz\n", m_val, n_val, pclk); sp_read_reg(TX_P2, SP_TX_TOTAL_LINE_STA_L, &c); sp_read_reg(TX_P2, SP_TX_TOTAL_LINE_STA_H, &c1); v_res = c1; v_res = v_res << 8; v_res = v_res + c; sp_read_reg(TX_P2, SP_TX_ACT_LINE_STA_L, &c); sp_read_reg(TX_P2, SP_TX_ACT_LINE_STA_H, &c1); v_act = c1; v_act = v_act << 8; v_act = v_act + c; sp_read_reg(TX_P2, SP_TX_TOTAL_PIXEL_STA_L, &c); sp_read_reg(TX_P2, SP_TX_TOTAL_PIXEL_STA_H, &c1); h_res = c1; h_res = h_res << 8; h_res = h_res + c; sp_read_reg(TX_P2, SP_TX_ACT_PIXEL_STA_L, &c); sp_read_reg(TX_P2, SP_TX_ACT_PIXEL_STA_H, &c1); h_act = c1; h_act = h_act << 8; h_act = h_act + c; sp_read_reg(TX_P2, SP_TX_H_F_PORCH_STA_L, &c); sp_read_reg(TX_P2, SP_TX_H_F_PORCH_STA_H, &c1); 
h_fp = c1; h_fp = h_fp << 8; h_fp = h_fp + c; sp_read_reg(TX_P2, SP_TX_H_SYNC_STA_L, &c); sp_read_reg(TX_P2, SP_TX_H_SYNC_STA_H, &c1); h_sw = c1; h_sw = h_sw << 8; h_sw = h_sw + c; sp_read_reg(TX_P2, SP_TX_H_B_PORCH_STA_L, &c); sp_read_reg(TX_P2, SP_TX_H_B_PORCH_STA_H, &c1); h_bp = c1; h_bp = h_bp << 8; h_bp = h_bp + c; sp_read_reg(TX_P2, SP_TX_V_F_PORCH_STA, &c); v_fp = c; sp_read_reg(TX_P2, SP_TX_V_SYNC_STA, &c); v_sw = c; sp_read_reg(TX_P2, SP_TX_V_B_PORCH_STA, &c); v_bp = c; pr_info(" Total resolution is %d * %d\n", h_res, v_res); pr_info(" HF=%d, HSW=%d, HBP=%d\n", h_fp, h_sw, h_bp); pr_info(" VF=%d, VSW=%d, VBP=%d\n", v_fp, v_sw, v_bp); pr_info(" Active resolution is %d * %d ", h_act, v_act); fresh_rate = pclk * 1000; fresh_rate = fresh_rate / h_res; fresh_rate = fresh_rate * 1000; fresh_rate = fresh_rate / v_res; pr_info(" @ %ldHz\n", fresh_rate); sp_read_reg(TX_P0, SP_TX_VID_CTRL, &c); if ((c & 0x06) == 0x00) pr_info(" ColorSpace: RGB,"); else if ((c & 0x06) == 0x02) pr_info(" ColorSpace: YCbCr422,"); else if ((c & 0x06) == 0x04) pr_info(" ColorSpace: YCbCr444,"); sp_read_reg(TX_P0, SP_TX_VID_CTRL, &c); if ((c & 0xe0) == 0x00) pr_info(" 6 BPC"); else if ((c & 0xe0) == 0x20) pr_info(" 8 BPC"); else if ((c & 0xe0) == 0x40) pr_info(" 10 BPC"); else if ((c & 0xe0) == 0x60) pr_info(" 12 BPC"); pr_info("\n***************************\n"); } static void sp_tx_aux_wr(unchar offset) { unchar c, cnt; cnt = 0; sp_write_reg(TX_P0, SP_TX_BUF_DATA_0_REG, offset); sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG, 0x04); sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, 0x01); msleep(10); sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); while (c & AUX_OP_EN) { msleep(10); cnt++; if (cnt == 10) { pr_err("write break\n"); cnt = 0; bedid_break = 1; break; } sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); } } static void sp_tx_aux_rd(unchar len_cmd) { unchar c, cnt; cnt = 0; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG, len_cmd); sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, 0x01); msleep(10); sp_read_reg(TX_P0, 
SP_TX_AUX_CTRL_REG2, &c); while (c & AUX_OP_EN) { msleep(10); cnt++; if (cnt == 10) { pr_err("read break\n"); sp_tx_rst_aux(); bedid_break = 1; break; } sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); } } unchar sp_tx_chip_located(void) { unchar c1, c2; sp_tx_hardware_poweron(); sp_read_reg(TX_P2, SP_TX_DEV_IDL_REG, &c1); sp_read_reg(TX_P2, SP_TX_DEV_IDH_REG, &c2); if ((c1 == 0x08) && (c2 == 0x78)) { pr_info("ANX7808 BA is found.\n"); anx7808_ver_ba = 1; return 1; } else if ((c1 == 0x06) && (c2 == 0x78)) { pr_info("ANX7808 AA is found.\n"); anx7808_ver_ba = 0; return 1; } else { sp_tx_hardware_powerdown(); pr_info("ANX7808 is not found.\n"); return 0; } } void sp_tx_vbus_poweron(void) { unchar c; int i; for (i = 0; i < 5; i++) { sp_read_reg(TX_P2, SP_TX_PLL_FILTER_CTRL6, &c); c &= ~P5V_PROTECT_PD; c &= ~SHORT_PROTECT_PD; sp_write_reg(TX_P2, SP_TX_PLL_FILTER_CTRL6, c); /* enable power 3.3v out */ sp_read_reg(TX_P2, SP_TX_PLL_FILTER_CTRL11, &c); c &= ~V33_SWITCH_ON; sp_write_reg(TX_P2, SP_TX_PLL_FILTER_CTRL11, c); c |= V33_SWITCH_ON; sp_write_reg(TX_P2, SP_TX_PLL_FILTER_CTRL11, c); sp_read_reg(TX_P2, SP_TX_PLL_FILTER_CTRL6, &c); if (!(c & 0xc0)) { pr_err("3.3V output enabled\n"); return; } else { pr_err("VBUS power can not be supplied\n"); } } } void sp_tx_vbus_powerdown(void) { unchar c; /* Disableable power 3.3v out */ sp_read_reg(TX_P2, SP_TX_PLL_FILTER_CTRL11, &c); c &= ~V33_SWITCH_ON; sp_write_reg(TX_P2, SP_TX_PLL_FILTER_CTRL11, c); sp_read_reg(TX_P2, SP_TX_PLL_FILTER_CTRL6, &c); c |= P5V_PROTECT_PD | SHORT_PROTECT_PD; sp_write_reg(TX_P2, SP_TX_PLL_FILTER_CTRL6, c); pr_notice("3.3V output disabled\n"); } static void sp_tx_spread_enable(unchar benable) { unchar c; sp_read_reg(TX_P0, SSC_CTRL_REG1, &c); if (benable) { c |= SPREAD_AMP; sp_write_reg(TX_P0, SSC_CTRL_REG1, c); sp_read_reg(TX_P2, SP_TX_RST_CTRL2_REG, &c); c |= SSC_RST; sp_write_reg(TX_P2, SP_TX_RST_CTRL2_REG, c); c &= ~SSC_RST; sp_write_reg(TX_P2, SP_TX_RST_CTRL2_REG, c); sp_tx_aux_dpcdread_bytes(0x00, 
0x01, DPCD_DOWNSPREAD_CTRL, 1, &c); c |= SPREAD_AMPLITUDE; sp_tx_aux_dpcdwrite_byte(0x00, 0x01, DPCD_DOWNSPREAD_CTRL, c); } else { c &= ~SPREAD_AMP; sp_write_reg(TX_P0, SSC_CTRL_REG1, c); sp_tx_aux_dpcdread_bytes(0x00, 0x01, DPCD_DOWNSPREAD_CTRL, 1, &c); c &= ~SPREAD_AMPLITUDE; sp_tx_aux_dpcdwrite_byte(0x00, 0x01, DPCD_DOWNSPREAD_CTRL, c); } } static void sp_tx_config_ssc(enum SP_LINK_BW linkbw) { unchar c; sp_write_reg(TX_P0, SSC_CTRL_REG1, 0x00); sp_tx_aux_dpcdread_bytes(0x00, 0x00, DPCD_MAX_DOWNSPREAD, 1, &c); #ifndef SSC_1 if (linkbw == BW_54G) { sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL1, 0xc0); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL2, 0x00); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL3, 0x75); } else if (linkbw == BW_27G) { sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL1, 0x5f); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL2, 0x00); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL3, 0x75); } else { sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL1, 0x9e); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL2, 0x00); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL3, 0x6d); } #else if (linkbw == BW_54G) { sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL1, 0xdd); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL2, 0x01); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL3, 0x76); } else if (linkbw == BW_27G) { sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL1, 0xef); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL2, 0x00); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL3, 0x76); } else { sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL1, 0x8e); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL2, 0x01); sp_write_reg(TX_P0, SP_TX_DOWN_SPREADING_CTRL3, 0x6d); } #endif sp_tx_spread_enable(1); } static void sp_tx_audioinfoframe_setup(void) { int i; unchar c; sp_read_reg(RX_P1, HDMI_RX_AUDIO_TYPE_REG, &c); sp_tx_audioinfoframe.type = c; sp_read_reg(RX_P1, HDMI_RX_AUDIO_VER_REG, &c); sp_tx_audioinfoframe.version = c; sp_read_reg(RX_P1, HDMI_RX_AUDIO_LEN_REG, &c); sp_tx_audioinfoframe.length = c; 
for (i = 0; i < 11; i++) { sp_read_reg(RX_P1, (HDMI_RX_AUDIO_DATA00_REG + i), &c); sp_tx_audioinfoframe.pb_byte[i] = c; } } static void sp_tx_enable_audio_output(unchar benable) { unchar c; sp_read_reg(TX_P0, SP_TX_AUD_CTRL, &c); if (benable) { c |= AUD_EN; sp_write_reg(TX_P0, SP_TX_AUD_CTRL, c); sp_tx_audioinfoframe_setup(); sp_tx_config_packets(AUDIF_PACKETS); } else { c &= ~AUD_EN; sp_write_reg(TX_P0, SP_TX_AUD_CTRL, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c &= ~AUD_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); } } static void sp_tx_get_link_bw(unchar *bwtype) { unchar c; sp_read_reg(TX_P0, SP_TX_LINK_BW_SET_REG, &c); *bwtype = c; } static void sp_tx_config_audio(void) { unchar c, g_BW; int i; ulong M_AUD, LS_Clk = 0; ulong AUD_Freq = 0; pr_notice("##Config audio ##"); sp_tx_power_on(SP_TX_PWR_AUDIO); sp_read_reg(RX_P0, 0xCA, &c); switch (c & 0x0f) { case 0x00: AUD_Freq = 44.1; break; case 0x02: AUD_Freq = 48; break; case 0x03: AUD_Freq = 32; break; case 0x08: AUD_Freq = 88.2; break; case 0x0a: AUD_Freq = 96; break; case 0x0c: AUD_Freq = 176.4; break; case 0x0e: AUD_Freq = 192; break; default: break; } sp_tx_get_link_bw(&g_BW); switch (g_BW) { case BW_162G: LS_Clk = 162000; break; case BW_27G: LS_Clk = 270000; break; case BW_54G: LS_Clk = 540000; break; default: break; } pr_info("AUD_Freq = %ld , LS_CLK = %ld\n", AUD_Freq, LS_Clk); M_AUD = ((512 * AUD_Freq) / LS_Clk) * 32768; M_AUD = M_AUD + 0x05; sp_write_reg(TX_P1, SP_TX_AUD_INTERFACE_CTRL4, (M_AUD & 0xff)); M_AUD = M_AUD >> 8; sp_write_reg(TX_P1, SP_TX_AUD_INTERFACE_CTRL5, (M_AUD & 0xff)); sp_write_reg(TX_P1, SP_TX_AUD_INTERFACE_CTRL6, 0x00); sp_read_reg(TX_P1, SP_TX_AUD_INTERFACE_CTRL0, &c); c &= ~AUD_INTERFACE_DISABLE; sp_write_reg(TX_P1, SP_TX_AUD_INTERFACE_CTRL0, c); sp_read_reg(TX_P1, SP_TX_AUD_INTERFACE_CTRL2, &c); c |= M_AUD_ADJUST_ST; sp_write_reg(TX_P1, SP_TX_AUD_INTERFACE_CTRL2, c); /* configure layout and channel number */ sp_read_reg(RX_P0, HDMI_RX_HDMI_STATUS_REG, &c); if (c & 
HDMI_AUD_LAYOUT) { sp_read_reg(TX_P2, SP_TX_AUD_CH_NUM_REG5, &c); c |= CH_NUM_8 | AUD_LAYOUT; sp_write_reg(TX_P2, SP_TX_AUD_CH_NUM_REG5, c); } else { sp_read_reg(TX_P2, SP_TX_AUD_CH_NUM_REG5, &c); c &= ~CH_NUM_8; c &= ~AUD_LAYOUT; sp_write_reg(TX_P2, SP_TX_AUD_CH_NUM_REG5, c); } /* transfer audio chaneel status from HDMI Rx to Slinmport Tx */ for (i = 0; i < 5; i++) { sp_read_reg(RX_P0, (HDMI_RX_AUD_IN_CH_STATUS1_REG + i), &c); sp_write_reg(TX_P2, (SP_TX_AUD_CH_STATUS_REG1 + i), c); } /* enable audio */ sp_tx_enable_audio_output(1); } static void sp_tx_get_rx_bw(unchar bMax, unchar *cBw) { if (bMax) sp_tx_aux_dpcdread_bytes(0x00, 0x00, DPCD_MAX_LINK_RATE, 1, cBw); else sp_tx_aux_dpcdread_bytes(0x00, 0x01, DPCD_LINK_BW_SET, 1, cBw); } static void sp_tx_edid_read_initial(void) { unchar c; sp_write_reg(TX_P0, SP_TX_AUX_ADDR_7_0_REG, 0x50); sp_write_reg(TX_P0, SP_TX_AUX_ADDR_15_8_REG, 0); sp_read_reg(TX_P0, SP_TX_AUX_ADDR_19_16_REG, &c); c &= 0xf0; sp_write_reg(TX_P0, SP_TX_AUX_ADDR_19_16_REG, c); } static unchar sp_tx_aux_edidread_byte(unchar offset) { unchar c, i, edid[16], data_cnt, cnt; unchar bReturn = 0; cnt = 0; sp_tx_aux_wr(offset); sp_tx_aux_rd(0xf5); if ((offset == 0x00) || (offset == 0x80)) bedid_checksum = 0; data_cnt = 0; while (data_cnt < 16) { sp_read_reg(TX_P0, SP_TX_BUF_DATA_COUNT_REG, &c); c = c & 0x1f; if (c != 0) { for (i = 0; i < c; i++) { sp_read_reg(TX_P0, SP_TX_BUF_DATA_0_REG + i, &edid[i + data_cnt]); bedid_checksum = bedid_checksum + edid[i + data_cnt]; } } else { sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG, 0x01); c = ADDR_ONLY_BIT | AUX_OP_EN; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); while (c & 0x01) { msleep(1); cnt++; if (cnt == 10) { pr_err("read break"); sp_tx_rst_aux(); bedid_break = 1; bReturn = 0x01; } sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); } bReturn = 0x02; return bReturn; } data_cnt = data_cnt + c; if (data_cnt < 16) { sp_tx_rst_aux(); msleep(10); c = 0x05 | ((0x0f - data_cnt) << 4); 
sp_tx_aux_rd(c); } } sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG, 0x01); c = ADDR_ONLY_BIT | AUX_OP_EN; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); while (c & AUX_OP_EN) sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); if (offset < 0x80) { for (i = 0; i < 16; i++) bedid_firstblock[offset + i] = edid[i]; } else if (offset >= 0x80) { for (i = 0; i < 16; i++) bedid_extblock[offset - 0x80 + i] = edid[i]; } if ((offset == 0x70) || (offset == 0xf0)) { bedid_checksum &= 0xff; bedid_checksum = bedid_checksum - edid[15]; bedid_checksum = ~bedid_checksum + 1; if (bedid_checksum != edid[15]) bedid_checksum = edid[15]; } #ifdef EDID_DEBUG_PRINT for (i = 0; i < 16; i++) { if ((i & 0x0f) == 0) pr_info("\n edid: [%.2x] %.2x ", (unsigned int)offset, (uint)edid[i]); else pr_info("%.2x ", (uint)edid[i]); if ((i & 0x0f) == 0x0f) pr_info("\n"); } #endif return bReturn; } static void sp_tx_parse_segments_edid(unchar segment, unchar offset) { unchar c, cnt; int i; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG, 0x04); sp_write_reg(TX_P0, SP_TX_AUX_ADDR_7_0_REG, 0x30); c = ADDR_ONLY_BIT | AUX_OP_EN; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); sp_tx_wait_aux_finished(); sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG, &c); sp_write_reg(TX_P0, SP_TX_BUF_DATA_0_REG, segment); sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG, 0x04); sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); c &= ~ADDR_ONLY_BIT; c |= AUX_OP_EN; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); cnt = 0; sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); while (c & AUX_OP_EN) { msleep(1); cnt++; if (cnt == 10) { pr_err("write break"); sp_tx_rst_aux(); cnt = 0; bedid_break = 1; return; } sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); } sp_write_reg(TX_P0, SP_TX_AUX_ADDR_7_0_REG, 0x50); c = ADDR_ONLY_BIT | AUX_OP_EN; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); sp_tx_aux_wr(offset); c = ADDR_ONLY_BIT | AUX_OP_EN; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); sp_tx_aux_rd(0xf5); cnt = 0; 
for (i = 0; i < 16; i++) { sp_read_reg(TX_P0, SP_TX_BUF_DATA_COUNT_REG, &c); while ((c & 0x1f) == 0) { cnt++; sp_read_reg(TX_P0, SP_TX_BUF_DATA_COUNT_REG, &c); if (cnt == 10) { pr_err("read break"); sp_tx_rst_aux(); bedid_break = 1; return; } } sp_read_reg(TX_P0, SP_TX_BUF_DATA_0_REG + i, &c); } sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG, 0x01); c = ADDR_ONLY_BIT | AUX_OP_EN; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); while (c & AUX_OP_EN) sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); } static unchar sp_tx_get_edid_block(void) { unchar c; sp_tx_aux_wr(0x00); sp_tx_aux_rd(0x01); sp_read_reg(TX_P0, SP_TX_BUF_DATA_0_REG, &c); sp_tx_aux_wr(0x7e); sp_tx_aux_rd(0x01); sp_read_reg(TX_P0, SP_TX_BUF_DATA_0_REG, &c); pr_info("EDID Block = %d\n", (int)(c + 1)); if (c > 3) bedid_break = 1; return c; } static void sp_tx_addronly_set(unchar bSet) { unchar c; sp_read_reg(TX_P0, SP_TX_AUX_CTRL_REG2, &c); if (bSet) { c |= ADDR_ONLY_BIT; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); } else { c &= ~ADDR_ONLY_BIT; sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, c); } } static void sp_tx_load_packet(enum PACKETS_TYPE type) { int i; unchar c; switch (type) { case AVI_PACKETS: sp_write_reg(TX_P2, SP_TX_AVI_TYPE, 0x82); sp_write_reg(TX_P2, SP_TX_AVI_VER, 0x02); sp_write_reg(TX_P2, SP_TX_AVI_LEN, 0x0d); for (i = 0; i < 13; i++) { sp_write_reg(TX_P2, SP_TX_AVI_DB0 + i, sp_tx_packet_avi.AVI_data[i]); } break; case SPD_PACKETS: sp_write_reg(TX_P2, SP_TX_SPD_TYPE, 0x83); sp_write_reg(TX_P2, SP_TX_SPD_VER, 0x01); sp_write_reg(TX_P2, SP_TX_SPD_LEN, 0x19); for (i = 0; i < 25; i++) { sp_write_reg(TX_P2, SP_TX_SPD_DB0 + i, sp_tx_packet_spd.SPD_data[i]); } break; case VSI_PACKETS: sp_write_reg(TX_P2, SP_TX_MPEG_TYPE, 0x81); sp_write_reg(TX_P2, SP_TX_MPEG_VER, 0x01); sp_read_reg(RX_P1, HDMI_RX_MPEG_LEN_REG, &c); sp_write_reg(TX_P2, SP_TX_MPEG_LEN, c); for (i = 0; i < 10; i++) { sp_write_reg(TX_P2, SP_TX_MPEG_DB0 + i, sp_tx_packet_mpeg.MPEG_data[i]); } break; case 
MPEG_PACKETS: sp_write_reg(TX_P2, SP_TX_MPEG_TYPE, 0x85); sp_write_reg(TX_P2, SP_TX_MPEG_VER, 0x01); sp_write_reg(TX_P2, SP_TX_MPEG_LEN, 0x0d); for (i = 0; i < 10; i++) { sp_write_reg(TX_P2, SP_TX_MPEG_DB0 + i, sp_tx_packet_mpeg.MPEG_data[i]); } break; case AUDIF_PACKETS: sp_write_reg(TX_P2, SP_TX_AUD_TYPE, 0x84); sp_write_reg(TX_P2, SP_TX_AUD_VER, 0x01); sp_write_reg(TX_P2, SP_TX_AUD_LEN, 0x0a); for (i = 0; i < 10; i++) { sp_write_reg(TX_P2, SP_TX_AUD_DB0 + i, sp_tx_audioinfoframe.pb_byte[i]); } break; default: break; } } void sp_tx_config_packets(enum PACKETS_TYPE bType) { unchar c; switch (bType) { case AVI_PACKETS: sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c &= ~AVI_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_tx_load_packet(AVI_PACKETS); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= AVI_IF_UD; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= AVI_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); break; case SPD_PACKETS: sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c &= ~SPD_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_tx_load_packet(SPD_PACKETS); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= SPD_IF_UD; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= SPD_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); break; case VSI_PACKETS: sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c &= ~MPEG_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_tx_load_packet(VSI_PACKETS); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= MPEG_IF_UD; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= MPEG_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); break; case MPEG_PACKETS: sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c &= ~MPEG_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_tx_load_packet(MPEG_PACKETS); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= MPEG_IF_UD; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= MPEG_IF_EN; 
sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); break; case AUDIF_PACKETS: sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c &= ~AUD_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_tx_load_packet(AUDIF_PACKETS); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= AUD_IF_UP; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= AUD_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); break; default: break; } } void sp_tx_avi_setup(void) { unchar c; int i; for (i = 0; i < 13; i++) { sp_read_reg(RX_P1, (HDMI_RX_AVI_DATA00_REG + i), &c); sp_tx_packet_avi.AVI_data[i] = c; } } static unchar sp_tx_bw_lc_sel(void) { unchar over_bw = 0; uint pixel_clk = 0; enum HDMI_color_depth hdmi_input_color_depth = Hdmi_legacy; unchar c; pr_info("input pclk = %d\n", (unsigned int)pclk); sp_read_reg(RX_P0, HDMI_RX_VIDEO_STATUS_REG1, &c); c &= COLOR_DEPTH; if (c == 0x00) hdmi_input_color_depth = Hdmi_legacy; else if (c == 0x40) hdmi_input_color_depth = Hdmi_24bit; else if (c == 0x50) hdmi_input_color_depth = Hdmi_30bit; else if (c == 0x60) hdmi_input_color_depth = Hdmi_36bit; else pr_warn("HDMI input color depth is not supported .\n"); switch (hdmi_input_color_depth) { case Hdmi_legacy: case Hdmi_24bit: pixel_clk = pclk; break; case Hdmi_30bit: pixel_clk = pclk * 5 / 4; break; case Hdmi_36bit: pixel_clk = pclk * 3 / 2; break; default: break; } if (pixel_clk <= 54) { sp_tx_bw = BW_162G; over_bw = 0; } else if ((54 < pixel_clk) && (pixel_clk <= 90)) { if (sp_tx_bw >= BW_27G) { sp_tx_bw = BW_27G; over_bw = 0; } else over_bw = 1; } else if ((90 < pixel_clk) && (pixel_clk <= 180)) { if (sp_tx_bw >= BW_54G) { sp_tx_bw = BW_54G; over_bw = 0; } else over_bw = 1; } else over_bw = 1; if (over_bw) pr_err("over bw!\n"); else pr_notice("The optimized BW =%.2x\n", sp_tx_bw); return over_bw; } unchar sp_tx_hw_link_training(void) { unchar c; if (!sp_tx_hw_lt_enable) { pr_notice("Hardware link training"); if (!sp_tx_get_pll_lock_status()) { pr_err("PLL not lock!"); return 1; } 
sp_write_reg(TX_P0, SP_TX_LINK_BW_SET_REG, sp_tx_bw); pr_info("initial BW = %.2x\n",(uint)sp_tx_bw); sp_tx_enhancemode_set(); sp_tx_aux_dpcdread_bytes(0x00, 0x06, 0x00, 0x01, &c); c |= 0x01; sp_tx_aux_dpcdwrite_byte(0x00, 0x06, 0x00, c); sp_write_reg(TX_P0, SP_TX_LT_CTRL_REG, SP_TX_LT_EN); sp_tx_hw_lt_enable = 1; return 1; } if (sp_tx_hw_lt_done) { sp_tx_aux_dpcdread_bytes(0x00, 0x02, 0x02, 1, bytebuf); if ((bytebuf[0] & 0x07) != 0x07) { sp_tx_hw_lt_enable = 0; sp_tx_hw_lt_done = 0; return 1; } else { sp_tx_hw_lt_done = 1; if (!sp_tx_test_lt) { /*In link cts4.3.1.9, need to link training from the lowest swing, so swing adjust needs to be moved here*/ sp_write_reg(TX_P0, SP_TX_LT_SET_REG, 0x0a); if (sp_tx_link_err_check()) { c = PRE_EMP_LEVEL1 | DRVIE_CURRENT_LEVEL1; sp_write_reg(TX_P0, SP_TX_LT_SET_REG, c); if (sp_tx_link_err_check()) sp_write_reg(TX_P0, SP_TX_LT_SET_REG, 0x0a); } sp_read_reg(TX_P0, SP_TX_LINK_BW_SET_REG, &c); if (c != sp_tx_bw) { sp_tx_hw_lt_done = 0; sp_tx_hw_lt_enable = 0; return 1; } } sp_tx_set_sys_state(STATE_CONFIG_OUTPUT); return 0; } } return 1; } uint sp_tx_link_err_check(void) { uint errl = 0, errh = 0; sp_tx_aux_dpcdread_bytes(0x00, 0x02, 0x10, 2, bytebuf); msleep(5); sp_tx_aux_dpcdread_bytes(0x00, 0x02, 0x10, 2, bytebuf); errh = bytebuf[1]; if (errh & 0x80) { errl = bytebuf[0]; errh = (errh & 0x7f) << 8; errl = errh + errl; } pr_err(" Err of Lane = %d\n", errl); return errl; } unchar sp_tx_lt_pre_config(void) { unchar c; unchar link_bw = 0; if (!sp_tx_link_config_done) { sp_tx_get_rx_bw(1, &c); switch (c) { case 0x06: sp_tx_bw = BW_162G; break; case 0x0a: sp_tx_bw = BW_27G; break; case 0x14: sp_tx_bw = BW_54G; break; default: sp_tx_bw = BW_54G; break; } if ((sp_tx_bw != BW_27G) && (sp_tx_bw != BW_162G) && (sp_tx_bw != BW_54G)) return 1; sp_tx_power_on(SP_TX_PWR_VIDEO); sp_tx_video_mute(1); sp_tx_enable_video_input(1); sp_read_reg(TX_P0, SP_TX_SYS_CTRL2_REG, &c); sp_write_reg(TX_P0, SP_TX_SYS_CTRL2_REG, c); sp_read_reg(TX_P0, 
SP_TX_SYS_CTRL2_REG, &c); if (c & CHA_STA) { pr_err("Stream clock not stable!\n"); return 1; } sp_read_reg(TX_P0, SP_TX_SYS_CTRL3_REG, &c); sp_write_reg(TX_P0, SP_TX_SYS_CTRL3_REG, c); sp_read_reg(TX_P0, SP_TX_SYS_CTRL3_REG, &c); if (!(c & STRM_VALID)) { pr_err("video stream not valid!\n"); return 1; } sp_write_reg(TX_P0, SP_TX_LINK_BW_SET_REG, 0x14); sp_tx_get_link_bw(&link_bw); sp_tx_pclk_calc(&link_bw); if (sp_tx_test_lt) { sp_tx_bw = sp_tx_test_bw; sp_tx_test_lt = 0; /*Link CTS 4.3.3.1, need to send the video timing 640x480p@60Hz, 18-bit*/ sp_read_reg(TX_P2, SP_TX_VID_CTRL2_REG, &c); c = (c & 0x8f); sp_write_reg(TX_P2, SP_TX_VID_CTRL2_REG, c); } else { /* Optimize the LT to get minimum power consumption */ if (sp_tx_bw_lc_sel()) { pr_err("****Over bandwidth****\n"); return 1; } } /*Diable video before link training to enable idle pattern*/ sp_tx_enable_video_input(0); #ifdef SSC_EN sp_tx_config_ssc(sp_tx_bw); #else sp_tx_spread_enable(0); #endif sp_read_reg(TX_P0, SP_TX_ANALOG_PD_REG, &c); c |= CH0_PD; sp_write_reg(TX_P0, SP_TX_ANALOG_PD_REG, c); msleep(1); c &= ~CH0_PD; sp_write_reg(TX_P0, SP_TX_ANALOG_PD_REG, c); sp_read_reg(TX_P0, SP_TX_PLL_CTRL_REG, &c); c |= PLL_RST; sp_write_reg(TX_P0, SP_TX_PLL_CTRL_REG, c); msleep(1); c &=~PLL_RST; sp_write_reg(TX_P0, SP_TX_PLL_CTRL_REG, c); sp_tx_link_config_done = 1; } return 0; } void sp_tx_video_mute(unchar enable) { unchar c; if (enable) { sp_read_reg(TX_P2, SP_TX_VID_CTRL1_REG, &c); c |= VIDEO_MUTE; sp_write_reg(TX_P2, SP_TX_VID_CTRL1_REG, c); } else { sp_read_reg(TX_P2, SP_TX_VID_CTRL1_REG, &c); c &= ~VIDEO_MUTE; sp_write_reg(TX_P2, SP_TX_VID_CTRL1_REG, c); } } void sp_tx_send_message(enum SP_TX_SEND_MSG message) { unchar c; switch (message) { case MSG_OCM_EN: sp_tx_aux_dpcdwrite_byte(0x00, 0x05, 0x25, 0x5a); break; case MSG_INPUT_HDMI: sp_tx_aux_dpcdwrite_byte(0x00, 0x05, 0x26, 0x01); break; case MSG_INPUT_DVI: sp_tx_aux_dpcdwrite_byte(0x00, 0x05, 0x26, 0x00); break; case MSG_CLEAR_IRQ: 
sp_tx_aux_dpcdread_bytes(0x00, 0x04, 0x10, 1, &c); c |= 0x01; sp_tx_aux_dpcdwrite_byte(0x00, 0x04, 0x10, c); break; } } unchar sp_tx_get_cable_type(void) { unchar SINK_OUI[8] = { 0 }; unchar ds_port_preset = 0; unchar ds_port_recoginze = 0; int i,j; for (i = 0; i < 5; i++) { if (AUX_ERR == sp_tx_aux_dpcdread_bytes(0x00, 0x00, DPCD_DSPORT_PRESENT, 1, &ds_port_preset)) { pr_err(" AUX access error"); /*Add time delay for VGA dongle bootup*/ msleep(200); continue; } for (j = 0; j < 0x0c; j++) sp_tx_aux_dpcdread_bytes(0x00, 0x00, j, 1, bytebuf); switch (ds_port_preset & 0x07) { case 0x00: sp_tx_rx_mydp = 1; sp_tx_rx_anx7730 = 0; ds_port_recoginze = 1; pr_notice("Downstream is DP dongle."); break; case 0x03: sp_tx_aux_dpcdread_bytes(0x00, 0x04, 0x00, 8, SINK_OUI); if ((SINK_OUI[0] == 0x00) && (SINK_OUI[1] == 0x22) && (SINK_OUI[2] == 0xb9) && (SINK_OUI[3] == 0x61) && (SINK_OUI[4] == 0x39) && (SINK_OUI[5] == 0x38) && (SINK_OUI[6] == 0x33)) { pr_notice("Downstream is VGA dongle."); sp_tx_rx_anx7730 = 0; sp_tx_rx_mydp = 0; } else { sp_tx_rx_mydp = 1; sp_tx_rx_anx7730 = 0; pr_notice("Downstream is general DP2VGA converter."); } ds_port_recoginze = 1; break; case 0x05: sp_tx_aux_dpcdread_bytes(0x00, 0x04, 0x00, 8, SINK_OUI); if ((SINK_OUI[0] == 0xb9) && (SINK_OUI[1] == 0x22) && (SINK_OUI[2] == 0x00) && (SINK_OUI[3] == 0x00) && (SINK_OUI[4] == 0x00) && (SINK_OUI[5] == 0x00) && (SINK_OUI[6] == 0x00)) { pr_notice("Downstream is HDMI dongle."); sp_tx_send_message(MSG_OCM_EN); sp_tx_rx_anx7730 = 1; sp_tx_rx_mydp = 0; } else { sp_tx_rx_mydp = 1; sp_tx_rx_anx7730 = 0; pr_notice("Downstream is general DP2HDMI converter."); } ds_port_recoginze = 1; break; default: pr_err("Downstream can not recognized."); sp_tx_rx_anx7730 = 0; sp_tx_rx_mydp = 0; ds_port_recoginze = 0; break; } if (ds_port_recoginze) return 1; } return 0; } bool sp_tx_get_hdmi_connection(void) { unchar c; msleep(200); sp_tx_aux_dpcdread_bytes(0x00, 0x05, 0x18, 1, &c); if ((c & 0x41) == 0x41) return TRUE; else return 
FALSE; } bool sp_tx_get_vga_connection(void) { unchar c; sp_tx_aux_dpcdread_bytes(0x00, 0x02, DPCD_SINK_COUNT, 1, &c); if (c & 0x01) return TRUE; else return FALSE; } static bool sp_tx_get_ds_video_status(void) { unchar c; sp_tx_aux_dpcdread_bytes(0x00, 0x05, 0x27, 1, &c); if (c & 0x01) return TRUE; else return FALSE; } bool sp_tx_get_dp_connection(void) { unchar c; sp_tx_aux_dpcdread_bytes(0x00, 0x02, DPCD_SINK_COUNT, 1, &c); if (c & 0x1f) { sp_tx_aux_dpcdread_bytes(0x00, 0x00, 0x04, 1, &c); if (c & 0x20) sp_tx_aux_dpcdwrite_byte(0x00, 0x06, 0x00, 0x20); return TRUE; } else return FALSE; } void sp_tx_edid_read(void) { unchar i, j, edid_block = 0, segment = 0, offset = 0; unchar c; /*Add bandwidth check to support low resolution for VGA and myDP monitor*/ sp_tx_get_rx_bw(1, &c); sp_set_link_bw(c); sp_tx_edid_read_initial(); bedid_break = 0; sp_tx_addronly_set(1); sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG, 0x04); sp_write_reg(TX_P0, SP_TX_AUX_CTRL_REG2, 0x01); sp_tx_wait_aux_finished(); edid_block = sp_tx_get_edid_block(); if (edid_block < 2) { edid_block = 8 * (edid_block + 1); for (i = 0; i < edid_block; i++) { if (!bedid_break) sp_tx_aux_edidread_byte(i * 16); msleep(10); } sp_tx_addronly_set(0); } else { for (i = 0; i < 16; i++) { if (!bedid_break) sp_tx_aux_edidread_byte(i * 16); } sp_tx_addronly_set(0); if (!bedid_break) { edid_block = (edid_block + 1); for (i = 0; i < ((edid_block - 1) / 2); i++) { pr_notice("EXT 256 EDID block"); segment = i + 1; for (j = 0; j < 16; j++) { sp_tx_parse_segments_edid(segment, offset); offset = offset + 0x10; } } if (edid_block % 2) { pr_notice("Last block"); segment = segment + 1; for (j = 0; j < 8; j++) { sp_tx_parse_segments_edid(segment, offset); offset = offset + 0x10; } } } } sp_tx_addronly_set(0); sp_tx_rst_aux(); sp_tx_aux_dpcdread_bytes(0x00, 0x02, 0x18, 1, bytebuf); if (bytebuf[0] & 0x04) { pr_info("check sum = %.2x\n", (uint)bedid_checksum); bytebuf[0] = bedid_checksum; sp_tx_aux_dpcdwrite_bytes(0x00, 0x02, 0x61, 1, 
bytebuf); bytebuf[0] = 0x04; sp_tx_aux_dpcdwrite_bytes(0x00, 0x02, 0x60, 1, bytebuf); pr_info("Test EDID done\n"); } /*Link CTS4.3.1.1, link training needs to be fast right after EDID reading*/ sp_tx_get_rx_bw(1, &c); sp_write_reg(TX_P0, SP_TX_LINK_BW_SET_REG, c); sp_tx_enhancemode_set(); sp_write_reg(TX_P0, SP_TX_LT_CTRL_REG, SP_TX_LT_EN); /*Release the HPD after the EEPROM loaddown*/ for(i=0; i < 10; i++) { sp_read_reg(TX_P0, SP_TX_HDCP_KEY_STATUS, &c); if((c&0x07) == 0x05) return; else msleep(10); } } static void sp_tx_pll_changed_int_handler(void) { if (sp_tx_system_state > STATE_PARSE_EDID) { if (!sp_tx_get_pll_lock_status()) { pr_err("PLL:_______________PLL not lock!"); sp_tx_clean_hdcp(); sp_tx_set_sys_state(STATE_LINK_TRAINING); sp_tx_link_config_done = 0; sp_tx_hw_lt_done = 0; sp_tx_hw_lt_enable = 0; } } } static void sp_tx_auth_done_int_handler(void) { unchar c; sp_read_reg(TX_P0, SP_TX_HDCP_STATUS, &c); if (c & SP_TX_HDCP_AUTH_PASS) { sp_tx_aux_dpcdread_bytes(0x06, 0x80, 0x2a, 2, bytebuf); if (bytebuf[1] & 0x08) { /* max cascade read, fail */ sp_tx_video_mute(1); sp_tx_clean_hdcp(); pr_err("Re-authentication!"); } else { pr_notice("Authentication pass in Auth_Done"); sp_tx_hdcp_auth_pass = 1; sp_tx_hdcp_auth_fail_counter = 0; } } else { pr_err("Authentication failed in AUTH_done"); sp_tx_hdcp_auth_pass = 0; sp_tx_hdcp_auth_fail_counter++; if (sp_tx_hdcp_auth_fail_counter >= SP_TX_HDCP_FAIL_TH) { sp_tx_video_mute(1); sp_tx_clean_hdcp(); } else { sp_tx_video_mute(1); sp_tx_clean_hdcp(); pr_err("Re-authentication!"); if (sp_tx_system_state > STATE_CONFIG_OUTPUT) { sp_tx_set_sys_state(STATE_HDCP_AUTH); return; } } } sp_tx_hdcp_auth_done = 1; } static void sp_tx_link_chk_fail_int_handler(void) { if (sp_tx_system_state >= STATE_HDCP_AUTH) { sp_tx_set_sys_state(STATE_HDCP_AUTH); sp_tx_clean_hdcp(); pr_err("IRQ:____________HDCP Sync lost!"); } } static void sp_tx_lt_done_int_handler(void) { unchar c, c1; if ((sp_tx_hw_lt_done) || (sp_tx_system_state != 
STATE_LINK_TRAINING)) return; sp_read_reg(TX_P0, SP_TX_LT_CTRL_REG, &c); if (c & 0x70) { c = (c & 0x70) >> 4; pr_err("HW LT failed in interrupt,"); pr_err("ERR code = %.2x\n", (uint) c); sp_tx_link_config_done = 0; sp_tx_hw_lt_enable = 0; sp_tx_hw_lt_done = 0; sp_tx_set_sys_state(STATE_LINK_TRAINING); msleep(10); } else { sp_tx_hw_lt_done = 1; sp_read_reg(TX_P0, SP_TX_LT_SET_REG, &c); sp_read_reg(TX_P0, SP_TX_LINK_BW_SET_REG, &c1); pr_notice("HW LT succeed,LANE0_SET = %.2x,", (uint) c); pr_notice("link_bw = %.2x\n", (uint) c1); } } static void sp_tx_link_change_int_handler(void) { unchar lane0_1_status, sl_cr, al; if (sp_tx_system_state < STATE_CONFIG_OUTPUT) return; sp_tx_link_err_check(); sp_tx_aux_dpcdread_bytes(0x00, 0x02, DPCD_LANE_ALIGN_UD, 1, bytebuf); al = bytebuf[0]; sp_tx_aux_dpcdread_bytes(0x00, 0x02, DPCD_LANE0_1_STATUS, 1, bytebuf); lane0_1_status = bytebuf[0]; if (((lane0_1_status & 0x01) == 0) || ((lane0_1_status & 0x04) == 0)) sl_cr = 0; else sl_cr = 1; if (((al & 0x01) == 0) || (sl_cr == 0)) { if ((al & 0x01) == 0) pr_err("Lane align not done\n"); if (sl_cr == 0) pr_err("Lane clock recovery not done\n"); if ((sp_tx_system_state > STATE_LINK_TRAINING) && sp_tx_link_config_done) { sp_tx_link_config_done = 0; sp_tx_set_sys_state(STATE_LINK_TRAINING); pr_err("IRQ:____________re-LT request!"); } } } static void sp_tx_polling_err_int_handler(void) { unchar c; int i; if ((sp_tx_system_state < STATE_CABLE_PLUG) || sp_tx_pd_mode) return; for (i = 0; i < 5; i++) { sp_tx_aux_dpcdread_bytes(0x00, 0x00, 0x00, 1, &c); if (c == 0x11) return; msleep(2); } if (sp_tx_pd_mode == 0) { pr_err("Cwire polling is corrupted,power down ANX7808.\n"); sp_tx_clean_hdcp(); sp_tx_vbus_powerdown(); sp_tx_power_down(SP_TX_PWR_TOTAL); sp_tx_power_down(SP_TX_PWR_REG); sp_tx_hardware_powerdown(); sp_tx_set_sys_state(STATE_CABLE_PLUG); sp_tx_pd_mode = 1; sp_tx_link_config_done = 0; sp_tx_hw_lt_enable = 0; sp_tx_hw_lt_done = 0; sp_tx_rx_anx7730 = 0; sp_tx_rx_mydp = 0; } } static void 
sp_tx_irq_isr(void) { unchar c, c1, lane0_1_status, sl_cr, al; unchar IRQ_Vector, Int_vector1, Int_vector2; unchar test_vector; sp_tx_aux_dpcdread_bytes(0x00, 0x02, DPCD_SERVICE_IRQ_VECTOR, 1, bytebuf); IRQ_Vector = bytebuf[0]; sp_tx_aux_dpcdwrite_bytes(0x00, 0x02, DPCD_SERVICE_IRQ_VECTOR, 1, bytebuf); /* HDCP IRQ */ if (IRQ_Vector & CP_IRQ) { pr_info("IRQ:***CP_IRQ***!"); if (sp_tx_hdcp_auth_pass) { sp_tx_aux_dpcdread_bytes(0x06, 0x80, 0x29, 1, &c1); if (c1 & 0x04) { if (sp_tx_system_state >= STATE_HDCP_AUTH) { sp_tx_clean_hdcp(); sp_tx_set_sys_state (STATE_HDCP_AUTH); pr_err("IRQ:____________HDCP Sync lost!"); } } } } /* specific int */ if ((IRQ_Vector & SINK_SPECIFIC_IRQ) && (sp_tx_rx_anx7730)) { sp_tx_aux_dpcdread_bytes(0x00, 0x05, DPCD_SPECIFIC_INTERRUPT1, 1, &Int_vector1); sp_tx_aux_dpcdwrite_byte(0x00, 0x05, DPCD_SPECIFIC_INTERRUPT1, Int_vector1); sp_tx_aux_dpcdread_bytes(0x00, 0x05, DPCD_SPECIFIC_INTERRUPT2, 1, &Int_vector2); sp_tx_aux_dpcdwrite_byte(0x00, 0x05, DPCD_SPECIFIC_INTERRUPT2, Int_vector2); if ((Int_vector1 & 0x01) == 0x01) { sp_tx_aux_dpcdread_bytes(0x00, 0x05, 0x18, 1, &c); if (c & 0x01) pr_notice("Downstream HDMI is pluged!\n"); } if ((Int_vector1 & 0x02) == 0x02) { sp_tx_aux_dpcdread_bytes(0x00, 0x05, 0x18, 1, &c); if ((c & 0x01) != 0x01) { pr_notice("Downstream HDMI is unpluged!\n"); if ((sp_tx_system_state > STATE_CABLE_PLUG) && (!sp_tx_pd_mode)) { sp_tx_clean_hdcp(); sp_tx_power_down(SP_TX_PWR_REG); sp_tx_power_down(SP_TX_PWR_TOTAL); sp_tx_hardware_powerdown(); sp_tx_set_sys_state(STATE_CABLE_PLUG); sp_tx_pd_mode = 1; sp_tx_link_config_done = 0; sp_tx_hw_lt_done = 0; sp_tx_hw_lt_enable = 0; } } } if (((Int_vector1 & 0x04) == 0x04) && (sp_tx_system_state > STATE_CONFIG_OUTPUT)) { pr_err("Rx specific IRQ: Link is down!\n"); sp_tx_aux_dpcdread_bytes(0x00, 0x02, DPCD_LANE_ALIGN_UD, 1, bytebuf); al = bytebuf[0]; sp_tx_aux_dpcdread_bytes(0x00, 0x02, DPCD_LANE0_1_STATUS, 1, bytebuf); lane0_1_status = bytebuf[0]; if (((lane0_1_status & 0x01) == 0) 
|| ((lane0_1_status & 0x04) == 0)) sl_cr = 0; else sl_cr = 1; if (((al & 0x01) == 0) || (sl_cr == 0)) { if ((al & 0x01) == 0) pr_err("Lane align not done\n"); if (sl_cr == 0) pr_err("Lane clock recovery not done\n"); if ((sp_tx_system_state > STATE_LINK_TRAINING) && sp_tx_link_config_done) { sp_tx_link_config_done = 0; sp_tx_hw_lt_enable = 0; sp_tx_hw_lt_done = 0; sp_tx_set_sys_state (STATE_LINK_TRAINING); pr_err("IRQ:____________re-LT request!"); } } sp_tx_aux_dpcdread_bytes(0x00, 0x05, 0x18, 1, &c); if (!(c & 0x40)) { if ((sp_tx_system_state > STATE_CABLE_PLUG) && (!sp_tx_pd_mode)) { sp_tx_clean_hdcp(); sp_tx_power_down(SP_TX_PWR_REG); sp_tx_power_down(SP_TX_PWR_TOTAL); sp_tx_hardware_powerdown(); sp_tx_set_sys_state(STATE_CABLE_PLUG); sp_tx_pd_mode = 1; sp_tx_link_config_done = 0; sp_tx_hw_lt_done = 0; sp_tx_hw_lt_enable = 0; } } } if ((Int_vector1 & 0x08) == 0x08) { pr_info("Downstream HDCP is done!\n"); if ((Int_vector1 & 0x10) != 0x10) pr_info("Downstream HDCP is passed!\n"); else { if (sp_tx_system_state > STATE_CONFIG_OUTPUT) { sp_tx_video_mute(1); sp_tx_clean_hdcp(); sp_tx_set_sys_state(STATE_HDCP_AUTH); pr_err("Re-authentication due to downstream HDCP failure!"); } } } if ((Int_vector1 & 0x20) == 0x20) { pr_err(" Downstream HDCP link integrity check fail!"); if (sp_tx_system_state > STATE_HDCP_AUTH) { sp_tx_set_sys_state(STATE_HDCP_AUTH); sp_tx_clean_hdcp(); pr_err("IRQ:____________HDCP Sync lost!"); } } if ((Int_vector1 & 0x40) == 0x40) pr_info("Receive CEC command from upstream done!"); if ((Int_vector1 & 0x80) == 0x80) pr_info("CEC command transfer to downstream done!"); if ((Int_vector2 & 0x04) == 0x04) { sp_tx_aux_dpcdread_bytes(0x00, 0x05, 0x18, 1, &c); if ((c & 0x40) == 0x40) pr_notice("Downstream HDMI termination is detected!\n"); } /* specific int */ } else if ((IRQ_Vector & SINK_SPECIFIC_IRQ) && (!sp_tx_rx_anx7730)) { sp_tx_send_message(MSG_CLEAR_IRQ); sp_tx_aux_dpcdread_bytes(0x00, 0x02, 0x00, 1, &c); if (!(c & 0x01)) { if ((sp_tx_system_state 
> STATE_CABLE_PLUG) && (!sp_tx_pd_mode)) { sp_tx_power_down(SP_TX_PWR_TOTAL); sp_tx_power_down(SP_TX_PWR_REG); sp_tx_vbus_powerdown(); sp_tx_hardware_powerdown(); sp_tx_clean_hdcp(); sp_tx_pd_mode = 1; sp_tx_link_config_done = 0; sp_tx_hw_lt_enable = 0; sp_tx_hw_lt_done = 0; sp_tx_set_sys_state(STATE_CABLE_PLUG); } } sp_tx_aux_dpcdread_bytes(0x00, 0x02, DPCD_LANE_ALIGN_UD, 1, bytebuf); al = bytebuf[0]; sp_tx_aux_dpcdread_bytes(0x00, 0x02, DPCD_LANE0_1_STATUS, 1, bytebuf); lane0_1_status = bytebuf[0]; if (((lane0_1_status & 0x01) == 0) || ((lane0_1_status & 0x04) == 0)) sl_cr = 0; else sl_cr = 1; if (((al & 0x01) == 0) || (sl_cr == 0)) { if ((al & 0x01) == 0) pr_err("Lane align not done\n"); if (sl_cr == 0) pr_err("Lane clock recovery not done\n"); if ((sp_tx_system_state > STATE_LINK_TRAINING) && sp_tx_link_config_done) { sp_tx_link_config_done = 0; sp_tx_hw_lt_enable = 0; sp_tx_hw_lt_done = 0; sp_tx_set_sys_state(STATE_LINK_TRAINING); pr_err("IRQ:____________re-LT request!"); } } } /* AUTOMATED TEST IRQ */ if (IRQ_Vector & TEST_IRQ) { sp_tx_aux_dpcdread_bytes(0x00, 0x02, 0x18, 1, bytebuf); test_vector = bytebuf[0]; /*test link training*/ if (test_vector & 0x01) { sp_tx_test_lt = 1; sp_tx_aux_dpcdread_bytes(0x00, 0x02, 0x19, 1, bytebuf); sp_tx_test_bw = bytebuf[0]; pr_info(" test_bw = %.2x\n", (uint)sp_tx_test_bw); sp_tx_aux_dpcdread_bytes(0x00, 0x02, 0x60, 1, bytebuf); bytebuf[0] = bytebuf[0] | TEST_ACK; sp_tx_aux_dpcdread_bytes(0x00, 0x02, 0x60, 1, bytebuf); pr_info("Set TEST_ACK!\n"); sp_tx_set_sys_state(STATE_LINK_TRAINING); pr_info("IRQ:test-LT request!\n"); } /*test edid*/ if (test_vector & 0x04) { sp_tx_set_sys_state(STATE_PARSE_EDID); sp_tx_test_edid = 1; pr_info("Test EDID Requested!\n"); } /*phy test pattern*/ if (test_vector & 0x08) { sp_tx_aux_dpcdread_bytes(0x00, 0x02, 0x60, 1, bytebuf); bytebuf[0] = bytebuf[0] | 0x01; sp_tx_aux_dpcdread_bytes(0x00, 0x02, 0x60, 1, bytebuf); } } } static void sp_tx_sink_irq_int_handler(void) { sp_tx_irq_isr(); } void 
sp_tx_hdcp_process(void) { unchar c; int i; if (!sp_tx_hdcp_capable_chk) { sp_tx_hdcp_capable_chk = 1; sp_tx_aux_dpcdread_bytes(0x06, 0x80, 0x28, 1, &c); if (!(c & 0x01)) { pr_err("Sink is not capable HDCP"); sp_tx_video_mute(1); sp_tx_set_sys_state(STATE_PLAY_BACK); return; } } /*In case ANX730 video can not get ready*/ if (sp_tx_rx_anx7730) { if (!sp_tx_get_ds_video_status()) { if (sp_tx_ds_vid_stb_cntr == SP_TX_DS_VID_STB_TH) { sp_tx_vbus_powerdown(); sp_tx_power_down(SP_TX_PWR_REG); sp_tx_power_down(SP_TX_PWR_TOTAL); sp_tx_hardware_powerdown(); sp_tx_pd_mode = 1; sp_tx_link_config_done = 0; sp_tx_hw_lt_enable = 0; sp_tx_hw_lt_done = 0; sp_tx_rx_anx7730 = 0; sp_tx_rx_mydp = 0; sp_tx_ds_vid_stb_cntr = 0; sp_tx_set_sys_state(STATE_CABLE_PLUG); } else { sp_tx_ds_vid_stb_cntr++; msleep(100); } return; } else { sp_tx_ds_vid_stb_cntr = 0; } } if (!sp_tx_hw_hdcp_en) { /*Issue HDCP after the HDMI Rx key loaddown*/ for(i=0; i < 10; i++) { sp_read_reg(RX_P1, HDMI_RX_HDCP_STATUS_REG, &c); if(c&LOAD_KEY_DONE) break; else msleep(10); } sp_tx_power_on(SP_TX_PWR_HDCP); msleep(50); sp_tx_hw_hdcp_enable(); sp_tx_hw_hdcp_en = 1; } if (sp_tx_hdcp_auth_done) { sp_tx_hdcp_auth_done = 0; if (sp_tx_hdcp_auth_pass) { sp_tx_hdcp_encryption_enable(); sp_tx_video_mute(0); pr_notice("@@@@@@@hdcp_auth_pass@@@@@@\n"); } else { sp_tx_hdcp_encryption_disable(); sp_tx_video_mute(1); pr_notice("*********hdcp_auth_failed*********\n"); return; } sp_tx_set_sys_state(STATE_PLAY_BACK); sp_tx_show_infomation(); } } void sp_tx_set_sys_state(enum SP_TX_System_State ss) { pr_notice("SP_TX To System State: "); switch (ss) { case STATE_INIT: sp_tx_system_state = STATE_INIT; pr_notice("STATE_INIT"); break; case STATE_CABLE_PLUG: sp_tx_system_state = STATE_CABLE_PLUG; pr_notice("STATE_CABLE_PLUG"); break; case STATE_PARSE_EDID: sp_tx_system_state = STATE_PARSE_EDID; pr_notice("SP_TX_READ_PARSE_EDID"); break; case STATE_CONFIG_HDMI: sp_tx_system_state = STATE_CONFIG_HDMI; pr_notice("STATE_CONFIG_HDMI"); 
break; case STATE_CONFIG_OUTPUT: sp_tx_system_state = STATE_CONFIG_OUTPUT; pr_notice("STATE_CONFIG_OUTPUT"); break; case STATE_LINK_TRAINING: sp_tx_system_state = STATE_LINK_TRAINING; sp_tx_link_config_done = 0; sp_tx_hw_lt_enable = 0; sp_tx_hw_lt_done = 0; pr_notice("STATE_LINK_TRAINING"); break; case STATE_HDCP_AUTH: sp_tx_system_state = STATE_HDCP_AUTH; pr_notice("STATE_HDCP_AUTH"); break; case STATE_PLAY_BACK: sp_tx_system_state = STATE_PLAY_BACK; pr_notice("STATE_PLAY_BACK"); break; default: break; } } void sp_tx_int_irq_handler(void) { unchar c1, c2, c3, c4, c5; sp_tx_get_int_status(COMMON_INT_1, &c1); sp_tx_get_int_status(COMMON_INT_2, &c2); sp_tx_get_int_status(COMMON_INT_3, &c3); sp_tx_get_int_status(COMMON_INT_4, &c4); sp_tx_get_int_status(SP_INT_STATUS, &c5); if (c1 & PLL_LOCK_CHG) sp_tx_pll_changed_int_handler(); if (c2 & HDCP_AUTH_DONE) sp_tx_auth_done_int_handler(); if (c3 & HDCP_LINK_CHK_FAIL) sp_tx_link_chk_fail_int_handler(); if (c5 & DPCD_IRQ_REQUEST) sp_tx_sink_irq_int_handler(); if (c5 & POLLING_ERR) sp_tx_polling_err_int_handler(); if (c5 & TRAINING_Finish) sp_tx_lt_done_int_handler(); if (c5 & LINK_CHANGE) sp_tx_link_change_int_handler(); } /* ***************************************************************** */ /* Functions defination for HDMI Input */ /* ***************************************************************** */ void hdmi_rx_set_hpd(unchar enable) { unchar c; if (enable) { /* set HPD high */ sp_read_reg(TX_P2, SP_TX_VID_CTRL3_REG, &c); c |= HPD_OUT; sp_write_reg(TX_P2, SP_TX_VID_CTRL3_REG, c); pr_notice("HPD high is issued\n"); } else { /* set HPD low */ sp_read_reg(TX_P2, SP_TX_VID_CTRL3_REG, &c); c &= ~HPD_OUT; sp_write_reg(TX_P2, SP_TX_VID_CTRL3_REG, c); pr_notice("HPD low is issued\n"); } } void hdmi_rx_set_termination(unchar enable) { unchar c; if (enable) { /* set termination high */ sp_read_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG6, &c); c &= ~TERM_PD; sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG6, c); pr_notice("Termination high is 
issued\n"); } else { /* set termination low */ sp_read_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG6, &c); c |= TERM_PD; sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG6, c); pr_notice("Termination low is issued\n"); } } static void hdmi_rx_restart_audio_chk(void) { if (hdmi_system_state == HDMI_AUDIO_CONFIG) { pr_info("WAIT_AUDIO: hdmi_rx_restart_audio_chk."); g_cts_got = 0; g_audio_got = 0; } } static void hdmi_rx_set_sys_state(enum HDMI_RX_System_State ss) { if (hdmi_system_state != ss) { pr_notice(""); hdmi_system_state = ss; switch (ss) { case HDMI_CLOCK_DET: pr_notice("HDMI_RX: HDMI_CLOCK_DET"); break; case HDMI_SYNC_DET: pr_notice("HDMI_RX: HDMI_SYNC_DET"); break; case HDMI_VIDEO_CONFIG: pr_notice("HDMI_RX: HDMI_VIDEO_CONFIG"); break; case HDMI_AUDIO_CONFIG: pr_notice("HDMI_RX: HDMI_AUDIO_CONFIG"); hdmi_rx_restart_audio_chk(); break; case HDMI_PLAYBACK: pr_notice("HDMI_RX: HDMI_PLAYBACK"); break; default: break; } } } static void hdmi_rx_mute_video(void) { unchar c; pr_info("Mute Video."); sp_read_reg(RX_P0, HDMI_RX_HDMI_MUTE_CTRL_REG, &c); c |= VID_MUTE; sp_write_reg(RX_P0, HDMI_RX_HDMI_MUTE_CTRL_REG, c); g_video_muted = 1; } static void hdmi_rx_unmute_video(void) { unchar c; pr_info("Unmute Video."); sp_read_reg(RX_P0, HDMI_RX_HDMI_MUTE_CTRL_REG, &c); c &= ~VID_MUTE; sp_write_reg(RX_P0, HDMI_RX_HDMI_MUTE_CTRL_REG, c); g_video_muted = 0; } static void hdmi_rx_mute_audio(void) { unchar c; pr_info("Mute Audio."); sp_read_reg(RX_P0, HDMI_RX_HDMI_MUTE_CTRL_REG, &c); c |= AUD_MUTE; sp_write_reg(RX_P0, HDMI_RX_HDMI_MUTE_CTRL_REG, c); g_audio_muted = 1; } static void hdmi_rx_unmute_audio(void) { unchar c; pr_info("Unmute Audio."); sp_read_reg(RX_P0, HDMI_RX_HDMI_MUTE_CTRL_REG, &c); c &= ~AUD_MUTE; sp_write_reg(RX_P0, HDMI_RX_HDMI_MUTE_CTRL_REG, c); g_audio_muted = 0; } static unchar hdmi_rx_is_video_change(void) { unchar ch, cl; ulong n; sp_read_reg(RX_P0, HDMI_RX_HTOTAL_LOW, &cl); sp_read_reg(RX_P0, HDMI_RX_HTOTAL_HIGH, &ch); n = ch; n = (n << 8) + cl; if ((g_cur_h_res < (n - 10)) 
|| (g_cur_h_res > (n + 10))) { pr_err("H_Res changed."); pr_err("Current H_Res = %ld\n", n); return 1; } sp_read_reg(RX_P0, HDMI_RX_VTOTAL_LOW, &cl); sp_read_reg(RX_P0, HDMI_RX_VTOTAL_HIGH, &ch); n = ch; n = (n << 8) + cl; if ((g_cur_v_res < (n - 10)) || (g_cur_v_res > (n + 10))) { pr_err("V_Res changed.\n"); pr_err("Current V_Res = %ld\n", n); return 1; } sp_read_reg(RX_P0, HDMI_RX_HDMI_STATUS_REG, &cl); cl &= HDMI_MODE; if (g_hdmi_dvi_status != cl) { pr_err("DVI to HDMI or HDMI to DVI Change."); return 1; } return 0; } static void hdmi_rx_get_video_info(void) { unchar ch, cl; uint n; sp_read_reg(RX_P0, HDMI_RX_HTOTAL_LOW, &cl); sp_read_reg(RX_P0, HDMI_RX_HTOTAL_HIGH, &ch); n = ch; n = (n << 8) + cl; g_cur_h_res = n; sp_read_reg(RX_P0, HDMI_RX_VTOTAL_LOW, &cl); sp_read_reg(RX_P0, HDMI_RX_VTOTAL_HIGH, &ch); n = ch; n = (n << 8) + cl; g_cur_v_res = n; sp_read_reg(RX_P0, HDMI_RX_VID_PCLK_CNTR_REG, &cl); g_cur_pix_clk = cl; sp_read_reg(RX_P0, HDMI_RX_HDMI_STATUS_REG, &cl); g_hdmi_dvi_status = ((cl & HDMI_MODE) == HDMI_MODE); } static void hdmi_rx_show_video_info(void) { unchar c, c1; unchar cl, ch; ulong n; ulong h_res, v_res; sp_read_reg(RX_P0, HDMI_RX_HACT_LOW, &cl); sp_read_reg(RX_P0, HDMI_RX_HACT_HIGH, &ch); n = ch; n = (n << 8) + cl; h_res = n; sp_read_reg(RX_P0, HDMI_RX_VACT_LOW, &cl); sp_read_reg(RX_P0, HDMI_RX_VACT_HIGH, &ch); n = ch; n = (n << 8) + cl; v_res = n; pr_info(""); pr_info("*****************HDMI_RX Info*******************"); pr_info("HDMI_RX Is Normally Play Back.\n"); sp_read_reg(RX_P0, HDMI_RX_HDMI_STATUS_REG, &c); if (c & HDMI_MODE) pr_info("HDMI_RX Mode = HDMI Mode.\n"); else pr_info("HDMI_RX Mode = DVI Mode.\n"); sp_read_reg(RX_P0, HDMI_RX_VIDEO_STATUS_REG1, &c); if (c & VIDEO_TYPE) v_res += v_res; pr_info("HDMI_RX Video Resolution = %ld * %ld ", h_res, v_res); sp_read_reg(RX_P0, HDMI_RX_VIDEO_STATUS_REG1, &c); if (c & VIDEO_TYPE) pr_info(" Interlace Video."); else pr_info(" Progressive Video."); sp_read_reg(RX_P0, HDMI_RX_SYS_CTRL1_REG, &c); 
if ((c & 0x30) == 0x00) pr_info("Input Pixel Clock = Not Repeated.\n"); else if ((c & 0x30) == 0x10) pr_info("Input Pixel Clock = 2x Video Clock. Repeated.\n"); else if ((c & 0x30) == 0x30) pr_info("Input Pixel Clock = 4x Vvideo Clock. Repeated.\n"); if ((c & 0xc0) == 0x00) pr_info("Output Video Clock = Not Divided.\n"); else if ((c & 0xc0) == 0x40) pr_info("Output Video Clock = Divided By 2.\n"); else if ((c & 0xc0) == 0xc0) pr_info("Output Video Clock = Divided By 4.\n"); if (c & 0x02) pr_info("Output Video Using Rising Edge To Latch Data.\n"); else pr_info("Output Video Using Falling Edge To Latch Data.\n"); pr_info("Input Video Color Depth = "); sp_read_reg(RX_P0, 0x70, &c1); c1 &= 0xf0; if (c1 == 0x00) pr_info("Legacy Mode.\n"); else if (c1 == 0x40) pr_info("24 Bit Mode.\n"); else if (c1 == 0x50) pr_info("30 Bit Mode.\n"); else if (c1 == 0x60) pr_info("36 Bit Mode.\n"); else if (c1 == 0x70) pr_info("48 Bit Mode.\n"); pr_info("Input Video Color Space = "); sp_read_reg(RX_P1, HDMI_RX_AVI_DATA00_REG, &c); c &= 0x60; if (c == 0x20) pr_info("YCbCr4:2:2 .\n"); else if (c == 0x40) pr_info("YCbCr4:4:4 .\n"); else if (c == 0x00) pr_info("RGB.\n"); else pr_info("Unknow 0x44 = 0x%.2x\n", (int)c); sp_read_reg(RX_P1, HDMI_RX_HDCP_STATUS_REG, &c); if (c & AUTH_EN) pr_info("Authentication is attempted."); else pr_info("Authentication is not attempted."); for (cl = 0; cl < 20; cl++) { sp_read_reg(RX_P1, HDMI_RX_HDCP_STATUS_REG, &c); if (c & DECRYPT_EN) break; else msleep(10); } if (cl < 20) pr_info("Decryption is active."); else pr_info("Decryption is not active."); pr_info("********************************************************"); pr_info(""); } static void hdmi_rx_show_audio_info(void) { unchar c; pr_info("Audio Fs = "); sp_read_reg(RX_P0, HDMI_RX_AUD_IN_CH_STATUS4_REG, &c); c &= 0x0f; switch (c) { case 0x00: pr_info("44.1 KHz."); break; case 0x02: pr_info("48 KHz."); break; case 0x03: pr_info("32 KHz."); break; case 0x08: pr_info("88.2 KHz."); break; case 0x0a: 
pr_info("96 KHz."); break; case 0x0e: pr_info("192 KHz."); break; default: break; } pr_info(""); } static void hdmi_rx_init_var(void) { hdmi_rx_set_sys_state(HDMI_CLOCK_DET); g_cur_h_res = 0; g_cur_v_res = 0; g_cur_pix_clk = 0; g_video_muted = 1; g_audio_muted = 1; g_audio_stable_cntr = 0; g_video_stable_cntr = 0; g_sync_expire_cntr = 0; g_hdcp_err_cnt = 0; g_hdmi_dvi_status = VID_DVI_MODE; g_cts_got = 0; g_audio_got = 0; g_vsi_got = 0; g_no_vsi_counter = 0; } static void hdmi_rx_tmds_phy_initialization(void) { /* Set EQ Value */ sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG2, 0x00); sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG4, 0X28); sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG5, 0xe3); if (anx7808_ver_ba) { sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG7, 0x70); sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG19, 0x00); } else { sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG7, 0x50); } sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG21, 0x04); sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG22, 0x38); } void hdmi_rx_initialization(void) { unchar c; hdmi_rx_init_var(); sp_write_reg(TX_P2, SP_TX_DP_ADDR_REG1, 0xbc); c = AUD_MUTE | VID_MUTE; sp_write_reg(RX_P0, HDMI_RX_HDMI_MUTE_CTRL_REG, c); sp_read_reg(RX_P0, HDMI_RX_CHIP_CTRL_REG, &c); c |= MAN_HDMI5V_DET; c |= PLLLOCK_CKDT_EN; c |= DIGITAL_CKDT_EN; sp_write_reg(RX_P0, HDMI_RX_CHIP_CTRL_REG, c); sp_read_reg(RX_P0, HDMI_RX_AEC_CTRL_REG, &c); c |= AVC_OE; sp_write_reg(RX_P0, HDMI_RX_AEC_CTRL_REG, c); sp_read_reg(RX_P0, HDMI_RX_SRST_REG, &c); c |= HDCP_MAN_RST; sp_write_reg(RX_P0, HDMI_RX_SRST_REG, c); msleep(1); sp_read_reg(RX_P0, HDMI_RX_SRST_REG, &c); c &= ~HDCP_MAN_RST; sp_write_reg(RX_P0, HDMI_RX_SRST_REG, c); sp_read_reg(RX_P0, HDMI_RX_SRST_REG, &c); c |= SW_MAN_RST; sp_write_reg(RX_P0, HDMI_RX_SRST_REG, c); msleep(1); c &= ~SW_MAN_RST; sp_write_reg(RX_P0, HDMI_RX_SRST_REG, c); sp_read_reg(RX_P0, HDMI_RX_SRST_REG, &c); c |= TMDS_RST; sp_write_reg(RX_P0, HDMI_RX_SRST_REG, c); c = AEC_EN07 | AEC_EN06 | AEC_EN05 | AEC_EN02; sp_write_reg(RX_P0, 
HDMI_RX_AEC_EN0_REG, c); c = AEC_EN12 | AEC_EN10 | AEC_EN09 | AEC_EN08; sp_write_reg(RX_P0, HDMI_RX_AEC_EN1_REG, c); c = AEC_EN23 | AEC_EN22 | AEC_EN21 | AEC_EN20; sp_write_reg(RX_P0, HDMI_RX_AEC_EN2_REG, 0xf0); sp_read_reg(RX_P0, HDMI_RX_AEC_CTRL_REG, &c); c |= AVC_EN; sp_write_reg(RX_P0, HDMI_RX_AEC_CTRL_REG, c); sp_read_reg(RX_P0, HDMI_RX_AEC_CTRL_REG, &c); c |= AAC_EN; sp_write_reg(RX_P0, HDMI_RX_AEC_CTRL_REG, c); sp_read_reg(RX_P0, HDMI_RX_SYS_PWDN1_REG, &c); c &= ~PWDN_CTRL; sp_write_reg(RX_P0, HDMI_RX_SYS_PWDN1_REG, c); sp_write_reg(RX_P0, HDMI_RX_INT_MASK1_REG, 0xff); sp_write_reg(RX_P0, HDMI_RX_INT_MASK2_REG, 0xf3); sp_write_reg(RX_P0, HDMI_RX_INT_MASK3_REG, 0x3f); sp_write_reg(RX_P0, HDMI_RX_INT_MASK4_REG, 0x17); sp_write_reg(RX_P0, HDMI_RX_INT_MASK5_REG, 0xff); sp_write_reg(RX_P0, HDMI_RX_INT_MASK6_REG, 0xff); sp_write_reg(RX_P0, HDMI_RX_INT_MASK7_REG, 0x07); /* Range limitation for RGB input */ sp_read_reg(RX_P0, HDMI_RX_VID_DATA_RNG_CTRL_REG, &c); c |= R2Y_INPUT_LIMIT; sp_write_reg(RX_P0, HDMI_RX_VID_DATA_RNG_CTRL_REG, c); /* set GPIO control by HPD */ if (!anx7808_ver_ba) { sp_write_reg(RX_P0, HDMI_RX_PIO_CTRL, 0x02); /* generate interrupt on any received HDMI Vendor Specific packet; */ sp_write_reg(RX_P0, HDMI_RX_PKT_RX_INDU_INT_CTRL, 0x80); } c = CEC_RST; sp_write_reg(RX_P0, HDMI_RX_CEC_CTRL_REG, c); c = CEC_SPEED_27M; sp_write_reg(RX_P0, HDMI_RX_CEC_SPEED_CTRL_REG, c); c = CEC_RX_EN; sp_write_reg(RX_P0, HDMI_RX_CEC_CTRL_REG, c); hdmi_rx_tmds_phy_initialization(); hdmi_rx_set_hpd(0); hdmi_rx_set_termination(0); pr_notice("HDMI Rx is initialized..."); } static void hdmi_rx_clk_det_int(void) { unchar c; pr_notice("*HDMI_RX Interrupt: Pixel Clock Change.\n"); if (sp_tx_system_state > STATE_CONFIG_HDMI) { hdmi_rx_mute_audio(); hdmi_rx_mute_video(); sp_tx_video_mute(1); sp_tx_enable_video_input(0); sp_tx_enable_audio_output(0); sp_tx_set_sys_state(STATE_CONFIG_HDMI); if (hdmi_system_state > HDMI_CLOCK_DET) hdmi_rx_set_sys_state(HDMI_CLOCK_DET); } 
sp_read_reg(RX_P0, HDMI_RX_SYS_STATUS_REG, &c); if (c & TMDS_CLOCK_DET) { pr_err("Pixel clock existed.\n"); if (hdmi_system_state == HDMI_CLOCK_DET) hdmi_rx_set_sys_state(HDMI_SYNC_DET); } else { if (hdmi_system_state > HDMI_CLOCK_DET) hdmi_rx_set_sys_state(HDMI_CLOCK_DET); pr_err("Pixel clock lost.\n"); g_sync_expire_cntr = 0; } } static void hdmi_rx_sync_det_int(void) { unchar c; pr_notice("*HDMI_RX Interrupt: Sync Detect."); if (sp_tx_system_state > STATE_CONFIG_HDMI) { hdmi_rx_mute_audio(); hdmi_rx_mute_video(); sp_tx_video_mute(1); sp_tx_enable_video_input(0); sp_tx_enable_audio_output(0); sp_tx_set_sys_state(STATE_CONFIG_HDMI); if (hdmi_system_state > HDMI_SYNC_DET) hdmi_rx_set_sys_state(HDMI_SYNC_DET); } sp_read_reg(RX_P0, HDMI_RX_SYS_STATUS_REG, &c); if (c & TMDS_DE_DET) { pr_notice("Sync found."); if (hdmi_system_state == HDMI_SYNC_DET) hdmi_rx_set_sys_state(HDMI_VIDEO_CONFIG); g_video_stable_cntr = 0; hdmi_rx_get_video_info(); } else { pr_err("Sync lost."); if ((c & TMDS_CLOCK_DET) && (hdmi_system_state > HDMI_SYNC_DET)) hdmi_rx_set_sys_state(HDMI_SYNC_DET); else hdmi_rx_set_sys_state(HDMI_CLOCK_DET); } } static void hdmi_rx_hdmi_dvi_int(void) { unchar c; pr_notice("*HDMI_RX Interrupt: HDMI-DVI Mode Change."); sp_read_reg(RX_P0, HDMI_RX_HDMI_STATUS_REG, &c); hdmi_rx_get_video_info(); if ((c & HDMI_MODE) == HDMI_MODE) { pr_notice("hdmi_rx_hdmi_dvi_int: HDMI MODE."); if (hdmi_system_state == HDMI_PLAYBACK) hdmi_rx_set_sys_state(HDMI_AUDIO_CONFIG); } else { hdmi_rx_unmute_audio(); } } static void hdmi_rx_avmute_int(void) { unchar avmute_status, c; sp_read_reg(RX_P0, HDMI_RX_HDMI_STATUS_REG, &avmute_status); if (avmute_status & MUTE_STAT) { pr_notice("HDMI_RX AV mute packet received."); if (!g_video_muted) hdmi_rx_mute_video(); if (!g_audio_muted) hdmi_rx_mute_audio(); c = avmute_status & (~MUTE_STAT); sp_write_reg(RX_P0, HDMI_RX_HDMI_STATUS_REG, c); } } static void hdmi_rx_cts_rcv_int(void) { unchar c; g_cts_got = 1; sp_read_reg(RX_P0, 
HDMI_RX_SYS_STATUS_REG, &c); if ((hdmi_system_state == HDMI_AUDIO_CONFIG) && (c & TMDS_DE_DET)) { if (g_cts_got && g_audio_got) { if (g_audio_stable_cntr >= AUDIO_STABLE_TH) { hdmi_rx_unmute_audio(); hdmi_rx_unmute_video(); g_audio_stable_cntr = 0; hdmi_rx_show_audio_info(); hdmi_rx_set_sys_state(HDMI_PLAYBACK); sp_tx_config_audio(); } else { g_audio_stable_cntr++; } } else { g_audio_stable_cntr = 0; } } } static void hdmi_rx_audio_rcv_int(void) { unchar c; g_audio_got = 1; sp_read_reg(RX_P0, HDMI_RX_SYS_STATUS_REG, &c); if ((hdmi_system_state == HDMI_AUDIO_CONFIG) && (c & TMDS_DE_DET)) { if (g_cts_got && g_audio_got) { if (g_audio_stable_cntr >= AUDIO_STABLE_TH) { hdmi_rx_unmute_audio(); hdmi_rx_unmute_video(); g_audio_stable_cntr = 0; hdmi_rx_show_audio_info(); hdmi_rx_set_sys_state(HDMI_PLAYBACK); sp_tx_config_audio(); } else { g_audio_stable_cntr++; } } else { g_audio_stable_cntr = 0; } } } static void hdmi_rx_hdcp_error_int(void) { g_audio_got = 0; g_cts_got = 0; if (g_hdcp_err_cnt >= 40) { g_hdcp_err_cnt = 0; pr_err("Lots of hdcp error occured ..."); hdmi_rx_mute_audio(); hdmi_rx_mute_video(); /* issue hotplug */ hdmi_rx_set_hpd(0); msleep(10); hdmi_rx_set_hpd(1); } else if ((hdmi_system_state == HDMI_CLOCK_DET) || (hdmi_system_state == HDMI_SYNC_DET)) { g_hdcp_err_cnt = 0; } else { g_hdcp_err_cnt++; } } static void hdmi_rx_new_avi_int(void) { pr_notice("*HDMI_RX Interrupt: New AVI Packet."); sp_tx_avi_setup(); sp_tx_config_packets(AVI_PACKETS); } static void hdmi_rx_new_gcp_int(void) { unchar c; sp_read_reg(RX_P1, HDMI_RX_GENERAL_CTRL, &c); if (c&SET_AVMUTE) { if (!g_video_muted) hdmi_rx_mute_video(); if (!g_audio_muted) hdmi_rx_mute_audio(); } else if (c&CLEAR_AVMUTE) { if ((g_video_muted) && (hdmi_system_state >HDMI_VIDEO_CONFIG)) hdmi_rx_unmute_video(); if ((g_audio_muted) && (hdmi_system_state >HDMI_AUDIO_CONFIG)) hdmi_rx_unmute_audio(); } } static void hdmi_rx_new_vsi_int(void) { if (anx7808_ver_ba) { unchar c; unchar hdmi_video_format, vsi_header, 
v3d_structure; pr_err("*HDMI_RX Interrupt: NEW VSI packet.\n"); sp_read_reg(TX_P0, SP_TX_3D_VSC_CTRL, &c); if (!(c&INFO_FRAME_VSC_EN)) { sp_read_reg(RX_P1, HDMI_RX_MPEG_TYPE_REG, &vsi_header); sp_read_reg(RX_P1, HDMI_RX_MPEG_DATA03_REG, &hdmi_video_format); if ((vsi_header == 0x81) && ((hdmi_video_format & 0xe0) == 0x40)) { pr_info("3D VSI packet is detected. Config VSC packet\n"); /*use mpeg packet as mail box to send vsi packet*/ sp_tx_vsi_setup(); sp_tx_config_packets(VSI_PACKETS); sp_read_reg(RX_P1, HDMI_RX_MPEG_DATA05_REG, &v3d_structure); switch (v3d_structure&0xf0) { case 0x00:/*frame packing*/ v3d_structure = 0x02; break; case 0x20:/*Line alternative*/ v3d_structure = 0x03; break; case 0x30:/*Side-by-side(full)*/ v3d_structure = 0x04; break; default: v3d_structure = 0x00; pr_err("3D structure is not supported\n"); break; } sp_write_reg(TX_P0, SP_TX_VSC_DB1, v3d_structure); sp_read_reg(TX_P0, SP_TX_3D_VSC_CTRL, &c); c |= INFO_FRAME_VSC_EN; sp_write_reg(TX_P0, SP_TX_3D_VSC_CTRL, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c &= ~SPD_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= SPD_IF_UD; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); sp_read_reg(TX_P0, SP_TX_PKT_EN_REG, &c); c |= SPD_IF_EN; sp_write_reg(TX_P0, SP_TX_PKT_EN_REG, c); } } } else { g_vsi_got = 1; } } static void hdmi_rx_no_vsi_int(void) { if (anx7808_ver_ba) { unchar c; sp_read_reg(TX_P0, SP_TX_3D_VSC_CTRL, &c); if (c&INFO_FRAME_VSC_EN) { pr_err("No new VSI is received, disable VSC packet\n"); c &= ~INFO_FRAME_VSC_EN; sp_write_reg(TX_P0, SP_TX_3D_VSC_CTRL, c); sp_tx_mpeg_setup(); sp_tx_config_packets(MPEG_PACKETS); } } } void sp_tx_config_hdmi_input(void) { unchar c; unchar avmute_status, sys_status; sp_read_reg(RX_P0, HDMI_RX_SYS_STATUS_REG, &sys_status); if ((sys_status & TMDS_CLOCK_DET) && (hdmi_system_state == HDMI_CLOCK_DET)) hdmi_rx_set_sys_state(HDMI_SYNC_DET); if (hdmi_system_state == HDMI_SYNC_DET) { sp_read_reg(RX_P0, 
HDMI_RX_SYS_STATUS_REG, &c); if (!(c & TMDS_DE_DET)) { if (g_sync_expire_cntr >= SCDT_EXPIRE_TH) { pr_err("No sync for long time."); /* misc reset */ sp_read_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG18, &c); c |= PLL_RESET; sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG18, c); msleep(2); sp_read_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG18, &c); c &= ~PLL_RESET; sp_write_reg(RX_P0, HDMI_RX_TMDS_CTRL_REG18, c); hdmi_rx_set_sys_state(HDMI_CLOCK_DET); g_sync_expire_cntr = 0; } else { g_sync_expire_cntr++; } return; } else { g_sync_expire_cntr = 0; hdmi_rx_set_sys_state(HDMI_VIDEO_CONFIG); } } if (hdmi_system_state < HDMI_VIDEO_CONFIG) return; if (hdmi_rx_is_video_change()) { pr_err("Video Changed , mute video and mute audio"); g_video_stable_cntr = 0; if (!g_video_muted) hdmi_rx_mute_video(); if (!g_audio_muted) hdmi_rx_mute_audio(); } else if (g_video_stable_cntr < VIDEO_STABLE_TH) { g_video_stable_cntr++; pr_notice("WAIT_VIDEO: Wait for video stable cntr."); } else if (hdmi_system_state == HDMI_VIDEO_CONFIG) { sp_read_reg(RX_P0, HDMI_RX_HDMI_STATUS_REG, &avmute_status); if (!(avmute_status & MUTE_STAT)) { hdmi_rx_get_video_info(); hdmi_rx_unmute_video(); sp_tx_lvttl_bit_mapping(); sp_tx_set_sys_state(STATE_LINK_TRAINING); hdmi_rx_show_video_info(); sp_tx_power_down(SP_TX_PWR_AUDIO); if (g_hdmi_dvi_status) { pr_notice("HDMI mode: Video is stable."); sp_tx_send_message(MSG_INPUT_HDMI); hdmi_rx_set_sys_state(HDMI_AUDIO_CONFIG); } else { pr_notice("DVI mode: Video is stable."); sp_tx_send_message(MSG_INPUT_DVI); hdmi_rx_unmute_audio(); hdmi_rx_set_sys_state(HDMI_PLAYBACK); } } } hdmi_rx_get_video_info(); } void hdmi_rx_int_irq_handler(void) { unchar c1, c2, c3, c4, c5, c6, c7; if ((hdmi_system_state < HDMI_CLOCK_DET) || (sp_tx_system_state < STATE_CONFIG_HDMI)) return; sp_read_reg(RX_P0, HDMI_RX_INT_STATUS1_REG, &c1); sp_write_reg(RX_P0, HDMI_RX_INT_STATUS1_REG, c1); sp_read_reg(RX_P0, HDMI_RX_INT_STATUS2_REG, &c2); sp_write_reg(RX_P0, HDMI_RX_INT_STATUS2_REG, c2); sp_read_reg(RX_P0, 
HDMI_RX_INT_STATUS3_REG, &c3); sp_write_reg(RX_P0, HDMI_RX_INT_STATUS3_REG, c3); sp_read_reg(RX_P0, HDMI_RX_INT_STATUS4_REG, &c4); sp_write_reg(RX_P0, HDMI_RX_INT_STATUS4_REG, c4); sp_read_reg(RX_P0, HDMI_RX_INT_STATUS5_REG, &c5); sp_write_reg(RX_P0, HDMI_RX_INT_STATUS5_REG, c5); sp_read_reg(RX_P0, HDMI_RX_INT_STATUS6_REG, &c6); sp_write_reg(RX_P0, HDMI_RX_INT_STATUS6_REG, c6); sp_read_reg(RX_P0, HDMI_RX_INT_STATUS7_REG, &c7); sp_write_reg(RX_P0, HDMI_RX_INT_STATUS7_REG, c7); if (c1 & CKDT_CHANGE) hdmi_rx_clk_det_int(); if (c1 & SCDT_CHANGE) hdmi_rx_sync_det_int(); if (c1 & HDMI_DVI) hdmi_rx_hdmi_dvi_int(); if (c1 & SET_MUTE) hdmi_rx_avmute_int(); if (c6 & NEW_AVI) hdmi_rx_new_avi_int(); if (c7 & NEW_VS) hdmi_rx_new_vsi_int(); if (c7 & NO_VSI) hdmi_rx_no_vsi_int(); if ((c6 & NEW_AUD) || (c3 & AUD_MODE_CHANGE)) hdmi_rx_restart_audio_chk(); if (c6 & CTS_RCV) hdmi_rx_cts_rcv_int(); if (c5 & AUDIO_RCV) hdmi_rx_audio_rcv_int(); if (c2 & HDCP_ERR) hdmi_rx_hdcp_error_int(); if (c6 & NEW_CP) hdmi_rx_new_gcp_int(); } MODULE_DESCRIPTION("Slimport transmitter ANX7808 driver"); MODULE_AUTHOR("FeiWang <fwang@analogixsemi.com>"); MODULE_LICENSE("GPL");
gpl-2.0
jmztaylor/android_kernel_pantech_pororo
drivers/gpu/drm/nouveau/nvc0_instmem.c
2111
6201
/* * Copyright 2010 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_vm.h" struct nvc0_instmem_priv { struct nouveau_gpuobj *bar1_pgd; struct nouveau_channel *bar1; struct nouveau_gpuobj *bar3_pgd; struct nouveau_channel *bar3; struct nouveau_gpuobj *chan_pgd; }; int nvc0_instmem_suspend(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; dev_priv->ramin_available = false; return 0; } void nvc0_instmem_resume(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv; nv_mask(dev, 0x100c80, 0x00000001, 0x00000000); nv_wr32(dev, 0x001704, 0x80000000 | priv->bar1->ramin->vinst >> 12); nv_wr32(dev, 0x001714, 0xc0000000 | priv->bar3->ramin->vinst >> 12); dev_priv->ramin_available = true; } static void nvc0_channel_del(struct nouveau_channel **pchan) { struct nouveau_channel *chan; chan = *pchan; *pchan = NULL; if (!chan) return; nouveau_vm_ref(NULL, &chan->vm, NULL); if (drm_mm_initialized(&chan->ramin_heap)) drm_mm_takedown(&chan->ramin_heap); nouveau_gpuobj_ref(NULL, &chan->ramin); kfree(chan); } static int nvc0_channel_new(struct drm_device *dev, u32 size, struct nouveau_vm *vm, struct nouveau_channel **pchan, struct nouveau_gpuobj *pgd, u64 vm_size) { struct nouveau_channel *chan; int ret; chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) return -ENOMEM; chan->dev = dev; ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin); if (ret) { nvc0_channel_del(&chan); return ret; } ret = drm_mm_init(&chan->ramin_heap, 0x1000, size - 0x1000); if (ret) { nvc0_channel_del(&chan); return ret; } ret = nouveau_vm_ref(vm, &chan->vm, NULL); if (ret) { nvc0_channel_del(&chan); return ret; } nv_wo32(chan->ramin, 0x0200, lower_32_bits(pgd->vinst)); nv_wo32(chan->ramin, 0x0204, upper_32_bits(pgd->vinst)); nv_wo32(chan->ramin, 0x0208, lower_32_bits(vm_size - 1)); nv_wo32(chan->ramin, 0x020c, upper_32_bits(vm_size - 1)); *pchan = chan; 
return 0; } int nvc0_instmem_init(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem; struct pci_dev *pdev = dev->pdev; struct nvc0_instmem_priv *priv; struct nouveau_vm *vm = NULL; int ret; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; pinstmem->priv = priv; /* BAR3 VM */ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 3), 0, &dev_priv->bar3_vm); if (ret) goto error; ret = nouveau_gpuobj_new(dev, NULL, (pci_resource_len(pdev, 3) >> 12) * 8, 0, NVOBJ_FLAG_DONT_MAP | NVOBJ_FLAG_ZERO_ALLOC, &dev_priv->bar3_vm->pgt[0].obj[0]); if (ret) goto error; dev_priv->bar3_vm->pgt[0].refcount[0] = 1; nv50_instmem_map(dev_priv->bar3_vm->pgt[0].obj[0]); ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, NVOBJ_FLAG_ZERO_ALLOC, &priv->bar3_pgd); if (ret) goto error; ret = nouveau_vm_ref(dev_priv->bar3_vm, &vm, priv->bar3_pgd); if (ret) goto error; nouveau_vm_ref(NULL, &vm, NULL); ret = nvc0_channel_new(dev, 8192, dev_priv->bar3_vm, &priv->bar3, priv->bar3_pgd, pci_resource_len(dev->pdev, 3)); if (ret) goto error; /* BAR1 VM */ ret = nouveau_vm_new(dev, 0, pci_resource_len(pdev, 1), 0, &vm); if (ret) goto error; ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, NVOBJ_FLAG_ZERO_ALLOC, &priv->bar1_pgd); if (ret) goto error; ret = nouveau_vm_ref(vm, &dev_priv->bar1_vm, priv->bar1_pgd); if (ret) goto error; nouveau_vm_ref(NULL, &vm, NULL); ret = nvc0_channel_new(dev, 8192, dev_priv->bar1_vm, &priv->bar1, priv->bar1_pgd, pci_resource_len(dev->pdev, 1)); if (ret) goto error; /* channel vm */ ret = nouveau_vm_new(dev, 0, (1ULL << 40), 0x0008000000ULL, &vm); if (ret) goto error; ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 4096, 0, &priv->chan_pgd); if (ret) goto error; nouveau_vm_ref(vm, &dev_priv->chan_vm, priv->chan_pgd); nouveau_vm_ref(NULL, &vm, NULL); nvc0_instmem_resume(dev); return 0; error: nvc0_instmem_takedown(dev); return ret; } void 
nvc0_instmem_takedown(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv; struct nouveau_vm *vm = NULL; nvc0_instmem_suspend(dev); nv_wr32(dev, 0x1704, 0x00000000); nv_wr32(dev, 0x1714, 0x00000000); nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd); nouveau_gpuobj_ref(NULL, &priv->chan_pgd); nvc0_channel_del(&priv->bar1); nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd); nouveau_gpuobj_ref(NULL, &priv->bar1_pgd); nvc0_channel_del(&priv->bar3); nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL); nouveau_vm_ref(NULL, &vm, priv->bar3_pgd); nouveau_gpuobj_ref(NULL, &priv->bar3_pgd); nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]); nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL); dev_priv->engine.instmem.priv = NULL; kfree(priv); }
gpl-2.0
denghongcai/rk3x_kernel_3.0.36
drivers/usb/gadget/s3c-hsudc.c
2367
36404
/* linux/drivers/usb/gadget/s3c-hsudc.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * S3C24XX USB 2.0 High-speed USB controller gadget driver * * The S3C24XX USB 2.0 high-speed USB controller supports upto 9 endpoints. * Each endpoint can be configured as either in or out endpoint. Endpoints * can be configured for Bulk or Interrupt transfer mode. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/prefetch.h> #include <mach/regs-s3c2443-clock.h> #include <plat/udc.h> #define S3C_HSUDC_REG(x) (x) /* Non-Indexed Registers */ #define S3C_IR S3C_HSUDC_REG(0x00) /* Index Register */ #define S3C_EIR S3C_HSUDC_REG(0x04) /* EP Intr Status */ #define S3C_EIR_EP0 (1<<0) #define S3C_EIER S3C_HSUDC_REG(0x08) /* EP Intr Enable */ #define S3C_FAR S3C_HSUDC_REG(0x0c) /* Gadget Address */ #define S3C_FNR S3C_HSUDC_REG(0x10) /* Frame Number */ #define S3C_EDR S3C_HSUDC_REG(0x14) /* EP Direction */ #define S3C_TR S3C_HSUDC_REG(0x18) /* Test Register */ #define S3C_SSR S3C_HSUDC_REG(0x1c) /* System Status */ #define S3C_SSR_DTZIEN_EN (0xff8f) #define S3C_SSR_ERR (0xff80) #define S3C_SSR_VBUSON (1 << 8) #define S3C_SSR_HSP (1 << 4) #define S3C_SSR_SDE (1 << 3) #define S3C_SSR_RESUME (1 << 2) #define S3C_SSR_SUSPEND (1 << 1) #define S3C_SSR_RESET (1 << 0) #define S3C_SCR S3C_HSUDC_REG(0x20) /* System Control */ #define S3C_SCR_DTZIEN_EN (1 << 14) #define S3C_SCR_RRD_EN (1 << 5) #define S3C_SCR_SUS_EN (1 << 1) #define S3C_SCR_RST_EN (1 << 0) #define S3C_EP0SR S3C_HSUDC_REG(0x24) 
/* EP0 Status */ #define S3C_EP0SR_EP0_LWO (1 << 6) #define S3C_EP0SR_STALL (1 << 4) #define S3C_EP0SR_TX_SUCCESS (1 << 1) #define S3C_EP0SR_RX_SUCCESS (1 << 0) #define S3C_EP0CR S3C_HSUDC_REG(0x28) /* EP0 Control */ #define S3C_BR(_x) S3C_HSUDC_REG(0x60 + (_x * 4)) /* Indexed Registers */ #define S3C_ESR S3C_HSUDC_REG(0x2c) /* EPn Status */ #define S3C_ESR_FLUSH (1 << 6) #define S3C_ESR_STALL (1 << 5) #define S3C_ESR_LWO (1 << 4) #define S3C_ESR_PSIF_ONE (1 << 2) #define S3C_ESR_PSIF_TWO (2 << 2) #define S3C_ESR_TX_SUCCESS (1 << 1) #define S3C_ESR_RX_SUCCESS (1 << 0) #define S3C_ECR S3C_HSUDC_REG(0x30) /* EPn Control */ #define S3C_ECR_DUEN (1 << 7) #define S3C_ECR_FLUSH (1 << 6) #define S3C_ECR_STALL (1 << 1) #define S3C_ECR_IEMS (1 << 0) #define S3C_BRCR S3C_HSUDC_REG(0x34) /* Read Count */ #define S3C_BWCR S3C_HSUDC_REG(0x38) /* Write Count */ #define S3C_MPR S3C_HSUDC_REG(0x3c) /* Max Pkt Size */ #define WAIT_FOR_SETUP (0) #define DATA_STATE_XMIT (1) #define DATA_STATE_RECV (2) /** * struct s3c_hsudc_ep - Endpoint representation used by driver. * @ep: USB gadget layer representation of device endpoint. * @name: Endpoint name (as required by ep autoconfiguration). * @dev: Reference to the device controller to which this EP belongs. * @desc: Endpoint descriptor obtained from the gadget driver. * @queue: Transfer request queue for the endpoint. * @stopped: Maintains state of endpoint, set if EP is halted. * @bEndpointAddress: EP address (including direction bit). * @fifo: Base address of EP FIFO. */ struct s3c_hsudc_ep { struct usb_ep ep; char name[20]; struct s3c_hsudc *dev; const struct usb_endpoint_descriptor *desc; struct list_head queue; u8 stopped; u8 wedge; u8 bEndpointAddress; void __iomem *fifo; }; /** * struct s3c_hsudc_req - Driver encapsulation of USB gadget transfer request. * @req: Reference to USB gadget transfer request. * @queue: Used for inserting this request to the endpoint request queue. 
*/ struct s3c_hsudc_req { struct usb_request req; struct list_head queue; }; /** * struct s3c_hsudc - Driver's abstraction of the device controller. * @gadget: Instance of usb_gadget which is referenced by gadget driver. * @driver: Reference to currenty active gadget driver. * @dev: The device reference used by probe function. * @lock: Lock to synchronize the usage of Endpoints (EP's are indexed). * @regs: Remapped base address of controller's register space. * @mem_rsrc: Device memory resource used for remapping device register space. * irq: IRQ number used by the controller. * uclk: Reference to the controller clock. * ep0state: Current state of EP0. * ep: List of endpoints supported by the controller. */ struct s3c_hsudc { struct usb_gadget gadget; struct usb_gadget_driver *driver; struct device *dev; struct s3c24xx_hsudc_platdata *pd; spinlock_t lock; void __iomem *regs; struct resource *mem_rsrc; int irq; struct clk *uclk; int ep0state; struct s3c_hsudc_ep ep[]; }; #define ep_maxpacket(_ep) ((_ep)->ep.maxpacket) #define ep_is_in(_ep) ((_ep)->bEndpointAddress & USB_DIR_IN) #define ep_index(_ep) ((_ep)->bEndpointAddress & \ USB_ENDPOINT_NUMBER_MASK) static struct s3c_hsudc *the_controller; static const char driver_name[] = "s3c-udc"; static const char ep0name[] = "ep0-control"; static inline struct s3c_hsudc_req *our_req(struct usb_request *req) { return container_of(req, struct s3c_hsudc_req, req); } static inline struct s3c_hsudc_ep *our_ep(struct usb_ep *ep) { return container_of(ep, struct s3c_hsudc_ep, ep); } static inline struct s3c_hsudc *to_hsudc(struct usb_gadget *gadget) { return container_of(gadget, struct s3c_hsudc, gadget); } static inline void set_index(struct s3c_hsudc *hsudc, int ep_addr) { ep_addr &= USB_ENDPOINT_NUMBER_MASK; writel(ep_addr, hsudc->regs + S3C_IR); } static inline void __orr32(void __iomem *ptr, u32 val) { writel(readl(ptr) | val, ptr); } static void s3c_hsudc_init_phy(void) { u32 cfg; cfg = readl(S3C2443_PWRCFG) | 
S3C2443_PWRCFG_USBPHY; writel(cfg, S3C2443_PWRCFG); cfg = readl(S3C2443_URSTCON); cfg |= (S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST); writel(cfg, S3C2443_URSTCON); mdelay(1); cfg = readl(S3C2443_URSTCON); cfg &= ~(S3C2443_URSTCON_FUNCRST | S3C2443_URSTCON_PHYRST); writel(cfg, S3C2443_URSTCON); cfg = readl(S3C2443_PHYCTRL); cfg &= ~(S3C2443_PHYCTRL_CLKSEL | S3C2443_PHYCTRL_DSPORT); cfg |= (S3C2443_PHYCTRL_EXTCLK | S3C2443_PHYCTRL_PLLSEL); writel(cfg, S3C2443_PHYCTRL); cfg = readl(S3C2443_PHYPWR); cfg &= ~(S3C2443_PHYPWR_FSUSPEND | S3C2443_PHYPWR_PLL_PWRDN | S3C2443_PHYPWR_XO_ON | S3C2443_PHYPWR_PLL_REFCLK | S3C2443_PHYPWR_ANALOG_PD); cfg |= S3C2443_PHYPWR_COMMON_ON; writel(cfg, S3C2443_PHYPWR); cfg = readl(S3C2443_UCLKCON); cfg |= (S3C2443_UCLKCON_DETECT_VBUS | S3C2443_UCLKCON_FUNC_CLKEN | S3C2443_UCLKCON_TCLKEN); writel(cfg, S3C2443_UCLKCON); } static void s3c_hsudc_uninit_phy(void) { u32 cfg; cfg = readl(S3C2443_PWRCFG) & ~S3C2443_PWRCFG_USBPHY; writel(cfg, S3C2443_PWRCFG); writel(S3C2443_PHYPWR_FSUSPEND, S3C2443_PHYPWR); cfg = readl(S3C2443_UCLKCON) & ~S3C2443_UCLKCON_FUNC_CLKEN; writel(cfg, S3C2443_UCLKCON); } /** * s3c_hsudc_complete_request - Complete a transfer request. * @hsep: Endpoint to which the request belongs. * @hsreq: Transfer request to be completed. * @status: Transfer completion status for the transfer request. */ static void s3c_hsudc_complete_request(struct s3c_hsudc_ep *hsep, struct s3c_hsudc_req *hsreq, int status) { unsigned int stopped = hsep->stopped; struct s3c_hsudc *hsudc = hsep->dev; list_del_init(&hsreq->queue); hsreq->req.status = status; if (!ep_index(hsep)) { hsudc->ep0state = WAIT_FOR_SETUP; hsep->bEndpointAddress &= ~USB_DIR_IN; } hsep->stopped = 1; spin_unlock(&hsudc->lock); if (hsreq->req.complete != NULL) hsreq->req.complete(&hsep->ep, &hsreq->req); spin_lock(&hsudc->lock); hsep->stopped = stopped; } /** * s3c_hsudc_nuke_ep - Terminate all requests queued for a endpoint. 
* @hsep: Endpoint for which queued requests have to be terminated. * @status: Transfer completion status for the transfer request. */ static void s3c_hsudc_nuke_ep(struct s3c_hsudc_ep *hsep, int status) { struct s3c_hsudc_req *hsreq; while (!list_empty(&hsep->queue)) { hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); s3c_hsudc_complete_request(hsep, hsreq, status); } } /** * s3c_hsudc_stop_activity - Stop activity on all endpoints. * @hsudc: Device controller for which EP activity is to be stopped. * @driver: Reference to the gadget driver which is currently active. * * All the endpoints are stopped and any pending transfer requests if any on * the endpoint are terminated. */ static void s3c_hsudc_stop_activity(struct s3c_hsudc *hsudc, struct usb_gadget_driver *driver) { struct s3c_hsudc_ep *hsep; int epnum; hsudc->gadget.speed = USB_SPEED_UNKNOWN; for (epnum = 0; epnum < hsudc->pd->epnum; epnum++) { hsep = &hsudc->ep[epnum]; hsep->stopped = 1; s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN); } spin_unlock(&hsudc->lock); driver->disconnect(&hsudc->gadget); spin_lock(&hsudc->lock); } /** * s3c_hsudc_read_setup_pkt - Read the received setup packet from EP0 fifo. * @hsudc: Device controller from which setup packet is to be read. * @buf: The buffer into which the setup packet is read. * * The setup packet received in the EP0 fifo is read and stored into a * given buffer address. */ static void s3c_hsudc_read_setup_pkt(struct s3c_hsudc *hsudc, u16 *buf) { int count; count = readl(hsudc->regs + S3C_BRCR); while (count--) *buf++ = (u16)readl(hsudc->regs + S3C_BR(0)); writel(S3C_EP0SR_RX_SUCCESS, hsudc->regs + S3C_EP0SR); } /** * s3c_hsudc_write_fifo - Write next chunk of transfer data to EP fifo. * @hsep: Endpoint to which the data is to be written. * @hsreq: Transfer request from which the next chunk of data is written. * * Write the next chunk of data from a transfer request to the endpoint FIFO. 
* If the transfer request completes, 1 is returned, otherwise 0 is returned. */ static int s3c_hsudc_write_fifo(struct s3c_hsudc_ep *hsep, struct s3c_hsudc_req *hsreq) { u16 *buf; u32 max = ep_maxpacket(hsep); u32 count, length; bool is_last; void __iomem *fifo = hsep->fifo; buf = hsreq->req.buf + hsreq->req.actual; prefetch(buf); length = hsreq->req.length - hsreq->req.actual; length = min(length, max); hsreq->req.actual += length; writel(length, hsep->dev->regs + S3C_BWCR); for (count = 0; count < length; count += 2) writel(*buf++, fifo); if (count != max) { is_last = true; } else { if (hsreq->req.length != hsreq->req.actual || hsreq->req.zero) is_last = false; else is_last = true; } if (is_last) { s3c_hsudc_complete_request(hsep, hsreq, 0); return 1; } return 0; } /** * s3c_hsudc_read_fifo - Read the next chunk of data from EP fifo. * @hsep: Endpoint from which the data is to be read. * @hsreq: Transfer request to which the next chunk of data read is written. * * Read the next chunk of data from the endpoint FIFO and a write it to the * transfer request buffer. If the transfer request completes, 1 is returned, * otherwise 0 is returned. */ static int s3c_hsudc_read_fifo(struct s3c_hsudc_ep *hsep, struct s3c_hsudc_req *hsreq) { struct s3c_hsudc *hsudc = hsep->dev; u32 csr, offset; u16 *buf, word; u32 buflen, rcnt, rlen; void __iomem *fifo = hsep->fifo; u32 is_short = 0; offset = (ep_index(hsep)) ? S3C_ESR : S3C_EP0SR; csr = readl(hsudc->regs + offset); if (!(csr & S3C_ESR_RX_SUCCESS)) return -EINVAL; buf = hsreq->req.buf + hsreq->req.actual; prefetchw(buf); buflen = hsreq->req.length - hsreq->req.actual; rcnt = readl(hsudc->regs + S3C_BRCR); rlen = (csr & S3C_ESR_LWO) ? 
(rcnt * 2 - 1) : (rcnt * 2); hsreq->req.actual += min(rlen, buflen); is_short = (rlen < hsep->ep.maxpacket); while (rcnt-- != 0) { word = (u16)readl(fifo); if (buflen) { *buf++ = word; buflen--; } else { hsreq->req.status = -EOVERFLOW; } } writel(S3C_ESR_RX_SUCCESS, hsudc->regs + offset); if (is_short || hsreq->req.actual == hsreq->req.length) { s3c_hsudc_complete_request(hsep, hsreq, 0); return 1; } return 0; } /** * s3c_hsudc_epin_intr - Handle in-endpoint interrupt. * @hsudc - Device controller for which the interrupt is to be handled. * @ep_idx - Endpoint number on which an interrupt is pending. * * Handles interrupt for a in-endpoint. The interrupts that are handled are * stall and data transmit complete interrupt. */ static void s3c_hsudc_epin_intr(struct s3c_hsudc *hsudc, u32 ep_idx) { struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx]; struct s3c_hsudc_req *hsreq; u32 csr; csr = readl((u32)hsudc->regs + S3C_ESR); if (csr & S3C_ESR_STALL) { writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR); return; } if (csr & S3C_ESR_TX_SUCCESS) { writel(S3C_ESR_TX_SUCCESS, hsudc->regs + S3C_ESR); if (list_empty(&hsep->queue)) return; hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); if ((s3c_hsudc_write_fifo(hsep, hsreq) == 0) && (csr & S3C_ESR_PSIF_TWO)) s3c_hsudc_write_fifo(hsep, hsreq); } } /** * s3c_hsudc_epout_intr - Handle out-endpoint interrupt. * @hsudc - Device controller for which the interrupt is to be handled. * @ep_idx - Endpoint number on which an interrupt is pending. * * Handles interrupt for a out-endpoint. The interrupts that are handled are * stall, flush and data ready interrupt. 
*/ static void s3c_hsudc_epout_intr(struct s3c_hsudc *hsudc, u32 ep_idx) { struct s3c_hsudc_ep *hsep = &hsudc->ep[ep_idx]; struct s3c_hsudc_req *hsreq; u32 csr; csr = readl((u32)hsudc->regs + S3C_ESR); if (csr & S3C_ESR_STALL) { writel(S3C_ESR_STALL, hsudc->regs + S3C_ESR); return; } if (csr & S3C_ESR_FLUSH) { __orr32(hsudc->regs + S3C_ECR, S3C_ECR_FLUSH); return; } if (csr & S3C_ESR_RX_SUCCESS) { if (list_empty(&hsep->queue)) return; hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); if (((s3c_hsudc_read_fifo(hsep, hsreq)) == 0) && (csr & S3C_ESR_PSIF_TWO)) s3c_hsudc_read_fifo(hsep, hsreq); } } /** s3c_hsudc_set_halt - Set or clear a endpoint halt. * @_ep: Endpoint on which halt has to be set or cleared. * @value: 1 for setting halt on endpoint, 0 to clear halt. * * Set or clear endpoint halt. If halt is set, the endpoint is stopped. * If halt is cleared, for in-endpoints, if there are any pending * transfer requests, transfers are started. */ static int s3c_hsudc_set_halt(struct usb_ep *_ep, int value) { struct s3c_hsudc_ep *hsep = our_ep(_ep); struct s3c_hsudc *hsudc = hsep->dev; struct s3c_hsudc_req *hsreq; unsigned long irqflags; u32 ecr; u32 offset; if (value && ep_is_in(hsep) && !list_empty(&hsep->queue)) return -EAGAIN; spin_lock_irqsave(&hsudc->lock, irqflags); set_index(hsudc, ep_index(hsep)); offset = (ep_index(hsep)) ? S3C_ECR : S3C_EP0CR; ecr = readl(hsudc->regs + offset); if (value) { ecr |= S3C_ECR_STALL; if (ep_index(hsep)) ecr |= S3C_ECR_FLUSH; hsep->stopped = 1; } else { ecr &= ~S3C_ECR_STALL; hsep->stopped = hsep->wedge = 0; } writel(ecr, hsudc->regs + offset); if (ep_is_in(hsep) && !list_empty(&hsep->queue) && !value) { hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); if (hsreq) s3c_hsudc_write_fifo(hsep, hsreq); } spin_unlock_irqrestore(&hsudc->lock, irqflags); return 0; } /** s3c_hsudc_set_wedge - Sets the halt feature with the clear requests ignored * @_ep: Endpoint on which wedge has to be set. 
* * Sets the halt feature with the clear requests ignored. */ static int s3c_hsudc_set_wedge(struct usb_ep *_ep) { struct s3c_hsudc_ep *hsep = our_ep(_ep); if (!hsep) return -EINVAL; hsep->wedge = 1; return usb_ep_set_halt(_ep); } /** s3c_hsudc_handle_reqfeat - Handle set feature or clear feature requests. * @_ep: Device controller on which the set/clear feature needs to be handled. * @ctrl: Control request as received on the endpoint 0. * * Handle set feature or clear feature control requests on the control endpoint. */ static int s3c_hsudc_handle_reqfeat(struct s3c_hsudc *hsudc, struct usb_ctrlrequest *ctrl) { struct s3c_hsudc_ep *hsep; bool set = (ctrl->bRequest == USB_REQ_SET_FEATURE); u8 ep_num = ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK; if (ctrl->bRequestType == USB_RECIP_ENDPOINT) { hsep = &hsudc->ep[ep_num]; switch (le16_to_cpu(ctrl->wValue)) { case USB_ENDPOINT_HALT: if (set || (!set && !hsep->wedge)) s3c_hsudc_set_halt(&hsep->ep, set); return 0; } } return -ENOENT; } /** * s3c_hsudc_process_req_status - Handle get status control request. * @hsudc: Device controller on which get status request has be handled. * @ctrl: Control request as received on the endpoint 0. * * Handle get status control request received on control endpoint. */ static void s3c_hsudc_process_req_status(struct s3c_hsudc *hsudc, struct usb_ctrlrequest *ctrl) { struct s3c_hsudc_ep *hsep0 = &hsudc->ep[0]; struct s3c_hsudc_req hsreq; struct s3c_hsudc_ep *hsep; __le16 reply; u8 epnum; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: reply = cpu_to_le16(0); break; case USB_RECIP_INTERFACE: reply = cpu_to_le16(0); break; case USB_RECIP_ENDPOINT: epnum = le16_to_cpu(ctrl->wIndex) & USB_ENDPOINT_NUMBER_MASK; hsep = &hsudc->ep[epnum]; reply = cpu_to_le16(hsep->stopped ? 
1 : 0); break; } INIT_LIST_HEAD(&hsreq.queue); hsreq.req.length = 2; hsreq.req.buf = &reply; hsreq.req.actual = 0; hsreq.req.complete = NULL; s3c_hsudc_write_fifo(hsep0, &hsreq); } /** * s3c_hsudc_process_setup - Process control request received on endpoint 0. * @hsudc: Device controller on which control request has been received. * * Read the control request received on endpoint 0, decode it and handle * the request. */ static void s3c_hsudc_process_setup(struct s3c_hsudc *hsudc) { struct s3c_hsudc_ep *hsep = &hsudc->ep[0]; struct usb_ctrlrequest ctrl = {0}; int ret; s3c_hsudc_nuke_ep(hsep, -EPROTO); s3c_hsudc_read_setup_pkt(hsudc, (u16 *)&ctrl); if (ctrl.bRequestType & USB_DIR_IN) { hsep->bEndpointAddress |= USB_DIR_IN; hsudc->ep0state = DATA_STATE_XMIT; } else { hsep->bEndpointAddress &= ~USB_DIR_IN; hsudc->ep0state = DATA_STATE_RECV; } switch (ctrl.bRequest) { case USB_REQ_SET_ADDRESS: if (ctrl.bRequestType != (USB_TYPE_STANDARD | USB_RECIP_DEVICE)) break; hsudc->ep0state = WAIT_FOR_SETUP; return; case USB_REQ_GET_STATUS: if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) break; s3c_hsudc_process_req_status(hsudc, &ctrl); return; case USB_REQ_SET_FEATURE: case USB_REQ_CLEAR_FEATURE: if ((ctrl.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD) break; s3c_hsudc_handle_reqfeat(hsudc, &ctrl); hsudc->ep0state = WAIT_FOR_SETUP; return; } if (hsudc->driver) { spin_unlock(&hsudc->lock); ret = hsudc->driver->setup(&hsudc->gadget, &ctrl); spin_lock(&hsudc->lock); if (ctrl.bRequest == USB_REQ_SET_CONFIGURATION) { hsep->bEndpointAddress &= ~USB_DIR_IN; hsudc->ep0state = WAIT_FOR_SETUP; } if (ret < 0) { dev_err(hsudc->dev, "setup failed, returned %d\n", ret); s3c_hsudc_set_halt(&hsep->ep, 1); hsudc->ep0state = WAIT_FOR_SETUP; hsep->bEndpointAddress &= ~USB_DIR_IN; } } } /** s3c_hsudc_handle_ep0_intr - Handle endpoint 0 interrupt. * @hsudc: Device controller on which endpoint 0 interrupt has occured. * * Handle endpoint 0 interrupt when it occurs. 
EP0 interrupt could occur * when a stall handshake is sent to host or data is sent/received on * endpoint 0. */ static void s3c_hsudc_handle_ep0_intr(struct s3c_hsudc *hsudc) { struct s3c_hsudc_ep *hsep = &hsudc->ep[0]; struct s3c_hsudc_req *hsreq; u32 csr = readl(hsudc->regs + S3C_EP0SR); u32 ecr; if (csr & S3C_EP0SR_STALL) { ecr = readl(hsudc->regs + S3C_EP0CR); ecr &= ~(S3C_ECR_STALL | S3C_ECR_FLUSH); writel(ecr, hsudc->regs + S3C_EP0CR); writel(S3C_EP0SR_STALL, hsudc->regs + S3C_EP0SR); hsep->stopped = 0; s3c_hsudc_nuke_ep(hsep, -ECONNABORTED); hsudc->ep0state = WAIT_FOR_SETUP; hsep->bEndpointAddress &= ~USB_DIR_IN; return; } if (csr & S3C_EP0SR_TX_SUCCESS) { writel(S3C_EP0SR_TX_SUCCESS, hsudc->regs + S3C_EP0SR); if (ep_is_in(hsep)) { if (list_empty(&hsep->queue)) return; hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); s3c_hsudc_write_fifo(hsep, hsreq); } } if (csr & S3C_EP0SR_RX_SUCCESS) { if (hsudc->ep0state == WAIT_FOR_SETUP) s3c_hsudc_process_setup(hsudc); else { if (!ep_is_in(hsep)) { if (list_empty(&hsep->queue)) return; hsreq = list_entry(hsep->queue.next, struct s3c_hsudc_req, queue); s3c_hsudc_read_fifo(hsep, hsreq); } } } } /** * s3c_hsudc_ep_enable - Enable a endpoint. * @_ep: The endpoint to be enabled. * @desc: Endpoint descriptor. * * Enables a endpoint when called from the gadget driver. Endpoint stall if * any is cleared, transfer type is configured and endpoint interrupt is * enabled. 
*/ static int s3c_hsudc_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct s3c_hsudc_ep *hsep; struct s3c_hsudc *hsudc; unsigned long flags; u32 ecr = 0; hsep = container_of(_ep, struct s3c_hsudc_ep, ep); if (!_ep || !desc || hsep->desc || _ep->name == ep0name || desc->bDescriptorType != USB_DT_ENDPOINT || hsep->bEndpointAddress != desc->bEndpointAddress || ep_maxpacket(hsep) < le16_to_cpu(desc->wMaxPacketSize)) return -EINVAL; if ((desc->bmAttributes == USB_ENDPOINT_XFER_BULK && le16_to_cpu(desc->wMaxPacketSize) != ep_maxpacket(hsep)) || !desc->wMaxPacketSize) return -ERANGE; hsudc = hsep->dev; if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; spin_lock_irqsave(&hsudc->lock, flags); set_index(hsudc, hsep->bEndpointAddress); ecr |= ((usb_endpoint_xfer_int(desc)) ? S3C_ECR_IEMS : S3C_ECR_DUEN); writel(ecr, hsudc->regs + S3C_ECR); hsep->stopped = hsep->wedge = 0; hsep->desc = desc; hsep->ep.maxpacket = le16_to_cpu(desc->wMaxPacketSize); s3c_hsudc_set_halt(_ep, 0); __set_bit(ep_index(hsep), hsudc->regs + S3C_EIER); spin_unlock_irqrestore(&hsudc->lock, flags); return 0; } /** * s3c_hsudc_ep_disable - Disable a endpoint. * @_ep: The endpoint to be disabled. * @desc: Endpoint descriptor. * * Disables a endpoint when called from the gadget driver. */ static int s3c_hsudc_ep_disable(struct usb_ep *_ep) { struct s3c_hsudc_ep *hsep = our_ep(_ep); struct s3c_hsudc *hsudc = hsep->dev; unsigned long flags; if (!_ep || !hsep->desc) return -EINVAL; spin_lock_irqsave(&hsudc->lock, flags); set_index(hsudc, hsep->bEndpointAddress); __clear_bit(ep_index(hsep), hsudc->regs + S3C_EIER); s3c_hsudc_nuke_ep(hsep, -ESHUTDOWN); hsep->desc = 0; hsep->stopped = 1; spin_unlock_irqrestore(&hsudc->lock, flags); return 0; } /** * s3c_hsudc_alloc_request - Allocate a new request. * @_ep: Endpoint for which request is allocated (not used). * @gfp_flags: Flags used for the allocation. 
* * Allocates a single transfer request structure when called from gadget driver. */ static struct usb_request *s3c_hsudc_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { struct s3c_hsudc_req *hsreq; hsreq = kzalloc(sizeof *hsreq, gfp_flags); if (!hsreq) return 0; INIT_LIST_HEAD(&hsreq->queue); return &hsreq->req; } /** * s3c_hsudc_free_request - Deallocate a request. * @ep: Endpoint for which request is deallocated (not used). * @_req: Request to be deallocated. * * Allocates a single transfer request structure when called from gadget driver. */ static void s3c_hsudc_free_request(struct usb_ep *ep, struct usb_request *_req) { struct s3c_hsudc_req *hsreq; hsreq = container_of(_req, struct s3c_hsudc_req, req); WARN_ON(!list_empty(&hsreq->queue)); kfree(hsreq); } /** * s3c_hsudc_queue - Queue a transfer request for the endpoint. * @_ep: Endpoint for which the request is queued. * @_req: Request to be queued. * @gfp_flags: Not used. * * Start or enqueue a request for a endpoint when called from gadget driver. */ static int s3c_hsudc_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct s3c_hsudc_req *hsreq; struct s3c_hsudc_ep *hsep; struct s3c_hsudc *hsudc; unsigned long flags; u32 offset; u32 csr; hsreq = container_of(_req, struct s3c_hsudc_req, req); if ((!_req || !_req->complete || !_req->buf || !list_empty(&hsreq->queue))) return -EINVAL; hsep = container_of(_ep, struct s3c_hsudc_ep, ep); hsudc = hsep->dev; if (!hsudc->driver || hsudc->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; spin_lock_irqsave(&hsudc->lock, flags); set_index(hsudc, hsep->bEndpointAddress); _req->status = -EINPROGRESS; _req->actual = 0; if (!ep_index(hsep) && _req->length == 0) { hsudc->ep0state = WAIT_FOR_SETUP; s3c_hsudc_complete_request(hsep, hsreq, 0); spin_unlock_irqrestore(&hsudc->lock, flags); return 0; } if (list_empty(&hsep->queue) && !hsep->stopped) { offset = (ep_index(hsep)) ? 
S3C_ESR : S3C_EP0SR; if (ep_is_in(hsep)) { csr = readl((u32)hsudc->regs + offset); if (!(csr & S3C_ESR_TX_SUCCESS) && (s3c_hsudc_write_fifo(hsep, hsreq) == 1)) hsreq = 0; } else { csr = readl((u32)hsudc->regs + offset); if ((csr & S3C_ESR_RX_SUCCESS) && (s3c_hsudc_read_fifo(hsep, hsreq) == 1)) hsreq = 0; } } if (hsreq != 0) list_add_tail(&hsreq->queue, &hsep->queue); spin_unlock_irqrestore(&hsudc->lock, flags); return 0; } /** * s3c_hsudc_dequeue - Dequeue a transfer request from an endpoint. * @_ep: Endpoint from which the request is dequeued. * @_req: Request to be dequeued. * * Dequeue a request from a endpoint when called from gadget driver. */ static int s3c_hsudc_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct s3c_hsudc_ep *hsep = our_ep(_ep); struct s3c_hsudc *hsudc = hsep->dev; struct s3c_hsudc_req *hsreq; unsigned long flags; hsep = container_of(_ep, struct s3c_hsudc_ep, ep); if (!_ep || hsep->ep.name == ep0name) return -EINVAL; spin_lock_irqsave(&hsudc->lock, flags); list_for_each_entry(hsreq, &hsep->queue, queue) { if (&hsreq->req == _req) break; } if (&hsreq->req != _req) { spin_unlock_irqrestore(&hsudc->lock, flags); return -EINVAL; } set_index(hsudc, hsep->bEndpointAddress); s3c_hsudc_complete_request(hsep, hsreq, -ECONNRESET); spin_unlock_irqrestore(&hsudc->lock, flags); return 0; } static struct usb_ep_ops s3c_hsudc_ep_ops = { .enable = s3c_hsudc_ep_enable, .disable = s3c_hsudc_ep_disable, .alloc_request = s3c_hsudc_alloc_request, .free_request = s3c_hsudc_free_request, .queue = s3c_hsudc_queue, .dequeue = s3c_hsudc_dequeue, .set_halt = s3c_hsudc_set_halt, .set_wedge = s3c_hsudc_set_wedge, }; /** * s3c_hsudc_initep - Initialize a endpoint to default state. * @hsudc - Reference to the device controller. * @hsep - Endpoint to be initialized. * @epnum - Address to be assigned to the endpoint. * * Initialize a endpoint with default configuration. 
*/ static void s3c_hsudc_initep(struct s3c_hsudc *hsudc, struct s3c_hsudc_ep *hsep, int epnum) { char *dir; if ((epnum % 2) == 0) { dir = "out"; } else { dir = "in"; hsep->bEndpointAddress = USB_DIR_IN; } hsep->bEndpointAddress |= epnum; if (epnum) snprintf(hsep->name, sizeof(hsep->name), "ep%d%s", epnum, dir); else snprintf(hsep->name, sizeof(hsep->name), "%s", ep0name); INIT_LIST_HEAD(&hsep->queue); INIT_LIST_HEAD(&hsep->ep.ep_list); if (epnum) list_add_tail(&hsep->ep.ep_list, &hsudc->gadget.ep_list); hsep->dev = hsudc; hsep->ep.name = hsep->name; hsep->ep.maxpacket = epnum ? 512 : 64; hsep->ep.ops = &s3c_hsudc_ep_ops; hsep->fifo = hsudc->regs + S3C_BR(epnum); hsep->desc = 0; hsep->stopped = 0; hsep->wedge = 0; set_index(hsudc, epnum); writel(hsep->ep.maxpacket, hsudc->regs + S3C_MPR); } /** * s3c_hsudc_setup_ep - Configure all endpoints to default state. * @hsudc: Reference to device controller. * * Configures all endpoints to default state. */ static void s3c_hsudc_setup_ep(struct s3c_hsudc *hsudc) { int epnum; hsudc->ep0state = WAIT_FOR_SETUP; INIT_LIST_HEAD(&hsudc->gadget.ep_list); for (epnum = 0; epnum < hsudc->pd->epnum; epnum++) s3c_hsudc_initep(hsudc, &hsudc->ep[epnum], epnum); } /** * s3c_hsudc_reconfig - Reconfigure the device controller to default state. * @hsudc: Reference to device controller. * * Reconfigures the device controller registers to a default state. */ static void s3c_hsudc_reconfig(struct s3c_hsudc *hsudc) { writel(0xAA, hsudc->regs + S3C_EDR); writel(1, hsudc->regs + S3C_EIER); writel(0, hsudc->regs + S3C_TR); writel(S3C_SCR_DTZIEN_EN | S3C_SCR_RRD_EN | S3C_SCR_SUS_EN | S3C_SCR_RST_EN, hsudc->regs + S3C_SCR); writel(0, hsudc->regs + S3C_EP0CR); s3c_hsudc_setup_ep(hsudc); } /** * s3c_hsudc_irq - Interrupt handler for device controller. * @irq: Not used. * @_dev: Reference to the device controller. * * Interrupt handler for the device controller. This handler handles controller * interrupts and endpoint interrupts. 
*/ static irqreturn_t s3c_hsudc_irq(int irq, void *_dev) { struct s3c_hsudc *hsudc = _dev; struct s3c_hsudc_ep *hsep; u32 ep_intr; u32 sys_status; u32 ep_idx; spin_lock(&hsudc->lock); sys_status = readl(hsudc->regs + S3C_SSR); ep_intr = readl(hsudc->regs + S3C_EIR) & 0x3FF; if (!ep_intr && !(sys_status & S3C_SSR_DTZIEN_EN)) { spin_unlock(&hsudc->lock); return IRQ_HANDLED; } if (sys_status) { if (sys_status & S3C_SSR_VBUSON) writel(S3C_SSR_VBUSON, hsudc->regs + S3C_SSR); if (sys_status & S3C_SSR_ERR) writel(S3C_SSR_ERR, hsudc->regs + S3C_SSR); if (sys_status & S3C_SSR_SDE) { writel(S3C_SSR_SDE, hsudc->regs + S3C_SSR); hsudc->gadget.speed = (sys_status & S3C_SSR_HSP) ? USB_SPEED_HIGH : USB_SPEED_FULL; } if (sys_status & S3C_SSR_SUSPEND) { writel(S3C_SSR_SUSPEND, hsudc->regs + S3C_SSR); if (hsudc->gadget.speed != USB_SPEED_UNKNOWN && hsudc->driver && hsudc->driver->suspend) hsudc->driver->suspend(&hsudc->gadget); } if (sys_status & S3C_SSR_RESUME) { writel(S3C_SSR_RESUME, hsudc->regs + S3C_SSR); if (hsudc->gadget.speed != USB_SPEED_UNKNOWN && hsudc->driver && hsudc->driver->resume) hsudc->driver->resume(&hsudc->gadget); } if (sys_status & S3C_SSR_RESET) { writel(S3C_SSR_RESET, hsudc->regs + S3C_SSR); for (ep_idx = 0; ep_idx < hsudc->pd->epnum; ep_idx++) { hsep = &hsudc->ep[ep_idx]; hsep->stopped = 1; s3c_hsudc_nuke_ep(hsep, -ECONNRESET); } s3c_hsudc_reconfig(hsudc); hsudc->ep0state = WAIT_FOR_SETUP; } } if (ep_intr & S3C_EIR_EP0) { writel(S3C_EIR_EP0, hsudc->regs + S3C_EIR); set_index(hsudc, 0); s3c_hsudc_handle_ep0_intr(hsudc); } ep_intr >>= 1; ep_idx = 1; while (ep_intr) { if (ep_intr & 1) { hsep = &hsudc->ep[ep_idx]; set_index(hsudc, ep_idx); writel(1 << ep_idx, hsudc->regs + S3C_EIR); if (ep_is_in(hsep)) s3c_hsudc_epin_intr(hsudc, ep_idx); else s3c_hsudc_epout_intr(hsudc, ep_idx); } ep_intr >>= 1; ep_idx++; } spin_unlock(&hsudc->lock); return IRQ_HANDLED; } int usb_gadget_probe_driver(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)) { struct 
s3c_hsudc *hsudc = the_controller; int ret; if (!driver || (driver->speed != USB_SPEED_FULL && driver->speed != USB_SPEED_HIGH) || !bind || !driver->unbind || !driver->disconnect || !driver->setup) return -EINVAL; if (!hsudc) return -ENODEV; if (hsudc->driver) return -EBUSY; hsudc->driver = driver; hsudc->gadget.dev.driver = &driver->driver; hsudc->gadget.speed = USB_SPEED_UNKNOWN; ret = device_add(&hsudc->gadget.dev); if (ret) { dev_err(hsudc->dev, "failed to probe gadget device"); return ret; } ret = bind(&hsudc->gadget); if (ret) { dev_err(hsudc->dev, "%s: bind failed\n", hsudc->gadget.name); device_del(&hsudc->gadget.dev); hsudc->driver = NULL; hsudc->gadget.dev.driver = NULL; return ret; } enable_irq(hsudc->irq); dev_info(hsudc->dev, "bound driver %s\n", driver->driver.name); s3c_hsudc_reconfig(hsudc); s3c_hsudc_init_phy(); if (hsudc->pd->gpio_init) hsudc->pd->gpio_init(); return 0; } EXPORT_SYMBOL(usb_gadget_probe_driver); int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) { struct s3c_hsudc *hsudc = the_controller; unsigned long flags; if (!hsudc) return -ENODEV; if (!driver || driver != hsudc->driver || !driver->unbind) return -EINVAL; spin_lock_irqsave(&hsudc->lock, flags); hsudc->driver = 0; s3c_hsudc_uninit_phy(); if (hsudc->pd->gpio_uninit) hsudc->pd->gpio_uninit(); s3c_hsudc_stop_activity(hsudc, driver); spin_unlock_irqrestore(&hsudc->lock, flags); driver->unbind(&hsudc->gadget); device_del(&hsudc->gadget.dev); disable_irq(hsudc->irq); dev_info(hsudc->dev, "unregistered gadget driver '%s'\n", driver->driver.name); return 0; } EXPORT_SYMBOL(usb_gadget_unregister_driver); static inline u32 s3c_hsudc_read_frameno(struct s3c_hsudc *hsudc) { return readl(hsudc->regs + S3C_FNR) & 0x3FF; } static int s3c_hsudc_gadget_getframe(struct usb_gadget *gadget) { return s3c_hsudc_read_frameno(to_hsudc(gadget)); } static struct usb_gadget_ops s3c_hsudc_gadget_ops = { .get_frame = s3c_hsudc_gadget_getframe, }; static int s3c_hsudc_probe(struct 
platform_device *pdev) { struct device *dev = &pdev->dev; struct resource *res; struct s3c_hsudc *hsudc; struct s3c24xx_hsudc_platdata *pd = pdev->dev.platform_data; int ret; hsudc = kzalloc(sizeof(struct s3c_hsudc) + sizeof(struct s3c_hsudc_ep) * pd->epnum, GFP_KERNEL); if (!hsudc) { dev_err(dev, "cannot allocate memory\n"); return -ENOMEM; } the_controller = hsudc; platform_set_drvdata(pdev, dev); hsudc->dev = dev; hsudc->pd = pdev->dev.platform_data; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "unable to obtain driver resource data\n"); ret = -ENODEV; goto err_res; } hsudc->mem_rsrc = request_mem_region(res->start, resource_size(res), dev_name(&pdev->dev)); if (!hsudc->mem_rsrc) { dev_err(dev, "failed to reserve register area\n"); ret = -ENODEV; goto err_res; } hsudc->regs = ioremap(res->start, resource_size(res)); if (!hsudc->regs) { dev_err(dev, "error mapping device register area\n"); ret = -EBUSY; goto err_remap; } ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(dev, "unable to obtain IRQ number\n"); goto err_irq; } hsudc->irq = ret; ret = request_irq(hsudc->irq, s3c_hsudc_irq, 0, driver_name, hsudc); if (ret < 0) { dev_err(dev, "irq request failed\n"); goto err_irq; } spin_lock_init(&hsudc->lock); device_initialize(&hsudc->gadget.dev); dev_set_name(&hsudc->gadget.dev, "gadget"); hsudc->gadget.is_dualspeed = 1; hsudc->gadget.ops = &s3c_hsudc_gadget_ops; hsudc->gadget.name = dev_name(dev); hsudc->gadget.dev.parent = dev; hsudc->gadget.dev.dma_mask = dev->dma_mask; hsudc->gadget.ep0 = &hsudc->ep[0].ep; hsudc->gadget.is_otg = 0; hsudc->gadget.is_a_peripheral = 0; s3c_hsudc_setup_ep(hsudc); hsudc->uclk = clk_get(&pdev->dev, "usb-device"); if (IS_ERR(hsudc->uclk)) { dev_err(dev, "failed to find usb-device clock source\n"); ret = PTR_ERR(hsudc->uclk); goto err_clk; } clk_enable(hsudc->uclk); local_irq_disable(); disable_irq(hsudc->irq); local_irq_enable(); return 0; err_clk: free_irq(hsudc->irq, hsudc); err_irq: 
iounmap(hsudc->regs); err_remap: release_resource(hsudc->mem_rsrc); kfree(hsudc->mem_rsrc); err_res: kfree(hsudc); return ret; } static struct platform_driver s3c_hsudc_driver = { .driver = { .owner = THIS_MODULE, .name = "s3c-hsudc", }, .probe = s3c_hsudc_probe, }; static int __init s3c_hsudc_modinit(void) { return platform_driver_register(&s3c_hsudc_driver); } static void __exit s3c_hsudc_modexit(void) { platform_driver_unregister(&s3c_hsudc_driver); } module_init(s3c_hsudc_modinit); module_exit(s3c_hsudc_modexit); MODULE_DESCRIPTION("Samsung S3C24XX USB high-speed controller driver"); MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>"); MODULE_LICENSE("GPL");
gpl-2.0
jakew02/android_kernel_lge_bullhead
drivers/acpi/acpica/exstorob.c
2623
7217
/****************************************************************************** * * Module Name: exstorob - AML Interpreter object store support, store to object * *****************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acinterp.h" #define _COMPONENT ACPI_EXECUTER ACPI_MODULE_NAME("exstorob") /******************************************************************************* * * FUNCTION: acpi_ex_store_buffer_to_buffer * * PARAMETERS: source_desc - Source object to copy * target_desc - Destination object of the copy * * RETURN: Status * * DESCRIPTION: Copy a buffer object to another buffer object. * ******************************************************************************/ acpi_status acpi_ex_store_buffer_to_buffer(union acpi_operand_object *source_desc, union acpi_operand_object *target_desc) { u32 length; u8 *buffer; ACPI_FUNCTION_TRACE_PTR(ex_store_buffer_to_buffer, source_desc); /* If Source and Target are the same, just return */ if (source_desc == target_desc) { return_ACPI_STATUS(AE_OK); } /* We know that source_desc is a buffer by now */ buffer = ACPI_CAST_PTR(u8, source_desc->buffer.pointer); length = source_desc->buffer.length; /* * If target is a buffer of length zero or is a static buffer, * allocate a new buffer of the proper length */ if ((target_desc->buffer.length == 0) || (target_desc->common.flags & AOPOBJ_STATIC_POINTER)) { target_desc->buffer.pointer = ACPI_ALLOCATE(length); if (!target_desc->buffer.pointer) { return_ACPI_STATUS(AE_NO_MEMORY); } target_desc->buffer.length = length; } /* Copy source buffer to target buffer */ if (length <= target_desc->buffer.length) { /* Clear existing buffer and copy in the new 
one */ ACPI_MEMSET(target_desc->buffer.pointer, 0, target_desc->buffer.length); ACPI_MEMCPY(target_desc->buffer.pointer, buffer, length); #ifdef ACPI_OBSOLETE_BEHAVIOR /* * NOTE: ACPI versions up to 3.0 specified that the buffer must be * truncated if the string is smaller than the buffer. However, "other" * implementations of ACPI never did this and thus became the defacto * standard. ACPI 3.0A changes this behavior such that the buffer * is no longer truncated. */ /* * OBSOLETE BEHAVIOR: * If the original source was a string, we must truncate the buffer, * according to the ACPI spec. Integer-to-Buffer and Buffer-to-Buffer * copy must not truncate the original buffer. */ if (original_src_type == ACPI_TYPE_STRING) { /* Set the new length of the target */ target_desc->buffer.length = length; } #endif } else { /* Truncate the source, copy only what will fit */ ACPI_MEMCPY(target_desc->buffer.pointer, buffer, target_desc->buffer.length); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Truncating source buffer from %X to %X\n", length, target_desc->buffer.length)); } /* Copy flags */ target_desc->buffer.flags = source_desc->buffer.flags; target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER; return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ex_store_string_to_string * * PARAMETERS: source_desc - Source object to copy * target_desc - Destination object of the copy * * RETURN: Status * * DESCRIPTION: Copy a String object to another String object * ******************************************************************************/ acpi_status acpi_ex_store_string_to_string(union acpi_operand_object *source_desc, union acpi_operand_object *target_desc) { u32 length; u8 *buffer; ACPI_FUNCTION_TRACE_PTR(ex_store_string_to_string, source_desc); /* If Source and Target are the same, just return */ if (source_desc == target_desc) { return_ACPI_STATUS(AE_OK); } /* We know that source_desc is a string by now */ buffer = 
ACPI_CAST_PTR(u8, source_desc->string.pointer); length = source_desc->string.length; /* * Replace existing string value if it will fit and the string * pointer is not a static pointer (part of an ACPI table) */ if ((length < target_desc->string.length) && (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) { /* * String will fit in existing non-static buffer. * Clear old string and copy in the new one */ ACPI_MEMSET(target_desc->string.pointer, 0, (acpi_size) target_desc->string.length + 1); ACPI_MEMCPY(target_desc->string.pointer, buffer, length); } else { /* * Free the current buffer, then allocate a new buffer * large enough to hold the value */ if (target_desc->string.pointer && (!(target_desc->common.flags & AOPOBJ_STATIC_POINTER))) { /* Only free if not a pointer into the DSDT */ ACPI_FREE(target_desc->string.pointer); } target_desc->string.pointer = ACPI_ALLOCATE_ZEROED((acpi_size) length + 1); if (!target_desc->string.pointer) { return_ACPI_STATUS(AE_NO_MEMORY); } target_desc->common.flags &= ~AOPOBJ_STATIC_POINTER; ACPI_MEMCPY(target_desc->string.pointer, buffer, length); } /* Set the new target length */ target_desc->string.length = length; return_ACPI_STATUS(AE_OK); }
gpl-2.0
OMAP4-AOSP/android_kernel_samsung_tuna
fs/cifs/netmisc.c
3135
40181
/* * fs/cifs/netmisc.c * * Copyright (c) International Business Machines Corp., 2002,2008 * Author(s): Steve French (sfrench@us.ibm.com) * * Error mapping routines from Samba libsmb/errormap.c * Copyright (C) Andrew Tridgell 2001 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/net.h> #include <linux/string.h> #include <linux/in.h> #include <linux/ctype.h> #include <linux/fs.h> #include <asm/div64.h> #include <asm/byteorder.h> #include <linux/inet.h> #include "cifsfs.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifsproto.h" #include "smberr.h" #include "cifs_debug.h" #include "nterr.h" struct smb_to_posix_error { __u16 smb_err; int posix_code; }; static const struct smb_to_posix_error mapping_table_ERRDOS[] = { {ERRbadfunc, -EINVAL}, {ERRbadfile, -ENOENT}, {ERRbadpath, -ENOTDIR}, {ERRnofids, -EMFILE}, {ERRnoaccess, -EACCES}, {ERRbadfid, -EBADF}, {ERRbadmcb, -EIO}, {ERRnomem, -ENOMEM}, {ERRbadmem, -EFAULT}, {ERRbadenv, -EFAULT}, {ERRbadformat, -EINVAL}, {ERRbadaccess, -EACCES}, {ERRbaddata, -EIO}, {ERRbaddrive, -ENXIO}, {ERRremcd, -EACCES}, {ERRdiffdevice, -EXDEV}, {ERRnofiles, -ENOENT}, {ERRwriteprot, -EROFS}, {ERRbadshare, -ETXTBSY}, {ERRlock, -EACCES}, {ERRunsup, -EINVAL}, {ERRnosuchshare, -ENXIO}, {ERRfilexists, -EEXIST}, {ERRinvparm, -EINVAL}, {ERRdiskfull, -ENOSPC}, {ERRinvname, -ENOENT}, 
{ERRinvlevel, -EOPNOTSUPP}, {ERRdirnotempty, -ENOTEMPTY}, {ERRnotlocked, -ENOLCK}, {ERRcancelviolation, -ENOLCK}, {ERRalreadyexists, -EEXIST}, {ERRmoredata, -EOVERFLOW}, {ERReasnotsupported, -EOPNOTSUPP}, {ErrQuota, -EDQUOT}, {ErrNotALink, -ENOLINK}, {ERRnetlogonNotStarted, -ENOPROTOOPT}, {ERRsymlink, -EOPNOTSUPP}, {ErrTooManyLinks, -EMLINK}, {0, 0} }; static const struct smb_to_posix_error mapping_table_ERRSRV[] = { {ERRerror, -EIO}, {ERRbadpw, -EACCES}, /* was EPERM */ {ERRbadtype, -EREMOTE}, {ERRaccess, -EACCES}, {ERRinvtid, -ENXIO}, {ERRinvnetname, -ENXIO}, {ERRinvdevice, -ENXIO}, {ERRqfull, -ENOSPC}, {ERRqtoobig, -ENOSPC}, {ERRqeof, -EIO}, {ERRinvpfid, -EBADF}, {ERRsmbcmd, -EBADRQC}, {ERRsrverror, -EIO}, {ERRbadBID, -EIO}, {ERRfilespecs, -EINVAL}, {ERRbadLink, -EIO}, {ERRbadpermits, -EINVAL}, {ERRbadPID, -ESRCH}, {ERRsetattrmode, -EINVAL}, {ERRpaused, -EHOSTDOWN}, {ERRmsgoff, -EHOSTDOWN}, {ERRnoroom, -ENOSPC}, {ERRrmuns, -EUSERS}, {ERRtimeout, -ETIME}, {ERRnoresource, -ENOBUFS}, {ERRtoomanyuids, -EUSERS}, {ERRbaduid, -EACCES}, {ERRusempx, -EIO}, {ERRusestd, -EIO}, {ERR_NOTIFY_ENUM_DIR, -ENOBUFS}, {ERRnoSuchUser, -EACCES}, /* {ERRaccountexpired, -EACCES}, {ERRbadclient, -EACCES}, {ERRbadLogonTime, -EACCES}, {ERRpasswordExpired, -EACCES},*/ {ERRaccountexpired, -EKEYEXPIRED}, {ERRbadclient, -EACCES}, {ERRbadLogonTime, -EACCES}, {ERRpasswordExpired, -EKEYEXPIRED}, {ERRnosupport, -EINVAL}, {0, 0} }; static const struct smb_to_posix_error mapping_table_ERRHRD[] = { {0, 0} }; /* * Convert a string containing text IPv4 or IPv6 address to binary form. * * Returns 0 on failure. 
*/ static int cifs_inet_pton(const int address_family, const char *cp, int len, void *dst) { int ret = 0; /* calculate length by finding first slash or NULL */ if (address_family == AF_INET) ret = in4_pton(cp, len, dst, '\\', NULL); else if (address_family == AF_INET6) ret = in6_pton(cp, len, dst , '\\', NULL); cFYI(DBG2, "address conversion returned %d for %*.*s", ret, len, len, cp); if (ret > 0) ret = 1; return ret; } /* * Try to convert a string to an IPv4 address and then attempt to convert * it to an IPv6 address if that fails. Set the family field if either * succeeds. If it's an IPv6 address and it has a '%' sign in it, try to * treat the part following it as a numeric sin6_scope_id. * * Returns 0 on failure. */ int cifs_convert_address(struct sockaddr *dst, const char *src, int len) { int rc, alen, slen; const char *pct; char scope_id[13]; struct sockaddr_in *s4 = (struct sockaddr_in *) dst; struct sockaddr_in6 *s6 = (struct sockaddr_in6 *) dst; /* IPv4 address */ if (cifs_inet_pton(AF_INET, src, len, &s4->sin_addr.s_addr)) { s4->sin_family = AF_INET; return 1; } /* attempt to exclude the scope ID from the address part */ pct = memchr(src, '%', len); alen = pct ? pct - src : len; rc = cifs_inet_pton(AF_INET6, src, alen, &s6->sin6_addr.s6_addr); if (!rc) return rc; s6->sin6_family = AF_INET6; if (pct) { /* grab the scope ID */ slen = len - (alen + 1); if (slen <= 0 || slen > 12) return 0; memcpy(scope_id, pct + 1, slen); scope_id[slen] = '\0'; rc = strict_strtoul(scope_id, 0, (unsigned long *)&s6->sin6_scope_id); rc = (rc == 0) ? 
1 : 0; } return rc; } int cifs_set_port(struct sockaddr *addr, const unsigned short int port) { switch (addr->sa_family) { case AF_INET: ((struct sockaddr_in *)addr)->sin_port = htons(port); break; case AF_INET6: ((struct sockaddr_in6 *)addr)->sin6_port = htons(port); break; default: return 0; } return 1; } int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, const unsigned short int port) { if (!cifs_convert_address(dst, src, len)) return 0; return cifs_set_port(dst, port); } /***************************************************************************** convert a NT status code to a dos class/code *****************************************************************************/ /* NT status -> dos error map */ static const struct { __u8 dos_class; __u16 dos_code; __u32 ntstatus; } ntstatus_to_dos_map[] = { { ERRDOS, ERRgeneral, NT_STATUS_UNSUCCESSFUL}, { ERRDOS, ERRbadfunc, NT_STATUS_NOT_IMPLEMENTED}, { ERRDOS, ERRinvlevel, NT_STATUS_INVALID_INFO_CLASS}, { ERRDOS, 24, NT_STATUS_INFO_LENGTH_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_ACCESS_VIOLATION}, { ERRHRD, ERRgeneral, NT_STATUS_IN_PAGE_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA}, { ERRDOS, ERRbadfid, NT_STATUS_INVALID_HANDLE}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_INITIAL_STACK}, { ERRDOS, 193, NT_STATUS_BAD_INITIAL_PC}, { ERRDOS, 87, NT_STATUS_INVALID_CID}, { ERRHRD, ERRgeneral, NT_STATUS_TIMER_NOT_CANCELED}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER}, { ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_DEVICE}, { ERRDOS, ERRbadfile, NT_STATUS_NO_SUCH_FILE}, { ERRDOS, ERRbadfunc, NT_STATUS_INVALID_DEVICE_REQUEST}, { ERRDOS, 38, NT_STATUS_END_OF_FILE}, { ERRDOS, 34, NT_STATUS_WRONG_VOLUME}, { ERRDOS, 21, NT_STATUS_NO_MEDIA_IN_DEVICE}, { ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_MEDIA}, { ERRDOS, 27, NT_STATUS_NONEXISTENT_SECTOR}, /* { This NT error code was 'sqashed' from NT_STATUS_MORE_PROCESSING_REQUIRED to NT_STATUS_OK during the session setup } */ { ERRDOS, ERRnomem, NT_STATUS_NO_MEMORY}, { 
ERRDOS, 487, NT_STATUS_CONFLICTING_ADDRESSES}, { ERRDOS, 487, NT_STATUS_NOT_MAPPED_VIEW}, { ERRDOS, 87, NT_STATUS_UNABLE_TO_FREE_VM}, { ERRDOS, 87, NT_STATUS_UNABLE_TO_DELETE_SECTION}, { ERRDOS, 2142, NT_STATUS_INVALID_SYSTEM_SERVICE}, { ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_INSTRUCTION}, { ERRDOS, ERRnoaccess, NT_STATUS_INVALID_LOCK_SEQUENCE}, { ERRDOS, ERRnoaccess, NT_STATUS_INVALID_VIEW_SIZE}, { ERRDOS, 193, NT_STATUS_INVALID_FILE_FOR_SECTION}, { ERRDOS, ERRnoaccess, NT_STATUS_ALREADY_COMMITTED}, /* { This NT error code was 'sqashed' from NT_STATUS_ACCESS_DENIED to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_ACCESS_DENIED}, { ERRDOS, 111, NT_STATUS_BUFFER_TOO_SMALL}, { ERRDOS, ERRbadfid, NT_STATUS_OBJECT_TYPE_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_NONCONTINUABLE_EXCEPTION}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_DISPOSITION}, { ERRHRD, ERRgeneral, NT_STATUS_UNWIND}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_STACK}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_UNWIND_TARGET}, { ERRDOS, 158, NT_STATUS_NOT_LOCKED}, { ERRHRD, ERRgeneral, NT_STATUS_PARITY_ERROR}, { ERRDOS, 487, NT_STATUS_UNABLE_TO_DECOMMIT_VM}, { ERRDOS, 487, NT_STATUS_NOT_COMMITTED}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_PORT_ATTRIBUTES}, { ERRHRD, ERRgeneral, NT_STATUS_PORT_MESSAGE_TOO_LONG}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_MIX}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_QUOTA_LOWER}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_CORRUPT_ERROR}, { /* mapping changed since shell does lookup on * expects FileNotFound */ ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_INVALID}, { ERRDOS, ERRbadfile, NT_STATUS_OBJECT_NAME_NOT_FOUND}, { ERRDOS, ERRalreadyexists, NT_STATUS_OBJECT_NAME_COLLISION}, { ERRHRD, ERRgeneral, NT_STATUS_HANDLE_NOT_WAITABLE}, { ERRDOS, ERRbadfid, NT_STATUS_PORT_DISCONNECTED}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_ALREADY_ATTACHED}, { ERRDOS, 161, NT_STATUS_OBJECT_PATH_INVALID}, { ERRDOS, ERRbadpath, NT_STATUS_OBJECT_PATH_NOT_FOUND}, 
{ ERRDOS, 161, NT_STATUS_OBJECT_PATH_SYNTAX_BAD}, { ERRHRD, ERRgeneral, NT_STATUS_DATA_OVERRUN}, { ERRHRD, ERRgeneral, NT_STATUS_DATA_LATE_ERROR}, { ERRDOS, 23, NT_STATUS_DATA_ERROR}, { ERRDOS, 23, NT_STATUS_CRC_ERROR}, { ERRDOS, ERRnomem, NT_STATUS_SECTION_TOO_BIG}, { ERRDOS, ERRnoaccess, NT_STATUS_PORT_CONNECTION_REFUSED}, { ERRDOS, ERRbadfid, NT_STATUS_INVALID_PORT_HANDLE}, { ERRDOS, ERRbadshare, NT_STATUS_SHARING_VIOLATION}, { ERRHRD, ERRgeneral, NT_STATUS_QUOTA_EXCEEDED}, { ERRDOS, 87, NT_STATUS_INVALID_PAGE_PROTECTION}, { ERRDOS, 288, NT_STATUS_MUTANT_NOT_OWNED}, { ERRDOS, 298, NT_STATUS_SEMAPHORE_LIMIT_EXCEEDED}, { ERRDOS, 87, NT_STATUS_PORT_ALREADY_SET}, { ERRDOS, 87, NT_STATUS_SECTION_NOT_IMAGE}, { ERRDOS, 156, NT_STATUS_SUSPEND_COUNT_EXCEEDED}, { ERRDOS, ERRnoaccess, NT_STATUS_THREAD_IS_TERMINATING}, { ERRDOS, 87, NT_STATUS_BAD_WORKING_SET_LIMIT}, { ERRDOS, 87, NT_STATUS_INCOMPATIBLE_FILE_MAP}, { ERRDOS, 87, NT_STATUS_SECTION_PROTECTION}, { ERRDOS, ERReasnotsupported, NT_STATUS_EAS_NOT_SUPPORTED}, { ERRDOS, 255, NT_STATUS_EA_TOO_LARGE}, { ERRHRD, ERRgeneral, NT_STATUS_NONEXISTENT_EA_ENTRY}, { ERRHRD, ERRgeneral, NT_STATUS_NO_EAS_ON_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_EA_CORRUPT_ERROR}, { ERRDOS, ERRlock, NT_STATUS_FILE_LOCK_CONFLICT}, { ERRDOS, ERRlock, NT_STATUS_LOCK_NOT_GRANTED}, { ERRDOS, ERRbadfile, NT_STATUS_DELETE_PENDING}, { ERRDOS, ERRunsup, NT_STATUS_CTL_FILE_NOT_SUPPORTED}, { ERRHRD, ERRgeneral, NT_STATUS_UNKNOWN_REVISION}, { ERRHRD, ERRgeneral, NT_STATUS_REVISION_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_OWNER}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_PRIMARY_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_NO_IMPERSONATION_TOKEN}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_DISABLE_MANDATORY}, { ERRDOS, 2215, NT_STATUS_NO_LOGON_SERVERS}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_LOGON_SESSION}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PRIVILEGE}, { ERRDOS, ERRnoaccess, NT_STATUS_PRIVILEGE_NOT_HELD}, { ERRHRD, ERRgeneral, 
NT_STATUS_INVALID_ACCOUNT_NAME}, { ERRHRD, ERRgeneral, NT_STATUS_USER_EXISTS}, /* { This NT error code was 'sqashed' from NT_STATUS_NO_SUCH_USER to NT_STATUS_LOGON_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_NO_SUCH_USER}, { /* could map to 2238 */ ERRHRD, ERRgeneral, NT_STATUS_GROUP_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_LAST_ADMIN}, /* { This NT error code was 'sqashed' from NT_STATUS_WRONG_PASSWORD to NT_STATUS_LOGON_FAILURE during the session setup } */ { ERRSRV, ERRbadpw, NT_STATUS_WRONG_PASSWORD}, { ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_PASSWORD}, { ERRHRD, ERRgeneral, NT_STATUS_PASSWORD_RESTRICTION}, { ERRDOS, ERRnoaccess, NT_STATUS_LOGON_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_ACCOUNT_RESTRICTION}, { ERRSRV, ERRbadLogonTime, NT_STATUS_INVALID_LOGON_HOURS}, { ERRSRV, ERRbadclient, NT_STATUS_INVALID_WORKSTATION}, { ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_EXPIRED}, { ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_DISABLED}, { ERRHRD, ERRgeneral, NT_STATUS_NONE_MAPPED}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_LUIDS_REQUESTED}, { ERRHRD, ERRgeneral, NT_STATUS_LUIDS_EXHAUSTED}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SUB_AUTHORITY}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ACL}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SID}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SECURITY_DESCR}, { ERRDOS, 127, NT_STATUS_PROCEDURE_NOT_FOUND}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_FORMAT}, { ERRHRD, ERRgeneral, NT_STATUS_NO_TOKEN}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_INHERITANCE_ACL}, { ERRDOS, 158, NT_STATUS_RANGE_NOT_LOCKED}, { ERRDOS, 112, NT_STATUS_DISK_FULL}, { ERRHRD, ERRgeneral, NT_STATUS_SERVER_DISABLED}, { ERRHRD, ERRgeneral, NT_STATUS_SERVER_NOT_DISABLED}, { ERRDOS, 68, NT_STATUS_TOO_MANY_GUIDS_REQUESTED}, { ERRDOS, 259, NT_STATUS_GUIDS_EXHAUSTED}, { ERRHRD, ERRgeneral, 
NT_STATUS_INVALID_ID_AUTHORITY}, { ERRDOS, 259, NT_STATUS_AGENTS_EXHAUSTED}, { ERRDOS, 154, NT_STATUS_INVALID_VOLUME_LABEL}, { ERRDOS, 14, NT_STATUS_SECTION_NOT_EXTENDED}, { ERRDOS, 487, NT_STATUS_NOT_MAPPED_DATA}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_DATA_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_TYPE_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_NAME_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_ARRAY_BOUNDS_EXCEEDED}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DENORMAL_OPERAND}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_DIVIDE_BY_ZERO}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INEXACT_RESULT}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_INVALID_OPERATION}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_STACK_CHECK}, { ERRHRD, ERRgeneral, NT_STATUS_FLOAT_UNDERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_INTEGER_DIVIDE_BY_ZERO}, { ERRDOS, 534, NT_STATUS_INTEGER_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_PRIVILEGED_INSTRUCTION}, { ERRDOS, ERRnomem, NT_STATUS_TOO_MANY_PAGING_FILES}, { ERRHRD, ERRgeneral, NT_STATUS_FILE_INVALID}, { ERRHRD, ERRgeneral, NT_STATUS_ALLOTTED_SPACE_EXCEEDED}, /* { This NT error code was 'sqashed' from NT_STATUS_INSUFFICIENT_RESOURCES to NT_STATUS_INSUFF_SERVER_RESOURCES during the session setup } */ { ERRDOS, ERRnomem, NT_STATUS_INSUFFICIENT_RESOURCES}, { ERRDOS, ERRbadpath, NT_STATUS_DFS_EXIT_PATH_FOUND}, { ERRDOS, 23, NT_STATUS_DEVICE_DATA_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_CONNECTED}, { ERRDOS, 21, NT_STATUS_DEVICE_POWER_FAILURE}, { ERRDOS, 487, NT_STATUS_FREE_VM_NOT_AT_BASE}, { ERRDOS, 487, NT_STATUS_MEMORY_NOT_ALLOCATED}, { ERRHRD, ERRgeneral, NT_STATUS_WORKING_SET_QUOTA}, { ERRDOS, 19, NT_STATUS_MEDIA_WRITE_PROTECTED}, { ERRDOS, 21, NT_STATUS_DEVICE_NOT_READY}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_GROUP_ATTRIBUTES}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_IMPERSONATION_LEVEL}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_OPEN_ANONYMOUS}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_VALIDATION_CLASS}, 
{ ERRHRD, ERRgeneral, NT_STATUS_BAD_TOKEN_TYPE}, { ERRDOS, 87, NT_STATUS_BAD_MASTER_BOOT_RECORD}, { ERRHRD, ERRgeneral, NT_STATUS_INSTRUCTION_MISALIGNMENT}, { ERRDOS, ERRpipebusy, NT_STATUS_INSTANCE_NOT_AVAILABLE}, { ERRDOS, ERRpipebusy, NT_STATUS_PIPE_NOT_AVAILABLE}, { ERRDOS, ERRbadpipe, NT_STATUS_INVALID_PIPE_STATE}, { ERRDOS, ERRpipebusy, NT_STATUS_PIPE_BUSY}, { ERRDOS, ERRbadfunc, NT_STATUS_ILLEGAL_FUNCTION}, { ERRDOS, ERRnotconnected, NT_STATUS_PIPE_DISCONNECTED}, { ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_CLOSING}, { ERRHRD, ERRgeneral, NT_STATUS_PIPE_CONNECTED}, { ERRHRD, ERRgeneral, NT_STATUS_PIPE_LISTENING}, { ERRDOS, ERRbadpipe, NT_STATUS_INVALID_READ_MODE}, { ERRDOS, 121, NT_STATUS_IO_TIMEOUT}, { ERRDOS, 38, NT_STATUS_FILE_FORCED_CLOSED}, { ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STARTED}, { ERRHRD, ERRgeneral, NT_STATUS_PROFILING_NOT_STOPPED}, { ERRHRD, ERRgeneral, NT_STATUS_COULD_NOT_INTERPRET}, { ERRDOS, ERRnoaccess, NT_STATUS_FILE_IS_A_DIRECTORY}, { ERRDOS, ERRunsup, NT_STATUS_NOT_SUPPORTED}, { ERRDOS, 51, NT_STATUS_REMOTE_NOT_LISTENING}, { ERRDOS, 52, NT_STATUS_DUPLICATE_NAME}, { ERRDOS, 53, NT_STATUS_BAD_NETWORK_PATH}, { ERRDOS, 54, NT_STATUS_NETWORK_BUSY}, { ERRDOS, 55, NT_STATUS_DEVICE_DOES_NOT_EXIST}, { ERRDOS, 56, NT_STATUS_TOO_MANY_COMMANDS}, { ERRDOS, 57, NT_STATUS_ADAPTER_HARDWARE_ERROR}, { ERRDOS, 58, NT_STATUS_INVALID_NETWORK_RESPONSE}, { ERRDOS, 59, NT_STATUS_UNEXPECTED_NETWORK_ERROR}, { ERRDOS, 60, NT_STATUS_BAD_REMOTE_ADAPTER}, { ERRDOS, 61, NT_STATUS_PRINT_QUEUE_FULL}, { ERRDOS, 62, NT_STATUS_NO_SPOOL_SPACE}, { ERRDOS, 63, NT_STATUS_PRINT_CANCELLED}, { ERRDOS, 64, NT_STATUS_NETWORK_NAME_DELETED}, { ERRDOS, 65, NT_STATUS_NETWORK_ACCESS_DENIED}, { ERRDOS, 66, NT_STATUS_BAD_DEVICE_TYPE}, { ERRDOS, ERRnosuchshare, NT_STATUS_BAD_NETWORK_NAME}, { ERRDOS, 68, NT_STATUS_TOO_MANY_NAMES}, { ERRDOS, 69, NT_STATUS_TOO_MANY_SESSIONS}, { ERRDOS, 70, NT_STATUS_SHARING_PAUSED}, { ERRDOS, 71, NT_STATUS_REQUEST_NOT_ACCEPTED}, { ERRDOS, 72, 
NT_STATUS_REDIRECTOR_PAUSED}, { ERRDOS, 88, NT_STATUS_NET_WRITE_FAULT}, { ERRHRD, ERRgeneral, NT_STATUS_PROFILING_AT_LIMIT}, { ERRDOS, ERRdiffdevice, NT_STATUS_NOT_SAME_DEVICE}, { ERRDOS, ERRnoaccess, NT_STATUS_FILE_RENAMED}, { ERRDOS, 240, NT_STATUS_VIRTUAL_CIRCUIT_CLOSED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SECURITY_ON_OBJECT}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_WAIT}, { ERRDOS, ERRpipeclosing, NT_STATUS_PIPE_EMPTY}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_ACCESS_DOMAIN_INFO}, { ERRHRD, ERRgeneral, NT_STATUS_CANT_TERMINATE_SELF}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_SERVER_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_DOMAIN_ROLE}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_DOMAIN}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_LIMIT_EXCEEDED}, { ERRDOS, 300, NT_STATUS_OPLOCK_NOT_GRANTED}, { ERRDOS, 301, NT_STATUS_INVALID_OPLOCK_PROTOCOL}, { ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_CORRUPTION}, { ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_GENERIC_NOT_MAPPED}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_DESCRIPTOR_FORMAT}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_USER_BUFFER}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_IO_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_CREATE_ERR}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_MAP_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_UNEXPECTED_MM_EXTEND_ERR}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_LOGON_PROCESS}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_EXISTS}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_1}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_2}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_3}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_4}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_5}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_6}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_7}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_8}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_9}, { ERRDOS, 87, 
NT_STATUS_INVALID_PARAMETER_10}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_11}, { ERRDOS, 87, NT_STATUS_INVALID_PARAMETER_12}, { ERRDOS, ERRbadpath, NT_STATUS_REDIRECTOR_NOT_STARTED}, { ERRHRD, ERRgeneral, NT_STATUS_REDIRECTOR_STARTED}, { ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_PACKAGE}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_FUNCTION_TABLE}, { ERRDOS, 203, 0xc0000100}, { ERRDOS, 145, NT_STATUS_DIRECTORY_NOT_EMPTY}, { ERRHRD, ERRgeneral, NT_STATUS_FILE_CORRUPT_ERROR}, { ERRDOS, 267, NT_STATUS_NOT_A_DIRECTORY}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_LOGON_SESSION_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_SESSION_COLLISION}, { ERRDOS, 206, NT_STATUS_NAME_TOO_LONG}, { ERRDOS, 2401, NT_STATUS_FILES_OPEN}, { ERRDOS, 2404, NT_STATUS_CONNECTION_IN_USE}, { ERRHRD, ERRgeneral, NT_STATUS_MESSAGE_NOT_FOUND}, { ERRDOS, ERRnoaccess, NT_STATUS_PROCESS_IS_TERMINATING}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LOGON_TYPE}, { ERRHRD, ERRgeneral, NT_STATUS_NO_GUID_TRANSLATION}, { ERRHRD, ERRgeneral, NT_STATUS_CANNOT_IMPERSONATE}, { ERRHRD, ERRgeneral, NT_STATUS_IMAGE_ALREADY_LOADED}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_PRESENT}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_NOT_EXIST}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_LID_ALREADY_OWNED}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_NOT_LID_OWNER}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_COMMAND}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_LID}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_SELECTOR_NOT_AVAILABLE}, { ERRHRD, ERRgeneral, NT_STATUS_ABIOS_INVALID_SELECTOR}, { ERRHRD, ERRgeneral, NT_STATUS_NO_LDT}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_SIZE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_OFFSET}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_LDT_DESCRIPTOR}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NE_FORMAT}, { ERRHRD, ERRgeneral, NT_STATUS_RXACT_INVALID_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_RXACT_COMMIT_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_MAPPED_FILE_SIZE_ZERO}, { 
ERRDOS, ERRnofids, NT_STATUS_TOO_MANY_OPENED_FILES}, { ERRHRD, ERRgeneral, NT_STATUS_CANCELLED}, { ERRDOS, ERRnoaccess, NT_STATUS_CANNOT_DELETE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_COMPUTER_NAME}, { ERRDOS, ERRnoaccess, NT_STATUS_FILE_DELETED}, { ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_ACCOUNT}, { ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_GROUP}, { ERRHRD, ERRgeneral, NT_STATUS_SPECIAL_USER}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBERS_PRIMARY_GROUP}, { ERRDOS, ERRbadfid, NT_STATUS_FILE_CLOSED}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_THREADS}, { ERRHRD, ERRgeneral, NT_STATUS_THREAD_NOT_IN_PROCESS}, { ERRHRD, ERRgeneral, NT_STATUS_TOKEN_ALREADY_IN_USE}, { ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_QUOTA_EXCEEDED}, { ERRHRD, ERRgeneral, NT_STATUS_COMMITMENT_LIMIT}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_LE_FORMAT}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_NOT_MZ}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_PROTECT}, { ERRDOS, 193, NT_STATUS_INVALID_IMAGE_WIN_16}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_SERVER_CONFLICT}, { ERRHRD, ERRgeneral, NT_STATUS_TIME_DIFFERENCE_AT_DC}, { ERRHRD, ERRgeneral, NT_STATUS_SYNCHRONIZATION_REQUIRED}, { ERRDOS, 126, NT_STATUS_DLL_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_OPEN_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_IO_PRIVILEGE_FAILED}, { ERRDOS, 182, NT_STATUS_ORDINAL_NOT_FOUND}, { ERRDOS, 127, NT_STATUS_ENTRYPOINT_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_CONTROL_C_EXIT}, { ERRDOS, 64, NT_STATUS_LOCAL_DISCONNECT}, { ERRDOS, 64, NT_STATUS_REMOTE_DISCONNECT}, { ERRDOS, 51, NT_STATUS_REMOTE_RESOURCES}, { ERRDOS, 59, NT_STATUS_LINK_FAILED}, { ERRDOS, 59, NT_STATUS_LINK_TIMEOUT}, { ERRDOS, 59, NT_STATUS_INVALID_CONNECTION}, { ERRDOS, 59, NT_STATUS_INVALID_ADDRESS}, { ERRHRD, ERRgeneral, NT_STATUS_DLL_INIT_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_MISSING_SYSTEMFILE}, { ERRHRD, ERRgeneral, NT_STATUS_UNHANDLED_EXCEPTION}, { ERRHRD, ERRgeneral, NT_STATUS_APP_INIT_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_PAGEFILE_CREATE_FAILED}, { ERRHRD, ERRgeneral, 
NT_STATUS_NO_PAGEFILE}, { ERRDOS, 124, NT_STATUS_INVALID_LEVEL}, { ERRDOS, 86, NT_STATUS_WRONG_PASSWORD_CORE}, { ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_FLOAT_CONTEXT}, { ERRDOS, 109, NT_STATUS_PIPE_BROKEN}, { ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_CORRUPT}, { ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_IO_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_EVENT_PAIR}, { ERRHRD, ERRgeneral, NT_STATUS_UNRECOGNIZED_VOLUME}, { ERRHRD, ERRgeneral, NT_STATUS_SERIAL_NO_DEVICE_INITED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_ALIAS}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_NOT_IN_ALIAS}, { ERRHRD, ERRgeneral, NT_STATUS_MEMBER_IN_ALIAS}, { ERRHRD, ERRgeneral, NT_STATUS_ALIAS_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_LOGON_NOT_GRANTED}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SECRETS}, { ERRHRD, ERRgeneral, NT_STATUS_SECRET_TOO_LONG}, { ERRHRD, ERRgeneral, NT_STATUS_INTERNAL_DB_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_FULLSCREEN_MODE}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_CONTEXT_IDS}, { ERRDOS, ERRnoaccess, NT_STATUS_LOGON_TYPE_NOT_GRANTED}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_REGISTRY_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_NT_CROSS_ENCRYPTION_REQUIRED}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CTRLR_CONFIG_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_FT_MISSING_MEMBER}, { ERRHRD, ERRgeneral, NT_STATUS_ILL_FORMED_SERVICE_ENTRY}, { ERRHRD, ERRgeneral, NT_STATUS_ILLEGAL_CHARACTER}, { ERRHRD, ERRgeneral, NT_STATUS_UNMAPPABLE_CHARACTER}, { ERRHRD, ERRgeneral, NT_STATUS_UNDEFINED_CHARACTER}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_VOLUME}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_ID_MARK_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_WRONG_CYLINDER}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_UNKNOWN_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_FLOPPY_BAD_REGISTERS}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_RECALIBRATE_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_OPERATION_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_DISK_RESET_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_SHARED_IRQ_BUSY}, { ERRHRD, ERRgeneral, 
NT_STATUS_FT_ORPHANING}, { ERRHRD, ERRgeneral, 0xc000016e}, { ERRHRD, ERRgeneral, 0xc000016f}, { ERRHRD, ERRgeneral, 0xc0000170}, { ERRHRD, ERRgeneral, 0xc0000171}, { ERRHRD, ERRgeneral, NT_STATUS_PARTITION_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_BLOCK_LENGTH}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_NOT_PARTITIONED}, { ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_LOCK_MEDIA}, { ERRHRD, ERRgeneral, NT_STATUS_UNABLE_TO_UNLOAD_MEDIA}, { ERRHRD, ERRgeneral, NT_STATUS_EOM_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_NO_MEDIA}, { ERRHRD, ERRgeneral, 0xc0000179}, { ERRHRD, ERRgeneral, NT_STATUS_NO_SUCH_MEMBER}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_MEMBER}, { ERRHRD, ERRgeneral, NT_STATUS_KEY_DELETED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_LOG_SPACE}, { ERRHRD, ERRgeneral, NT_STATUS_TOO_MANY_SIDS}, { ERRHRD, ERRgeneral, NT_STATUS_LM_CROSS_ENCRYPTION_REQUIRED}, { ERRHRD, ERRgeneral, NT_STATUS_KEY_HAS_CHILDREN}, { ERRHRD, ERRgeneral, NT_STATUS_CHILD_MUST_BE_VOLATILE}, { ERRDOS, 87, NT_STATUS_DEVICE_CONFIGURATION_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DRIVER_INTERNAL_ERROR}, { ERRDOS, 22, NT_STATUS_INVALID_DEVICE_STATE}, { ERRHRD, ERRgeneral, NT_STATUS_IO_DEVICE_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DEVICE_PROTOCOL_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_BACKUP_CONTROLLER}, { ERRHRD, ERRgeneral, NT_STATUS_LOG_FILE_FULL}, { ERRDOS, 19, NT_STATUS_TOO_LATE}, { ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_LSA_SECRET}, /* { This NT error code was 'sqashed' from NT_STATUS_NO_TRUST_SAM_ACCOUNT to NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_NO_TRUST_SAM_ACCOUNT}, { ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_DOMAIN_FAILURE}, { ERRDOS, ERRnoaccess, NT_STATUS_TRUSTED_RELATIONSHIP_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CORRUPT}, { ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_CANT_START}, { ERRDOS, ERRnoaccess, NT_STATUS_TRUST_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_MUTANT_LIMIT_EXCEEDED}, { ERRDOS, ERRnetlogonNotStarted, 
NT_STATUS_NETLOGON_NOT_STARTED}, { ERRSRV, ERRaccountexpired, NT_STATUS_ACCOUNT_EXPIRED}, { ERRHRD, ERRgeneral, NT_STATUS_POSSIBLE_DEADLOCK}, { ERRHRD, ERRgeneral, NT_STATUS_NETWORK_CREDENTIAL_CONFLICT}, { ERRHRD, ERRgeneral, NT_STATUS_REMOTE_SESSION_LIMIT}, { ERRHRD, ERRgeneral, NT_STATUS_EVENTLOG_FILE_CHANGED}, { ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_INTERDOMAIN_TRUST_ACCOUNT}, { ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_WORKSTATION_TRUST_ACCOUNT}, { ERRDOS, ERRnoaccess, NT_STATUS_NOLOGON_SERVER_TRUST_ACCOUNT}, /* { This NT error code was 'sqashed' from NT_STATUS_DOMAIN_TRUST_INCONSISTENT to NT_STATUS_LOGON_FAILURE during the session setup } */ { ERRDOS, ERRnoaccess, NT_STATUS_DOMAIN_TRUST_INCONSISTENT}, { ERRHRD, ERRgeneral, NT_STATUS_FS_DRIVER_REQUIRED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_USER_SESSION_KEY}, { ERRDOS, 59, NT_STATUS_USER_SESSION_DELETED}, { ERRHRD, ERRgeneral, NT_STATUS_RESOURCE_LANG_NOT_FOUND}, { ERRDOS, ERRnomem, NT_STATUS_INSUFF_SERVER_RESOURCES}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_BUFFER_SIZE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_COMPONENT}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_ADDRESS_WILDCARD}, { ERRDOS, 68, NT_STATUS_TOO_MANY_ADDRESSES}, { ERRDOS, 52, NT_STATUS_ADDRESS_ALREADY_EXISTS}, { ERRDOS, 64, NT_STATUS_ADDRESS_CLOSED}, { ERRDOS, 64, NT_STATUS_CONNECTION_DISCONNECTED}, { ERRDOS, 64, NT_STATUS_CONNECTION_RESET}, { ERRDOS, 68, NT_STATUS_TOO_MANY_NODES}, { ERRDOS, 59, NT_STATUS_TRANSACTION_ABORTED}, { ERRDOS, 59, NT_STATUS_TRANSACTION_TIMED_OUT}, { ERRDOS, 59, NT_STATUS_TRANSACTION_NO_RELEASE}, { ERRDOS, 59, NT_STATUS_TRANSACTION_NO_MATCH}, { ERRDOS, 59, NT_STATUS_TRANSACTION_RESPONDED}, { ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_ID}, { ERRDOS, 59, NT_STATUS_TRANSACTION_INVALID_TYPE}, { ERRDOS, ERRunsup, NT_STATUS_NOT_SERVER_SESSION}, { ERRDOS, ERRunsup, NT_STATUS_NOT_CLIENT_SESSION}, { ERRHRD, ERRgeneral, NT_STATUS_CANNOT_LOAD_REGISTRY_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_DEBUG_ATTACH_FAILED}, { ERRHRD, ERRgeneral, 
NT_STATUS_SYSTEM_PROCESS_TERMINATED}, { ERRHRD, ERRgeneral, NT_STATUS_DATA_NOT_ACCEPTED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_BROWSER_SERVERS_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_VDM_HARD_ERROR}, { ERRHRD, ERRgeneral, NT_STATUS_DRIVER_CANCEL_TIMEOUT}, { ERRHRD, ERRgeneral, NT_STATUS_REPLY_MESSAGE_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_MAPPED_ALIGNMENT}, { ERRDOS, 193, NT_STATUS_IMAGE_CHECKSUM_MISMATCH}, { ERRHRD, ERRgeneral, NT_STATUS_LOST_WRITEBEHIND_DATA}, { ERRHRD, ERRgeneral, NT_STATUS_CLIENT_SERVER_PARAMETERS_INVALID}, { ERRSRV, ERRpasswordExpired, NT_STATUS_PASSWORD_MUST_CHANGE}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_NOT_TINY_STREAM}, { ERRHRD, ERRgeneral, NT_STATUS_RECOVERY_FAILURE}, { ERRHRD, ERRgeneral, NT_STATUS_STACK_OVERFLOW_READ}, { ERRHRD, ERRgeneral, NT_STATUS_FAIL_CHECK}, { ERRHRD, ERRgeneral, NT_STATUS_DUPLICATE_OBJECTID}, { ERRHRD, ERRgeneral, NT_STATUS_OBJECTID_EXISTS}, { ERRHRD, ERRgeneral, NT_STATUS_CONVERT_TO_LARGE}, { ERRHRD, ERRgeneral, NT_STATUS_RETRY}, { ERRHRD, ERRgeneral, NT_STATUS_FOUND_OUT_OF_SCOPE}, { ERRHRD, ERRgeneral, NT_STATUS_ALLOCATE_BUCKET}, { ERRHRD, ERRgeneral, NT_STATUS_PROPSET_NOT_FOUND}, { ERRHRD, ERRgeneral, NT_STATUS_MARSHALL_OVERFLOW}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_VARIANT}, { ERRHRD, ERRgeneral, NT_STATUS_DOMAIN_CONTROLLER_NOT_FOUND}, { ERRDOS, ERRnoaccess, NT_STATUS_ACCOUNT_LOCKED_OUT}, { ERRDOS, ERRbadfid, NT_STATUS_HANDLE_NOT_CLOSABLE}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_REFUSED}, { ERRHRD, ERRgeneral, NT_STATUS_GRACEFUL_DISCONNECT}, { ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_ALREADY_ASSOCIATED}, { ERRHRD, ERRgeneral, NT_STATUS_ADDRESS_NOT_ASSOCIATED}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_INVALID}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ACTIVE}, { ERRHRD, ERRgeneral, NT_STATUS_NETWORK_UNREACHABLE}, { ERRHRD, ERRgeneral, NT_STATUS_HOST_UNREACHABLE}, { ERRHRD, ERRgeneral, NT_STATUS_PROTOCOL_UNREACHABLE}, { ERRHRD, ERRgeneral, 
NT_STATUS_PORT_UNREACHABLE}, { ERRHRD, ERRgeneral, NT_STATUS_REQUEST_ABORTED}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_ABORTED}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_COMPRESSION_BUFFER}, { ERRHRD, ERRgeneral, NT_STATUS_USER_MAPPED_FILE}, { ERRHRD, ERRgeneral, NT_STATUS_AUDIT_FAILED}, { ERRHRD, ERRgeneral, NT_STATUS_TIMER_RESOLUTION_NOT_SET}, { ERRHRD, ERRgeneral, NT_STATUS_CONNECTION_COUNT_LIMIT}, { ERRHRD, ERRgeneral, NT_STATUS_LOGIN_TIME_RESTRICTION}, { ERRHRD, ERRgeneral, NT_STATUS_LOGIN_WKSTA_RESTRICTION}, { ERRDOS, 193, NT_STATUS_IMAGE_MP_UP_MISMATCH}, { ERRHRD, ERRgeneral, 0xc000024a}, { ERRHRD, ERRgeneral, 0xc000024b}, { ERRHRD, ERRgeneral, 0xc000024c}, { ERRHRD, ERRgeneral, 0xc000024d}, { ERRHRD, ERRgeneral, 0xc000024e}, { ERRHRD, ERRgeneral, 0xc000024f}, { ERRHRD, ERRgeneral, NT_STATUS_INSUFFICIENT_LOGON_INFO}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_DLL_ENTRYPOINT}, { ERRHRD, ERRgeneral, NT_STATUS_BAD_SERVICE_ENTRYPOINT}, { ERRHRD, ERRgeneral, NT_STATUS_LPC_REPLY_LOST}, { ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT1}, { ERRHRD, ERRgeneral, NT_STATUS_IP_ADDRESS_CONFLICT2}, { ERRHRD, ERRgeneral, NT_STATUS_REGISTRY_QUOTA_LIMIT}, { ERRSRV, 3, NT_STATUS_PATH_NOT_COVERED}, { ERRHRD, ERRgeneral, NT_STATUS_NO_CALLBACK_ACTIVE}, { ERRHRD, ERRgeneral, NT_STATUS_LICENSE_QUOTA_EXCEEDED}, { ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_SHORT}, { ERRHRD, ERRgeneral, NT_STATUS_PWD_TOO_RECENT}, { ERRHRD, ERRgeneral, NT_STATUS_PWD_HISTORY_CONFLICT}, { ERRHRD, ERRgeneral, 0xc000025d}, { ERRHRD, ERRgeneral, NT_STATUS_PLUGPLAY_NO_DEVICE}, { ERRHRD, ERRgeneral, NT_STATUS_UNSUPPORTED_COMPRESSION}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_HW_PROFILE}, { ERRHRD, ERRgeneral, NT_STATUS_INVALID_PLUGPLAY_DEVICE_PATH}, { ERRDOS, 182, NT_STATUS_DRIVER_ORDINAL_NOT_FOUND}, { ERRDOS, 127, NT_STATUS_DRIVER_ENTRYPOINT_NOT_FOUND}, { ERRDOS, 288, NT_STATUS_RESOURCE_NOT_OWNED}, { ERRDOS, ErrTooManyLinks, NT_STATUS_TOO_MANY_LINKS}, { ERRHRD, ERRgeneral, NT_STATUS_QUOTA_LIST_INCONSISTENT}, { ERRHRD, 
ERRgeneral, NT_STATUS_FILE_IS_OFFLINE}, { ERRDOS, 21, 0xc000026e}, { ERRDOS, 161, 0xc0000281}, { ERRDOS, ERRnoaccess, 0xc000028a}, { ERRDOS, ERRnoaccess, 0xc000028b}, { ERRHRD, ERRgeneral, 0xc000028c}, { ERRDOS, ERRnoaccess, 0xc000028d}, { ERRDOS, ERRnoaccess, 0xc000028e}, { ERRDOS, ERRnoaccess, 0xc000028f}, { ERRDOS, ERRnoaccess, 0xc0000290}, { ERRDOS, ERRbadfunc, 0xc000029c}, { ERRDOS, ERRsymlink, NT_STATUS_STOPPED_ON_SYMLINK}, { ERRDOS, ERRinvlevel, 0x007c0001}, }; /***************************************************************************** Print an error message from the status code *****************************************************************************/ static void cifs_print_status(__u32 status_code) { int idx = 0; while (nt_errs[idx].nt_errstr != NULL) { if (((nt_errs[idx].nt_errcode) & 0xFFFFFF) == (status_code & 0xFFFFFF)) { printk(KERN_NOTICE "Status code returned 0x%08x %s\n", status_code, nt_errs[idx].nt_errstr); } idx++; } return; } static void ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode) { int i; if (ntstatus == 0) { *eclass = 0; *ecode = 0; return; } for (i = 0; ntstatus_to_dos_map[i].ntstatus; i++) { if (ntstatus == ntstatus_to_dos_map[i].ntstatus) { *eclass = ntstatus_to_dos_map[i].dos_class; *ecode = ntstatus_to_dos_map[i].dos_code; return; } } *eclass = ERRHRD; *ecode = ERRgeneral; } int map_smb_to_linux_error(struct smb_hdr *smb, bool logErr) { unsigned int i; int rc = -EIO; /* if transport error smb error may not be set */ __u8 smberrclass; __u16 smberrcode; /* BB if NT Status codes - map NT BB */ /* old style smb error codes */ if (smb->Status.CifsError == 0) return 0; if (smb->Flags2 & SMBFLG2_ERR_STATUS) { /* translate the newer STATUS codes to old style SMB errors * and then to POSIX errors */ __u32 err = le32_to_cpu(smb->Status.CifsError); if (logErr && (err != (NT_STATUS_MORE_PROCESSING_REQUIRED))) cifs_print_status(err); else if (cifsFYI & CIFS_RC) cifs_print_status(err); ntstatus_to_dos(err, &smberrclass, 
&smberrcode); } else { smberrclass = smb->Status.DosError.ErrorClass; smberrcode = le16_to_cpu(smb->Status.DosError.Error); } /* old style errors */ /* DOS class smb error codes - map DOS */ if (smberrclass == ERRDOS) { /* 1 byte field no need to byte reverse */ for (i = 0; i < sizeof(mapping_table_ERRDOS) / sizeof(struct smb_to_posix_error); i++) { if (mapping_table_ERRDOS[i].smb_err == 0) break; else if (mapping_table_ERRDOS[i].smb_err == smberrcode) { rc = mapping_table_ERRDOS[i].posix_code; break; } /* else try next error mapping one to see if match */ } } else if (smberrclass == ERRSRV) { /* server class of error codes */ for (i = 0; i < sizeof(mapping_table_ERRSRV) / sizeof(struct smb_to_posix_error); i++) { if (mapping_table_ERRSRV[i].smb_err == 0) break; else if (mapping_table_ERRSRV[i].smb_err == smberrcode) { rc = mapping_table_ERRSRV[i].posix_code; break; } /* else try next error mapping to see if match */ } } /* else ERRHRD class errors or junk - return EIO */ cFYI(1, "Mapping smb error code 0x%x to POSIX err %d", le32_to_cpu(smb->Status.CifsError), rc); /* generic corrective action e.g. reconnect SMB session on * ERRbaduid could be added */ return rc; } /* * calculate the size of the SMB message based on the fixed header * portion, the number of word parameters and the data portion of the message */ unsigned int smbCalcSize(struct smb_hdr *ptr) { return (sizeof(struct smb_hdr) + (2 * ptr->WordCount) + 2 /* size of the bcc field */ + get_bcc(ptr)); } /* The following are taken from fs/ntfs/util.c */ #define NTFS_TIME_OFFSET ((u64)(369*365 + 89) * 24 * 3600 * 10000000) /* * Convert the NT UTC (based 1601-01-01, in hundred nanosecond units) * into Unix UTC (based 1970-01-01, in seconds). */ struct timespec cifs_NTtimeToUnix(__le64 ntutc) { struct timespec ts; /* BB what about the timezone? BB */ /* Subtract the NTFS time offset, then convert to 1s intervals. 
*/ u64 t; t = le64_to_cpu(ntutc) - NTFS_TIME_OFFSET; ts.tv_nsec = do_div(t, 10000000) * 100; ts.tv_sec = t; return ts; } /* Convert the Unix UTC into NT UTC. */ u64 cifs_UnixTimeToNT(struct timespec t) { /* Convert to 100ns intervals and then add the NTFS time offset. */ return (u64) t.tv_sec * 10000000 + t.tv_nsec/100 + NTFS_TIME_OFFSET; } static int total_days_of_prev_months[] = {0, 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334}; struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, int offset) { struct timespec ts; int sec, min, days, month, year; u16 date = le16_to_cpu(le_date); u16 time = le16_to_cpu(le_time); SMB_TIME *st = (SMB_TIME *)&time; SMB_DATE *sd = (SMB_DATE *)&date; cFYI(1, "date %d time %d", date, time); sec = 2 * st->TwoSeconds; min = st->Minutes; if ((sec > 59) || (min > 59)) cERROR(1, "illegal time min %d sec %d", min, sec); sec += (min * 60); sec += 60 * 60 * st->Hours; if (st->Hours > 24) cERROR(1, "illegal hours %d", st->Hours); days = sd->Day; month = sd->Month; if ((days > 31) || (month > 12)) { cERROR(1, "illegal date, month %d day: %d", month, days); if (month > 12) month = 12; } month -= 1; days += total_days_of_prev_months[month]; days += 3652; /* account for difference in days between 1980 and 1970 */ year = sd->Year; days += year * 365; days += (year/4); /* leap year */ /* generalized leap year calculation is more complex, ie no leap year for years/100 except for years/400, but since the maximum number for DOS year is 2**7, the last year is 1980+127, which means we need only consider 2 special case years, ie the years 2000 and 2100, and only adjust for the lack of leap year for the year 2100, as 2000 was a leap year (divisable by 400) */ if (year >= 120) /* the year 2100 */ days = days - 1; /* do not count leap year for the year 2100 */ /* adjust for leap year where we are still before leap day */ if (year != 120) days -= ((year & 0x03) == 0) && (month < 2 ? 
1 : 0); sec += 24 * 60 * 60 * days; ts.tv_sec = sec + offset; /* cFYI(1, "sec after cnvrt dos to unix time %d",sec); */ ts.tv_nsec = 0; return ts; }
gpl-2.0
utkanos/android_tuna_omap_kernel
drivers/s390/crypto/zcrypt_mono.c
4415
2270
/* * linux/drivers/s390/crypto/zcrypt_mono.c * * zcrypt 2.1.0 * * Copyright (C) 2001, 2006 IBM Corporation * Author(s): Robert Burroughs * Eric Rossman (edrossma@us.ibm.com) * * Hotplug & misc device support: Jochen Roehrig (roehrig@de.ibm.com) * Major cleanup & driver split: Martin Schwidefsky <schwidefsky@de.ibm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/miscdevice.h> #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/compat.h> #include <asm/atomic.h> #include <asm/uaccess.h> #include "ap_bus.h" #include "zcrypt_api.h" #include "zcrypt_pcica.h" #include "zcrypt_pcicc.h" #include "zcrypt_pcixcc.h" #include "zcrypt_cex2a.h" /** * The module initialization code. */ static int __init zcrypt_init(void) { int rc; rc = ap_module_init(); if (rc) goto out; rc = zcrypt_api_init(); if (rc) goto out_ap; rc = zcrypt_pcica_init(); if (rc) goto out_api; rc = zcrypt_pcicc_init(); if (rc) goto out_pcica; rc = zcrypt_pcixcc_init(); if (rc) goto out_pcicc; rc = zcrypt_cex2a_init(); if (rc) goto out_pcixcc; return 0; out_pcixcc: zcrypt_pcixcc_exit(); out_pcicc: zcrypt_pcicc_exit(); out_pcica: zcrypt_pcica_exit(); out_api: zcrypt_api_exit(); out_ap: ap_module_exit(); out: return rc; } /** * The module termination code. 
*/ static void __exit zcrypt_exit(void) { zcrypt_cex2a_exit(); zcrypt_pcixcc_exit(); zcrypt_pcicc_exit(); zcrypt_pcica_exit(); zcrypt_api_exit(); ap_module_exit(); } module_init(zcrypt_init); module_exit(zcrypt_exit);
gpl-2.0
charles1018/kernel_sony_14.4.A.0.157
arch/tile/mm/homecache.c
4415
13099
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 *
 * This code maintains the "home" for each page in the system.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>

#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include <arch/sim.h>

#include "migrate.h"


#if CHIP_HAS_COHERENT_LOCAL_CACHE()

/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home.  There's no point in using it if we
 * don't have coherent local caching, though.
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
	noallocl2 = 1;
	return 0;
}
early_param("noallocl2", set_noallocl2);

#else

#define noallocl2 0

#endif

/* Provide no-op versions of these routines to keep flush_remote() cleaner. */
#define mark_caches_evicted_start() 0
#define mark_caches_evicted_finish(mask, timestamp) do {} while (0)


/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.  Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
			    struct cpumask *tlb_cpumask,
			    unsigned long tlb_va, unsigned long tlb_length,
			    HV_Remote_ASID *asids, int asidcount)
{
	struct cpumask mask;
	int i, cpu;

	/* Accumulate every cpu we will touch into one mask. */
	cpumask_clear(&mask);
	if (cache_cpumask)
		cpumask_or(&mask, &mask, cache_cpumask);
	if (tlb_cpumask && tlb_length) {
		cpumask_or(&mask, &mask, tlb_cpumask);
	}

	/* ASIDs address cpus by (x, y) grid coordinates. */
	for (i = 0; i < asidcount; ++i)
		cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

	/*
	 * Don't bother to update atomically; losing a count
	 * here is not that critical.
	 */
	for_each_cpu(cpu, &mask)
		++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}

/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Centralizes the mark_caches_evicted() handling.
 *  - Canonicalizes that lengths of zero make cpumasks NULL.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
		  const struct cpumask *cache_cpumask_orig,
		  HV_VirtAddr tlb_va, unsigned long tlb_length,
		  unsigned long tlb_pgsize,
		  const struct cpumask *tlb_cpumask_orig,
		  HV_Remote_ASID *asids, int asidcount)
{
	int rc;
	int timestamp = 0; /* happy compiler */
	struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
	struct cpumask *cache_cpumask, *tlb_cpumask;
	HV_PhysAddr cache_pa;
	char cache_buf[NR_CPUS*5], tlb_buf[NR_CPUS*5];

	mb();   /* provided just to simplify "magic hypervisor" mode */

	/*
	 * Canonicalize and copy the cpumasks.
	 */
	if (cache_cpumask_orig && cache_control) {
		cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
		cache_cpumask = &cache_cpumask_copy;
	} else {
		cpumask_clear(&cache_cpumask_copy);
		cache_cpumask = NULL;
	}
	if (cache_cpumask == NULL)
		cache_control = 0;
	if (tlb_cpumask_orig && tlb_length) {
		cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
		tlb_cpumask = &tlb_cpumask_copy;
	} else {
		cpumask_clear(&tlb_cpumask_copy);
		tlb_cpumask = NULL;
	}

	hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
			asids, asidcount);
	cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
	if (cache_control & HV_FLUSH_EVICT_L2)
		timestamp = mark_caches_evicted_start();
	rc = hv_flush_remote(cache_pa, cache_control,
			     cpumask_bits(cache_cpumask),
			     tlb_va, tlb_length, tlb_pgsize,
			     cpumask_bits(tlb_cpumask),
			     asids, asidcount);
	if (cache_control & HV_FLUSH_EVICT_L2)
		mark_caches_evicted_finish(cache_cpumask, timestamp);
	if (rc == 0)
		return;

	/* A failed hypervisor flush is unrecoverable: dump args and stop. */
	cpumask_scnprintf(cache_buf, sizeof(cache_buf), &cache_cpumask_copy);
	cpumask_scnprintf(tlb_buf, sizeof(tlb_buf), &tlb_cpumask_copy);

	pr_err("hv_flush_remote(%#llx, %#lx, %p [%s],"
	       " %#lx, %#lx, %#lx, %p [%s], %p, %d) = %d\n",
	       cache_pa, cache_control, cache_cpumask, cache_buf,
	       (unsigned long)tlb_va, tlb_length, tlb_pgsize, tlb_cpumask,
	       tlb_buf, asids, asidcount, rc);
	panic("Unsafe to continue.");
}

/* Flush each page of the given order from its home cache. */
void flush_remote_page(struct page *page, int order)
{
	int i, pages = (1 << order);
	for (i = 0; i < pages; ++i, ++page) {
		void *p = kmap_atomic(page);
		int hfh = 0;
		int home = page_home(page);
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH)
			hfh = 1;
		else
#endif
			BUG_ON(home < 0 || home >= NR_CPUS);
		finv_buffer_remote(p, PAGE_SIZE, hfh);
		kunmap_atomic(p);
	}
}

/* Evict the entire L2 cache on every cpu in "mask". */
void homecache_evict(const struct cpumask *mask)
{
	flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}

/*
 * Return a mask of the cpus whose caches currently own these pages.
 * The return value is whether the pages are all coherently cached
 * (i.e. none are immutable, incoherent, or uncached).
 */
static int homecache_mask(struct page *page, int pages,
			  struct cpumask *home_mask)
{
	int i;
	int cached_coherently = 1;
	cpumask_clear(home_mask);
	for (i = 0; i < pages; ++i) {
		int home = page_home(&page[i]);
		if (home == PAGE_HOME_IMMUTABLE ||
		    home == PAGE_HOME_INCOHERENT) {
			/* Could be cached anywhere: assume all cpus. */
			cpumask_copy(home_mask, cpu_possible_mask);
			return 0;
		}
#if CHIP_HAS_CBOX_HOME_MAP()
		if (home == PAGE_HOME_HASH) {
			cpumask_or(home_mask, home_mask, &hash_for_home_map);
			continue;
		}
#endif
		if (home == PAGE_HOME_UNCACHED) {
			cached_coherently = 0;
			continue;
		}
		BUG_ON(home < 0 || home >= NR_CPUS);
		cpumask_set_cpu(home, home_mask);
	}
	return cached_coherently;
}

/*
 * Return the passed length, or zero if it's long enough that we
 * believe we should evict the whole L2 cache.
 */
static unsigned long cache_flush_length(unsigned long length)
{
	return (length >= CHIP_L2_CACHE_SIZE()) ? HV_FLUSH_EVICT_L2 : length;
}

/* Flush a page out of whatever cache(s) it is in. */
void homecache_flush_cache(struct page *page, int order)
{
	int pages = 1 << order;
	int length = cache_flush_length(pages * PAGE_SIZE);
	unsigned long pfn = page_to_pfn(page);
	struct cpumask home_mask;

	homecache_mask(page, pages, &home_mask);
	flush_remote(pfn, length, &home_mask, 0, 0, 0, NULL, NULL, 0);
	sim_validate_lines_evicted(PFN_PHYS(pfn), pages * PAGE_SIZE);
}


/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
	if (hv_pte_get_nc(pte))
		return PAGE_HOME_IMMUTABLE;
	switch (hv_pte_get_mode(pte)) {
	case HV_PTE_MODE_CACHE_TILE_L3:
		return get_remote_cache_cpu(pte);
	case HV_PTE_MODE_CACHE_NO_L3:
		return PAGE_HOME_INCOHERENT;
	case HV_PTE_MODE_UNCACHED:
		return PAGE_HOME_UNCACHED;
#if CHIP_HAS_CBOX_HOME_MAP()
	case HV_PTE_MODE_CACHE_HASH_L3:
		return PAGE_HOME_HASH;
#endif
	}
	panic("Bad PTE %#llx\n", pte.val);
}

/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
	/* Check for non-linear file mapping "PTEs" and pass them through. */
	if (pte_file(pte))
		return pte;

#if CHIP_HAS_MMIO()
	/* Check for MMIO mappings and pass them through. */
	if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
		return pte;
#endif

	/*
	 * Only immutable pages get NC mappings.  If we have a
	 * non-coherent PTE, but the underlying page is not
	 * immutable, it's likely the result of a forced
	 * caching setting running up against ptrace setting
	 * the page to be writable underneath.  In this case,
	 * just keep the PTE coherent.
	 */
	if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
		pte = hv_pte_clear_nc(pte);
		pr_err("non-immutable page incoherently referenced: %#llx\n",
		       pte.val);
	}

	switch (home) {

	case PAGE_HOME_UNCACHED:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
		break;

	case PAGE_HOME_INCOHERENT:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		break;

	case PAGE_HOME_IMMUTABLE:
		/*
		 * We could home this page anywhere, since it's immutable,
		 * but by default just home it to follow "hash_default".
		 */
		BUG_ON(hv_pte_get_writable(pte));
		if (pte_get_forcecache(pte)) {
			/* Upgrade "force any cpu" to "No L3" for immutable. */
			if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
			    && pte_get_anyhome(pte)) {
				pte = hv_pte_set_mode(pte,
						      HV_PTE_MODE_CACHE_NO_L3);
			}
		} else
#if CHIP_HAS_CBOX_HOME_MAP()
		if (hash_default)
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		else
#endif
			pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
		pte = hv_pte_set_nc(pte);
		break;

#if CHIP_HAS_CBOX_HOME_MAP()
	case PAGE_HOME_HASH:
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
		break;
#endif

	default:
		/* Any other "home" must be a valid cpu id. */
		BUG_ON(home < 0 || home >= NR_CPUS ||
		       !cpu_is_valid_lotar(home));
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
		pte = set_remote_cache_cpu(pte, home);
		break;
	}

#if CHIP_HAS_NC_AND_NOALLOC_BITS()
	if (noallocl2)
		pte = hv_pte_set_no_alloc_l2(pte);

	/* Simplify "no local and no l3" to "uncached" */
	if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
	    hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
		pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
	}
#endif

	/* Checking this case here gives a better panic than from the hv. */
	BUG_ON(hv_pte_get_mode(pte) == 0);

	return pte;
}
EXPORT_SYMBOL(pte_set_home);


/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache
 * of a kernel page once, and require a full-chip cache/TLB flush,
 * so they're not suitable for anything but infrequent use.
 */

#if CHIP_HAS_CBOX_HOME_MAP()
static inline int initial_page_home(void) { return PAGE_HOME_HASH; }
#else
static inline int initial_page_home(void) { return 0; }
#endif

/* Return the current home of a page; highmem pages keep the default home. */
int page_home(struct page *page)
{
	if (PageHighMem(page)) {
		return initial_page_home();
	} else {
		unsigned long kva = (unsigned long)page_address(page);
		return pte_to_home(*virt_to_pte(NULL, kva));
	}
}
EXPORT_SYMBOL(page_home);

/*
 * Rehome a lowmem page: flush it everywhere, then rewrite its kernel
 * PTEs.  The page must be unmapped and have at most one reference.
 */
void homecache_change_page_home(struct page *page, int order, int home)
{
	int i, pages = (1 << order);
	unsigned long kva;

	BUG_ON(PageHighMem(page));
	BUG_ON(page_count(page) > 1);
	BUG_ON(page_mapcount(page) != 0);
	kva = (unsigned long) page_address(page);
	flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
		     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
		     NULL, 0);

	for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
		pte_t *ptep = virt_to_pte(NULL, kva);
		pte_t pteval = *ptep;
		BUG_ON(!pte_present(pteval) || pte_huge(pteval));
		__set_pte(ptep, pte_set_home(pteval, home));
	}
}

/* Allocate lowmem pages and immediately rehome them. */
struct page *homecache_alloc_pages(gfp_t gfp_mask,
				   unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages(gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);

/* Node-aware variant of homecache_alloc_pages(). */
struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					unsigned int order, int home)
{
	struct page *page;
	BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
	page = alloc_pages_node(nid, gfp_mask, order);
	if (page)
		homecache_change_page_home(page, order, home);
	return page;
}

/*
 * Free pages allocated via the homecache allocators, restoring the
 * default home before handing them back to the page allocator.
 */
void homecache_free_pages(unsigned long addr, unsigned int order)
{
	struct page *page;

	if (addr == 0)
		return;

	VM_BUG_ON(!virt_addr_valid((void *)addr));
	page = virt_to_page((void *)addr);
	if (put_page_testzero(page)) {
		homecache_change_page_home(page, order, initial_page_home());
		if (order == 0) {
			free_hot_cold_page(page, 0);
		} else {
			init_page_count(page);
			__free_pages(page, order);
		}
	}
}
gpl-2.0
andi34/kernel_oneplus_msm8974
arch/arm/mach-pxa/poodle.c
4415
10433
/*
 * linux/arch/arm/mach-pxa/poodle.c
 *
 * Support for the SHARP Poodle Board.
 *
 * Based on:
 *  linux/arch/arm/mach-pxa/lubbock.c Author: Nicolas Pitre
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Change Log
 *  12-Dec-2002 Sharp Corporation for Poodle
 *  John Lenz <lenz@cs.wisc.edu> updates to 2.6
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/fb.h>
#include <linux/pm.h>
#include <linux/delay.h>
#include <linux/mtd/physmap.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
#include <linux/i2c/pxa-i2c.h>
#include <linux/spi/spi.h>
#include <linux/spi/ads7846.h>
#include <linux/spi/pxa2xx_spi.h>
#include <linux/mtd/sharpsl.h>

#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/irq.h>
#include <asm/setup.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach/irq.h>

#include <mach/pxa25x.h>
#include <mach/mmc.h>
#include <mach/udc.h>
#include <mach/irda.h>
#include <mach/poodle.h>
#include <mach/pxafb.h>

#include <asm/hardware/scoop.h>
#include <asm/hardware/locomo.h>
#include <asm/mach/sharpsl_param.h>

#include "generic.h"
#include "devices.h"

/* MFP (multi-function pin) routing for the whole board. */
static unsigned long poodle_pin_config[] __initdata = {
	/* I/O */
	GPIO79_nCS_3,
	GPIO80_nCS_4,
	GPIO18_RDY,

	/* Clock */
	GPIO12_32KHz,

	/* SSP1 */
	GPIO23_SSP1_SCLK,
	GPIO25_SSP1_TXD,
	GPIO26_SSP1_RXD,
	GPIO24_GPIO,	/* POODLE_GPIO_TP_CS - SFRM as chip select */

	/* I2S */
	GPIO28_I2S_BITCLK_OUT,
	GPIO29_I2S_SDATA_IN,
	GPIO30_I2S_SDATA_OUT,
	GPIO31_I2S_SYNC,
	GPIO32_I2S_SYSCLK,

	/* Infra-Red */
	GPIO47_FICP_TXD,
	GPIO46_FICP_RXD,

	/* FFUART */
	GPIO40_FFUART_DTR,
	GPIO41_FFUART_RTS,
	GPIO39_FFUART_TXD,
	GPIO37_FFUART_DSR,
	GPIO34_FFUART_RXD,
	GPIO35_FFUART_CTS,

	/* LCD */
	GPIOxx_LCD_TFT_16BPP,

	/* PC Card */
	GPIO48_nPOE,
	GPIO49_nPWE,
	GPIO50_nPIOR,
	GPIO51_nPIOW,
	GPIO52_nPCE_1,
	GPIO53_nPCE_2,
	GPIO54_nPSKTSEL,
	GPIO55_nPREG,
	GPIO56_nPWAIT,
	GPIO57_nIOIS16,

	/* MMC */
	GPIO6_MMC_CLK,
	GPIO8_MMC_CS0,

	/* GPIO */
	GPIO9_GPIO,	/* POODLE_GPIO_nSD_DETECT */
	GPIO7_GPIO,	/* POODLE_GPIO_nSD_WP */
	GPIO3_GPIO,	/* POODLE_GPIO_SD_PWR */
	GPIO33_GPIO,	/* POODLE_GPIO_SD_PWR1 */
	GPIO20_GPIO,	/* POODLE_GPIO_USB_PULLUP */
	GPIO22_GPIO,	/* POODLE_GPIO_IR_ON */
};

/* SCOOP companion chip (GPIO expander / PCMCIA glue) at 0x10800000. */
static struct resource poodle_scoop_resources[] = {
	[0] = {
		.start	= 0x10800000,
		.end	= 0x10800fff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct scoop_config poodle_scoop_setup = {
	.io_dir		= POODLE_SCOOP_IO_DIR,
	.io_out		= POODLE_SCOOP_IO_OUT,
	.gpio_base	= POODLE_SCOOP_GPIO_BASE,
};

struct platform_device poodle_scoop_device = {
	.name		= "sharp-scoop",
	.id		= -1,
	.dev		= {
		.platform_data	= &poodle_scoop_setup,
	},
	.num_resources	= ARRAY_SIZE(poodle_scoop_resources),
	.resource	= poodle_scoop_resources,
};

/* One PCMCIA slot, card-detect and IRQ routed through SCOOP. */
static struct scoop_pcmcia_dev poodle_pcmcia_scoop[] = {
	{
		.dev		= &poodle_scoop_device.dev,
		.irq		= POODLE_IRQ_GPIO_CF_IRQ,
		.cd_irq		= POODLE_IRQ_GPIO_CF_CD,
		.cd_irq_str	= "PCMCIA0 CD",
	},
};

static struct scoop_pcmcia_config poodle_pcmcia_config = {
	.devs		= &poodle_pcmcia_scoop[0],
	.num_devs	= 1,
};

EXPORT_SYMBOL(poodle_scoop_device);

static struct platform_device poodle_audio_device = {
	.name	= "poodle-audio",
	.id	= -1,
};

/* LoCoMo device */
static struct resource locomo_resources[] = {
	[0] = {
		.start	= 0x10000000,
		.end	= 0x10001fff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= PXA_GPIO_TO_IRQ(10),
		.end	= PXA_GPIO_TO_IRQ(10),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct locomo_platform_data locomo_info = {
	.irq_base	= IRQ_BOARD_START,
};

struct platform_device poodle_locomo_device = {
	.name		= "locomo",
	.id		= 0,
	.num_resources	= ARRAY_SIZE(locomo_resources),
	.resource	= locomo_resources,
	.dev		= {
		.platform_data	= &locomo_info,
	},
};

EXPORT_SYMBOL(poodle_locomo_device);

#if defined(CONFIG_SPI_PXA2XX) || defined(CONFIG_SPI_PXA2XX_MODULE)
/* ADS7846 touchscreen on SSP1, chip-selected via a plain GPIO. */
static struct pxa2xx_spi_master poodle_spi_info = {
	.num_chipselect	= 1,
};

static struct ads7846_platform_data poodle_ads7846_info = {
	.model			= 7846,
	.vref_delay_usecs	= 100,
	.x_plate_ohms		= 419,
	.y_plate_ohms		= 486,
	.gpio_pendown		= POODLE_GPIO_TP_INT,
};

static struct pxa2xx_spi_chip poodle_ads7846_chip = {
	.gpio_cs		= POODLE_GPIO_TP_CS,
};

static struct spi_board_info poodle_spi_devices[] = {
	{
		.modalias	= "ads7846",
		.max_speed_hz	= 10000,
		.bus_num	= 1,
		.platform_data	= &poodle_ads7846_info,
		.controller_data= &poodle_ads7846_chip,
		.irq		= PXA_GPIO_TO_IRQ(POODLE_GPIO_TP_INT),
	},
};

static void __init poodle_init_spi(void)
{
	pxa2xx_set_spi_info(1, &poodle_spi_info);
	spi_register_board_info(ARRAY_AND_SIZE(poodle_spi_devices));
}
#else
static inline void poodle_init_spi(void) {}
#endif

/*
 * MMC/SD Device
 *
 * The card detect interrupt isn't debounced so we delay it by 250ms
 * to give the card a chance to fully insert/eject.
 */
static int poodle_mci_init(struct device *dev,
			   irq_handler_t poodle_detect_int, void *data)
{
	int err;

	err = gpio_request(POODLE_GPIO_SD_PWR, "SD_PWR");
	if (err)
		goto err_free_2;

	err = gpio_request(POODLE_GPIO_SD_PWR1, "SD_PWR1");
	if (err)
		goto err_free_3;

	gpio_direction_output(POODLE_GPIO_SD_PWR, 0);
	gpio_direction_output(POODLE_GPIO_SD_PWR1, 0);

	return 0;

err_free_3:
	gpio_free(POODLE_GPIO_SD_PWR);
err_free_2:
	return err;
}

/* Sequence the two SD power rails; SD_PWR first, then SD_PWR1 after 2ms. */
static void poodle_mci_setpower(struct device *dev, unsigned int vdd)
{
	struct pxamci_platform_data* p_d = dev->platform_data;

	if ((1 << vdd) & p_d->ocr_mask) {
		gpio_set_value(POODLE_GPIO_SD_PWR, 1);
		mdelay(2);
		gpio_set_value(POODLE_GPIO_SD_PWR1, 1);
	} else {
		gpio_set_value(POODLE_GPIO_SD_PWR1, 0);
		gpio_set_value(POODLE_GPIO_SD_PWR, 0);
	}
}

static void poodle_mci_exit(struct device *dev, void *data)
{
	gpio_free(POODLE_GPIO_SD_PWR1);
	gpio_free(POODLE_GPIO_SD_PWR);
}

static struct pxamci_platform_data poodle_mci_platform_data = {
	.detect_delay_ms	= 250,
	.ocr_mask		= MMC_VDD_32_33|MMC_VDD_33_34,
	.init			= poodle_mci_init,
	.setpower		= poodle_mci_setpower,
	.exit			= poodle_mci_exit,
	.gpio_card_detect	= POODLE_GPIO_nSD_DETECT,
	.gpio_card_ro		= POODLE_GPIO_nSD_WP,
	.gpio_power		= -1,
};


/*
 * Irda
 */
static struct pxaficp_platform_data poodle_ficp_platform_data = {
	.gpio_pwdown		= POODLE_GPIO_IR_ON,
	.transceiver_cap	= IR_SIRMODE | IR_OFF,
};


/*
 * USB Device Controller
 */
static struct pxa2xx_udc_mach_info udc_info __initdata = {
	/* no connect GPIO; poodle can't tell connection status */
	.gpio_pullup	= POODLE_GPIO_USB_PULLUP,
};


/* PXAFB device */
static struct pxafb_mode_info poodle_fb_mode = {
	.pixclock	= 144700,
	.xres		= 320,
	.yres		= 240,
	.bpp		= 16,
	.hsync_len	= 7,
	.left_margin	= 11,
	.right_margin	= 30,
	.vsync_len	= 2,
	.upper_margin	= 2,
	.lower_margin	= 0,
	.sync		= FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT,
};

static struct pxafb_mach_info poodle_fb_info = {
	.modes		= &poodle_fb_mode,
	.num_modes	= 1,
	.lcd_conn	= LCD_COLOR_TFT_16BPP,
};

/* On-board NAND layout: 7MB system, 22MB root, rest for /home. */
static struct mtd_partition sharpsl_nand_partitions[] = {
	{
		.name = "System Area",
		.offset = 0,
		.size = 7 * 1024 * 1024,
	},
	{
		.name = "Root Filesystem",
		.offset = 7 * 1024 * 1024,
		.size = 22 * 1024 * 1024,
	},
	{
		.name = "Home Filesystem",
		.offset = MTDPART_OFS_APPEND,
		.size = MTDPART_SIZ_FULL,
	},
};

static uint8_t scan_ff_pattern[] = { 0xff, 0xff };

static struct nand_bbt_descr sharpsl_bbt = {
	.options = 0,
	.offs = 4,
	.len = 2,
	.pattern = scan_ff_pattern
};

static struct sharpsl_nand_platform_data sharpsl_nand_platform_data = {
	.badblock_pattern	= &sharpsl_bbt,
	.partitions		= sharpsl_nand_partitions,
	.nr_partitions		= ARRAY_SIZE(sharpsl_nand_partitions),
};

static struct resource sharpsl_nand_resources[] = {
	{
		.start	= 0x0C000000,
		.end	= 0x0C000FFF,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device sharpsl_nand_device = {
	.name		= "sharpsl-nand",
	.id		= -1,
	.resource	= sharpsl_nand_resources,
	.num_resources	= ARRAY_SIZE(sharpsl_nand_resources),
	.dev.platform_data	= &sharpsl_nand_platform_data,
};

/* Boot PROM flash, mapped via physmap; FS starts past the bootloader. */
static struct mtd_partition sharpsl_rom_parts[] = {
	{
		.name	="Boot PROM Filesystem",
		.offset	= 0x00120000,
		.size	= MTDPART_SIZ_FULL,
	},
};

static struct physmap_flash_data sharpsl_rom_data = {
	.width		= 2,
	.nr_parts	= ARRAY_SIZE(sharpsl_rom_parts),
	.parts		= sharpsl_rom_parts,
};

static struct resource sharpsl_rom_resources[] = {
	{
		.start	= 0x00000000,
		.end	= 0x007fffff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device sharpsl_rom_device = {
	.name	= "physmap-flash",
	.id	= -1,
	.resource = sharpsl_rom_resources,
	.num_resources = ARRAY_SIZE(sharpsl_rom_resources),
	.dev.platform_data = &sharpsl_rom_data,
};

static struct platform_device *devices[] __initdata = {
	&poodle_locomo_device,
	&poodle_scoop_device,
	&poodle_audio_device,
	&sharpsl_nand_device,
	&sharpsl_rom_device,
};

static struct i2c_board_info __initdata poodle_i2c_devices[] = {
	{ I2C_BOARD_INFO("wm8731", 0x1b) },
};

/* Power off by asking the bootloader for a 'h'(alt) restart. */
static void poodle_poweroff(void)
{
	pxa_restart('h', NULL);
}

static void __init poodle_init(void)
{
	int ret = 0;

	pm_power_off = poodle_poweroff;

	PCFR |= PCFR_OPDE;

	pxa2xx_mfp_config(ARRAY_AND_SIZE(poodle_pin_config));

	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);

	platform_scoop_config = &poodle_pcmcia_config;

	ret = platform_add_devices(devices, ARRAY_SIZE(devices));
	if (ret)
		pr_warning("poodle: Unable to register LoCoMo device\n");

	pxa_set_fb_info(&poodle_locomo_device.dev, &poodle_fb_info);
	pxa_set_udc_info(&udc_info);
	pxa_set_mci_info(&poodle_mci_platform_data);
	pxa_set_ficp_info(&poodle_ficp_platform_data);
	pxa_set_i2c_info(NULL);
	i2c_register_board_info(0, ARRAY_AND_SIZE(poodle_i2c_devices));
	poodle_init_spi();
}

/* Early fixup: recover Sharp bootloader params and declare the 32MB bank. */
static void __init fixup_poodle(struct tag *tags, char **cmdline,
				struct meminfo *mi)
{
	sharpsl_save_param();
	mi->nr_banks=1;
	mi->bank[0].start = 0xa0000000;
	mi->bank[0].size = (32*1024*1024);
}

MACHINE_START(POODLE, "SHARP Poodle")
	.fixup		= fixup_poodle,
	.map_io		= pxa25x_map_io,
	.nr_irqs	= POODLE_NR_IRQS,	/* 4 for LoCoMo */
	.init_irq	= pxa25x_init_irq,
	.handle_irq	= pxa25x_handle_irq,
	.timer		= &pxa_timer,
	.init_machine	= poodle_init,
	.restart	= pxa_restart,
MACHINE_END
gpl-2.0
SOKP/kernel_xiaomi_cancro
drivers/rtc/rtc-88pm860x.c
4927
12559
/*
 * Real Time Clock driver for Marvell 88PM860x PMIC
 *
 * Copyright (c) 2010 Marvell International Ltd.
 * Author:	Haojian Zhuang <haojian.zhuang@marvell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#include <linux/mfd/core.h>
#include <linux/mfd/88pm860x.h>

#define VRTC_CALIBRATION

struct pm860x_rtc_info {
	struct pm860x_chip	*chip;
	struct i2c_client	*i2c;
	struct rtc_device	*rtc_dev;
	struct device		*dev;
	struct delayed_work	calib_work;	/* periodic VRTC calibration */

	int			irq;
	int			vrtc;		/* requested VRTC setting, 2 bits */
	int			(*sync)(unsigned int ticks);
};

#define REG_VRTC_MEAS1		0x7D

/* Paged registers holding the 32-bit RTC "base" value, interleaved
 * as address/data pairs. */
#define REG0_ADDR		0xB0
#define REG1_ADDR		0xB2
#define REG2_ADDR		0xB4
#define REG3_ADDR		0xB6
#define REG0_DATA		0xB1
#define REG1_DATA		0xB3
#define REG2_DATA		0xB5
#define REG3_DATA		0xB7

/* bit definitions of Measurement Enable Register 2 (0x51) */
#define MEAS2_VRTC		(1 << 0)

/* bit definitions of RTC Register 1 (0xA0) */
#define ALARM_EN		(1 << 3)
#define ALARM_WAKEUP		(1 << 4)
#define ALARM			(1 << 5)
#define RTC1_USE_XO		(1 << 7)

#define VRTC_CALIB_INTERVAL	(HZ * 60 * 10)		/* 10 minutes */

/* Alarm IRQ: ack by writing the status bits while clearing ALARM_EN,
 * then notify the RTC core. */
static irqreturn_t rtc_update_handler(int irq, void *data)
{
	struct pm860x_rtc_info *info = (struct pm860x_rtc_info *)data;
	int mask;

	mask = ALARM | ALARM_WAKEUP;
	pm860x_set_bits(info->i2c, PM8607_RTC1, mask | ALARM_EN, mask);
	rtc_update_irq(info->rtc_dev, 1, RTC_AF);
	return IRQ_HANDLED;
}

static int pm860x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	struct pm860x_rtc_info *info = dev_get_drvdata(dev);

	if (enabled)
		pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, ALARM_EN);
	else
		pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, 0);
	return 0;
}

/*
 * Calculate the next alarm time given the requested alarm time mask
 * and the current time.
 */
static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now,
				struct rtc_time *alrm)
{
	unsigned long next_time;
	unsigned long now_time;

	next->tm_year = now->tm_year;
	next->tm_mon = now->tm_mon;
	next->tm_mday = now->tm_mday;
	next->tm_hour = alrm->tm_hour;
	next->tm_min = alrm->tm_min;
	next->tm_sec = alrm->tm_sec;

	rtc_tm_to_time(now, &now_time);
	rtc_tm_to_time(next, &next_time);

	if (next_time < now_time) {
		/* Advance one day */
		next_time += 60 * 60 * 24;
		rtc_time_to_tm(next_time, next);
	}
}

/*
 * Current time = 32-bit "base" (stored big-endian-ish across four paged
 * data registers) + free-running 32-bit read-only counter.
 */
static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct pm860x_rtc_info *info = dev_get_drvdata(dev);
	unsigned char buf[8];
	unsigned long ticks, base, data;

	pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
	dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
		buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
	base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];

	/* load 32-bit read-only counter */
	pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
	data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
	ticks = base + data;
	dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
		base, data, ticks);

	rtc_time_to_tm(ticks, tm);

	return 0;
}

/*
 * Set the time by rewriting the base value so that base + counter
 * equals the requested ticks; the counter itself is read-only.
 * Range-limited to 1970-2038 (32-bit seconds).
 */
static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct pm860x_rtc_info *info = dev_get_drvdata(dev);
	unsigned char buf[4];
	unsigned long ticks, base, data;

	if ((tm->tm_year < 70) || (tm->tm_year > 138)) {
		dev_dbg(info->dev, "Set time %d out of range. "
			"Please set time between 1970 to 2038.\n",
			1900 + tm->tm_year);
		return -EINVAL;
	}
	rtc_tm_to_time(tm, &ticks);

	/* load 32-bit read-only counter */
	pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
	data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
	base = ticks - data;
	dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
		base, data, ticks);

	pm860x_page_reg_write(info->i2c, REG0_DATA, (base >> 24) & 0xFF);
	pm860x_page_reg_write(info->i2c, REG1_DATA, (base >> 16) & 0xFF);
	pm860x_page_reg_write(info->i2c, REG2_DATA, (base >> 8) & 0xFF);
	pm860x_page_reg_write(info->i2c, REG3_DATA, base & 0xFF);

	if (info->sync)
		info->sync(ticks);
	return 0;
}

/* Alarm time = base + 32-bit expire counter; also report enable/pending
 * state from RTC1. */
static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct pm860x_rtc_info *info = dev_get_drvdata(dev);
	unsigned char buf[8];
	unsigned long ticks, base, data;
	int ret;

	pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
	dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
		buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
	base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];

	pm860x_bulk_read(info->i2c, PM8607_RTC_EXPIRE1, 4, buf);
	data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
	ticks = base + data;
	dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
		base, data, ticks);

	rtc_time_to_tm(ticks, &alrm->time);
	ret = pm860x_reg_read(info->i2c, PM8607_RTC1);
	alrm->enabled = (ret & ALARM_EN) ? 1 : 0;
	alrm->pending = (ret & (ALARM | ALARM_WAKEUP)) ? 1 : 0;
	return 0;
}

/*
 * Program the alarm: disable it first, roll the requested time forward
 * to the next occurrence within 24h, then write the expire counter and
 * re-arm (or leave disabled) according to alrm->enabled.
 */
static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm)
{
	struct pm860x_rtc_info *info = dev_get_drvdata(dev);
	struct rtc_time now_tm, alarm_tm;
	unsigned long ticks, base, data;
	unsigned char buf[8];
	int mask;

	pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, 0);

	pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf);
	dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1],
		buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]);
	base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7];

	/* load 32-bit read-only counter */
	pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf);
	data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0];
	ticks = base + data;
	dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n",
		base, data, ticks);

	rtc_time_to_tm(ticks, &now_tm);
	rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time);
	/* get new ticks for alarm in 24 hours */
	rtc_tm_to_time(&alarm_tm, &ticks);
	data = ticks - base;

	buf[0] = data & 0xff;
	buf[1] = (data >> 8) & 0xff;
	buf[2] = (data >> 16) & 0xff;
	buf[3] = (data >> 24) & 0xff;
	pm860x_bulk_write(info->i2c, PM8607_RTC_EXPIRE1, 4, buf);
	if (alrm->enabled) {
		mask = ALARM | ALARM_WAKEUP | ALARM_EN;
		pm860x_set_bits(info->i2c, PM8607_RTC1, mask, mask);
	} else {
		mask = ALARM | ALARM_WAKEUP | ALARM_EN;
		pm860x_set_bits(info->i2c, PM8607_RTC1, mask,
				ALARM | ALARM_WAKEUP);
	}
	return 0;
}

static const struct rtc_class_ops pm860x_rtc_ops = {
	.read_time	= pm860x_rtc_read_time,
	.set_time	= pm860x_rtc_set_time,
	.read_alarm	= pm860x_rtc_read_alarm,
	.set_alarm	= pm860x_rtc_set_alarm,
	.alarm_irq_enable = pm860x_rtc_alarm_irq_enable,
};

#ifdef VRTC_CALIBRATION
/*
 * Measure the VRTC rail (16 samples, averaged) and nudge the 2-bit
 * voltage setting in RTC_MISC1 up or down until the measurement is
 * within +/-200mV of the target; then disable the measurement.
 * Re-schedules itself after each adjustment.
 */
static void calibrate_vrtc_work(struct work_struct *work)
{
	struct pm860x_rtc_info *info = container_of(work,
		struct pm860x_rtc_info, calib_work.work);
	unsigned char buf[2];
	unsigned int sum, data, mean, vrtc_set;
	int i;

	for (i = 0, sum = 0; i < 16; i++) {
		msleep(100);
		pm860x_bulk_read(info->i2c, REG_VRTC_MEAS1, 2, buf);
		data = (buf[0] << 4) | buf[1];
		data = (data * 5400) >> 12;	/* convert to mv */
		sum += data;
	}
	mean = sum >> 4;
	vrtc_set = 2700 + (info->vrtc & 0x3) * 200;
	dev_dbg(info->dev, "mean:%d, vrtc_set:%d\n", mean, vrtc_set);

	sum = pm860x_reg_read(info->i2c, PM8607_RTC_MISC1);
	data = sum & 0x3;
	if ((mean + 200) < vrtc_set) {
		/* try higher voltage */
		if (++data == 4)
			goto out;
		data = (sum & 0xf8) | (data & 0x3);
		pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data);
	} else if ((mean - 200) > vrtc_set) {
		/* try lower voltage */
		if (data-- == 0)
			goto out;
		data = (sum & 0xf8) | (data & 0x3);
		pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data);
	} else
		goto out;
	dev_dbg(info->dev, "set 0x%x to RTC_MISC1\n", data);
	/* trigger next calibration since VRTC is updated */
	schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL);
	return;
out:
	/* disable measurement */
	pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
	dev_dbg(info->dev, "finish VRTC calibration\n");
	return;
}
#endif

static int __devinit pm860x_rtc_probe(struct platform_device *pdev)
{
	struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct pm860x_rtc_pdata *pdata = NULL;
	struct pm860x_rtc_info *info;
	struct rtc_time tm;
	unsigned long ticks = 0;
	int ret;

	pdata = pdev->dev.platform_data;
	if (pdata == NULL)
		dev_warn(&pdev->dev, "No platform data!\n");

	info = kzalloc(sizeof(struct pm860x_rtc_info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->irq = platform_get_irq(pdev, 0);
	if (info->irq < 0) {
		dev_err(&pdev->dev, "No IRQ resource!\n");
		ret = -EINVAL;
		goto out;
	}

	info->chip = chip;
	/* Use the companion client on non-8607 chips. */
	info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion;
	info->dev = &pdev->dev;
	dev_set_drvdata(&pdev->dev, info);

	ret = request_threaded_irq(info->irq, NULL, rtc_update_handler,
				   IRQF_ONESHOT, "rtc", info);
	if (ret < 0) {
		dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n",
			info->irq, ret);
		goto out;
	}

	/* set addresses of 32-bit base value for RTC time */
	pm860x_page_reg_write(info->i2c, REG0_ADDR, REG0_DATA);
	pm860x_page_reg_write(info->i2c, REG1_ADDR, REG1_DATA);
	pm860x_page_reg_write(info->i2c, REG2_ADDR, REG2_DATA);
	pm860x_page_reg_write(info->i2c, REG3_ADDR, REG3_DATA);

	ret = pm860x_rtc_read_time(&pdev->dev, &tm);
	if (ret < 0) {
		dev_err(&pdev->dev, "Failed to read initial time.\n");
		goto out_rtc;
	}
	/* Stored time invalid (e.g. first boot): reset to 1970-01-01. */
	if ((tm.tm_year < 70) || (tm.tm_year > 138)) {
		tm.tm_year = 70;
		tm.tm_mon = 0;
		tm.tm_mday = 1;
		tm.tm_hour = 0;
		tm.tm_min = 0;
		tm.tm_sec = 0;
		ret = pm860x_rtc_set_time(&pdev->dev, &tm);
		if (ret < 0) {
			dev_err(&pdev->dev, "Failed to set initial time.\n");
			goto out_rtc;
		}
	}
	rtc_tm_to_time(&tm, &ticks);
	if (pdata && pdata->sync) {
		pdata->sync(ticks);
		info->sync = pdata->sync;
	}

	info->rtc_dev = rtc_device_register("88pm860x-rtc", &pdev->dev,
					    &pm860x_rtc_ops, THIS_MODULE);
	ret = PTR_ERR(info->rtc_dev);
	if (IS_ERR(info->rtc_dev)) {
		dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret);
		goto out_rtc;
	}

	/*
	 * enable internal XO instead of internal 3.25MHz clock since it can
	 * free running in PMIC power-down state.
	 */
	pm860x_set_bits(info->i2c, PM8607_RTC1, RTC1_USE_XO, RTC1_USE_XO);

#ifdef VRTC_CALIBRATION
	/* <00> -- 2.7V, <01> -- 2.9V, <10> -- 3.1V, <11> -- 3.3V */
	if (pdata && pdata->vrtc)
		info->vrtc = pdata->vrtc & 0x3;
	else
		info->vrtc = 1;
	pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, MEAS2_VRTC);
	/* calibrate VRTC */
	INIT_DELAYED_WORK(&info->calib_work, calibrate_vrtc_work);
	schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL);
#endif	/* VRTC_CALIBRATION */

	device_init_wakeup(&pdev->dev, 1);

	return 0;
out_rtc:
	free_irq(info->irq, info);
out:
	kfree(info);
	return ret;
}

static int __devexit pm860x_rtc_remove(struct platform_device *pdev)
{
	struct pm860x_rtc_info *info = platform_get_drvdata(pdev);

#ifdef VRTC_CALIBRATION
	flush_scheduled_work();
	/* disable measurement */
	pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0);
#endif	/* VRTC_CALIBRATION */

	platform_set_drvdata(pdev, NULL);
	rtc_device_unregister(info->rtc_dev);
	free_irq(info->irq, info);
	kfree(info);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* Mark/unmark the RTC IRQ as a wakeup source in the parent chip. */
static int pm860x_rtc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);

	if (device_may_wakeup(dev))
		chip->wakeup_flag |= 1 << PM8607_IRQ_RTC;
	return 0;
}
static int pm860x_rtc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent);

	if (device_may_wakeup(dev))
		chip->wakeup_flag &= ~(1 << PM8607_IRQ_RTC);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(pm860x_rtc_pm_ops, pm860x_rtc_suspend, pm860x_rtc_resume);

static struct platform_driver pm860x_rtc_driver = {
	.driver		= {
		.name	= "88pm860x-rtc",
		.owner	= THIS_MODULE,
		.pm	= &pm860x_rtc_pm_ops,
	},
	.probe		= pm860x_rtc_probe,
	.remove		= __devexit_p(pm860x_rtc_remove),
};

module_platform_driver(pm860x_rtc_driver);

MODULE_DESCRIPTION("Marvell 88PM860x RTC driver");
MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>");
MODULE_LICENSE("GPL");
gpl-2.0
kogone/AK-Angler
drivers/net/wireless/hostap/hostap_80211_tx.c
7743
16387
#include <linux/slab.h> #include <linux/export.h> #include "hostap_80211.h" #include "hostap_common.h" #include "hostap_wlan.h" #include "hostap.h" #include "hostap_ap.h" /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ static unsigned char rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ static unsigned char bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; /* No encapsulation header if EtherType < 0x600 (=length) */ void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) { struct ieee80211_hdr *hdr; u16 fc; hdr = (struct ieee80211_hdr *) skb->data; printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n", name, skb->len, jiffies); if (skb->len < 2) return; fc = le16_to_cpu(hdr->frame_control); printk(KERN_DEBUG " FC=0x%04x (type=%d:%d)%s%s", fc, (fc & IEEE80211_FCTL_FTYPE) >> 2, (fc & IEEE80211_FCTL_STYPE) >> 4, fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "", fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : ""); if (skb->len < IEEE80211_DATA_HDR3_LEN) { printk("\n"); return; } printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id), le16_to_cpu(hdr->seq_ctrl)); printk(KERN_DEBUG " A1=%pM", hdr->addr1); printk(" A2=%pM", hdr->addr2); printk(" A3=%pM", hdr->addr3); if (skb->len >= 30) printk(" A4=%pM", hdr->addr4); printk("\n"); } /* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta) * Convert Ethernet header into a suitable IEEE 802.11 header depending on * device configuration. 
*/ netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; int need_headroom, need_tailroom = 0; struct ieee80211_hdr hdr; u16 fc, ethertype = 0; enum { WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME } use_wds = WDS_NO; u8 *encaps_data; int hdr_len, encaps_len, skip_header_bytes; int to_assoc_ap = 0; struct hostap_skb_tx_data *meta; iface = netdev_priv(dev); local = iface->local; if (skb->len < ETH_HLEN) { printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb " "(len=%d)\n", dev->name, skb->len); kfree_skb(skb); return NETDEV_TX_OK; } if (local->ddev != dev) { use_wds = (local->iw_mode == IW_MODE_MASTER && !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ? WDS_OWN_FRAME : WDS_COMPLIANT_FRAME; if (dev == local->stadev) { to_assoc_ap = 1; use_wds = WDS_NO; } else if (dev == local->apdev) { printk(KERN_DEBUG "%s: prism2_tx: trying to use " "AP device with Ethernet net dev\n", dev->name); kfree_skb(skb); return NETDEV_TX_OK; } } else { if (local->iw_mode == IW_MODE_REPEAT) { printk(KERN_DEBUG "%s: prism2_tx: trying to use " "non-WDS link in Repeater mode\n", dev->name); kfree_skb(skb); return NETDEV_TX_OK; } else if (local->iw_mode == IW_MODE_INFRA && (local->wds_type & HOSTAP_WDS_AP_CLIENT) && memcmp(skb->data + ETH_ALEN, dev->dev_addr, ETH_ALEN) != 0) { /* AP client mode: send frames with foreign src addr * using 4-addr WDS frames */ use_wds = WDS_COMPLIANT_FRAME; } } /* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload * ==> * Prism2 TX frame with 802.11 header: * txdesc (address order depending on used mode; includes dst_addr and * src_addr), possible encapsulation (RFC1042/Bridge-Tunnel; * proto[2], payload {, possible addr4[6]} */ ethertype = (skb->data[12] << 8) | skb->data[13]; memset(&hdr, 0, sizeof(hdr)); /* Length of data after IEEE 802.11 header */ encaps_data = NULL; encaps_len = 0; skip_header_bytes = ETH_HLEN; if (ethertype == ETH_P_AARP || ethertype == 
ETH_P_IPX) { encaps_data = bridge_tunnel_header; encaps_len = sizeof(bridge_tunnel_header); skip_header_bytes -= 2; } else if (ethertype >= 0x600) { encaps_data = rfc1042_header; encaps_len = sizeof(rfc1042_header); skip_header_bytes -= 2; } fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; hdr_len = IEEE80211_DATA_HDR3_LEN; if (use_wds != WDS_NO) { /* Note! Prism2 station firmware has problems with sending real * 802.11 frames with four addresses; until these problems can * be fixed or worked around, 4-addr frames needed for WDS are * using incompatible format: FromDS flag is not set and the * fourth address is added after the frame payload; it is * assumed, that the receiving station knows how to handle this * frame format */ if (use_wds == WDS_COMPLIANT_FRAME) { fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA, * Addr4 = SA */ skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr4, ETH_ALEN); hdr_len += ETH_ALEN; } else { /* bogus 4-addr format to workaround Prism2 station * f/w bug */ fc |= IEEE80211_FCTL_TODS; /* From DS: Addr1 = DA (used as RA), * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA), */ /* SA from skb->data + ETH_ALEN will be added after * frame payload; use hdr.addr4 as a temporary buffer */ skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr4, ETH_ALEN); need_tailroom += ETH_ALEN; } /* send broadcast and multicast frames to broadcast RA, if * configured; otherwise, use unicast RA of the WDS link */ if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) && skb->data[0] & 0x01) memset(&hdr.addr1, 0xff, ETH_ALEN); else if (iface->type == HOSTAP_INTERFACE_WDS) memcpy(&hdr.addr1, iface->u.wds.remote_addr, ETH_ALEN); else memcpy(&hdr.addr1, local->bssid, ETH_ALEN); memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) { fc |= IEEE80211_FCTL_FROMDS; /* From DS: Addr1 = DA, Addr2 = BSSID, 
Addr3 = SA */ skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3, ETH_ALEN); } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) { fc |= IEEE80211_FCTL_TODS; /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ memcpy(&hdr.addr1, to_assoc_ap ? local->assoc_ap_addr : local->bssid, ETH_ALEN); skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, ETH_ALEN); skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); } else if (local->iw_mode == IW_MODE_ADHOC) { /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, ETH_ALEN); memcpy(&hdr.addr3, local->bssid, ETH_ALEN); } hdr.frame_control = cpu_to_le16(fc); skb_pull(skb, skip_header_bytes); need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len; if (skb_tailroom(skb) < need_tailroom) { skb = skb_unshare(skb, GFP_ATOMIC); if (skb == NULL) { iface->stats.tx_dropped++; return NETDEV_TX_OK; } if (pskb_expand_head(skb, need_headroom, need_tailroom, GFP_ATOMIC)) { kfree_skb(skb); iface->stats.tx_dropped++; return NETDEV_TX_OK; } } else if (skb_headroom(skb) < need_headroom) { struct sk_buff *tmp = skb; skb = skb_realloc_headroom(skb, need_headroom); kfree_skb(tmp); if (skb == NULL) { iface->stats.tx_dropped++; return NETDEV_TX_OK; } } else { skb = skb_unshare(skb, GFP_ATOMIC); if (skb == NULL) { iface->stats.tx_dropped++; return NETDEV_TX_OK; } } if (encaps_data) memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); memcpy(skb_push(skb, hdr_len), &hdr, hdr_len); if (use_wds == WDS_OWN_FRAME) { memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN); } iface->stats.tx_packets++; iface->stats.tx_bytes += skb->len; skb_reset_mac_header(skb); meta = (struct hostap_skb_tx_data *) skb->cb; memset(meta, 0, sizeof(*meta)); meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; if (use_wds) meta->flags 
|= HOSTAP_TX_FLAGS_WDS; meta->ethertype = ethertype; meta->iface = iface; /* Send IEEE 802.11 encapsulated frame using the master radio device */ skb->dev = local->dev; dev_queue_xmit(skb); return NETDEV_TX_OK; } /* hard_start_xmit function for hostapd wlan#ap interfaces */ netdev_tx_t hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; struct hostap_skb_tx_data *meta; struct ieee80211_hdr *hdr; u16 fc; iface = netdev_priv(dev); local = iface->local; if (skb->len < 10) { printk(KERN_DEBUG "%s: hostap_mgmt_start_xmit: short skb " "(len=%d)\n", dev->name, skb->len); kfree_skb(skb); return NETDEV_TX_OK; } iface->stats.tx_packets++; iface->stats.tx_bytes += skb->len; meta = (struct hostap_skb_tx_data *) skb->cb; memset(meta, 0, sizeof(*meta)); meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; meta->iface = iface; if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) { hdr = (struct ieee80211_hdr *) skb->data; fc = le16_to_cpu(hdr->frame_control); if (ieee80211_is_data(hdr->frame_control) && (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DATA) { u8 *pos = &skb->data[IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header)]; meta->ethertype = (pos[0] << 8) | pos[1]; } } /* Send IEEE 802.11 encapsulated frame using the master radio device */ skb->dev = local->dev; dev_queue_xmit(skb); return NETDEV_TX_OK; } /* Called only from software IRQ */ static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb, struct lib80211_crypt_data *crypt) { struct hostap_interface *iface; local_info_t *local; struct ieee80211_hdr *hdr; int prefix_len, postfix_len, hdr_len, res; iface = netdev_priv(skb->dev); local = iface->local; if (skb->len < IEEE80211_DATA_HDR3_LEN) { kfree_skb(skb); return NULL; } if (local->tkip_countermeasures && strcmp(crypt->ops->name, "TKIP") == 0) { hdr = (struct ieee80211_hdr *) skb->data; if (net_ratelimit()) { printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " "TX packet to %pM\n", 
local->dev->name, hdr->addr1); } kfree_skb(skb); return NULL; } skb = skb_unshare(skb, GFP_ATOMIC); if (skb == NULL) return NULL; prefix_len = crypt->ops->extra_mpdu_prefix_len + crypt->ops->extra_msdu_prefix_len; postfix_len = crypt->ops->extra_mpdu_postfix_len + crypt->ops->extra_msdu_postfix_len; if ((skb_headroom(skb) < prefix_len || skb_tailroom(skb) < postfix_len) && pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) { kfree_skb(skb); return NULL; } hdr = (struct ieee80211_hdr *) skb->data; hdr_len = hostap_80211_get_hdrlen(hdr->frame_control); /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so * call both MSDU and MPDU encryption functions from here. */ atomic_inc(&crypt->refcnt); res = 0; if (crypt->ops->encrypt_msdu) res = crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv); if (res == 0 && crypt->ops->encrypt_mpdu) res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { kfree_skb(skb); return NULL; } return skb; } /* hard_start_xmit function for master radio interface wifi#. * AP processing (TX rate control, power save buffering, etc.). * Use hardware TX function to send the frame. 
*/ netdev_tx_t hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; netdev_tx_t ret = NETDEV_TX_BUSY; u16 fc; struct hostap_tx_data tx; ap_tx_ret tx_ret; struct hostap_skb_tx_data *meta; int no_encrypt = 0; struct ieee80211_hdr *hdr; iface = netdev_priv(dev); local = iface->local; tx.skb = skb; tx.sta_ptr = NULL; meta = (struct hostap_skb_tx_data *) skb->cb; if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) { printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, " "expected 0x%08x)\n", dev->name, meta->magic, HOSTAP_SKB_TX_DATA_MAGIC); ret = NETDEV_TX_OK; iface->stats.tx_dropped++; goto fail; } if (local->host_encrypt) { /* Set crypt to default algorithm and key; will be replaced in * AP code if STA has own alg/key */ tx.crypt = local->crypt_info.crypt[local->crypt_info.tx_keyidx]; tx.host_encrypt = 1; } else { tx.crypt = NULL; tx.host_encrypt = 0; } if (skb->len < 24) { printk(KERN_DEBUG "%s: hostap_master_start_xmit: short skb " "(len=%d)\n", dev->name, skb->len); ret = NETDEV_TX_OK; iface->stats.tx_dropped++; goto fail; } /* FIX (?): * Wi-Fi 802.11b test plan suggests that AP should ignore power save * bit in authentication and (re)association frames and assume tha * STA remains awake for the response. 
*/ tx_ret = hostap_handle_sta_tx(local, &tx); skb = tx.skb; meta = (struct hostap_skb_tx_data *) skb->cb; hdr = (struct ieee80211_hdr *) skb->data; fc = le16_to_cpu(hdr->frame_control); switch (tx_ret) { case AP_TX_CONTINUE: break; case AP_TX_CONTINUE_NOT_AUTHORIZED: if (local->ieee_802_1x && ieee80211_is_data(hdr->frame_control) && meta->ethertype != ETH_P_PAE && !(meta->flags & HOSTAP_TX_FLAGS_WDS)) { printk(KERN_DEBUG "%s: dropped frame to unauthorized " "port (IEEE 802.1X): ethertype=0x%04x\n", dev->name, meta->ethertype); hostap_dump_tx_80211(dev->name, skb); ret = NETDEV_TX_OK; /* drop packet */ iface->stats.tx_dropped++; goto fail; } break; case AP_TX_DROP: ret = NETDEV_TX_OK; /* drop packet */ iface->stats.tx_dropped++; goto fail; case AP_TX_RETRY: goto fail; case AP_TX_BUFFERED: /* do not free skb here, it will be freed when the * buffered frame is sent/timed out */ ret = NETDEV_TX_OK; goto tx_exit; } /* Request TX callback if protocol version is 2 in 802.11 header; * this version 2 is a special case used between hostapd and kernel * driver */ if (((fc & IEEE80211_FCTL_VERS) == BIT(1)) && local->ap && local->ap->tx_callback_idx && meta->tx_cb_idx == 0) { meta->tx_cb_idx = local->ap->tx_callback_idx; /* remove special version from the frame header */ fc &= ~IEEE80211_FCTL_VERS; hdr->frame_control = cpu_to_le16(fc); } if (!ieee80211_is_data(hdr->frame_control)) { no_encrypt = 1; tx.crypt = NULL; } if (local->ieee_802_1x && meta->ethertype == ETH_P_PAE && tx.crypt && !(fc & IEEE80211_FCTL_PROTECTED)) { no_encrypt = 1; PDEBUG(DEBUG_EXTRA2, "%s: TX: IEEE 802.1X - passing " "unencrypted EAPOL frame\n", dev->name); tx.crypt = NULL; /* no encryption for IEEE 802.1X frames */ } if (tx.crypt && (!tx.crypt->ops || !tx.crypt->ops->encrypt_mpdu)) tx.crypt = NULL; else if ((tx.crypt || local->crypt_info.crypt[local->crypt_info.tx_keyidx]) && !no_encrypt) { /* Add ISWEP flag both for firmware and host based encryption */ fc |= IEEE80211_FCTL_PROTECTED; hdr->frame_control 
= cpu_to_le16(fc); } else if (local->drop_unencrypted && ieee80211_is_data(hdr->frame_control) && meta->ethertype != ETH_P_PAE) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: dropped unencrypted TX data " "frame (drop_unencrypted=1)\n", dev->name); } iface->stats.tx_dropped++; ret = NETDEV_TX_OK; goto fail; } if (tx.crypt) { skb = hostap_tx_encrypt(skb, tx.crypt); if (skb == NULL) { printk(KERN_DEBUG "%s: TX - encryption failed\n", dev->name); ret = NETDEV_TX_OK; goto fail; } meta = (struct hostap_skb_tx_data *) skb->cb; if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) { printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, " "expected 0x%08x) after hostap_tx_encrypt\n", dev->name, meta->magic, HOSTAP_SKB_TX_DATA_MAGIC); ret = NETDEV_TX_OK; iface->stats.tx_dropped++; goto fail; } } if (local->func->tx == NULL || local->func->tx(skb, dev)) { ret = NETDEV_TX_OK; iface->stats.tx_dropped++; } else { ret = NETDEV_TX_OK; iface->stats.tx_packets++; iface->stats.tx_bytes += skb->len; } fail: if (ret == NETDEV_TX_OK && skb) dev_kfree_skb(skb); tx_exit: if (tx.sta_ptr) hostap_handle_sta_release(tx.sta_ptr); return ret; } EXPORT_SYMBOL(hostap_master_start_xmit);
gpl-2.0
menghang/android_kernel_xiaomi_msm8996
drivers/net/arcnet/arc-rimi.c
9023
10981
/* * Linux ARCnet driver - "RIM I" (entirely mem-mapped) cards * * Written 1994-1999 by Avery Pennarun. * Written 1999-2000 by Martin Mares <mj@ucw.cz>. * Derived from skeleton.c by Donald Becker. * * Special thanks to Contemporary Controls, Inc. (www.ccontrols.com) * for sponsoring the further development of this driver. * * ********************** * * The original copyright of skeleton.c was as follows: * * skeleton.c Written 1993 by Donald Becker. * Copyright 1993 United States Government as represented by the * Director, National Security Agency. This software may only be used * and distributed according to the terms of the GNU General Public License as * modified by SRC, incorporated herein by reference. * * ********************** * * For more details, see drivers/net/arcnet.c * * ********************** */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/bootmem.h> #include <linux/init.h> #include <linux/interrupt.h> #include <asm/io.h> #include <linux/arcdevice.h> #define VERSION "arcnet: RIM I (entirely mem-mapped) support\n" /* Internal function declarations */ static int arcrimi_probe(struct net_device *dev); static int arcrimi_found(struct net_device *dev); static void arcrimi_command(struct net_device *dev, int command); static int arcrimi_status(struct net_device *dev); static void arcrimi_setmask(struct net_device *dev, int mask); static int arcrimi_reset(struct net_device *dev, int really_reset); static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset, void *buf, int count); static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset, void *buf, int count); /* Handy defines for ARCnet specific stuff */ /* Amount of I/O memory used by the card */ #define BUFFER_SIZE (512) #define MIRROR_SIZE (BUFFER_SIZE*4) /* COM 9026 controller chip --> ARCnet register addresses */ #define 
_INTMASK (ioaddr+0) /* writable */ #define _STATUS (ioaddr+0) /* readable */ #define _COMMAND (ioaddr+1) /* writable, returns random vals on read (?) */ #define _RESET (ioaddr+8) /* software reset (on read) */ #define _MEMDATA (ioaddr+12) /* Data port for IO-mapped memory */ #define _ADDR_HI (ioaddr+15) /* Control registers for said */ #define _ADDR_LO (ioaddr+14) #define _CONFIG (ioaddr+2) /* Configuration register */ #undef ASTATUS #undef ACOMMAND #undef AINTMASK #define ASTATUS() readb(_STATUS) #define ACOMMAND(cmd) writeb((cmd),_COMMAND) #define AINTMASK(msk) writeb((msk),_INTMASK) #define SETCONF() writeb(lp->config,_CONFIG) /* * We cannot probe for a RIM I card; one reason is I don't know how to reset * them. In fact, we can't even get their node ID automatically. So, we * need to be passed a specific shmem address, IRQ, and node ID. */ static int __init arcrimi_probe(struct net_device *dev) { BUGLVL(D_NORMAL) printk(VERSION); BUGLVL(D_NORMAL) printk("E-mail me if you actually test the RIM I driver, please!\n"); BUGLVL(D_NORMAL) printk("Given: node %02Xh, shmem %lXh, irq %d\n", dev->dev_addr[0], dev->mem_start, dev->irq); if (dev->mem_start <= 0 || dev->irq <= 0) { BUGLVL(D_NORMAL) printk("No autoprobe for RIM I; you " "must specify the shmem and irq!\n"); return -ENODEV; } if (dev->dev_addr[0] == 0) { BUGLVL(D_NORMAL) printk("You need to specify your card's station " "ID!\n"); return -ENODEV; } /* * Grab the memory region at mem_start for MIRROR_SIZE bytes. * Later in arcrimi_found() the real size will be determined * and this reserve will be released and the correct size * will be taken. 
*/ if (!request_mem_region(dev->mem_start, MIRROR_SIZE, "arcnet (90xx)")) { BUGLVL(D_NORMAL) printk("Card memory already allocated\n"); return -ENODEV; } return arcrimi_found(dev); } static int check_mirror(unsigned long addr, size_t size) { void __iomem *p; int res = -1; if (!request_mem_region(addr, size, "arcnet (90xx)")) return -1; p = ioremap(addr, size); if (p) { if (readb(p) == TESTvalue) res = 1; else res = 0; iounmap(p); } release_mem_region(addr, size); return res; } /* * Set up the struct net_device associated with this card. Called after * probing succeeds. */ static int __init arcrimi_found(struct net_device *dev) { struct arcnet_local *lp; unsigned long first_mirror, last_mirror, shmem; void __iomem *p; int mirror_size; int err; p = ioremap(dev->mem_start, MIRROR_SIZE); if (!p) { release_mem_region(dev->mem_start, MIRROR_SIZE); BUGMSG(D_NORMAL, "Can't ioremap\n"); return -ENODEV; } /* reserve the irq */ if (request_irq(dev->irq, arcnet_interrupt, 0, "arcnet (RIM I)", dev)) { iounmap(p); release_mem_region(dev->mem_start, MIRROR_SIZE); BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq); return -ENODEV; } shmem = dev->mem_start; writeb(TESTvalue, p); writeb(dev->dev_addr[0], p + 1); /* actually the node ID */ /* find the real shared memory start/end points, including mirrors */ /* guess the actual size of one "memory mirror" - the number of * bytes between copies of the shared memory. On most cards, it's * 2k (or there are no mirrors at all) but on some, it's 4k. 
*/ mirror_size = MIRROR_SIZE; if (readb(p) == TESTvalue && check_mirror(shmem - MIRROR_SIZE, MIRROR_SIZE) == 0 && check_mirror(shmem - 2 * MIRROR_SIZE, MIRROR_SIZE) == 1) mirror_size = 2 * MIRROR_SIZE; first_mirror = shmem - mirror_size; while (check_mirror(first_mirror, mirror_size) == 1) first_mirror -= mirror_size; first_mirror += mirror_size; last_mirror = shmem + mirror_size; while (check_mirror(last_mirror, mirror_size) == 1) last_mirror += mirror_size; last_mirror -= mirror_size; dev->mem_start = first_mirror; dev->mem_end = last_mirror + MIRROR_SIZE - 1; /* initialize the rest of the device structure. */ lp = netdev_priv(dev); lp->card_name = "RIM I"; lp->hw.command = arcrimi_command; lp->hw.status = arcrimi_status; lp->hw.intmask = arcrimi_setmask; lp->hw.reset = arcrimi_reset; lp->hw.owner = THIS_MODULE; lp->hw.copy_to_card = arcrimi_copy_to_card; lp->hw.copy_from_card = arcrimi_copy_from_card; /* * re-reserve the memory region - arcrimi_probe() alloced this reqion * but didn't know the real size. Free that region and then re-get * with the correct size. There is a VERY slim chance this could * fail. 
*/ iounmap(p); release_mem_region(shmem, MIRROR_SIZE); if (!request_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1, "arcnet (90xx)")) { BUGMSG(D_NORMAL, "Card memory already allocated\n"); goto err_free_irq; } lp->mem_start = ioremap(dev->mem_start, dev->mem_end - dev->mem_start + 1); if (!lp->mem_start) { BUGMSG(D_NORMAL, "Can't remap device memory!\n"); goto err_release_mem; } /* get and check the station ID from offset 1 in shmem */ dev->dev_addr[0] = readb(lp->mem_start + 1); BUGMSG(D_NORMAL, "ARCnet RIM I: station %02Xh found at IRQ %d, " "ShMem %lXh (%ld*%d bytes).\n", dev->dev_addr[0], dev->irq, dev->mem_start, (dev->mem_end - dev->mem_start + 1) / mirror_size, mirror_size); err = register_netdev(dev); if (err) goto err_unmap; return 0; err_unmap: iounmap(lp->mem_start); err_release_mem: release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); err_free_irq: free_irq(dev->irq, dev); return -EIO; } /* * Do a hardware reset on the card, and set up necessary registers. * * This should be called as little as possible, because it disrupts the * token on the network (causes a RECON) and requires a significant delay. * * However, it does make sure the card is in a defined state. */ static int arcrimi_reset(struct net_device *dev, int really_reset) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->mem_start + 0x800; BUGMSG(D_INIT, "Resetting %s (status=%02Xh)\n", dev->name, ASTATUS()); if (really_reset) { writeb(TESTvalue, ioaddr - 0x800); /* fake reset */ return 0; } ACOMMAND(CFLAGScmd | RESETclear); /* clear flags & end reset */ ACOMMAND(CFLAGScmd | CONFIGclear); /* enable extended (512-byte) packets */ ACOMMAND(CONFIGcmd | EXTconf); /* done! return success. 
*/ return 0; } static void arcrimi_setmask(struct net_device *dev, int mask) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->mem_start + 0x800; AINTMASK(mask); } static int arcrimi_status(struct net_device *dev) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->mem_start + 0x800; return ASTATUS(); } static void arcrimi_command(struct net_device *dev, int cmd) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *ioaddr = lp->mem_start + 0x800; ACOMMAND(cmd); } static void arcrimi_copy_to_card(struct net_device *dev, int bufnum, int offset, void *buf, int count) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset; TIME("memcpy_toio", count, memcpy_toio(memaddr, buf, count)); } static void arcrimi_copy_from_card(struct net_device *dev, int bufnum, int offset, void *buf, int count) { struct arcnet_local *lp = netdev_priv(dev); void __iomem *memaddr = lp->mem_start + 0x800 + bufnum * 512 + offset; TIME("memcpy_fromio", count, memcpy_fromio(buf, memaddr, count)); } static int node; static int io; /* use the insmod io= irq= node= options */ static int irq; static char device[9]; /* use eg. 
device=arc1 to change name */ module_param(node, int, 0); module_param(io, int, 0); module_param(irq, int, 0); module_param_string(device, device, sizeof(device), 0); MODULE_LICENSE("GPL"); static struct net_device *my_dev; static int __init arc_rimi_init(void) { struct net_device *dev; dev = alloc_arcdev(device); if (!dev) return -ENOMEM; if (node && node != 0xff) dev->dev_addr[0] = node; dev->mem_start = io; dev->irq = irq; if (dev->irq == 2) dev->irq = 9; if (arcrimi_probe(dev)) { free_netdev(dev); return -EIO; } my_dev = dev; return 0; } static void __exit arc_rimi_exit(void) { struct net_device *dev = my_dev; struct arcnet_local *lp = netdev_priv(dev); unregister_netdev(dev); iounmap(lp->mem_start); release_mem_region(dev->mem_start, dev->mem_end - dev->mem_start + 1); free_irq(dev->irq, dev); free_netdev(dev); } #ifndef MODULE static int __init arcrimi_setup(char *s) { int ints[8]; s = get_options(s, 8, ints); if (!ints[0]) return 1; switch (ints[0]) { default: /* ERROR */ printk("arcrimi: Too many arguments.\n"); case 3: /* Node ID */ node = ints[3]; case 2: /* IRQ */ irq = ints[2]; case 1: /* IO address */ io = ints[1]; } if (*s) snprintf(device, sizeof(device), "%s", s); return 1; } __setup("arcrimi=", arcrimi_setup); #endif /* MODULE */ module_init(arc_rimi_init) module_exit(arc_rimi_exit)
gpl-2.0
smac0628/kernel-htc-m8-gpe-stock
net/rose/rose_loopback.c
12863
2800
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) */ #include <linux/types.h> #include <linux/slab.h> #include <linux/socket.h> #include <linux/timer.h> #include <net/ax25.h> #include <linux/skbuff.h> #include <net/rose.h> #include <linux/init.h> static struct sk_buff_head loopback_queue; static struct timer_list loopback_timer; static void rose_set_loopback_timer(void); void rose_loopback_init(void) { skb_queue_head_init(&loopback_queue); init_timer(&loopback_timer); } static int rose_loopback_running(void) { return timer_pending(&loopback_timer); } int rose_loopback_queue(struct sk_buff *skb, struct rose_neigh *neigh) { struct sk_buff *skbn; skbn = skb_clone(skb, GFP_ATOMIC); kfree_skb(skb); if (skbn != NULL) { skb_queue_tail(&loopback_queue, skbn); if (!rose_loopback_running()) rose_set_loopback_timer(); } return 1; } static void rose_loopback_timer(unsigned long); static void rose_set_loopback_timer(void) { del_timer(&loopback_timer); loopback_timer.data = 0; loopback_timer.function = &rose_loopback_timer; loopback_timer.expires = jiffies + 10; add_timer(&loopback_timer); } static void rose_loopback_timer(unsigned long param) { struct sk_buff *skb; struct net_device *dev; rose_address *dest; struct sock *sk; unsigned short frametype; unsigned int lci_i, lci_o; while ((skb = skb_dequeue(&loopback_queue)) != NULL) { if (skb->len < ROSE_MIN_LEN) { kfree_skb(skb); continue; } lci_i = ((skb->data[0] << 8) & 0xF00) + ((skb->data[1] << 0) & 0x0FF); frametype = skb->data[2]; if (frametype == ROSE_CALL_REQUEST && (skb->len <= ROSE_CALL_REQ_FACILITIES_OFF || skb->data[ROSE_CALL_REQ_ADDR_LEN_OFF] != ROSE_CALL_REQ_ADDR_LEN_VAL)) { kfree_skb(skb); continue; } dest = (rose_address *)(skb->data + 
ROSE_CALL_REQ_DEST_ADDR_OFF); lci_o = ROSE_DEFAULT_MAXVC + 1 - lci_i; skb_reset_transport_header(skb); sk = rose_find_socket(lci_o, rose_loopback_neigh); if (sk) { if (rose_process_rx_frame(sk, skb) == 0) kfree_skb(skb); continue; } if (frametype == ROSE_CALL_REQUEST) { if ((dev = rose_dev_get(dest)) != NULL) { if (rose_rx_call_request(skb, dev, rose_loopback_neigh, lci_o) == 0) kfree_skb(skb); } else { kfree_skb(skb); } } else { kfree_skb(skb); } } } void __exit rose_loopback_clear(void) { struct sk_buff *skb; del_timer(&loopback_timer); while ((skb = skb_dequeue(&loopback_queue)) != NULL) { skb->sk = NULL; kfree_skb(skb); } }
gpl-2.0
mr-kimia/slicetime-qemu
hw/9pfs/virtio-9p-handle.c
64
18268
/* * Virtio 9p handle callback * * Copyright IBM, Corp. 2011 * * Authors: * Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com> * * This work is licensed under the terms of the GNU GPL, version 2. See * the COPYING file in the top-level directory. * */ #include "hw/virtio.h" #include "virtio-9p.h" #include "virtio-9p-xattr.h" #include <arpa/inet.h> #include <pwd.h> #include <grp.h> #include <sys/socket.h> #include <sys/un.h> #include "qemu-xattr.h" #include <unistd.h> #include <linux/fs.h> #ifdef CONFIG_LINUX_MAGIC_H #include <linux/magic.h> #endif #include <sys/ioctl.h> #ifndef XFS_SUPER_MAGIC #define XFS_SUPER_MAGIC 0x58465342 #endif #ifndef EXT2_SUPER_MAGIC #define EXT2_SUPER_MAGIC 0xEF53 #endif #ifndef REISERFS_SUPER_MAGIC #define REISERFS_SUPER_MAGIC 0x52654973 #endif #ifndef BTRFS_SUPER_MAGIC #define BTRFS_SUPER_MAGIC 0x9123683E #endif struct handle_data { int mountfd; int handle_bytes; }; static inline int name_to_handle(int dirfd, const char *name, struct file_handle *fh, int *mnt_id, int flags) { return name_to_handle_at(dirfd, name, fh, mnt_id, flags); } static inline int open_by_handle(int mountfd, const char *fh, int flags) { return open_by_handle_at(mountfd, (struct file_handle *)fh, flags); } static int handle_update_file_cred(int dirfd, const char *name, FsCred *credp) { int fd, ret; fd = openat(dirfd, name, O_NONBLOCK | O_NOFOLLOW); if (fd < 0) { return fd; } ret = fchownat(fd, "", credp->fc_uid, credp->fc_gid, AT_EMPTY_PATH); if (ret < 0) { goto err_out; } ret = fchmod(fd, credp->fc_mode & 07777); err_out: close(fd); return ret; } static int handle_lstat(FsContext *fs_ctx, V9fsPath *fs_path, struct stat *stbuf) { int fd, ret; struct handle_data *data = (struct handle_data *)fs_ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_PATH); if (fd < 0) { return fd; } ret = fstatat(fd, "", stbuf, AT_EMPTY_PATH); close(fd); return ret; } static ssize_t handle_readlink(FsContext *fs_ctx, V9fsPath *fs_path, char *buf, size_t bufsz) { int fd, 
ret; struct handle_data *data = (struct handle_data *)fs_ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_PATH); if (fd < 0) { return fd; } ret = readlinkat(fd, "", buf, bufsz); close(fd); return ret; } static int handle_close(FsContext *ctx, V9fsFidOpenState *fs) { return close(fs->fd); } static int handle_closedir(FsContext *ctx, V9fsFidOpenState *fs) { return closedir(fs->dir); } static int handle_open(FsContext *ctx, V9fsPath *fs_path, int flags, V9fsFidOpenState *fs) { struct handle_data *data = (struct handle_data *)ctx->private; fs->fd = open_by_handle(data->mountfd, fs_path->data, flags); return fs->fd; } static int handle_opendir(FsContext *ctx, V9fsPath *fs_path, V9fsFidOpenState *fs) { int ret; ret = handle_open(ctx, fs_path, O_DIRECTORY, fs); if (ret < 0) { return -1; } fs->dir = fdopendir(ret); if (!fs->dir) { return -1; } return 0; } static void handle_rewinddir(FsContext *ctx, V9fsFidOpenState *fs) { return rewinddir(fs->dir); } static off_t handle_telldir(FsContext *ctx, V9fsFidOpenState *fs) { return telldir(fs->dir); } static int handle_readdir_r(FsContext *ctx, V9fsFidOpenState *fs, struct dirent *entry, struct dirent **result) { return readdir_r(fs->dir, entry, result); } static void handle_seekdir(FsContext *ctx, V9fsFidOpenState *fs, off_t off) { return seekdir(fs->dir, off); } static ssize_t handle_preadv(FsContext *ctx, V9fsFidOpenState *fs, const struct iovec *iov, int iovcnt, off_t offset) { #ifdef CONFIG_PREADV return preadv(fs->fd, iov, iovcnt, offset); #else int err = lseek(fs->fd, offset, SEEK_SET); if (err == -1) { return err; } else { return readv(fs->fd, iov, iovcnt); } #endif } static ssize_t handle_pwritev(FsContext *ctx, V9fsFidOpenState *fs, const struct iovec *iov, int iovcnt, off_t offset) { ssize_t ret; #ifdef CONFIG_PREADV ret = pwritev(fs->fd, iov, iovcnt, offset); #else int err = lseek(fs->fd, offset, SEEK_SET); if (err == -1) { return err; } else { ret = writev(fs->fd, iov, iovcnt); } #endif #ifdef 
CONFIG_SYNC_FILE_RANGE if (ret > 0 && ctx->export_flags & V9FS_IMMEDIATE_WRITEOUT) { /* * Initiate a writeback. This is not a data integrity sync. * We want to ensure that we don't leave dirty pages in the cache * after write when writeout=immediate is sepcified. */ sync_file_range(fs->fd, offset, ret, SYNC_FILE_RANGE_WAIT_BEFORE | SYNC_FILE_RANGE_WRITE); } #endif return ret; } static int handle_chmod(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp) { int fd, ret; struct handle_data *data = (struct handle_data *)fs_ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK); if (fd < 0) { return fd; } ret = fchmod(fd, credp->fc_mode); close(fd); return ret; } static int handle_mknod(FsContext *fs_ctx, V9fsPath *dir_path, const char *name, FsCred *credp) { int dirfd, ret; struct handle_data *data = (struct handle_data *)fs_ctx->private; dirfd = open_by_handle(data->mountfd, dir_path->data, O_PATH); if (dirfd < 0) { return dirfd; } ret = mknodat(dirfd, name, credp->fc_mode, credp->fc_rdev); if (!ret) { ret = handle_update_file_cred(dirfd, name, credp); } close(dirfd); return ret; } static int handle_mkdir(FsContext *fs_ctx, V9fsPath *dir_path, const char *name, FsCred *credp) { int dirfd, ret; struct handle_data *data = (struct handle_data *)fs_ctx->private; dirfd = open_by_handle(data->mountfd, dir_path->data, O_PATH); if (dirfd < 0) { return dirfd; } ret = mkdirat(dirfd, name, credp->fc_mode); if (!ret) { ret = handle_update_file_cred(dirfd, name, credp); } close(dirfd); return ret; } static int handle_fstat(FsContext *fs_ctx, int fid_type, V9fsFidOpenState *fs, struct stat *stbuf) { int fd; if (fid_type == P9_FID_DIR) { fd = dirfd(fs->dir); } else { fd = fs->fd; } return fstat(fd, stbuf); } static int handle_open2(FsContext *fs_ctx, V9fsPath *dir_path, const char *name, int flags, FsCred *credp, V9fsFidOpenState *fs) { int ret; int dirfd, fd; struct handle_data *data = (struct handle_data *)fs_ctx->private; dirfd = open_by_handle(data->mountfd, 
dir_path->data, O_PATH); if (dirfd < 0) { return dirfd; } fd = openat(dirfd, name, flags | O_NOFOLLOW, credp->fc_mode); if (fd >= 0) { ret = handle_update_file_cred(dirfd, name, credp); if (ret < 0) { close(fd); fd = ret; } else { fs->fd = fd; } } close(dirfd); return fd; } static int handle_symlink(FsContext *fs_ctx, const char *oldpath, V9fsPath *dir_path, const char *name, FsCred *credp) { int fd, dirfd, ret; struct handle_data *data = (struct handle_data *)fs_ctx->private; dirfd = open_by_handle(data->mountfd, dir_path->data, O_PATH); if (dirfd < 0) { return dirfd; } ret = symlinkat(oldpath, dirfd, name); if (!ret) { fd = openat(dirfd, name, O_PATH | O_NOFOLLOW); if (fd < 0) { ret = fd; goto err_out; } ret = fchownat(fd, "", credp->fc_uid, credp->fc_gid, AT_EMPTY_PATH); close(fd); } err_out: close(dirfd); return ret; } static int handle_link(FsContext *ctx, V9fsPath *oldpath, V9fsPath *dirpath, const char *name) { int oldfd, newdirfd, ret; struct handle_data *data = (struct handle_data *)ctx->private; oldfd = open_by_handle(data->mountfd, oldpath->data, O_PATH); if (oldfd < 0) { return oldfd; } newdirfd = open_by_handle(data->mountfd, dirpath->data, O_PATH); if (newdirfd < 0) { close(oldfd); return newdirfd; } ret = linkat(oldfd, "", newdirfd, name, AT_EMPTY_PATH); close(newdirfd); close(oldfd); return ret; } static int handle_truncate(FsContext *ctx, V9fsPath *fs_path, off_t size) { int fd, ret; struct handle_data *data = (struct handle_data *)ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK | O_WRONLY); if (fd < 0) { return fd; } ret = ftruncate(fd, size); close(fd); return ret; } static int handle_rename(FsContext *ctx, const char *oldpath, const char *newpath) { errno = EOPNOTSUPP; return -1; } static int handle_chown(FsContext *fs_ctx, V9fsPath *fs_path, FsCred *credp) { int fd, ret; struct handle_data *data = (struct handle_data *)fs_ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_PATH); if (fd < 0) { return fd; 
} ret = fchownat(fd, "", credp->fc_uid, credp->fc_gid, AT_EMPTY_PATH); close(fd); return ret; } static int handle_utimensat(FsContext *ctx, V9fsPath *fs_path, const struct timespec *buf) { int ret; #ifdef CONFIG_UTIMENSAT int fd; struct handle_data *data = (struct handle_data *)ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK); if (fd < 0) { return fd; } ret = futimens(fd, buf); close(fd); #else ret = -1; errno = ENOSYS; #endif return ret; } static int handle_remove(FsContext *ctx, const char *path) { errno = EOPNOTSUPP; return -1; } static int handle_fsync(FsContext *ctx, int fid_type, V9fsFidOpenState *fs, int datasync) { int fd; if (fid_type == P9_FID_DIR) { fd = dirfd(fs->dir); } else { fd = fs->fd; } if (datasync) { return qemu_fdatasync(fd); } else { return fsync(fd); } } static int handle_statfs(FsContext *ctx, V9fsPath *fs_path, struct statfs *stbuf) { int fd, ret; struct handle_data *data = (struct handle_data *)ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK); if (fd < 0) { return fd; } ret = fstatfs(fd, stbuf); close(fd); return ret; } static ssize_t handle_lgetxattr(FsContext *ctx, V9fsPath *fs_path, const char *name, void *value, size_t size) { int fd, ret; struct handle_data *data = (struct handle_data *)ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK); if (fd < 0) { return fd; } ret = fgetxattr(fd, name, value, size); close(fd); return ret; } static ssize_t handle_llistxattr(FsContext *ctx, V9fsPath *fs_path, void *value, size_t size) { int fd, ret; struct handle_data *data = (struct handle_data *)ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK); if (fd < 0) { return fd; } ret = flistxattr(fd, value, size); close(fd); return ret; } static int handle_lsetxattr(FsContext *ctx, V9fsPath *fs_path, const char *name, void *value, size_t size, int flags) { int fd, ret; struct handle_data *data = (struct handle_data *)ctx->private; fd = 
open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK); if (fd < 0) { return fd; } ret = fsetxattr(fd, name, value, size, flags); close(fd); return ret; } static int handle_lremovexattr(FsContext *ctx, V9fsPath *fs_path, const char *name) { int fd, ret; struct handle_data *data = (struct handle_data *)ctx->private; fd = open_by_handle(data->mountfd, fs_path->data, O_NONBLOCK); if (fd < 0) { return fd; } ret = fremovexattr(fd, name); close(fd); return ret; } static int handle_name_to_path(FsContext *ctx, V9fsPath *dir_path, const char *name, V9fsPath *target) { char buffer[PATH_MAX]; struct file_handle *fh; int dirfd, ret, mnt_id; struct handle_data *data = (struct handle_data *)ctx->private; /* "." and ".." are not allowed */ if (!strcmp(name, ".") || !strcmp(name, "..")) { errno = EINVAL; return -1; } if (dir_path) { dirfd = open_by_handle(data->mountfd, dir_path->data, O_PATH); } else { /* relative to export root */ dirfd = open(rpath(ctx, ".", buffer), O_DIRECTORY); } if (dirfd < 0) { return dirfd; } fh = g_malloc(sizeof(struct file_handle) + data->handle_bytes); fh->handle_bytes = data->handle_bytes; /* add a "./" at the beginning of the path */ snprintf(buffer, PATH_MAX, "./%s", name); /* flag = 0 imply don't follow symlink */ ret = name_to_handle(dirfd, buffer, fh, &mnt_id, 0); if (!ret) { target->data = (char *)fh; target->size = sizeof(struct file_handle) + data->handle_bytes; } else { g_free(fh); } close(dirfd); return ret; } static int handle_renameat(FsContext *ctx, V9fsPath *olddir, const char *old_name, V9fsPath *newdir, const char *new_name) { int olddirfd, newdirfd, ret; struct handle_data *data = (struct handle_data *)ctx->private; olddirfd = open_by_handle(data->mountfd, olddir->data, O_PATH); if (olddirfd < 0) { return olddirfd; } newdirfd = open_by_handle(data->mountfd, newdir->data, O_PATH); if (newdirfd < 0) { close(olddirfd); return newdirfd; } ret = renameat(olddirfd, old_name, newdirfd, new_name); close(newdirfd); close(olddirfd); return 
ret; } static int handle_unlinkat(FsContext *ctx, V9fsPath *dir, const char *name, int flags) { int dirfd, ret; struct handle_data *data = (struct handle_data *)ctx->private; int rflags; dirfd = open_by_handle(data->mountfd, dir->data, O_PATH); if (dirfd < 0) { return dirfd; } rflags = 0; if (flags & P9_DOTL_AT_REMOVEDIR) { rflags |= AT_REMOVEDIR; } ret = unlinkat(dirfd, name, rflags); close(dirfd); return ret; } static int handle_ioc_getversion(FsContext *ctx, V9fsPath *path, mode_t st_mode, uint64_t *st_gen) { int err; V9fsFidOpenState fid_open; /* * Do not try to open special files like device nodes, fifos etc * We can get fd for regular files and directories only */ if (!S_ISREG(st_mode) && !S_ISDIR(st_mode)) { return 0; } err = handle_open(ctx, path, O_RDONLY, &fid_open); if (err < 0) { return err; } err = ioctl(fid_open.fd, FS_IOC_GETVERSION, st_gen); handle_close(ctx, &fid_open); return err; } static int handle_init(FsContext *ctx) { int ret, mnt_id; struct statfs stbuf; struct file_handle fh; struct handle_data *data = g_malloc(sizeof(struct handle_data)); data->mountfd = open(ctx->fs_root, O_DIRECTORY); if (data->mountfd < 0) { ret = data->mountfd; goto err_out; } ret = statfs(ctx->fs_root, &stbuf); if (!ret) { switch (stbuf.f_type) { case EXT2_SUPER_MAGIC: case BTRFS_SUPER_MAGIC: case REISERFS_SUPER_MAGIC: case XFS_SUPER_MAGIC: ctx->exops.get_st_gen = handle_ioc_getversion; break; } } memset(&fh, 0, sizeof(struct file_handle)); ret = name_to_handle(data->mountfd, ".", &fh, &mnt_id, 0); if (ret && errno == EOVERFLOW) { data->handle_bytes = fh.handle_bytes; ctx->private = data; ret = 0; goto out; } /* we got 0 byte handle ? 
*/ ret = -1; close(data->mountfd); err_out: g_free(data); out: return ret; } static int handle_parse_opts(QemuOpts *opts, struct FsDriverEntry *fse) { const char *sec_model = qemu_opt_get(opts, "security_model"); const char *path = qemu_opt_get(opts, "path"); if (sec_model) { fprintf(stderr, "Invalid argument security_model specified with handle fsdriver\n"); return -1; } if (!path) { fprintf(stderr, "fsdev: No path specified.\n"); return -1; } fse->path = g_strdup(path); return 0; } FileOperations handle_ops = { .parse_opts = handle_parse_opts, .init = handle_init, .lstat = handle_lstat, .readlink = handle_readlink, .close = handle_close, .closedir = handle_closedir, .open = handle_open, .opendir = handle_opendir, .rewinddir = handle_rewinddir, .telldir = handle_telldir, .readdir_r = handle_readdir_r, .seekdir = handle_seekdir, .preadv = handle_preadv, .pwritev = handle_pwritev, .chmod = handle_chmod, .mknod = handle_mknod, .mkdir = handle_mkdir, .fstat = handle_fstat, .open2 = handle_open2, .symlink = handle_symlink, .link = handle_link, .truncate = handle_truncate, .rename = handle_rename, .chown = handle_chown, .utimensat = handle_utimensat, .remove = handle_remove, .fsync = handle_fsync, .statfs = handle_statfs, .lgetxattr = handle_lgetxattr, .llistxattr = handle_llistxattr, .lsetxattr = handle_lsetxattr, .lremovexattr = handle_lremovexattr, .name_to_path = handle_name_to_path, .renameat = handle_renameat, .unlinkat = handle_unlinkat, };
gpl-2.0
TeamNyx/external_bluetooth_bluez
tools/hciattach_ath3k.c
64
21186
/* * Copyright (c) 2009-2010 Atheros Communications Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #ifdef HAVE_CONFIG_H #include <config.h> #endif #include <stdio.h> #include <errno.h> #include <unistd.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <time.h> #include <sys/time.h> #include <sys/types.h> #include <sys/param.h> #include <sys/ioctl.h> #include <bluetooth/bluetooth.h> #include <bluetooth/hci.h> #include <bluetooth/hci_lib.h> #include "hciattach.h" #define TRUE 1 #define FALSE 0 #define FW_PATH "/lib/firmware/ar3k/" struct ps_cfg_entry { uint32_t id; uint32_t len; uint8_t *data; }; struct ps_entry_type { unsigned char type; unsigned char array; }; #define MAX_TAGS 50 #define PS_HDR_LEN 4 #define HCI_VENDOR_CMD_OGF 0x3F #define HCI_PS_CMD_OCF 0x0B struct ps_cfg_entry ps_list[MAX_TAGS]; static void load_hci_ps_hdr(uint8_t *cmd, uint8_t ps_op, int len, int index) { hci_command_hdr *ch = (void *)cmd; ch->opcode = htobs(cmd_opcode_pack(HCI_VENDOR_CMD_OGF, HCI_PS_CMD_OCF)); ch->plen = len + PS_HDR_LEN; cmd += HCI_COMMAND_HDR_SIZE; cmd[0] = ps_op; cmd[1] = index; cmd[2] = index >> 8; cmd[3] = len; } #define PS_EVENT_LEN 100 /* * Send HCI command and wait for command complete event. * The event buffer has to be freed by the caller. 
*/ static int send_hci_cmd_sync(int dev, uint8_t *cmd, int len, uint8_t **event) { int err; uint8_t *hci_event; uint8_t pkt_type = HCI_COMMAND_PKT; if (len == 0) return len; if (write(dev, &pkt_type, 1) != 1) return -EILSEQ; if (write(dev, (unsigned char *)cmd, len) != len) return -EILSEQ; hci_event = (uint8_t *)malloc(PS_EVENT_LEN); if (!hci_event) return -ENOMEM; err = read_hci_event(dev, (unsigned char *)hci_event, PS_EVENT_LEN); if (err > 0) { *event = hci_event; } else { free(hci_event); return -EILSEQ; } return len; } #define HCI_EV_SUCCESS 0x00 static int read_ps_event(uint8_t *event, uint16_t ocf) { hci_event_hdr *eh; uint16_t opcode = htobs(cmd_opcode_pack(HCI_VENDOR_CMD_OGF, ocf)); event++; eh = (void *)event; event += HCI_EVENT_HDR_SIZE; if (eh->evt == EVT_CMD_COMPLETE) { evt_cmd_complete *cc = (void *)event; event += EVT_CMD_COMPLETE_SIZE; if (cc->opcode == opcode && event[0] == HCI_EV_SUCCESS) return 0; else return -EILSEQ; } return -EILSEQ; } static int write_cmd(int fd, uint8_t *buffer, int len) { uint8_t *event; int err; err = send_hci_cmd_sync(fd, buffer, len, &event); if (err < 0) return err; err = read_ps_event(event, HCI_PS_CMD_OCF); free(event); return err; } #define PS_WRITE 1 #define PS_RESET 2 #define WRITE_PATCH 8 #define ENABLE_PATCH 11 #define HCI_PS_CMD_HDR_LEN 7 #define PS_RESET_PARAM_LEN 6 #define HCI_MAX_CMD_SIZE 260 #define PS_RESET_CMD_LEN (HCI_PS_CMD_HDR_LEN + PS_RESET_PARAM_LEN) #define PS_ID_MASK 0xFF /* Sends PS commands using vendor specficic HCI commands */ static int write_ps_cmd(int fd, uint8_t opcode, uint32_t ps_param) { uint8_t cmd[HCI_MAX_CMD_SIZE]; uint32_t i; switch (opcode) { case ENABLE_PATCH: load_hci_ps_hdr(cmd, opcode, 0, 0x00); if (write_cmd(fd, cmd, HCI_PS_CMD_HDR_LEN) < 0) return -EILSEQ; break; case PS_RESET: load_hci_ps_hdr(cmd, opcode, PS_RESET_PARAM_LEN, 0x00); cmd[7] = 0x00; cmd[PS_RESET_CMD_LEN - 2] = ps_param & PS_ID_MASK; cmd[PS_RESET_CMD_LEN - 1] = (ps_param >> 8) & PS_ID_MASK; if (write_cmd(fd, cmd, 
PS_RESET_CMD_LEN) < 0) return -EILSEQ; break; case PS_WRITE: for (i = 0; i < ps_param; i++) { load_hci_ps_hdr(cmd, opcode, ps_list[i].len, ps_list[i].id); memcpy(&cmd[HCI_PS_CMD_HDR_LEN], ps_list[i].data, ps_list[i].len); if (write_cmd(fd, cmd, ps_list[i].len + HCI_PS_CMD_HDR_LEN) < 0) return -EILSEQ; } break; } return 0; } #define __is_delim(ch) ((ch) == ':') #define MAX_PREAMBLE_LEN 4 /* Parse PS entry preamble of format [X:X] for main type and subtype */ static int get_ps_type(char *ptr, int index, char *type, char *sub_type) { int i; int delim = FALSE; if (index > MAX_PREAMBLE_LEN) return -EILSEQ; for (i = 1; i < index; i++) { if (__is_delim(ptr[i])) { delim = TRUE; continue; } if (isalpha(ptr[i])) { if (delim == FALSE) (*type) = toupper(ptr[i]); else (*sub_type) = toupper(ptr[i]); } } return 0; } #define ARRAY 'A' #define STRING 'S' #define DECIMAL 'D' #define BINARY 'B' #define PS_HEX 0 #define PS_DEC 1 static int get_input_format(char *buf, struct ps_entry_type *format) { char *ptr = NULL; char type = '\0'; char sub_type = '\0'; format->type = PS_HEX; format->array = TRUE; if (strstr(buf, "[") != buf) return 0; ptr = strstr(buf, "]"); if (!ptr) return -EILSEQ; if (get_ps_type(buf, ptr - buf, &type, &sub_type) < 0) return -EILSEQ; /* Check is data type is of array */ if (type == ARRAY || sub_type == ARRAY) format->array = TRUE; if (type == STRING || sub_type == STRING) format->array = FALSE; if (type == DECIMAL || type == BINARY) format->type = PS_DEC; else format->type = PS_HEX; return 0; } #define UNDEFINED 0xFFFF static unsigned int read_data_in_section(char *buf, struct ps_entry_type type) { char *ptr = buf; if (!buf) return UNDEFINED; if (buf == strstr(buf, "[")) { ptr = strstr(buf, "]"); if (!ptr) return UNDEFINED; ptr++; } if (type.type == PS_HEX && type.array != TRUE) return strtol(ptr, NULL, 16); return UNDEFINED; } struct tag_info { unsigned section; unsigned line_count; unsigned char_cnt; unsigned byte_count; }; static inline int 
update_char_count(const char *buf) { char *end_ptr; if (strstr(buf, "[") == buf) { end_ptr = strstr(buf, "]"); if (!end_ptr) return 0; else return (end_ptr - buf) + 1; } return 0; } /* Read PS entries as string, convert and add to Hex array */ static void update_tag_data(struct ps_cfg_entry *tag, struct tag_info *info, const char *ptr) { char buf[3]; buf[2] = '\0'; strncpy(buf, &ptr[info->char_cnt], 2); tag->data[info->byte_count] = strtol(buf, NULL, 16); info->char_cnt += 3; info->byte_count++; strncpy(buf, &ptr[info->char_cnt], 2); tag->data[info->byte_count] = strtol(buf, NULL, 16); info->char_cnt += 3; info->byte_count++; } #define PS_UNDEF 0 #define PS_ID 1 #define PS_LEN 2 #define PS_DATA 3 #define PS_MAX_LEN 500 #define LINE_SIZE_MAX (PS_MAX_LEN * 2) #define ENTRY_PER_LINE 16 #define __check_comment(buf) (((buf)[0] == '/') && ((buf)[1] == '/')) #define __skip_space(str) while (*(str) == ' ') ((str)++) static int ath_parse_ps(FILE *stream) { char buf[LINE_SIZE_MAX + 1]; char *ptr; uint8_t tag_cnt = 0; int16_t byte_count = 0; struct ps_entry_type format; struct tag_info status = { 0, 0, 0, 0 }; do { int read_count; struct ps_cfg_entry *tag; ptr = fgets(buf, LINE_SIZE_MAX, stream); if (!ptr) break; __skip_space(ptr); if (__check_comment(ptr)) continue; /* Lines with a '#' will be followed by new PS entry */ if (ptr == strstr(ptr, "#")) { if (status.section != PS_UNDEF) { return -EILSEQ; } else { status.section = PS_ID; continue; } } tag = &ps_list[tag_cnt]; switch (status.section) { case PS_ID: if (get_input_format(ptr, &format) < 0) return -EILSEQ; tag->id = read_data_in_section(ptr, format); status.section = PS_LEN; break; case PS_LEN: if (get_input_format(ptr, &format) < 0) return -EILSEQ; byte_count = read_data_in_section(ptr, format); if (byte_count > PS_MAX_LEN) return -EILSEQ; tag->len = byte_count; tag->data = (uint8_t *)malloc(byte_count); status.section = PS_DATA; status.line_count = 0; break; case PS_DATA: if (status.line_count == 0) if 
(get_input_format(ptr, &format) < 0) return -EILSEQ; __skip_space(ptr); status.char_cnt = update_char_count(ptr); read_count = (byte_count > ENTRY_PER_LINE) ? ENTRY_PER_LINE : byte_count; if (format.type == PS_HEX && format.array == TRUE) { while (read_count > 0) { update_tag_data(tag, &status, ptr); read_count -= 2; } if (byte_count > ENTRY_PER_LINE) byte_count -= ENTRY_PER_LINE; else byte_count = 0; } status.line_count++; if (byte_count == 0) memset(&status, 0x00, sizeof(struct tag_info)); if (status.section == PS_UNDEF) tag_cnt++; if (tag_cnt == MAX_TAGS) return -EILSEQ; break; } } while (ptr); return tag_cnt; } #define MAX_PATCH_CMD 244 struct patch_entry { int16_t len; uint8_t data[MAX_PATCH_CMD]; }; #define SET_PATCH_RAM_ID 0x0D #define SET_PATCH_RAM_CMD_SIZE 11 #define ADDRESS_LEN 4 static int set_patch_ram(int dev, char *patch_loc, int len) { int err; uint8_t cmd[20]; int i, j; char loc_byte[3]; uint8_t *event; uint8_t *loc_ptr = &cmd[7]; if (!patch_loc) return -1; loc_byte[2] = '\0'; load_hci_ps_hdr(cmd, SET_PATCH_RAM_ID, ADDRESS_LEN, 0); for (i = 0, j = 3; i < 4; i++, j--) { loc_byte[0] = patch_loc[0]; loc_byte[1] = patch_loc[1]; loc_ptr[j] = strtol(loc_byte, NULL, 16); patch_loc += 2; } err = send_hci_cmd_sync(dev, cmd, SET_PATCH_RAM_CMD_SIZE, &event); if (err < 0) return err; err = read_ps_event(event, HCI_PS_CMD_OCF); free(event); return err; } #define PATCH_LOC_KEY "DA:" #define PATCH_LOC_STRING_LEN 8 static int ps_patch_download(int fd, FILE *stream) { char byte[3]; char ptr[MAX_PATCH_CMD + 1]; int byte_cnt; int patch_count = 0; char patch_loc[PATCH_LOC_STRING_LEN + 1]; byte[2] = '\0'; while (fgets(ptr, MAX_PATCH_CMD, stream)) { if (strlen(ptr) <= 1) continue; else if (strstr(ptr, PATCH_LOC_KEY) == ptr) { strncpy(patch_loc, &ptr[sizeof(PATCH_LOC_KEY) - 1], PATCH_LOC_STRING_LEN); if (set_patch_ram(fd, patch_loc, sizeof(patch_loc)) < 0) return -1; } else if (isxdigit(ptr[0])) break; else return -1; } byte_cnt = strtol(ptr, NULL, 16); while (byte_cnt > 
0) { int i; uint8_t cmd[HCI_MAX_CMD_SIZE]; struct patch_entry patch; if (byte_cnt > MAX_PATCH_CMD) patch.len = MAX_PATCH_CMD; else patch.len = byte_cnt; for (i = 0; i < patch.len; i++) { if (!fgets(byte, 3, stream)) return -1; patch.data[i] = strtoul(byte, NULL, 16); } load_hci_ps_hdr(cmd, WRITE_PATCH, patch.len, patch_count); memcpy(&cmd[HCI_PS_CMD_HDR_LEN], patch.data, patch.len); if (write_cmd(fd, cmd, patch.len + HCI_PS_CMD_HDR_LEN) < 0) return -1; patch_count++; byte_cnt = byte_cnt - MAX_PATCH_CMD; } if (write_ps_cmd(fd, ENABLE_PATCH, 0) < 0) return -1; return patch_count; } #define PS_RAM_SIZE 2048 static int ps_config_download(int fd, int tag_count) { if (write_ps_cmd(fd, PS_RESET, PS_RAM_SIZE) < 0) return -1; if (tag_count > 0) if (write_ps_cmd(fd, PS_WRITE, tag_count) < 0) return -1; return 0; } #define PS_ASIC_FILE "PS_ASIC.pst" #define PS_FPGA_FILE "PS_FPGA.pst" static void get_ps_file_name(uint32_t devtype, uint32_t rom_version, char *path) { char *filename; if (devtype == 0xdeadc0de) filename = PS_ASIC_FILE; else filename = PS_FPGA_FILE; snprintf(path, MAXPATHLEN, "%s%x/%s", FW_PATH, rom_version, filename); } #define PATCH_FILE "RamPatch.txt" #define FPGA_ROM_VERSION 0x99999999 #define ROM_DEV_TYPE 0xdeadc0de static void get_patch_file_name(uint32_t dev_type, uint32_t rom_version, uint32_t build_version, char *path) { if (rom_version == FPGA_ROM_VERSION && dev_type != ROM_DEV_TYPE && dev_type != 0 && build_version == 1) path[0] = '\0'; else snprintf(path, MAXPATHLEN, "%s%x/%s", FW_PATH, rom_version, PATCH_FILE); } #define VERIFY_CRC 9 #define PS_REGION 1 #define PATCH_REGION 2 static int get_ath3k_crc(int dev) { uint8_t cmd[7]; uint8_t *event; int err; load_hci_ps_hdr(cmd, VERIFY_CRC, 0, PS_REGION | PATCH_REGION); err = send_hci_cmd_sync(dev, cmd, sizeof(cmd), &event); if (err < 0) return err; /* Send error code if CRC check patched */ if (read_ps_event(event, HCI_PS_CMD_OCF) >= 0) err = -EILSEQ; free(event); return err; } #define DEV_REGISTER 0x4FFC 
#define GET_DEV_TYPE_OCF 0x05 static int get_device_type(int dev, uint32_t *code) { uint8_t cmd[8]; uint8_t *event; uint32_t reg; int err; uint8_t *ptr = cmd; hci_command_hdr *ch = (void *)cmd; ch->opcode = htobs(cmd_opcode_pack(HCI_VENDOR_CMD_OGF, GET_DEV_TYPE_OCF)); ch->plen = 5; ptr += HCI_COMMAND_HDR_SIZE; ptr[0] = (uint8_t)DEV_REGISTER; ptr[1] = (uint8_t)DEV_REGISTER >> 8; ptr[2] = (uint8_t)DEV_REGISTER >> 16; ptr[3] = (uint8_t)DEV_REGISTER >> 24; ptr[4] = 0x04; err = send_hci_cmd_sync(dev, cmd, sizeof(cmd), &event); if (err < 0) return err; err = read_ps_event(event, GET_DEV_TYPE_OCF); if (err < 0) goto cleanup; reg = event[10]; reg = (reg << 8) | event[9]; reg = (reg << 8) | event[8]; reg = (reg << 8) | event[7]; *code = reg; cleanup: free(event); return err; } #define GET_VERSION_OCF 0x1E static int read_ath3k_version(int pConfig, uint32_t *rom_version, uint32_t *build_version) { uint8_t cmd[3]; uint8_t *event; int err; int status; hci_command_hdr *ch = (void *)cmd; ch->opcode = htobs(cmd_opcode_pack(HCI_VENDOR_CMD_OGF, GET_VERSION_OCF)); ch->plen = 0; err = send_hci_cmd_sync(pConfig, cmd, sizeof(cmd), &event); if (err < 0) return err; err = read_ps_event(event, GET_VERSION_OCF); if (err < 0) goto cleanup; status = event[10]; status = (status << 8) | event[9]; status = (status << 8) | event[8]; status = (status << 8) | event[7]; *rom_version = status; status = event[14]; status = (status << 8) | event[13]; status = (status << 8) | event[12]; status = (status << 8) | event[11]; *build_version = status; cleanup: free(event); return err; } static void convert_bdaddr(char *str_bdaddr, char *bdaddr) { char bdbyte[3]; char *str_byte = str_bdaddr; int i, j; int colon_present = 0; if (strstr(str_bdaddr, ":")) colon_present = 1; bdbyte[2] = '\0'; /* Reverse the BDADDR to LSB first */ for (i = 0, j = 5; i < 6; i++, j--) { bdbyte[0] = str_byte[0]; bdbyte[1] = str_byte[1]; bdaddr[j] = strtol(bdbyte, NULL, 16); if (colon_present == 1) str_byte += 3; else str_byte += 2; 
} } static int write_bdaddr(int pConfig, char *bdaddr) { uint8_t *event; int err; uint8_t cmd[13]; uint8_t *ptr = cmd; hci_command_hdr *ch = (void *)cmd; memset(cmd, 0, sizeof(cmd)); ch->opcode = htobs(cmd_opcode_pack(HCI_VENDOR_CMD_OGF, HCI_PS_CMD_OCF)); ch->plen = 10; ptr += HCI_COMMAND_HDR_SIZE; ptr[0] = 0x01; ptr[1] = 0x01; ptr[2] = 0x00; ptr[3] = 0x06; convert_bdaddr(bdaddr, (char *)&ptr[4]); err = send_hci_cmd_sync(pConfig, cmd, sizeof(cmd), &event); if (err < 0) return err; err = read_ps_event(event, HCI_PS_CMD_OCF); free(event); return err; } #define BDADDR_FILE "ar3kbdaddr.pst" static void write_bdaddr_from_file(int rom_version, int fd) { FILE *stream; char bdaddr[PATH_MAX]; char bdaddr_file[PATH_MAX]; snprintf(bdaddr_file, MAXPATHLEN, "%s%x/%s", FW_PATH, rom_version, BDADDR_FILE); stream = fopen(bdaddr_file, "r"); if (!stream) return; if (fgets(bdaddr, PATH_MAX - 1, stream)) write_bdaddr(fd, bdaddr); fclose(stream); } static int ath_ps_download(int fd) { int err = 0; int tag_count; int patch_count = 0; uint32_t rom_version = 0; uint32_t build_version = 0; uint32_t dev_type = 0; char patch_file[PATH_MAX]; char ps_file[PATH_MAX]; FILE *stream; /* * Verfiy firmware version. depending on it select the PS * config file to download. 
*/ if (get_device_type(fd, &dev_type) < 0) { err = -EILSEQ; goto download_cmplete; } if (read_ath3k_version(fd, &rom_version, &build_version) < 0) { err = -EILSEQ; goto download_cmplete; } /* Do not download configuration if CRC passes */ if (get_ath3k_crc(fd) < 0) { err = 0; goto download_cmplete; } get_ps_file_name(dev_type, rom_version, ps_file); get_patch_file_name(dev_type, rom_version, build_version, patch_file); stream = fopen(ps_file, "r"); if (!stream) { perror("firmware file open error\n"); err = -EILSEQ; goto download_cmplete; } tag_count = ath_parse_ps(stream); fclose(stream); if (tag_count < 0) { err = -EILSEQ; goto download_cmplete; } /* * It is not necessary that Patch file be available, * continue with PS Operations if patch file is not available. */ if (patch_file[0] == '\0') err = 0; stream = fopen(patch_file, "r"); if (!stream) err = 0; else { patch_count = ps_patch_download(fd, stream); fclose(stream); if (patch_count < 0) { err = -EILSEQ; goto download_cmplete; } } err = ps_config_download(fd, tag_count); download_cmplete: if (!err) write_bdaddr_from_file(rom_version, fd); return err; } #define HCI_SLEEP_CMD_OCF 0x04 /* * Atheros AR300x specific initialization post callback */ int ath3k_post(int fd, int pm) { int dev_id, dd; struct timespec tm = { 0, 50000 }; sleep(1); dev_id = ioctl(fd, HCIUARTGETDEVICE, 0); if (dev_id < 0) { perror("cannot get device id"); return dev_id; } dd = hci_open_dev(dev_id); if (dd < 0) { perror("HCI device open failed"); return dd; } if (ioctl(dd, HCIDEVUP, dev_id) < 0 && errno != EALREADY) { perror("hci down:Power management Disabled"); hci_close_dev(dd); return -1; } /* send vendor specific command with Sleep feature Enabled */ if (hci_send_cmd(dd, OGF_VENDOR_CMD, HCI_SLEEP_CMD_OCF, 1, &pm) < 0) perror("PM command failed, power management Disabled"); nanosleep(&tm, NULL); hci_close_dev(dd); return 0; } #define HCI_VENDOR_CMD_OGF 0x3F #define HCI_PS_CMD_OCF 0x0B #define HCI_CHG_BAUD_CMD_OCF 0x0C #define 
WRITE_BDADDR_CMD_LEN 14 #define WRITE_BAUD_CMD_LEN 6 #define MAX_CMD_LEN WRITE_BDADDR_CMD_LEN static int set_cntrlr_baud(int fd, int speed) { int baud; struct timespec tm = { 0, 500000 }; unsigned char cmd[MAX_CMD_LEN], rsp[HCI_MAX_EVENT_SIZE]; unsigned char *ptr = cmd + 1; hci_command_hdr *ch = (void *)ptr; cmd[0] = HCI_COMMAND_PKT; /* set controller baud rate to user specified value */ ptr = cmd + 1; ch->opcode = htobs(cmd_opcode_pack(HCI_VENDOR_CMD_OGF, HCI_CHG_BAUD_CMD_OCF)); ch->plen = 2; ptr += HCI_COMMAND_HDR_SIZE; baud = speed/100; ptr[0] = (char)baud; ptr[1] = (char)(baud >> 8); if (write(fd, cmd, WRITE_BAUD_CMD_LEN) != WRITE_BAUD_CMD_LEN) { perror("Failed to write change baud rate command"); return -ETIMEDOUT; } nanosleep(&tm, NULL); if (read_hci_event(fd, rsp, sizeof(rsp)) < 0) return -ETIMEDOUT; return 0; } /* * Atheros AR300x specific initialization and configuration file * download */ int ath3k_init(int fd, int speed, int init_speed, char *bdaddr, struct termios *ti) { int r; int err = 0; struct timespec tm = { 0, 500000 }; unsigned char cmd[MAX_CMD_LEN], rsp[HCI_MAX_EVENT_SIZE]; unsigned char *ptr = cmd + 1; hci_command_hdr *ch = (void *)ptr; cmd[0] = HCI_COMMAND_PKT; /* set both controller and host baud rate to maximum possible value */ err = set_cntrlr_baud(fd, speed); if (err < 0) return err; err = set_speed(fd, ti, speed); if (err < 0) { perror("Can't set required baud rate"); return err; } /* Download PS and patch */ r = ath_ps_download(fd); if (r < 0) { perror("Failed to Download configuration"); err = -ETIMEDOUT; goto failed; } /* Write BDADDR */ if (bdaddr) { ch->opcode = htobs(cmd_opcode_pack(HCI_VENDOR_CMD_OGF, HCI_PS_CMD_OCF)); ch->plen = 10; ptr += HCI_COMMAND_HDR_SIZE; ptr[0] = 0x01; ptr[1] = 0x01; ptr[2] = 0x00; ptr[3] = 0x06; str2ba(bdaddr, (bdaddr_t *)(ptr + 4)); if (write(fd, cmd, WRITE_BDADDR_CMD_LEN) != WRITE_BDADDR_CMD_LEN) { perror("Failed to write BD_ADDR command\n"); err = -ETIMEDOUT; goto failed; } if (read_hci_event(fd, rsp, 
sizeof(rsp)) < 0) { perror("Failed to set BD_ADDR\n"); err = -ETIMEDOUT; goto failed; } } /* Send HCI Reset */ cmd[1] = 0x03; cmd[2] = 0x0C; cmd[3] = 0x00; r = write(fd, cmd, 4); if (r != 4) { err = -ETIMEDOUT; goto failed; } nanosleep(&tm, NULL); if (read_hci_event(fd, rsp, sizeof(rsp)) < 0) { err = -ETIMEDOUT; goto failed; } err = set_cntrlr_baud(fd, speed); if (err < 0) return err; failed: if (err < 0) { set_cntrlr_baud(fd, init_speed); set_speed(fd, ti, init_speed); } return err; }
gpl-2.0
kongfl888/LAVFilters
common/baseclasses/mtype.cpp
64
11721
//------------------------------------------------------------------------------ // File: MType.cpp // // Desc: DirectShow base classes - implements a class that holds and // manages media type information. // // Copyright (c) 1992-2001 Microsoft Corporation. All rights reserved. //------------------------------------------------------------------------------ // helper class that derived pin objects can use to compare media // types etc. Has same data members as the struct AM_MEDIA_TYPE defined // in the streams IDL file, but also has (non-virtual) functions #include <streams.h> #include <mmreg.h> CMediaType::~CMediaType(){ FreeMediaType(*this); } CMediaType::CMediaType() { InitMediaType(); } CMediaType::CMediaType(const GUID * type) { InitMediaType(); majortype = *type; } // copy constructor does a deep copy of the format block CMediaType::CMediaType(const AM_MEDIA_TYPE& rt, __out_opt HRESULT* phr) { HRESULT hr = CopyMediaType(this, &rt); if (FAILED(hr) && (NULL != phr)) { *phr = hr; } } CMediaType::CMediaType(const CMediaType& rt, __out_opt HRESULT* phr) { HRESULT hr = CopyMediaType(this, &rt); if (FAILED(hr) && (NULL != phr)) { *phr = hr; } } // this class inherits publicly from AM_MEDIA_TYPE so the compiler could generate // the following assignment operator itself, however it could introduce some // memory conflicts and leaks in the process because the structure contains // a dynamically allocated block (pbFormat) which it will not copy correctly CMediaType& CMediaType::operator=(const AM_MEDIA_TYPE& rt) { Set(rt); return *this; } CMediaType& CMediaType::operator=(const CMediaType& rt) { *this = (AM_MEDIA_TYPE &) rt; return *this; } BOOL CMediaType::operator == (const CMediaType& rt) const { // I don't believe we need to check sample size or // temporal compression flags, since I think these must // be represented in the type, subtype and format somehow. 
They // are pulled out as separate flags so that people who don't understand // the particular format representation can still see them, but // they should duplicate information in the format block. return ((IsEqualGUID(majortype,rt.majortype) == TRUE) && (IsEqualGUID(subtype,rt.subtype) == TRUE) && (IsEqualGUID(formattype,rt.formattype) == TRUE) && (cbFormat == rt.cbFormat) && ( (cbFormat == 0) || pbFormat != NULL && rt.pbFormat != NULL && (memcmp(pbFormat, rt.pbFormat, cbFormat) == 0))); } BOOL CMediaType::operator != (const CMediaType& rt) const { /* Check to see if they are equal */ if (*this == rt) { return FALSE; } return TRUE; } HRESULT CMediaType::Set(const CMediaType& rt) { return Set((AM_MEDIA_TYPE &) rt); } HRESULT CMediaType::Set(const AM_MEDIA_TYPE& rt) { if (&rt != this) { FreeMediaType(*this); HRESULT hr = CopyMediaType(this, &rt); if (FAILED(hr)) { return E_OUTOFMEMORY; } } return S_OK; } BOOL CMediaType::IsValid() const { return (!IsEqualGUID(majortype,GUID_NULL)); } void CMediaType::SetType(const GUID* ptype) { majortype = *ptype; } void CMediaType::SetSubtype(const GUID* ptype) { subtype = *ptype; } ULONG CMediaType::GetSampleSize() const { if (IsFixedSize()) { return lSampleSize; } else { return 0; } } void CMediaType::SetSampleSize(ULONG sz) { if (sz == 0) { SetVariableSize(); } else { bFixedSizeSamples = TRUE; lSampleSize = sz; } } void CMediaType::SetVariableSize() { bFixedSizeSamples = FALSE; } void CMediaType::SetTemporalCompression(BOOL bCompressed) { bTemporalCompression = bCompressed; } BOOL CMediaType::SetFormat(__in_bcount(cb) BYTE * pformat, ULONG cb) { if (NULL == AllocFormatBuffer(cb)) return(FALSE); ASSERT(pbFormat); memcpy(pbFormat, pformat, cb); return(TRUE); } // set the type of the media type format block, this type defines what you // will actually find in the format pointer. For example FORMAT_VideoInfo or // FORMAT_WaveFormatEx. In the future this may be an interface pointer to a // property set. 
Before sending out media types this should be filled in. void CMediaType::SetFormatType(const GUID *pformattype) { formattype = *pformattype; } // reset the format buffer void CMediaType::ResetFormatBuffer() { if (cbFormat) { CoTaskMemFree((PVOID)pbFormat); } cbFormat = 0; pbFormat = NULL; } // allocate length bytes for the format and return a read/write pointer // If we cannot allocate the new block of memory we return NULL leaving // the original block of memory untouched (as does ReallocFormatBuffer) BYTE* CMediaType::AllocFormatBuffer(ULONG length) { ASSERT(length); // do the types have the same buffer size if (cbFormat == length) { return pbFormat; } // allocate the new format buffer BYTE *pNewFormat = (PBYTE)CoTaskMemAlloc(length); if (pNewFormat == NULL) { if (length <= cbFormat) return pbFormat; //reuse the old block anyway. return NULL; } // delete the old format if (cbFormat != 0) { ASSERT(pbFormat); CoTaskMemFree((PVOID)pbFormat); } cbFormat = length; pbFormat = pNewFormat; return pbFormat; } // reallocate length bytes for the format and return a read/write pointer // to it. We keep as much information as we can given the new buffer size // if this fails the original format buffer is left untouched. The caller // is responsible for ensuring the size of memory required is non zero BYTE* CMediaType::ReallocFormatBuffer(ULONG length) { ASSERT(length); // do the types have the same buffer size if (cbFormat == length) { return pbFormat; } // allocate the new format buffer BYTE *pNewFormat = (PBYTE)CoTaskMemAlloc(length); if (pNewFormat == NULL) { if (length <= cbFormat) return pbFormat; //reuse the old block anyway. 
return NULL; } // copy any previous format (or part of if new is smaller) // delete the old format and replace with the new one if (cbFormat != 0) { ASSERT(pbFormat); memcpy(pNewFormat,pbFormat,min(length,cbFormat)); CoTaskMemFree((PVOID)pbFormat); } cbFormat = length; pbFormat = pNewFormat; return pNewFormat; } // initialise a media type structure void CMediaType::InitMediaType() { ZeroMemory((PVOID)this, sizeof(*this)); lSampleSize = 1; bFixedSizeSamples = TRUE; } // a partially specified media type can be passed to IPin::Connect // as a constraint on the media type used in the connection. // the type, subtype or format type can be null. BOOL CMediaType::IsPartiallySpecified(void) const { if ((majortype == GUID_NULL) || (formattype == GUID_NULL)) { return TRUE; } else { return FALSE; } } BOOL CMediaType::MatchesPartial(const CMediaType* ppartial) const { if ((ppartial->majortype != GUID_NULL) && (majortype != ppartial->majortype)) { return FALSE; } if ((ppartial->subtype != GUID_NULL) && (subtype != ppartial->subtype)) { return FALSE; } if (ppartial->formattype != GUID_NULL) { // if the format block is specified then it must match exactly if (formattype != ppartial->formattype) { return FALSE; } if (cbFormat != ppartial->cbFormat) { return FALSE; } if ((cbFormat != 0) && (memcmp(pbFormat, ppartial->pbFormat, cbFormat) != 0)) { return FALSE; } } return TRUE; } // general purpose function to delete a heap allocated AM_MEDIA_TYPE structure // which is useful when calling IEnumMediaTypes::Next as the interface // implementation allocates the structures which you must later delete // the format block may also be a pointer to an interface to release void WINAPI DeleteMediaType(__inout_opt AM_MEDIA_TYPE *pmt) { // allow NULL pointers for coding simplicity if (pmt == NULL) { return; } FreeMediaType(*pmt); CoTaskMemFree((PVOID)pmt); } // this also comes in useful when using the IEnumMediaTypes interface so // that you can copy a media type, you can do nearly the same by 
creating // a CMediaType object but as soon as it goes out of scope the destructor // will delete the memory it allocated (this takes a copy of the memory) AM_MEDIA_TYPE * WINAPI CreateMediaType(AM_MEDIA_TYPE const *pSrc) { ASSERT(pSrc); // Allocate a block of memory for the media type AM_MEDIA_TYPE *pMediaType = (AM_MEDIA_TYPE *)CoTaskMemAlloc(sizeof(AM_MEDIA_TYPE)); if (pMediaType == NULL) { return NULL; } // Copy the variable length format block HRESULT hr = CopyMediaType(pMediaType,pSrc); if (FAILED(hr)) { CoTaskMemFree((PVOID)pMediaType); return NULL; } return pMediaType; } // Copy 1 media type to another HRESULT WINAPI CopyMediaType(__out AM_MEDIA_TYPE *pmtTarget, const AM_MEDIA_TYPE *pmtSource) { // We'll leak if we copy onto one that already exists - there's one // case we can check like that - copying to itself. ASSERT(pmtSource != pmtTarget); *pmtTarget = *pmtSource; if (pmtSource->cbFormat != 0) { ASSERT(pmtSource->pbFormat != NULL); pmtTarget->pbFormat = (PBYTE)CoTaskMemAlloc(pmtSource->cbFormat); if (pmtTarget->pbFormat == NULL) { pmtTarget->cbFormat = 0; return E_OUTOFMEMORY; } else { CopyMemory((PVOID)pmtTarget->pbFormat, (PVOID)pmtSource->pbFormat, pmtTarget->cbFormat); } } if (pmtTarget->pUnk != NULL) { pmtTarget->pUnk->AddRef(); } return S_OK; } // Free an existing media type (ie free resources it holds) void WINAPI FreeMediaType(__inout AM_MEDIA_TYPE& mt) { if (mt.cbFormat != 0) { CoTaskMemFree((PVOID)mt.pbFormat); // Strictly unnecessary but tidier mt.cbFormat = 0; mt.pbFormat = NULL; } if (mt.pUnk != NULL) { mt.pUnk->Release(); mt.pUnk = NULL; } } // Initialize a media type from a WAVEFORMATEX STDAPI CreateAudioMediaType( const WAVEFORMATEX *pwfx, __out AM_MEDIA_TYPE *pmt, BOOL bSetFormat ) { pmt->majortype = MEDIATYPE_Audio; if (pwfx->wFormatTag == WAVE_FORMAT_EXTENSIBLE) { pmt->subtype = ((PWAVEFORMATEXTENSIBLE)pwfx)->SubFormat; } else { pmt->subtype = FOURCCMap(pwfx->wFormatTag); } pmt->formattype = FORMAT_WaveFormatEx; 
pmt->bFixedSizeSamples = TRUE; pmt->bTemporalCompression = FALSE; pmt->lSampleSize = pwfx->nBlockAlign; pmt->pUnk = NULL; if (bSetFormat) { if (pwfx->wFormatTag == WAVE_FORMAT_PCM) { pmt->cbFormat = sizeof(WAVEFORMATEX); } else { pmt->cbFormat = sizeof(WAVEFORMATEX) + pwfx->cbSize; } pmt->pbFormat = (PBYTE)CoTaskMemAlloc(pmt->cbFormat); if (pmt->pbFormat == NULL) { return E_OUTOFMEMORY; } if (pwfx->wFormatTag == WAVE_FORMAT_PCM) { CopyMemory(pmt->pbFormat, pwfx, sizeof(PCMWAVEFORMAT)); ((WAVEFORMATEX *)pmt->pbFormat)->cbSize = 0; } else { CopyMemory(pmt->pbFormat, pwfx, pmt->cbFormat); } } return S_OK; } // eliminate very many spurious warnings from MS compiler #pragma warning(disable:4514)
gpl-2.0
linuxium/rkm-kk
drivers/media/video/rk2928_camera.c
64
8259
#include <mach/iomux.h> #include <media/soc_camera.h> #include <linux/android_pmem.h> #include <mach/rk2928_camera.h> #ifndef PMEM_CAM_SIZE #include "../../../arch/arm/plat-rk/rk_camera.c" #else /***************************************************************************************** * camera devices * author: ddl@rock-chips.com *****************************************************************************************/ #ifdef CONFIG_VIDEO_RK29 static int rk_sensor_iomux(int pin) { iomux_set_gpio_mode(pin); return 0; } #define PMEM_CAM_BASE 0 //just for compile ,no meaning #include "../../../arch/arm/plat-rk/rk_camera.c" static u64 rockchip_device_camera_dmamask = 0xffffffffUL; #if RK_SUPPORT_CIF0 static struct resource rk_camera_resource_host_0[] = { [0] = { .start = RK2928_CIF_PHYS, .end = RK2928_CIF_PHYS + RK2928_CIF_SIZE - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_CIF, .end = IRQ_CIF, .flags = IORESOURCE_IRQ, } }; #endif #if RK_SUPPORT_CIF1 static struct resource rk_camera_resource_host_1[] = { [0] = { .start = RK2928_CIF_PHYS, .end = RK2928_CIF_PHYS+ RK2928_CIF_SIZE - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_CIF, .end = IRQ_CIF, .flags = IORESOURCE_IRQ, } }; #endif /*platform_device : */ #if RK_SUPPORT_CIF0 struct platform_device rk_device_camera_host_0 = { .name = RK29_CAM_DRV_NAME, .id = RK_CAM_PLATFORM_DEV_ID_0, /* This is used to put cameras on this interface */ .num_resources = ARRAY_SIZE(rk_camera_resource_host_0), .resource = rk_camera_resource_host_0, .dev = { .dma_mask = &rockchip_device_camera_dmamask, .coherent_dma_mask = 0xffffffffUL, .platform_data = &rk_camera_platform_data, } }; #endif #if RK_SUPPORT_CIF1 /*platform_device : */ struct platform_device rk_device_camera_host_1 = { .name = RK29_CAM_DRV_NAME, .id = RK_CAM_PLATFORM_DEV_ID_1, /* This is used to put cameras on this interface */ .num_resources = ARRAY_SIZE(rk_camera_resource_host_1), .resource = rk_camera_resource_host_1, .dev = { .dma_mask = 
&rockchip_device_camera_dmamask, .coherent_dma_mask = 0xffffffffUL, .platform_data = &rk_camera_platform_data, } }; #endif static void rk_init_camera_plateform_data(void) { int i,dev_idx; dev_idx = 0; for (i=0; i<RK_CAM_NUM; i++) { rk_camera_platform_data.sensor_init_data[i] = &rk_init_data_sensor[i]; if (rk_camera_platform_data.register_dev[i].device_info.name) { rk_camera_platform_data.register_dev[i].link_info.board_info = &rk_camera_platform_data.register_dev[i].i2c_cam_info; rk_camera_platform_data.register_dev[i].device_info.id = dev_idx; rk_camera_platform_data.register_dev[i].device_info.dev.platform_data = &rk_camera_platform_data.register_dev[i].link_info; dev_idx++; } } } static void rk30_camera_request_reserve_mem(void) { int i,max_resolution; int cam_ipp_mem=PMEM_CAMIPP_NECESSARY, cam_pmem=PMEM_CAM_NECESSARY; i =0; max_resolution = 0x00; while (strstr(new_camera[i].dev.device_info.dev.init_name,"end")==NULL) { if (new_camera[i].resolution > max_resolution) max_resolution = new_camera[i].resolution; i++; } if (max_resolution < PMEM_SENSOR_FULL_RESOLUTION_CIF_1) max_resolution = PMEM_SENSOR_FULL_RESOLUTION_CIF_1; if (max_resolution < PMEM_SENSOR_FULL_RESOLUTION_CIF_0) max_resolution = PMEM_SENSOR_FULL_RESOLUTION_CIF_0; switch (max_resolution) { case 0x800000: default: { cam_ipp_mem = 0x800000; cam_pmem = 0x1900000; break; } case 0x500000: { cam_ipp_mem = 0x800000; cam_pmem = 0x1400000; break; } case 0x300000: { cam_ipp_mem = 0x600000; cam_pmem = 0xf00000; break; } case 0x200000: { cam_ipp_mem = 0x600000; cam_pmem = 0xc00000; break; } case 0x100000: { cam_ipp_mem = 0x600000; cam_pmem = 0xa00000; break; } case 0x30000: { cam_ipp_mem = 0x600000; cam_pmem = 0x600000; break; } } rk_camera_platform_data.meminfo.vbase = rk_camera_platform_data.meminfo_cif1.vbase = NULL; #if defined(CONFIG_VIDEO_RKCIF_WORK_SIMUL_OFF) || ((RK_SUPPORT_CIF0 && RK_SUPPORT_CIF1) == 0) rk_camera_platform_data.meminfo.name = "camera_ipp_mem"; rk_camera_platform_data.meminfo.start = 
board_mem_reserve_add("camera_ipp_mem",cam_ipp_mem); rk_camera_platform_data.meminfo.size= cam_ipp_mem; memcpy(&rk_camera_platform_data.meminfo_cif1,&rk_camera_platform_data.meminfo,sizeof(struct rk29camera_mem_res)); #else rk_camera_platform_data.meminfo.name = "camera_ipp_mem_0"; rk_camera_platform_data.meminfo.start = board_mem_reserve_add("camera_ipp_mem_0",PMEM_CAMIPP_NECESSARY_CIF_0); rk_camera_platform_data.meminfo.size= PMEM_CAMIPP_NECESSARY_CIF_0; rk_camera_platform_data.meminfo_cif1.name = "camera_ipp_mem_1"; rk_camera_platform_data.meminfo_cif1.start =board_mem_reserve_add("camera_ipp_mem_1",PMEM_CAMIPP_NECESSARY_CIF_1); rk_camera_platform_data.meminfo_cif1.size= PMEM_CAMIPP_NECESSARY_CIF_1; #endif #if PMEM_CAM_NECESSARY android_pmem_cam_pdata.start = board_mem_reserve_add((char*)(android_pmem_cam_pdata.name),cam_pmem); android_pmem_cam_pdata.size= cam_pmem; #endif } static int rk_register_camera_devices(void) { int i; int host_registered_0,host_registered_1; struct rkcamera_platform_data *new_camera; rk_init_camera_plateform_data(); host_registered_0 = 0; host_registered_1 = 0; for (i=0; i<RK_CAM_NUM; i++) { if (rk_camera_platform_data.register_dev[i].device_info.name) { if (rk_camera_platform_data.register_dev[i].link_info.bus_id == RK_CAM_PLATFORM_DEV_ID_0) { #if RK_SUPPORT_CIF0 host_registered_0 = 1; #else printk(KERN_ERR "%s(%d) : This chip isn't support CIF0, Please user check ...\n",__FUNCTION__,__LINE__); #endif } if (rk_camera_platform_data.register_dev[i].link_info.bus_id == RK_CAM_PLATFORM_DEV_ID_1) { #if RK_SUPPORT_CIF1 host_registered_1 = 1; #else printk(KERN_ERR "%s(%d) : This chip isn't support CIF1, Please user check ...\n",__FUNCTION__,__LINE__); #endif } } } i=0; new_camera = rk_camera_platform_data.register_dev_new; if (new_camera != NULL) { while (strstr(new_camera->dev.device_info.dev.init_name,"end")==NULL) { if (new_camera->dev.link_info.bus_id == RK_CAM_PLATFORM_DEV_ID_1) { host_registered_1 = 1; } else if 
(new_camera->dev.link_info.bus_id == RK_CAM_PLATFORM_DEV_ID_0) { host_registered_0 = 1; } new_camera++; } } #if RK_SUPPORT_CIF0 if (host_registered_0) { platform_device_register(&rk_device_camera_host_0); } #endif #if RK_SUPPORT_CIF1 if (host_registered_1) { platform_device_register(&rk_device_camera_host_1); } #endif for (i=0; i<RK_CAM_NUM; i++) { if (rk_camera_platform_data.register_dev[i].device_info.name) { platform_device_register(&rk_camera_platform_data.register_dev[i].device_info); } } if (rk_camera_platform_data.sensor_register) (rk_camera_platform_data.sensor_register)(); #if PMEM_CAM_NECESSARY platform_device_register(&android_pmem_cam_device); #endif return 0; } module_init(rk_register_camera_devices); #endif #endif //#ifdef CONFIG_VIDEO_RK
gpl-2.0
drod2169/Linux-Kernel
arch/arm64/kernel/cpuinfo.c
64
6118
/* * Record and handle CPU attributes. * * Copyright (C) 2014 ARM Ltd. * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <asm/arch_timer.h> #include <asm/cachetype.h> #include <asm/cpu.h> #include <asm/cputype.h> #include <linux/bitops.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/printk.h> #include <linux/smp.h> /* * In case the boot CPU is hotpluggable, we record its initial state and * current state separately. Certain system registers may contain different * values depending on configuration at or after reset. */ DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data); static struct cpuinfo_arm64 boot_cpu_data; static char *icache_policy_str[] = { [ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN", [ICACHE_POLICY_AIVIVT] = "AIVIVT", [ICACHE_POLICY_VIPT] = "VIPT", [ICACHE_POLICY_PIPT] = "PIPT", }; unsigned long __icache_flags; static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info) { unsigned int cpu = smp_processor_id(); u32 l1ip = CTR_L1IP(info->reg_ctr); if (l1ip != ICACHE_POLICY_PIPT) set_bit(ICACHEF_ALIASING, &__icache_flags); if (l1ip == ICACHE_POLICY_AIVIVT) set_bit(ICACHEF_AIVIVT, &__icache_flags); pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu); } static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu) { if ((boot & mask) == (cur & mask)) return 0; pr_warn("SANITY CHECK: Unexpected variation in %s. 
Boot CPU: %#016lx, CPU%d: %#016lx\n", name, (unsigned long)boot, cpu, (unsigned long)cur); return 1; } #define CHECK_MASK(field, mask, boot, cur, cpu) \ check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu) #define CHECK(field, boot, cur, cpu) \ CHECK_MASK(field, ~0ULL, boot, cur, cpu) /* * Verify that CPUs don't have unexpected differences that will cause problems. */ static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur) { unsigned int cpu = smp_processor_id(); struct cpuinfo_arm64 *boot = &boot_cpu_data; unsigned int diff = 0; /* * The kernel can handle differing I-cache policies, but otherwise * caches should look identical. Userspace JITs will make use of * *minLine. */ diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu); /* * Userspace may perform DC ZVA instructions. Mismatched block sizes * could result in too much or too little memory being zeroed if a * process is preempted and migrated between CPUs. */ diff |= CHECK(dczid, boot, cur, cpu); /* If different, timekeeping will be broken (especially with KVM) */ diff |= CHECK(cntfrq, boot, cur, cpu); /* * Even in big.LITTLE, processors should be identical instruction-set * wise. */ diff |= CHECK(id_aa64isar0, boot, cur, cpu); diff |= CHECK(id_aa64isar1, boot, cur, cpu); /* * Differing PARange support is fine as long as all peripherals and * memory are mapped within the minimum PARange of all CPUs. * Linux should not care about secure memory. * ID_AA64MMFR1 is currently RES0. */ diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu); diff |= CHECK(id_aa64mmfr1, boot, cur, cpu); /* * EL3 is not our concern. * ID_AA64PFR1 is currently RES0. */ diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu); diff |= CHECK(id_aa64pfr1, boot, cur, cpu); /* * If we have AArch32, we care about 32-bit features for compat. These * registers should be RES0 otherwise. 
*/ diff |= CHECK(id_isar0, boot, cur, cpu); diff |= CHECK(id_isar1, boot, cur, cpu); diff |= CHECK(id_isar2, boot, cur, cpu); diff |= CHECK(id_isar3, boot, cur, cpu); diff |= CHECK(id_isar4, boot, cur, cpu); diff |= CHECK(id_isar5, boot, cur, cpu); diff |= CHECK(id_mmfr0, boot, cur, cpu); diff |= CHECK(id_mmfr1, boot, cur, cpu); diff |= CHECK(id_mmfr2, boot, cur, cpu); diff |= CHECK(id_mmfr3, boot, cur, cpu); diff |= CHECK(id_pfr0, boot, cur, cpu); diff |= CHECK(id_pfr1, boot, cur, cpu); /* * Mismatched CPU features are a recipe for disaster. Don't even * pretend to support them. */ WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC, "Unsupported CPU feature variation."); } static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info) { info->reg_cntfrq = arch_timer_get_cntfrq(); info->reg_ctr = read_cpuid_cachetype(); info->reg_dczid = read_cpuid(DCZID_EL0); info->reg_midr = read_cpuid_id(); info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1); info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1); info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1); info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1); info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1); info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1); info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1); info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1); info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1); info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1); info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1); info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1); info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1); info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1); info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1); info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1); info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1); info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1); cpuinfo_detect_icache_policy(info); } void cpuinfo_store_cpu(void) { struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data); __cpuinfo_store_cpu(info); cpuinfo_sanity_check(info); } void __init 
cpuinfo_store_boot_cpu(void) { struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0); __cpuinfo_store_cpu(info); boot_cpu_data = *info; }
gpl-2.0
Shadowfury38/mangos
dep/ACE_wrappers/ace/Token_Invariants.cpp
64
8705
#include "ace/Token_Invariants.h" #if defined (ACE_HAS_TOKENS_LIBRARY) #include "ace/Object_Manager.h" ACE_RCSID (ace, Token_Invariants, "$Id: Token_Invariants.cpp 80826 2008-03-04 14:51:23Z wotte $") ACE_BEGIN_VERSIONED_NAMESPACE_DECL ACE_Token_Invariant_Manager *ACE_Token_Invariant_Manager::instance_ = 0; ACE_Token_Invariant_Manager * ACE_Token_Invariant_Manager::instance (void) { ACE_TRACE ("ACE_Token_Invariant_Manager::instance"); // Perform the Double-Check pattern... if (instance_ == 0) { ACE_MT (ACE_TOKEN_CONST::MUTEX *lock = ACE_Managed_Object<ACE_TOKEN_CONST::MUTEX>::get_preallocated_object (ACE_Object_Manager::ACE_TOKEN_INVARIANTS_CREATION_LOCK); ACE_GUARD_RETURN (ACE_TOKEN_CONST::MUTEX, ace_mon, *lock, 0)); if (instance_ == 0) { ACE_NEW_RETURN (instance_, ACE_Token_Invariant_Manager, 0); // Register for destruction with ACE_Object_Manager. ACE_Object_Manager::at_exit (instance_); } } return instance_; } ACE_Token_Invariant_Manager::ACE_Token_Invariant_Manager (void) { ACE_TRACE ("ACE_Token_Invariant_Manager::ACE_Token_Invariant_Manager"); } int ACE_Token_Invariant_Manager::mutex_acquired (const ACE_TCHAR *token_name) { ACE_TRACE ("ACE_Token_Invariant_Manager::mutex_acquired"); ACE_GUARD_RETURN (ACE_TOKEN_CONST::MUTEX, ace_mon, this->lock_, -1); ACE_Mutex_Invariants *inv = 0; if (this->get_mutex (token_name, inv) == -1) return -1; return inv->acquired (); } int ACE_Token_Invariant_Manager::acquired (const ACE_Token_Proxy *proxy) { ACE_TRACE ("ACE_Token_Invariant_Manager::acquired"); // Reach into the proxy to find the token type. if (proxy->token_->type () == ACE_Tokens::MUTEX) return this->mutex_acquired (proxy->name ()); else // ACE_Tokens::RWLOCK. { if (proxy->type () == ACE_RW_Token::READER) return this->reader_acquired (proxy->name ()); else // ACE_RW_Token::WRITER. 
return this->writer_acquired (proxy->name ()); } } void ACE_Token_Invariant_Manager::releasing (const ACE_Token_Proxy *proxy) { ACE_TRACE ("ACE_Token_Invariant_Manager::releasing"); // Reach into the proxy to find the token type. if (proxy->token_->type () == ACE_Tokens::MUTEX) this->mutex_releasing (proxy->name ()); else // ACE_Tokens::RWLOCK. this->rwlock_releasing (proxy->name ()); } void ACE_Token_Invariant_Manager::mutex_releasing (const ACE_TCHAR *token_name) { ACE_TRACE ("ACE_Token_Invariant_Manager::mutex_releasing"); ACE_GUARD (ACE_TOKEN_CONST::MUTEX, ace_mon, this->lock_); ACE_Mutex_Invariants *inv = 0; if (this->get_mutex (token_name, inv) == 0) inv->releasing (); } int ACE_Token_Invariant_Manager::reader_acquired (const ACE_TCHAR *token_name) { ACE_TRACE ("ACE_Token_Invariant_Manager::reader_acquired"); ACE_GUARD_RETURN (ACE_TOKEN_CONST::MUTEX, ace_mon, this->lock_, -1); ACE_RWLock_Invariants *inv = 0; if (this->get_rwlock (token_name, inv) == -1) return -1; return inv->reader_acquired (); } int ACE_Token_Invariant_Manager::writer_acquired (const ACE_TCHAR *token_name) { ACE_TRACE ("ACE_Token_Invariant_Manager::writer_acquired"); ACE_GUARD_RETURN (ACE_TOKEN_CONST::MUTEX, ace_mon, this->lock_, -1); ACE_RWLock_Invariants *inv = 0; if (this->get_rwlock (token_name, inv) == -1) return -1; return inv->writer_acquired (); } void ACE_Token_Invariant_Manager::rwlock_releasing (const ACE_TCHAR *token_name) { ACE_TRACE ("ACE_Token_Invariant_Manager::rwlock_releasing"); ACE_GUARD (ACE_TOKEN_CONST::MUTEX, ace_mon, this->lock_); ACE_RWLock_Invariants *inv = 0; if (this->get_rwlock (token_name, inv) == 0) inv->releasing (); } void ACE_Token_Invariant_Manager::dump (void) const { #if defined (ACE_HAS_DUMP) ACE_TRACE ("ACE_Token_Invariant_Manager::dump"); ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this)); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("mutex_collection_:\n"))); mutex_collection_.dump (); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("rwlock_collection_:\n"))); rwlock_collection_.dump 
(); ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP)); #endif /* ACE_HAS_DUMP */ } int ACE_Token_Invariant_Manager::get_mutex (const ACE_TCHAR *token_name, ACE_Mutex_Invariants *&inv) { ACE_TRACE ("ACE_Token_Invariant_Manager::get_mutex"); TOKEN_NAME name (token_name); if (mutex_collection_.find (name, inv) == -1) // We did not find one in the collection. { ACE_Mutex_Invariants *new_invariant; ACE_NEW_RETURN (new_invariant, ACE_Mutex_Invariants, -1); if (mutex_collection_.bind (name, new_invariant) == -1) { delete new_invariant; return -1; } if (mutex_collection_.find (name, inv) == -1) // We did not find one in the collection. return -1; } return 0; } int ACE_Token_Invariant_Manager::get_rwlock (const ACE_TCHAR *token_name, ACE_RWLock_Invariants *&inv) { ACE_TRACE ("ACE_Token_Invariant_Manager::get_rwlock"); TOKEN_NAME name (token_name); if (rwlock_collection_.find (name, inv) == -1) // We did not find one in the collection. { ACE_RWLock_Invariants *new_invariant; ACE_NEW_RETURN (new_invariant, ACE_RWLock_Invariants, -1); if (rwlock_collection_.bind (name, new_invariant) == -1) return -1; if (rwlock_collection_.find (name, inv) == -1) // We did not find one in the collection. 
return -1; } return 0; } ACE_Token_Invariant_Manager::~ACE_Token_Invariant_Manager (void) { ACE_TRACE ("ACE_Token_Invariant_Manager::~ACE_Token_Invariant_Manager"); MUTEX_COLLECTION::ITERATOR iterator (mutex_collection_); for (MUTEX_COLLECTION::ENTRY *temp = 0; iterator.next (temp) != 0; iterator.advance ()) delete temp->int_id_; RWLOCK_COLLECTION::ITERATOR iterator2 (rwlock_collection_); for (RWLOCK_COLLECTION::ENTRY *temp2 = 0; iterator2.next (temp2) != 0; iterator2.advance ()) delete temp2->int_id_; } // ************************************************** // ************************************************** // ************************************************** ACE_Mutex_Invariants::ACE_Mutex_Invariants (void) : owners_ (0) { } int ACE_Mutex_Invariants::acquired (void) { if (++owners_ > 1) { owners_ = 42; return 0; } else return 1; } void ACE_Mutex_Invariants::releasing (void) { if (owners_ == 1) --owners_; } ACE_Mutex_Invariants::ACE_Mutex_Invariants (const ACE_Mutex_Invariants &rhs) : owners_ (rhs.owners_) { } void ACE_Mutex_Invariants::operator= (const ACE_Mutex_Invariants &rhs) { owners_ = rhs.owners_; } void ACE_Mutex_Invariants::dump (void) const { #if defined (ACE_HAS_DUMP) ACE_TRACE ("ACE_Mutex_Invariants::dump"); ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this)); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("owners_ = %d\n"), owners_)); ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP)); #endif /* ACE_HAS_DUMP */ } // ************************************************** // ************************************************** // ************************************************** ACE_RWLock_Invariants::ACE_RWLock_Invariants (void) : writers_ (0), readers_ (0) { } int ACE_RWLock_Invariants::writer_acquired (void) { if (readers_ > 0) { writers_ = readers_ = 42; return 0; } else if (++writers_ > 1) { writers_ = readers_ = 42; return 0; } else return 1; } int ACE_RWLock_Invariants::reader_acquired (void) { if (writers_ > 0) { writers_ = readers_ = 42; return 0; } else { ++readers_; return 1; } } 
void ACE_RWLock_Invariants::releasing (void) { if (writers_ == 1) writers_ = 0; else if (readers_ > 0) --readers_; } ACE_RWLock_Invariants::ACE_RWLock_Invariants (const ACE_RWLock_Invariants &rhs) : writers_ (rhs.writers_), readers_ (rhs.readers_) { } void ACE_RWLock_Invariants::operator= (const ACE_RWLock_Invariants &rhs) { writers_ = rhs.writers_; readers_ = rhs.readers_; } void ACE_RWLock_Invariants::dump (void) const { #if defined (ACE_HAS_DUMP) ACE_TRACE ("ACE_RWLock_Invariants::dump"); ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this)); ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("writers_ = %d readers_ = %d\n"), writers_, readers_)); ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP)); #endif /* ACE_HAS_DUMP */ } ACE_END_VERSIONED_NAMESPACE_DECL #endif /* ACE_HAS_TOKENS_LIBRARY */
gpl-2.0
TeamEOS/kernel_htc_flounder
arch/arm/mach-tegra/tegra3_throttle.c
64
10090
/* * arch/arm/mach-tegra/tegra3_throttle.c * * Copyright (c) 2011-2014, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/kernel.h> #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/thermal.h> #include <linux/module.h> #include <mach/thermal.h> #include "clock.h" #include "cpu-tegra.h" /* cpu_throttle_lock is tegra_cpu_lock from cpu-tegra.c */ static struct mutex *cpu_throttle_lock; static DEFINE_MUTEX(bthrot_list_lock); static LIST_HEAD(bthrot_list); static int num_throt; static struct cpufreq_frequency_table *cpu_freq_table; static unsigned long cpu_throttle_lowest_speed; static unsigned long cpu_cap_freq; static struct { const char *cap_name; struct clk *cap_clk; unsigned long cap_freq; } cap_freqs_table[] = { #ifdef CONFIG_ARCH_TEGRA_12x_SOC { .cap_name = "cap.throttle.gbus" }, #endif #ifdef CONFIG_TEGRA_GPU_DVFS { .cap_name = "cap.throttle.c2bus" }, { .cap_name = "cap.throttle.c3bus" }, #else { .cap_name = "cap.throttle.cbus" }, #endif { .cap_name = "cap.throttle.sclk" }, { .cap_name = "cap.throttle.emc" }, }; static bool tegra_throttle_init_failed; #define CAP_TBL_CAP_NAME(index) (cap_freqs_table[index].cap_name) #define CAP_TBL_CAP_CLK(index) (cap_freqs_table[index].cap_clk) #define CAP_TBL_CAP_FREQ(index) 
(cap_freqs_table[index].cap_freq) #ifndef CONFIG_TEGRA_THERMAL_THROTTLE_EXACT_FREQ static unsigned long clip_to_table(unsigned long cpu_freq) { int i; if (IS_ERR_OR_NULL(cpu_freq_table)) return -EINVAL; for (i = 0; cpu_freq_table[i].frequency != CPUFREQ_TABLE_END; i++) { if (cpu_freq_table[i].frequency > cpu_freq) break; } i = (i == 0) ? 0 : i-1; return cpu_freq_table[i].frequency; } #else static unsigned long clip_to_table(unsigned long cpu_freq) { return cpu_freq; } #endif /* CONFIG_TEGRA_THERMAL_THROTTLE_EXACT_FREQ */ unsigned long tegra_throttle_governor_speed(unsigned long requested_speed) { if (cpu_cap_freq == NO_CAP || cpu_cap_freq == 0) return requested_speed; return min(requested_speed, cpu_cap_freq); } bool tegra_is_throttling(int *count) { struct balanced_throttle *bthrot; bool is_throttling = false; int lcount = 0; mutex_lock(&bthrot_list_lock); list_for_each_entry(bthrot, &bthrot_list, node) { if (bthrot->cur_state) is_throttling = true; lcount += bthrot->throttle_count; } mutex_unlock(&bthrot_list_lock); if (count) *count = lcount; return is_throttling; } static int tegra_throttle_get_max_state(struct thermal_cooling_device *cdev, unsigned long *max_state) { struct balanced_throttle *bthrot = cdev->devdata; *max_state = bthrot->throt_tab_size; return 0; } static int tegra_throttle_get_cur_state(struct thermal_cooling_device *cdev, unsigned long *cur_state) { struct balanced_throttle *bthrot = cdev->devdata; *cur_state = bthrot->cur_state; return 0; } static void tegra_throttle_set_cap_clk(struct throttle_table *throt_tab, int cap_clk_index) { unsigned long cap_rate, clk_rate; if (tegra_throttle_init_failed) return; cap_rate = throt_tab->cap_freqs[cap_clk_index]; if (cap_rate == NO_CAP) clk_rate = clk_get_max_rate(CAP_TBL_CAP_CLK(cap_clk_index-1)); else clk_rate = cap_rate * 1000UL; if (CAP_TBL_CAP_FREQ(cap_clk_index-1) != clk_rate) { clk_set_rate(CAP_TBL_CAP_CLK(cap_clk_index-1), clk_rate); CAP_TBL_CAP_FREQ(cap_clk_index-1) = clk_rate; } } static void 
tegra_throttle_cap_freqs_update(struct throttle_table *throt_tab, int direction) { int i; int num_of_cap_clocks = ARRAY_SIZE(cap_freqs_table); if (direction == 1) { /* performance up : throttle less */ for (i = num_of_cap_clocks; i > 0; i--) tegra_throttle_set_cap_clk(throt_tab, i); } else { /* performance down : throotle more */ for (i = 1; i <= num_of_cap_clocks; i++) tegra_throttle_set_cap_clk(throt_tab, i); } } static int tegra_throttle_set_cur_state(struct thermal_cooling_device *cdev, unsigned long cur_state) { struct balanced_throttle *bthrot = cdev->devdata; int direction; int i; int num_of_cap_clocks = ARRAY_SIZE(cap_freqs_table); unsigned long bthrot_speed; struct throttle_table *throt_entry; struct throttle_table cur_throt_freq; if (cpu_freq_table == NULL) return 0; if (bthrot->cur_state == cur_state) return 0; if (bthrot->cur_state == 0 && cur_state) bthrot->throttle_count++; direction = bthrot->cur_state >= cur_state; bthrot->cur_state = cur_state; for (i = 0; i <= num_of_cap_clocks; i++) cur_throt_freq.cap_freqs[i] = NO_CAP; mutex_lock(&bthrot_list_lock); list_for_each_entry(bthrot, &bthrot_list, node) { if (bthrot->cur_state) { throt_entry = &bthrot->throt_tab[bthrot->cur_state-1]; for (i = 0; i <= num_of_cap_clocks; i++) { cur_throt_freq.cap_freqs[i] = min( cur_throt_freq.cap_freqs[i], throt_entry->cap_freqs[i]); } } } tegra_throttle_cap_freqs_update(&cur_throt_freq, direction); bthrot_speed = cur_throt_freq.cap_freqs[0]; if (bthrot_speed == CPU_THROT_LOW) bthrot_speed = cpu_throttle_lowest_speed; else bthrot_speed = clip_to_table(bthrot_speed); cpu_cap_freq = bthrot_speed; tegra_cpu_set_speed_cap(NULL); mutex_unlock(&bthrot_list_lock); return 0; } static struct thermal_cooling_device_ops tegra_throttle_cooling_ops = { .get_max_state = tegra_throttle_get_max_state, .get_cur_state = tegra_throttle_get_cur_state, .set_cur_state = tegra_throttle_set_cur_state, }; #ifdef CONFIG_DEBUG_FS static int table_show(struct seq_file *s, void *data) { struct 
balanced_throttle *bthrot = s->private; int i, j; for (i = 0; i < bthrot->throt_tab_size; i++) { /* CPU FREQ */ seq_printf(s, "%s[%d] = %7lu", i < 10 ? " " : "", i, bthrot->throt_tab[i].cap_freqs[0]); /* OTHER DVFS MODULE FREQS */ for (j = 1; j <= ARRAY_SIZE(cap_freqs_table); j++) if (bthrot->throt_tab[i].cap_freqs[j] == NO_CAP) seq_puts(s, " NO CAP"); else seq_printf(s, " %7lu", bthrot->throt_tab[i].cap_freqs[j]); seq_puts(s, "\n"); } return 0; } static int table_open(struct inode *inode, struct file *file) { return single_open(file, table_show, inode->i_private); } static ssize_t table_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { struct balanced_throttle *bthrot = ((struct seq_file *)(file->private_data))->private; char buf[80], temp_buf[10], *cur_pos; int table_idx, i; unsigned long cap_rate; if (sizeof(buf) <= count) return -EINVAL; if (copy_from_user(buf, userbuf, count)) return -EFAULT; /* terminate buffer and trim - white spaces may be appended * at the end when invoked from shell command line */ buf[count] = '\0'; strim(buf); cur_pos = buf; /* get table index */ if (sscanf(cur_pos, "[%d] = ", &table_idx) != 1) return -EINVAL; sscanf(cur_pos, "[%s] = ", temp_buf); cur_pos += strlen(temp_buf) + 4; if ((table_idx < 0) || (table_idx >= bthrot->throt_tab_size)) return -EINVAL; /* CPU FREQ and DVFS FREQS == DVFS FREQS + 1(cpu) */ for (i = 0; i < ARRAY_SIZE(cap_freqs_table) + 1; i++) { if (sscanf(cur_pos, "%lu", &cap_rate) != 1) return -EINVAL; sscanf(cur_pos, "%s", temp_buf); cur_pos += strlen(temp_buf) + 1; bthrot->throt_tab[table_idx].cap_freqs[i] = cap_rate; } return count; } static const struct file_operations table_fops = { .open = table_open, .read = seq_read, .write = table_write, .llseek = seq_lseek, .release = single_release, }; static struct dentry *throttle_debugfs_root; #endif /* CONFIG_DEBUG_FS */ struct thermal_cooling_device *balanced_throttle_register( struct balanced_throttle *bthrot, char *type) { #ifdef 
CONFIG_DEBUG_FS char name[32]; #endif mutex_lock(&bthrot_list_lock); num_throt++; list_add(&bthrot->node, &bthrot_list); mutex_unlock(&bthrot_list_lock); #ifdef CONFIG_DEBUG_FS sprintf(name, "throttle_table%d", num_throt); if (!throttle_debugfs_root || IS_ERR_OR_NULL( debugfs_create_file(name, 0644, throttle_debugfs_root, bthrot, &table_fops))) return ERR_PTR(-ENODEV); #endif bthrot->cdev = thermal_cooling_device_register( type, bthrot, &tegra_throttle_cooling_ops); if (IS_ERR(bthrot->cdev)) { bthrot->cdev = NULL; return ERR_PTR(-ENODEV); } return bthrot->cdev; } int __init tegra_throttle_init(struct mutex *cpu_lock) { int i; struct clk *c; struct tegra_cpufreq_table_data *table_data = tegra_cpufreq_table_get(); if (IS_ERR_OR_NULL(table_data)) return -EINVAL; cpu_freq_table = table_data->freq_table; cpu_throttle_lowest_speed = cpu_freq_table[table_data->throttle_lowest_index].frequency; cpu_throttle_lock = cpu_lock; #ifdef CONFIG_DEBUG_FS throttle_debugfs_root = debugfs_create_dir("tegra_throttle", NULL); if (IS_ERR_OR_NULL(throttle_debugfs_root)) pr_err("%s: debugfs_create_dir 'tegra_throttle' FAILED.\n", __func__); #endif for (i = 0; i < ARRAY_SIZE(cap_freqs_table); i++) { c = tegra_get_clock_by_name(CAP_TBL_CAP_NAME(i)); if (!c) { pr_err("tegra_throttle: cannot get clock %s\n", CAP_TBL_CAP_NAME(i)); tegra_throttle_init_failed = true; continue; } CAP_TBL_CAP_CLK(i) = c; CAP_TBL_CAP_FREQ(i) = clk_get_max_rate(c); } pr_info("tegra_throttle : init %s\n", tegra_throttle_init_failed ? "FAILED" : "passed"); return 0; } void tegra_throttle_exit(void) { #ifdef CONFIG_DEBUG_FS debugfs_remove_recursive(throttle_debugfs_root); #endif }
gpl-2.0
pershoot/galaxy-31
arch/arm/mm/pageattr.c
64
26251
/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/pfn.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/mutex.h>

#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>

#ifdef CPA_DEBUG
#define cpa_debug(x, ...)	printk(x, __VA_ARGS__)
#else
#define cpa_debug(x, ...)
#endif

/* above this many pages, clean the whole inner cache by set/way instead
 * of per-page */
#define FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD 8
extern void v7_flush_kern_cache_all(void *);
extern void __flush_dcache_page(struct address_space *, struct page *);

static void inner_flush_cache_all(void)
{
	on_each_cpu(v7_flush_kern_cache_all, NULL, 1);
}

#if defined(CONFIG_CPA)

/*
 * The current flushing context - we pass it instead of 5 arguments:
 */
struct cpa_data {
	unsigned long	*vaddr;
	pgprot_t	mask_set;
	pgprot_t	mask_clr;
	int		numpages;
	int		flags;
	unsigned long	pfn;
	unsigned	force_split:1;
	int		curpage;
	struct page	**pages;
};

/*
 * Serialize cpa() (for !DEBUG_PAGEALLOC which uses large identity mappings)
 * using cpa_lock. So that we don't allow any other cpu, with stale large tlb
 * entries change the page attribute in parallel to some other cpu
 * splitting a large page entry along with changing the attribute.
 */
static DEFINE_MUTEX(cpa_lock);

#define CPA_FLUSHTLB 1
#define CPA_ARRAY 2
#define CPA_PAGES_ARRAY 4

#ifdef CONFIG_PROC_FS
static unsigned long direct_pages_count[PG_LEVEL_NUM];

void update_page_count(int level, unsigned long pages)
{
	unsigned long flags;

	/* Protect against CPA */
	spin_lock_irqsave(&pgd_lock, flags);
	direct_pages_count[level] += pages;
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/* Account a 2M section being split into PTRS_PER_PTE 4K pages. */
static void split_page_count(int level)
{
	direct_pages_count[level]--;
	direct_pages_count[level - 1] += PTRS_PER_PTE;
}

void arch_report_meminfo(struct seq_file *m)
{
	seq_printf(m, "DirectMap4k:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_4K] << 2);
	seq_printf(m, "DirectMap2M:    %8lu kB\n",
			direct_pages_count[PG_LEVEL_2M] << 11);
}
#else
static inline void split_page_count(int level) { }
#endif

#ifdef CONFIG_DEBUG_PAGEALLOC
# define debug_pagealloc 1
#else
# define debug_pagealloc 0
#endif

/* True when start <= addr < end. */
static inline int
within(unsigned long addr, unsigned long start, unsigned long end)
{
	return addr >= start && addr < end;
}

/* TLB-flush a linear range, then (optionally) clean its D-cache lines. */
static void cpa_flush_range(unsigned long start, int numpages, int cache)
{
	unsigned int i, level;
	unsigned long addr;

	BUG_ON(irqs_disabled());
	WARN_ON(PAGE_ALIGN(start) != start);

	flush_tlb_kernel_range(start, start + (numpages << PAGE_SHIFT));

	if (!cache)
		return;

	for (i = 0, addr = start; i < numpages; i++, addr += PAGE_SIZE) {
		pte_t *pte = lookup_address(addr, &level);

		/*
		 * Only flush present addresses:
		 */
		if (pte && pte_present(*pte)) {
			__cpuc_flush_dcache_area((void *) addr, PAGE_SIZE);
			outer_flush_range(__pa((void *)addr),
				__pa((void *)addr) + PAGE_SIZE);
		}
	}
}

/*
 * Flush TLB/caches for an array of addresses or pages.  For large page
 * arrays the inner cache is cleaned wholesale (set/way) once, instead of
 * per page.
 */
static void cpa_flush_array(unsigned long *start, int numpages, int cache,
			    int in_flags, struct page **pages)
{
	unsigned int i, level;
	bool flush_inner = true;
	unsigned long base;

	BUG_ON(irqs_disabled());

	if (numpages >= FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD &&
	    cache && in_flags & CPA_PAGES_ARRAY) {
		inner_flush_cache_all();
		flush_inner = false;
	}

	for (i = 0; i < numpages; i++) {
		unsigned long addr;
		pte_t *pte;

		if (in_flags & CPA_PAGES_ARRAY)
			addr = (unsigned long)page_address(pages[i]);
		else
			addr = start[i];

		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

		if (cache && in_flags & CPA_PAGES_ARRAY) {
			/* cache flush all pages including high mem pages. */
			if (flush_inner)
				__flush_dcache_page(
					page_mapping(pages[i]), pages[i]);
			base = page_to_phys(pages[i]);
			outer_flush_range(base, base + PAGE_SIZE);
		} else if (cache) {
			pte = lookup_address(addr, &level);

			/*
			 * Only flush present addresses:
			 */
			if (pte && pte_present(*pte)) {
				__cpuc_flush_dcache_area((void *)addr,
					PAGE_SIZE);
				outer_flush_range(__pa((void *)addr),
					__pa((void *)addr) + PAGE_SIZE);
			}
		}
	}
}

/*
 * Certain areas of memory require very specific protection flags,
 * for example the kernel text. Callers don't always get this
 * right so this function checks and fixes these known static
 * required protection bits.
 */
static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
				   unsigned long pfn)
{
	pgprot_t forbidden = __pgprot(0);

	/*
	 * The kernel text needs to be executable for obvious reasons
	 * Does not cover __inittext since that is gone later on.
	 */
	if (within(address, (unsigned long)_text, (unsigned long)_etext))
		pgprot_val(forbidden) |= L_PTE_XN;

	/*
	 * The .rodata section needs to be read-only. Using the pfn
	 * catches all aliases.
	 */
	if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
		   __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
		prot |= L_PTE_RDONLY;

	/*
	 * Mask off the forbidden bits and set the bits that are needed
	 */
	prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));

	return prot;
}

/* Translate L_PTE_* pte protections (+extended AP/TEX bits) to section
 * (pmd) protections. */
static inline pgprot_t pte_to_pmd_pgprot(unsigned long pte,
					 unsigned long ext_prot)
{
	pgprot_t ref_prot;

	ref_prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE;

	if (pte & L_PTE_MT_BUFFERABLE)
		ref_prot |= PMD_SECT_BUFFERABLE;

	if (pte & L_PTE_MT_WRITETHROUGH)
		ref_prot |= PMD_SECT_CACHEABLE;

	if (pte & L_PTE_SHARED)
		ref_prot |= PMD_SECT_S;

	if (pte & L_PTE_XN)
		ref_prot |= PMD_SECT_XN;

	if (pte & L_PTE_RDONLY)
		ref_prot &= ~PMD_SECT_AP_WRITE;

	ref_prot |= (ext_prot & (PTE_EXT_AP0 | PTE_EXT_AP1 | PTE_EXT_APX |
				 PTE_EXT_NG | (7 << 6))) << 6;

	return ref_prot;
}

/* Inverse of pte_to_pmd_pgprot(): section (pmd) protections back to
 * L_PTE_* pte protections; *ext_prot receives the AP/APX/TEX bits. */
static inline pgprot_t pmd_to_pte_pgprot(unsigned long pmd,
					 unsigned long *ext_prot)
{
	pgprot_t ref_prot = 0;

	ref_prot |= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY;

	if (pmd & PMD_SECT_BUFFERABLE)
		ref_prot |= L_PTE_MT_BUFFERABLE;

	if (pmd & PMD_SECT_CACHEABLE)
		ref_prot |= L_PTE_MT_WRITETHROUGH;

	if (pmd & PMD_SECT_S)
		ref_prot |= L_PTE_SHARED;

	if (pmd & PMD_SECT_XN)
		ref_prot |= L_PTE_XN;

	if (pmd & PMD_SECT_AP_WRITE)
		ref_prot &= ~L_PTE_RDONLY;

	/* AP/APX/TEX bits */
	*ext_prot = (pmd & (PMD_SECT_AP_WRITE | PMD_SECT_AP_READ |
			    PMD_SECT_APX | PMD_SECT_nG | (7 << 12))) >> 6;

	return ref_prot;
}

/*
 * Lookup the page table entry for a virtual address. Return a pointer
 * to the entry and the level of the mapping.
 *
 * Note: We return pud and pmd either when the entry is marked large
 * or when the present bit is not set. Otherwise we would return a
 * pointer to a nonexisting mapping.
 */
pte_t *lookup_address(unsigned long address, unsigned int *level)
{
	pgd_t *pgd = pgd_offset_k(address);
	pte_t *pte;
	pmd_t *pmd;

	/* pmds are folded into pgds on ARM */
	*level = PG_LEVEL_NONE;

	if (pgd == NULL || pgd_none(*pgd))
		return NULL;

	pmd = pmd_offset(pgd, address);

	if (pmd == NULL || pmd_none(*pmd) || !pmd_present(*pmd))
		return NULL;

	if (((pmd_val(*pmd) & (PMD_TYPE_SECT | PMD_SECT_SUPER))
		== (PMD_TYPE_SECT | PMD_SECT_SUPER)) || !pmd_present(*pmd)) {
		return NULL;
	} else if (pmd_val(*pmd) & PMD_TYPE_SECT) {
		*level = PG_LEVEL_2M;
		return (pte_t *)pmd;
	}

	pte = pte_offset_kernel(pmd, address);

	if ((pte == NULL) || pte_none(*pte))
		return NULL;

	*level = PG_LEVEL_4K;

	return pte;
}
EXPORT_SYMBOL_GPL(lookup_address);

/*
 * Set the new pmd in all the pgds we know about:
 */
static void __set_pmd_pte(pmd_t *pmd, unsigned long address, pte_t *pte)
{
	struct page *page;

	cpa_debug("__set_pmd_pte %x %x %x\n", pmd, pte, *pte);

	/* enforce pte entry stores ordering to avoid pmd writes
	 * bypassing pte stores. */
	dsb();
	/* change init_mm */
	pmd_populate_kernel(&init_mm, pmd, pte);

	/* change entry in all the pgd's */
	list_for_each_entry(page, &pgd_list, lru) {
		cpa_debug("list %x %x %x\n", (unsigned long)page,
			(unsigned long)pgd_index(address), address);
		pmd = pmd_offset(((pgd_t *)page_address(page)) +
			pgd_index(address), address);
		pmd_populate_kernel(NULL, pmd, pte);
	}
	/* enforce pmd entry stores ordering to avoid tlb flush bypassing
	 * pmd entry stores.
	 */
	dsb();
}

/*
 * Decide whether a 2M section can keep its large mapping under the
 * requested attribute change, applying the change in place if possible.
 * Returns 1 -> split required, 0 -> preserved/no change, <0 -> error.
 */
static int
try_preserve_large_page(pte_t *kpte, unsigned long address,
			struct cpa_data *cpa)
{
	unsigned long nextpage_addr, numpages, pmask, psize, flags, addr, pfn;
	pte_t old_pte, *tmp;
	pgprot_t old_prot, new_prot, ext_prot, req_prot;
	int i, do_split = 1;
	unsigned int level;

	if (cpa->force_split)
		return 1;

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	/*
	 * BUGFIX: snapshot the current entry.  old_pte was previously
	 * never assigned, so the pmd_pfn(old_pte) read below consumed an
	 * uninitialized stack value (undefined behavior).
	 */
	old_pte = *kpte;

	switch (level) {
	case PG_LEVEL_2M:
		psize = PMD_SIZE;
		pmask = PMD_MASK;
		break;
	default:
		do_split = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Calculate the number of pages, which fit into this large
	 * page starting at address:
	 */
	nextpage_addr = (address + psize) & pmask;
	numpages = (nextpage_addr - address) >> PAGE_SHIFT;
	if (numpages < cpa->numpages)
		cpa->numpages = numpages;

	old_prot = new_prot = req_prot =
		pmd_to_pte_pgprot(pmd_val(*kpte), &ext_prot);

	pgprot_val(req_prot) &= ~pgprot_val(cpa->mask_clr);
	pgprot_val(req_prot) |= pgprot_val(cpa->mask_set);

	/*
	 * old_pte points to the large page base address. So we need
	 * to add the offset of the virtual address:
	 */
	pfn = pmd_pfn(*kpte) + ((address & (psize - 1)) >> PAGE_SHIFT);
	cpa->pfn = pfn;

	new_prot = static_protections(req_prot, address, pfn);

	/*
	 * We need to check the full range, whether
	 * static_protection() requires a different pgprot for one of
	 * the pages in the range we try to preserve:
	 */
	addr = address & pmask;
	pfn = pmd_pfn(old_pte);
	for (i = 0; i < (psize >> PAGE_SHIFT); i++, addr += PAGE_SIZE, pfn++) {
		pgprot_t chk_prot = static_protections(req_prot, addr, pfn);

		if (pgprot_val(chk_prot) != pgprot_val(new_prot))
			goto out_unlock;
	}

	/*
	 * If there are no changes, return. maxpages has been updated
	 * above:
	 */
	if (pgprot_val(new_prot) == pgprot_val(old_prot)) {
		do_split = 0;
		goto out_unlock;
	}

	/*
	 * convert prot to pmd format
	 */
	new_prot = pte_to_pmd_pgprot(new_prot, ext_prot);

	/*
	 * We need to change the attributes. Check, whether we can
	 * change the large page in one go. We request a split, when
	 * the address is not aligned and the number of pages is
	 * smaller than the number of pages in the large page. Note
	 * that we limited the number of possible pages already to
	 * the number of pages in the large page.
	 */
	if (address == (nextpage_addr - psize) && cpa->numpages == numpages) {
		/*
		 * The address is aligned and the number of pages
		 * covers the full page.
		 */
		phys_addr_t phys = __pfn_to_phys(pmd_pfn(*kpte));
		pmd_t *p = (pmd_t *)kpte;

		/* two hardware sections back an ARM Linux 2M "pmd" */
		*kpte++ = __pmd(phys | new_prot);
		*kpte = __pmd((phys + SECTION_SIZE) | new_prot);
		flush_pmd_entry(p);
		cpa->flags |= CPA_FLUSHTLB;
		do_split = 0;
		cpa_debug("preserving page at phys %x pmd %x\n", phys, p);
	}

out_unlock:
	spin_unlock_irqrestore(&pgd_lock, flags);

	return do_split;
}

/*
 * Split one 2M section mapping at @address into a 4K pte table carrying
 * equivalent protections.  Returns 0 on success or a negative errno.
 */
static int split_large_page(pte_t *kpte, unsigned long address)
{
	unsigned long flags, pfn, pfninc = 1;
	unsigned int i, level;
	pte_t *pbase, *tmp;
	pgprot_t ref_prot = 0, ext_prot = 0;
	int ret = 0;

	pbase = pte_alloc_one_kernel(&init_mm, address);
	if (!pbase)
		return -ENOMEM;

	cpa_debug("split_large_page %x PMD %x new pte @ %x\n", address,
		*kpte, pbase);

	spin_lock_irqsave(&pgd_lock, flags);
	/*
	 * Check for races, another CPU might have split this page
	 * up for us already:
	 */
	tmp = lookup_address(address, &level);
	if (tmp != kpte)
		goto out_unlock;

	/*
	 * we only split 2MB entries for now
	 */
	if (level != PG_LEVEL_2M) {
		ret = -EINVAL;
		goto out_unlock;
	}

	ref_prot = pmd_to_pte_pgprot(pmd_val(*kpte), &ext_prot);

	/*
	 * Get the target pfn from the original entry:
	 */
	pfn = pmd_pfn(*kpte);
	for (i = 0; i < PTRS_PER_PTE; i++, pfn += pfninc)
		set_pte_ext(&pbase[i], pfn_pte(pfn, ref_prot), ext_prot);

	if (address >= (unsigned long)__va(0) &&
		address < (unsigned long)__va(lowmem_limit))
		split_page_count(level);

	/*
	 * Install the new, split up pagetable.
	 */
	__set_pmd_pte((pmd_t *)kpte, address, pbase);

	pbase = NULL;

out_unlock:
	/*
	 * If we dropped out via the lookup_address check under
	 * pgd_lock then stick the page back into the pool:
	 */
	if (pbase)
		pte_free_kernel(&init_mm, pbase);
	spin_unlock_irqrestore(&pgd_lock, flags);

	return ret;
}

/* Handle a missing pte for a cpa request; only primary requests matter. */
static int __cpa_process_fault(struct cpa_data *cpa, unsigned long vaddr,
			       int primary)
{
	/*
	 * Ignore all non primary paths.
	 */
	if (!primary)
		return 0;

	/*
	 * Ignore the NULL PTE for kernel identity mapping, as it is expected
	 * to have holes.
	 * Also set numpages to '1' indicating that we processed cpa req for
	 * one virtual address page and its pfn. TBD: numpages can be set based
	 * on the initial value and the level returned by lookup_address().
	 */
	if (within(vaddr, PAGE_OFFSET,
		   PAGE_OFFSET + lowmem_limit)) {
		cpa->numpages = 1;
		cpa->pfn = __pa(vaddr) >> PAGE_SHIFT;
		return 0;
	} else {
		WARN(1, KERN_WARNING "CPA: called for zero pte. "
			"vaddr = %lx cpa->vaddr = %lx\n", vaddr,
			*cpa->vaddr);

		return -EFAULT;
	}
}

/*
 * Apply the requested attribute change to the single mapping covering the
 * current cpa address, splitting a large page first when it cannot be
 * preserved.
 */
static int __change_page_attr(struct cpa_data *cpa, int primary)
{
	unsigned long address;
	int do_split, err;
	unsigned int level;
	pte_t *kpte, old_pte;

	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[cpa->curpage];

		if (unlikely(PageHighMem(page)))
			return 0;
		address = (unsigned long)page_address(page);

	} else if (cpa->flags & CPA_ARRAY)
		address = cpa->vaddr[cpa->curpage];
	else
		address = *cpa->vaddr;

repeat:
	kpte = lookup_address(address, &level);
	if (!kpte)
		return __cpa_process_fault(cpa, address, primary);

	old_pte = *kpte;
	if (!pte_val(old_pte))
		return __cpa_process_fault(cpa, address, primary);

	if (level == PG_LEVEL_4K) {
		pte_t new_pte;
		pgprot_t new_prot = pte_pgprot(old_pte);
		unsigned long pfn = pte_pfn(old_pte);

		pgprot_val(new_prot) &= ~pgprot_val(cpa->mask_clr);
		pgprot_val(new_prot) |= pgprot_val(cpa->mask_set);

		new_prot = static_protections(new_prot, address, pfn);

		/*
		 * We need to keep the pfn from the existing PTE,
		 * after all we're only going to change it's attributes
		 * not the memory it points to
		 */
		new_pte = pfn_pte(pfn, new_prot);
		cpa->pfn = pfn;

		/*
		 * Do we really change anything ?
		 */
		if (pte_val(old_pte) != pte_val(new_pte)) {
			set_pte_ext(kpte, new_pte, 0);
			/*
			 * FIXME : is this needed on arm?
			 * set_pte_ext already does a flush
			 */
			cpa->flags |= CPA_FLUSHTLB;
		}
		cpa->numpages = 1;
		return 0;
	}

	/*
	 * Check, whether we can keep the large page intact
	 * and just change the pte:
	 */
	do_split = try_preserve_large_page(kpte, address, cpa);

	/*
	 * When the range fits into the existing large page,
	 * return. cp->numpages and cpa->tlbflush have been updated in
	 * try_large_page:
	 */
	if (do_split <= 0)
		return do_split;

	/*
	 * We have to split the large page:
	 */
	err = split_large_page(kpte, address);

	if (!err) {
		/*
		 * Do a global flush tlb after splitting the large page
		 * and before we do the actual change page attribute in the
		 * PTE.
		 *
		 * Without this, we violate the TLB application note, that says
		 * "The TLBs may contain both ordinary and large-page
		 * translations for a 4-KByte range of linear addresses. This
		 * may occur if software modifies the paging structures so that
		 * the page size used for the address range changes. If the two
		 * translations differ with respect to page frame or attributes
		 * (e.g., permissions), processor behavior is undefined and may
		 * be implementation-specific."
		 *
		 * We do this global tlb flush inside the cpa_lock, so that we
		 * don't allow any other cpu, with stale tlb entries change the
		 * page attribute in parallel, that also falls into the
		 * just split large page entry.
		 */
		flush_tlb_all();
		goto repeat;
	}

	return err;
}

static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias);

/* Re-run the change on the lowmem linear-map alias of the target pfn. */
static int cpa_process_alias(struct cpa_data *cpa)
{
	struct cpa_data alias_cpa;
	unsigned long laddr = (unsigned long)__va(cpa->pfn << PAGE_SHIFT);
	unsigned long vaddr;
	int ret;

	if (cpa->pfn >= (lowmem_limit >> PAGE_SHIFT))
		return 0;

	/*
	 * No need to redo, when the primary call touched the direct
	 * mapping already:
	 */
	if (cpa->flags & CPA_PAGES_ARRAY) {
		struct page *page = cpa->pages[cpa->curpage];

		if (unlikely(PageHighMem(page)))
			return 0;
		vaddr = (unsigned long)page_address(page);

	} else if (cpa->flags & CPA_ARRAY)
		vaddr = cpa->vaddr[cpa->curpage];
	else
		vaddr = *cpa->vaddr;

	if (!(within(vaddr, PAGE_OFFSET,
		PAGE_OFFSET + lowmem_limit))) {

		alias_cpa = *cpa;
		alias_cpa.vaddr = &laddr;
		alias_cpa.flags &= ~(CPA_PAGES_ARRAY | CPA_ARRAY);

		ret = __change_page_attr_set_clr(&alias_cpa, 0);
		if (ret)
			return ret;
	}

	return 0;
}

/* Drive __change_page_attr() over the whole request, page chunk by chunk. */
static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
{
	int ret, numpages = cpa->numpages;

	while (numpages) {
		/*
		 * Store the remaining nr of pages for the large page
		 * preservation check.
		 */
		cpa->numpages = numpages;

		/* for array changes, we can't use large page */
		if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
			cpa->numpages = 1;

		if (!debug_pagealloc)
			mutex_lock(&cpa_lock);
		ret = __change_page_attr(cpa, checkalias);
		if (!debug_pagealloc)
			mutex_unlock(&cpa_lock);
		if (ret)
			return ret;

		if (checkalias) {
			ret = cpa_process_alias(cpa);
			if (ret)
				return ret;
		}

		/*
		 * Adjust the number of pages with the result of the
		 * CPA operation. Either a large page has been
		 * preserved or a single page update happened.
		 */
		BUG_ON(cpa->numpages > numpages);
		numpages -= cpa->numpages;
		if (cpa->flags & (CPA_PAGES_ARRAY | CPA_ARRAY))
			cpa->curpage++;
		else
			*cpa->vaddr += cpa->numpages * PAGE_SIZE;
	}

	return 0;
}

static inline int cache_attr(pgprot_t attr)
{
	/*
	 * We need to flush the cache for all memory type changes
	 * except when a page is being marked write back cacheable
	 */
	return !((pgprot_val(attr) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK);
}

/*
 * Top-level worker: validate/align the request, run the attribute change,
 * then perform whatever TLB/cache flushing it made necessary.
 */
static int change_page_attr_set_clr(unsigned long *addr, int numpages,
				    pgprot_t mask_set, pgprot_t mask_clr,
				    int force_split, int in_flag,
				    struct page **pages)
{
	struct cpa_data cpa;
	int ret, cache, checkalias;
	unsigned long baddr = 0;

	if (!pgprot_val(mask_set) && !pgprot_val(mask_clr) && !force_split)
		return 0;

	/* Ensure we are PAGE_SIZE aligned */
	if (in_flag & CPA_ARRAY) {
		int i;

		for (i = 0; i < numpages; i++) {
			if (addr[i] & ~PAGE_MASK) {
				addr[i] &= PAGE_MASK;
				WARN_ON_ONCE(1);
			}
		}
	} else if (!(in_flag & CPA_PAGES_ARRAY)) {
		/*
		 * in_flag of CPA_PAGES_ARRAY implies it is aligned.
		 * No need to check in that case
		 */
		if (*addr & ~PAGE_MASK) {
			*addr &= PAGE_MASK;
			/*
			 * People should not be passing in unaligned
			 * addresses:
			 */
			WARN_ON_ONCE(1);
		}
		/*
		 * Save address for cache flush. *addr is modified in the call
		 * to __change_page_attr_set_clr() below.
		 */
		baddr = *addr;
	}

	/* Must avoid aliasing mappings in the highmem code */
	kmap_flush_unused();

	vm_unmap_aliases();

	cpa.vaddr = addr;
	cpa.pages = pages;
	cpa.numpages = numpages;
	cpa.mask_set = mask_set;
	cpa.mask_clr = mask_clr;
	cpa.flags = 0;
	cpa.curpage = 0;
	cpa.force_split = force_split;

	if (in_flag & (CPA_ARRAY | CPA_PAGES_ARRAY))
		cpa.flags |= in_flag;

	/* No alias checking for XN bit modifications */
	checkalias = (pgprot_val(mask_set) |
				pgprot_val(mask_clr)) != L_PTE_XN;

	ret = __change_page_attr_set_clr(&cpa, checkalias);

	cache = cache_attr(mask_set);
	/*
	 * Check whether we really changed something or
	 * cache need to be flushed.
	 */
	if (!(cpa.flags & CPA_FLUSHTLB) && !cache)
		goto out;

	if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
		cpa_flush_array(addr, numpages, cache,
				cpa.flags, pages);
	} else
		cpa_flush_range(baddr, numpages, cache);

out:
	return ret;
}

static inline int change_page_attr_set(unsigned long *addr, int numpages,
				       pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, mask, __pgprot(0), 0,
		(array ? CPA_ARRAY : 0), NULL);
}

static inline int change_page_attr_clear(unsigned long *addr, int numpages,
					 pgprot_t mask, int array)
{
	return change_page_attr_set_clr(addr, numpages, __pgprot(0), mask, 0,
		(array ? CPA_ARRAY : 0), NULL);
}

static inline int cpa_set_pages_array(struct page **pages, int numpages,
				      pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, mask, __pgprot(0), 0,
		CPA_PAGES_ARRAY, pages);
}

static inline int cpa_clear_pages_array(struct page **pages, int numpages,
					pgprot_t mask)
{
	return change_page_attr_set_clr(NULL, numpages, __pgprot(0), mask, 0,
		CPA_PAGES_ARRAY, pages);
}

int set_memory_uc(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(&addr, numpages,
			__pgprot(L_PTE_MT_UNCACHED),
			__pgprot(L_PTE_MT_MASK),
			0, 0, NULL);
}
EXPORT_SYMBOL(set_memory_uc);

int _set_memory_array(unsigned long *addr, int addrinarray,
		      unsigned long set, unsigned long clr)
{
	return change_page_attr_set_clr(addr, addrinarray, __pgprot(set),
		__pgprot(clr), 0, CPA_ARRAY, NULL);
}

int set_memory_array_uc(unsigned long *addr, int addrinarray)
{
	return _set_memory_array(addr, addrinarray,
		L_PTE_MT_UNCACHED, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_memory_array_uc);

int set_memory_array_wc(unsigned long *addr, int addrinarray)
{
	return _set_memory_array(addr, addrinarray,
		L_PTE_MT_BUFFERABLE, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_memory_array_wc);

int set_memory_wc(unsigned long addr, int numpages)
{
	int ret;

	ret = change_page_attr_set_clr(&addr, numpages,
			__pgprot(L_PTE_MT_BUFFERABLE),
			__pgprot(L_PTE_MT_MASK),
			0, 0, NULL);
	return ret;
}
EXPORT_SYMBOL(set_memory_wc);

/* Mark a linear range write-back cacheable (the default memory type). */
int set_memory_wb(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(&addr, numpages,
			__pgprot(L_PTE_MT_WRITEBACK),
			__pgprot(L_PTE_MT_MASK),
			0, 0, NULL);
}
EXPORT_SYMBOL(set_memory_wb);

/* Mark a linear range inner-write-back (outer non-cacheable). */
int set_memory_iwb(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(&addr, numpages,
			__pgprot(L_PTE_MT_INNER_WB),
			__pgprot(L_PTE_MT_MASK),
			0, 0, NULL);
}
EXPORT_SYMBOL(set_memory_iwb);

/* Array variants: same memory-type changes over a list of addresses. */
int set_memory_array_wb(unsigned long *addr, int addrinarray)
{
	return change_page_attr_set_clr(addr, addrinarray,
			__pgprot(L_PTE_MT_WRITEBACK),
			__pgprot(L_PTE_MT_MASK),
			0, CPA_ARRAY, NULL);
}
EXPORT_SYMBOL(set_memory_array_wb);

int set_memory_array_iwb(unsigned long *addr, int addrinarray)
{
	return change_page_attr_set_clr(addr, addrinarray,
			__pgprot(L_PTE_MT_INNER_WB),
			__pgprot(L_PTE_MT_MASK),
			0, CPA_ARRAY, NULL);
}
EXPORT_SYMBOL(set_memory_array_iwb);

/* Executable-permission helpers: clear/set the execute-never (XN) bit. */
int set_memory_x(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages,
			__pgprot(L_PTE_XN), 0);
}
EXPORT_SYMBOL(set_memory_x);

int set_memory_nx(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages,
			__pgprot(L_PTE_XN), 0);
}
EXPORT_SYMBOL(set_memory_nx);

/* Write-permission helpers: set/clear the read-only bit. */
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_page_attr_set(&addr, numpages,
			__pgprot(L_PTE_RDONLY), 0);
}
EXPORT_SYMBOL_GPL(set_memory_ro);

int set_memory_rw(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages,
			__pgprot(L_PTE_RDONLY), 0);
}
EXPORT_SYMBOL_GPL(set_memory_rw);

/* Mark a range not-present (used by debug page-alloc style poisoning). */
int set_memory_np(unsigned long addr, int numpages)
{
	return change_page_attr_clear(&addr, numpages,
			__pgprot(L_PTE_PRESENT), 0);
}

/* Force-split any large mappings in the range into 4K pages
 * (force_split=1, no protection bits changed). */
int set_memory_4k(unsigned long addr, int numpages)
{
	return change_page_attr_set_clr(&addr, numpages,
			__pgprot(0), __pgprot(0), 1, 0, NULL);
}

/* Common worker for the set_pages_array_* memory-type helpers below. */
static int _set_pages_array(struct page **pages, int addrinarray,
		unsigned long set, unsigned long clr)
{
	return change_page_attr_set_clr(NULL, addrinarray,
			__pgprot(set), __pgprot(clr), 0,
			CPA_PAGES_ARRAY, pages);
}

int set_pages_array_uc(struct page **pages, int addrinarray)
{
	return _set_pages_array(pages, addrinarray,
		L_PTE_MT_UNCACHED, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int addrinarray)
{
	return _set_pages_array(pages, addrinarray,
		L_PTE_MT_BUFFERABLE, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_pages_array_wc);

int set_pages_array_wb(struct page **pages, int addrinarray)
{
	return _set_pages_array(pages, addrinarray,
		L_PTE_MT_WRITEBACK, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_array_iwb(struct page **pages, int addrinarray)
{
	return _set_pages_array(pages, addrinarray,
		L_PTE_MT_INNER_WB, L_PTE_MT_MASK);
}
EXPORT_SYMBOL(set_pages_array_iwb);

#else /* CONFIG_CPA */

/*
 * !CONFIG_CPA fallback: page attributes are never actually changed;
 * the helpers only perform the cache maintenance callers rely on.
 */
void update_page_count(int level, unsigned long pages)
{
}

/* Clean the given pages from inner+outer caches; above the set/way
 * threshold the whole inner cache is flushed once instead. */
static void flush_cache(struct page **pages, int numpages)
{
	unsigned int i;
	bool flush_inner = true;
	unsigned long base;

	if (numpages >= FLUSH_CLEAN_BY_SET_WAY_PAGE_THRESHOLD) {
		inner_flush_cache_all();
		flush_inner = false;
	}

	for (i = 0; i < numpages; i++) {
		if (flush_inner)
			__flush_dcache_page(page_mapping(pages[i]), pages[i]);
		base = page_to_phys(pages[i]);
		outer_flush_range(base, base + PAGE_SIZE);
	}
}

int set_pages_array_uc(struct page **pages, int addrinarray)
{
	flush_cache(pages, addrinarray);
	return 0;
}
EXPORT_SYMBOL(set_pages_array_uc);

int set_pages_array_wc(struct page **pages, int addrinarray)
{
	flush_cache(pages, addrinarray);
	return 0;
}
EXPORT_SYMBOL(set_pages_array_wc);

/* Write-back is the default type: nothing to flush, nothing to change. */
int set_pages_array_wb(struct page **pages, int addrinarray)
{
	return 0;
}
EXPORT_SYMBOL(set_pages_array_wb);

int set_pages_array_iwb(struct page **pages, int addrinarray)
{
	flush_cache(pages, addrinarray);
	return 0;
}
EXPORT_SYMBOL(set_pages_array_iwb);

#endif
gpl-2.0
Stefan-Schmidt/linux-wpan-next
drivers/net/ethernet/sfc/falcon/falcon_boards.c
320
21866
/**************************************************************************** * Driver for Solarflare network controllers and boards * Copyright 2007-2012 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/rtnetlink.h> #include "net_driver.h" #include "phy.h" #include "efx.h" #include "nic.h" #include "workarounds.h" /* Macros for unpacking the board revision */ /* The revision info is in host byte order. */ #define FALCON_BOARD_TYPE(_rev) (_rev >> 8) #define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf) #define FALCON_BOARD_MINOR(_rev) (_rev & 0xf) /* Board types */ #define FALCON_BOARD_SFE4001 0x01 #define FALCON_BOARD_SFE4002 0x02 #define FALCON_BOARD_SFE4003 0x03 #define FALCON_BOARD_SFN4112F 0x52 /* Board temperature is about 15°C above ambient when air flow is * limited. The maximum acceptable ambient temperature varies * depending on the PHY specifications but the critical temperature * above which we should shut down to avoid damage is 80°C. */ #define FALCON_BOARD_TEMP_BIAS 15 #define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS) /* SFC4000 datasheet says: 'The maximum permitted junction temperature * is 125°C; the thermal design of the environment for the SFC4000 * should aim to keep this well below 100°C.' 
*/ #define FALCON_JUNC_TEMP_MIN 0 #define FALCON_JUNC_TEMP_MAX 90 #define FALCON_JUNC_TEMP_CRIT 125 /***************************************************************************** * Support for LM87 sensor chip used on several boards */ #define LM87_REG_TEMP_HW_INT_LOCK 0x13 #define LM87_REG_TEMP_HW_EXT_LOCK 0x14 #define LM87_REG_TEMP_HW_INT 0x17 #define LM87_REG_TEMP_HW_EXT 0x18 #define LM87_REG_TEMP_EXT1 0x26 #define LM87_REG_TEMP_INT 0x27 #define LM87_REG_ALARMS1 0x41 #define LM87_REG_ALARMS2 0x42 #define LM87_IN_LIMITS(nr, _min, _max) \ 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min #define LM87_AIN_LIMITS(nr, _min, _max) \ 0x3B + (nr), _max, 0x1A + (nr), _min #define LM87_TEMP_INT_LIMITS(_min, _max) \ 0x39, _max, 0x3A, _min #define LM87_TEMP_EXT1_LIMITS(_min, _max) \ 0x37, _max, 0x38, _min #define LM87_ALARM_TEMP_INT 0x10 #define LM87_ALARM_TEMP_EXT1 0x20 #if IS_ENABLED(CONFIG_SENSORS_LM87) static int ef4_poke_lm87(struct i2c_client *client, const u8 *reg_values) { while (*reg_values) { u8 reg = *reg_values++; u8 value = *reg_values++; int rc = i2c_smbus_write_byte_data(client, reg, value); if (rc) return rc; } return 0; } static const u8 falcon_lm87_common_regs[] = { LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT, LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT, LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX), LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT, LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT, 0 }; static int ef4_init_lm87(struct ef4_nic *efx, const struct i2c_board_info *info, const u8 *reg_values) { struct falcon_board *board = falcon_board(efx); struct i2c_client *client = i2c_new_device(&board->i2c_adap, info); int rc; if (!client) return -EIO; /* Read-to-clear alarm/interrupt status */ i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); rc = ef4_poke_lm87(client, reg_values); if (rc) goto err; rc = ef4_poke_lm87(client, falcon_lm87_common_regs); if (rc) goto err; 
board->hwmon_client = client; return 0; err: i2c_unregister_device(client); return rc; } static void ef4_fini_lm87(struct ef4_nic *efx) { i2c_unregister_device(falcon_board(efx)->hwmon_client); } static int ef4_check_lm87(struct ef4_nic *efx, unsigned mask) { struct i2c_client *client = falcon_board(efx)->hwmon_client; bool temp_crit, elec_fault, is_failure; u16 alarms; s32 reg; /* If link is up then do not monitor temperature */ if (EF4_WORKAROUND_7884(efx) && efx->link_state.up) return 0; reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); if (reg < 0) return reg; alarms = reg; reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); if (reg < 0) return reg; alarms |= reg << 8; alarms &= mask; temp_crit = false; if (alarms & LM87_ALARM_TEMP_INT) { reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT); if (reg < 0) return reg; if (reg > FALCON_BOARD_TEMP_CRIT) temp_crit = true; } if (alarms & LM87_ALARM_TEMP_EXT1) { reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1); if (reg < 0) return reg; if (reg > FALCON_JUNC_TEMP_CRIT) temp_crit = true; } elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1); is_failure = temp_crit || elec_fault; if (alarms) netif_err(efx, hw, efx->net_dev, "LM87 detected a hardware %s (status %02x:%02x)" "%s%s%s%s\n", is_failure ? "failure" : "problem", alarms & 0xff, alarms >> 8, (alarms & LM87_ALARM_TEMP_INT) ? "; board is overheating" : "", (alarms & LM87_ALARM_TEMP_EXT1) ? "; controller is overheating" : "", temp_crit ? "; reached critical temperature" : "", elec_fault ? "; electrical fault" : ""); return is_failure ? 
-ERANGE : 0; } #else /* !CONFIG_SENSORS_LM87 */ static inline int ef4_init_lm87(struct ef4_nic *efx, const struct i2c_board_info *info, const u8 *reg_values) { return 0; } static inline void ef4_fini_lm87(struct ef4_nic *efx) { } static inline int ef4_check_lm87(struct ef4_nic *efx, unsigned mask) { return 0; } #endif /* CONFIG_SENSORS_LM87 */ /***************************************************************************** * Support for the SFE4001 NIC. * * The SFE4001 does not power-up fully at reset due to its high power * consumption. We control its power via a PCA9539 I/O expander. * It also has a MAX6647 temperature monitor which we expose to * the lm90 driver. * * This also provides minimal support for reflashing the PHY, which is * initiated by resetting it with the FLASH_CFG_1 pin pulled down. * On SFE4001 rev A2 and later this is connected to the 3V3X output of * the IO-expander. * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually * exclusive with the network device being open. 
*/ /************************************************************************** * Support for I2C IO Expander device on SFE4001 */ #define PCA9539 0x74 #define P0_IN 0x00 #define P0_OUT 0x02 #define P0_INVERT 0x04 #define P0_CONFIG 0x06 #define P0_EN_1V0X_LBN 0 #define P0_EN_1V0X_WIDTH 1 #define P0_EN_1V2_LBN 1 #define P0_EN_1V2_WIDTH 1 #define P0_EN_2V5_LBN 2 #define P0_EN_2V5_WIDTH 1 #define P0_EN_3V3X_LBN 3 #define P0_EN_3V3X_WIDTH 1 #define P0_EN_5V_LBN 4 #define P0_EN_5V_WIDTH 1 #define P0_SHORTEN_JTAG_LBN 5 #define P0_SHORTEN_JTAG_WIDTH 1 #define P0_X_TRST_LBN 6 #define P0_X_TRST_WIDTH 1 #define P0_DSP_RESET_LBN 7 #define P0_DSP_RESET_WIDTH 1 #define P1_IN 0x01 #define P1_OUT 0x03 #define P1_INVERT 0x05 #define P1_CONFIG 0x07 #define P1_AFE_PWD_LBN 0 #define P1_AFE_PWD_WIDTH 1 #define P1_DSP_PWD25_LBN 1 #define P1_DSP_PWD25_WIDTH 1 #define P1_RESERVED_LBN 2 #define P1_RESERVED_WIDTH 2 #define P1_SPARE_LBN 4 #define P1_SPARE_WIDTH 4 /* Temperature Sensor */ #define MAX664X_REG_RSL 0x02 #define MAX664X_REG_WLHO 0x0B static void sfe4001_poweroff(struct ef4_nic *efx) { struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client; struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client; /* Turn off all power rails and disable outputs */ i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff); i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff); i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff); /* Clear any over-temperature alert */ i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL); } static int sfe4001_poweron(struct ef4_nic *efx) { struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client; struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client; unsigned int i, j; int rc; u8 out; /* Clear any previous over-temperature alert */ rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL); if (rc < 0) return rc; /* Enable port 0 and port 1 outputs on IO expander */ rc = i2c_smbus_write_byte_data(ioexp_client, 
P0_CONFIG, 0x00); if (rc) return rc; rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff & ~(1 << P1_SPARE_LBN)); if (rc) goto fail_on; /* If PHY power is on, turn it all off and wait 1 second to * ensure a full reset. */ rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT); if (rc < 0) goto fail_on; out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) | (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | (0 << P0_EN_1V0X_LBN)); if (rc != out) { netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n"); rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); if (rc) goto fail_on; schedule_timeout_uninterruptible(HZ); } for (i = 0; i < 20; ++i) { /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */ out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | (1 << P0_X_TRST_LBN)); if (efx->phy_mode & PHY_MODE_SPECIAL) out |= 1 << P0_EN_3V3X_LBN; rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); if (rc) goto fail_on; msleep(10); /* Turn on 1V power rail */ out &= ~(1 << P0_EN_1V0X_LBN); rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); if (rc) goto fail_on; netif_info(efx, hw, efx->net_dev, "waiting for DSP boot (attempt %d)...\n", i); /* In flash config mode, DSP does not turn on AFE, so * just wait 1 second. 
*/ if (efx->phy_mode & PHY_MODE_SPECIAL) { schedule_timeout_uninterruptible(HZ); return 0; } for (j = 0; j < 10; ++j) { msleep(100); /* Check DSP has asserted AFE power line */ rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN); if (rc < 0) goto fail_on; if (rc & (1 << P1_AFE_PWD_LBN)) return 0; } } netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n"); rc = -ETIMEDOUT; fail_on: sfe4001_poweroff(efx); return rc; } static ssize_t show_phy_flash_cfg(struct device *dev, struct device_attribute *attr, char *buf) { struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL)); } static ssize_t set_phy_flash_cfg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ef4_nic *efx = pci_get_drvdata(to_pci_dev(dev)); enum ef4_phy_mode old_mode, new_mode; int err; rtnl_lock(); old_mode = efx->phy_mode; if (count == 0 || *buf == '0') new_mode = old_mode & ~PHY_MODE_SPECIAL; else new_mode = PHY_MODE_SPECIAL; if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) { err = 0; } else if (efx->state != STATE_READY || netif_running(efx->net_dev)) { err = -EBUSY; } else { /* Reset the PHY, reconfigure the MAC and enable/disable * MAC stats accordingly. */ efx->phy_mode = new_mode; if (new_mode & PHY_MODE_SPECIAL) falcon_stop_nic_stats(efx); err = sfe4001_poweron(efx); if (!err) err = ef4_reconfigure_port(efx); if (!(new_mode & PHY_MODE_SPECIAL)) falcon_start_nic_stats(efx); } rtnl_unlock(); return err ? 
err : count; } static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg); static void sfe4001_fini(struct ef4_nic *efx) { struct falcon_board *board = falcon_board(efx); netif_info(efx, drv, efx->net_dev, "%s\n", __func__); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); sfe4001_poweroff(efx); i2c_unregister_device(board->ioexp_client); i2c_unregister_device(board->hwmon_client); } static int sfe4001_check_hw(struct ef4_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; s32 status; /* If XAUI link is up then do not monitor */ if (EF4_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required) return 0; /* Check the powered status of the PHY. Lack of power implies that * the MAX6647 has shut down power to it, probably due to a temp. * alarm. Reading the power status rather than the MAX6647 status * directly because the later is read-to-clear and would thus * start to power up the PHY again when polled, causing us to blip * the power undesirably. * We know we can read from the IO expander because we did * it during power-on. Assume failure now is bad news. */ status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN); if (status >= 0 && (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0) return 0; /* Use board power control, not PHY power control */ sfe4001_poweroff(efx); efx->phy_mode = PHY_MODE_OFF; return (status < 0) ? -EIO : -ERANGE; } static const struct i2c_board_info sfe4001_hwmon_info = { I2C_BOARD_INFO("max6647", 0x4e), }; /* This board uses an I2C expander to provider power to the PHY, which needs to * be turned on before the PHY can be used. 
* Context: Process context, rtnl lock held */ static int sfe4001_init(struct ef4_nic *efx) { struct falcon_board *board = falcon_board(efx); int rc; #if IS_ENABLED(CONFIG_SENSORS_LM90) board->hwmon_client = i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info); #else board->hwmon_client = i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr); #endif if (!board->hwmon_client) return -EIO; /* Raise board/PHY high limit from 85 to 90 degrees Celsius */ rc = i2c_smbus_write_byte_data(board->hwmon_client, MAX664X_REG_WLHO, 90); if (rc) goto fail_hwmon; board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539); if (!board->ioexp_client) { rc = -EIO; goto fail_hwmon; } if (efx->phy_mode & PHY_MODE_SPECIAL) { /* PHY won't generate a 156.25 MHz clock and MAC stats fetch * will fail. */ falcon_stop_nic_stats(efx); } rc = sfe4001_poweron(efx); if (rc) goto fail_ioexp; rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); if (rc) goto fail_on; netif_info(efx, hw, efx->net_dev, "PHY is powered on\n"); return 0; fail_on: sfe4001_poweroff(efx); fail_ioexp: i2c_unregister_device(board->ioexp_client); fail_hwmon: i2c_unregister_device(board->hwmon_client); return rc; } /***************************************************************************** * Support for the SFE4002 * */ static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ static const u8 sfe4002_lm87_regs[] = { LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS), LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), 0 }; static const struct 
i2c_board_info sfe4002_hwmon_info = { I2C_BOARD_INFO("lm87", 0x2e), .platform_data = &sfe4002_lm87_channel, }; /****************************************************************************/ /* LED allocations. Note that on rev A0 boards the schematic and the reality * differ: red and green are swapped. Below is the fixed (A1) layout (there * are only 3 A0 boards in existence, so no real reason to make this * conditional). */ #define SFE4002_FAULT_LED (2) /* Red */ #define SFE4002_RX_LED (0) /* Green */ #define SFE4002_TX_LED (1) /* Amber */ static void sfe4002_init_phy(struct ef4_nic *efx) { /* Set the TX and RX LEDs to reflect status and activity, and the * fault LED off */ falcon_qt202x_set_led(efx, SFE4002_TX_LED, QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT); falcon_qt202x_set_led(efx, SFE4002_RX_LED, QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT); falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF); } static void sfe4002_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode) { falcon_qt202x_set_led( efx, SFE4002_FAULT_LED, (mode == EF4_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF); } static int sfe4002_check_hw(struct ef4_nic *efx) { struct falcon_board *board = falcon_board(efx); /* A0 board rev. 4002s report a temperature fault the whole time * (bad sensor) so we mask it out. */ unsigned alarm_mask = (board->major == 0 && board->minor == 0) ? 
~LM87_ALARM_TEMP_EXT1 : ~0; return ef4_check_lm87(efx, alarm_mask); } static int sfe4002_init(struct ef4_nic *efx) { return ef4_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs); } /***************************************************************************** * Support for the SFN4112F * */ static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ static const u8 sfn4112f_lm87_regs[] = { LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS), LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), 0 }; static const struct i2c_board_info sfn4112f_hwmon_info = { I2C_BOARD_INFO("lm87", 0x2e), .platform_data = &sfn4112f_lm87_channel, }; #define SFN4112F_ACT_LED 0 #define SFN4112F_LINK_LED 1 static void sfn4112f_init_phy(struct ef4_nic *efx) { falcon_qt202x_set_led(efx, SFN4112F_ACT_LED, QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT); falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT); } static void sfn4112f_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode) { int reg; switch (mode) { case EF4_LED_OFF: reg = QUAKE_LED_OFF; break; case EF4_LED_ON: reg = QUAKE_LED_ON; break; default: reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT; break; } falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg); } static int sfn4112f_check_hw(struct ef4_nic *efx) { /* Mask out unused sensors */ return ef4_check_lm87(efx, ~0x48); } static int sfn4112f_init(struct ef4_nic *efx) { return ef4_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs); } /***************************************************************************** * Support for the SFE4003 * */ static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */ static 
const u8 sfe4003_lm87_regs[] = { LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS), 0 }; static const struct i2c_board_info sfe4003_hwmon_info = { I2C_BOARD_INFO("lm87", 0x2e), .platform_data = &sfe4003_lm87_channel, }; /* Board-specific LED info. */ #define SFE4003_RED_LED_GPIO 11 #define SFE4003_LED_ON 1 #define SFE4003_LED_OFF 0 static void sfe4003_set_id_led(struct ef4_nic *efx, enum ef4_led_mode mode) { struct falcon_board *board = falcon_board(efx); /* The LEDs were not wired to GPIOs before A3 */ if (board->minor < 3 && board->major == 0) return; falcon_txc_set_gpio_val( efx, SFE4003_RED_LED_GPIO, (mode == EF4_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF); } static void sfe4003_init_phy(struct ef4_nic *efx) { struct falcon_board *board = falcon_board(efx); /* The LEDs were not wired to GPIOs before A3 */ if (board->minor < 3 && board->major == 0) return; falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT); falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF); } static int sfe4003_check_hw(struct ef4_nic *efx) { struct falcon_board *board = falcon_board(efx); /* A0/A1/A2 board rev. 4003s report a temperature fault the whole time * (bad sensor) so we mask it out. */ unsigned alarm_mask = (board->major == 0 && board->minor <= 2) ? 
~LM87_ALARM_TEMP_EXT1 : ~0; return ef4_check_lm87(efx, alarm_mask); } static int sfe4003_init(struct ef4_nic *efx) { return ef4_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs); } static const struct falcon_board_type board_types[] = { { .id = FALCON_BOARD_SFE4001, .init = sfe4001_init, .init_phy = ef4_port_dummy_op_void, .fini = sfe4001_fini, .set_id_led = tenxpress_set_id_led, .monitor = sfe4001_check_hw, }, { .id = FALCON_BOARD_SFE4002, .init = sfe4002_init, .init_phy = sfe4002_init_phy, .fini = ef4_fini_lm87, .set_id_led = sfe4002_set_id_led, .monitor = sfe4002_check_hw, }, { .id = FALCON_BOARD_SFE4003, .init = sfe4003_init, .init_phy = sfe4003_init_phy, .fini = ef4_fini_lm87, .set_id_led = sfe4003_set_id_led, .monitor = sfe4003_check_hw, }, { .id = FALCON_BOARD_SFN4112F, .init = sfn4112f_init, .init_phy = sfn4112f_init_phy, .fini = ef4_fini_lm87, .set_id_led = sfn4112f_set_id_led, .monitor = sfn4112f_check_hw, }, }; int falcon_probe_board(struct ef4_nic *efx, u16 revision_info) { struct falcon_board *board = falcon_board(efx); u8 type_id = FALCON_BOARD_TYPE(revision_info); int i; board->major = FALCON_BOARD_MAJOR(revision_info); board->minor = FALCON_BOARD_MINOR(revision_info); for (i = 0; i < ARRAY_SIZE(board_types); i++) if (board_types[i].id == type_id) board->type = &board_types[i]; if (board->type) { return 0; } else { netif_err(efx, probe, efx->net_dev, "unknown board type %d\n", type_id); return -ENODEV; } }
gpl-2.0
Perferom/android_kernel_huawei_msm7x25
fs/hppfs/hppfs.c
576
16758
/* * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <linux/ctype.h> #include <linux/dcache.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/module.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/statfs.h> #include <linux/types.h> #include <asm/uaccess.h> #include "os.h" static struct inode *get_inode(struct super_block *, struct dentry *); struct hppfs_data { struct list_head list; char contents[PAGE_SIZE - sizeof(struct list_head)]; }; struct hppfs_private { struct file *proc_file; int host_fd; loff_t len; struct hppfs_data *contents; }; struct hppfs_inode_info { struct dentry *proc_dentry; struct inode vfs_inode; }; static inline struct hppfs_inode_info *HPPFS_I(struct inode *inode) { return container_of(inode, struct hppfs_inode_info, vfs_inode); } #define HPPFS_SUPER_MAGIC 0xb00000ee static const struct super_operations hppfs_sbops; static int is_pid(struct dentry *dentry) { struct super_block *sb; int i; sb = dentry->d_sb; if (dentry->d_parent != sb->s_root) return 0; for (i = 0; i < dentry->d_name.len; i++) { if (!isdigit(dentry->d_name.name[i])) return 0; } return 1; } static char *dentry_name(struct dentry *dentry, int extra) { struct dentry *parent; char *root, *name; const char *seg_name; int len, seg_len; len = 0; parent = dentry; while (parent->d_parent != parent) { if (is_pid(parent)) len += strlen("pid") + 1; else len += parent->d_name.len + 1; parent = parent->d_parent; } root = "proc"; len += strlen(root); name = kmalloc(len + extra + 1, GFP_KERNEL); if (name == NULL) return NULL; name[len] = '\0'; parent = dentry; while (parent->d_parent != parent) { if (is_pid(parent)) { seg_name = "pid"; seg_len = strlen("pid"); } else { seg_name = parent->d_name.name; seg_len = parent->d_name.len; } len -= seg_len + 1; name[len] = '/'; strncpy(&name[len + 1], seg_name, seg_len); parent = 
parent->d_parent; } strncpy(name, root, strlen(root)); return name; } static int file_removed(struct dentry *dentry, const char *file) { char *host_file; int extra, fd; extra = 0; if (file != NULL) extra += strlen(file) + 1; host_file = dentry_name(dentry, extra + strlen("/remove")); if (host_file == NULL) { printk(KERN_ERR "file_removed : allocation failed\n"); return -ENOMEM; } if (file != NULL) { strcat(host_file, "/"); strcat(host_file, file); } strcat(host_file, "/remove"); fd = os_open_file(host_file, of_read(OPENFLAGS()), 0); kfree(host_file); if (fd > 0) { os_close_file(fd); return 1; } return 0; } static struct dentry *hppfs_lookup(struct inode *ino, struct dentry *dentry, struct nameidata *nd) { struct dentry *proc_dentry, *new, *parent; struct inode *inode; int err, deleted; deleted = file_removed(dentry, NULL); if (deleted < 0) return ERR_PTR(deleted); else if (deleted) return ERR_PTR(-ENOENT); err = -ENOMEM; parent = HPPFS_I(ino)->proc_dentry; mutex_lock(&parent->d_inode->i_mutex); proc_dentry = d_lookup(parent, &dentry->d_name); if (proc_dentry == NULL) { proc_dentry = d_alloc(parent, &dentry->d_name); if (proc_dentry == NULL) { mutex_unlock(&parent->d_inode->i_mutex); goto out; } new = (*parent->d_inode->i_op->lookup)(parent->d_inode, proc_dentry, NULL); if (new) { dput(proc_dentry); proc_dentry = new; } } mutex_unlock(&parent->d_inode->i_mutex); if (IS_ERR(proc_dentry)) return proc_dentry; err = -ENOMEM; inode = get_inode(ino->i_sb, proc_dentry); if (!inode) goto out_dput; d_add(dentry, inode); return NULL; out_dput: dput(proc_dentry); out: return ERR_PTR(err); } static const struct inode_operations hppfs_file_iops = { }; static ssize_t read_proc(struct file *file, char __user *buf, ssize_t count, loff_t *ppos, int is_user) { ssize_t (*read)(struct file *, char __user *, size_t, loff_t *); ssize_t n; read = file->f_path.dentry->d_inode->i_fop->read; if (!is_user) set_fs(KERNEL_DS); n = (*read)(file, buf, count, &file->f_pos); if (!is_user) 
set_fs(USER_DS); if (ppos) *ppos = file->f_pos; return n; } static ssize_t hppfs_read_file(int fd, char __user *buf, ssize_t count) { ssize_t n; int cur, err; char *new_buf; n = -ENOMEM; new_buf = kmalloc(PAGE_SIZE, GFP_KERNEL); if (new_buf == NULL) { printk(KERN_ERR "hppfs_read_file : kmalloc failed\n"); goto out; } n = 0; while (count > 0) { cur = min_t(ssize_t, count, PAGE_SIZE); err = os_read_file(fd, new_buf, cur); if (err < 0) { printk(KERN_ERR "hppfs_read : read failed, " "errno = %d\n", err); n = err; goto out_free; } else if (err == 0) break; if (copy_to_user(buf, new_buf, err)) { n = -EFAULT; goto out_free; } n += err; count -= err; } out_free: kfree(new_buf); out: return n; } static ssize_t hppfs_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct hppfs_private *hppfs = file->private_data; struct hppfs_data *data; loff_t off; int err; if (hppfs->contents != NULL) { int rem; if (*ppos >= hppfs->len) return 0; data = hppfs->contents; off = *ppos; while (off >= sizeof(data->contents)) { data = list_entry(data->list.next, struct hppfs_data, list); off -= sizeof(data->contents); } if (off + count > hppfs->len) count = hppfs->len - off; rem = copy_to_user(buf, &data->contents[off], count); *ppos += count - rem; if (rem > 0) return -EFAULT; } else if (hppfs->host_fd != -1) { err = os_seek_file(hppfs->host_fd, *ppos); if (err) { printk(KERN_ERR "hppfs_read : seek failed, " "errno = %d\n", err); return err; } err = hppfs_read_file(hppfs->host_fd, buf, count); if (err < 0) { printk(KERN_ERR "hppfs_read: read failed: %d\n", err); return err; } count = err; if (count > 0) *ppos += count; } else count = read_proc(hppfs->proc_file, buf, count, ppos, 1); return count; } static ssize_t hppfs_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; ssize_t (*write)(struct file *, const char __user *, size_t, loff_t *); write = 
proc_file->f_path.dentry->d_inode->i_fop->write; return (*write)(proc_file, buf, len, ppos); } static int open_host_sock(char *host_file, int *filter_out) { char *end; int fd; end = &host_file[strlen(host_file)]; strcpy(end, "/rw"); *filter_out = 1; fd = os_connect_socket(host_file); if (fd > 0) return fd; strcpy(end, "/r"); *filter_out = 0; fd = os_connect_socket(host_file); return fd; } static void free_contents(struct hppfs_data *head) { struct hppfs_data *data; struct list_head *ele, *next; if (head == NULL) return; list_for_each_safe(ele, next, &head->list) { data = list_entry(ele, struct hppfs_data, list); kfree(data); } kfree(head); } static struct hppfs_data *hppfs_get_data(int fd, int filter, struct file *proc_file, struct file *hppfs_file, loff_t *size_out) { struct hppfs_data *data, *new, *head; int n, err; err = -ENOMEM; data = kmalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) { printk(KERN_ERR "hppfs_get_data : head allocation failed\n"); goto failed; } INIT_LIST_HEAD(&data->list); head = data; *size_out = 0; if (filter) { while ((n = read_proc(proc_file, data->contents, sizeof(data->contents), NULL, 0)) > 0) os_write_file(fd, data->contents, n); err = os_shutdown_socket(fd, 0, 1); if (err) { printk(KERN_ERR "hppfs_get_data : failed to shut down " "socket\n"); goto failed_free; } } while (1) { n = os_read_file(fd, data->contents, sizeof(data->contents)); if (n < 0) { err = n; printk(KERN_ERR "hppfs_get_data : read failed, " "errno = %d\n", err); goto failed_free; } else if (n == 0) break; *size_out += n; if (n < sizeof(data->contents)) break; new = kmalloc(sizeof(*data), GFP_KERNEL); if (new == 0) { printk(KERN_ERR "hppfs_get_data : data allocation " "failed\n"); err = -ENOMEM; goto failed_free; } INIT_LIST_HEAD(&new->list); list_add(&new->list, &data->list); data = new; } return head; failed_free: free_contents(head); failed: return ERR_PTR(err); } static struct hppfs_private *hppfs_data(void) { struct hppfs_private *data; data = 
kmalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) return data; *data = ((struct hppfs_private ) { .host_fd = -1, .len = -1, .contents = NULL } ); return data; } static int file_mode(int fmode) { if (fmode == (FMODE_READ | FMODE_WRITE)) return O_RDWR; if (fmode == FMODE_READ) return O_RDONLY; if (fmode == FMODE_WRITE) return O_WRONLY; return 0; } static int hppfs_open(struct inode *inode, struct file *file) { const struct cred *cred = file->f_cred; struct hppfs_private *data; struct vfsmount *proc_mnt; struct dentry *proc_dentry; char *host_file; int err, fd, type, filter; err = -ENOMEM; data = hppfs_data(); if (data == NULL) goto out; host_file = dentry_name(file->f_path.dentry, strlen("/rw")); if (host_file == NULL) goto out_free2; proc_dentry = HPPFS_I(inode)->proc_dentry; proc_mnt = inode->i_sb->s_fs_info; /* XXX This isn't closed anywhere */ data->proc_file = dentry_open(dget(proc_dentry), mntget(proc_mnt), file_mode(file->f_mode), cred); err = PTR_ERR(data->proc_file); if (IS_ERR(data->proc_file)) goto out_free1; type = os_file_type(host_file); if (type == OS_TYPE_FILE) { fd = os_open_file(host_file, of_read(OPENFLAGS()), 0); if (fd >= 0) data->host_fd = fd; else printk(KERN_ERR "hppfs_open : failed to open '%s', " "errno = %d\n", host_file, -fd); data->contents = NULL; } else if (type == OS_TYPE_DIR) { fd = open_host_sock(host_file, &filter); if (fd > 0) { data->contents = hppfs_get_data(fd, filter, data->proc_file, file, &data->len); if (!IS_ERR(data->contents)) data->host_fd = fd; } else printk(KERN_ERR "hppfs_open : failed to open a socket " "in '%s', errno = %d\n", host_file, -fd); } kfree(host_file); file->private_data = data; return 0; out_free1: kfree(host_file); out_free2: free_contents(data->contents); kfree(data); out: return err; } static int hppfs_dir_open(struct inode *inode, struct file *file) { const struct cred *cred = file->f_cred; struct hppfs_private *data; struct vfsmount *proc_mnt; struct dentry *proc_dentry; int err; err = -ENOMEM; 
data = hppfs_data(); if (data == NULL) goto out; proc_dentry = HPPFS_I(inode)->proc_dentry; proc_mnt = inode->i_sb->s_fs_info; data->proc_file = dentry_open(dget(proc_dentry), mntget(proc_mnt), file_mode(file->f_mode), cred); err = PTR_ERR(data->proc_file); if (IS_ERR(data->proc_file)) goto out_free; file->private_data = data; return 0; out_free: kfree(data); out: return err; } static loff_t hppfs_llseek(struct file *file, loff_t off, int where) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; loff_t (*llseek)(struct file *, loff_t, int); loff_t ret; llseek = proc_file->f_path.dentry->d_inode->i_fop->llseek; if (llseek != NULL) { ret = (*llseek)(proc_file, off, where); if (ret < 0) return ret; } return default_llseek(file, off, where); } static const struct file_operations hppfs_file_fops = { .owner = NULL, .llseek = hppfs_llseek, .read = hppfs_read, .write = hppfs_write, .open = hppfs_open, }; struct hppfs_dirent { void *vfs_dirent; filldir_t filldir; struct dentry *dentry; }; static int hppfs_filldir(void *d, const char *name, int size, loff_t offset, u64 inode, unsigned int type) { struct hppfs_dirent *dirent = d; if (file_removed(dirent->dentry, name)) return 0; return (*dirent->filldir)(dirent->vfs_dirent, name, size, offset, inode, type); } static int hppfs_readdir(struct file *file, void *ent, filldir_t filldir) { struct hppfs_private *data = file->private_data; struct file *proc_file = data->proc_file; int (*readdir)(struct file *, void *, filldir_t); struct hppfs_dirent dirent = ((struct hppfs_dirent) { .vfs_dirent = ent, .filldir = filldir, .dentry = file->f_path.dentry }); int err; readdir = proc_file->f_path.dentry->d_inode->i_fop->readdir; proc_file->f_pos = file->f_pos; err = (*readdir)(proc_file, &dirent, hppfs_filldir); file->f_pos = proc_file->f_pos; return err; } static int hppfs_fsync(struct file *file, struct dentry *dentry, int datasync) { return 0; } static const struct file_operations hppfs_dir_fops 
= { .owner = NULL, .readdir = hppfs_readdir, .open = hppfs_dir_open, .fsync = hppfs_fsync, }; static int hppfs_statfs(struct dentry *dentry, struct kstatfs *sf) { sf->f_blocks = 0; sf->f_bfree = 0; sf->f_bavail = 0; sf->f_files = 0; sf->f_ffree = 0; sf->f_type = HPPFS_SUPER_MAGIC; return 0; } static struct inode *hppfs_alloc_inode(struct super_block *sb) { struct hppfs_inode_info *hi; hi = kmalloc(sizeof(*hi), GFP_KERNEL); if (!hi) return NULL; hi->proc_dentry = NULL; inode_init_once(&hi->vfs_inode); return &hi->vfs_inode; } void hppfs_delete_inode(struct inode *ino) { dput(HPPFS_I(ino)->proc_dentry); mntput(ino->i_sb->s_fs_info); clear_inode(ino); } static void hppfs_destroy_inode(struct inode *inode) { kfree(HPPFS_I(inode)); } static const struct super_operations hppfs_sbops = { .alloc_inode = hppfs_alloc_inode, .destroy_inode = hppfs_destroy_inode, .delete_inode = hppfs_delete_inode, .statfs = hppfs_statfs, }; static int hppfs_readlink(struct dentry *dentry, char __user *buffer, int buflen) { struct dentry *proc_dentry; proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; return proc_dentry->d_inode->i_op->readlink(proc_dentry, buffer, buflen); } static void *hppfs_follow_link(struct dentry *dentry, struct nameidata *nd) { struct dentry *proc_dentry; proc_dentry = HPPFS_I(dentry->d_inode)->proc_dentry; return proc_dentry->d_inode->i_op->follow_link(proc_dentry, nd); } static const struct inode_operations hppfs_dir_iops = { .lookup = hppfs_lookup, }; static const struct inode_operations hppfs_link_iops = { .readlink = hppfs_readlink, .follow_link = hppfs_follow_link, }; static struct inode *get_inode(struct super_block *sb, struct dentry *dentry) { struct inode *proc_ino = dentry->d_inode; struct inode *inode = new_inode(sb); if (!inode) return ERR_PTR(-ENOMEM); if (S_ISDIR(dentry->d_inode->i_mode)) { inode->i_op = &hppfs_dir_iops; inode->i_fop = &hppfs_dir_fops; } else if (S_ISLNK(dentry->d_inode->i_mode)) { inode->i_op = &hppfs_link_iops; inode->i_fop = 
&hppfs_file_fops; } else { inode->i_op = &hppfs_file_iops; inode->i_fop = &hppfs_file_fops; } HPPFS_I(inode)->proc_dentry = dget(dentry); inode->i_uid = proc_ino->i_uid; inode->i_gid = proc_ino->i_gid; inode->i_atime = proc_ino->i_atime; inode->i_mtime = proc_ino->i_mtime; inode->i_ctime = proc_ino->i_ctime; inode->i_ino = proc_ino->i_ino; inode->i_mode = proc_ino->i_mode; inode->i_nlink = proc_ino->i_nlink; inode->i_size = proc_ino->i_size; inode->i_blocks = proc_ino->i_blocks; return inode; } static int hppfs_fill_super(struct super_block *sb, void *d, int silent) { struct inode *root_inode; struct vfsmount *proc_mnt; int err = -ENOENT; proc_mnt = do_kern_mount("proc", 0, "proc", NULL); if (IS_ERR(proc_mnt)) goto out; sb->s_blocksize = 1024; sb->s_blocksize_bits = 10; sb->s_magic = HPPFS_SUPER_MAGIC; sb->s_op = &hppfs_sbops; sb->s_fs_info = proc_mnt; err = -ENOMEM; root_inode = get_inode(sb, proc_mnt->mnt_sb->s_root); if (!root_inode) goto out_mntput; sb->s_root = d_alloc_root(root_inode); if (!sb->s_root) goto out_iput; return 0; out_iput: iput(root_inode); out_mntput: mntput(proc_mnt); out: return(err); } static int hppfs_read_super(struct file_system_type *type, int flags, const char *dev_name, void *data, struct vfsmount *mnt) { return get_sb_nodev(type, flags, data, hppfs_fill_super, mnt); } static struct file_system_type hppfs_type = { .owner = THIS_MODULE, .name = "hppfs", .get_sb = hppfs_read_super, .kill_sb = kill_anon_super, .fs_flags = 0, }; static int __init init_hppfs(void) { return register_filesystem(&hppfs_type); } static void __exit exit_hppfs(void) { unregister_filesystem(&hppfs_type); } module_init(init_hppfs) module_exit(exit_hppfs) MODULE_LICENSE("GPL");
gpl-2.0
hiikezoe/android_kernel_nec_n06e
arch/arm/mach-msm/nand_partitions.c
1344
5019
/* arch/arm/mach-msm/nand_partitions.c * * Code to extract partition information from ATAG set up by the * bootloader. * * Copyright (C) 2007 Google, Inc. * Copyright (c) 2008-2009,2011 The Linux Foundation. All rights reserved. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <asm/mach/flash.h> #include <linux/io.h> #include <asm/setup.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <mach/msm_iomap.h> #include <mach/board.h> #ifdef CONFIG_MSM_SMD #include "smd_private.h" #endif /* configuration tags specific to msm */ #define ATAG_MSM_PARTITION 0x4d534D70 /* MSMp */ struct msm_ptbl_entry { char name[16]; __u32 offset; __u32 size; __u32 flags; }; #define MSM_MAX_PARTITIONS 18 static struct mtd_partition msm_nand_partitions[MSM_MAX_PARTITIONS]; static char msm_nand_names[MSM_MAX_PARTITIONS * 16]; extern struct flash_platform_data msm_nand_data; static int __init parse_tag_msm_partition(const struct tag *tag) { struct mtd_partition *ptn = msm_nand_partitions; char *name = msm_nand_names; struct msm_ptbl_entry *entry = (void *) &tag->u; unsigned count, n; count = (tag->hdr.size - 2) / (sizeof(struct msm_ptbl_entry) / sizeof(__u32)); if (count > MSM_MAX_PARTITIONS) count = MSM_MAX_PARTITIONS; for (n = 0; n < count; n++) { memcpy(name, entry->name, 15); name[15] = 0; ptn->name = name; ptn->offset = entry->offset; ptn->size = entry->size; printk(KERN_INFO "Partition (from atag) %s " "-- Offset:%llx 
Size:%llx\n", ptn->name, ptn->offset, ptn->size); name += 16; entry++; ptn++; } msm_nand_data.nr_parts = count; msm_nand_data.parts = msm_nand_partitions; return 0; } __tagtable(ATAG_MSM_PARTITION, parse_tag_msm_partition); #define FLASH_PART_MAGIC1 0x55EE73AA #define FLASH_PART_MAGIC2 0xE35EBDDB #define FLASH_PARTITION_VERSION 0x3 #define LINUX_FS_PARTITION_NAME "0:EFS2APPS" struct flash_partition_entry { char name[16]; u32 offset; /* Offset in blocks from beginning of device */ u32 length; /* Length of the partition in blocks */ u8 attrib1; u8 attrib2; u8 attrib3; u8 which_flash; /* Numeric ID (first = 0, second = 1) */ }; struct flash_partition_table { u32 magic1; u32 magic2; u32 version; u32 numparts; struct flash_partition_entry part_entry[16]; }; #ifdef CONFIG_MSM_SMD static int get_nand_partitions(void) { struct flash_partition_table *partition_table; struct flash_partition_entry *part_entry; struct mtd_partition *ptn = msm_nand_partitions; char *name = msm_nand_names; int part; if (msm_nand_data.nr_parts) return 0; partition_table = (struct flash_partition_table *) smem_alloc(SMEM_AARM_PARTITION_TABLE, sizeof(struct flash_partition_table)); if (!partition_table) { printk(KERN_WARNING "%s: no flash partition table in shared " "memory\n", __func__); return -ENOENT; } if ((partition_table->magic1 != (u32) FLASH_PART_MAGIC1) || (partition_table->magic2 != (u32) FLASH_PART_MAGIC2) || (partition_table->version != (u32) FLASH_PARTITION_VERSION)) { printk(KERN_WARNING "%s: version mismatch -- magic1=%#x, " "magic2=%#x, version=%#x\n", __func__, partition_table->magic1, partition_table->magic2, partition_table->version); return -EFAULT; } msm_nand_data.nr_parts = 0; /* Get the LINUX FS partition info */ for (part = 0; part < partition_table->numparts; part++) { part_entry = &partition_table->part_entry[part]; /* Find a match for the Linux file system partition */ if (strcmp(part_entry->name, LINUX_FS_PARTITION_NAME) == 0) { strcpy(name, part_entry->name); ptn->name 
= name; /*TODO: Get block count and size info */ ptn->offset = part_entry->offset; /* For SMEM, -1 indicates remaining space in flash, * but for MTD it is 0 */ if (part_entry->length == (u32)-1) ptn->size = 0; else ptn->size = part_entry->length; msm_nand_data.nr_parts = 1; msm_nand_data.parts = msm_nand_partitions; printk(KERN_INFO "Partition(from smem) %s " "-- Offset:%llx Size:%llx\n", ptn->name, ptn->offset, ptn->size); return 0; } } printk(KERN_WARNING "%s: no partition table found!", __func__); return -ENODEV; } #else static int get_nand_partitions(void) { if (msm_nand_data.nr_parts) return 0; printk(KERN_WARNING "%s: no partition table found!", __func__); return -ENODEV; } #endif device_initcall(get_nand_partitions);
gpl-2.0
metacloud/linux
drivers/i2c/busses/i2c-designware-core.c
1344
20797
/* * Synopsys DesignWare I2C adapter driver (master only). * * Based on the TI DAVINCI I2C adapter driver. * * Copyright (C) 2006 Texas Instruments. * Copyright (C) 2007 MontaVista Software Inc. * Copyright (C) 2009 Provigent Ltd. * * ---------------------------------------------------------------------------- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ---------------------------------------------------------------------------- * */ #include <linux/export.h> #include <linux/clk.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <linux/delay.h> #include <linux/module.h> #include "i2c-designware-core.h" /* * Registers offset */ #define DW_IC_CON 0x0 #define DW_IC_TAR 0x4 #define DW_IC_DATA_CMD 0x10 #define DW_IC_SS_SCL_HCNT 0x14 #define DW_IC_SS_SCL_LCNT 0x18 #define DW_IC_FS_SCL_HCNT 0x1c #define DW_IC_FS_SCL_LCNT 0x20 #define DW_IC_INTR_STAT 0x2c #define DW_IC_INTR_MASK 0x30 #define DW_IC_RAW_INTR_STAT 0x34 #define DW_IC_RX_TL 0x38 #define DW_IC_TX_TL 0x3c #define DW_IC_CLR_INTR 0x40 #define DW_IC_CLR_RX_UNDER 0x44 #define DW_IC_CLR_RX_OVER 0x48 #define DW_IC_CLR_TX_OVER 0x4c #define DW_IC_CLR_RD_REQ 0x50 #define DW_IC_CLR_TX_ABRT 0x54 #define DW_IC_CLR_RX_DONE 0x58 #define DW_IC_CLR_ACTIVITY 0x5c #define 
DW_IC_CLR_STOP_DET 0x60 #define DW_IC_CLR_START_DET 0x64 #define DW_IC_CLR_GEN_CALL 0x68 #define DW_IC_ENABLE 0x6c #define DW_IC_STATUS 0x70 #define DW_IC_TXFLR 0x74 #define DW_IC_RXFLR 0x78 #define DW_IC_TX_ABRT_SOURCE 0x80 #define DW_IC_ENABLE_STATUS 0x9c #define DW_IC_COMP_PARAM_1 0xf4 #define DW_IC_COMP_TYPE 0xfc #define DW_IC_COMP_TYPE_VALUE 0x44570140 #define DW_IC_INTR_RX_UNDER 0x001 #define DW_IC_INTR_RX_OVER 0x002 #define DW_IC_INTR_RX_FULL 0x004 #define DW_IC_INTR_TX_OVER 0x008 #define DW_IC_INTR_TX_EMPTY 0x010 #define DW_IC_INTR_RD_REQ 0x020 #define DW_IC_INTR_TX_ABRT 0x040 #define DW_IC_INTR_RX_DONE 0x080 #define DW_IC_INTR_ACTIVITY 0x100 #define DW_IC_INTR_STOP_DET 0x200 #define DW_IC_INTR_START_DET 0x400 #define DW_IC_INTR_GEN_CALL 0x800 #define DW_IC_INTR_DEFAULT_MASK (DW_IC_INTR_RX_FULL | \ DW_IC_INTR_TX_EMPTY | \ DW_IC_INTR_TX_ABRT | \ DW_IC_INTR_STOP_DET) #define DW_IC_STATUS_ACTIVITY 0x1 #define DW_IC_ERR_TX_ABRT 0x1 /* * status codes */ #define STATUS_IDLE 0x0 #define STATUS_WRITE_IN_PROGRESS 0x1 #define STATUS_READ_IN_PROGRESS 0x2 #define TIMEOUT 20 /* ms */ /* * hardware abort codes from the DW_IC_TX_ABRT_SOURCE register * * only expected abort codes are listed here * refer to the datasheet for the full list */ #define ABRT_7B_ADDR_NOACK 0 #define ABRT_10ADDR1_NOACK 1 #define ABRT_10ADDR2_NOACK 2 #define ABRT_TXDATA_NOACK 3 #define ABRT_GCALL_NOACK 4 #define ABRT_GCALL_READ 5 #define ABRT_SBYTE_ACKDET 7 #define ABRT_SBYTE_NORSTRT 9 #define ABRT_10B_RD_NORSTRT 10 #define ABRT_MASTER_DIS 11 #define ARB_LOST 12 #define DW_IC_TX_ABRT_7B_ADDR_NOACK (1UL << ABRT_7B_ADDR_NOACK) #define DW_IC_TX_ABRT_10ADDR1_NOACK (1UL << ABRT_10ADDR1_NOACK) #define DW_IC_TX_ABRT_10ADDR2_NOACK (1UL << ABRT_10ADDR2_NOACK) #define DW_IC_TX_ABRT_TXDATA_NOACK (1UL << ABRT_TXDATA_NOACK) #define DW_IC_TX_ABRT_GCALL_NOACK (1UL << ABRT_GCALL_NOACK) #define DW_IC_TX_ABRT_GCALL_READ (1UL << ABRT_GCALL_READ) #define DW_IC_TX_ABRT_SBYTE_ACKDET (1UL << ABRT_SBYTE_ACKDET) #define 
DW_IC_TX_ABRT_SBYTE_NORSTRT (1UL << ABRT_SBYTE_NORSTRT) #define DW_IC_TX_ABRT_10B_RD_NORSTRT (1UL << ABRT_10B_RD_NORSTRT) #define DW_IC_TX_ABRT_MASTER_DIS (1UL << ABRT_MASTER_DIS) #define DW_IC_TX_ARB_LOST (1UL << ARB_LOST) #define DW_IC_TX_ABRT_NOACK (DW_IC_TX_ABRT_7B_ADDR_NOACK | \ DW_IC_TX_ABRT_10ADDR1_NOACK | \ DW_IC_TX_ABRT_10ADDR2_NOACK | \ DW_IC_TX_ABRT_TXDATA_NOACK | \ DW_IC_TX_ABRT_GCALL_NOACK) static char *abort_sources[] = { [ABRT_7B_ADDR_NOACK] = "slave address not acknowledged (7bit mode)", [ABRT_10ADDR1_NOACK] = "first address byte not acknowledged (10bit mode)", [ABRT_10ADDR2_NOACK] = "second address byte not acknowledged (10bit mode)", [ABRT_TXDATA_NOACK] = "data not acknowledged", [ABRT_GCALL_NOACK] = "no acknowledgement for a general call", [ABRT_GCALL_READ] = "read after general call", [ABRT_SBYTE_ACKDET] = "start byte acknowledged", [ABRT_SBYTE_NORSTRT] = "trying to send start byte when restart is disabled", [ABRT_10B_RD_NORSTRT] = "trying to read when restart is disabled (10bit mode)", [ABRT_MASTER_DIS] = "trying to use disabled adapter", [ARB_LOST] = "lost arbitration", }; u32 dw_readl(struct dw_i2c_dev *dev, int offset) { u32 value; if (dev->accessor_flags & ACCESS_16BIT) value = readw(dev->base + offset) | (readw(dev->base + offset + 2) << 16); else value = readl(dev->base + offset); if (dev->accessor_flags & ACCESS_SWAP) return swab32(value); else return value; } void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset) { if (dev->accessor_flags & ACCESS_SWAP) b = swab32(b); if (dev->accessor_flags & ACCESS_16BIT) { writew((u16)b, dev->base + offset); writew((u16)(b >> 16), dev->base + offset + 2); } else { writel(b, dev->base + offset); } } static u32 i2c_dw_scl_hcnt(u32 ic_clk, u32 tSYMBOL, u32 tf, int cond, int offset) { /* * DesignWare I2C core doesn't seem to have solid strategy to meet * the tHD;STA timing spec. Configuring _HCNT based on tHIGH spec * will result in violation of the tHD;STA spec. 
*/ if (cond) /* * Conditional expression: * * IC_[FS]S_SCL_HCNT + (1+4+3) >= IC_CLK * tHIGH * * This is based on the DW manuals, and represents an ideal * configuration. The resulting I2C bus speed will be * faster than any of the others. * * If your hardware is free from tHD;STA issue, try this one. */ return (ic_clk * tSYMBOL + 5000) / 10000 - 8 + offset; else /* * Conditional expression: * * IC_[FS]S_SCL_HCNT + 3 >= IC_CLK * (tHD;STA + tf) * * This is just experimental rule; the tHD;STA period turned * out to be proportinal to (_HCNT + 3). With this setting, * we could meet both tHIGH and tHD;STA timing specs. * * If unsure, you'd better to take this alternative. * * The reason why we need to take into account "tf" here, * is the same as described in i2c_dw_scl_lcnt(). */ return (ic_clk * (tSYMBOL + tf) + 5000) / 10000 - 3 + offset; } static u32 i2c_dw_scl_lcnt(u32 ic_clk, u32 tLOW, u32 tf, int offset) { /* * Conditional expression: * * IC_[FS]S_SCL_LCNT + 1 >= IC_CLK * (tLOW + tf) * * DW I2C core starts counting the SCL CNTs for the LOW period * of the SCL clock (tLOW) as soon as it pulls the SCL line. * In order to meet the tLOW timing spec, we need to take into * account the fall time of SCL signal (tf). Default tf value * should be 0.3 us, for safety. */ return ((ic_clk * (tLOW + tf) + 5000) / 10000) - 1 + offset; } static void __i2c_dw_enable(struct dw_i2c_dev *dev, bool enable) { int timeout = 100; do { dw_writel(dev, enable, DW_IC_ENABLE); if ((dw_readl(dev, DW_IC_ENABLE_STATUS) & 1) == enable) return; /* * Wait 10 times the signaling period of the highest I2C * transfer supported by the driver (for 400KHz this is * 25us) as described in the DesignWare I2C databook. */ usleep_range(25, 250); } while (timeout--); dev_warn(dev->dev, "timeout in %sabling adapter\n", enable ? "en" : "dis"); } /** * i2c_dw_init() - initialize the designware i2c master hardware * @dev: device private data * * This functions configures and enables the I2C master. 
* This function is called during I2C init function, and in case of timeout at * run time. */ int i2c_dw_init(struct dw_i2c_dev *dev) { u32 input_clock_khz; u32 hcnt, lcnt; u32 reg; input_clock_khz = dev->get_clk_rate_khz(dev); reg = dw_readl(dev, DW_IC_COMP_TYPE); if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) { /* Configure register endianess access */ dev->accessor_flags |= ACCESS_SWAP; } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) { /* Configure register access mode 16bit */ dev->accessor_flags |= ACCESS_16BIT; } else if (reg != DW_IC_COMP_TYPE_VALUE) { dev_err(dev->dev, "Unknown Synopsys component type: " "0x%08x\n", reg); return -ENODEV; } /* Disable the adapter */ __i2c_dw_enable(dev, false); /* set standard and fast speed deviders for high/low periods */ /* Standard-mode */ hcnt = i2c_dw_scl_hcnt(input_clock_khz, 40, /* tHD;STA = tHIGH = 4.0 us */ 3, /* tf = 0.3 us */ 0, /* 0: DW default, 1: Ideal */ 0); /* No offset */ lcnt = i2c_dw_scl_lcnt(input_clock_khz, 47, /* tLOW = 4.7 us */ 3, /* tf = 0.3 us */ 0); /* No offset */ dw_writel(dev, hcnt, DW_IC_SS_SCL_HCNT); dw_writel(dev, lcnt, DW_IC_SS_SCL_LCNT); dev_dbg(dev->dev, "Standard-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt); /* Fast-mode */ hcnt = i2c_dw_scl_hcnt(input_clock_khz, 6, /* tHD;STA = tHIGH = 0.6 us */ 3, /* tf = 0.3 us */ 0, /* 0: DW default, 1: Ideal */ 0); /* No offset */ lcnt = i2c_dw_scl_lcnt(input_clock_khz, 13, /* tLOW = 1.3 us */ 3, /* tf = 0.3 us */ 0); /* No offset */ dw_writel(dev, hcnt, DW_IC_FS_SCL_HCNT); dw_writel(dev, lcnt, DW_IC_FS_SCL_LCNT); dev_dbg(dev->dev, "Fast-mode HCNT:LCNT = %d:%d\n", hcnt, lcnt); /* Configure Tx/Rx FIFO threshold levels */ dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL); dw_writel(dev, 0, DW_IC_RX_TL); /* configure the i2c master */ dw_writel(dev, dev->master_cfg , DW_IC_CON); return 0; } EXPORT_SYMBOL_GPL(i2c_dw_init); /* * Waiting for bus not busy */ static int i2c_dw_wait_bus_not_busy(struct dw_i2c_dev *dev) { int timeout = TIMEOUT; 
while (dw_readl(dev, DW_IC_STATUS) & DW_IC_STATUS_ACTIVITY) { if (timeout <= 0) { dev_warn(dev->dev, "timeout waiting for bus ready\n"); return -ETIMEDOUT; } timeout--; usleep_range(1000, 1100); } return 0; } static void i2c_dw_xfer_init(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; u32 ic_con; /* Disable the adapter */ __i2c_dw_enable(dev, false); /* set the slave (target) address */ dw_writel(dev, msgs[dev->msg_write_idx].addr, DW_IC_TAR); /* if the slave address is ten bit address, enable 10BITADDR */ ic_con = dw_readl(dev, DW_IC_CON); if (msgs[dev->msg_write_idx].flags & I2C_M_TEN) ic_con |= DW_IC_CON_10BITADDR_MASTER; else ic_con &= ~DW_IC_CON_10BITADDR_MASTER; dw_writel(dev, ic_con, DW_IC_CON); /* enforce disabled interrupts (due to HW issues) */ i2c_dw_disable_int(dev); /* Enable the adapter */ __i2c_dw_enable(dev, true); /* Clear and enable interrupts */ i2c_dw_clear_int(dev); dw_writel(dev, DW_IC_INTR_DEFAULT_MASK, DW_IC_INTR_MASK); } /* * Initiate (and continue) low level master read/write transaction. * This function is only called from i2c_dw_isr, and pumping i2c_msg * messages into the tx buffer. Even if the size of i2c_msg data is * longer than the size of the tx buffer, it handles everything. 
*/ static void i2c_dw_xfer_msg(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; u32 intr_mask; int tx_limit, rx_limit; u32 addr = msgs[dev->msg_write_idx].addr; u32 buf_len = dev->tx_buf_len; u8 *buf = dev->tx_buf; intr_mask = DW_IC_INTR_DEFAULT_MASK; for (; dev->msg_write_idx < dev->msgs_num; dev->msg_write_idx++) { /* * if target address has changed, we need to * reprogram the target address in the i2c * adapter when we are done with this transfer */ if (msgs[dev->msg_write_idx].addr != addr) { dev_err(dev->dev, "%s: invalid target address\n", __func__); dev->msg_err = -EINVAL; break; } if (msgs[dev->msg_write_idx].len == 0) { dev_err(dev->dev, "%s: invalid message length\n", __func__); dev->msg_err = -EINVAL; break; } if (!(dev->status & STATUS_WRITE_IN_PROGRESS)) { /* new i2c_msg */ buf = msgs[dev->msg_write_idx].buf; buf_len = msgs[dev->msg_write_idx].len; } tx_limit = dev->tx_fifo_depth - dw_readl(dev, DW_IC_TXFLR); rx_limit = dev->rx_fifo_depth - dw_readl(dev, DW_IC_RXFLR); while (buf_len > 0 && tx_limit > 0 && rx_limit > 0) { u32 cmd = 0; /* * If IC_EMPTYFIFO_HOLD_MASTER_EN is set we must * manually set the stop bit. However, it cannot be * detected from the registers so we set it always * when writing/reading the last byte. */ if (dev->msg_write_idx == dev->msgs_num - 1 && buf_len == 1) cmd |= BIT(9); if (msgs[dev->msg_write_idx].flags & I2C_M_RD) { /* avoid rx buffer overrun */ if (rx_limit - dev->rx_outstanding <= 0) break; dw_writel(dev, cmd | 0x100, DW_IC_DATA_CMD); rx_limit--; dev->rx_outstanding++; } else dw_writel(dev, cmd | *buf++, DW_IC_DATA_CMD); tx_limit--; buf_len--; } dev->tx_buf = buf; dev->tx_buf_len = buf_len; if (buf_len > 0) { /* more bytes to be written */ dev->status |= STATUS_WRITE_IN_PROGRESS; break; } else dev->status &= ~STATUS_WRITE_IN_PROGRESS; } /* * If i2c_msg index search is completed, we don't need TX_EMPTY * interrupt any more. 
*/ if (dev->msg_write_idx == dev->msgs_num) intr_mask &= ~DW_IC_INTR_TX_EMPTY; if (dev->msg_err) intr_mask = 0; dw_writel(dev, intr_mask, DW_IC_INTR_MASK); } static void i2c_dw_read(struct dw_i2c_dev *dev) { struct i2c_msg *msgs = dev->msgs; int rx_valid; for (; dev->msg_read_idx < dev->msgs_num; dev->msg_read_idx++) { u32 len; u8 *buf; if (!(msgs[dev->msg_read_idx].flags & I2C_M_RD)) continue; if (!(dev->status & STATUS_READ_IN_PROGRESS)) { len = msgs[dev->msg_read_idx].len; buf = msgs[dev->msg_read_idx].buf; } else { len = dev->rx_buf_len; buf = dev->rx_buf; } rx_valid = dw_readl(dev, DW_IC_RXFLR); for (; len > 0 && rx_valid > 0; len--, rx_valid--) { *buf++ = dw_readl(dev, DW_IC_DATA_CMD); dev->rx_outstanding--; } if (len > 0) { dev->status |= STATUS_READ_IN_PROGRESS; dev->rx_buf_len = len; dev->rx_buf = buf; return; } else dev->status &= ~STATUS_READ_IN_PROGRESS; } } static int i2c_dw_handle_tx_abort(struct dw_i2c_dev *dev) { unsigned long abort_source = dev->abort_source; int i; if (abort_source & DW_IC_TX_ABRT_NOACK) { for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) dev_dbg(dev->dev, "%s: %s\n", __func__, abort_sources[i]); return -EREMOTEIO; } for_each_set_bit(i, &abort_source, ARRAY_SIZE(abort_sources)) dev_err(dev->dev, "%s: %s\n", __func__, abort_sources[i]); if (abort_source & DW_IC_TX_ARB_LOST) return -EAGAIN; else if (abort_source & DW_IC_TX_ABRT_GCALL_READ) return -EINVAL; /* wrong msgs[] data */ else return -EIO; } /* * Prepare controller for a transaction and call i2c_dw_xfer_msg */ int i2c_dw_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[], int num) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); int ret; dev_dbg(dev->dev, "%s: msgs: %d\n", __func__, num); mutex_lock(&dev->lock); pm_runtime_get_sync(dev->dev); INIT_COMPLETION(dev->cmd_complete); dev->msgs = msgs; dev->msgs_num = num; dev->cmd_err = 0; dev->msg_write_idx = 0; dev->msg_read_idx = 0; dev->msg_err = 0; dev->status = STATUS_IDLE; dev->abort_source = 0; 
dev->rx_outstanding = 0; ret = i2c_dw_wait_bus_not_busy(dev); if (ret < 0) goto done; /* start the transfers */ i2c_dw_xfer_init(dev); /* wait for tx to complete */ ret = wait_for_completion_interruptible_timeout(&dev->cmd_complete, HZ); if (ret == 0) { dev_err(dev->dev, "controller timed out\n"); i2c_dw_init(dev); ret = -ETIMEDOUT; goto done; } else if (ret < 0) goto done; if (dev->msg_err) { ret = dev->msg_err; goto done; } /* no error */ if (likely(!dev->cmd_err)) { /* Disable the adapter */ __i2c_dw_enable(dev, false); ret = num; goto done; } /* We have an error */ if (dev->cmd_err == DW_IC_ERR_TX_ABRT) { ret = i2c_dw_handle_tx_abort(dev); goto done; } ret = -EIO; done: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); mutex_unlock(&dev->lock); return ret; } EXPORT_SYMBOL_GPL(i2c_dw_xfer); u32 i2c_dw_func(struct i2c_adapter *adap) { struct dw_i2c_dev *dev = i2c_get_adapdata(adap); return dev->functionality; } EXPORT_SYMBOL_GPL(i2c_dw_func); static u32 i2c_dw_read_clear_intrbits(struct dw_i2c_dev *dev) { u32 stat; /* * The IC_INTR_STAT register just indicates "enabled" interrupts. * Ths unmasked raw version of interrupt status bits are available * in the IC_RAW_INTR_STAT register. * * That is, * stat = dw_readl(IC_INTR_STAT); * equals to, * stat = dw_readl(IC_RAW_INTR_STAT) & dw_readl(IC_INTR_MASK); * * The raw version might be useful for debugging purposes. */ stat = dw_readl(dev, DW_IC_INTR_STAT); /* * Do not use the IC_CLR_INTR register to clear interrupts, or * you'll miss some interrupts, triggered during the period from * dw_readl(IC_INTR_STAT) to dw_readl(IC_CLR_INTR). * * Instead, use the separately-prepared IC_CLR_* registers. 
*/ if (stat & DW_IC_INTR_RX_UNDER) dw_readl(dev, DW_IC_CLR_RX_UNDER); if (stat & DW_IC_INTR_RX_OVER) dw_readl(dev, DW_IC_CLR_RX_OVER); if (stat & DW_IC_INTR_TX_OVER) dw_readl(dev, DW_IC_CLR_TX_OVER); if (stat & DW_IC_INTR_RD_REQ) dw_readl(dev, DW_IC_CLR_RD_REQ); if (stat & DW_IC_INTR_TX_ABRT) { /* * The IC_TX_ABRT_SOURCE register is cleared whenever * the IC_CLR_TX_ABRT is read. Preserve it beforehand. */ dev->abort_source = dw_readl(dev, DW_IC_TX_ABRT_SOURCE); dw_readl(dev, DW_IC_CLR_TX_ABRT); } if (stat & DW_IC_INTR_RX_DONE) dw_readl(dev, DW_IC_CLR_RX_DONE); if (stat & DW_IC_INTR_ACTIVITY) dw_readl(dev, DW_IC_CLR_ACTIVITY); if (stat & DW_IC_INTR_STOP_DET) dw_readl(dev, DW_IC_CLR_STOP_DET); if (stat & DW_IC_INTR_START_DET) dw_readl(dev, DW_IC_CLR_START_DET); if (stat & DW_IC_INTR_GEN_CALL) dw_readl(dev, DW_IC_CLR_GEN_CALL); return stat; } /* * Interrupt service routine. This gets called whenever an I2C interrupt * occurs. */ irqreturn_t i2c_dw_isr(int this_irq, void *dev_id) { struct dw_i2c_dev *dev = dev_id; u32 stat, enabled; enabled = dw_readl(dev, DW_IC_ENABLE); stat = dw_readl(dev, DW_IC_RAW_INTR_STAT); dev_dbg(dev->dev, "%s: %s enabled= 0x%x stat=0x%x\n", __func__, dev->adapter.name, enabled, stat); if (!enabled || !(stat & ~DW_IC_INTR_ACTIVITY)) return IRQ_NONE; stat = i2c_dw_read_clear_intrbits(dev); if (stat & DW_IC_INTR_TX_ABRT) { dev->cmd_err |= DW_IC_ERR_TX_ABRT; dev->status = STATUS_IDLE; /* * Anytime TX_ABRT is set, the contents of the tx/rx * buffers are flushed. Make sure to skip them. */ dw_writel(dev, 0, DW_IC_INTR_MASK); goto tx_aborted; } if (stat & DW_IC_INTR_RX_FULL) i2c_dw_read(dev); if (stat & DW_IC_INTR_TX_EMPTY) i2c_dw_xfer_msg(dev); /* * No need to modify or disable the interrupt mask here. * i2c_dw_xfer_msg() will take care of it according to * the current transmit status. 
 */
tx_aborted:
	/* Wake the transfer initiator on abort, stop condition, or
	 * message-setup error detected by i2c_dw_xfer_msg(). */
	if ((stat & (DW_IC_INTR_TX_ABRT | DW_IC_INTR_STOP_DET)) || dev->msg_err)
		complete(&dev->cmd_complete);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(i2c_dw_isr);

/**
 * i2c_dw_enable() - enable the I2C adapter
 * @dev: device private data
 */
void i2c_dw_enable(struct dw_i2c_dev *dev)
{
       /* Enable the adapter */
	__i2c_dw_enable(dev, true);
}
EXPORT_SYMBOL_GPL(i2c_dw_enable);

/**
 * i2c_dw_is_enabled() - read the adapter enable state
 * @dev: device private data
 *
 * Returns the raw DW_IC_ENABLE register value (non-zero when enabled).
 */
u32 i2c_dw_is_enabled(struct dw_i2c_dev *dev)
{
	return dw_readl(dev, DW_IC_ENABLE);
}
EXPORT_SYMBOL_GPL(i2c_dw_is_enabled);

/**
 * i2c_dw_disable() - disable the adapter and mask/clear its interrupts
 * @dev: device private data
 */
void i2c_dw_disable(struct dw_i2c_dev *dev)
{
	/* Disable controller */
	__i2c_dw_enable(dev, false);

	/* Disable all interrupts; reading DW_IC_CLR_INTR clears any that
	 * are already pending. */
	dw_writel(dev, 0, DW_IC_INTR_MASK);
	dw_readl(dev, DW_IC_CLR_INTR);
}
EXPORT_SYMBOL_GPL(i2c_dw_disable);

/* Clear all combined interrupt status bits (read-to-clear register). */
void i2c_dw_clear_int(struct dw_i2c_dev *dev)
{
	dw_readl(dev, DW_IC_CLR_INTR);
}
EXPORT_SYMBOL_GPL(i2c_dw_clear_int);

/* Mask every interrupt source; used before (re)enabling the adapter. */
void i2c_dw_disable_int(struct dw_i2c_dev *dev)
{
	dw_writel(dev, 0, DW_IC_INTR_MASK);
}
EXPORT_SYMBOL_GPL(i2c_dw_disable_int);

/* Return the hardware component parameter register (FIFO depths etc.). */
u32 i2c_dw_read_comp_param(struct dw_i2c_dev *dev)
{
	return dw_readl(dev, DW_IC_COMP_PARAM_1);
}
EXPORT_SYMBOL_GPL(i2c_dw_read_comp_param);

MODULE_DESCRIPTION("Synopsys DesignWare I2C bus adapter core");
MODULE_LICENSE("GPL");
gpl-2.0
cm13-kinzie-port-from-clark/kernel_motorola_msm8992
arch/arm/mach-omap2/prm44xx.c
2112
19925
/* * OMAP4 PRM module functions * * Copyright (C) 2011-2012 Texas Instruments, Inc. * Copyright (C) 2010 Nokia Corporation * Benoît Cousson * Paul Walmsley * Rajendra Nayak <rnayak@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/io.h> #include "soc.h" #include "iomap.h" #include "common.h" #include "vp.h" #include "prm44xx.h" #include "prm-regbits-44xx.h" #include "prcm44xx.h" #include "prminst44xx.h" #include "powerdomain.h" /* Static data */ static const struct omap_prcm_irq omap4_prcm_irqs[] = { OMAP_PRCM_IRQ("wkup", 0, 0), OMAP_PRCM_IRQ("io", 9, 1), }; static struct omap_prcm_irq_setup omap4_prcm_irq_setup = { .ack = OMAP4_PRM_IRQSTATUS_MPU_OFFSET, .mask = OMAP4_PRM_IRQENABLE_MPU_OFFSET, .nr_regs = 2, .irqs = omap4_prcm_irqs, .nr_irqs = ARRAY_SIZE(omap4_prcm_irqs), .irq = 11 + OMAP44XX_IRQ_GIC_START, .read_pending_irqs = &omap44xx_prm_read_pending_irqs, .ocp_barrier = &omap44xx_prm_ocp_barrier, .save_and_clear_irqen = &omap44xx_prm_save_and_clear_irqen, .restore_irqen = &omap44xx_prm_restore_irqen, }; /* * omap44xx_prm_reset_src_map - map from bits in the PRM_RSTST * hardware register (which are specific to OMAP44xx SoCs) to reset * source ID bit shifts (which is an OMAP SoC-independent * enumeration) */ static struct prm_reset_src_map omap44xx_prm_reset_src_map[] = { { OMAP4430_GLOBAL_WARM_SW_RST_SHIFT, OMAP_GLOBAL_WARM_RST_SRC_ID_SHIFT }, { OMAP4430_GLOBAL_COLD_RST_SHIFT, OMAP_GLOBAL_COLD_RST_SRC_ID_SHIFT }, { OMAP4430_MPU_SECURITY_VIOL_RST_SHIFT, OMAP_SECU_VIOL_RST_SRC_ID_SHIFT }, { OMAP4430_MPU_WDT_RST_SHIFT, OMAP_MPU_WD_RST_SRC_ID_SHIFT }, { OMAP4430_SECURE_WDT_RST_SHIFT, OMAP_SECU_WD_RST_SRC_ID_SHIFT }, { OMAP4430_EXTERNAL_WARM_RST_SHIFT, OMAP_EXTWARM_RST_SRC_ID_SHIFT }, { 
OMAP4430_VDD_MPU_VOLT_MGR_RST_SHIFT, OMAP_VDD_MPU_VM_RST_SRC_ID_SHIFT }, { OMAP4430_VDD_IVA_VOLT_MGR_RST_SHIFT, OMAP_VDD_IVA_VM_RST_SRC_ID_SHIFT }, { OMAP4430_VDD_CORE_VOLT_MGR_RST_SHIFT, OMAP_VDD_CORE_VM_RST_SRC_ID_SHIFT }, { OMAP4430_ICEPICK_RST_SHIFT, OMAP_ICEPICK_RST_SRC_ID_SHIFT }, { OMAP4430_C2C_RST_SHIFT, OMAP_C2C_RST_SRC_ID_SHIFT }, { -1, -1 }, }; /* PRM low-level functions */ /* Read a register in a CM/PRM instance in the PRM module */ u32 omap4_prm_read_inst_reg(s16 inst, u16 reg) { return __raw_readl(prm_base + inst + reg); } /* Write into a register in a CM/PRM instance in the PRM module */ void omap4_prm_write_inst_reg(u32 val, s16 inst, u16 reg) { __raw_writel(val, prm_base + inst + reg); } /* Read-modify-write a register in a PRM module. Caller must lock */ u32 omap4_prm_rmw_inst_reg_bits(u32 mask, u32 bits, s16 inst, s16 reg) { u32 v; v = omap4_prm_read_inst_reg(inst, reg); v &= ~mask; v |= bits; omap4_prm_write_inst_reg(v, inst, reg); return v; } /* PRM VP */ /* * struct omap4_vp - OMAP4 VP register access description. 
* @irqstatus_mpu: offset to IRQSTATUS_MPU register for VP * @tranxdone_status: VP_TRANXDONE_ST bitmask in PRM_IRQSTATUS_MPU reg */ struct omap4_vp { u32 irqstatus_mpu; u32 tranxdone_status; }; static struct omap4_vp omap4_vp[] = { [OMAP4_VP_VDD_MPU_ID] = { .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET, .tranxdone_status = OMAP4430_VP_MPU_TRANXDONE_ST_MASK, }, [OMAP4_VP_VDD_IVA_ID] = { .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_OFFSET, .tranxdone_status = OMAP4430_VP_IVA_TRANXDONE_ST_MASK, }, [OMAP4_VP_VDD_CORE_ID] = { .irqstatus_mpu = OMAP4_PRM_IRQSTATUS_MPU_OFFSET, .tranxdone_status = OMAP4430_VP_CORE_TRANXDONE_ST_MASK, }, }; u32 omap4_prm_vp_check_txdone(u8 vp_id) { struct omap4_vp *vp = &omap4_vp[vp_id]; u32 irqstatus; irqstatus = omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, OMAP4430_PRM_OCP_SOCKET_INST, vp->irqstatus_mpu); return irqstatus & vp->tranxdone_status; } void omap4_prm_vp_clear_txdone(u8 vp_id) { struct omap4_vp *vp = &omap4_vp[vp_id]; omap4_prminst_write_inst_reg(vp->tranxdone_status, OMAP4430_PRM_PARTITION, OMAP4430_PRM_OCP_SOCKET_INST, vp->irqstatus_mpu); }; u32 omap4_prm_vcvp_read(u8 offset) { return omap4_prminst_read_inst_reg(OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, offset); } void omap4_prm_vcvp_write(u32 val, u8 offset) { omap4_prminst_write_inst_reg(val, OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, offset); } u32 omap4_prm_vcvp_rmw(u32 mask, u32 bits, u8 offset) { return omap4_prminst_rmw_inst_reg_bits(mask, bits, OMAP4430_PRM_PARTITION, OMAP4430_PRM_DEVICE_INST, offset); } static inline u32 _read_pending_irq_reg(u16 irqen_offs, u16 irqst_offs) { u32 mask, st; /* XXX read mask from RAM? 
*/ mask = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, irqen_offs); st = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, irqst_offs); return mask & st; } /** * omap44xx_prm_read_pending_irqs - read pending PRM MPU IRQs into @events * @events: ptr to two consecutive u32s, preallocated by caller * * Read PRM_IRQSTATUS_MPU* bits, AND'ed with the currently-enabled PRM * MPU IRQs, and store the result into the two u32s pointed to by @events. * No return value. */ void omap44xx_prm_read_pending_irqs(unsigned long *events) { events[0] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_OFFSET, OMAP4_PRM_IRQSTATUS_MPU_OFFSET); events[1] = _read_pending_irq_reg(OMAP4_PRM_IRQENABLE_MPU_2_OFFSET, OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); } /** * omap44xx_prm_ocp_barrier - force buffered MPU writes to the PRM to complete * * Force any buffered writes to the PRM IP block to complete. Needed * by the PRM IRQ handler, which reads and writes directly to the IP * block, to avoid race conditions after acknowledging or clearing IRQ * bits. No return value. */ void omap44xx_prm_ocp_barrier(void) { omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_REVISION_PRM_OFFSET); } /** * omap44xx_prm_save_and_clear_irqen - save/clear PRM_IRQENABLE_MPU* regs * @saved_mask: ptr to a u32 array to save IRQENABLE bits * * Save the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers to * @saved_mask. @saved_mask must be allocated by the caller. * Intended to be used in the PRM interrupt handler suspend callback. * The OCP barrier is needed to ensure the write to disable PRM * interrupts reaches the PRM before returning; otherwise, spurious * interrupts might occur. No return value. 
*/ void omap44xx_prm_save_and_clear_irqen(u32 *saved_mask) { saved_mask[0] = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQSTATUS_MPU_OFFSET); saved_mask[1] = omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQSTATUS_MPU_2_OFFSET); omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_OFFSET); omap4_prm_write_inst_reg(0, OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); /* OCP barrier */ omap4_prm_read_inst_reg(OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_REVISION_PRM_OFFSET); } /** * omap44xx_prm_restore_irqen - set PRM_IRQENABLE_MPU* registers from args * @saved_mask: ptr to a u32 array of IRQENABLE bits saved previously * * Restore the PRM_IRQENABLE_MPU and PRM_IRQENABLE_MPU_2 registers from * @saved_mask. Intended to be used in the PRM interrupt handler resume * callback to restore values saved by omap44xx_prm_save_and_clear_irqen(). * No OCP barrier should be needed here; any pending PRM interrupts will fire * once the writes reach the PRM. No return value. */ void omap44xx_prm_restore_irqen(u32 *saved_mask) { omap4_prm_write_inst_reg(saved_mask[0], OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_OFFSET); omap4_prm_write_inst_reg(saved_mask[1], OMAP4430_PRM_OCP_SOCKET_INST, OMAP4_PRM_IRQENABLE_MPU_2_OFFSET); } /** * omap44xx_prm_reconfigure_io_chain - clear latches and reconfigure I/O chain * * Clear any previously-latched I/O wakeup events and ensure that the * I/O wakeup gates are aligned with the current mux settings. Works * by asserting WUCLKIN, waiting for WUCLKOUT to be asserted, and then * deasserting WUCLKIN and waiting for WUCLKOUT to be deasserted. * No return value. XXX Are the final two steps necessary? 
*/ void omap44xx_prm_reconfigure_io_chain(void) { int i = 0; /* Trigger WUCLKIN enable */ omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, OMAP4430_WUCLK_CTRL_MASK, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET); omap_test_timeout( (((omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET) & OMAP4430_WUCLK_STATUS_MASK) >> OMAP4430_WUCLK_STATUS_SHIFT) == 1), MAX_IOPAD_LATCH_TIME, i); if (i == MAX_IOPAD_LATCH_TIME) pr_warn("PRM: I/O chain clock line assertion timed out\n"); /* Trigger WUCLKIN disable */ omap4_prm_rmw_inst_reg_bits(OMAP4430_WUCLK_CTRL_MASK, 0x0, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET); omap_test_timeout( (((omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET) & OMAP4430_WUCLK_STATUS_MASK) >> OMAP4430_WUCLK_STATUS_SHIFT) == 0), MAX_IOPAD_LATCH_TIME, i); if (i == MAX_IOPAD_LATCH_TIME) pr_warn("PRM: I/O chain clock line deassertion timed out\n"); return; } /** * omap44xx_prm_enable_io_wakeup - enable wakeup events from I/O wakeup latches * * Activates the I/O wakeup event latches and allows events logged by * those latches to signal a wakeup event to the PRCM. For I/O wakeups * to occur, WAKEUPENABLE bits must be set in the pad mux registers, and * omap44xx_prm_reconfigure_io_chain() must be called. No return value. */ static void __init omap44xx_prm_enable_io_wakeup(void) { omap4_prm_rmw_inst_reg_bits(OMAP4430_GLOBAL_WUEN_MASK, OMAP4430_GLOBAL_WUEN_MASK, OMAP4430_PRM_DEVICE_INST, OMAP4_PRM_IO_PMCTRL_OFFSET); } /** * omap44xx_prm_read_reset_sources - return the last SoC reset source * * Return a u32 representing the last reset sources of the SoC. The * returned reset source bits are standardized across OMAP SoCs. 
*/ static u32 omap44xx_prm_read_reset_sources(void) { struct prm_reset_src_map *p; u32 r = 0; u32 v; v = omap4_prm_read_inst_reg(OMAP4430_PRM_DEVICE_INST, OMAP4_RM_RSTST); p = omap44xx_prm_reset_src_map; while (p->reg_shift >= 0 && p->std_shift >= 0) { if (v & (1 << p->reg_shift)) r |= 1 << p->std_shift; p++; } return r; } /** * omap44xx_prm_was_any_context_lost_old - was module hardware context lost? * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION) * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST) * @idx: CONTEXT register offset * * Return 1 if any bits were set in the *_CONTEXT_* register * identified by (@part, @inst, @idx), which means that some context * was lost for that module; otherwise, return 0. */ static bool omap44xx_prm_was_any_context_lost_old(u8 part, s16 inst, u16 idx) { return (omap4_prminst_read_inst_reg(part, inst, idx)) ? 1 : 0; } /** * omap44xx_prm_clear_context_lost_flags_old - clear context loss flags * @part: PRM partition ID (e.g., OMAP4430_PRM_PARTITION) * @inst: PRM instance offset (e.g., OMAP4430_PRM_MPU_INST) * @idx: CONTEXT register offset * * Clear hardware context loss bits for the module identified by * (@part, @inst, @idx). No return value. XXX Writes to reserved bits; * is there a way to avoid this? 
*/ static void omap44xx_prm_clear_context_loss_flags_old(u8 part, s16 inst, u16 idx) { omap4_prminst_write_inst_reg(0xffffffff, part, inst, idx); } /* Powerdomain low-level functions */ static int omap4_pwrdm_set_next_pwrst(struct powerdomain *pwrdm, u8 pwrst) { omap4_prminst_rmw_inst_reg_bits(OMAP_POWERSTATE_MASK, (pwrst << OMAP_POWERSTATE_SHIFT), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_read_next_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= OMAP_POWERSTATE_MASK; v >>= OMAP_POWERSTATE_SHIFT; return v; } static int omap4_pwrdm_read_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP_POWERSTATEST_MASK; v >>= OMAP_POWERSTATEST_SHIFT; return v; } static int omap4_pwrdm_read_prev_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP4430_LASTPOWERSTATEENTERED_MASK; v >>= OMAP4430_LASTPOWERSTATEENTERED_SHIFT; return v; } static int omap4_pwrdm_set_lowpwrstchange(struct powerdomain *pwrdm) { omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOWPOWERSTATECHANGE_MASK, (1 << OMAP4430_LOWPOWERSTATECHANGE_SHIFT), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_clear_all_prev_pwrst(struct powerdomain *pwrdm) { omap4_prminst_rmw_inst_reg_bits(OMAP4430_LASTPOWERSTATEENTERED_MASK, OMAP4430_LASTPOWERSTATEENTERED_MASK, pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); return 0; } static int omap4_pwrdm_set_logic_retst(struct powerdomain *pwrdm, u8 pwrst) { u32 v; v = pwrst << __ffs(OMAP4430_LOGICRETSTATE_MASK); omap4_prminst_rmw_inst_reg_bits(OMAP4430_LOGICRETSTATE_MASK, v, pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_set_mem_onst(struct powerdomain *pwrdm, u8 
bank, u8 pwrst) { u32 m; m = omap2_pwrdm_get_mem_bank_onstate_mask(bank); omap4_prminst_rmw_inst_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_set_mem_retst(struct powerdomain *pwrdm, u8 bank, u8 pwrst) { u32 m; m = omap2_pwrdm_get_mem_bank_retst_mask(bank); omap4_prminst_rmw_inst_reg_bits(m, (pwrst << __ffs(m)), pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); return 0; } static int omap4_pwrdm_read_logic_pwrst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= OMAP4430_LOGICSTATEST_MASK; v >>= OMAP4430_LOGICSTATEST_SHIFT; return v; } static int omap4_pwrdm_read_logic_retst(struct powerdomain *pwrdm) { u32 v; v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= OMAP4430_LOGICRETSTATE_MASK; v >>= OMAP4430_LOGICRETSTATE_SHIFT; return v; } /** * omap4_pwrdm_read_prev_logic_pwrst - read the previous logic powerstate * @pwrdm: struct powerdomain * to read the state for * * Reads the previous logic powerstate for a powerdomain. This * function must determine the previous logic powerstate by first * checking the previous powerstate for the domain. If that was OFF, * then logic has been lost. If previous state was RETENTION, the * function reads the setting for the next retention logic state to * see the actual value. In every other case, the logic is * retained. Returns either PWRDM_POWER_OFF or PWRDM_POWER_RET * depending whether the logic was retained or not. 
*/ static int omap4_pwrdm_read_prev_logic_pwrst(struct powerdomain *pwrdm) { int state; state = omap4_pwrdm_read_prev_pwrst(pwrdm); if (state == PWRDM_POWER_OFF) return PWRDM_POWER_OFF; if (state != PWRDM_POWER_RET) return PWRDM_POWER_RET; return omap4_pwrdm_read_logic_retst(pwrdm); } static int omap4_pwrdm_read_mem_pwrst(struct powerdomain *pwrdm, u8 bank) { u32 m, v; m = omap2_pwrdm_get_mem_bank_stst_mask(bank); v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST); v &= m; v >>= __ffs(m); return v; } static int omap4_pwrdm_read_mem_retst(struct powerdomain *pwrdm, u8 bank) { u32 m, v; m = omap2_pwrdm_get_mem_bank_retst_mask(bank); v = omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTCTRL); v &= m; v >>= __ffs(m); return v; } /** * omap4_pwrdm_read_prev_mem_pwrst - reads the previous memory powerstate * @pwrdm: struct powerdomain * to read mem powerstate for * @bank: memory bank index * * Reads the previous memory powerstate for a powerdomain. This * function must determine the previous memory powerstate by first * checking the previous powerstate for the domain. If that was OFF, * then logic has been lost. If previous state was RETENTION, the * function reads the setting for the next memory retention state to * see the actual value. In every other case, the logic is * retained. Returns either PWRDM_POWER_OFF or PWRDM_POWER_RET * depending whether logic was retained or not. 
*/ static int omap4_pwrdm_read_prev_mem_pwrst(struct powerdomain *pwrdm, u8 bank) { int state; state = omap4_pwrdm_read_prev_pwrst(pwrdm); if (state == PWRDM_POWER_OFF) return PWRDM_POWER_OFF; if (state != PWRDM_POWER_RET) return PWRDM_POWER_RET; return omap4_pwrdm_read_mem_retst(pwrdm, bank); } static int omap4_pwrdm_wait_transition(struct powerdomain *pwrdm) { u32 c = 0; /* * REVISIT: pwrdm_wait_transition() may be better implemented * via a callback and a periodic timer check -- how long do we expect * powerdomain transitions to take? */ /* XXX Is this udelay() value meaningful? */ while ((omap4_prminst_read_inst_reg(pwrdm->prcm_partition, pwrdm->prcm_offs, OMAP4_PM_PWSTST) & OMAP_INTRANSITION_MASK) && (c++ < PWRDM_TRANSITION_BAILOUT)) udelay(1); if (c > PWRDM_TRANSITION_BAILOUT) { pr_err("powerdomain: %s: waited too long to complete transition\n", pwrdm->name); return -EAGAIN; } pr_debug("powerdomain: completed transition in %d loops\n", c); return 0; } struct pwrdm_ops omap4_pwrdm_operations = { .pwrdm_set_next_pwrst = omap4_pwrdm_set_next_pwrst, .pwrdm_read_next_pwrst = omap4_pwrdm_read_next_pwrst, .pwrdm_read_pwrst = omap4_pwrdm_read_pwrst, .pwrdm_read_prev_pwrst = omap4_pwrdm_read_prev_pwrst, .pwrdm_set_lowpwrstchange = omap4_pwrdm_set_lowpwrstchange, .pwrdm_clear_all_prev_pwrst = omap4_pwrdm_clear_all_prev_pwrst, .pwrdm_set_logic_retst = omap4_pwrdm_set_logic_retst, .pwrdm_read_logic_pwrst = omap4_pwrdm_read_logic_pwrst, .pwrdm_read_prev_logic_pwrst = omap4_pwrdm_read_prev_logic_pwrst, .pwrdm_read_logic_retst = omap4_pwrdm_read_logic_retst, .pwrdm_read_mem_pwrst = omap4_pwrdm_read_mem_pwrst, .pwrdm_read_mem_retst = omap4_pwrdm_read_mem_retst, .pwrdm_read_prev_mem_pwrst = omap4_pwrdm_read_prev_mem_pwrst, .pwrdm_set_mem_onst = omap4_pwrdm_set_mem_onst, .pwrdm_set_mem_retst = omap4_pwrdm_set_mem_retst, .pwrdm_wait_transition = omap4_pwrdm_wait_transition, }; /* * XXX document */ static struct prm_ll_data omap44xx_prm_ll_data = { .read_reset_sources = 
&omap44xx_prm_read_reset_sources, .was_any_context_lost_old = &omap44xx_prm_was_any_context_lost_old, .clear_context_loss_flags_old = &omap44xx_prm_clear_context_loss_flags_old, }; int __init omap44xx_prm_init(void) { if (!cpu_is_omap44xx() && !soc_is_omap54xx()) return 0; return prm_register(&omap44xx_prm_ll_data); } static int __init omap44xx_prm_late_init(void) { if (!cpu_is_omap44xx()) return 0; omap44xx_prm_enable_io_wakeup(); return omap_prcm_register_chain_handler(&omap4_prcm_irq_setup); } omap_subsys_initcall(omap44xx_prm_late_init); static void __exit omap44xx_prm_exit(void) { if (!cpu_is_omap44xx()) return; /* Should never happen */ WARN(prm_unregister(&omap44xx_prm_ll_data), "%s: prm_ll_data function pointer mismatch\n", __func__); } __exitcall(omap44xx_prm_exit);
gpl-2.0
thicklizard/Komodo
kernel/latencytop.c
2880
7722
/* * latencytop.c: Latency display infrastructure * * (C) Copyright 2008 Intel Corporation * Author: Arjan van de Ven <arjan@linux.intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. */ /* * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is * used by the "latencytop" userspace tool. The latency that is tracked is not * the 'traditional' interrupt latency (which is primarily caused by something * else consuming CPU), but instead, it is the latency an application encounters * because the kernel sleeps on its behalf for various reasons. * * This code tracks 2 levels of statistics: * 1) System level latency * 2) Per process latency * * The latency is stored in fixed sized data structures in an accumulated form; * if the "same" latency cause is hit twice, this will be tracked as one entry * in the data structure. Both the count, total accumulated latency and maximum * latency are tracked in this data structure. When the fixed size structure is * full, no new causes are tracked until the buffer is flushed by writing to * the /proc file; the userspace tool does this on a regular basis. * * A latency cause is identified by a stringified backtrace at the point that * the scheduler gets invoked. The userland tool will use this string to * identify the cause of the latency in human readable form. * * The information is exported via /proc/latency_stats and /proc/<pid>/latency. 
* These files look like this: * * Latency Top version : v0.1 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl * | | | | * | | | +----> the stringified backtrace * | | +---------> The maximum latency for this entry in microseconds * | +--------------> The accumulated latency for this entry (microseconds) * +-------------------> The number of times this entry is hit * * (note: the average latency is the accumulated latency divided by the number * of times) */ #include <linux/latencytop.h> #include <linux/kallsyms.h> #include <linux/seq_file.h> #include <linux/notifier.h> #include <linux/spinlock.h> #include <linux/proc_fs.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/list.h> #include <linux/stacktrace.h> static DEFINE_SPINLOCK(latency_lock); #define MAXLR 128 static struct latency_record latency_record[MAXLR]; int latencytop_enabled; void clear_all_latency_tracing(struct task_struct *p) { unsigned long flags; if (!latencytop_enabled) return; spin_lock_irqsave(&latency_lock, flags); memset(&p->latency_record, 0, sizeof(p->latency_record)); p->latency_record_count = 0; spin_unlock_irqrestore(&latency_lock, flags); } static void clear_global_latency_tracing(void) { unsigned long flags; spin_lock_irqsave(&latency_lock, flags); memset(&latency_record, 0, sizeof(latency_record)); spin_unlock_irqrestore(&latency_lock, flags); } static void __sched account_global_scheduler_latency(struct task_struct *tsk, struct latency_record *lat) { int firstnonnull = MAXLR + 1; int i; if (!latencytop_enabled) return; /* skip kernel threads for now */ if (!tsk->mm) return; for (i = 0; i < MAXLR; i++) { int q, same = 1; /* Nothing stored: */ if (!latency_record[i].backtrace[0]) { if (firstnonnull > i) firstnonnull = i; continue; } for (q = 0; q < LT_BACKTRACEDEPTH; q++) { unsigned long record = lat->backtrace[q]; if (latency_record[i].backtrace[q] != record) { same = 0; break; } /* 0 and ULONG_MAX entries mean end of backtrace: */ if (record 
== 0 || record == ULONG_MAX) break; } if (same) { latency_record[i].count++; latency_record[i].time += lat->time; if (lat->time > latency_record[i].max) latency_record[i].max = lat->time; return; } } i = firstnonnull; if (i >= MAXLR - 1) return; /* Allocted a new one: */ memcpy(&latency_record[i], lat, sizeof(struct latency_record)); } /* * Iterator to store a backtrace into a latency record entry */ static inline void store_stacktrace(struct task_struct *tsk, struct latency_record *lat) { struct stack_trace trace; memset(&trace, 0, sizeof(trace)); trace.max_entries = LT_BACKTRACEDEPTH; trace.entries = &lat->backtrace[0]; save_stack_trace_tsk(tsk, &trace); } /** * __account_scheduler_latency - record an occurred latency * @tsk - the task struct of the task hitting the latency * @usecs - the duration of the latency in microseconds * @inter - 1 if the sleep was interruptible, 0 if uninterruptible * * This function is the main entry point for recording latency entries * as called by the scheduler. * * This function has a few special cases to deal with normal 'non-latency' * sleeps: specifically, interruptible sleep longer than 5 msec is skipped * since this usually is caused by waiting for events via select() and co. * * Negative latencies (caused by time going backwards) are also explicitly * skipped. */ void __sched __account_scheduler_latency(struct task_struct *tsk, int usecs, int inter) { unsigned long flags; int i, q; struct latency_record lat; /* Long interruptible waits are generally user requested... 
*/ if (inter && usecs > 5000) return; /* Negative sleeps are time going backwards */ /* Zero-time sleeps are non-interesting */ if (usecs <= 0) return; memset(&lat, 0, sizeof(lat)); lat.count = 1; lat.time = usecs; lat.max = usecs; store_stacktrace(tsk, &lat); spin_lock_irqsave(&latency_lock, flags); account_global_scheduler_latency(tsk, &lat); for (i = 0; i < tsk->latency_record_count; i++) { struct latency_record *mylat; int same = 1; mylat = &tsk->latency_record[i]; for (q = 0; q < LT_BACKTRACEDEPTH; q++) { unsigned long record = lat.backtrace[q]; if (mylat->backtrace[q] != record) { same = 0; break; } /* 0 and ULONG_MAX entries mean end of backtrace: */ if (record == 0 || record == ULONG_MAX) break; } if (same) { mylat->count++; mylat->time += lat.time; if (lat.time > mylat->max) mylat->max = lat.time; goto out_unlock; } } /* * short term hack; if we're > 32 we stop; future we recycle: */ if (tsk->latency_record_count >= LT_SAVECOUNT) goto out_unlock; /* Allocated a new one: */ i = tsk->latency_record_count++; memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record)); out_unlock: spin_unlock_irqrestore(&latency_lock, flags); } static int lstats_show(struct seq_file *m, void *v) { int i; seq_puts(m, "Latency Top version : v0.1\n"); for (i = 0; i < MAXLR; i++) { struct latency_record *lr = &latency_record[i]; if (lr->backtrace[0]) { int q; seq_printf(m, "%i %lu %lu", lr->count, lr->time, lr->max); for (q = 0; q < LT_BACKTRACEDEPTH; q++) { unsigned long bt = lr->backtrace[q]; if (!bt) break; if (bt == ULONG_MAX) break; seq_printf(m, " %ps", (void *)bt); } seq_printf(m, "\n"); } } return 0; } static ssize_t lstats_write(struct file *file, const char __user *buf, size_t count, loff_t *offs) { clear_global_latency_tracing(); return count; } static int lstats_open(struct inode *inode, struct file *filp) { return single_open(filp, lstats_show, NULL); } static const struct file_operations lstats_fops = { .open = lstats_open, .read = seq_read, .write = 
lstats_write, .llseek = seq_lseek, .release = single_release, }; static int __init init_lstats_procfs(void) { proc_create("latency_stats", 0644, NULL, &lstats_fops); return 0; } device_initcall(init_lstats_procfs);
gpl-2.0
SerenityS/android_kernel_samsung_slte
drivers/hid/hid-cherry.c
3136
2097
/* * HID driver for some cherry "special" devices * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2006-2007 Jiri Kosina * Copyright (c) 2008 Jiri Slaby */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/device.h> #include <linux/hid.h> #include <linux/module.h> #include "hid-ids.h" /* * Cherry Cymotion keyboard have an invalid HID report descriptor, * that needs fixing before we can parse it. */ static __u8 *ch_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize >= 18 && rdesc[11] == 0x3c && rdesc[12] == 0x02) { hid_info(hdev, "fixing up Cherry Cymotion report descriptor\n"); rdesc[11] = rdesc[16] = 0xff; rdesc[12] = rdesc[17] = 0x03; } return rdesc; } #define ch_map_key_clear(c) hid_map_usage_clear(hi, usage, bit, max, \ EV_KEY, (c)) static int ch_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { if ((usage->hid & HID_USAGE_PAGE) != HID_UP_CONSUMER) return 0; switch (usage->hid & HID_USAGE) { case 0x301: ch_map_key_clear(KEY_PROG1); break; case 0x302: ch_map_key_clear(KEY_PROG2); break; case 0x303: ch_map_key_clear(KEY_PROG3); break; default: return 0; } return 1; } static const struct hid_device_id ch_devices[] = { { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION) }, { HID_USB_DEVICE(USB_VENDOR_ID_CHERRY, USB_DEVICE_ID_CHERRY_CYMOTION_SOLAR) }, { } }; MODULE_DEVICE_TABLE(hid, ch_devices); static struct hid_driver ch_driver = { .name = "cherry", .id_table = ch_devices, .report_fixup = ch_report_fixup, .input_mapping = ch_input_mapping, }; 
module_hid_driver(ch_driver); MODULE_LICENSE("GPL");
gpl-2.0
tyler6389/android_kernel_samsung_baffinve
drivers/net/mlx4/reset.c
4160
5044
/* * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/errno.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/jiffies.h> #include "mlx4.h" int mlx4_reset(struct mlx4_dev *dev) { void __iomem *reset; u32 *hca_header = NULL; int pcie_cap; u16 devctl; u16 linkctl; u16 vendor; unsigned long end; u32 sem; int i; int err = 0; #define MLX4_RESET_BASE 0xf0000 #define MLX4_RESET_SIZE 0x400 #define MLX4_SEM_OFFSET 0x3fc #define MLX4_RESET_OFFSET 0x10 #define MLX4_RESET_VALUE swab32(1) #define MLX4_SEM_TIMEOUT_JIFFIES (10 * HZ) #define MLX4_RESET_TIMEOUT_JIFFIES (2 * HZ) /* * Reset the chip. This is somewhat ugly because we have to * save off the PCI header before reset and then restore it * after the chip reboots. We skip config space offsets 22 * and 23 since those have a special meaning. */ /* Do we need to save off the full 4K PCI Express header?? */ hca_header = kmalloc(256, GFP_KERNEL); if (!hca_header) { err = -ENOMEM; mlx4_err(dev, "Couldn't allocate memory to save HCA " "PCI header, aborting.\n"); goto out; } pcie_cap = pci_find_capability(dev->pdev, PCI_CAP_ID_EXP); for (i = 0; i < 64; ++i) { if (i == 22 || i == 23) continue; if (pci_read_config_dword(dev->pdev, i * 4, hca_header + i)) { err = -ENODEV; mlx4_err(dev, "Couldn't save HCA " "PCI header, aborting.\n"); goto out; } } reset = ioremap(pci_resource_start(dev->pdev, 0) + MLX4_RESET_BASE, MLX4_RESET_SIZE); if (!reset) { err = -ENOMEM; mlx4_err(dev, "Couldn't map HCA reset register, aborting.\n"); goto out; } /* grab HW semaphore to lock out flash updates */ end = jiffies + MLX4_SEM_TIMEOUT_JIFFIES; do { sem = readl(reset + MLX4_SEM_OFFSET); if (!sem) break; msleep(1); } while (time_before(jiffies, end)); if (sem) { mlx4_err(dev, "Failed to obtain HW semaphore, aborting\n"); err = -EAGAIN; iounmap(reset); goto out; } /* actually hit reset */ writel(MLX4_RESET_VALUE, reset + MLX4_RESET_OFFSET); iounmap(reset); /* Docs say to wait one second before accessing device */ msleep(1000); end = jiffies + 
MLX4_RESET_TIMEOUT_JIFFIES; do { if (!pci_read_config_word(dev->pdev, PCI_VENDOR_ID, &vendor) && vendor != 0xffff) break; msleep(1); } while (time_before(jiffies, end)); if (vendor == 0xffff) { err = -ENODEV; mlx4_err(dev, "PCI device did not come back after reset, " "aborting.\n"); goto out; } /* Now restore the PCI headers */ if (pcie_cap) { devctl = hca_header[(pcie_cap + PCI_EXP_DEVCTL) / 4]; if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_DEVCTL, devctl)) { err = -ENODEV; mlx4_err(dev, "Couldn't restore HCA PCI Express " "Device Control register, aborting.\n"); goto out; } linkctl = hca_header[(pcie_cap + PCI_EXP_LNKCTL) / 4]; if (pci_write_config_word(dev->pdev, pcie_cap + PCI_EXP_LNKCTL, linkctl)) { err = -ENODEV; mlx4_err(dev, "Couldn't restore HCA PCI Express " "Link control register, aborting.\n"); goto out; } } for (i = 0; i < 16; ++i) { if (i * 4 == PCI_COMMAND) continue; if (pci_write_config_dword(dev->pdev, i * 4, hca_header[i])) { err = -ENODEV; mlx4_err(dev, "Couldn't restore HCA reg %x, " "aborting.\n", i); goto out; } } if (pci_write_config_dword(dev->pdev, PCI_COMMAND, hca_header[PCI_COMMAND / 4])) { err = -ENODEV; mlx4_err(dev, "Couldn't restore HCA COMMAND, " "aborting.\n"); goto out; } out: kfree(hca_header); return err; }
gpl-2.0
ArtisteHsu/jetson-tk1-r21.3-kernel
drivers/media/usb/dvb-usb-v2/usb_urb.c
4160
9686
/* usb-urb.c is part of the DVB USB library. * * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) * see dvb-usb-init.c for copyright information. * * This file keeps functions for initializing and handling the * BULK and ISOC USB data transfers in a generic way. * Can be used for DVB-only and also, that's the plan, for * Hybrid USB devices (analog and DVB). */ #include "dvb_usb_common.h" /* URB stuff for streaming */ int usb_urb_reconfig(struct usb_data_stream *stream, struct usb_data_stream_properties *props); static void usb_urb_complete(struct urb *urb) { struct usb_data_stream *stream = urb->context; int ptype = usb_pipetype(urb->pipe); int i; u8 *b; dev_dbg_ratelimited(&stream->udev->dev, "%s: %s urb completed status=%d length=%d/%d pack_num=%d errors=%d\n", __func__, ptype == PIPE_ISOCHRONOUS ? "isoc" : "bulk", urb->status, urb->actual_length, urb->transfer_buffer_length, urb->number_of_packets, urb->error_count); switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ dev_dbg_ratelimited(&stream->udev->dev, "%s: urb completition failed=%d\n", __func__, urb->status); break; } b = (u8 *) urb->transfer_buffer; switch (ptype) { case PIPE_ISOCHRONOUS: for (i = 0; i < urb->number_of_packets; i++) { if (urb->iso_frame_desc[i].status != 0) dev_dbg(&stream->udev->dev, "%s: iso frame descriptor has an error=%d\n", __func__, urb->iso_frame_desc[i].status); else if (urb->iso_frame_desc[i].actual_length > 0) stream->complete(stream, b + urb->iso_frame_desc[i].offset, urb->iso_frame_desc[i].actual_length); urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; } break; case PIPE_BULK: if (urb->actual_length > 0) stream->complete(stream, b, urb->actual_length); break; default: dev_err(&stream->udev->dev, "%s: unknown endpoint type in completition handler\n", KBUILD_MODNAME); return; } usb_submit_urb(urb, GFP_ATOMIC); } int 
usb_urb_killv2(struct usb_data_stream *stream)
{
	int i;

	/*
	 * Cancel every URB previously handed to the USB core.
	 * usb_kill_urb() blocks until the completion handler has finished.
	 */
	for (i = 0; i < stream->urbs_submitted; i++) {
		dev_dbg(&stream->udev->dev, "%s: kill urb=%d\n", __func__, i);

		/* stop the URB */
		usb_kill_urb(stream->urb_list[i]);
	}
	stream->urbs_submitted = 0;
	return 0;
}

/*
 * Submit all initialized URBs to the USB core.
 * If @props is non-NULL the stream is reconfigured first (which may
 * re-allocate the URB list). On any submit failure every URB submitted
 * so far is killed again and the error is returned.
 */
int usb_urb_submitv2(struct usb_data_stream *stream,
		struct usb_data_stream_properties *props)
{
	int i, ret;

	if (props) {
		ret = usb_urb_reconfig(stream, props);
		if (ret < 0)
			return ret;
	}

	for (i = 0; i < stream->urbs_initialized; i++) {
		dev_dbg(&stream->udev->dev, "%s: submit urb=%d\n", __func__, i);
		ret = usb_submit_urb(stream->urb_list[i], GFP_ATOMIC);
		if (ret) {
			dev_err(&stream->udev->dev,
					"%s: could not submit urb no. %d - get them all back\n",
					KBUILD_MODNAME, i);
			usb_urb_killv2(stream);
			return ret;
		}
		stream->urbs_submitted++;
	}
	return 0;
}

/*
 * Free every allocated URB, after first making sure none is in flight.
 * Iterates backwards from the highest initialized index.
 */
static int usb_urb_free_urbs(struct usb_data_stream *stream)
{
	int i;

	usb_urb_killv2(stream);

	for (i = stream->urbs_initialized - 1; i >= 0; i--) {
		if (stream->urb_list[i]) {
			dev_dbg(&stream->udev->dev, "%s: free urb=%d\n",
					__func__, i);

			/* free the URBs */
			usb_free_urb(stream->urb_list[i]);
		}
	}
	stream->urbs_initialized = 0;
	return 0;
}

/*
 * Allocate and pre-fill one bulk URB per configured buffer.
 * The coherent buffers (buf_list/dma_addr) must already exist; the URBs
 * use URB_NO_TRANSFER_DMA_MAP, i.e. the coherent DMA address directly.
 */
static int usb_urb_alloc_bulk_urbs(struct usb_data_stream *stream)
{
	int i, j;

	/* allocate the URBs */
	for (i = 0; i < stream->props.count; i++) {
		dev_dbg(&stream->udev->dev, "%s: alloc urb=%d\n", __func__, i);
		stream->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC);
		if (!stream->urb_list[i]) {
			dev_dbg(&stream->udev->dev, "%s: failed\n", __func__);
			/* unwind the URBs allocated so far */
			for (j = 0; j < i; j++)
				usb_free_urb(stream->urb_list[j]);
			return -ENOMEM;
		}
		usb_fill_bulk_urb(stream->urb_list[i],
				stream->udev,
				usb_rcvbulkpipe(stream->udev,
						stream->props.endpoint),
				stream->buf_list[i],
				stream->props.u.bulk.buffersize,
				usb_urb_complete, stream);

		stream->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		stream->urb_list[i]->transfer_dma = stream->dma_addr[i];
		stream->urbs_initialized++;
	}
	return 0;
}

/*
 * Allocate and fill isochronous URBs; each URB carries
 * props.u.isoc.framesperurb frames of props.u.isoc.framesize bytes,
 * laid out back to back inside one coherent buffer.
 */
static int usb_urb_alloc_isoc_urbs(struct usb_data_stream *stream)
{
	int i, j;

	/* allocate the URBs */
	for (i = 0; i < stream->props.count; i++) {
		struct urb *urb;
		int frame_offset = 0;

		dev_dbg(&stream->udev->dev, "%s: alloc urb=%d\n", __func__, i);
		stream->urb_list[i] = usb_alloc_urb(
				stream->props.u.isoc.framesperurb, GFP_ATOMIC);
		if (!stream->urb_list[i]) {
			dev_dbg(&stream->udev->dev, "%s: failed\n", __func__);
			/* unwind the URBs allocated so far */
			for (j = 0; j < i; j++)
				usb_free_urb(stream->urb_list[j]);
			return -ENOMEM;
		}

		urb = stream->urb_list[i];

		urb->dev = stream->udev;
		urb->context = stream;
		urb->complete = usb_urb_complete;
		urb->pipe = usb_rcvisocpipe(stream->udev,
				stream->props.endpoint);
		urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP;
		urb->interval = stream->props.u.isoc.interval;
		urb->number_of_packets = stream->props.u.isoc.framesperurb;
		urb->transfer_buffer_length = stream->props.u.isoc.framesize *
				stream->props.u.isoc.framesperurb;
		urb->transfer_buffer = stream->buf_list[i];
		urb->transfer_dma = stream->dma_addr[i];

		/* describe each frame's slice of the transfer buffer */
		for (j = 0; j < stream->props.u.isoc.framesperurb; j++) {
			urb->iso_frame_desc[j].offset = frame_offset;
			urb->iso_frame_desc[j].length =
					stream->props.u.isoc.framesize;
			frame_offset += stream->props.u.isoc.framesize;
		}
		stream->urbs_initialized++;
	}
	return 0;
}

/*
 * Release all coherent DMA buffers owned by the stream.
 * No-op unless USB_STATE_URB_BUF is set; always clears that flag.
 */
static int usb_free_stream_buffers(struct usb_data_stream *stream)
{
	if (stream->state & USB_STATE_URB_BUF) {
		while (stream->buf_num) {
			stream->buf_num--;
			dev_dbg(&stream->udev->dev, "%s: free buf=%d\n",
					__func__, stream->buf_num);
			usb_free_coherent(stream->udev, stream->buf_size,
					stream->buf_list[stream->buf_num],
					stream->dma_addr[stream->buf_num]);
		}
	}
	stream->state &= ~USB_STATE_URB_BUF;
	return 0;
}

/*
 * Allocate @num coherent DMA buffers of @size bytes each.
 * On failure everything allocated so far is freed again (continues on
 * the next line with the usb_alloc_coherent() call and error handling).
 */
static int usb_alloc_stream_buffers(struct usb_data_stream *stream, int num,
		unsigned long size)
{
	stream->buf_num = 0;
	stream->buf_size = size;

	dev_dbg(&stream->udev->dev,
			"%s: all in all I will use %lu bytes for streaming\n",
			__func__, num * size);

	for (stream->buf_num = 0; stream->buf_num < num; stream->buf_num++) {
		stream->buf_list[stream->buf_num] = usb_alloc_coherent(
stream->udev, size, GFP_ATOMIC, &stream->dma_addr[stream->buf_num]); if (!stream->buf_list[stream->buf_num]) { dev_dbg(&stream->udev->dev, "%s: alloc buf=%d failed\n", __func__, stream->buf_num); usb_free_stream_buffers(stream); return -ENOMEM; } dev_dbg(&stream->udev->dev, "%s: alloc buf=%d %p (dma %llu)\n", __func__, stream->buf_num, stream->buf_list[stream->buf_num], (long long)stream->dma_addr[stream->buf_num]); memset(stream->buf_list[stream->buf_num], 0, size); stream->state |= USB_STATE_URB_BUF; } return 0; } int usb_urb_reconfig(struct usb_data_stream *stream, struct usb_data_stream_properties *props) { int buf_size; if (!props) return 0; /* check allocated buffers are large enough for the request */ if (props->type == USB_BULK) { buf_size = stream->props.u.bulk.buffersize; } else if (props->type == USB_ISOC) { buf_size = props->u.isoc.framesize * props->u.isoc.framesperurb; } else { dev_err(&stream->udev->dev, "%s: invalid endpoint type=%d\n", KBUILD_MODNAME, props->type); return -EINVAL; } if (stream->buf_num < props->count || stream->buf_size < buf_size) { dev_err(&stream->udev->dev, "%s: cannot reconfigure as allocated buffers are too small\n", KBUILD_MODNAME); return -EINVAL; } /* check if all fields are same */ if (stream->props.type == props->type && stream->props.count == props->count && stream->props.endpoint == props->endpoint) { if (props->type == USB_BULK && props->u.bulk.buffersize == stream->props.u.bulk.buffersize) return 0; else if (props->type == USB_ISOC && props->u.isoc.framesperurb == stream->props.u.isoc.framesperurb && props->u.isoc.framesize == stream->props.u.isoc.framesize && props->u.isoc.interval == stream->props.u.isoc.interval) return 0; } dev_dbg(&stream->udev->dev, "%s: re-alloc urbs\n", __func__); usb_urb_free_urbs(stream); memcpy(&stream->props, props, sizeof(*props)); if (props->type == USB_BULK) return usb_urb_alloc_bulk_urbs(stream); else if (props->type == USB_ISOC) return usb_urb_alloc_isoc_urbs(stream); return 0; } int 
usb_urb_initv2(struct usb_data_stream *stream,
		const struct usb_data_stream_properties *props)
{
	int ret;

	if (!stream || !props)
		return -EINVAL;

	/* adopt the caller's stream properties */
	memcpy(&stream->props, props, sizeof(*props));

	/* a stream without a data-completion callback cannot deliver data */
	if (!stream->complete) {
		dev_err(&stream->udev->dev,
				"%s: there is no data callback - this doesn't make sense\n",
				KBUILD_MODNAME);
		return -EINVAL;
	}

	switch (stream->props.type) {
	case USB_BULK:
		ret = usb_alloc_stream_buffers(stream, stream->props.count,
				stream->props.u.bulk.buffersize);
		if (ret < 0)
			return ret;

		return usb_urb_alloc_bulk_urbs(stream);
	case USB_ISOC:
		/* one buffer holds framesperurb frames of framesize bytes */
		ret = usb_alloc_stream_buffers(stream, stream->props.count,
				stream->props.u.isoc.framesize *
				stream->props.u.isoc.framesperurb);
		if (ret < 0)
			return ret;

		return usb_urb_alloc_isoc_urbs(stream);
	default:
		dev_err(&stream->udev->dev,
				"%s: unknown urb-type for data transfer\n",
				KBUILD_MODNAME);
		return -EINVAL;
	}
}

/* Counterpart of usb_urb_initv2(): free the URBs, then the buffers. */
int usb_urb_exitv2(struct usb_data_stream *stream)
{
	usb_urb_free_urbs(stream);
	usb_free_stream_buffers(stream);
	return 0;
}
gpl-2.0
vl197602/htc_msm8960
drivers/char/pcmcia/synclink_cs.c
4928
111464
/* * linux/drivers/char/pcmcia/synclink_cs.c * * $Id: synclink_cs.c,v 4.34 2005/09/08 13:20:54 paulkf Exp $ * * Device driver for Microgate SyncLink PC Card * multiprotocol serial adapter. * * written by Paul Fulghum for Microgate Corporation * paulkf@microgate.com * * Microgate and SyncLink are trademarks of Microgate Corporation * * This code is released under the GNU General Public License (GPL) * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED * OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #define VERSION(ver,rel,seq) (((ver)<<16) | ((rel)<<8) | (seq)) #if defined(__i386__) # define BREAKPOINT() asm(" int $3"); #else # define BREAKPOINT() { } #endif #define MAX_DEVICE_COUNT 4 #include <linux/module.h> #include <linux/errno.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/timer.h> #include <linux/time.h> #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial.h> #include <linux/major.h> #include <linux/string.h> #include <linux/fcntl.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/mm.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/ioctl.h> #include <linux/synclink.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/dma.h> #include <linux/bitops.h> #include <asm/types.h> #include <linux/termios.h> #include <linux/workqueue.h> #include <linux/hdlc.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #if defined(CONFIG_HDLC) || (defined(CONFIG_HDLC_MODULE) && defined(CONFIG_SYNCLINK_CS_MODULE)) #define SYNCLINK_GENERIC_HDLC 1 #else #define SYNCLINK_GENERIC_HDLC 0 #endif #define GET_USER(error,value,addr) error = get_user(value,addr) #define COPY_FROM_USER(error,dest,src,size) error = copy_from_user(dest,src,size) ? -EFAULT : 0 #define PUT_USER(error,value,addr) error = put_user(value,addr) #define COPY_TO_USER(error,dest,src,size) error = copy_to_user(dest,src,size) ? 
-EFAULT : 0 #include <asm/uaccess.h> static MGSL_PARAMS default_params = { MGSL_MODE_HDLC, /* unsigned long mode */ 0, /* unsigned char loopback; */ HDLC_FLAG_UNDERRUN_ABORT15, /* unsigned short flags; */ HDLC_ENCODING_NRZI_SPACE, /* unsigned char encoding; */ 0, /* unsigned long clock_speed; */ 0xff, /* unsigned char addr_filter; */ HDLC_CRC_16_CCITT, /* unsigned short crc_type; */ HDLC_PREAMBLE_LENGTH_8BITS, /* unsigned char preamble_length; */ HDLC_PREAMBLE_PATTERN_NONE, /* unsigned char preamble; */ 9600, /* unsigned long data_rate; */ 8, /* unsigned char data_bits; */ 1, /* unsigned char stop_bits; */ ASYNC_PARITY_NONE /* unsigned char parity; */ }; typedef struct { int count; unsigned char status; char data[1]; } RXBUF; /* The queue of BH actions to be performed */ #define BH_RECEIVE 1 #define BH_TRANSMIT 2 #define BH_STATUS 4 #define IO_PIN_SHUTDOWN_LIMIT 100 #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) struct _input_signal_events { int ri_up; int ri_down; int dsr_up; int dsr_down; int dcd_up; int dcd_down; int cts_up; int cts_down; }; /* * Device instance data structure */ typedef struct _mgslpc_info { struct tty_port port; void *if_ptr; /* General purpose pointer (used by SPPP) */ int magic; int line; struct mgsl_icount icount; int timeout; int x_char; /* xon/xoff character */ unsigned char read_status_mask; unsigned char ignore_status_mask; unsigned char *tx_buf; int tx_put; int tx_get; int tx_count; /* circular list of fixed length rx buffers */ unsigned char *rx_buf; /* memory allocated for all rx buffers */ int rx_buf_total_size; /* size of memory allocated for rx buffers */ int rx_put; /* index of next empty rx buffer */ int rx_get; /* index of next full rx buffer */ int rx_buf_size; /* size in bytes of single rx buffer */ int rx_buf_count; /* total number of rx buffers */ int rx_frame_count; /* number of full rx buffers */ wait_queue_head_t status_event_wait_q; wait_queue_head_t event_wait_q; struct timer_list tx_timer; 
/* HDLC transmit timeout timer */ struct _mgslpc_info *next_device; /* device list link */ unsigned short imra_value; unsigned short imrb_value; unsigned char pim_value; spinlock_t lock; struct work_struct task; /* task structure for scheduling bh */ u32 max_frame_size; u32 pending_bh; bool bh_running; bool bh_requested; int dcd_chkcount; /* check counts to prevent */ int cts_chkcount; /* too many IRQs if a signal */ int dsr_chkcount; /* is floating */ int ri_chkcount; bool rx_enabled; bool rx_overflow; bool tx_enabled; bool tx_active; bool tx_aborting; u32 idle_mode; int if_mode; /* serial interface selection (RS-232, v.35 etc) */ char device_name[25]; /* device instance name */ unsigned int io_base; /* base I/O address of adapter */ unsigned int irq_level; MGSL_PARAMS params; /* communications parameters */ unsigned char serial_signals; /* current serial signal states */ bool irq_occurred; /* for diagnostics use */ char testing_irq; unsigned int init_error; /* startup error (DIAGS) */ char flag_buf[MAX_ASYNC_BUFFER_SIZE]; bool drop_rts_on_tx_done; struct _input_signal_events input_signal_events; /* PCMCIA support */ struct pcmcia_device *p_dev; int stop; /* SPPP/Cisco HDLC device parts */ int netcount; spinlock_t netlock; #if SYNCLINK_GENERIC_HDLC struct net_device *netdev; #endif } MGSLPC_INFO; #define MGSLPC_MAGIC 0x5402 /* * The size of the serial xmit buffer is 1 page, or 4096 bytes */ #define TXBUFSIZE 4096 #define CHA 0x00 /* channel A offset */ #define CHB 0x40 /* channel B offset */ /* * FIXME: PPC has PVR defined in asm/reg.h. For now we just undef it. 
*/ #undef PVR #define RXFIFO 0 #define TXFIFO 0 #define STAR 0x20 #define CMDR 0x20 #define RSTA 0x21 #define PRE 0x21 #define MODE 0x22 #define TIMR 0x23 #define XAD1 0x24 #define XAD2 0x25 #define RAH1 0x26 #define RAH2 0x27 #define DAFO 0x27 #define RAL1 0x28 #define RFC 0x28 #define RHCR 0x29 #define RAL2 0x29 #define RBCL 0x2a #define XBCL 0x2a #define RBCH 0x2b #define XBCH 0x2b #define CCR0 0x2c #define CCR1 0x2d #define CCR2 0x2e #define CCR3 0x2f #define VSTR 0x34 #define BGR 0x34 #define RLCR 0x35 #define AML 0x36 #define AMH 0x37 #define GIS 0x38 #define IVA 0x38 #define IPC 0x39 #define ISR 0x3a #define IMR 0x3a #define PVR 0x3c #define PIS 0x3d #define PIM 0x3d #define PCR 0x3e #define CCR4 0x3f // IMR/ISR #define IRQ_BREAK_ON BIT15 // rx break detected #define IRQ_DATAOVERRUN BIT14 // receive data overflow #define IRQ_ALLSENT BIT13 // all sent #define IRQ_UNDERRUN BIT12 // transmit data underrun #define IRQ_TIMER BIT11 // timer interrupt #define IRQ_CTS BIT10 // CTS status change #define IRQ_TXREPEAT BIT9 // tx message repeat #define IRQ_TXFIFO BIT8 // transmit pool ready #define IRQ_RXEOM BIT7 // receive message end #define IRQ_EXITHUNT BIT6 // receive frame start #define IRQ_RXTIME BIT6 // rx char timeout #define IRQ_DCD BIT2 // carrier detect status change #define IRQ_OVERRUN BIT1 // receive frame overflow #define IRQ_RXFIFO BIT0 // receive pool full // STAR #define XFW BIT6 // transmit FIFO write enable #define CEC BIT2 // command executing #define CTS BIT1 // CTS state #define PVR_DTR BIT0 #define PVR_DSR BIT1 #define PVR_RI BIT2 #define PVR_AUTOCTS BIT3 #define PVR_RS232 0x20 /* 0010b */ #define PVR_V35 0xe0 /* 1110b */ #define PVR_RS422 0x40 /* 0100b */ /* Register access functions */ #define write_reg(info, reg, val) outb((val),(info)->io_base + (reg)) #define read_reg(info, reg) inb((info)->io_base + (reg)) #define read_reg16(info, reg) inw((info)->io_base + (reg)) #define write_reg16(info, reg, val) outw((val), (info)->io_base + (reg)) 
#define set_reg_bits(info, reg, mask) \ write_reg(info, (reg), \ (unsigned char) (read_reg(info, (reg)) | (mask))) #define clear_reg_bits(info, reg, mask) \ write_reg(info, (reg), \ (unsigned char) (read_reg(info, (reg)) & ~(mask))) /* * interrupt enable/disable routines */ static void irq_disable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask) { if (channel == CHA) { info->imra_value |= mask; write_reg16(info, CHA + IMR, info->imra_value); } else { info->imrb_value |= mask; write_reg16(info, CHB + IMR, info->imrb_value); } } static void irq_enable(MGSLPC_INFO *info, unsigned char channel, unsigned short mask) { if (channel == CHA) { info->imra_value &= ~mask; write_reg16(info, CHA + IMR, info->imra_value); } else { info->imrb_value &= ~mask; write_reg16(info, CHB + IMR, info->imrb_value); } } #define port_irq_disable(info, mask) \ { info->pim_value |= (mask); write_reg(info, PIM, info->pim_value); } #define port_irq_enable(info, mask) \ { info->pim_value &= ~(mask); write_reg(info, PIM, info->pim_value); } static void rx_start(MGSLPC_INFO *info); static void rx_stop(MGSLPC_INFO *info); static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty); static void tx_stop(MGSLPC_INFO *info); static void tx_set_idle(MGSLPC_INFO *info); static void get_signals(MGSLPC_INFO *info); static void set_signals(MGSLPC_INFO *info); static void reset_device(MGSLPC_INFO *info); static void hdlc_mode(MGSLPC_INFO *info); static void async_mode(MGSLPC_INFO *info); static void tx_timeout(unsigned long context); static int carrier_raised(struct tty_port *port); static void dtr_rts(struct tty_port *port, int onoff); #if SYNCLINK_GENERIC_HDLC #define dev_to_port(D) (dev_to_hdlc(D)->priv) static void hdlcdev_tx_done(MGSLPC_INFO *info); static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size); static int hdlcdev_init(MGSLPC_INFO *info); static void hdlcdev_exit(MGSLPC_INFO *info); #endif static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit); 
static bool register_test(MGSLPC_INFO *info); static bool irq_test(MGSLPC_INFO *info); static int adapter_test(MGSLPC_INFO *info); static int claim_resources(MGSLPC_INFO *info); static void release_resources(MGSLPC_INFO *info); static void mgslpc_add_device(MGSLPC_INFO *info); static void mgslpc_remove_device(MGSLPC_INFO *info); static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty); static void rx_reset_buffers(MGSLPC_INFO *info); static int rx_alloc_buffers(MGSLPC_INFO *info); static void rx_free_buffers(MGSLPC_INFO *info); static irqreturn_t mgslpc_isr(int irq, void *dev_id); /* * Bottom half interrupt handlers */ static void bh_handler(struct work_struct *work); static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty); static void bh_status(MGSLPC_INFO *info); /* * ioctl handlers */ static int tiocmget(struct tty_struct *tty); static int tiocmset(struct tty_struct *tty, unsigned int set, unsigned int clear); static int get_stats(MGSLPC_INFO *info, struct mgsl_icount __user *user_icount); static int get_params(MGSLPC_INFO *info, MGSL_PARAMS __user *user_params); static int set_params(MGSLPC_INFO *info, MGSL_PARAMS __user *new_params, struct tty_struct *tty); static int get_txidle(MGSLPC_INFO *info, int __user *idle_mode); static int set_txidle(MGSLPC_INFO *info, int idle_mode); static int set_txenable(MGSLPC_INFO *info, int enable, struct tty_struct *tty); static int tx_abort(MGSLPC_INFO *info); static int set_rxenable(MGSLPC_INFO *info, int enable); static int wait_events(MGSLPC_INFO *info, int __user *mask); static MGSLPC_INFO *mgslpc_device_list = NULL; static int mgslpc_device_count = 0; /* * Set this param to non-zero to load eax with the * .text section address and breakpoint on module load. * This is useful for use with gdb and add-symbol-file command. */ static bool break_on_load=0; /* * Driver major number, defaults to zero to get auto * assigned major number. May be forced as module parameter. 
*/ static int ttymajor=0; static int debug_level = 0; static int maxframe[MAX_DEVICE_COUNT] = {0,}; module_param(break_on_load, bool, 0); module_param(ttymajor, int, 0); module_param(debug_level, int, 0); module_param_array(maxframe, int, NULL, 0); MODULE_LICENSE("GPL"); static char *driver_name = "SyncLink PC Card driver"; static char *driver_version = "$Revision: 4.34 $"; static struct tty_driver *serial_driver; /* number of characters left in xmit buffer before we ask for more */ #define WAKEUP_CHARS 256 static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty); static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout); /* PCMCIA prototypes */ static int mgslpc_config(struct pcmcia_device *link); static void mgslpc_release(u_long arg); static void mgslpc_detach(struct pcmcia_device *p_dev); /* * 1st function defined in .text section. Calling this function in * init_module() followed by a breakpoint allows a remote debugger * (gdb) to get the .text address for the add-symbol-file command. * This allows remote debugging of dynamically loadable modules. */ static void* mgslpc_get_text_ptr(void) { return mgslpc_get_text_ptr; } /** * line discipline callback wrappers * * The wrappers maintain line discipline references * while calling into the line discipline. 
* * ldisc_receive_buf - pass receive data to line discipline */ static void ldisc_receive_buf(struct tty_struct *tty, const __u8 *data, char *flags, int count) { struct tty_ldisc *ld; if (!tty) return; ld = tty_ldisc_ref(tty); if (ld) { if (ld->ops->receive_buf) ld->ops->receive_buf(tty, data, flags, count); tty_ldisc_deref(ld); } } static const struct tty_port_operations mgslpc_port_ops = { .carrier_raised = carrier_raised, .dtr_rts = dtr_rts }; static int mgslpc_probe(struct pcmcia_device *link) { MGSLPC_INFO *info; int ret; if (debug_level >= DEBUG_LEVEL_INFO) printk("mgslpc_attach\n"); info = kzalloc(sizeof(MGSLPC_INFO), GFP_KERNEL); if (!info) { printk("Error can't allocate device instance data\n"); return -ENOMEM; } info->magic = MGSLPC_MAGIC; tty_port_init(&info->port); info->port.ops = &mgslpc_port_ops; INIT_WORK(&info->task, bh_handler); info->max_frame_size = 4096; info->port.close_delay = 5*HZ/10; info->port.closing_wait = 30*HZ; init_waitqueue_head(&info->status_event_wait_q); init_waitqueue_head(&info->event_wait_q); spin_lock_init(&info->lock); spin_lock_init(&info->netlock); memcpy(&info->params,&default_params,sizeof(MGSL_PARAMS)); info->idle_mode = HDLC_TXIDLE_FLAGS; info->imra_value = 0xffff; info->imrb_value = 0xffff; info->pim_value = 0xff; info->p_dev = link; link->priv = info; /* Initialize the struct pcmcia_device structure */ ret = mgslpc_config(link); if (ret) return ret; mgslpc_add_device(info); return 0; } /* Card has been inserted. 
*/ static int mgslpc_ioprobe(struct pcmcia_device *p_dev, void *priv_data) { return pcmcia_request_io(p_dev); } static int mgslpc_config(struct pcmcia_device *link) { MGSLPC_INFO *info = link->priv; int ret; if (debug_level >= DEBUG_LEVEL_INFO) printk("mgslpc_config(0x%p)\n", link); link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; ret = pcmcia_loop_config(link, mgslpc_ioprobe, NULL); if (ret != 0) goto failed; link->config_index = 8; link->config_regs = PRESENT_OPTION; ret = pcmcia_request_irq(link, mgslpc_isr); if (ret) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; info->io_base = link->resource[0]->start; info->irq_level = link->irq; return 0; failed: mgslpc_release((u_long)link); return -ENODEV; } /* Card has been removed. * Unregister device and release PCMCIA configuration. * If device is open, postpone until it is closed. */ static void mgslpc_release(u_long arg) { struct pcmcia_device *link = (struct pcmcia_device *)arg; if (debug_level >= DEBUG_LEVEL_INFO) printk("mgslpc_release(0x%p)\n", link); pcmcia_disable_device(link); } static void mgslpc_detach(struct pcmcia_device *link) { if (debug_level >= DEBUG_LEVEL_INFO) printk("mgslpc_detach(0x%p)\n", link); ((MGSLPC_INFO *)link->priv)->stop = 1; mgslpc_release((u_long)link); mgslpc_remove_device((MGSLPC_INFO *)link->priv); } static int mgslpc_suspend(struct pcmcia_device *link) { MGSLPC_INFO *info = link->priv; info->stop = 1; return 0; } static int mgslpc_resume(struct pcmcia_device *link) { MGSLPC_INFO *info = link->priv; info->stop = 0; return 0; } static inline bool mgslpc_paranoia_check(MGSLPC_INFO *info, char *name, const char *routine) { #ifdef MGSLPC_PARANOIA_CHECK static const char *badmagic = "Warning: bad magic number for mgsl struct (%s) in %s\n"; static const char *badinfo = "Warning: null mgslpc_info for (%s) in %s\n"; if (!info) { printk(badinfo, name, routine); return true; } if (info->magic != MGSLPC_MAGIC) { printk(badmagic, name, routine); return true; } 
#else
	if (!info)
		return true;
#endif
	return false;
}

/* channel command register (CMDR) command bits */
#define CMD_RXFIFO      BIT7	// release current rx FIFO
#define CMD_RXRESET     BIT6	// receiver reset
#define CMD_RXFIFO_READ BIT5
#define CMD_START_TIMER BIT4
#define CMD_TXFIFO      BIT3	// release current tx FIFO
#define CMD_TXEOM       BIT1	// transmit end message
#define CMD_TXRESET     BIT0	// transmit reset

/* Busy-wait (up to ~1000 us in 1 us steps) until the channel's previous
 * command has completed (STAR BIT2 = command executing).
 * Returns false if the command is still executing after the timeout. */
static bool wait_command_complete(MGSLPC_INFO *info, unsigned char channel)
{
	int i = 0;

	/* wait for command completion */
	while (read_reg(info, (unsigned char)(channel+STAR)) & BIT2) {
		udelay(1);
		if (i++ == 1000)
			return false;
	}
	return true;
}

/* Wait for the previous command to finish, then write @cmd to the
 * channel's CMDR register. Note a wait_command_complete() timeout is
 * ignored here and the command is issued regardless. */
static void issue_command(MGSLPC_INFO *info, unsigned char channel, unsigned char cmd)
{
	wait_command_complete(info, channel);
	write_reg(info, (unsigned char) (channel + CMDR), cmd);
}

/* Stop the transmitter if it is running (takes info->lock).
 * NOTE(review): presumably wired to the tty throttle path - confirm. */
static void tx_pause(struct tty_struct *tty)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (mgslpc_paranoia_check(info, tty->name, "tx_pause"))
		return;
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("tx_pause(%s)\n",info->device_name);

	spin_lock_irqsave(&info->lock,flags);
	if (info->tx_enabled)
		tx_stop(info);
	spin_unlock_irqrestore(&info->lock,flags);
}

/* Restart the transmitter if it is stopped (takes info->lock).
 * NOTE(review): presumably wired to the tty unthrottle path - confirm. */
static void tx_release(struct tty_struct *tty)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (mgslpc_paranoia_check(info, tty->name, "tx_release"))
		return;
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("tx_release(%s)\n",info->device_name);

	spin_lock_irqsave(&info->lock,flags);
	if (!info->tx_enabled)
		tx_start(info, tty);
	spin_unlock_irqrestore(&info->lock,flags);
}

/* Return next bottom half action to perform.
 * or 0 if nothing to do.
*/ static int bh_action(MGSLPC_INFO *info) { unsigned long flags; int rc = 0; spin_lock_irqsave(&info->lock,flags); if (info->pending_bh & BH_RECEIVE) { info->pending_bh &= ~BH_RECEIVE; rc = BH_RECEIVE; } else if (info->pending_bh & BH_TRANSMIT) { info->pending_bh &= ~BH_TRANSMIT; rc = BH_TRANSMIT; } else if (info->pending_bh & BH_STATUS) { info->pending_bh &= ~BH_STATUS; rc = BH_STATUS; } if (!rc) { /* Mark BH routine as complete */ info->bh_running = false; info->bh_requested = false; } spin_unlock_irqrestore(&info->lock,flags); return rc; } static void bh_handler(struct work_struct *work) { MGSLPC_INFO *info = container_of(work, MGSLPC_INFO, task); struct tty_struct *tty; int action; if (!info) return; if (debug_level >= DEBUG_LEVEL_BH) printk( "%s(%d):bh_handler(%s) entry\n", __FILE__,__LINE__,info->device_name); info->bh_running = true; tty = tty_port_tty_get(&info->port); while((action = bh_action(info)) != 0) { /* Process work item */ if ( debug_level >= DEBUG_LEVEL_BH ) printk( "%s(%d):bh_handler() work item action=%d\n", __FILE__,__LINE__,action); switch (action) { case BH_RECEIVE: while(rx_get_frame(info, tty)); break; case BH_TRANSMIT: bh_transmit(info, tty); break; case BH_STATUS: bh_status(info); break; default: /* unknown work item ID */ printk("Unknown work item ID=%08X!\n", action); break; } } tty_kref_put(tty); if (debug_level >= DEBUG_LEVEL_BH) printk( "%s(%d):bh_handler(%s) exit\n", __FILE__,__LINE__,info->device_name); } static void bh_transmit(MGSLPC_INFO *info, struct tty_struct *tty) { if (debug_level >= DEBUG_LEVEL_BH) printk("bh_transmit() entry on %s\n", info->device_name); if (tty) tty_wakeup(tty); } static void bh_status(MGSLPC_INFO *info) { info->ri_chkcount = 0; info->dsr_chkcount = 0; info->dcd_chkcount = 0; info->cts_chkcount = 0; } /* eom: non-zero = end of frame */ static void rx_ready_hdlc(MGSLPC_INFO *info, int eom) { unsigned char data[2]; unsigned char fifo_count, read_count, i; RXBUF *buf = (RXBUF*)(info->rx_buf + 
(info->rx_put * info->rx_buf_size)); if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):rx_ready_hdlc(eom=%d)\n",__FILE__,__LINE__,eom); if (!info->rx_enabled) return; if (info->rx_frame_count >= info->rx_buf_count) { /* no more free buffers */ issue_command(info, CHA, CMD_RXRESET); info->pending_bh |= BH_RECEIVE; info->rx_overflow = true; info->icount.buf_overrun++; return; } if (eom) { /* end of frame, get FIFO count from RBCL register */ if (!(fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f))) fifo_count = 32; } else fifo_count = 32; do { if (fifo_count == 1) { read_count = 1; data[0] = read_reg(info, CHA + RXFIFO); } else { read_count = 2; *((unsigned short *) data) = read_reg16(info, CHA + RXFIFO); } fifo_count -= read_count; if (!fifo_count && eom) buf->status = data[--read_count]; for (i = 0; i < read_count; i++) { if (buf->count >= info->max_frame_size) { /* frame too large, reset receiver and reset current buffer */ issue_command(info, CHA, CMD_RXRESET); buf->count = 0; return; } *(buf->data + buf->count) = data[i]; buf->count++; } } while (fifo_count); if (eom) { info->pending_bh |= BH_RECEIVE; info->rx_frame_count++; info->rx_put++; if (info->rx_put >= info->rx_buf_count) info->rx_put = 0; } issue_command(info, CHA, CMD_RXFIFO); } static void rx_ready_async(MGSLPC_INFO *info, int tcd, struct tty_struct *tty) { unsigned char data, status, flag; int fifo_count; int work = 0; struct mgsl_icount *icount = &info->icount; if (tcd) { /* early termination, get FIFO count from RBCL register */ fifo_count = (unsigned char)(read_reg(info, CHA+RBCL) & 0x1f); /* Zero fifo count could mean 0 or 32 bytes available. * If BIT5 of STAR is set then at least 1 byte is available. */ if (!fifo_count && (read_reg(info,CHA+STAR) & BIT5)) fifo_count = 32; } else fifo_count = 32; tty_buffer_request_room(tty, fifo_count); /* Flush received async data to receive data buffer. 
*/ while (fifo_count) { data = read_reg(info, CHA + RXFIFO); status = read_reg(info, CHA + RXFIFO); fifo_count -= 2; icount->rx++; flag = TTY_NORMAL; // if no frameing/crc error then save data // BIT7:parity error // BIT6:framing error if (status & (BIT7 + BIT6)) { if (status & BIT7) icount->parity++; else icount->frame++; /* discard char if tty control flags say so */ if (status & info->ignore_status_mask) continue; status &= info->read_status_mask; if (status & BIT7) flag = TTY_PARITY; else if (status & BIT6) flag = TTY_FRAME; } work += tty_insert_flip_char(tty, data, flag); } issue_command(info, CHA, CMD_RXFIFO); if (debug_level >= DEBUG_LEVEL_ISR) { printk("%s(%d):rx_ready_async", __FILE__,__LINE__); printk("%s(%d):rx=%d brk=%d parity=%d frame=%d overrun=%d\n", __FILE__,__LINE__,icount->rx,icount->brk, icount->parity,icount->frame,icount->overrun); } if (work) tty_flip_buffer_push(tty); } static void tx_done(MGSLPC_INFO *info, struct tty_struct *tty) { if (!info->tx_active) return; info->tx_active = false; info->tx_aborting = false; if (info->params.mode == MGSL_MODE_ASYNC) return; info->tx_count = info->tx_put = info->tx_get = 0; del_timer(&info->tx_timer); if (info->drop_rts_on_tx_done) { get_signals(info); if (info->serial_signals & SerialSignal_RTS) { info->serial_signals &= ~SerialSignal_RTS; set_signals(info); } info->drop_rts_on_tx_done = false; } #if SYNCLINK_GENERIC_HDLC if (info->netcount) hdlcdev_tx_done(info); else #endif { if (tty->stopped || tty->hw_stopped) { tx_stop(info); return; } info->pending_bh |= BH_TRANSMIT; } } static void tx_ready(MGSLPC_INFO *info, struct tty_struct *tty) { unsigned char fifo_count = 32; int c; if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):tx_ready(%s)\n", __FILE__,__LINE__,info->device_name); if (info->params.mode == MGSL_MODE_HDLC) { if (!info->tx_active) return; } else { if (tty->stopped || tty->hw_stopped) { tx_stop(info); return; } if (!info->tx_count) info->tx_active = false; } if (!info->tx_count) return; 
while (info->tx_count && fifo_count) { c = min(2, min_t(int, fifo_count, min(info->tx_count, TXBUFSIZE - info->tx_get))); if (c == 1) { write_reg(info, CHA + TXFIFO, *(info->tx_buf + info->tx_get)); } else { write_reg16(info, CHA + TXFIFO, *((unsigned short*)(info->tx_buf + info->tx_get))); } info->tx_count -= c; info->tx_get = (info->tx_get + c) & (TXBUFSIZE - 1); fifo_count -= c; } if (info->params.mode == MGSL_MODE_ASYNC) { if (info->tx_count < WAKEUP_CHARS) info->pending_bh |= BH_TRANSMIT; issue_command(info, CHA, CMD_TXFIFO); } else { if (info->tx_count) issue_command(info, CHA, CMD_TXFIFO); else issue_command(info, CHA, CMD_TXFIFO + CMD_TXEOM); } } static void cts_change(MGSLPC_INFO *info, struct tty_struct *tty) { get_signals(info); if ((info->cts_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) irq_disable(info, CHB, IRQ_CTS); info->icount.cts++; if (info->serial_signals & SerialSignal_CTS) info->input_signal_events.cts_up++; else info->input_signal_events.cts_down++; wake_up_interruptible(&info->status_event_wait_q); wake_up_interruptible(&info->event_wait_q); if (info->port.flags & ASYNC_CTS_FLOW) { if (tty->hw_stopped) { if (info->serial_signals & SerialSignal_CTS) { if (debug_level >= DEBUG_LEVEL_ISR) printk("CTS tx start..."); if (tty) tty->hw_stopped = 0; tx_start(info, tty); info->pending_bh |= BH_TRANSMIT; return; } } else { if (!(info->serial_signals & SerialSignal_CTS)) { if (debug_level >= DEBUG_LEVEL_ISR) printk("CTS tx stop..."); if (tty) tty->hw_stopped = 1; tx_stop(info); } } } info->pending_bh |= BH_STATUS; } static void dcd_change(MGSLPC_INFO *info, struct tty_struct *tty) { get_signals(info); if ((info->dcd_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) irq_disable(info, CHB, IRQ_DCD); info->icount.dcd++; if (info->serial_signals & SerialSignal_DCD) { info->input_signal_events.dcd_up++; } else info->input_signal_events.dcd_down++; #if SYNCLINK_GENERIC_HDLC if (info->netcount) { if (info->serial_signals & SerialSignal_DCD) netif_carrier_on(info->netdev); else 
netif_carrier_off(info->netdev); } #endif wake_up_interruptible(&info->status_event_wait_q); wake_up_interruptible(&info->event_wait_q); if (info->port.flags & ASYNC_CHECK_CD) { if (debug_level >= DEBUG_LEVEL_ISR) printk("%s CD now %s...", info->device_name, (info->serial_signals & SerialSignal_DCD) ? "on" : "off"); if (info->serial_signals & SerialSignal_DCD) wake_up_interruptible(&info->port.open_wait); else { if (debug_level >= DEBUG_LEVEL_ISR) printk("doing serial hangup..."); if (tty) tty_hangup(tty); } } info->pending_bh |= BH_STATUS; } static void dsr_change(MGSLPC_INFO *info) { get_signals(info); if ((info->dsr_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) port_irq_disable(info, PVR_DSR); info->icount.dsr++; if (info->serial_signals & SerialSignal_DSR) info->input_signal_events.dsr_up++; else info->input_signal_events.dsr_down++; wake_up_interruptible(&info->status_event_wait_q); wake_up_interruptible(&info->event_wait_q); info->pending_bh |= BH_STATUS; } static void ri_change(MGSLPC_INFO *info) { get_signals(info); if ((info->ri_chkcount)++ >= IO_PIN_SHUTDOWN_LIMIT) port_irq_disable(info, PVR_RI); info->icount.rng++; if (info->serial_signals & SerialSignal_RI) info->input_signal_events.ri_up++; else info->input_signal_events.ri_down++; wake_up_interruptible(&info->status_event_wait_q); wake_up_interruptible(&info->event_wait_q); info->pending_bh |= BH_STATUS; } /* Interrupt service routine entry point. 
* * Arguments: * * irq interrupt number that caused interrupt * dev_id device ID supplied during interrupt registration */ static irqreturn_t mgslpc_isr(int dummy, void *dev_id) { MGSLPC_INFO *info = dev_id; struct tty_struct *tty; unsigned short isr; unsigned char gis, pis; int count=0; if (debug_level >= DEBUG_LEVEL_ISR) printk("mgslpc_isr(%d) entry.\n", info->irq_level); if (!(info->p_dev->_locked)) return IRQ_HANDLED; tty = tty_port_tty_get(&info->port); spin_lock(&info->lock); while ((gis = read_reg(info, CHA + GIS))) { if (debug_level >= DEBUG_LEVEL_ISR) printk("mgslpc_isr %s gis=%04X\n", info->device_name,gis); if ((gis & 0x70) || count > 1000) { printk("synclink_cs:hardware failed or ejected\n"); break; } count++; if (gis & (BIT1 + BIT0)) { isr = read_reg16(info, CHB + ISR); if (isr & IRQ_DCD) dcd_change(info, tty); if (isr & IRQ_CTS) cts_change(info, tty); } if (gis & (BIT3 + BIT2)) { isr = read_reg16(info, CHA + ISR); if (isr & IRQ_TIMER) { info->irq_occurred = true; irq_disable(info, CHA, IRQ_TIMER); } /* receive IRQs */ if (isr & IRQ_EXITHUNT) { info->icount.exithunt++; wake_up_interruptible(&info->event_wait_q); } if (isr & IRQ_BREAK_ON) { info->icount.brk++; if (info->port.flags & ASYNC_SAK) do_SAK(tty); } if (isr & IRQ_RXTIME) { issue_command(info, CHA, CMD_RXFIFO_READ); } if (isr & (IRQ_RXEOM + IRQ_RXFIFO)) { if (info->params.mode == MGSL_MODE_HDLC) rx_ready_hdlc(info, isr & IRQ_RXEOM); else rx_ready_async(info, isr & IRQ_RXEOM, tty); } /* transmit IRQs */ if (isr & IRQ_UNDERRUN) { if (info->tx_aborting) info->icount.txabort++; else info->icount.txunder++; tx_done(info, tty); } else if (isr & IRQ_ALLSENT) { info->icount.txok++; tx_done(info, tty); } else if (isr & IRQ_TXFIFO) tx_ready(info, tty); } if (gis & BIT7) { pis = read_reg(info, CHA + PIS); if (pis & BIT1) dsr_change(info); if (pis & BIT2) ri_change(info); } } /* Request bottom half processing if there's something * for it to do and the bh is not already running */ if (info->pending_bh && 
!info->bh_running && !info->bh_requested) { if ( debug_level >= DEBUG_LEVEL_ISR ) printk("%s(%d):%s queueing bh task.\n", __FILE__,__LINE__,info->device_name); schedule_work(&info->task); info->bh_requested = true; } spin_unlock(&info->lock); tty_kref_put(tty); if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):mgslpc_isr(%d)exit.\n", __FILE__, __LINE__, info->irq_level); return IRQ_HANDLED; } /* Initialize and start device. */ static int startup(MGSLPC_INFO * info, struct tty_struct *tty) { int retval = 0; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):startup(%s)\n",__FILE__,__LINE__,info->device_name); if (info->port.flags & ASYNC_INITIALIZED) return 0; if (!info->tx_buf) { /* allocate a page of memory for a transmit buffer */ info->tx_buf = (unsigned char *)get_zeroed_page(GFP_KERNEL); if (!info->tx_buf) { printk(KERN_ERR"%s(%d):%s can't allocate transmit buffer\n", __FILE__,__LINE__,info->device_name); return -ENOMEM; } } info->pending_bh = 0; memset(&info->icount, 0, sizeof(info->icount)); setup_timer(&info->tx_timer, tx_timeout, (unsigned long)info); /* Allocate and claim adapter resources */ retval = claim_resources(info); /* perform existence check and diagnostics */ if ( !retval ) retval = adapter_test(info); if ( retval ) { if (capable(CAP_SYS_ADMIN) && tty) set_bit(TTY_IO_ERROR, &tty->flags); release_resources(info); return retval; } /* program hardware for current parameters */ mgslpc_change_params(info, tty); if (tty) clear_bit(TTY_IO_ERROR, &tty->flags); info->port.flags |= ASYNC_INITIALIZED; return 0; } /* Called by mgslpc_close() and mgslpc_hangup() to shutdown hardware */ static void shutdown(MGSLPC_INFO * info, struct tty_struct *tty) { unsigned long flags; if (!(info->port.flags & ASYNC_INITIALIZED)) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_shutdown(%s)\n", __FILE__,__LINE__, info->device_name ); /* clear status wait queue because status changes */ /* can't happen after shutting down the hardware */ 
	/* Tail of shutdown() (function begins before this chunk):
	 * wake any sleepers first — no further status changes can arrive
	 * once the hardware is reset below. */
	wake_up_interruptible(&info->status_event_wait_q);
	wake_up_interruptible(&info->event_wait_q);

	del_timer_sync(&info->tx_timer);

	if (info->tx_buf) {
		free_page((unsigned long) info->tx_buf);
		info->tx_buf = NULL;
	}

	spin_lock_irqsave(&info->lock,flags);

	rx_stop(info);
	tx_stop(info);

	/* TODO:disable interrupts instead of reset to preserve signal states */
	reset_device(info);

	/* drop DTR/RTS on close unless HUPCL says to keep the line up */
	if (!tty || tty->termios->c_cflag & HUPCL) {
		info->serial_signals &= ~(SerialSignal_DTR + SerialSignal_RTS);
		set_signals(info);
	}

	spin_unlock_irqrestore(&info->lock,flags);

	release_resources(info);

	if (tty)
		set_bit(TTY_IO_ERROR, &tty->flags);

	info->port.flags &= ~ASYNC_INITIALIZED;
}

/* Program the hardware for the current info->params: stop both
 * directions, reset the tx ring indices, select HDLC or async mode,
 * re-arm the status-pin interrupts, and restart the receiver if the
 * port is open for reading (or a netdevice is attached).
 * Caller must NOT hold info->lock; it is taken here.
 */
static void mgslpc_program_hw(MGSLPC_INFO *info, struct tty_struct *tty)
{
	unsigned long flags;

	spin_lock_irqsave(&info->lock,flags);

	rx_stop(info);
	tx_stop(info);
	info->tx_count = info->tx_put = info->tx_get = 0;

	/* netcount != 0 forces HDLC mode for the attached netdevice */
	if (info->params.mode == MGSL_MODE_HDLC || info->netcount)
		hdlc_mode(info);
	else
		async_mode(info);

	set_signals(info);

	/* reset the bounce counters so the pin IRQs get a fresh budget */
	info->dcd_chkcount = 0;
	info->cts_chkcount = 0;
	info->ri_chkcount = 0;
	info->dsr_chkcount = 0;

	irq_enable(info, CHB, IRQ_DCD | IRQ_CTS);
	port_irq_enable(info, (unsigned char) PVR_DSR | PVR_RI);
	get_signals(info);

	if (info->netcount || (tty && (tty->termios->c_cflag & CREAD)))
		rx_start(info);

	spin_unlock_irqrestore(&info->lock,flags);
}

/* Reconfigure adapter based on new parameters
 * (derives info->params from the tty termios, then reprograms hardware)
 */
static void mgslpc_change_params(MGSLPC_INFO *info, struct tty_struct *tty)
{
	unsigned cflag;
	int bits_per_char;

	if (!tty || !tty->termios)
		return;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_change_params(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	cflag = tty->termios->c_cflag;

	/* if B0 rate (hangup) specified then negate DTR and RTS */
	/* otherwise assert DTR and RTS */
	if (cflag & CBAUD)
		info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR;
	else
		info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR);

	/* byte size and parity */
	switch (cflag & CSIZE) {
	case CS5:
info->params.data_bits = 5; break; case CS6: info->params.data_bits = 6; break; case CS7: info->params.data_bits = 7; break; case CS8: info->params.data_bits = 8; break; default: info->params.data_bits = 7; break; } if (cflag & CSTOPB) info->params.stop_bits = 2; else info->params.stop_bits = 1; info->params.parity = ASYNC_PARITY_NONE; if (cflag & PARENB) { if (cflag & PARODD) info->params.parity = ASYNC_PARITY_ODD; else info->params.parity = ASYNC_PARITY_EVEN; #ifdef CMSPAR if (cflag & CMSPAR) info->params.parity = ASYNC_PARITY_SPACE; #endif } /* calculate number of jiffies to transmit a full * FIFO (32 bytes) at specified data rate */ bits_per_char = info->params.data_bits + info->params.stop_bits + 1; /* if port data rate is set to 460800 or less then * allow tty settings to override, otherwise keep the * current data rate. */ if (info->params.data_rate <= 460800) { info->params.data_rate = tty_get_baud_rate(tty); } if ( info->params.data_rate ) { info->timeout = (32*HZ*bits_per_char) / info->params.data_rate; } info->timeout += HZ/50; /* Add .02 seconds of slop */ if (cflag & CRTSCTS) info->port.flags |= ASYNC_CTS_FLOW; else info->port.flags &= ~ASYNC_CTS_FLOW; if (cflag & CLOCAL) info->port.flags &= ~ASYNC_CHECK_CD; else info->port.flags |= ASYNC_CHECK_CD; /* process tty input control flags */ info->read_status_mask = 0; if (I_INPCK(tty)) info->read_status_mask |= BIT7 | BIT6; if (I_IGNPAR(tty)) info->ignore_status_mask |= BIT7 | BIT6; mgslpc_program_hw(info, tty); } /* Add a character to the transmit buffer */ static int mgslpc_put_char(struct tty_struct *tty, unsigned char ch) { MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) { printk( "%s(%d):mgslpc_put_char(%d) on %s\n", __FILE__,__LINE__,ch,info->device_name); } if (mgslpc_paranoia_check(info, tty->name, "mgslpc_put_char")) return 0; if (!info->tx_buf) return 0; spin_lock_irqsave(&info->lock,flags); if (info->params.mode == MGSL_MODE_ASYNC || 
!info->tx_active) { if (info->tx_count < TXBUFSIZE - 1) { info->tx_buf[info->tx_put++] = ch; info->tx_put &= TXBUFSIZE-1; info->tx_count++; } } spin_unlock_irqrestore(&info->lock,flags); return 1; } /* Enable transmitter so remaining characters in the * transmit buffer are sent. */ static void mgslpc_flush_chars(struct tty_struct *tty) { MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk( "%s(%d):mgslpc_flush_chars() entry on %s tx_count=%d\n", __FILE__,__LINE__,info->device_name,info->tx_count); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_chars")) return; if (info->tx_count <= 0 || tty->stopped || tty->hw_stopped || !info->tx_buf) return; if (debug_level >= DEBUG_LEVEL_INFO) printk( "%s(%d):mgslpc_flush_chars() entry on %s starting transmitter\n", __FILE__,__LINE__,info->device_name); spin_lock_irqsave(&info->lock,flags); if (!info->tx_active) tx_start(info, tty); spin_unlock_irqrestore(&info->lock,flags); } /* Send a block of data * * Arguments: * * tty pointer to tty information structure * buf pointer to buffer containing send data * count size of send data in bytes * * Returns: number of characters written */ static int mgslpc_write(struct tty_struct * tty, const unsigned char *buf, int count) { int c, ret = 0; MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk( "%s(%d):mgslpc_write(%s) count=%d\n", __FILE__,__LINE__,info->device_name,count); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write") || !info->tx_buf) goto cleanup; if (info->params.mode == MGSL_MODE_HDLC) { if (count > TXBUFSIZE) { ret = -EIO; goto cleanup; } if (info->tx_active) goto cleanup; else if (info->tx_count) goto start; } for (;;) { c = min(count, min(TXBUFSIZE - info->tx_count - 1, TXBUFSIZE - info->tx_put)); if (c <= 0) break; memcpy(info->tx_buf + info->tx_put, buf, c); spin_lock_irqsave(&info->lock,flags); info->tx_put = (info->tx_put 
+ c) & (TXBUFSIZE-1); info->tx_count += c; spin_unlock_irqrestore(&info->lock,flags); buf += c; count -= c; ret += c; } start: if (info->tx_count && !tty->stopped && !tty->hw_stopped) { spin_lock_irqsave(&info->lock,flags); if (!info->tx_active) tx_start(info, tty); spin_unlock_irqrestore(&info->lock,flags); } cleanup: if (debug_level >= DEBUG_LEVEL_INFO) printk( "%s(%d):mgslpc_write(%s) returning=%d\n", __FILE__,__LINE__,info->device_name,ret); return ret; } /* Return the count of free bytes in transmit buffer */ static int mgslpc_write_room(struct tty_struct *tty) { MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; int ret; if (mgslpc_paranoia_check(info, tty->name, "mgslpc_write_room")) return 0; if (info->params.mode == MGSL_MODE_HDLC) { /* HDLC (frame oriented) mode */ if (info->tx_active) return 0; else return HDLC_MAX_FRAME_SIZE; } else { ret = TXBUFSIZE - info->tx_count - 1; if (ret < 0) ret = 0; } if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_write_room(%s)=%d\n", __FILE__,__LINE__, info->device_name, ret); return ret; } /* Return the count of bytes in transmit buffer */ static int mgslpc_chars_in_buffer(struct tty_struct *tty) { MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; int rc; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_chars_in_buffer(%s)\n", __FILE__,__LINE__, info->device_name ); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_chars_in_buffer")) return 0; if (info->params.mode == MGSL_MODE_HDLC) rc = info->tx_active ? 
			/* Tail of mgslpc_chars_in_buffer() (function begins
			 * before this chunk): HDLC reports a full frame while
			 * tx is active, async reports the ring-buffer count. */
			info->max_frame_size : 0;
	else
		rc = info->tx_count;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_chars_in_buffer(%s)=%d\n",
			 __FILE__,__LINE__, info->device_name, rc);

	return rc;
}

/* Discard all data in the send buffer
 * (resets the tx ring indices under the lock, cancels the tx timeout
 * timer, then wakes writers blocked on buffer space).
 */
static void mgslpc_flush_buffer(struct tty_struct *tty)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_flush_buffer(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_flush_buffer"))
		return;

	spin_lock_irqsave(&info->lock,flags);
	info->tx_count = info->tx_put = info->tx_get = 0;
	del_timer(&info->tx_timer);
	spin_unlock_irqrestore(&info->lock,flags);

	wake_up_interruptible(&tty->write_wait);
	tty_wakeup(tty);
}

/* Send a high-priority XON/XOFF character
 * (stored in info->x_char; the transmit path sends it ahead of
 * buffered data; a non-zero ch kicks the transmitter if idle).
 */
static void mgslpc_send_xchar(struct tty_struct *tty, char ch)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_send_xchar(%s,%d)\n",
			 __FILE__,__LINE__, info->device_name, ch );

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_send_xchar"))
		return;

	info->x_char = ch;
	if (ch) {
		spin_lock_irqsave(&info->lock,flags);
		if (!info->tx_enabled)
			tx_start(info, tty);
		spin_unlock_irqrestore(&info->lock,flags);
	}
}

/* Signal remote device to throttle send data (our receive data)
 * (sends XOFF if software flow control is on, and/or drops RTS if
 * hardware flow control is on).
 */
static void mgslpc_throttle(struct tty_struct * tty)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_throttle(%s) entry\n",
			 __FILE__,__LINE__, info->device_name );

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_throttle"))
		return;

	if (I_IXOFF(tty))
		mgslpc_send_xchar(tty, STOP_CHAR(tty));

	if (tty->termios->c_cflag & CRTSCTS) {
		spin_lock_irqsave(&info->lock,flags);
		info->serial_signals &= ~SerialSignal_RTS;
		set_signals(info);
		spin_unlock_irqrestore(&info->lock,flags);
	}
}

/* Signal remote device to stop
throttling send data (our receive data) */ static void mgslpc_unthrottle(struct tty_struct * tty) { MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_unthrottle(%s) entry\n", __FILE__,__LINE__, info->device_name ); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_unthrottle")) return; if (I_IXOFF(tty)) { if (info->x_char) info->x_char = 0; else mgslpc_send_xchar(tty, START_CHAR(tty)); } if (tty->termios->c_cflag & CRTSCTS) { spin_lock_irqsave(&info->lock,flags); info->serial_signals |= SerialSignal_RTS; set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } } /* get the current serial statistics */ static int get_stats(MGSLPC_INFO * info, struct mgsl_icount __user *user_icount) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("get_params(%s)\n", info->device_name); if (!user_icount) { memset(&info->icount, 0, sizeof(info->icount)); } else { COPY_TO_USER(err, user_icount, &info->icount, sizeof(struct mgsl_icount)); if (err) return -EFAULT; } return 0; } /* get the current serial parameters */ static int get_params(MGSLPC_INFO * info, MGSL_PARAMS __user *user_params) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("get_params(%s)\n", info->device_name); COPY_TO_USER(err,user_params, &info->params, sizeof(MGSL_PARAMS)); if (err) return -EFAULT; return 0; } /* set the serial parameters * * Arguments: * * info pointer to device instance data * new_params user buffer containing new serial params * * Returns: 0 if success, otherwise error code */ static int set_params(MGSLPC_INFO * info, MGSL_PARAMS __user *new_params, struct tty_struct *tty) { unsigned long flags; MGSL_PARAMS tmp_params; int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):set_params %s\n", __FILE__,__LINE__, info->device_name ); COPY_FROM_USER(err,&tmp_params, new_params, sizeof(MGSL_PARAMS)); if (err) { if ( debug_level >= DEBUG_LEVEL_INFO ) printk( "%s(%d):set_params(%s) user buffer copy 
failed\n", __FILE__,__LINE__,info->device_name); return -EFAULT; } spin_lock_irqsave(&info->lock,flags); memcpy(&info->params,&tmp_params,sizeof(MGSL_PARAMS)); spin_unlock_irqrestore(&info->lock,flags); mgslpc_change_params(info, tty); return 0; } static int get_txidle(MGSLPC_INFO * info, int __user *idle_mode) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("get_txidle(%s)=%d\n", info->device_name, info->idle_mode); COPY_TO_USER(err,idle_mode, &info->idle_mode, sizeof(int)); if (err) return -EFAULT; return 0; } static int set_txidle(MGSLPC_INFO * info, int idle_mode) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("set_txidle(%s,%d)\n", info->device_name, idle_mode); spin_lock_irqsave(&info->lock,flags); info->idle_mode = idle_mode; tx_set_idle(info); spin_unlock_irqrestore(&info->lock,flags); return 0; } static int get_interface(MGSLPC_INFO * info, int __user *if_mode) { int err; if (debug_level >= DEBUG_LEVEL_INFO) printk("get_interface(%s)=%d\n", info->device_name, info->if_mode); COPY_TO_USER(err,if_mode, &info->if_mode, sizeof(int)); if (err) return -EFAULT; return 0; } static int set_interface(MGSLPC_INFO * info, int if_mode) { unsigned long flags; unsigned char val; if (debug_level >= DEBUG_LEVEL_INFO) printk("set_interface(%s,%d)\n", info->device_name, if_mode); spin_lock_irqsave(&info->lock,flags); info->if_mode = if_mode; val = read_reg(info, PVR) & 0x0f; switch (info->if_mode) { case MGSL_INTERFACE_RS232: val |= PVR_RS232; break; case MGSL_INTERFACE_V35: val |= PVR_V35; break; case MGSL_INTERFACE_RS422: val |= PVR_RS422; break; } write_reg(info, PVR, val); spin_unlock_irqrestore(&info->lock,flags); return 0; } static int set_txenable(MGSLPC_INFO * info, int enable, struct tty_struct *tty) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("set_txenable(%s,%d)\n", info->device_name, enable); spin_lock_irqsave(&info->lock,flags); if (enable) { if (!info->tx_enabled) tx_start(info, tty); } else { if 
(info->tx_enabled) tx_stop(info); } spin_unlock_irqrestore(&info->lock,flags); return 0; } static int tx_abort(MGSLPC_INFO * info) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("tx_abort(%s)\n", info->device_name); spin_lock_irqsave(&info->lock,flags); if (info->tx_active && info->tx_count && info->params.mode == MGSL_MODE_HDLC) { /* clear data count so FIFO is not filled on next IRQ. * This results in underrun and abort transmission. */ info->tx_count = info->tx_put = info->tx_get = 0; info->tx_aborting = true; } spin_unlock_irqrestore(&info->lock,flags); return 0; } static int set_rxenable(MGSLPC_INFO * info, int enable) { unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("set_rxenable(%s,%d)\n", info->device_name, enable); spin_lock_irqsave(&info->lock,flags); if (enable) { if (!info->rx_enabled) rx_start(info); } else { if (info->rx_enabled) rx_stop(info); } spin_unlock_irqrestore(&info->lock,flags); return 0; } /* wait for specified event to occur * * Arguments: info pointer to device instance data * mask pointer to bitmask of events to wait for * Return Value: 0 if successful and bit mask updated with * of events triggerred, * otherwise error code */ static int wait_events(MGSLPC_INFO * info, int __user *mask_ptr) { unsigned long flags; int s; int rc=0; struct mgsl_icount cprev, cnow; int events; int mask; struct _input_signal_events oldsigs, newsigs; DECLARE_WAITQUEUE(wait, current); COPY_FROM_USER(rc,&mask, mask_ptr, sizeof(int)); if (rc) return -EFAULT; if (debug_level >= DEBUG_LEVEL_INFO) printk("wait_events(%s,%d)\n", info->device_name, mask); spin_lock_irqsave(&info->lock,flags); /* return immediately if state matches requested events */ get_signals(info); s = info->serial_signals; events = mask & ( ((s & SerialSignal_DSR) ? MgslEvent_DsrActive:MgslEvent_DsrInactive) + ((s & SerialSignal_DCD) ? MgslEvent_DcdActive:MgslEvent_DcdInactive) + ((s & SerialSignal_CTS) ? 
MgslEvent_CtsActive:MgslEvent_CtsInactive) + ((s & SerialSignal_RI) ? MgslEvent_RiActive :MgslEvent_RiInactive) ); if (events) { spin_unlock_irqrestore(&info->lock,flags); goto exit; } /* save current irq counts */ cprev = info->icount; oldsigs = info->input_signal_events; if ((info->params.mode == MGSL_MODE_HDLC) && (mask & MgslEvent_ExitHuntMode)) irq_enable(info, CHA, IRQ_EXITHUNT); set_current_state(TASK_INTERRUPTIBLE); add_wait_queue(&info->event_wait_q, &wait); spin_unlock_irqrestore(&info->lock,flags); for(;;) { schedule(); if (signal_pending(current)) { rc = -ERESTARTSYS; break; } /* get current irq counts */ spin_lock_irqsave(&info->lock,flags); cnow = info->icount; newsigs = info->input_signal_events; set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&info->lock,flags); /* if no change, wait aborted for some reason */ if (newsigs.dsr_up == oldsigs.dsr_up && newsigs.dsr_down == oldsigs.dsr_down && newsigs.dcd_up == oldsigs.dcd_up && newsigs.dcd_down == oldsigs.dcd_down && newsigs.cts_up == oldsigs.cts_up && newsigs.cts_down == oldsigs.cts_down && newsigs.ri_up == oldsigs.ri_up && newsigs.ri_down == oldsigs.ri_down && cnow.exithunt == cprev.exithunt && cnow.rxidle == cprev.rxidle) { rc = -EIO; break; } events = mask & ( (newsigs.dsr_up != oldsigs.dsr_up ? MgslEvent_DsrActive:0) + (newsigs.dsr_down != oldsigs.dsr_down ? MgslEvent_DsrInactive:0) + (newsigs.dcd_up != oldsigs.dcd_up ? MgslEvent_DcdActive:0) + (newsigs.dcd_down != oldsigs.dcd_down ? MgslEvent_DcdInactive:0) + (newsigs.cts_up != oldsigs.cts_up ? MgslEvent_CtsActive:0) + (newsigs.cts_down != oldsigs.cts_down ? MgslEvent_CtsInactive:0) + (newsigs.ri_up != oldsigs.ri_up ? MgslEvent_RiActive:0) + (newsigs.ri_down != oldsigs.ri_down ? MgslEvent_RiInactive:0) + (cnow.exithunt != cprev.exithunt ? MgslEvent_ExitHuntMode:0) + (cnow.rxidle != cprev.rxidle ? 
MgslEvent_IdleReceived:0) ); if (events) break; cprev = cnow; oldsigs = newsigs; } remove_wait_queue(&info->event_wait_q, &wait); set_current_state(TASK_RUNNING); if (mask & MgslEvent_ExitHuntMode) { spin_lock_irqsave(&info->lock,flags); if (!waitqueue_active(&info->event_wait_q)) irq_disable(info, CHA, IRQ_EXITHUNT); spin_unlock_irqrestore(&info->lock,flags); } exit: if (rc == 0) PUT_USER(rc, events, mask_ptr); return rc; } static int modem_input_wait(MGSLPC_INFO *info,int arg) { unsigned long flags; int rc; struct mgsl_icount cprev, cnow; DECLARE_WAITQUEUE(wait, current); /* save current irq counts */ spin_lock_irqsave(&info->lock,flags); cprev = info->icount; add_wait_queue(&info->status_event_wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&info->lock,flags); for(;;) { schedule(); if (signal_pending(current)) { rc = -ERESTARTSYS; break; } /* get new irq counts */ spin_lock_irqsave(&info->lock,flags); cnow = info->icount; set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(&info->lock,flags); /* if no change, wait aborted for some reason */ if (cnow.rng == cprev.rng && cnow.dsr == cprev.dsr && cnow.dcd == cprev.dcd && cnow.cts == cprev.cts) { rc = -EIO; break; } /* check for change in caller specified modem input */ if ((arg & TIOCM_RNG && cnow.rng != cprev.rng) || (arg & TIOCM_DSR && cnow.dsr != cprev.dsr) || (arg & TIOCM_CD && cnow.dcd != cprev.dcd) || (arg & TIOCM_CTS && cnow.cts != cprev.cts)) { rc = 0; break; } cprev = cnow; } remove_wait_queue(&info->status_event_wait_q, &wait); set_current_state(TASK_RUNNING); return rc; } /* return the state of the serial control and status signals */ static int tiocmget(struct tty_struct *tty) { MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; unsigned int result; unsigned long flags; spin_lock_irqsave(&info->lock,flags); get_signals(info); spin_unlock_irqrestore(&info->lock,flags); result = ((info->serial_signals & SerialSignal_RTS) ? 
		/* Tail of tiocmget() (function begins before this chunk):
		 * map the cached serial_signals bits to TIOCM_* flags. */
		TIOCM_RTS:0) +
		((info->serial_signals & SerialSignal_DTR) ? TIOCM_DTR:0) +
		((info->serial_signals & SerialSignal_DCD) ? TIOCM_CAR:0) +
		((info->serial_signals & SerialSignal_RI) ? TIOCM_RNG:0) +
		((info->serial_signals & SerialSignal_DSR) ? TIOCM_DSR:0) +
		((info->serial_signals & SerialSignal_CTS) ? TIOCM_CTS:0);

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tiocmget() value=%08X\n",
			 __FILE__,__LINE__, info->device_name, result );
	return result;
}

/* set modem control signals (DTR/RTS)
 * (updates the cached serial_signals mask from set/clear, then pushes
 * the new state to the hardware under the lock; always succeeds).
 */
static int tiocmset(struct tty_struct *tty,
		    unsigned int set, unsigned int clear)
{
	MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):%s tiocmset(%x,%x)\n",
			__FILE__,__LINE__,info->device_name, set, clear);

	if (set & TIOCM_RTS)
		info->serial_signals |= SerialSignal_RTS;
	if (set & TIOCM_DTR)
		info->serial_signals |= SerialSignal_DTR;
	if (clear & TIOCM_RTS)
		info->serial_signals &= ~SerialSignal_RTS;
	if (clear & TIOCM_DTR)
		info->serial_signals &= ~SerialSignal_DTR;

	spin_lock_irqsave(&info->lock,flags);
	set_signals(info);
	spin_unlock_irqrestore(&info->lock,flags);

	return 0;
}

/* Set or clear transmit break condition
 *
 * Arguments:	tty		pointer to tty instance data
 *		break_state	-1=set break condition, 0=clear
 * (BIT6 of the DAFO register drives the break condition on this chip)
 */
static int mgslpc_break(struct tty_struct *tty, int break_state)
{
	MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
	unsigned long flags;

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):mgslpc_break(%s,%d)\n",
			 __FILE__,__LINE__, info->device_name, break_state);

	if (mgslpc_paranoia_check(info, tty->name, "mgslpc_break"))
		return -EINVAL;

	spin_lock_irqsave(&info->lock,flags);
	if (break_state == -1)
		set_reg_bits(info, CHA+DAFO, BIT6);
	else
		clear_reg_bits(info, CHA+DAFO, BIT6);
	spin_unlock_irqrestore(&info->lock,flags);
	return 0;
}

/* Snapshot the interrupt counters for the TIOCGICOUNT ioctl
 * (continues on the next chunk). */
static int mgslpc_get_icount(struct tty_struct *tty,
			     struct serial_icounter_struct *icount)
{
	MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data;
	struct mgsl_icount
cnow; /* kernel counter temps */ unsigned long flags; spin_lock_irqsave(&info->lock,flags); cnow = info->icount; spin_unlock_irqrestore(&info->lock,flags); icount->cts = cnow.cts; icount->dsr = cnow.dsr; icount->rng = cnow.rng; icount->dcd = cnow.dcd; icount->rx = cnow.rx; icount->tx = cnow.tx; icount->frame = cnow.frame; icount->overrun = cnow.overrun; icount->parity = cnow.parity; icount->brk = cnow.brk; icount->buf_overrun = cnow.buf_overrun; return 0; } /* Service an IOCTL request * * Arguments: * * tty pointer to tty instance data * cmd IOCTL command code * arg command argument/context * * Return Value: 0 if success, otherwise error code */ static int mgslpc_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; void __user *argp = (void __user *)arg; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_ioctl %s cmd=%08X\n", __FILE__,__LINE__, info->device_name, cmd ); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_ioctl")) return -ENODEV; if ((cmd != TIOCGSERIAL) && (cmd != TIOCSSERIAL) && (cmd != TIOCMIWAIT)) { if (tty->flags & (1 << TTY_IO_ERROR)) return -EIO; } switch (cmd) { case MGSL_IOCGPARAMS: return get_params(info, argp); case MGSL_IOCSPARAMS: return set_params(info, argp, tty); case MGSL_IOCGTXIDLE: return get_txidle(info, argp); case MGSL_IOCSTXIDLE: return set_txidle(info, (int)arg); case MGSL_IOCGIF: return get_interface(info, argp); case MGSL_IOCSIF: return set_interface(info,(int)arg); case MGSL_IOCTXENABLE: return set_txenable(info,(int)arg, tty); case MGSL_IOCRXENABLE: return set_rxenable(info,(int)arg); case MGSL_IOCTXABORT: return tx_abort(info); case MGSL_IOCGSTATS: return get_stats(info, argp); case MGSL_IOCWAITEVENT: return wait_events(info, argp); case TIOCMIWAIT: return modem_input_wait(info,(int)arg); default: return -ENOIOCTLCMD; } return 0; } /* Set new termios settings * * Arguments: * * tty pointer to tty structure * termios pointer to buffer to hold 
returned old termios */ static void mgslpc_set_termios(struct tty_struct *tty, struct ktermios *old_termios) { MGSLPC_INFO *info = (MGSLPC_INFO *)tty->driver_data; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_set_termios %s\n", __FILE__,__LINE__, tty->driver->name ); /* just return if nothing has changed */ if ((tty->termios->c_cflag == old_termios->c_cflag) && (RELEVANT_IFLAG(tty->termios->c_iflag) == RELEVANT_IFLAG(old_termios->c_iflag))) return; mgslpc_change_params(info, tty); /* Handle transition to B0 status */ if (old_termios->c_cflag & CBAUD && !(tty->termios->c_cflag & CBAUD)) { info->serial_signals &= ~(SerialSignal_RTS + SerialSignal_DTR); spin_lock_irqsave(&info->lock,flags); set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } /* Handle transition away from B0 status */ if (!(old_termios->c_cflag & CBAUD) && tty->termios->c_cflag & CBAUD) { info->serial_signals |= SerialSignal_DTR; if (!(tty->termios->c_cflag & CRTSCTS) || !test_bit(TTY_THROTTLED, &tty->flags)) { info->serial_signals |= SerialSignal_RTS; } spin_lock_irqsave(&info->lock,flags); set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } /* Handle turning off CRTSCTS */ if (old_termios->c_cflag & CRTSCTS && !(tty->termios->c_cflag & CRTSCTS)) { tty->hw_stopped = 0; tx_release(tty); } } static void mgslpc_close(struct tty_struct *tty, struct file * filp) { MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; struct tty_port *port = &info->port; if (mgslpc_paranoia_check(info, tty->name, "mgslpc_close")) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_close(%s) entry, count=%d\n", __FILE__,__LINE__, info->device_name, port->count); WARN_ON(!port->count); if (tty_port_close_start(port, tty, filp) == 0) goto cleanup; if (port->flags & ASYNC_INITIALIZED) mgslpc_wait_until_sent(tty, info->timeout); mgslpc_flush_buffer(tty); tty_ldisc_flush(tty); shutdown(info, tty); tty_port_close_end(port, tty); tty_port_tty_set(port, 
NULL); cleanup: if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__,__LINE__, tty->driver->name, port->count); } /* Wait until the transmitter is empty. */ static void mgslpc_wait_until_sent(struct tty_struct *tty, int timeout) { MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; unsigned long orig_jiffies, char_time; if (!info ) return; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_wait_until_sent(%s) entry\n", __FILE__,__LINE__, info->device_name ); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_wait_until_sent")) return; if (!(info->port.flags & ASYNC_INITIALIZED)) goto exit; orig_jiffies = jiffies; /* Set check interval to 1/5 of estimated time to * send a character, and make it at least 1. The check * interval should also be less than the timeout. * Note: use tight timings here to satisfy the NIST-PCTS. */ if ( info->params.data_rate ) { char_time = info->timeout/(32 * 5); if (!char_time) char_time++; } else char_time = 1; if (timeout) char_time = min_t(unsigned long, char_time, timeout); if (info->params.mode == MGSL_MODE_HDLC) { while (info->tx_active) { msleep_interruptible(jiffies_to_msecs(char_time)); if (signal_pending(current)) break; if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } } else { while ((info->tx_count || info->tx_active) && info->tx_enabled) { msleep_interruptible(jiffies_to_msecs(char_time)); if (signal_pending(current)) break; if (timeout && time_after(jiffies, orig_jiffies + timeout)) break; } } exit: if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_wait_until_sent(%s) exit\n", __FILE__,__LINE__, info->device_name ); } /* Called by tty_hangup() when a hangup is signaled. * This is the same as closing all open files for the port. 
*/ static void mgslpc_hangup(struct tty_struct *tty) { MGSLPC_INFO * info = (MGSLPC_INFO *)tty->driver_data; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_hangup(%s)\n", __FILE__,__LINE__, info->device_name ); if (mgslpc_paranoia_check(info, tty->name, "mgslpc_hangup")) return; mgslpc_flush_buffer(tty); shutdown(info, tty); tty_port_hangup(&info->port); } static int carrier_raised(struct tty_port *port) { MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); unsigned long flags; spin_lock_irqsave(&info->lock,flags); get_signals(info); spin_unlock_irqrestore(&info->lock,flags); if (info->serial_signals & SerialSignal_DCD) return 1; return 0; } static void dtr_rts(struct tty_port *port, int onoff) { MGSLPC_INFO *info = container_of(port, MGSLPC_INFO, port); unsigned long flags; spin_lock_irqsave(&info->lock,flags); if (onoff) info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; else info->serial_signals &= ~SerialSignal_RTS + SerialSignal_DTR; set_signals(info); spin_unlock_irqrestore(&info->lock,flags); } static int mgslpc_open(struct tty_struct *tty, struct file * filp) { MGSLPC_INFO *info; struct tty_port *port; int retval, line; unsigned long flags; /* verify range of specified line number */ line = tty->index; if (line >= mgslpc_device_count) { printk("%s(%d):mgslpc_open with invalid line #%d.\n", __FILE__,__LINE__,line); return -ENODEV; } /* find the info structure for the specified line */ info = mgslpc_device_list; while(info && info->line != line) info = info->next_device; if (mgslpc_paranoia_check(info, tty->name, "mgslpc_open")) return -ENODEV; port = &info->port; tty->driver_data = info; tty_port_tty_set(port, tty); if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_open(%s), old ref count = %d\n", __FILE__,__LINE__,tty->driver->name, port->count); /* If port is closing, signal caller to try again */ if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){ if (port->flags & ASYNC_CLOSING) 
interruptible_sleep_on(&port->close_wait); retval = ((port->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); goto cleanup; } tty->low_latency = (port->flags & ASYNC_LOW_LATENCY) ? 1 : 0; spin_lock_irqsave(&info->netlock, flags); if (info->netcount) { retval = -EBUSY; spin_unlock_irqrestore(&info->netlock, flags); goto cleanup; } spin_lock(&port->lock); port->count++; spin_unlock(&port->lock); spin_unlock_irqrestore(&info->netlock, flags); if (port->count == 1) { /* 1st open on this device, init hardware */ retval = startup(info, tty); if (retval < 0) goto cleanup; } retval = tty_port_block_til_ready(&info->port, tty, filp); if (retval) { if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):block_til_ready(%s) returned %d\n", __FILE__,__LINE__, info->device_name, retval); goto cleanup; } if (debug_level >= DEBUG_LEVEL_INFO) printk("%s(%d):mgslpc_open(%s) success\n", __FILE__,__LINE__, info->device_name); retval = 0; cleanup: return retval; } /* * /proc fs routines.... */ static inline void line_info(struct seq_file *m, MGSLPC_INFO *info) { char stat_buf[30]; unsigned long flags; seq_printf(m, "%s:io:%04X irq:%d", info->device_name, info->io_base, info->irq_level); /* output current serial signal states */ spin_lock_irqsave(&info->lock,flags); get_signals(info); spin_unlock_irqrestore(&info->lock,flags); stat_buf[0] = 0; stat_buf[1] = 0; if (info->serial_signals & SerialSignal_RTS) strcat(stat_buf, "|RTS"); if (info->serial_signals & SerialSignal_CTS) strcat(stat_buf, "|CTS"); if (info->serial_signals & SerialSignal_DTR) strcat(stat_buf, "|DTR"); if (info->serial_signals & SerialSignal_DSR) strcat(stat_buf, "|DSR"); if (info->serial_signals & SerialSignal_DCD) strcat(stat_buf, "|CD"); if (info->serial_signals & SerialSignal_RI) strcat(stat_buf, "|RI"); if (info->params.mode == MGSL_MODE_HDLC) { seq_printf(m, " HDLC txok:%d rxok:%d", info->icount.txok, info->icount.rxok); if (info->icount.txunder) seq_printf(m, " txunder:%d", info->icount.txunder); if 
(info->icount.txabort) seq_printf(m, " txabort:%d", info->icount.txabort); if (info->icount.rxshort) seq_printf(m, " rxshort:%d", info->icount.rxshort); if (info->icount.rxlong) seq_printf(m, " rxlong:%d", info->icount.rxlong); if (info->icount.rxover) seq_printf(m, " rxover:%d", info->icount.rxover); if (info->icount.rxcrc) seq_printf(m, " rxcrc:%d", info->icount.rxcrc); } else { seq_printf(m, " ASYNC tx:%d rx:%d", info->icount.tx, info->icount.rx); if (info->icount.frame) seq_printf(m, " fe:%d", info->icount.frame); if (info->icount.parity) seq_printf(m, " pe:%d", info->icount.parity); if (info->icount.brk) seq_printf(m, " brk:%d", info->icount.brk); if (info->icount.overrun) seq_printf(m, " oe:%d", info->icount.overrun); } /* Append serial signal status to end */ seq_printf(m, " %s\n", stat_buf+1); seq_printf(m, "txactive=%d bh_req=%d bh_run=%d pending_bh=%x\n", info->tx_active,info->bh_requested,info->bh_running, info->pending_bh); } /* Called to print information about devices */ static int mgslpc_proc_show(struct seq_file *m, void *v) { MGSLPC_INFO *info; seq_printf(m, "synclink driver:%s\n", driver_version); info = mgslpc_device_list; while( info ) { line_info(m, info); info = info->next_device; } return 0; } static int mgslpc_proc_open(struct inode *inode, struct file *file) { return single_open(file, mgslpc_proc_show, NULL); } static const struct file_operations mgslpc_proc_fops = { .owner = THIS_MODULE, .open = mgslpc_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int rx_alloc_buffers(MGSLPC_INFO *info) { /* each buffer has header and data */ info->rx_buf_size = sizeof(RXBUF) + info->max_frame_size; /* calculate total allocation size for 8 buffers */ info->rx_buf_total_size = info->rx_buf_size * 8; /* limit total allocated memory */ if (info->rx_buf_total_size > 0x10000) info->rx_buf_total_size = 0x10000; /* calculate number of buffers */ info->rx_buf_count = info->rx_buf_total_size / info->rx_buf_size; info->rx_buf 
 = kmalloc(info->rx_buf_total_size, GFP_KERNEL);
	if (info->rx_buf == NULL)
		return -ENOMEM;
	rx_reset_buffers(info);
	return 0;
}

/* Release the receive buffer pool allocated by rx_alloc_buffers().
 * kfree(NULL) is a no-op, so this is safe if allocation never happened. */
static void rx_free_buffers(MGSLPC_INFO *info)
{
	kfree(info->rx_buf);
	info->rx_buf = NULL;
}

/* Acquire the memory resources needed by a device instance.
 * Currently only the rx buffer pool; release_resources() undoes it. */
static int claim_resources(MGSLPC_INFO *info)
{
	if (rx_alloc_buffers(info) < 0 ) {
		printk( "Can't allocate rx buffer %s\n", info->device_name);
		release_resources(info);
		return -ENODEV;
	}
	return 0;
}

/* Free everything claim_resources() acquired. */
static void release_resources(MGSLPC_INFO *info)
{
	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("release_resources(%s)\n", info->device_name);
	rx_free_buffers(info);
}

/* Add the specified device instance data structure to the
 * global linked list of devices and increment the device count.
 *
 * Arguments:		info	pointer to device instance data
 */
static void mgslpc_add_device(MGSLPC_INFO *info)
{
	info->next_device = NULL;
	/* line number = position in the global list, used as tty index */
	info->line = mgslpc_device_count;
	sprintf(info->device_name,"ttySLP%d",info->line);

	/* apply per-line maxframe= module parameter override, if any */
	if (info->line < MAX_DEVICE_COUNT) {
		if (maxframe[info->line])
			info->max_frame_size = maxframe[info->line];
	}

	mgslpc_device_count++;

	/* append to the tail of the singly linked device list */
	if (!mgslpc_device_list)
		mgslpc_device_list = info;
	else {
		MGSLPC_INFO *current_dev = mgslpc_device_list;
		while( current_dev->next_device )
			current_dev = current_dev->next_device;
		current_dev->next_device = info;
	}

	/* clamp max frame size to the supported range */
	if (info->max_frame_size < 4096)
		info->max_frame_size = 4096;
	else if (info->max_frame_size > 65535)
		info->max_frame_size = 65535;

	printk( "SyncLink PC Card %s:IO=%04X IRQ=%d\n",
		info->device_name, info->io_base, info->irq_level);

#if SYNCLINK_GENERIC_HDLC
	hdlcdev_init(info);
#endif
}

/* Unlink a device instance from the global list, release its
 * resources and free it.  No-op if the instance is not found. */
static void mgslpc_remove_device(MGSLPC_INFO *remove_info)
{
	MGSLPC_INFO *info = mgslpc_device_list;
	MGSLPC_INFO *last = NULL;

	while(info) {
		if (info == remove_info) {
			if (last)
				last->next_device = info->next_device;
			else
				mgslpc_device_list = info->next_device;
#if SYNCLINK_GENERIC_HDLC
			hdlcdev_exit(info);
#endif
			release_resources(info);
			kfree(info);
			mgslpc_device_count--;
			return;
		}
		last = info;
		info = info->next_device;
	}
}

static const
struct pcmcia_device_id mgslpc_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x02c5, 0x0050), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, mgslpc_ids); static struct pcmcia_driver mgslpc_driver = { .owner = THIS_MODULE, .name = "synclink_cs", .probe = mgslpc_probe, .remove = mgslpc_detach, .id_table = mgslpc_ids, .suspend = mgslpc_suspend, .resume = mgslpc_resume, }; static const struct tty_operations mgslpc_ops = { .open = mgslpc_open, .close = mgslpc_close, .write = mgslpc_write, .put_char = mgslpc_put_char, .flush_chars = mgslpc_flush_chars, .write_room = mgslpc_write_room, .chars_in_buffer = mgslpc_chars_in_buffer, .flush_buffer = mgslpc_flush_buffer, .ioctl = mgslpc_ioctl, .throttle = mgslpc_throttle, .unthrottle = mgslpc_unthrottle, .send_xchar = mgslpc_send_xchar, .break_ctl = mgslpc_break, .wait_until_sent = mgslpc_wait_until_sent, .set_termios = mgslpc_set_termios, .stop = tx_pause, .start = tx_release, .hangup = mgslpc_hangup, .tiocmget = tiocmget, .tiocmset = tiocmset, .get_icount = mgslpc_get_icount, .proc_fops = &mgslpc_proc_fops, }; static void synclink_cs_cleanup(void) { int rc; while(mgslpc_device_list) mgslpc_remove_device(mgslpc_device_list); if (serial_driver) { if ((rc = tty_unregister_driver(serial_driver))) printk("%s(%d) failed to unregister tty driver err=%d\n", __FILE__,__LINE__,rc); put_tty_driver(serial_driver); } pcmcia_unregister_driver(&mgslpc_driver); } static int __init synclink_cs_init(void) { int rc; if (break_on_load) { mgslpc_get_text_ptr(); BREAKPOINT(); } if ((rc = pcmcia_register_driver(&mgslpc_driver)) < 0) return rc; serial_driver = alloc_tty_driver(MAX_DEVICE_COUNT); if (!serial_driver) { rc = -ENOMEM; goto error; } /* Initialize the tty_driver structure */ serial_driver->driver_name = "synclink_cs"; serial_driver->name = "ttySLP"; serial_driver->major = ttymajor; serial_driver->minor_start = 64; serial_driver->type = TTY_DRIVER_TYPE_SERIAL; serial_driver->subtype = SERIAL_TYPE_NORMAL; serial_driver->init_termios = 
tty_std_termios; serial_driver->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; serial_driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(serial_driver, &mgslpc_ops); if ((rc = tty_register_driver(serial_driver)) < 0) { printk("%s(%d):Couldn't register serial driver\n", __FILE__,__LINE__); put_tty_driver(serial_driver); serial_driver = NULL; goto error; } printk("%s %s, tty major#%d\n", driver_name, driver_version, serial_driver->major); return 0; error: synclink_cs_cleanup(); return rc; } static void __exit synclink_cs_exit(void) { synclink_cs_cleanup(); } module_init(synclink_cs_init); module_exit(synclink_cs_exit); static void mgslpc_set_rate(MGSLPC_INFO *info, unsigned char channel, unsigned int rate) { unsigned int M, N; unsigned char val; /* note:standard BRG mode is broken in V3.2 chip * so enhanced mode is always used */ if (rate) { N = 3686400 / rate; if (!N) N = 1; N >>= 1; for (M = 1; N > 64 && M < 16; M++) N >>= 1; N--; /* BGR[5..0] = N * BGR[9..6] = M * BGR[7..0] contained in BGR register * BGR[9..8] contained in CCR2[7..6] * divisor = (N+1)*2^M * * Note: M *must* not be zero (causes asymetric duty cycle) */ write_reg(info, (unsigned char) (channel + BGR), (unsigned char) ((M << 6) + N)); val = read_reg(info, (unsigned char) (channel + CCR2)) & 0x3f; val |= ((M << 4) & 0xc0); write_reg(info, (unsigned char) (channel + CCR2), val); } } /* Enabled the AUX clock output at the specified frequency. 
*/ static void enable_auxclk(MGSLPC_INFO *info) { unsigned char val; /* MODE * * 07..06 MDS[1..0] 10 = transparent HDLC mode * 05 ADM Address Mode, 0 = no addr recognition * 04 TMD Timer Mode, 0 = external * 03 RAC Receiver Active, 0 = inactive * 02 RTS 0=RTS active during xmit, 1=RTS always active * 01 TRS Timer Resolution, 1=512 * 00 TLP Test Loop, 0 = no loop * * 1000 0010 */ val = 0x82; /* channel B RTS is used to enable AUXCLK driver on SP505 */ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed) val |= BIT2; write_reg(info, CHB + MODE, val); /* CCR0 * * 07 PU Power Up, 1=active, 0=power down * 06 MCE Master Clock Enable, 1=enabled * 05 Reserved, 0 * 04..02 SC[2..0] Encoding * 01..00 SM[1..0] Serial Mode, 00=HDLC * * 11000000 */ write_reg(info, CHB + CCR0, 0xc0); /* CCR1 * * 07 SFLG Shared Flag, 0 = disable shared flags * 06 GALP Go Active On Loop, 0 = not used * 05 GLP Go On Loop, 0 = not used * 04 ODS Output Driver Select, 1=TxD is push-pull output * 03 ITF Interframe Time Fill, 0=mark, 1=flag * 02..00 CM[2..0] Clock Mode * * 0001 0111 */ write_reg(info, CHB + CCR1, 0x17); /* CCR2 (Channel B) * * 07..06 BGR[9..8] Baud rate bits 9..8 * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value * 04 SSEL Clock source select, 1=submode b * 03 TOE 0=TxCLK is input, 1=TxCLK is output * 02 RWX Read/Write Exchange 0=disabled * 01 C32, CRC select, 0=CRC-16, 1=CRC-32 * 00 DIV, data inversion 0=disabled, 1=enabled * * 0011 1000 */ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed) write_reg(info, CHB + CCR2, 0x38); else write_reg(info, CHB + CCR2, 0x30); /* CCR4 * * 07 MCK4 Master Clock Divide by 4, 1=enabled * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled * 05 TST1 Test Pin, 0=normal operation * 04 ICD Ivert Carrier Detect, 1=enabled (active low) * 03..02 Reserved, must be 0 * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes * * 0101 0000 */ write_reg(info, CHB + CCR4, 0x50); /* if auxclk not enabled, set internal BRG so * CTS 
transitions can be detected (requires TxC) */ if (info->params.mode == MGSL_MODE_HDLC && info->params.clock_speed) mgslpc_set_rate(info, CHB, info->params.clock_speed); else mgslpc_set_rate(info, CHB, 921600); } static void loopback_enable(MGSLPC_INFO *info) { unsigned char val; /* CCR1:02..00 CM[2..0] Clock Mode = 111 (clock mode 7) */ val = read_reg(info, CHA + CCR1) | (BIT2 + BIT1 + BIT0); write_reg(info, CHA + CCR1, val); /* CCR2:04 SSEL Clock source select, 1=submode b */ val = read_reg(info, CHA + CCR2) | (BIT4 + BIT5); write_reg(info, CHA + CCR2, val); /* set LinkSpeed if available, otherwise default to 2Mbps */ if (info->params.clock_speed) mgslpc_set_rate(info, CHA, info->params.clock_speed); else mgslpc_set_rate(info, CHA, 1843200); /* MODE:00 TLP Test Loop, 1=loopback enabled */ val = read_reg(info, CHA + MODE) | BIT0; write_reg(info, CHA + MODE, val); } static void hdlc_mode(MGSLPC_INFO *info) { unsigned char val; unsigned char clkmode, clksubmode; /* disable all interrupts */ irq_disable(info, CHA, 0xffff); irq_disable(info, CHB, 0xffff); port_irq_disable(info, 0xff); /* assume clock mode 0a, rcv=RxC xmt=TxC */ clkmode = clksubmode = 0; if (info->params.flags & HDLC_FLAG_RXC_DPLL && info->params.flags & HDLC_FLAG_TXC_DPLL) { /* clock mode 7a, rcv = DPLL, xmt = DPLL */ clkmode = 7; } else if (info->params.flags & HDLC_FLAG_RXC_BRG && info->params.flags & HDLC_FLAG_TXC_BRG) { /* clock mode 7b, rcv = BRG, xmt = BRG */ clkmode = 7; clksubmode = 1; } else if (info->params.flags & HDLC_FLAG_RXC_DPLL) { if (info->params.flags & HDLC_FLAG_TXC_BRG) { /* clock mode 6b, rcv = DPLL, xmt = BRG/16 */ clkmode = 6; clksubmode = 1; } else { /* clock mode 6a, rcv = DPLL, xmt = TxC */ clkmode = 6; } } else if (info->params.flags & HDLC_FLAG_TXC_BRG) { /* clock mode 0b, rcv = RxC, xmt = BRG */ clksubmode = 1; } /* MODE * * 07..06 MDS[1..0] 10 = transparent HDLC mode * 05 ADM Address Mode, 0 = no addr recognition * 04 TMD Timer Mode, 0 = external * 03 RAC Receiver Active, 
0 = inactive * 02 RTS 0=RTS active during xmit, 1=RTS always active * 01 TRS Timer Resolution, 1=512 * 00 TLP Test Loop, 0 = no loop * * 1000 0010 */ val = 0x82; if (info->params.loopback) val |= BIT0; /* preserve RTS state */ if (info->serial_signals & SerialSignal_RTS) val |= BIT2; write_reg(info, CHA + MODE, val); /* CCR0 * * 07 PU Power Up, 1=active, 0=power down * 06 MCE Master Clock Enable, 1=enabled * 05 Reserved, 0 * 04..02 SC[2..0] Encoding * 01..00 SM[1..0] Serial Mode, 00=HDLC * * 11000000 */ val = 0xc0; switch (info->params.encoding) { case HDLC_ENCODING_NRZI: val |= BIT3; break; case HDLC_ENCODING_BIPHASE_SPACE: val |= BIT4; break; // FM0 case HDLC_ENCODING_BIPHASE_MARK: val |= BIT4 + BIT2; break; // FM1 case HDLC_ENCODING_BIPHASE_LEVEL: val |= BIT4 + BIT3; break; // Manchester } write_reg(info, CHA + CCR0, val); /* CCR1 * * 07 SFLG Shared Flag, 0 = disable shared flags * 06 GALP Go Active On Loop, 0 = not used * 05 GLP Go On Loop, 0 = not used * 04 ODS Output Driver Select, 1=TxD is push-pull output * 03 ITF Interframe Time Fill, 0=mark, 1=flag * 02..00 CM[2..0] Clock Mode * * 0001 0000 */ val = 0x10 + clkmode; write_reg(info, CHA + CCR1, val); /* CCR2 * * 07..06 BGR[9..8] Baud rate bits 9..8 * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value * 04 SSEL Clock source select, 1=submode b * 03 TOE 0=TxCLK is input, 0=TxCLK is input * 02 RWX Read/Write Exchange 0=disabled * 01 C32, CRC select, 0=CRC-16, 1=CRC-32 * 00 DIV, data inversion 0=disabled, 1=enabled * * 0000 0000 */ val = 0x00; if (clkmode == 2 || clkmode == 3 || clkmode == 6 || clkmode == 7 || (clkmode == 0 && clksubmode == 1)) val |= BIT5; if (clksubmode) val |= BIT4; if (info->params.crc_type == HDLC_CRC_32_CCITT) val |= BIT1; if (info->params.encoding == HDLC_ENCODING_NRZB) val |= BIT0; write_reg(info, CHA + CCR2, val); /* CCR3 * * 07..06 PRE[1..0] Preamble count 00=1, 01=2, 10=4, 11=8 * 05 EPT Enable preamble transmission, 1=enabled * 04 RADD Receive address pushed to FIFO, 0=disabled * 03 
CRL CRC Reset Level, 0=FFFF * 02 RCRC Rx CRC 0=On 1=Off * 01 TCRC Tx CRC 0=On 1=Off * 00 PSD DPLL Phase Shift Disable * * 0000 0000 */ val = 0x00; if (info->params.crc_type == HDLC_CRC_NONE) val |= BIT2 + BIT1; if (info->params.preamble != HDLC_PREAMBLE_PATTERN_NONE) val |= BIT5; switch (info->params.preamble_length) { case HDLC_PREAMBLE_LENGTH_16BITS: val |= BIT6; break; case HDLC_PREAMBLE_LENGTH_32BITS: val |= BIT6; break; case HDLC_PREAMBLE_LENGTH_64BITS: val |= BIT7 + BIT6; break; } write_reg(info, CHA + CCR3, val); /* PRE - Preamble pattern */ val = 0; switch (info->params.preamble) { case HDLC_PREAMBLE_PATTERN_FLAGS: val = 0x7e; break; case HDLC_PREAMBLE_PATTERN_10: val = 0xaa; break; case HDLC_PREAMBLE_PATTERN_01: val = 0x55; break; case HDLC_PREAMBLE_PATTERN_ONES: val = 0xff; break; } write_reg(info, CHA + PRE, val); /* CCR4 * * 07 MCK4 Master Clock Divide by 4, 1=enabled * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled * 05 TST1 Test Pin, 0=normal operation * 04 ICD Ivert Carrier Detect, 1=enabled (active low) * 03..02 Reserved, must be 0 * 01..00 RFT[1..0] RxFIFO Threshold 00=32 bytes * * 0101 0000 */ val = 0x50; write_reg(info, CHA + CCR4, val); if (info->params.flags & HDLC_FLAG_RXC_DPLL) mgslpc_set_rate(info, CHA, info->params.clock_speed * 16); else mgslpc_set_rate(info, CHA, info->params.clock_speed); /* RLCR Receive length check register * * 7 1=enable receive length check * 6..0 Max frame length = (RL + 1) * 32 */ write_reg(info, CHA + RLCR, 0); /* XBCH Transmit Byte Count High * * 07 DMA mode, 0 = interrupt driven * 06 NRM, 0=ABM (ignored) * 05 CAS Carrier Auto Start * 04 XC Transmit Continuously (ignored) * 03..00 XBC[10..8] Transmit byte count bits 10..8 * * 0000 0000 */ val = 0x00; if (info->params.flags & HDLC_FLAG_AUTO_DCD) val |= BIT5; write_reg(info, CHA + XBCH, val); enable_auxclk(info); if (info->params.loopback || info->testing_irq) loopback_enable(info); if (info->params.flags & HDLC_FLAG_AUTO_CTS) { irq_enable(info, CHB, 
IRQ_CTS); /* PVR[3] 1=AUTO CTS active */ set_reg_bits(info, CHA + PVR, BIT3); } else clear_reg_bits(info, CHA + PVR, BIT3); irq_enable(info, CHA, IRQ_RXEOM + IRQ_RXFIFO + IRQ_ALLSENT + IRQ_UNDERRUN + IRQ_TXFIFO); issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET); wait_command_complete(info, CHA); read_reg16(info, CHA + ISR); /* clear pending IRQs */ /* Master clock mode enabled above to allow reset commands * to complete even if no data clocks are present. * * Disable master clock mode for normal communications because * V3.2 of the ESCC2 has a bug that prevents the transmit all sent * IRQ when in master clock mode. * * Leave master clock mode enabled for IRQ test because the * timer IRQ used by the test can only happen in master clock mode. */ if (!info->testing_irq) clear_reg_bits(info, CHA + CCR0, BIT6); tx_set_idle(info); tx_stop(info); rx_stop(info); } static void rx_stop(MGSLPC_INFO *info) { if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):rx_stop(%s)\n", __FILE__,__LINE__, info->device_name ); /* MODE:03 RAC Receiver Active, 0=inactive */ clear_reg_bits(info, CHA + MODE, BIT3); info->rx_enabled = false; info->rx_overflow = false; } static void rx_start(MGSLPC_INFO *info) { if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):rx_start(%s)\n", __FILE__,__LINE__, info->device_name ); rx_reset_buffers(info); info->rx_enabled = false; info->rx_overflow = false; /* MODE:03 RAC Receiver Active, 1=active */ set_reg_bits(info, CHA + MODE, BIT3); info->rx_enabled = true; } static void tx_start(MGSLPC_INFO *info, struct tty_struct *tty) { if (debug_level >= DEBUG_LEVEL_ISR) printk("%s(%d):tx_start(%s)\n", __FILE__,__LINE__, info->device_name ); if (info->tx_count) { /* If auto RTS enabled and RTS is inactive, then assert */ /* RTS and set a flag indicating that the driver should */ /* negate RTS when the transmission completes. 
 */
	info->drop_rts_on_tx_done = false;

	if (info->params.flags & HDLC_FLAG_AUTO_RTS) {
		get_signals(info);
		if (!(info->serial_signals & SerialSignal_RTS)) {
			info->serial_signals |= SerialSignal_RTS;
			set_signals(info);
			info->drop_rts_on_tx_done = true;
		}
	}

	if (info->params.mode == MGSL_MODE_ASYNC) {
		/* async: only kick the transmitter if it is idle */
		if (!info->tx_active) {
			info->tx_active = true;
			tx_ready(info, tty);
		}
	} else {
		/* HDLC: always (re)start and arm a 5 s frame timeout */
		info->tx_active = true;
		tx_ready(info, tty);
		mod_timer(&info->tx_timer, jiffies + msecs_to_jiffies(5000));
	}
	}

	if (!info->tx_enabled)
		info->tx_enabled = true;
}

/* Stop the transmitter: cancel the tx timeout timer and mark
 * the transmitter disabled and inactive. */
static void tx_stop(MGSLPC_INFO *info)
{
	if (debug_level >= DEBUG_LEVEL_ISR)
		printk("%s(%d):tx_stop(%s)\n",
			 __FILE__,__LINE__, info->device_name );

	del_timer(&info->tx_timer);

	info->tx_enabled = false;
	info->tx_active = false;
}

/* Reset the adapter to a known state and prepare it for further use.
 */
static void reset_device(MGSLPC_INFO *info)
{
	/* power up both channels (set BIT7) */
	write_reg(info, CHA + CCR0, 0x80);
	write_reg(info, CHB + CCR0, 0x80);
	write_reg(info, CHA + MODE, 0);
	write_reg(info, CHB + MODE, 0);

	/* disable all interrupts */
	irq_disable(info, CHA, 0xffff);
	irq_disable(info, CHB, 0xffff);
	port_irq_disable(info, 0xff);

	/* PCR Port Configuration Register
	 *
	 * 07..04  DEC[3..0] Serial I/F select outputs
	 * 03      output, 1=AUTO CTS control enabled
	 * 02      RI Ring Indicator input 0=active
	 * 01      DSR input 0=active
	 * 00      DTR output 0=active
	 *
	 * 0000 0110
	 */
	write_reg(info, PCR, 0x06);

	/* PVR Port Value Register
	 *
	 * 07..04  DEC[3..0] Serial I/F select (0000=disabled)
	 * 03      AUTO CTS output 1=enabled
	 * 02      RI Ring Indicator input
	 * 01      DSR input
	 * 00      DTR output (1=inactive)
	 *
	 * 0000 0001
	 */
//	write_reg(info, PVR, PVR_DTR);

	/* IPC Interrupt Port Configuration
	 *
	 * 07      VIS 1=Masked interrupts visible
	 * 06..05  Reserved, 0
	 * 04..03  SLA Slave address, 00 ignored
	 * 02      CASM Cascading Mode, 1=daisy chain
	 * 01..00  IC[1..0] Interrupt Config, 01=push-pull output, active low
	 *
	 * 0000 0101
	 */
	write_reg(info, IPC, 0x05);
}

static void async_mode(MGSLPC_INFO
*info) { unsigned char val; /* disable all interrupts */ irq_disable(info, CHA, 0xffff); irq_disable(info, CHB, 0xffff); port_irq_disable(info, 0xff); /* MODE * * 07 Reserved, 0 * 06 FRTS RTS State, 0=active * 05 FCTS Flow Control on CTS * 04 FLON Flow Control Enable * 03 RAC Receiver Active, 0 = inactive * 02 RTS 0=Auto RTS, 1=manual RTS * 01 TRS Timer Resolution, 1=512 * 00 TLP Test Loop, 0 = no loop * * 0000 0110 */ val = 0x06; if (info->params.loopback) val |= BIT0; /* preserve RTS state */ if (!(info->serial_signals & SerialSignal_RTS)) val |= BIT6; write_reg(info, CHA + MODE, val); /* CCR0 * * 07 PU Power Up, 1=active, 0=power down * 06 MCE Master Clock Enable, 1=enabled * 05 Reserved, 0 * 04..02 SC[2..0] Encoding, 000=NRZ * 01..00 SM[1..0] Serial Mode, 11=Async * * 1000 0011 */ write_reg(info, CHA + CCR0, 0x83); /* CCR1 * * 07..05 Reserved, 0 * 04 ODS Output Driver Select, 1=TxD is push-pull output * 03 BCR Bit Clock Rate, 1=16x * 02..00 CM[2..0] Clock Mode, 111=BRG * * 0001 1111 */ write_reg(info, CHA + CCR1, 0x1f); /* CCR2 (channel A) * * 07..06 BGR[9..8] Baud rate bits 9..8 * 05 BDF Baud rate divisor factor, 0=1, 1=BGR value * 04 SSEL Clock source select, 1=submode b * 03 TOE 0=TxCLK is input, 0=TxCLK is input * 02 RWX Read/Write Exchange 0=disabled * 01 Reserved, 0 * 00 DIV, data inversion 0=disabled, 1=enabled * * 0001 0000 */ write_reg(info, CHA + CCR2, 0x10); /* CCR3 * * 07..01 Reserved, 0 * 00 PSD DPLL Phase Shift Disable * * 0000 0000 */ write_reg(info, CHA + CCR3, 0); /* CCR4 * * 07 MCK4 Master Clock Divide by 4, 1=enabled * 06 EBRG Enhanced Baud Rate Generator Mode, 1=enabled * 05 TST1 Test Pin, 0=normal operation * 04 ICD Ivert Carrier Detect, 1=enabled (active low) * 03..00 Reserved, must be 0 * * 0101 0000 */ write_reg(info, CHA + CCR4, 0x50); mgslpc_set_rate(info, CHA, info->params.data_rate * 16); /* DAFO Data Format * * 07 Reserved, 0 * 06 XBRK transmit break, 0=normal operation * 05 Stop bits (0=1, 1=2) * 04..03 PAR[1..0] Parity (01=odd, 
10=even) * 02 PAREN Parity Enable * 01..00 CHL[1..0] Character Length (00=8, 01=7) * */ val = 0x00; if (info->params.data_bits != 8) val |= BIT0; /* 7 bits */ if (info->params.stop_bits != 1) val |= BIT5; if (info->params.parity != ASYNC_PARITY_NONE) { val |= BIT2; /* Parity enable */ if (info->params.parity == ASYNC_PARITY_ODD) val |= BIT3; else val |= BIT4; } write_reg(info, CHA + DAFO, val); /* RFC Rx FIFO Control * * 07 Reserved, 0 * 06 DPS, 1=parity bit not stored in data byte * 05 DXS, 0=all data stored in FIFO (including XON/XOFF) * 04 RFDF Rx FIFO Data Format, 1=status byte stored in FIFO * 03..02 RFTH[1..0], rx threshold, 11=16 status + 16 data byte * 01 Reserved, 0 * 00 TCDE Terminate Char Detect Enable, 0=disabled * * 0101 1100 */ write_reg(info, CHA + RFC, 0x5c); /* RLCR Receive length check register * * Max frame length = (RL + 1) * 32 */ write_reg(info, CHA + RLCR, 0); /* XBCH Transmit Byte Count High * * 07 DMA mode, 0 = interrupt driven * 06 NRM, 0=ABM (ignored) * 05 CAS Carrier Auto Start * 04 XC Transmit Continuously (ignored) * 03..00 XBC[10..8] Transmit byte count bits 10..8 * * 0000 0000 */ val = 0x00; if (info->params.flags & HDLC_FLAG_AUTO_DCD) val |= BIT5; write_reg(info, CHA + XBCH, val); if (info->params.flags & HDLC_FLAG_AUTO_CTS) irq_enable(info, CHA, IRQ_CTS); /* MODE:03 RAC Receiver Active, 1=active */ set_reg_bits(info, CHA + MODE, BIT3); enable_auxclk(info); if (info->params.flags & HDLC_FLAG_AUTO_CTS) { irq_enable(info, CHB, IRQ_CTS); /* PVR[3] 1=AUTO CTS active */ set_reg_bits(info, CHA + PVR, BIT3); } else clear_reg_bits(info, CHA + PVR, BIT3); irq_enable(info, CHA, IRQ_RXEOM + IRQ_RXFIFO + IRQ_BREAK_ON + IRQ_RXTIME + IRQ_ALLSENT + IRQ_TXFIFO); issue_command(info, CHA, CMD_TXRESET + CMD_RXRESET); wait_command_complete(info, CHA); read_reg16(info, CHA + ISR); /* clear pending IRQs */ } /* Set the HDLC idle mode for the transmitter. 
 */
static void tx_set_idle(MGSLPC_INFO *info)
{
	/* Note: ESCC2 only supports flags and one idle modes */
	if (info->idle_mode == HDLC_TXIDLE_FLAGS)
		set_reg_bits(info, CHA + CCR1, BIT3);
	else
		clear_reg_bits(info, CHA + CCR1, BIT3);
}

/* get state of the V24 status (input) signals.
 */
static void get_signals(MGSLPC_INFO *info)
{
	unsigned char status = 0;

	/* preserve DTR and RTS */
	info->serial_signals &= SerialSignal_DTR + SerialSignal_RTS;

	/* DCD and CTS come from channel B status registers */
	if (read_reg(info, CHB + VSTR) & BIT7)
		info->serial_signals |= SerialSignal_DCD;
	if (read_reg(info, CHB + STAR) & BIT1)
		info->serial_signals |= SerialSignal_CTS;

	/* RI and DSR are active-low inputs on the port value register */
	status = read_reg(info, CHA + PVR);
	if (!(status & PVR_RI))
		info->serial_signals |= SerialSignal_RI;
	if (!(status & PVR_DSR))
		info->serial_signals |= SerialSignal_DSR;
}

/* Set the state of DTR and RTS based on contents of
 * serial_signals member of device extension.
 */
static void set_signals(MGSLPC_INFO *info)
{
	unsigned char val;

	val = read_reg(info, CHA + MODE);
	if (info->params.mode == MGSL_MODE_ASYNC) {
		/* async mode: MODE bit6 (FRTS) is active-low RTS */
		if (info->serial_signals & SerialSignal_RTS)
			val &= ~BIT6;
		else
			val |= BIT6;
	} else {
		/* sync mode: MODE bit2 asserts RTS */
		if (info->serial_signals & SerialSignal_RTS)
			val |= BIT2;
		else
			val &= ~BIT2;
	}
	write_reg(info, CHA + MODE, val);

	/* PVR_DTR is active-low: clear the bit to assert DTR */
	if (info->serial_signals & SerialSignal_DTR)
		clear_reg_bits(info, CHA + PVR, PVR_DTR);
	else
		set_reg_bits(info, CHA + PVR, PVR_DTR);
}

/* Reset the receive buffer ring: clear get/put indices, frame count
 * and the per-buffer status/count headers. */
static void rx_reset_buffers(MGSLPC_INFO *info)
{
	RXBUF *buf;
	int i;

	info->rx_put = 0;
	info->rx_get = 0;
	info->rx_frame_count = 0;
	for (i=0 ; i < info->rx_buf_count ; i++) {
		buf = (RXBUF*)(info->rx_buf + (i * info->rx_buf_size));
		buf->status = buf->count = 0;
	}
}

/* Attempt to return a received HDLC frame
 * Only frames received without errors are returned.
* * Returns true if frame returned, otherwise false */ static bool rx_get_frame(MGSLPC_INFO *info, struct tty_struct *tty) { unsigned short status; RXBUF *buf; unsigned int framesize = 0; unsigned long flags; bool return_frame = false; if (info->rx_frame_count == 0) return false; buf = (RXBUF*)(info->rx_buf + (info->rx_get * info->rx_buf_size)); status = buf->status; /* 07 VFR 1=valid frame * 06 RDO 1=data overrun * 05 CRC 1=OK, 0=error * 04 RAB 1=frame aborted */ if ((status & 0xf0) != 0xA0) { if (!(status & BIT7) || (status & BIT4)) info->icount.rxabort++; else if (status & BIT6) info->icount.rxover++; else if (!(status & BIT5)) { info->icount.rxcrc++; if (info->params.crc_type & HDLC_CRC_RETURN_EX) return_frame = true; } framesize = 0; #if SYNCLINK_GENERIC_HDLC { info->netdev->stats.rx_errors++; info->netdev->stats.rx_frame_errors++; } #endif } else return_frame = true; if (return_frame) framesize = buf->count; if (debug_level >= DEBUG_LEVEL_BH) printk("%s(%d):rx_get_frame(%s) status=%04X size=%d\n", __FILE__,__LINE__,info->device_name,status,framesize); if (debug_level >= DEBUG_LEVEL_DATA) trace_block(info, buf->data, framesize, 0); if (framesize) { if ((info->params.crc_type & HDLC_CRC_RETURN_EX && framesize+1 > info->max_frame_size) || framesize > info->max_frame_size) info->icount.rxlong++; else { if (status & BIT5) info->icount.rxok++; if (info->params.crc_type & HDLC_CRC_RETURN_EX) { *(buf->data + framesize) = status & BIT5 ? 
RX_OK:RX_CRC_ERROR; ++framesize; } #if SYNCLINK_GENERIC_HDLC if (info->netcount) hdlcdev_rx(info, buf->data, framesize); else #endif ldisc_receive_buf(tty, buf->data, info->flag_buf, framesize); } } spin_lock_irqsave(&info->lock,flags); buf->status = buf->count = 0; info->rx_frame_count--; info->rx_get++; if (info->rx_get >= info->rx_buf_count) info->rx_get = 0; spin_unlock_irqrestore(&info->lock,flags); return true; } static bool register_test(MGSLPC_INFO *info) { static unsigned char patterns[] = { 0x00, 0xff, 0xaa, 0x55, 0x69, 0x96, 0x0f }; static unsigned int count = ARRAY_SIZE(patterns); unsigned int i; bool rc = true; unsigned long flags; spin_lock_irqsave(&info->lock,flags); reset_device(info); for (i = 0; i < count; i++) { write_reg(info, XAD1, patterns[i]); write_reg(info, XAD2, patterns[(i + 1) % count]); if ((read_reg(info, XAD1) != patterns[i]) || (read_reg(info, XAD2) != patterns[(i + 1) % count])) { rc = false; break; } } spin_unlock_irqrestore(&info->lock,flags); return rc; } static bool irq_test(MGSLPC_INFO *info) { unsigned long end_time; unsigned long flags; spin_lock_irqsave(&info->lock,flags); reset_device(info); info->testing_irq = true; hdlc_mode(info); info->irq_occurred = false; /* init hdlc mode */ irq_enable(info, CHA, IRQ_TIMER); write_reg(info, CHA + TIMR, 0); /* 512 cycles */ issue_command(info, CHA, CMD_START_TIMER); spin_unlock_irqrestore(&info->lock,flags); end_time=100; while(end_time-- && !info->irq_occurred) { msleep_interruptible(10); } info->testing_irq = false; spin_lock_irqsave(&info->lock,flags); reset_device(info); spin_unlock_irqrestore(&info->lock,flags); return info->irq_occurred; } static int adapter_test(MGSLPC_INFO *info) { if (!register_test(info)) { info->init_error = DiagStatus_AddressFailure; printk( "%s(%d):Register test failure for device %s Addr=%04X\n", __FILE__,__LINE__,info->device_name, (unsigned short)(info->io_base) ); return -ENODEV; } if (!irq_test(info)) { info->init_error = DiagStatus_IrqFailure; 
printk( "%s(%d):Interrupt test failure for device %s IRQ=%d\n",
			__FILE__,__LINE__,info->device_name, (unsigned short)(info->irq_level) );
		return -ENODEV;
	}

	if (debug_level >= DEBUG_LEVEL_INFO)
		printk("%s(%d):device %s passed diagnostics\n",
			__FILE__,__LINE__,info->device_name);
	return 0;
}

/* Dump a data block to the console as hex bytes plus a printable-ASCII
 * column, 16 bytes per line; xmit selects the "tx"/"rx" banner. */
static void trace_block(MGSLPC_INFO *info,const char* data, int count, int xmit)
{
	int i;
	int linecount;
	if (xmit)
		printk("%s tx data:\n",info->device_name);
	else
		printk("%s rx data:\n",info->device_name);
	while(count) {
		if (count > 16)
			linecount = 16;
		else
			linecount = count;
		for(i=0;i<linecount;i++)
			printk("%02X ",(unsigned char)data[i]);
		/* pad short final lines so the ASCII column stays aligned */
		for(;i<17;i++)
			printk(" ");
		for(i=0;i<linecount;i++) {
			/* octal 040..0176 = printable ASCII range */
			if (data[i]>=040 && data[i]<=0176)
				printk("%c",data[i]);
			else
				printk(".");
		}
		printk("\n");
		data  += linecount;
		count -= linecount;
	}
}

/* HDLC frame time out
 * update stats and do tx completion processing
 */
static void tx_timeout(unsigned long context)
{
	MGSLPC_INFO *info = (MGSLPC_INFO*)context;
	unsigned long flags;

	if ( debug_level >= DEBUG_LEVEL_INFO )
		printk( "%s(%d):tx_timeout(%s)\n",
			__FILE__,__LINE__,info->device_name);
	if(info->tx_active &&
	   info->params.mode == MGSL_MODE_HDLC) {
		info->icount.txtimeout++;
	}
	/* abandon the transmit: drop tx state and empty the buffer indices */
	spin_lock_irqsave(&info->lock,flags);
	info->tx_active = false;
	info->tx_count = info->tx_put = info->tx_get = 0;
	spin_unlock_irqrestore(&info->lock,flags);

#if SYNCLINK_GENERIC_HDLC
	if (info->netcount)
		hdlcdev_tx_done(info);
	else
#endif
	{
		/* tty path: run transmit-done bottom half processing */
		struct tty_struct *tty = tty_port_tty_get(&info->port);
		bh_transmit(info, tty);
		tty_kref_put(tty);
	}
}

#if SYNCLINK_GENERIC_HDLC
/**
 * called by generic HDLC layer when protocol selected (PPP, frame relay, etc.)
* set encoding and frame check sequence (FCS) options * * dev pointer to network device structure * encoding serial encoding setting * parity FCS setting * * returns 0 if success, otherwise error code */ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding, unsigned short parity) { MGSLPC_INFO *info = dev_to_port(dev); struct tty_struct *tty; unsigned char new_encoding; unsigned short new_crctype; /* return error if TTY interface open */ if (info->port.count) return -EBUSY; switch (encoding) { case ENCODING_NRZ: new_encoding = HDLC_ENCODING_NRZ; break; case ENCODING_NRZI: new_encoding = HDLC_ENCODING_NRZI_SPACE; break; case ENCODING_FM_MARK: new_encoding = HDLC_ENCODING_BIPHASE_MARK; break; case ENCODING_FM_SPACE: new_encoding = HDLC_ENCODING_BIPHASE_SPACE; break; case ENCODING_MANCHESTER: new_encoding = HDLC_ENCODING_BIPHASE_LEVEL; break; default: return -EINVAL; } switch (parity) { case PARITY_NONE: new_crctype = HDLC_CRC_NONE; break; case PARITY_CRC16_PR1_CCITT: new_crctype = HDLC_CRC_16_CCITT; break; case PARITY_CRC32_PR1_CCITT: new_crctype = HDLC_CRC_32_CCITT; break; default: return -EINVAL; } info->params.encoding = new_encoding; info->params.crc_type = new_crctype; /* if network interface up, reprogram hardware */ if (info->netcount) { tty = tty_port_tty_get(&info->port); mgslpc_program_hw(info, tty); tty_kref_put(tty); } return 0; } /** * called by generic HDLC layer to send frame * * skb socket buffer containing HDLC frame * dev pointer to network device structure */ static netdev_tx_t hdlcdev_xmit(struct sk_buff *skb, struct net_device *dev) { MGSLPC_INFO *info = dev_to_port(dev); unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk(KERN_INFO "%s:hdlc_xmit(%s)\n",__FILE__,dev->name); /* stop sending until this frame completes */ netif_stop_queue(dev); /* copy data to device buffers */ skb_copy_from_linear_data(skb, info->tx_buf, skb->len); info->tx_get = 0; info->tx_put = info->tx_count = skb->len; /* update network 
statistics */ dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; /* done with socket buffer, so free it */ dev_kfree_skb(skb); /* save start time for transmit timeout detection */ dev->trans_start = jiffies; /* start hardware transmitter if necessary */ spin_lock_irqsave(&info->lock,flags); if (!info->tx_active) { struct tty_struct *tty = tty_port_tty_get(&info->port); tx_start(info, tty); tty_kref_put(tty); } spin_unlock_irqrestore(&info->lock,flags); return NETDEV_TX_OK; } /** * called by network layer when interface enabled * claim resources and initialize hardware * * dev pointer to network device structure * * returns 0 if success, otherwise error code */ static int hdlcdev_open(struct net_device *dev) { MGSLPC_INFO *info = dev_to_port(dev); struct tty_struct *tty; int rc; unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s:hdlcdev_open(%s)\n",__FILE__,dev->name); /* generic HDLC layer open processing */ if ((rc = hdlc_open(dev))) return rc; /* arbitrate between network and tty opens */ spin_lock_irqsave(&info->netlock, flags); if (info->port.count != 0 || info->netcount != 0) { printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name); spin_unlock_irqrestore(&info->netlock, flags); return -EBUSY; } info->netcount=1; spin_unlock_irqrestore(&info->netlock, flags); tty = tty_port_tty_get(&info->port); /* claim resources and init adapter */ if ((rc = startup(info, tty)) != 0) { tty_kref_put(tty); spin_lock_irqsave(&info->netlock, flags); info->netcount=0; spin_unlock_irqrestore(&info->netlock, flags); return rc; } /* assert DTR and RTS, apply hardware settings */ info->serial_signals |= SerialSignal_RTS + SerialSignal_DTR; mgslpc_program_hw(info, tty); tty_kref_put(tty); /* enable network layer transmit */ dev->trans_start = jiffies; netif_start_queue(dev); /* inform generic HDLC layer of current DCD status */ spin_lock_irqsave(&info->lock, flags); get_signals(info); spin_unlock_irqrestore(&info->lock, flags); if 
(info->serial_signals & SerialSignal_DCD) netif_carrier_on(dev); else netif_carrier_off(dev); return 0; } /** * called by network layer when interface is disabled * shutdown hardware and release resources * * dev pointer to network device structure * * returns 0 if success, otherwise error code */ static int hdlcdev_close(struct net_device *dev) { MGSLPC_INFO *info = dev_to_port(dev); struct tty_struct *tty = tty_port_tty_get(&info->port); unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s:hdlcdev_close(%s)\n",__FILE__,dev->name); netif_stop_queue(dev); /* shutdown adapter and release resources */ shutdown(info, tty); tty_kref_put(tty); hdlc_close(dev); spin_lock_irqsave(&info->netlock, flags); info->netcount=0; spin_unlock_irqrestore(&info->netlock, flags); return 0; } /** * called by network layer to process IOCTL call to network device * * dev pointer to network device structure * ifr pointer to network interface request structure * cmd IOCTL command code * * returns 0 if success, otherwise error code */ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { const size_t size = sizeof(sync_serial_settings); sync_serial_settings new_line; sync_serial_settings __user *line = ifr->ifr_settings.ifs_ifsu.sync; MGSLPC_INFO *info = dev_to_port(dev); unsigned int flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name); /* return error if TTY interface open */ if (info->port.count) return -EBUSY; if (cmd != SIOCWANDEV) return hdlc_ioctl(dev, ifr, cmd); memset(&new_line, 0, size); switch(ifr->ifr_settings.type) { case IF_GET_IFACE: /* return current sync_serial_settings */ ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; if (ifr->ifr_settings.size < size) { ifr->ifr_settings.size = size; /* data size wanted */ return -ENOBUFS; } flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | 
HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); switch (flags){ case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN): new_line.clock_type = CLOCK_EXT; break; case (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_INT; break; case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG): new_line.clock_type = CLOCK_TXINT; break; case (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN): new_line.clock_type = CLOCK_TXFROMRX; break; default: new_line.clock_type = CLOCK_DEFAULT; } new_line.clock_rate = info->params.clock_speed; new_line.loopback = info->params.loopback ? 1:0; if (copy_to_user(line, &new_line, size)) return -EFAULT; return 0; case IF_IFACE_SYNC_SERIAL: /* set sync_serial_settings */ if(!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&new_line, line, size)) return -EFAULT; switch (new_line.clock_type) { case CLOCK_EXT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_TXCPIN; break; case CLOCK_TXFROMRX: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_RXCPIN; break; case CLOCK_INT: flags = HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG; break; case CLOCK_TXINT: flags = HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_TXC_BRG; break; case CLOCK_DEFAULT: flags = info->params.flags & (HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); break; default: return -EINVAL; } if (new_line.loopback != 0 && new_line.loopback != 1) return -EINVAL; info->params.flags &= ~(HDLC_FLAG_RXC_RXCPIN | HDLC_FLAG_RXC_DPLL | HDLC_FLAG_RXC_BRG | HDLC_FLAG_RXC_TXCPIN | HDLC_FLAG_TXC_TXCPIN | HDLC_FLAG_TXC_DPLL | HDLC_FLAG_TXC_BRG | HDLC_FLAG_TXC_RXCPIN); info->params.flags |= flags; info->params.loopback = new_line.loopback; if (flags & (HDLC_FLAG_RXC_BRG | HDLC_FLAG_TXC_BRG)) info->params.clock_speed = new_line.clock_rate; else info->params.clock_speed = 0; /* if network interface up, reprogram hardware */ if (info->netcount) { struct tty_struct *tty = tty_port_tty_get(&info->port); 
mgslpc_program_hw(info, tty); tty_kref_put(tty); } return 0; default: return hdlc_ioctl(dev, ifr, cmd); } } /** * called by network layer when transmit timeout is detected * * dev pointer to network device structure */ static void hdlcdev_tx_timeout(struct net_device *dev) { MGSLPC_INFO *info = dev_to_port(dev); unsigned long flags; if (debug_level >= DEBUG_LEVEL_INFO) printk("hdlcdev_tx_timeout(%s)\n",dev->name); dev->stats.tx_errors++; dev->stats.tx_aborted_errors++; spin_lock_irqsave(&info->lock,flags); tx_stop(info); spin_unlock_irqrestore(&info->lock,flags); netif_wake_queue(dev); } /** * called by device driver when transmit completes * reenable network layer transmit if stopped * * info pointer to device instance information */ static void hdlcdev_tx_done(MGSLPC_INFO *info) { if (netif_queue_stopped(info->netdev)) netif_wake_queue(info->netdev); } /** * called by device driver when frame received * pass frame to network layer * * info pointer to device instance information * buf pointer to buffer contianing frame data * size count of data bytes in buf */ static void hdlcdev_rx(MGSLPC_INFO *info, char *buf, int size) { struct sk_buff *skb = dev_alloc_skb(size); struct net_device *dev = info->netdev; if (debug_level >= DEBUG_LEVEL_INFO) printk("hdlcdev_rx(%s)\n",dev->name); if (skb == NULL) { printk(KERN_NOTICE "%s: can't alloc skb, dropping packet\n", dev->name); dev->stats.rx_dropped++; return; } memcpy(skb_put(skb, size), buf, size); skb->protocol = hdlc_type_trans(skb, dev); dev->stats.rx_packets++; dev->stats.rx_bytes += size; netif_rx(skb); } static const struct net_device_ops hdlcdev_ops = { .ndo_open = hdlcdev_open, .ndo_stop = hdlcdev_close, .ndo_change_mtu = hdlc_change_mtu, .ndo_start_xmit = hdlc_start_xmit, .ndo_do_ioctl = hdlcdev_ioctl, .ndo_tx_timeout = hdlcdev_tx_timeout, }; /** * called by device driver when adding device instance * do generic HDLC initialization * * info pointer to device instance information * * returns 0 if success, 
otherwise error code */ static int hdlcdev_init(MGSLPC_INFO *info) { int rc; struct net_device *dev; hdlc_device *hdlc; /* allocate and initialize network and HDLC layer objects */ if (!(dev = alloc_hdlcdev(info))) { printk(KERN_ERR "%s:hdlc device allocation failure\n",__FILE__); return -ENOMEM; } /* for network layer reporting purposes only */ dev->base_addr = info->io_base; dev->irq = info->irq_level; /* network layer callbacks and settings */ dev->netdev_ops = &hdlcdev_ops; dev->watchdog_timeo = 10 * HZ; dev->tx_queue_len = 50; /* generic HDLC layer callbacks and settings */ hdlc = dev_to_hdlc(dev); hdlc->attach = hdlcdev_attach; hdlc->xmit = hdlcdev_xmit; /* register objects with HDLC layer */ if ((rc = register_hdlc_device(dev))) { printk(KERN_WARNING "%s:unable to register hdlc device\n",__FILE__); free_netdev(dev); return rc; } info->netdev = dev; return 0; } /** * called by device driver when removing device instance * do generic HDLC cleanup * * info pointer to device instance information */ static void hdlcdev_exit(MGSLPC_INFO *info) { unregister_hdlc_device(info->netdev); free_netdev(info->netdev); info->netdev = NULL; } #endif /* CONFIG_HDLC */
gpl-2.0
lcrponte/android_kernel_msm
drivers/mtd/nand/s3c2410.c
4928
29430
/* linux/drivers/mtd/nand/s3c2410.c * * Copyright © 2004-2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * Samsung S3C2410/S3C2440/S3C2412 NAND driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef CONFIG_MTD_NAND_S3C2410_DEBUG #define DEBUG #endif #include <linux/module.h> #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/cpufreq.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <asm/io.h> #include <plat/regs-nand.h> #include <plat/nand.h> #ifdef CONFIG_MTD_NAND_S3C2410_HWECC static int hardware_ecc = 1; #else static int hardware_ecc = 0; #endif #ifdef CONFIG_MTD_NAND_S3C2410_CLKSTOP static const int clock_stop = 1; #else static const int clock_stop = 0; #endif /* new oob placement block for use with hardware ecc generation */ static struct nand_ecclayout nand_hw_eccoob = { .eccbytes = 3, .eccpos = {0, 1, 2}, .oobfree = {{8, 8}} }; /* controller and mtd information */ struct s3c2410_nand_info; /** * struct s3c2410_nand_mtd - driver MTD structure * @mtd: The MTD instance to 
pass to the MTD layer. * @chip: The NAND chip information. * @set: The platform information supplied for this set of NAND chips. * @info: Link back to the hardware information. * @scan_res: The result from calling nand_scan_ident(). */ struct s3c2410_nand_mtd { struct mtd_info mtd; struct nand_chip chip; struct s3c2410_nand_set *set; struct s3c2410_nand_info *info; int scan_res; }; enum s3c_cpu_type { TYPE_S3C2410, TYPE_S3C2412, TYPE_S3C2440, }; enum s3c_nand_clk_state { CLOCK_DISABLE = 0, CLOCK_ENABLE, CLOCK_SUSPEND, }; /* overview of the s3c2410 nand state */ /** * struct s3c2410_nand_info - NAND controller state. * @mtds: An array of MTD instances on this controoler. * @platform: The platform data for this board. * @device: The platform device we bound to. * @area: The IO area resource that came from request_mem_region(). * @clk: The clock resource for this controller. * @regs: The area mapped for the hardware registers described by @area. * @sel_reg: Pointer to the register controlling the NAND selection. * @sel_bit: The bit in @sel_reg to select the NAND chip. * @mtd_count: The number of MTDs created from this controller. * @save_sel: The contents of @sel_reg to be saved over suspend. * @clk_rate: The clock rate from @clk. * @clk_state: The current clock state. * @cpu_type: The exact type of this controller. 
*/ struct s3c2410_nand_info { /* mtd info */ struct nand_hw_control controller; struct s3c2410_nand_mtd *mtds; struct s3c2410_platform_nand *platform; /* device info */ struct device *device; struct resource *area; struct clk *clk; void __iomem *regs; void __iomem *sel_reg; int sel_bit; int mtd_count; unsigned long save_sel; unsigned long clk_rate; enum s3c_nand_clk_state clk_state; enum s3c_cpu_type cpu_type; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; #endif }; /* conversion functions */ static struct s3c2410_nand_mtd *s3c2410_nand_mtd_toours(struct mtd_info *mtd) { return container_of(mtd, struct s3c2410_nand_mtd, mtd); } static struct s3c2410_nand_info *s3c2410_nand_mtd_toinfo(struct mtd_info *mtd) { return s3c2410_nand_mtd_toours(mtd)->info; } static struct s3c2410_nand_info *to_nand_info(struct platform_device *dev) { return platform_get_drvdata(dev); } static struct s3c2410_platform_nand *to_nand_plat(struct platform_device *dev) { return dev->dev.platform_data; } static inline int allow_clk_suspend(struct s3c2410_nand_info *info) { return clock_stop; } /** * s3c2410_nand_clk_set_state - Enable, disable or suspend NAND clock. * @info: The controller instance. * @new_state: State to which clock should be set. */ static void s3c2410_nand_clk_set_state(struct s3c2410_nand_info *info, enum s3c_nand_clk_state new_state) { if (!allow_clk_suspend(info) && new_state == CLOCK_SUSPEND) return; if (info->clk_state == CLOCK_ENABLE) { if (new_state != CLOCK_ENABLE) clk_disable(info->clk); } else { if (new_state == CLOCK_ENABLE) clk_enable(info->clk); } info->clk_state = new_state; } /* timing calculations */ #define NS_IN_KHZ 1000000 /** * s3c_nand_calc_rate - calculate timing data. * @wanted: The cycle time in nanoseconds. * @clk: The clock rate in kHz. * @max: The maximum divider value. * * Calculate the timing value from the given parameters. 
*/ static int s3c_nand_calc_rate(int wanted, unsigned long clk, int max) { int result; result = DIV_ROUND_UP((wanted * clk), NS_IN_KHZ); pr_debug("result %d from %ld, %d\n", result, clk, wanted); if (result > max) { printk("%d ns is too big for current clock rate %ld\n", wanted, clk); return -1; } if (result < 1) result = 1; return result; } #define to_ns(ticks,clk) (((ticks) * NS_IN_KHZ) / (unsigned int)(clk)) /* controller setup */ /** * s3c2410_nand_setrate - setup controller timing information. * @info: The controller instance. * * Given the information supplied by the platform, calculate and set * the necessary timing registers in the hardware to generate the * necessary timing cycles to the hardware. */ static int s3c2410_nand_setrate(struct s3c2410_nand_info *info) { struct s3c2410_platform_nand *plat = info->platform; int tacls_max = (info->cpu_type == TYPE_S3C2412) ? 8 : 4; int tacls, twrph0, twrph1; unsigned long clkrate = clk_get_rate(info->clk); unsigned long uninitialized_var(set), cfg, uninitialized_var(mask); unsigned long flags; /* calculate the timing information for the controller */ info->clk_rate = clkrate; clkrate /= 1000; /* turn clock into kHz for ease of use */ if (plat != NULL) { tacls = s3c_nand_calc_rate(plat->tacls, clkrate, tacls_max); twrph0 = s3c_nand_calc_rate(plat->twrph0, clkrate, 8); twrph1 = s3c_nand_calc_rate(plat->twrph1, clkrate, 8); } else { /* default timings */ tacls = tacls_max; twrph0 = 8; twrph1 = 8; } if (tacls < 0 || twrph0 < 0 || twrph1 < 0) { dev_err(info->device, "cannot get suitable timings\n"); return -EINVAL; } dev_info(info->device, "Tacls=%d, %dns Twrph0=%d %dns, Twrph1=%d %dns\n", tacls, to_ns(tacls, clkrate), twrph0, to_ns(twrph0, clkrate), twrph1, to_ns(twrph1, clkrate)); switch (info->cpu_type) { case TYPE_S3C2410: mask = (S3C2410_NFCONF_TACLS(3) | S3C2410_NFCONF_TWRPH0(7) | S3C2410_NFCONF_TWRPH1(7)); set = S3C2410_NFCONF_EN; set |= S3C2410_NFCONF_TACLS(tacls - 1); set |= S3C2410_NFCONF_TWRPH0(twrph0 - 1); 
set |= S3C2410_NFCONF_TWRPH1(twrph1 - 1); break; case TYPE_S3C2440: case TYPE_S3C2412: mask = (S3C2440_NFCONF_TACLS(tacls_max - 1) | S3C2440_NFCONF_TWRPH0(7) | S3C2440_NFCONF_TWRPH1(7)); set = S3C2440_NFCONF_TACLS(tacls - 1); set |= S3C2440_NFCONF_TWRPH0(twrph0 - 1); set |= S3C2440_NFCONF_TWRPH1(twrph1 - 1); break; default: BUG(); } local_irq_save(flags); cfg = readl(info->regs + S3C2410_NFCONF); cfg &= ~mask; cfg |= set; writel(cfg, info->regs + S3C2410_NFCONF); local_irq_restore(flags); dev_dbg(info->device, "NF_CONF is 0x%lx\n", cfg); return 0; } /** * s3c2410_nand_inithw - basic hardware initialisation * @info: The hardware state. * * Do the basic initialisation of the hardware, using s3c2410_nand_setrate() * to setup the hardware access speeds and set the controller to be enabled. */ static int s3c2410_nand_inithw(struct s3c2410_nand_info *info) { int ret; ret = s3c2410_nand_setrate(info); if (ret < 0) return ret; switch (info->cpu_type) { case TYPE_S3C2410: default: break; case TYPE_S3C2440: case TYPE_S3C2412: /* enable the controller and de-assert nFCE */ writel(S3C2440_NFCONT_ENABLE, info->regs + S3C2440_NFCONT); } return 0; } /** * s3c2410_nand_select_chip - select the given nand chip * @mtd: The MTD instance for this chip. * @chip: The chip number. * * This is called by the MTD layer to either select a given chip for the * @mtd instance, or to indicate that the access has finished and the * chip can be de-selected. * * The routine ensures that the nFCE line is correctly setup, and any * platform specific selection code is called to route nFCE to the specific * chip. 
*/ static void s3c2410_nand_select_chip(struct mtd_info *mtd, int chip) { struct s3c2410_nand_info *info; struct s3c2410_nand_mtd *nmtd; struct nand_chip *this = mtd->priv; unsigned long cur; nmtd = this->priv; info = nmtd->info; if (chip != -1) s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); cur = readl(info->sel_reg); if (chip == -1) { cur |= info->sel_bit; } else { if (nmtd->set != NULL && chip > nmtd->set->nr_chips) { dev_err(info->device, "invalid chip %d\n", chip); return; } if (info->platform != NULL) { if (info->platform->select_chip != NULL) (info->platform->select_chip) (nmtd->set, chip); } cur &= ~info->sel_bit; } writel(cur, info->sel_reg); if (chip == -1) s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); } /* s3c2410_nand_hwcontrol * * Issue command and address cycles to the chip */ static void s3c2410_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writeb(cmd, info->regs + S3C2410_NFCMD); else writeb(cmd, info->regs + S3C2410_NFADDR); } /* command and control functions */ static void s3c2440_nand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) writeb(cmd, info->regs + S3C2440_NFCMD); else writeb(cmd, info->regs + S3C2440_NFADDR); } /* s3c2410_nand_devready() * * returns 0 if the nand is busy, 1 if it is ready */ static int s3c2410_nand_devready(struct mtd_info *mtd) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); return readb(info->regs + S3C2410_NFSTAT) & S3C2410_NFSTAT_BUSY; } static int s3c2440_nand_devready(struct mtd_info *mtd) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); return readb(info->regs + S3C2440_NFSTAT) & S3C2440_NFSTAT_READY; } static int s3c2412_nand_devready(struct mtd_info *mtd) { struct s3c2410_nand_info *info = 
s3c2410_nand_mtd_toinfo(mtd); return readb(info->regs + S3C2412_NFSTAT) & S3C2412_NFSTAT_READY; } /* ECC handling functions */ static int s3c2410_nand_correct_data(struct mtd_info *mtd, u_char *dat, u_char *read_ecc, u_char *calc_ecc) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned int diff0, diff1, diff2; unsigned int bit, byte; pr_debug("%s(%p,%p,%p,%p)\n", __func__, mtd, dat, read_ecc, calc_ecc); diff0 = read_ecc[0] ^ calc_ecc[0]; diff1 = read_ecc[1] ^ calc_ecc[1]; diff2 = read_ecc[2] ^ calc_ecc[2]; pr_debug("%s: rd %02x%02x%02x calc %02x%02x%02x diff %02x%02x%02x\n", __func__, read_ecc[0], read_ecc[1], read_ecc[2], calc_ecc[0], calc_ecc[1], calc_ecc[2], diff0, diff1, diff2); if (diff0 == 0 && diff1 == 0 && diff2 == 0) return 0; /* ECC is ok */ /* sometimes people do not think about using the ECC, so check * to see if we have an 0xff,0xff,0xff read ECC and then ignore * the error, on the assumption that this is an un-eccd page. */ if (read_ecc[0] == 0xff && read_ecc[1] == 0xff && read_ecc[2] == 0xff && info->platform->ignore_unset_ecc) return 0; /* Can we correct this ECC (ie, one row and column change). 
* Note, this is similar to the 256 error code on smartmedia */ if (((diff0 ^ (diff0 >> 1)) & 0x55) == 0x55 && ((diff1 ^ (diff1 >> 1)) & 0x55) == 0x55 && ((diff2 ^ (diff2 >> 1)) & 0x55) == 0x55) { /* calculate the bit position of the error */ bit = ((diff2 >> 3) & 1) | ((diff2 >> 4) & 2) | ((diff2 >> 5) & 4); /* calculate the byte position of the error */ byte = ((diff2 << 7) & 0x100) | ((diff1 << 0) & 0x80) | ((diff1 << 1) & 0x40) | ((diff1 << 2) & 0x20) | ((diff1 << 3) & 0x10) | ((diff0 >> 4) & 0x08) | ((diff0 >> 3) & 0x04) | ((diff0 >> 2) & 0x02) | ((diff0 >> 1) & 0x01); dev_dbg(info->device, "correcting error bit %d, byte %d\n", bit, byte); dat[byte] ^= (1 << bit); return 1; } /* if there is only one bit difference in the ECC, then * one of only a row or column parity has changed, which * means the error is most probably in the ECC itself */ diff0 |= (diff1 << 8); diff0 |= (diff2 << 16); if ((diff0 & ~(1<<fls(diff0))) == 0) return 1; return -1; } /* ECC functions * * These allow the s3c2410 and s3c2440 to use the controller's ECC * generator block to ECC the data as it passes through] */ static void s3c2410_nand_enable_hwecc(struct mtd_info *mtd, int mode) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ctrl; ctrl = readl(info->regs + S3C2410_NFCONF); ctrl |= S3C2410_NFCONF_INITECC; writel(ctrl, info->regs + S3C2410_NFCONF); } static void s3c2412_nand_enable_hwecc(struct mtd_info *mtd, int mode) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ctrl; ctrl = readl(info->regs + S3C2440_NFCONT); writel(ctrl | S3C2412_NFCONT_INIT_MAIN_ECC, info->regs + S3C2440_NFCONT); } static void s3c2440_nand_enable_hwecc(struct mtd_info *mtd, int mode) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ctrl; ctrl = readl(info->regs + S3C2440_NFCONT); writel(ctrl | S3C2440_NFCONT_INITECC, info->regs + S3C2440_NFCONT); } static int s3c2410_nand_calculate_ecc(struct mtd_info *mtd, const u_char 
*dat, u_char *ecc_code) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); ecc_code[0] = readb(info->regs + S3C2410_NFECC + 0); ecc_code[1] = readb(info->regs + S3C2410_NFECC + 1); ecc_code[2] = readb(info->regs + S3C2410_NFECC + 2); pr_debug("%s: returning ecc %02x%02x%02x\n", __func__, ecc_code[0], ecc_code[1], ecc_code[2]); return 0; } static int s3c2412_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ecc = readl(info->regs + S3C2412_NFMECC0); ecc_code[0] = ecc; ecc_code[1] = ecc >> 8; ecc_code[2] = ecc >> 16; pr_debug("calculate_ecc: returning ecc %02x,%02x,%02x\n", ecc_code[0], ecc_code[1], ecc_code[2]); return 0; } static int s3c2440_nand_calculate_ecc(struct mtd_info *mtd, const u_char *dat, u_char *ecc_code) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); unsigned long ecc = readl(info->regs + S3C2440_NFMECC0); ecc_code[0] = ecc; ecc_code[1] = ecc >> 8; ecc_code[2] = ecc >> 16; pr_debug("%s: returning ecc %06lx\n", __func__, ecc & 0xffffff); return 0; } /* over-ride the standard functions for a little more speed. 
We can * use read/write block to move the data buffers to/from the controller */ static void s3c2410_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) { struct nand_chip *this = mtd->priv; readsb(this->IO_ADDR_R, buf, len); } static void s3c2440_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); readsl(info->regs + S3C2440_NFDATA, buf, len >> 2); /* cleanup if we've got less than a word to do */ if (len & 3) { buf += len & ~3; for (; len & 3; len--) *buf++ = readb(info->regs + S3C2440_NFDATA); } } static void s3c2410_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *this = mtd->priv; writesb(this->IO_ADDR_W, buf, len); } static void s3c2440_nand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { struct s3c2410_nand_info *info = s3c2410_nand_mtd_toinfo(mtd); writesl(info->regs + S3C2440_NFDATA, buf, len >> 2); /* cleanup any fractional write */ if (len & 3) { buf += len & ~3; for (; len & 3; len--, buf++) writeb(*buf, info->regs + S3C2440_NFDATA); } } /* cpufreq driver support */ #ifdef CONFIG_CPU_FREQ static int s3c2410_nand_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct s3c2410_nand_info *info; unsigned long newclk; info = container_of(nb, struct s3c2410_nand_info, freq_transition); newclk = clk_get_rate(info->clk); if ((val == CPUFREQ_POSTCHANGE && newclk < info->clk_rate) || (val == CPUFREQ_PRECHANGE && newclk > info->clk_rate)) { s3c2410_nand_setrate(info); } return 0; } static inline int s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info) { info->freq_transition.notifier_call = s3c2410_nand_cpufreq_transition; return cpufreq_register_notifier(&info->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) { cpufreq_unregister_notifier(&info->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } #else static inline int 
s3c2410_nand_cpufreq_register(struct s3c2410_nand_info *info) { return 0; } static inline void s3c2410_nand_cpufreq_deregister(struct s3c2410_nand_info *info) { } #endif /* device management functions */ static int s3c24xx_nand_remove(struct platform_device *pdev) { struct s3c2410_nand_info *info = to_nand_info(pdev); platform_set_drvdata(pdev, NULL); if (info == NULL) return 0; s3c2410_nand_cpufreq_deregister(info); /* Release all our mtds and their partitions, then go through * freeing the resources used */ if (info->mtds != NULL) { struct s3c2410_nand_mtd *ptr = info->mtds; int mtdno; for (mtdno = 0; mtdno < info->mtd_count; mtdno++, ptr++) { pr_debug("releasing mtd %d (%p)\n", mtdno, ptr); nand_release(&ptr->mtd); } kfree(info->mtds); } /* free the common resources */ if (!IS_ERR(info->clk)) { s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); clk_put(info->clk); } if (info->regs != NULL) { iounmap(info->regs); info->regs = NULL; } if (info->area != NULL) { release_resource(info->area); kfree(info->area); info->area = NULL; } kfree(info); return 0; } static int s3c2410_nand_add_partition(struct s3c2410_nand_info *info, struct s3c2410_nand_mtd *mtd, struct s3c2410_nand_set *set) { if (set) mtd->mtd.name = set->name; return mtd_device_parse_register(&mtd->mtd, NULL, NULL, set->partitions, set->nr_partitions); } /** * s3c2410_nand_init_chip - initialise a single instance of an chip * @info: The base NAND controller the chip is on. * @nmtd: The new controller MTD instance to fill in. * @set: The information passed from the board specific platform data. * * Initialise the given @nmtd from the information in @info and @set. This * readies the structure for use with the MTD layer functions by ensuring * all pointers are setup and the necessary control routines selected. 
*/ static void s3c2410_nand_init_chip(struct s3c2410_nand_info *info, struct s3c2410_nand_mtd *nmtd, struct s3c2410_nand_set *set) { struct nand_chip *chip = &nmtd->chip; void __iomem *regs = info->regs; chip->write_buf = s3c2410_nand_write_buf; chip->read_buf = s3c2410_nand_read_buf; chip->select_chip = s3c2410_nand_select_chip; chip->chip_delay = 50; chip->priv = nmtd; chip->options = set->options; chip->controller = &info->controller; switch (info->cpu_type) { case TYPE_S3C2410: chip->IO_ADDR_W = regs + S3C2410_NFDATA; info->sel_reg = regs + S3C2410_NFCONF; info->sel_bit = S3C2410_NFCONF_nFCE; chip->cmd_ctrl = s3c2410_nand_hwcontrol; chip->dev_ready = s3c2410_nand_devready; break; case TYPE_S3C2440: chip->IO_ADDR_W = regs + S3C2440_NFDATA; info->sel_reg = regs + S3C2440_NFCONT; info->sel_bit = S3C2440_NFCONT_nFCE; chip->cmd_ctrl = s3c2440_nand_hwcontrol; chip->dev_ready = s3c2440_nand_devready; chip->read_buf = s3c2440_nand_read_buf; chip->write_buf = s3c2440_nand_write_buf; break; case TYPE_S3C2412: chip->IO_ADDR_W = regs + S3C2440_NFDATA; info->sel_reg = regs + S3C2440_NFCONT; info->sel_bit = S3C2412_NFCONT_nFCE0; chip->cmd_ctrl = s3c2440_nand_hwcontrol; chip->dev_ready = s3c2412_nand_devready; if (readl(regs + S3C2410_NFCONF) & S3C2412_NFCONF_NANDBOOT) dev_info(info->device, "System booted from NAND\n"); break; } chip->IO_ADDR_R = chip->IO_ADDR_W; nmtd->info = info; nmtd->mtd.priv = chip; nmtd->mtd.owner = THIS_MODULE; nmtd->set = set; if (hardware_ecc) { chip->ecc.calculate = s3c2410_nand_calculate_ecc; chip->ecc.correct = s3c2410_nand_correct_data; chip->ecc.mode = NAND_ECC_HW; chip->ecc.strength = 1; switch (info->cpu_type) { case TYPE_S3C2410: chip->ecc.hwctl = s3c2410_nand_enable_hwecc; chip->ecc.calculate = s3c2410_nand_calculate_ecc; break; case TYPE_S3C2412: chip->ecc.hwctl = s3c2412_nand_enable_hwecc; chip->ecc.calculate = s3c2412_nand_calculate_ecc; break; case TYPE_S3C2440: chip->ecc.hwctl = s3c2440_nand_enable_hwecc; chip->ecc.calculate = 
s3c2440_nand_calculate_ecc; break; } } else { chip->ecc.mode = NAND_ECC_SOFT; } if (set->ecc_layout != NULL) chip->ecc.layout = set->ecc_layout; if (set->disable_ecc) chip->ecc.mode = NAND_ECC_NONE; switch (chip->ecc.mode) { case NAND_ECC_NONE: dev_info(info->device, "NAND ECC disabled\n"); break; case NAND_ECC_SOFT: dev_info(info->device, "NAND soft ECC\n"); break; case NAND_ECC_HW: dev_info(info->device, "NAND hardware ECC\n"); break; default: dev_info(info->device, "NAND ECC UNKNOWN\n"); break; } /* If you use u-boot BBT creation code, specifying this flag will * let the kernel fish out the BBT from the NAND, and also skip the * full NAND scan that can take 1/2s or so. Little things... */ if (set->flash_bbt) { chip->bbt_options |= NAND_BBT_USE_FLASH; chip->options |= NAND_SKIP_BBTSCAN; } } /** * s3c2410_nand_update_chip - post probe update * @info: The controller instance. * @nmtd: The driver version of the MTD instance. * * This routine is called after the chip probe has successfully completed * and the relevant per-chip information updated. This call ensure that * we update the internal state accordingly. * * The internal state is currently limited to the ECC state information. */ static void s3c2410_nand_update_chip(struct s3c2410_nand_info *info, struct s3c2410_nand_mtd *nmtd) { struct nand_chip *chip = &nmtd->chip; dev_dbg(info->device, "chip %p => page shift %d\n", chip, chip->page_shift); if (chip->ecc.mode != NAND_ECC_HW) return; /* change the behaviour depending on wether we are using * the large or small page nand device */ if (chip->page_shift > 10) { chip->ecc.size = 256; chip->ecc.bytes = 3; } else { chip->ecc.size = 512; chip->ecc.bytes = 3; chip->ecc.layout = &nand_hw_eccoob; } } /* s3c24xx_nand_probe * * called by device layer when it finds a device matching * one our driver can handled. 
This code checks to see if * it can allocate all necessary resources then calls the * nand layer to look for devices */ static int s3c24xx_nand_probe(struct platform_device *pdev) { struct s3c2410_platform_nand *plat = to_nand_plat(pdev); enum s3c_cpu_type cpu_type; struct s3c2410_nand_info *info; struct s3c2410_nand_mtd *nmtd; struct s3c2410_nand_set *sets; struct resource *res; int err = 0; int size; int nr_sets; int setno; cpu_type = platform_get_device_id(pdev)->driver_data; pr_debug("s3c2410_nand_probe(%p)\n", pdev); info = kzalloc(sizeof(*info), GFP_KERNEL); if (info == NULL) { dev_err(&pdev->dev, "no memory for flash info\n"); err = -ENOMEM; goto exit_error; } platform_set_drvdata(pdev, info); spin_lock_init(&info->controller.lock); init_waitqueue_head(&info->controller.wq); /* get the clock source and enable it */ info->clk = clk_get(&pdev->dev, "nand"); if (IS_ERR(info->clk)) { dev_err(&pdev->dev, "failed to get clock\n"); err = -ENOENT; goto exit_error; } s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); /* allocate and map the resource */ /* currently we assume we have the one resource */ res = pdev->resource; size = resource_size(res); info->area = request_mem_region(res->start, size, pdev->name); if (info->area == NULL) { dev_err(&pdev->dev, "cannot reserve register region\n"); err = -ENOENT; goto exit_error; } info->device = &pdev->dev; info->platform = plat; info->regs = ioremap(res->start, size); info->cpu_type = cpu_type; if (info->regs == NULL) { dev_err(&pdev->dev, "cannot reserve register region\n"); err = -EIO; goto exit_error; } dev_dbg(&pdev->dev, "mapped registers at %p\n", info->regs); /* initialise the hardware */ err = s3c2410_nand_inithw(info); if (err != 0) goto exit_error; sets = (plat != NULL) ? plat->sets : NULL; nr_sets = (plat != NULL) ? 
plat->nr_sets : 1; info->mtd_count = nr_sets; /* allocate our information */ size = nr_sets * sizeof(*info->mtds); info->mtds = kzalloc(size, GFP_KERNEL); if (info->mtds == NULL) { dev_err(&pdev->dev, "failed to allocate mtd storage\n"); err = -ENOMEM; goto exit_error; } /* initialise all possible chips */ nmtd = info->mtds; for (setno = 0; setno < nr_sets; setno++, nmtd++) { pr_debug("initialising set %d (%p, info %p)\n", setno, nmtd, info); s3c2410_nand_init_chip(info, nmtd, sets); nmtd->scan_res = nand_scan_ident(&nmtd->mtd, (sets) ? sets->nr_chips : 1, NULL); if (nmtd->scan_res == 0) { s3c2410_nand_update_chip(info, nmtd); nand_scan_tail(&nmtd->mtd); s3c2410_nand_add_partition(info, nmtd, sets); } if (sets != NULL) sets++; } err = s3c2410_nand_cpufreq_register(info); if (err < 0) { dev_err(&pdev->dev, "failed to init cpufreq support\n"); goto exit_error; } if (allow_clk_suspend(info)) { dev_info(&pdev->dev, "clock idle support enabled\n"); s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); } pr_debug("initialised ok\n"); return 0; exit_error: s3c24xx_nand_remove(pdev); if (err == 0) err = -EINVAL; return err; } /* PM Support */ #ifdef CONFIG_PM static int s3c24xx_nand_suspend(struct platform_device *dev, pm_message_t pm) { struct s3c2410_nand_info *info = platform_get_drvdata(dev); if (info) { info->save_sel = readl(info->sel_reg); /* For the moment, we must ensure nFCE is high during * the time we are suspended. This really should be * handled by suspending the MTDs we are using, but * that is currently not the case. */ writel(info->save_sel | info->sel_bit, info->sel_reg); s3c2410_nand_clk_set_state(info, CLOCK_DISABLE); } return 0; } static int s3c24xx_nand_resume(struct platform_device *dev) { struct s3c2410_nand_info *info = platform_get_drvdata(dev); unsigned long sel; if (info) { s3c2410_nand_clk_set_state(info, CLOCK_ENABLE); s3c2410_nand_inithw(info); /* Restore the state of the nFCE line. 
*/ sel = readl(info->sel_reg); sel &= ~info->sel_bit; sel |= info->save_sel & info->sel_bit; writel(sel, info->sel_reg); s3c2410_nand_clk_set_state(info, CLOCK_SUSPEND); } return 0; } #else #define s3c24xx_nand_suspend NULL #define s3c24xx_nand_resume NULL #endif /* driver device registration */ static struct platform_device_id s3c24xx_driver_ids[] = { { .name = "s3c2410-nand", .driver_data = TYPE_S3C2410, }, { .name = "s3c2440-nand", .driver_data = TYPE_S3C2440, }, { .name = "s3c2412-nand", .driver_data = TYPE_S3C2412, }, { .name = "s3c6400-nand", .driver_data = TYPE_S3C2412, /* compatible with 2412 */ }, { } }; MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids); static struct platform_driver s3c24xx_nand_driver = { .probe = s3c24xx_nand_probe, .remove = s3c24xx_nand_remove, .suspend = s3c24xx_nand_suspend, .resume = s3c24xx_nand_resume, .id_table = s3c24xx_driver_ids, .driver = { .name = "s3c24xx-nand", .owner = THIS_MODULE, }, }; static int __init s3c2410_nand_init(void) { printk("S3C24XX NAND Driver, (c) 2004 Simtec Electronics\n"); return platform_driver_register(&s3c24xx_nand_driver); } static void __exit s3c2410_nand_exit(void) { platform_driver_unregister(&s3c24xx_nand_driver); } module_init(s3c2410_nand_init); module_exit(s3c2410_nand_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Ben Dooks <ben@simtec.co.uk>"); MODULE_DESCRIPTION("S3C24XX MTD NAND driver");
gpl-2.0
penreturns/AK-OnePone
drivers/usb/gadget/gmidi.c
5184
5548
/* * gmidi.c -- USB MIDI Gadget Driver * * Copyright (C) 2006 Thumtronics Pty Ltd. * Developed for Thumtronics by Grey Innovation * Ben Williamson <ben.williamson@greyinnovation.com> * * This software is distributed under the terms of the GNU General Public * License ("GPL") version 2, as published by the Free Software Foundation. * * This code is based in part on: * * Gadget Zero driver, Copyright (C) 2003-2004 David Brownell. * USB Audio driver, Copyright (C) 2002 by Takashi Iwai. * USB MIDI driver, Copyright (C) 2002-2005 Clemens Ladisch. * * Refer to the USB Device Class Definition for MIDI Devices: * http://www.usb.org/developers/devclass_docs/midi10.pdf */ /* #define VERBOSE_DEBUG */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/utsname.h> #include <linux/module.h> #include <linux/device.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/usb/audio.h> #include <linux/usb/midi.h> #include "gadget_chips.h" #include "composite.c" #include "usbstring.c" #include "config.c" #include "epautoconf.c" #include "f_midi.c" /*-------------------------------------------------------------------------*/ MODULE_AUTHOR("Ben Williamson"); MODULE_LICENSE("GPL v2"); static const char shortname[] = "g_midi"; static const char longname[] = "MIDI Gadget"; static int index = SNDRV_DEFAULT_IDX1; module_param(index, int, S_IRUGO); MODULE_PARM_DESC(index, "Index value for the USB MIDI Gadget adapter."); static char *id = SNDRV_DEFAULT_STR1; module_param(id, charp, S_IRUGO); MODULE_PARM_DESC(id, "ID string for the USB MIDI Gadget adapter."); static unsigned int buflen = 256; module_param(buflen, uint, S_IRUGO); MODULE_PARM_DESC(buflen, "MIDI buffer length"); static unsigned int qlen = 32; module_param(qlen, uint, S_IRUGO); MODULE_PARM_DESC(qlen, "USB read request queue length"); static unsigned int in_ports = 1; module_param(in_ports, uint, S_IRUGO); 
MODULE_PARM_DESC(in_ports, "Number of MIDI input ports"); static unsigned int out_ports = 1; module_param(out_ports, uint, S_IRUGO); MODULE_PARM_DESC(out_ports, "Number of MIDI output ports"); /* Thanks to Grey Innovation for donating this product ID. * * DO NOT REUSE THESE IDs with a protocol-incompatible driver!! Ever!! * Instead: allocate your own, using normal USB-IF procedures. */ #define DRIVER_VENDOR_NUM 0x17b3 /* Grey Innovation */ #define DRIVER_PRODUCT_NUM 0x0004 /* Linux-USB "MIDI Gadget" */ /* string IDs are assigned dynamically */ #define STRING_MANUFACTURER_IDX 0 #define STRING_PRODUCT_IDX 1 #define STRING_DESCRIPTION_IDX 2 static struct usb_device_descriptor device_desc = { .bLength = USB_DT_DEVICE_SIZE, .bDescriptorType = USB_DT_DEVICE, .bcdUSB = __constant_cpu_to_le16(0x0200), .bDeviceClass = USB_CLASS_PER_INTERFACE, .idVendor = __constant_cpu_to_le16(DRIVER_VENDOR_NUM), .idProduct = __constant_cpu_to_le16(DRIVER_PRODUCT_NUM), /* .iManufacturer = DYNAMIC */ /* .iProduct = DYNAMIC */ .bNumConfigurations = 1, }; static struct usb_string strings_dev[] = { [STRING_MANUFACTURER_IDX].s = "Grey Innovation", [STRING_PRODUCT_IDX].s = "MIDI Gadget", [STRING_DESCRIPTION_IDX].s = "MIDI", { } /* end of list */ }; static struct usb_gadget_strings stringtab_dev = { .language = 0x0409, /* en-us */ .strings = strings_dev, }; static struct usb_gadget_strings *dev_strings[] = { &stringtab_dev, NULL, }; static int __exit midi_unbind(struct usb_composite_dev *dev) { return 0; } static struct usb_configuration midi_config = { .label = "MIDI Gadget", .bConfigurationValue = 1, /* .iConfiguration = DYNAMIC */ .bmAttributes = USB_CONFIG_ATT_ONE, .bMaxPower = CONFIG_USB_GADGET_VBUS_DRAW / 2, }; static int __init midi_bind_config(struct usb_configuration *c) { return f_midi_bind_config(c, index, id, in_ports, out_ports, buflen, qlen); } static int __init midi_bind(struct usb_composite_dev *cdev) { struct usb_gadget *gadget = cdev->gadget; int gcnum, status; status = 
usb_string_id(cdev); if (status < 0) return status; strings_dev[STRING_MANUFACTURER_IDX].id = status; device_desc.iManufacturer = status; status = usb_string_id(cdev); if (status < 0) return status; strings_dev[STRING_PRODUCT_IDX].id = status; device_desc.iProduct = status; /* config description */ status = usb_string_id(cdev); if (status < 0) return status; strings_dev[STRING_DESCRIPTION_IDX].id = status; midi_config.iConfiguration = status; gcnum = usb_gadget_controller_number(gadget); if (gcnum < 0) { /* gmidi is so simple (no altsettings) that * it SHOULD NOT have problems with bulk-capable hardware. * so warn about unrecognized controllers, don't panic. */ pr_warning("%s: controller '%s' not recognized\n", __func__, gadget->name); device_desc.bcdDevice = cpu_to_le16(0x9999); } else { device_desc.bcdDevice = cpu_to_le16(0x0200 + gcnum); } status = usb_add_config(cdev, &midi_config, midi_bind_config); if (status < 0) return status; pr_info("%s\n", longname); return 0; } static struct usb_composite_driver midi_driver = { .name = (char *) longname, .dev = &device_desc, .strings = dev_strings, .max_speed = USB_SPEED_HIGH, .unbind = __exit_p(midi_unbind), }; static int __init midi_init(void) { return usb_composite_probe(&midi_driver, midi_bind); } module_init(midi_init); static void __exit midi_cleanup(void) { usb_composite_unregister(&midi_driver); } module_exit(midi_cleanup);
gpl-2.0
phuthinh100/Kernel-JB--sky-A830L
arch/avr32/kernel/cpu.c
7232
10329
/* * Copyright (C) 2005-2006 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/device.h> #include <linux/seq_file.h> #include <linux/cpu.h> #include <linux/module.h> #include <linux/percpu.h> #include <linux/param.h> #include <linux/errno.h> #include <linux/clk.h> #include <asm/setup.h> #include <asm/sysreg.h> static DEFINE_PER_CPU(struct cpu, cpu_devices); #ifdef CONFIG_PERFORMANCE_COUNTERS /* * XXX: If/when a SMP-capable implementation of AVR32 will ever be * made, we must make sure that the code executes on the correct CPU. */ static ssize_t show_pc0event(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long pccr; pccr = sysreg_read(PCCR); return sprintf(buf, "0x%lx\n", (pccr >> 12) & 0x3f); } static ssize_t store_pc0event(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; char *endp; val = simple_strtoul(buf, &endp, 0); if (endp == buf || val > 0x3f) return -EINVAL; val = (val << 12) | (sysreg_read(PCCR) & 0xfffc0fff); sysreg_write(PCCR, val); return count; } static ssize_t show_pc0count(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long pcnt0; pcnt0 = sysreg_read(PCNT0); return sprintf(buf, "%lu\n", pcnt0); } static ssize_t store_pc0count(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; char *endp; val = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EINVAL; sysreg_write(PCNT0, val); return count; } static ssize_t show_pc1event(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long pccr; pccr = sysreg_read(PCCR); return sprintf(buf, "0x%lx\n", (pccr >> 18) & 0x3f); } static ssize_t store_pc1event(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { 
unsigned long val; char *endp; val = simple_strtoul(buf, &endp, 0); if (endp == buf || val > 0x3f) return -EINVAL; val = (val << 18) | (sysreg_read(PCCR) & 0xff03ffff); sysreg_write(PCCR, val); return count; } static ssize_t show_pc1count(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long pcnt1; pcnt1 = sysreg_read(PCNT1); return sprintf(buf, "%lu\n", pcnt1); } static ssize_t store_pc1count(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; char *endp; val = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EINVAL; sysreg_write(PCNT1, val); return count; } static ssize_t show_pccycles(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long pccnt; pccnt = sysreg_read(PCCNT); return sprintf(buf, "%lu\n", pccnt); } static ssize_t store_pccycles(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long val; char *endp; val = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EINVAL; sysreg_write(PCCNT, val); return count; } static ssize_t show_pcenable(struct device *dev, struct device_attribute *attr, char *buf) { unsigned long pccr; pccr = sysreg_read(PCCR); return sprintf(buf, "%c\n", (pccr & 1)?'1':'0'); } static ssize_t store_pcenable(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { unsigned long pccr, val; char *endp; val = simple_strtoul(buf, &endp, 0); if (endp == buf) return -EINVAL; if (val) val = 1; pccr = sysreg_read(PCCR); pccr = (pccr & ~1UL) | val; sysreg_write(PCCR, pccr); return count; } static DEVICE_ATTR(pc0event, 0600, show_pc0event, store_pc0event); static DEVICE_ATTR(pc0count, 0600, show_pc0count, store_pc0count); static DEVICE_ATTR(pc1event, 0600, show_pc1event, store_pc1event); static DEVICE_ATTR(pc1count, 0600, show_pc1count, store_pc1count); static DEVICE_ATTR(pccycles, 0600, show_pccycles, store_pccycles); static DEVICE_ATTR(pcenable, 0600, show_pcenable, 
store_pcenable); #endif /* CONFIG_PERFORMANCE_COUNTERS */ static int __init topology_init(void) { int cpu; for_each_possible_cpu(cpu) { struct cpu *c = &per_cpu(cpu_devices, cpu); register_cpu(c, cpu); #ifdef CONFIG_PERFORMANCE_COUNTERS device_create_file(&c->dev, &dev_attr_pc0event); device_create_file(&c->dev, &dev_attr_pc0count); device_create_file(&c->dev, &dev_attr_pc1event); device_create_file(&c->dev, &dev_attr_pc1count); device_create_file(&c->dev, &dev_attr_pccycles); device_create_file(&c->dev, &dev_attr_pcenable); #endif } return 0; } subsys_initcall(topology_init); struct chip_id_map { u16 mid; u16 pn; const char *name; }; static const struct chip_id_map chip_names[] = { { .mid = 0x1f, .pn = 0x1e82, .name = "AT32AP700x" }, }; #define NR_CHIP_NAMES ARRAY_SIZE(chip_names) static const char *cpu_names[] = { "Morgan", "AP7", }; #define NR_CPU_NAMES ARRAY_SIZE(cpu_names) static const char *arch_names[] = { "AVR32A", "AVR32B", }; #define NR_ARCH_NAMES ARRAY_SIZE(arch_names) static const char *mmu_types[] = { "No MMU", "ITLB and DTLB", "Shared TLB", "MPU" }; static const char *cpu_feature_flags[] = { "rmw", "dsp", "simd", "ocd", "perfctr", "java", "fpu", }; static const char *get_chip_name(struct avr32_cpuinfo *cpu) { unsigned int i; unsigned int mid = avr32_get_manufacturer_id(cpu); unsigned int pn = avr32_get_product_number(cpu); for (i = 0; i < NR_CHIP_NAMES; i++) { if (chip_names[i].mid == mid && chip_names[i].pn == pn) return chip_names[i].name; } return "(unknown)"; } void __init setup_processor(void) { unsigned long config0, config1; unsigned long features; unsigned cpu_id, cpu_rev, arch_id, arch_rev, mmu_type; unsigned device_id; unsigned tmp; unsigned i; config0 = sysreg_read(CONFIG0); config1 = sysreg_read(CONFIG1); cpu_id = SYSREG_BFEXT(PROCESSORID, config0); cpu_rev = SYSREG_BFEXT(PROCESSORREVISION, config0); arch_id = SYSREG_BFEXT(AT, config0); arch_rev = SYSREG_BFEXT(AR, config0); mmu_type = SYSREG_BFEXT(MMUT, config0); device_id = ocd_read(DID); 
boot_cpu_data.arch_type = arch_id; boot_cpu_data.cpu_type = cpu_id; boot_cpu_data.arch_revision = arch_rev; boot_cpu_data.cpu_revision = cpu_rev; boot_cpu_data.tlb_config = mmu_type; boot_cpu_data.device_id = device_id; tmp = SYSREG_BFEXT(ILSZ, config1); if (tmp) { boot_cpu_data.icache.ways = 1 << SYSREG_BFEXT(IASS, config1); boot_cpu_data.icache.sets = 1 << SYSREG_BFEXT(ISET, config1); boot_cpu_data.icache.linesz = 1 << (tmp + 1); } tmp = SYSREG_BFEXT(DLSZ, config1); if (tmp) { boot_cpu_data.dcache.ways = 1 << SYSREG_BFEXT(DASS, config1); boot_cpu_data.dcache.sets = 1 << SYSREG_BFEXT(DSET, config1); boot_cpu_data.dcache.linesz = 1 << (tmp + 1); } if ((cpu_id >= NR_CPU_NAMES) || (arch_id >= NR_ARCH_NAMES)) { printk ("Unknown CPU configuration (ID %02x, arch %02x), " "continuing anyway...\n", cpu_id, arch_id); return; } printk ("CPU: %s chip revision %c\n", get_chip_name(&boot_cpu_data), avr32_get_chip_revision(&boot_cpu_data) + 'A'); printk ("CPU: %s [%02x] core revision %d (%s arch revision %d)\n", cpu_names[cpu_id], cpu_id, cpu_rev, arch_names[arch_id], arch_rev); printk ("CPU: MMU configuration: %s\n", mmu_types[mmu_type]); printk ("CPU: features:"); features = 0; if (config0 & SYSREG_BIT(CONFIG0_R)) features |= AVR32_FEATURE_RMW; if (config0 & SYSREG_BIT(CONFIG0_D)) features |= AVR32_FEATURE_DSP; if (config0 & SYSREG_BIT(CONFIG0_S)) features |= AVR32_FEATURE_SIMD; if (config0 & SYSREG_BIT(CONFIG0_O)) features |= AVR32_FEATURE_OCD; if (config0 & SYSREG_BIT(CONFIG0_P)) features |= AVR32_FEATURE_PCTR; if (config0 & SYSREG_BIT(CONFIG0_J)) features |= AVR32_FEATURE_JAVA; if (config0 & SYSREG_BIT(CONFIG0_F)) features |= AVR32_FEATURE_FPU; for (i = 0; i < ARRAY_SIZE(cpu_feature_flags); i++) if (features & (1 << i)) printk(" %s", cpu_feature_flags[i]); printk("\n"); boot_cpu_data.features = features; } #ifdef CONFIG_PROC_FS static int c_show(struct seq_file *m, void *v) { unsigned int icache_size, dcache_size; unsigned int cpu = smp_processor_id(); unsigned int freq; 
unsigned int i; icache_size = boot_cpu_data.icache.ways * boot_cpu_data.icache.sets * boot_cpu_data.icache.linesz; dcache_size = boot_cpu_data.dcache.ways * boot_cpu_data.dcache.sets * boot_cpu_data.dcache.linesz; seq_printf(m, "processor\t: %d\n", cpu); seq_printf(m, "chip type\t: %s revision %c\n", get_chip_name(&boot_cpu_data), avr32_get_chip_revision(&boot_cpu_data) + 'A'); if (boot_cpu_data.arch_type < NR_ARCH_NAMES) seq_printf(m, "cpu arch\t: %s revision %d\n", arch_names[boot_cpu_data.arch_type], boot_cpu_data.arch_revision); if (boot_cpu_data.cpu_type < NR_CPU_NAMES) seq_printf(m, "cpu core\t: %s revision %d\n", cpu_names[boot_cpu_data.cpu_type], boot_cpu_data.cpu_revision); freq = (clk_get_rate(boot_cpu_data.clk) + 500) / 1000; seq_printf(m, "cpu MHz\t\t: %u.%03u\n", freq / 1000, freq % 1000); seq_printf(m, "i-cache\t\t: %dK (%u ways x %u sets x %u)\n", icache_size >> 10, boot_cpu_data.icache.ways, boot_cpu_data.icache.sets, boot_cpu_data.icache.linesz); seq_printf(m, "d-cache\t\t: %dK (%u ways x %u sets x %u)\n", dcache_size >> 10, boot_cpu_data.dcache.ways, boot_cpu_data.dcache.sets, boot_cpu_data.dcache.linesz); seq_printf(m, "features\t:"); for (i = 0; i < ARRAY_SIZE(cpu_feature_flags); i++) if (boot_cpu_data.features & (1 << i)) seq_printf(m, " %s", cpu_feature_flags[i]); seq_printf(m, "\nbogomips\t: %lu.%02lu\n", boot_cpu_data.loops_per_jiffy / (500000/HZ), (boot_cpu_data.loops_per_jiffy / (5000/HZ)) % 100); return 0; } static void *c_start(struct seq_file *m, loff_t *pos) { return *pos < 1 ? (void *)1 : NULL; } static void *c_next(struct seq_file *m, void *v, loff_t *pos) { ++*pos; return NULL; } static void c_stop(struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = c_show }; #endif /* CONFIG_PROC_FS */
gpl-2.0
xtrymind/android_kernel_msm
drivers/media/rc/keymaps/rc-digittrade.c
9536
2708
/* * Digittrade DVB-T USB Stick remote controller keytable * * Copyright (C) 2010 Antti Palosaari <crope@iki.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #include <media/rc-map.h> #include <linux/module.h> /* Digittrade DVB-T USB Stick remote controller. */ /* Imported from af9015.h. Initial keytable was from Alain Kalker <miki@dds.nl> */ /* Digittrade DVB-T USB Stick */ static struct rc_map_table digittrade[] = { { 0x0000, KEY_9 }, { 0x0001, KEY_EPG }, /* EPG */ { 0x0002, KEY_VOLUMEDOWN }, /* Vol Dn */ { 0x0003, KEY_TEXT }, /* TELETEXT */ { 0x0004, KEY_8 }, { 0x0005, KEY_MUTE }, /* MUTE */ { 0x0006, KEY_POWER2 }, /* POWER */ { 0x0009, KEY_ZOOM }, /* FULLSCREEN */ { 0x000a, KEY_RECORD }, /* RECORD */ { 0x000d, KEY_SUBTITLE }, /* SUBTITLE */ { 0x000e, KEY_STOP }, /* STOP */ { 0x0010, KEY_OK }, /* RETURN */ { 0x0011, KEY_2 }, { 0x0012, KEY_4 }, { 0x0015, KEY_3 }, { 0x0016, KEY_5 }, { 0x0017, KEY_CHANNELDOWN }, /* Ch Dn */ { 0x0019, KEY_CHANNELUP }, /* CH Up */ { 0x001a, KEY_PAUSE }, /* PAUSE */ { 0x001b, KEY_1 }, { 0x001d, KEY_AUDIO }, /* DUAL SOUND */ { 0x001e, KEY_PLAY }, /* PLAY */ { 0x001f, KEY_CAMERA }, /* SNAPSHOT */ { 0x0040, KEY_VOLUMEUP }, /* Vol Up */ { 0x0048, KEY_7 }, { 0x004c, KEY_6 }, { 0x004d, KEY_PLAYPAUSE }, /* TIMESHIFT */ { 0x0054, KEY_0 }, }; static struct rc_map_list digittrade_map = { .map = 
{ .scan = digittrade, .size = ARRAY_SIZE(digittrade), .rc_type = RC_TYPE_NEC, .name = RC_MAP_DIGITTRADE, } }; static int __init init_rc_map_digittrade(void) { return rc_map_register(&digittrade_map); } static void __exit exit_rc_map_digittrade(void) { rc_map_unregister(&digittrade_map); } module_init(init_rc_map_digittrade) module_exit(exit_rc_map_digittrade) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
gpl-2.0
maxfierke/axstrom-kernel
arch/mips/pci/fixup-ip32.c
9536
1518
#include <linux/init.h> #include <linux/kernel.h> #include <linux/pci.h> #include <asm/ip32/ip32_ints.h> /* * O2 has up to 5 PCI devices connected into the MACE bridge. The device * map looks like this: * * 0 aic7xxx 0 * 1 aic7xxx 1 * 2 expansion slot * 3 N/C * 4 N/C */ #define SCSI0 MACEPCI_SCSI0_IRQ #define SCSI1 MACEPCI_SCSI1_IRQ #define INTA0 MACEPCI_SLOT0_IRQ #define INTA1 MACEPCI_SLOT1_IRQ #define INTA2 MACEPCI_SLOT2_IRQ #define INTB MACEPCI_SHARED0_IRQ #define INTC MACEPCI_SHARED1_IRQ #define INTD MACEPCI_SHARED2_IRQ static char irq_tab_mace[][5] __initdata = { /* Dummy INT#A INT#B INT#C INT#D */ {0, 0, 0, 0, 0}, /* This is placeholder row - never used */ {0, SCSI0, SCSI0, SCSI0, SCSI0}, {0, SCSI1, SCSI1, SCSI1, SCSI1}, {0, INTA0, INTB, INTC, INTD}, {0, INTA1, INTC, INTD, INTB}, {0, INTA2, INTD, INTB, INTC}, }; /* * Given a PCI slot number (a la PCI_SLOT(...)) and the interrupt pin of * the device (1-4 => A-D), tell what irq to use. Note that we don't * in theory have slots 4 and 5, and we never normally use the shared * irqs. I suppose a device without a pin A will thank us for doing it * right if there exists such a broken piece of crap. */ int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { return irq_tab_mace[slot][pin]; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; }
gpl-2.0
TeamRegular/android_kernel_samsung_kminilte
arch/mips/vr41xx/casio-e55/setup.c
13888
1339
/* * setup.c, Setup for the CASIO CASSIOPEIA E-11/15/55/65. * * Copyright (C) 2002-2006 Yoichi Yuasa <yuasa@linux-mips.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/ioport.h> #include <asm/io.h> #define E55_ISA_IO_BASE 0x1400c000 #define E55_ISA_IO_SIZE 0x03ff4000 #define E55_ISA_IO_START 0 #define E55_ISA_IO_END (E55_ISA_IO_SIZE - 1) #define E55_IO_PORT_BASE KSEG1ADDR(E55_ISA_IO_BASE) static int __init casio_e55_setup(void) { set_io_port_base(E55_IO_PORT_BASE); ioport_resource.start = E55_ISA_IO_START; ioport_resource.end = E55_ISA_IO_END; return 0; } arch_initcall(casio_e55_setup);
gpl-2.0
Barbatos/ioq3-for-UrbanTerror-4
code/q3_ui/ui_saveconfig.c
65
6066
/* =========================================================================== Copyright (C) 1999-2005 Id Software, Inc. This file is part of Quake III Arena source code. Quake III Arena source code is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. Quake III Arena source code is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Quake III Arena source code; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA =========================================================================== */ // /* ============================================================================= SAVE CONFIG MENU ============================================================================= */ #include "ui_local.h" #define ART_BACK0 "menu/art/back_0" #define ART_BACK1 "menu/art/back_1" #define ART_SAVE0 "menu/art/save_0" #define ART_SAVE1 "menu/art/save_1" #define ART_BACKGROUND "menu/art/cut_frame" #define ID_NAME 10 #define ID_BACK 11 #define ID_SAVE 12 typedef struct { menuframework_s menu; menutext_s banner; menubitmap_s background; menufield_s savename; menubitmap_s back; menubitmap_s save; } saveConfig_t; static saveConfig_t saveConfig; /* =============== UI_SaveConfigMenu_BackEvent =============== */ static void UI_SaveConfigMenu_BackEvent( void *ptr, int event ) { if( event != QM_ACTIVATED ) { return; } UI_PopMenu(); } /* =============== UI_SaveConfigMenu_SaveEvent =============== */ static void UI_SaveConfigMenu_SaveEvent( void *ptr, int event ) { char configname[MAX_QPATH]; if( event != QM_ACTIVATED ) { return; } if( 
!saveConfig.savename.field.buffer[0] ) { return; } COM_StripExtension(saveConfig.savename.field.buffer, configname, sizeof(configname)); trap_Cmd_ExecuteText( EXEC_APPEND, va( "writeconfig %s.cfg\n", configname ) ); UI_PopMenu(); } /* =============== UI_SaveConfigMenu_SavenameDraw =============== */ static void UI_SaveConfigMenu_SavenameDraw( void *self ) { menufield_s *f; int style; float *color; f = (menufield_s *)self; if( f == Menu_ItemAtCursor( &saveConfig.menu ) ) { style = UI_LEFT|UI_PULSE|UI_SMALLFONT; color = text_color_highlight; } else { style = UI_LEFT|UI_SMALLFONT; color = colorRed; } UI_DrawProportionalString( 320, 192, "Enter filename:", UI_CENTER|UI_SMALLFONT, color_orange ); UI_FillRect( f->generic.x, f->generic.y, f->field.widthInChars*SMALLCHAR_WIDTH, SMALLCHAR_HEIGHT, colorBlack ); MField_Draw( &f->field, f->generic.x, f->generic.y, style, color ); } /* ================= UI_SaveConfigMenu_Init ================= */ static void UI_SaveConfigMenu_Init( void ) { memset( &saveConfig, 0, sizeof(saveConfig) ); UI_SaveConfigMenu_Cache(); saveConfig.menu.wrapAround = qtrue; saveConfig.menu.fullscreen = qtrue; saveConfig.banner.generic.type = MTYPE_BTEXT; saveConfig.banner.generic.x = 320; saveConfig.banner.generic.y = 16; saveConfig.banner.string = "SAVE CONFIG"; saveConfig.banner.color = color_white; saveConfig.banner.style = UI_CENTER; saveConfig.background.generic.type = MTYPE_BITMAP; saveConfig.background.generic.name = ART_BACKGROUND; saveConfig.background.generic.flags = QMF_INACTIVE; saveConfig.background.generic.x = 142; saveConfig.background.generic.y = 118; saveConfig.background.width = 359; saveConfig.background.height = 256; saveConfig.savename.generic.type = MTYPE_FIELD; saveConfig.savename.generic.flags = QMF_NODEFAULTINIT|QMF_UPPERCASE; saveConfig.savename.generic.ownerdraw = UI_SaveConfigMenu_SavenameDraw; saveConfig.savename.field.widthInChars = 20; saveConfig.savename.field.maxchars = 20; saveConfig.savename.generic.x = 240; 
saveConfig.savename.generic.y = 155+72; saveConfig.savename.generic.left = 240; saveConfig.savename.generic.top = 155+72; saveConfig.savename.generic.right = 233 + 20*SMALLCHAR_WIDTH; saveConfig.savename.generic.bottom = 155+72 + SMALLCHAR_HEIGHT+2; saveConfig.back.generic.type = MTYPE_BITMAP; saveConfig.back.generic.name = ART_BACK0; saveConfig.back.generic.flags = QMF_LEFT_JUSTIFY|QMF_PULSEIFFOCUS; saveConfig.back.generic.id = ID_BACK; saveConfig.back.generic.callback = UI_SaveConfigMenu_BackEvent; saveConfig.back.generic.x = 0; saveConfig.back.generic.y = 480-64; saveConfig.back.width = 128; saveConfig.back.height = 64; saveConfig.back.focuspic = ART_BACK1; saveConfig.save.generic.type = MTYPE_BITMAP; saveConfig.save.generic.name = ART_SAVE0; saveConfig.save.generic.flags = QMF_RIGHT_JUSTIFY|QMF_PULSEIFFOCUS; saveConfig.save.generic.id = ID_SAVE; saveConfig.save.generic.callback = UI_SaveConfigMenu_SaveEvent; saveConfig.save.generic.x = 640; saveConfig.save.generic.y = 480-64; saveConfig.save.width = 128; saveConfig.save.height = 64; saveConfig.save.focuspic = ART_SAVE1; Menu_AddItem( &saveConfig.menu, &saveConfig.banner ); Menu_AddItem( &saveConfig.menu, &saveConfig.background ); Menu_AddItem( &saveConfig.menu, &saveConfig.savename ); Menu_AddItem( &saveConfig.menu, &saveConfig.back ); Menu_AddItem( &saveConfig.menu, &saveConfig.save ); } /* ================= UI_SaveConfigMenu_Cache ================= */ void UI_SaveConfigMenu_Cache( void ) { trap_R_RegisterShaderNoMip( ART_BACK0 ); trap_R_RegisterShaderNoMip( ART_BACK1 ); trap_R_RegisterShaderNoMip( ART_SAVE0 ); trap_R_RegisterShaderNoMip( ART_SAVE1 ); trap_R_RegisterShaderNoMip( ART_BACKGROUND ); } /* =============== UI_SaveConfigMenu =============== */ void UI_SaveConfigMenu( void ) { UI_SaveConfigMenu_Init(); UI_PushMenu( &saveConfig.menu ); }
gpl-2.0
hollycroxton/android_kernel_htc_m7
kernel/stop_machine.c
321
9797
/* * kernel/stop_machine.c * * Copyright (C) 2008, 2005 IBM Corporation. * Copyright (C) 2008, 2005 Rusty Russell rusty@rustcorp.com.au * Copyright (C) 2010 SUSE Linux Products GmbH * Copyright (C) 2010 Tejun Heo <tj@kernel.org> * * This file is released under the GPLv2 and any later version. */ #include <linux/completion.h> #include <linux/cpu.h> #include <linux/init.h> #include <linux/kthread.h> #include <linux/export.h> #include <linux/percpu.h> #include <linux/sched.h> #include <linux/stop_machine.h> #include <linux/interrupt.h> #include <linux/kallsyms.h> #include <linux/atomic.h> struct cpu_stop_done { atomic_t nr_todo; bool executed; int ret; struct completion completion; }; struct cpu_stopper { spinlock_t lock; bool enabled; struct list_head works; struct task_struct *thread; }; static DEFINE_PER_CPU(struct cpu_stopper, cpu_stopper); static bool stop_machine_initialized = false; static void cpu_stop_init_done(struct cpu_stop_done *done, unsigned int nr_todo) { memset(done, 0, sizeof(*done)); atomic_set(&done->nr_todo, nr_todo); init_completion(&done->completion); } static void cpu_stop_signal_done(struct cpu_stop_done *done, bool executed) { if (done) { if (executed) done->executed = true; if (atomic_dec_and_test(&done->nr_todo)) complete(&done->completion); } } static void cpu_stop_queue_work(struct cpu_stopper *stopper, struct cpu_stop_work *work) { unsigned long flags; spin_lock_irqsave(&stopper->lock, flags); if (stopper->enabled) { list_add_tail(&work->list, &stopper->works); wake_up_process(stopper->thread); } else cpu_stop_signal_done(work->done, false); spin_unlock_irqrestore(&stopper->lock, flags); } int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg) { struct cpu_stop_done done; struct cpu_stop_work work = { .fn = fn, .arg = arg, .done = &done }; cpu_stop_init_done(&done, 1); cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &work); wait_for_completion(&done.completion); return done.executed ? 
done.ret : -ENOENT; } void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg, struct cpu_stop_work *work_buf) { *work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, }; cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), work_buf); } DEFINE_MUTEX(stop_cpus_mutex); static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work); static void queue_stop_cpus_work(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg, struct cpu_stop_done *done) { struct cpu_stop_work *work; unsigned int cpu; for_each_cpu(cpu, cpumask) { work = &per_cpu(stop_cpus_work, cpu); work->fn = fn; work->arg = arg; work->done = done; } preempt_disable(); for_each_cpu(cpu, cpumask) cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu), &per_cpu(stop_cpus_work, cpu)); preempt_enable(); } static int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) { struct cpu_stop_done done; cpu_stop_init_done(&done, cpumask_weight(cpumask)); queue_stop_cpus_work(cpumask, fn, arg, &done); wait_for_completion(&done.completion); return done.executed ? 
done.ret : -ENOENT; } int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) { int ret; mutex_lock(&stop_cpus_mutex); ret = __stop_cpus(cpumask, fn, arg); mutex_unlock(&stop_cpus_mutex); return ret; } int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg) { int ret; if (!mutex_trylock(&stop_cpus_mutex)) return -EAGAIN; ret = __stop_cpus(cpumask, fn, arg); mutex_unlock(&stop_cpus_mutex); return ret; } static int cpu_stopper_thread(void *data) { struct cpu_stopper *stopper = data; struct cpu_stop_work *work; int ret; repeat: set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop()) { __set_current_state(TASK_RUNNING); return 0; } work = NULL; spin_lock_irq(&stopper->lock); if (!list_empty(&stopper->works)) { work = list_first_entry(&stopper->works, struct cpu_stop_work, list); list_del_init(&work->list); } spin_unlock_irq(&stopper->lock); if (work) { cpu_stop_fn_t fn = work->fn; void *arg = work->arg; struct cpu_stop_done *done = work->done; char ksym_buf[KSYM_NAME_LEN] __maybe_unused; __set_current_state(TASK_RUNNING); preempt_disable(); ret = fn(arg); if (ret) done->ret = ret; preempt_enable(); WARN_ONCE(preempt_count(), "cpu_stop: %s(%p) leaked preempt count\n", kallsyms_lookup((unsigned long)fn, NULL, NULL, NULL, ksym_buf), arg); cpu_stop_signal_done(done, true); } else schedule(); goto repeat; } extern void sched_set_stop_task(int cpu, struct task_struct *stop); static int __cpuinit cpu_stop_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { unsigned int cpu = (unsigned long)hcpu; struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); struct task_struct *p; switch (action & ~CPU_TASKS_FROZEN) { case CPU_UP_PREPARE: BUG_ON(stopper->thread || stopper->enabled || !list_empty(&stopper->works)); p = kthread_create_on_node(cpu_stopper_thread, stopper, cpu_to_node(cpu), "migration/%d", cpu); if (IS_ERR(p)) return notifier_from_errno(PTR_ERR(p)); get_task_struct(p); kthread_bind(p, cpu); 
sched_set_stop_task(cpu, p); stopper->thread = p; break; case CPU_ONLINE: wake_up_process(stopper->thread); spin_lock_irq(&stopper->lock); stopper->enabled = true; spin_unlock_irq(&stopper->lock); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: case CPU_POST_DEAD: { struct cpu_stop_work *work; sched_set_stop_task(cpu, NULL); kthread_stop(stopper->thread); spin_lock_irq(&stopper->lock); list_for_each_entry(work, &stopper->works, list) cpu_stop_signal_done(work->done, false); stopper->enabled = false; spin_unlock_irq(&stopper->lock); put_task_struct(stopper->thread); stopper->thread = NULL; break; } #endif } return NOTIFY_OK; } static struct notifier_block __cpuinitdata cpu_stop_cpu_notifier = { .notifier_call = cpu_stop_cpu_callback, .priority = 10, }; static int __init cpu_stop_init(void) { void *bcpu = (void *)(long)smp_processor_id(); unsigned int cpu; int err; for_each_possible_cpu(cpu) { struct cpu_stopper *stopper = &per_cpu(cpu_stopper, cpu); spin_lock_init(&stopper->lock); INIT_LIST_HEAD(&stopper->works); } err = cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_UP_PREPARE, bcpu); BUG_ON(err != NOTIFY_OK); cpu_stop_cpu_callback(&cpu_stop_cpu_notifier, CPU_ONLINE, bcpu); register_cpu_notifier(&cpu_stop_cpu_notifier); stop_machine_initialized = true; return 0; } early_initcall(cpu_stop_init); #ifdef CONFIG_STOP_MACHINE enum stopmachine_state { STOPMACHINE_NONE, STOPMACHINE_PREPARE, STOPMACHINE_DISABLE_IRQ, STOPMACHINE_RUN, STOPMACHINE_EXIT, }; struct stop_machine_data { int (*fn)(void *); void *data; unsigned int num_threads; const struct cpumask *active_cpus; enum stopmachine_state state; atomic_t thread_ack; }; static void set_state(struct stop_machine_data *smdata, enum stopmachine_state newstate) { atomic_set(&smdata->thread_ack, smdata->num_threads); smp_wmb(); smdata->state = newstate; } static void ack_state(struct stop_machine_data *smdata) { if (atomic_dec_and_test(&smdata->thread_ack)) set_state(smdata, smdata->state + 1); } static int 
stop_machine_cpu_stop(void *data) { struct stop_machine_data *smdata = data; enum stopmachine_state curstate = STOPMACHINE_NONE; int cpu = smp_processor_id(), err = 0; unsigned long flags; bool is_active; local_save_flags(flags); if (!smdata->active_cpus) is_active = cpu == cpumask_first(cpu_online_mask); else is_active = cpumask_test_cpu(cpu, smdata->active_cpus); do { cpu_relax(); if (smdata->state != curstate) { curstate = smdata->state; switch (curstate) { case STOPMACHINE_DISABLE_IRQ: local_irq_disable(); hard_irq_disable(); break; case STOPMACHINE_RUN: if (is_active) err = smdata->fn(smdata->data); break; default: break; } ack_state(smdata); } } while (curstate != STOPMACHINE_EXIT); local_irq_restore(flags); return err; } int __stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) { struct stop_machine_data smdata = { .fn = fn, .data = data, .num_threads = num_online_cpus(), .active_cpus = cpus }; if (!stop_machine_initialized) { unsigned long flags; int ret; WARN_ON_ONCE(smdata.num_threads != 1); local_irq_save(flags); hard_irq_disable(); ret = (*fn)(data); local_irq_restore(flags); return ret; } set_state(&smdata, STOPMACHINE_PREPARE); return stop_cpus(cpu_online_mask, stop_machine_cpu_stop, &smdata); } int stop_machine(int (*fn)(void *), void *data, const struct cpumask *cpus) { int ret; get_online_cpus(); ret = __stop_machine(fn, data, cpus); put_online_cpus(); return ret; } EXPORT_SYMBOL_GPL(stop_machine); int stop_machine_from_inactive_cpu(int (*fn)(void *), void *data, const struct cpumask *cpus) { struct stop_machine_data smdata = { .fn = fn, .data = data, .active_cpus = cpus }; struct cpu_stop_done done; int ret; BUG_ON(cpu_active(raw_smp_processor_id())); smdata.num_threads = num_active_cpus() + 1; while (!mutex_trylock(&stop_cpus_mutex)) cpu_relax(); set_state(&smdata, STOPMACHINE_PREPARE); cpu_stop_init_done(&done, num_active_cpus()); queue_stop_cpus_work(cpu_active_mask, stop_machine_cpu_stop, &smdata, &done); ret = 
stop_machine_cpu_stop(&smdata); while (!completion_done(&done.completion)) cpu_relax(); mutex_unlock(&stop_cpus_mutex); return ret ?: done.ret; } #endif
gpl-2.0
gpandcb/pkernel
arch/tile/kernel/hardwall.c
1345
30634
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/fs.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/rwsem.h> #include <linux/kprobes.h> #include <linux/sched.h> #include <linux/hardirq.h> #include <linux/uaccess.h> #include <linux/smp.h> #include <linux/cdev.h> #include <linux/compat.h> #include <asm/hardwall.h> #include <asm/traps.h> #include <asm/siginfo.h> #include <asm/irq_regs.h> #include <arch/interrupts.h> #include <arch/spr_def.h> /* * Implement a per-cpu "hardwall" resource class such as UDN or IPI. * We use "hardwall" nomenclature throughout for historical reasons. * The lock here controls access to the list data structure as well as * to the items on the list. 
*/ struct hardwall_type { int index; int is_xdn; int is_idn; int disabled; const char *name; struct list_head list; spinlock_t lock; struct proc_dir_entry *proc_dir; }; enum hardwall_index { HARDWALL_UDN = 0, #ifndef __tilepro__ HARDWALL_IDN = 1, HARDWALL_IPI = 2, #endif _HARDWALL_TYPES }; static struct hardwall_type hardwall_types[] = { { /* user-space access to UDN */ 0, 1, 0, 0, "udn", LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list), __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_UDN].lock), NULL }, #ifndef __tilepro__ { /* user-space access to IDN */ 1, 1, 1, 1, /* disabled pending hypervisor support */ "idn", LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list), __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IDN].lock), NULL }, { /* access to user-space IPI */ 2, 0, 0, 0, "ipi", LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list), __SPIN_LOCK_UNLOCKED(hardwall_types[HARDWALL_IPI].lock), NULL }, #endif }; /* * This data structure tracks the cpu data, etc., associated * one-to-one with a "struct file *" from opening a hardwall device file. * Note that the file's private data points back to this structure. */ struct hardwall_info { struct list_head list; /* for hardwall_types.list */ struct list_head task_head; /* head of tasks in this hardwall */ struct hardwall_type *type; /* type of this resource */ struct cpumask cpumask; /* cpus reserved */ int id; /* integer id for this hardwall */ int teardown_in_progress; /* are we tearing this one down? */ /* Remaining fields only valid for user-network resources. */ int ulhc_x; /* upper left hand corner x coord */ int ulhc_y; /* upper left hand corner y coord */ int width; /* rectangle width */ int height; /* rectangle height */ #if CHIP_HAS_REV1_XDN() atomic_t xdn_pending_count; /* cores in phase 1 of drain */ #endif }; /* /proc/tile/hardwall */ static struct proc_dir_entry *hardwall_proc_dir; /* Functions to manage files in /proc/tile/hardwall. 
*/ static void hardwall_add_proc(struct hardwall_info *); static void hardwall_remove_proc(struct hardwall_info *); /* Allow disabling UDN access. */ static int __init noudn(char *str) { pr_info("User-space UDN access is disabled\n"); hardwall_types[HARDWALL_UDN].disabled = 1; return 0; } early_param("noudn", noudn); #ifndef __tilepro__ /* Allow disabling IDN access. */ static int __init noidn(char *str) { pr_info("User-space IDN access is disabled\n"); hardwall_types[HARDWALL_IDN].disabled = 1; return 0; } early_param("noidn", noidn); /* Allow disabling IPI access. */ static int __init noipi(char *str) { pr_info("User-space IPI access is disabled\n"); hardwall_types[HARDWALL_IPI].disabled = 1; return 0; } early_param("noipi", noipi); #endif /* * Low-level primitives for UDN/IDN */ #ifdef __tilepro__ #define mtspr_XDN(hwt, name, val) \ do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0) #define mtspr_MPL_XDN(hwt, name, val) \ do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0) #define mfspr_XDN(hwt, name) \ ((void)(hwt), __insn_mfspr(SPR_UDN_##name)) #else #define mtspr_XDN(hwt, name, val) \ do { \ if ((hwt)->is_idn) \ __insn_mtspr(SPR_IDN_##name, (val)); \ else \ __insn_mtspr(SPR_UDN_##name, (val)); \ } while (0) #define mtspr_MPL_XDN(hwt, name, val) \ do { \ if ((hwt)->is_idn) \ __insn_mtspr(SPR_MPL_IDN_##name, (val)); \ else \ __insn_mtspr(SPR_MPL_UDN_##name, (val)); \ } while (0) #define mfspr_XDN(hwt, name) \ ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name)) #endif /* Set a CPU bit if the CPU is online. */ #define cpu_online_set(cpu, dst) do { \ if (cpu_online(cpu)) \ cpumask_set_cpu(cpu, dst); \ } while (0) /* Does the given rectangle contain the given x,y coordinate? */ static int contains(struct hardwall_info *r, int x, int y) { return (x >= r->ulhc_x && x < r->ulhc_x + r->width) && (y >= r->ulhc_y && y < r->ulhc_y + r->height); } /* Compute the rectangle parameters and validate the cpumask. 
*/ static int check_rectangle(struct hardwall_info *r, struct cpumask *mask) { int x, y, cpu, ulhc, lrhc; /* The first cpu is the ULHC, the last the LRHC. */ ulhc = find_first_bit(cpumask_bits(mask), nr_cpumask_bits); lrhc = find_last_bit(cpumask_bits(mask), nr_cpumask_bits); /* Compute the rectangle attributes from the cpus. */ r->ulhc_x = cpu_x(ulhc); r->ulhc_y = cpu_y(ulhc); r->width = cpu_x(lrhc) - r->ulhc_x + 1; r->height = cpu_y(lrhc) - r->ulhc_y + 1; /* Width and height must be positive */ if (r->width <= 0 || r->height <= 0) return -EINVAL; /* Confirm that the cpumask is exactly the rectangle. */ for (y = 0, cpu = 0; y < smp_height; ++y) for (x = 0; x < smp_width; ++x, ++cpu) if (cpumask_test_cpu(cpu, mask) != contains(r, x, y)) return -EINVAL; /* * Note that offline cpus can't be drained when this user network * rectangle eventually closes. We used to detect this * situation and print a warning, but it annoyed users and * they ignored it anyway, so now we just return without a * warning. */ return 0; } /* * Hardware management of hardwall setup, teardown, trapping, * and enabling/disabling PL0 access to the networks. */ /* Bit field values to mask together for writes to SPR_XDN_DIRECTION_PROTECT */ enum direction_protect { N_PROTECT = (1 << 0), E_PROTECT = (1 << 1), S_PROTECT = (1 << 2), W_PROTECT = (1 << 3), C_PROTECT = (1 << 4), }; static inline int xdn_which_interrupt(struct hardwall_type *hwt) { #ifndef __tilepro__ if (hwt->is_idn) return INT_IDN_FIREWALL; #endif return INT_UDN_FIREWALL; } static void enable_firewall_interrupts(struct hardwall_type *hwt) { arch_local_irq_unmask_now(xdn_which_interrupt(hwt)); } static void disable_firewall_interrupts(struct hardwall_type *hwt) { arch_local_irq_mask_now(xdn_which_interrupt(hwt)); } /* Set up hardwall on this cpu based on the passed hardwall_info. 
*/ static void hardwall_setup_func(void *info) { struct hardwall_info *r = info; struct hardwall_type *hwt = r->type; int cpu = smp_processor_id(); /* on_each_cpu disables preemption */ int x = cpu_x(cpu); int y = cpu_y(cpu); int bits = 0; if (x == r->ulhc_x) bits |= W_PROTECT; if (x == r->ulhc_x + r->width - 1) bits |= E_PROTECT; if (y == r->ulhc_y) bits |= N_PROTECT; if (y == r->ulhc_y + r->height - 1) bits |= S_PROTECT; BUG_ON(bits == 0); mtspr_XDN(hwt, DIRECTION_PROTECT, bits); enable_firewall_interrupts(hwt); } /* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */ static void hardwall_protect_rectangle(struct hardwall_info *r) { int x, y, cpu, delta; struct cpumask rect_cpus; cpumask_clear(&rect_cpus); /* First include the top and bottom edges */ cpu = r->ulhc_y * smp_width + r->ulhc_x; delta = (r->height - 1) * smp_width; for (x = 0; x < r->width; ++x, ++cpu) { cpu_online_set(cpu, &rect_cpus); cpu_online_set(cpu + delta, &rect_cpus); } /* Then the left and right edges */ cpu -= r->width; delta = r->width - 1; for (y = 0; y < r->height; ++y, cpu += smp_width) { cpu_online_set(cpu, &rect_cpus); cpu_online_set(cpu + delta, &rect_cpus); } /* Then tell all the cpus to set up their protection SPR */ on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1); } /* Entered from INT_xDN_FIREWALL interrupt vector with irqs disabled. */ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) { struct hardwall_info *rect; struct hardwall_type *hwt; struct task_struct *p; struct siginfo info; int cpu = smp_processor_id(); int found_processes; struct pt_regs *old_regs = set_irq_regs(regs); irq_enter(); /* Figure out which network trapped. */ switch (fault_num) { #ifndef __tilepro__ case INT_IDN_FIREWALL: hwt = &hardwall_types[HARDWALL_IDN]; break; #endif case INT_UDN_FIREWALL: hwt = &hardwall_types[HARDWALL_UDN]; break; default: BUG(); } BUG_ON(hwt->disabled); /* This tile trapped a network access; find the rectangle. 
*/ spin_lock(&hwt->lock); list_for_each_entry(rect, &hwt->list, list) { if (cpumask_test_cpu(cpu, &rect->cpumask)) break; } /* * It shouldn't be possible not to find this cpu on the * rectangle list, since only cpus in rectangles get hardwalled. * The hardwall is only removed after the user network is drained. */ BUG_ON(&rect->list == &hwt->list); /* * If we already started teardown on this hardwall, don't worry; * the abort signal has been sent and we are just waiting for things * to quiesce. */ if (rect->teardown_in_progress) { pr_notice("cpu %d: detected %s hardwall violation %#lx while teardown already in progress\n", cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT)); goto done; } /* * Kill off any process that is activated in this rectangle. * We bypass security to deliver the signal, since it must be * one of the activated processes that generated the user network * message that caused this trap, and all the activated * processes shared a single open file so are pretty tightly * bound together from a security point of view to begin with. */ rect->teardown_in_progress = 1; wmb(); /* Ensure visibility of rectangle before notifying processes. */ pr_notice("cpu %d: detected %s hardwall violation %#lx...\n", cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT)); info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_HARDWALL; found_processes = 0; list_for_each_entry(p, &rect->task_head, thread.hardwall[hwt->index].list) { BUG_ON(p->thread.hardwall[hwt->index].info != rect); if (!(p->flags & PF_EXITING)) { found_processes = 1; pr_notice("hardwall: killing %d\n", p->pid); do_send_sig_info(info.si_signo, &info, p, false); } } if (!found_processes) pr_notice("hardwall: no associated processes!\n"); done: spin_unlock(&hwt->lock); /* * We have to disable firewall interrupts now, or else when we * return from this handler, we will simply re-interrupt back to * it. 
However, we can't clear the protection bits, since we * haven't yet drained the network, and that would allow packets * to cross out of the hardwall region. */ disable_firewall_interrupts(hwt); irq_exit(); set_irq_regs(old_regs); } /* Allow access from user space to the user network. */ void grant_hardwall_mpls(struct hardwall_type *hwt) { #ifndef __tilepro__ if (!hwt->is_xdn) { __insn_mtspr(SPR_MPL_IPI_0_SET_0, 1); return; } #endif mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1); mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1); mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1); mtspr_MPL_XDN(hwt, TIMER_SET_0, 1); #if !CHIP_HAS_REV1_XDN() mtspr_MPL_XDN(hwt, REFILL_SET_0, 1); mtspr_MPL_XDN(hwt, CA_SET_0, 1); #endif } /* Deny access from user space to the user network. */ void restrict_hardwall_mpls(struct hardwall_type *hwt) { #ifndef __tilepro__ if (!hwt->is_xdn) { __insn_mtspr(SPR_MPL_IPI_0_SET_1, 1); return; } #endif mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1); mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1); mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1); mtspr_MPL_XDN(hwt, TIMER_SET_1, 1); #if !CHIP_HAS_REV1_XDN() mtspr_MPL_XDN(hwt, REFILL_SET_1, 1); mtspr_MPL_XDN(hwt, CA_SET_1, 1); #endif } /* Restrict or deny as necessary for the task we're switching to. */ void hardwall_switch_tasks(struct task_struct *prev, struct task_struct *next) { int i; for (i = 0; i < HARDWALL_TYPES; ++i) { if (prev->thread.hardwall[i].info != NULL) { if (next->thread.hardwall[i].info == NULL) restrict_hardwall_mpls(&hardwall_types[i]); } else if (next->thread.hardwall[i].info != NULL) { grant_hardwall_mpls(&hardwall_types[i]); } } } /* Does this task have the right to IPI the given cpu? */ int hardwall_ipi_valid(int cpu) { #ifdef __tilegx__ struct hardwall_info *info = current->thread.hardwall[HARDWALL_IPI].info; return info && cpumask_test_cpu(cpu, &info->cpumask); #else return 0; #endif } /* * Code to create, activate, deactivate, and destroy hardwall resources. 
*/ /* Create a hardwall for the given resource */ static struct hardwall_info *hardwall_create(struct hardwall_type *hwt, size_t size, const unsigned char __user *bits) { struct hardwall_info *iter, *info; struct cpumask mask; unsigned long flags; int rc; /* Reject crazy sizes out of hand, a la sys_mbind(). */ if (size > PAGE_SIZE) return ERR_PTR(-EINVAL); /* Copy whatever fits into a cpumask. */ if (copy_from_user(&mask, bits, min(sizeof(struct cpumask), size))) return ERR_PTR(-EFAULT); /* * If the size was short, clear the rest of the mask; * otherwise validate that the rest of the user mask was zero * (we don't try hard to be efficient when validating huge masks). */ if (size < sizeof(struct cpumask)) { memset((char *)&mask + size, 0, sizeof(struct cpumask) - size); } else if (size > sizeof(struct cpumask)) { size_t i; for (i = sizeof(struct cpumask); i < size; ++i) { char c; if (get_user(c, &bits[i])) return ERR_PTR(-EFAULT); if (c) return ERR_PTR(-EINVAL); } } /* Allocate a new hardwall_info optimistically. */ info = kmalloc(sizeof(struct hardwall_info), GFP_KERNEL | __GFP_ZERO); if (info == NULL) return ERR_PTR(-ENOMEM); INIT_LIST_HEAD(&info->task_head); info->type = hwt; /* Compute the rectangle size and validate that it's plausible. */ cpumask_copy(&info->cpumask, &mask); info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits); if (hwt->is_xdn) { rc = check_rectangle(info, &mask); if (rc != 0) { kfree(info); return ERR_PTR(rc); } } /* * Eliminate cpus that are not part of this Linux client. * Note that this allows for configurations that we might not want to * support, such as one client on every even cpu, another client on * every odd cpu. */ cpumask_and(&info->cpumask, &info->cpumask, cpu_online_mask); /* Confirm it doesn't overlap and add it to the list. 
*/ spin_lock_irqsave(&hwt->lock, flags); list_for_each_entry(iter, &hwt->list, list) { if (cpumask_intersects(&iter->cpumask, &info->cpumask)) { spin_unlock_irqrestore(&hwt->lock, flags); kfree(info); return ERR_PTR(-EBUSY); } } list_add_tail(&info->list, &hwt->list); spin_unlock_irqrestore(&hwt->lock, flags); /* Set up appropriate hardwalling on all affected cpus. */ if (hwt->is_xdn) hardwall_protect_rectangle(info); /* Create a /proc/tile/hardwall entry. */ hardwall_add_proc(info); return info; } /* Activate a given hardwall on this cpu for this process. */ static int hardwall_activate(struct hardwall_info *info) { int cpu; unsigned long flags; struct task_struct *p = current; struct thread_struct *ts = &p->thread; struct hardwall_type *hwt; /* Require a hardwall. */ if (info == NULL) return -ENODATA; /* Not allowed to activate a hardwall that is being torn down. */ if (info->teardown_in_progress) return -EINVAL; /* * Get our affinity; if we're not bound to this tile uniquely, * we can't access the network registers. */ if (cpumask_weight(&p->cpus_allowed) != 1) return -EPERM; /* Make sure we are bound to a cpu assigned to this resource. */ cpu = smp_processor_id(); BUG_ON(cpumask_first(&p->cpus_allowed) != cpu); if (!cpumask_test_cpu(cpu, &info->cpumask)) return -EINVAL; /* If we are already bound to this hardwall, it's a no-op. */ hwt = info->type; if (ts->hardwall[hwt->index].info) { BUG_ON(ts->hardwall[hwt->index].info != info); return 0; } /* Success! This process gets to use the resource on this cpu. */ ts->hardwall[hwt->index].info = info; spin_lock_irqsave(&hwt->lock, flags); list_add(&ts->hardwall[hwt->index].list, &info->task_head); spin_unlock_irqrestore(&hwt->lock, flags); grant_hardwall_mpls(hwt); printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n", p->pid, p->comm, hwt->name, cpu); return 0; } /* * Deactivate a task's hardwall. Must hold lock for hardwall_type. 
* This method may be called from exit_thread(), so we don't want to * rely on too many fields of struct task_struct still being valid. * We assume the cpus_allowed, pid, and comm fields are still valid. */ static void _hardwall_deactivate(struct hardwall_type *hwt, struct task_struct *task) { struct thread_struct *ts = &task->thread; if (cpumask_weight(&task->cpus_allowed) != 1) { pr_err("pid %d (%s) releasing %s hardwall with an affinity mask containing %d cpus!\n", task->pid, task->comm, hwt->name, cpumask_weight(&task->cpus_allowed)); BUG(); } BUG_ON(ts->hardwall[hwt->index].info == NULL); ts->hardwall[hwt->index].info = NULL; list_del(&ts->hardwall[hwt->index].list); if (task == current) restrict_hardwall_mpls(hwt); } /* Deactivate a task's hardwall. */ static int hardwall_deactivate(struct hardwall_type *hwt, struct task_struct *task) { unsigned long flags; int activated; spin_lock_irqsave(&hwt->lock, flags); activated = (task->thread.hardwall[hwt->index].info != NULL); if (activated) _hardwall_deactivate(hwt, task); spin_unlock_irqrestore(&hwt->lock, flags); if (!activated) return -EINVAL; printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n", task->pid, task->comm, hwt->name, raw_smp_processor_id()); return 0; } void hardwall_deactivate_all(struct task_struct *task) { int i; for (i = 0; i < HARDWALL_TYPES; ++i) if (task->thread.hardwall[i].info) hardwall_deactivate(&hardwall_types[i], task); } /* Stop the switch before draining the network. */ static void stop_xdn_switch(void *arg) { #if !CHIP_HAS_REV1_XDN() /* Freeze the switch and the demux. */ __insn_mtspr(SPR_UDN_SP_FREEZE, SPR_UDN_SP_FREEZE__SP_FRZ_MASK | SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK | SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK); #else /* * Drop all packets bound for the core or off the edge. 
* We rely on the normal hardwall protection setup code * to have set the low four bits to trigger firewall interrupts, * and shift those bits up to trigger "drop on send" semantics, * plus adding "drop on send to core" for all switches. * In practice it seems the switches latch the DIRECTION_PROTECT * SPR so they won't start dropping if they're already * delivering the last message to the core, but it doesn't * hurt to enable it here. */ struct hardwall_type *hwt = arg; unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT); mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5); #endif } static void empty_xdn_demuxes(struct hardwall_type *hwt) { #ifndef __tilepro__ if (hwt->is_idn) { while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0)) (void) __tile_idn0_receive(); while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1)) (void) __tile_idn1_receive(); return; } #endif while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0)) (void) __tile_udn0_receive(); while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1)) (void) __tile_udn1_receive(); while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2)) (void) __tile_udn2_receive(); while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3)) (void) __tile_udn3_receive(); } /* Drain all the state from a stopped switch. */ static void drain_xdn_switch(void *arg) { struct hardwall_info *info = arg; struct hardwall_type *hwt = info->type; #if CHIP_HAS_REV1_XDN() /* * The switches have been configured to drop any messages * destined for cores (or off the edge of the rectangle). * But the current message may continue to be delivered, * so we wait until all the cores have finished any pending * messages before we stop draining. 
*/ int pending = mfspr_XDN(hwt, PENDING); while (pending--) { empty_xdn_demuxes(hwt); if (hwt->is_idn) __tile_idn_send(0); else __tile_udn_send(0); } atomic_dec(&info->xdn_pending_count); while (atomic_read(&info->xdn_pending_count)) empty_xdn_demuxes(hwt); #else int i; int from_tile_words, ca_count; /* Empty out the 5 switch point fifos. */ for (i = 0; i < 5; i++) { int words, j; __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i); words = __insn_mfspr(SPR_UDN_SP_STATE) & 0xF; for (j = 0; j < words; j++) (void) __insn_mfspr(SPR_UDN_SP_FIFO_DATA); BUG_ON((__insn_mfspr(SPR_UDN_SP_STATE) & 0xF) != 0); } /* Dump out the 3 word fifo at top. */ from_tile_words = (__insn_mfspr(SPR_UDN_DEMUX_STATUS) >> 10) & 0x3; for (i = 0; i < from_tile_words; i++) (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO); /* Empty out demuxes. */ empty_xdn_demuxes(hwt); /* Empty out catch all. */ ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT); for (i = 0; i < ca_count; i++) (void) __insn_mfspr(SPR_UDN_CA_DATA); BUG_ON(__insn_mfspr(SPR_UDN_DEMUX_CA_COUNT) != 0); /* Clear demux logic. */ __insn_mtspr(SPR_UDN_DEMUX_CTL, 1); /* * Write switch state; experimentation indicates that 0xc3000 * is an idle switch point. */ for (i = 0; i < 5; i++) { __insn_mtspr(SPR_UDN_SP_FIFO_SEL, i); __insn_mtspr(SPR_UDN_SP_STATE, 0xc3000); } #endif } /* Reset random XDN state registers at boot up and during hardwall teardown. */ static void reset_xdn_network_state(struct hardwall_type *hwt) { if (hwt->disabled) return; /* Clear out other random registers so we have a clean slate. */ mtspr_XDN(hwt, DIRECTION_PROTECT, 0); mtspr_XDN(hwt, AVAIL_EN, 0); mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0); #if !CHIP_HAS_REV1_XDN() /* Reset UDN coordinates to their standard value */ { unsigned int cpu = smp_processor_id(); unsigned int x = cpu_x(cpu); unsigned int y = cpu_y(cpu); __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7)); } /* Set demux tags to predefined values and enable them. 
*/ __insn_mtspr(SPR_UDN_TAG_VALID, 0xf); __insn_mtspr(SPR_UDN_TAG_0, (1 << 0)); __insn_mtspr(SPR_UDN_TAG_1, (1 << 1)); __insn_mtspr(SPR_UDN_TAG_2, (1 << 2)); __insn_mtspr(SPR_UDN_TAG_3, (1 << 3)); /* Set other rev0 random registers to a clean state. */ __insn_mtspr(SPR_UDN_REFILL_EN, 0); __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0); __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0); /* Start the switch and demux. */ __insn_mtspr(SPR_UDN_SP_FREEZE, 0); #endif } void reset_network_state(void) { reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]); #ifndef __tilepro__ reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]); #endif } /* Restart an XDN switch after draining. */ static void restart_xdn_switch(void *arg) { struct hardwall_type *hwt = arg; #if CHIP_HAS_REV1_XDN() /* One last drain step to avoid races with injection and draining. */ empty_xdn_demuxes(hwt); #endif reset_xdn_network_state(hwt); /* Disable firewall interrupts. */ disable_firewall_interrupts(hwt); } /* Last reference to a hardwall is gone, so clear the network. */ static void hardwall_destroy(struct hardwall_info *info) { struct task_struct *task; struct hardwall_type *hwt; unsigned long flags; /* Make sure this file actually represents a hardwall. */ if (info == NULL) return; /* * Deactivate any remaining tasks. It's possible to race with * some other thread that is exiting and hasn't yet called * deactivate (when freeing its thread_info), so we carefully * deactivate any remaining tasks before freeing the * hardwall_info object itself. */ hwt = info->type; info->teardown_in_progress = 1; spin_lock_irqsave(&hwt->lock, flags); list_for_each_entry(task, &info->task_head, thread.hardwall[hwt->index].list) _hardwall_deactivate(hwt, task); spin_unlock_irqrestore(&hwt->lock, flags); if (hwt->is_xdn) { /* Configure the switches for draining the user network. 
*/ printk(KERN_DEBUG "Clearing %s hardwall rectangle %dx%d %d,%d\n", hwt->name, info->width, info->height, info->ulhc_x, info->ulhc_y); on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1); /* Drain the network. */ #if CHIP_HAS_REV1_XDN() atomic_set(&info->xdn_pending_count, cpumask_weight(&info->cpumask)); on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0); #else on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1); #endif /* Restart switch and disable firewall. */ on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1); } /* Remove the /proc/tile/hardwall entry. */ hardwall_remove_proc(info); /* Now free the hardwall from the list. */ spin_lock_irqsave(&hwt->lock, flags); BUG_ON(!list_empty(&info->task_head)); list_del(&info->list); spin_unlock_irqrestore(&hwt->lock, flags); kfree(info); } static int hardwall_proc_show(struct seq_file *sf, void *v) { struct hardwall_info *info = sf->private; seq_printf(sf, "%*pbl\n", cpumask_pr_args(&info->cpumask)); return 0; } static int hardwall_proc_open(struct inode *inode, struct file *file) { return single_open(file, hardwall_proc_show, PDE_DATA(inode)); } static const struct file_operations hardwall_proc_fops = { .open = hardwall_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void hardwall_add_proc(struct hardwall_info *info) { char buf[64]; snprintf(buf, sizeof(buf), "%d", info->id); proc_create_data(buf, 0444, info->type->proc_dir, &hardwall_proc_fops, info); } static void hardwall_remove_proc(struct hardwall_info *info) { char buf[64]; snprintf(buf, sizeof(buf), "%d", info->id); remove_proc_entry(buf, info->type->proc_dir); } int proc_pid_hardwall(struct seq_file *m, struct pid_namespace *ns, struct pid *pid, struct task_struct *task) { int i; int n = 0; for (i = 0; i < HARDWALL_TYPES; ++i) { struct hardwall_info *info = task->thread.hardwall[i].info; if (info) seq_printf(m, "%s: %d\n", info->type->name, info->id); } return n; } void 
proc_tile_hardwall_init(struct proc_dir_entry *root) { int i; for (i = 0; i < HARDWALL_TYPES; ++i) { struct hardwall_type *hwt = &hardwall_types[i]; if (hwt->disabled) continue; if (hardwall_proc_dir == NULL) hardwall_proc_dir = proc_mkdir("hardwall", root); hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir); } } /* * Character device support via ioctl/close. */ static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b) { struct hardwall_info *info = file->private_data; int minor = iminor(file->f_mapping->host); struct hardwall_type* hwt; if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE) return -EINVAL; BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES); BUILD_BUG_ON(HARDWALL_TYPES != sizeof(hardwall_types)/sizeof(hardwall_types[0])); if (minor < 0 || minor >= HARDWALL_TYPES) return -EINVAL; hwt = &hardwall_types[minor]; WARN_ON(info && hwt != info->type); switch (_IOC_NR(a)) { case _HARDWALL_CREATE: if (hwt->disabled) return -ENOSYS; if (info != NULL) return -EALREADY; info = hardwall_create(hwt, _IOC_SIZE(a), (const unsigned char __user *)b); if (IS_ERR(info)) return PTR_ERR(info); file->private_data = info; return 0; case _HARDWALL_ACTIVATE: return hardwall_activate(info); case _HARDWALL_DEACTIVATE: if (current->thread.hardwall[hwt->index].info != info) return -EINVAL; return hardwall_deactivate(hwt, current); case _HARDWALL_GET_ID: return info ? info->id : -EINVAL; default: return -EINVAL; } } #ifdef CONFIG_COMPAT static long hardwall_compat_ioctl(struct file *file, unsigned int a, unsigned long b) { /* Sign-extend the argument so it can be used as a pointer. */ return hardwall_ioctl(file, a, (unsigned long)compat_ptr(b)); } #endif /* The user process closed the file; revoke access to user networks. 
*/ static int hardwall_flush(struct file *file, fl_owner_t owner) { struct hardwall_info *info = file->private_data; struct task_struct *task, *tmp; unsigned long flags; if (info) { /* * NOTE: if multiple threads are activated on this hardwall * file, the other threads will continue having access to the * user network until they are context-switched out and back * in again. * * NOTE: A NULL files pointer means the task is being torn * down, so in that case we also deactivate it. */ struct hardwall_type *hwt = info->type; spin_lock_irqsave(&hwt->lock, flags); list_for_each_entry_safe(task, tmp, &info->task_head, thread.hardwall[hwt->index].list) { if (task->files == owner || task->files == NULL) _hardwall_deactivate(hwt, task); } spin_unlock_irqrestore(&hwt->lock, flags); } return 0; } /* This hardwall is gone, so destroy it. */ static int hardwall_release(struct inode *inode, struct file *file) { hardwall_destroy(file->private_data); return 0; } static const struct file_operations dev_hardwall_fops = { .open = nonseekable_open, .unlocked_ioctl = hardwall_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = hardwall_compat_ioctl, #endif .flush = hardwall_flush, .release = hardwall_release, }; static struct cdev hardwall_dev; static int __init dev_hardwall_init(void) { int rc; dev_t dev; rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall"); if (rc < 0) return rc; cdev_init(&hardwall_dev, &dev_hardwall_fops); rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES); if (rc < 0) return rc; return 0; } late_initcall(dev_hardwall_init);
gpl-2.0
ruleless/linux
drivers/power/wm831x_backup.c
1345
5612
/*
 * Backup battery driver for Wolfson Microelectronics wm831x PMICs
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/slab.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/auxadc.h>
#include <linux/mfd/wm831x/pmu.h>
#include <linux/mfd/wm831x/pdata.h>

/* Per-device state, devm-allocated in probe(). */
struct wm831x_backup {
	struct wm831x *wm831x;			/* parent MFD core device */
	struct power_supply *backup;		/* registered supply handle */
	struct power_supply_desc backup_desc;	/* descriptor for registration */
	char name[20];				/* supply name, may carry ".%d" suffix */
};

/*
 * Read one auxiliary-ADC channel in microvolts and, on success, store the
 * raw reading into val->intval.  Returns the wm831x_auxadc_read_uv()
 * result (>= 0 on success, negative errno on failure).
 */
static int wm831x_backup_read_voltage(struct wm831x *wm831x,
				      enum wm831x_auxadc src,
				      union power_supply_propval *val)
{
	int ret;

	ret = wm831x_auxadc_read_uv(wm831x, src);
	if (ret >= 0)
		val->intval = ret;

	return ret;
}

/*********************************************************************
 *		Backup supply properties
 *********************************************************************/

/*
 * Program the backup-charger control register from platform data.
 * Invalid vlim/ilim values are reported but otherwise ignored (the
 * corresponding field is simply left at its reset value); a failure to
 * unlock the write-protected register block aborts the configuration.
 * Configuration failures here are deliberately non-fatal to probe().
 */
static void wm831x_config_backup(struct wm831x *wm831x)
{
	struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
	struct wm831x_backup_pdata *pdata;
	int ret, reg;

	if (!wm831x_pdata || !wm831x_pdata->backup) {
		dev_warn(wm831x->dev,
			 "No backup battery charger configuration\n");
		return;
	}

	pdata = wm831x_pdata->backup;

	reg = 0;

	if (pdata->charger_enable)
		reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA;
	if (pdata->no_constant_voltage)
		reg |= WM831X_BKUP_CHG_MODE;

	/* Charge voltage limit: only 2.5V (field clear) or 3.1V supported. */
	switch (pdata->vlim) {
	case 2500:
		break;
	case 3100:
		reg |= WM831X_BKUP_CHG_VLIM;
		break;
	default:
		dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n",
			pdata->vlim);
	}

	/* Charge current limit: 2-bit field at bits [1:0], 100uA steps. */
	switch (pdata->ilim) {
	case 100:
		break;
	case 200:
		reg |= 1;
		break;
	case 300:
		reg |= 2;
		break;
	case 400:
		reg |= 3;
		break;
	default:
		dev_err(wm831x->dev, "Invalid backup current limit %duA\n",
			pdata->ilim);
	}

	/* The charger control register is write-protected; unlock first. */
	ret = wm831x_reg_unlock(wm831x);
	if (ret != 0) {
		dev_err(wm831x->dev, "Failed to unlock registers: %d\n", ret);
		return;
	}

	ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL,
			      WM831X_BKUP_CHG_ENA_MASK |
			      WM831X_BKUP_CHG_MODE_MASK |
			      WM831X_BKUP_BATT_DET_ENA_MASK |
			      WM831X_BKUP_CHG_VLIM_MASK |
			      WM831X_BKUP_CHG_ILIM_MASK,
			      reg);
	if (ret != 0)
		dev_err(wm831x->dev,
			"Failed to set backup charger config: %d\n", ret);

	wm831x_reg_lock(wm831x);
}

/*
 * power_supply get_property callback.  The charger-control register is
 * read up front because both STATUS and PRESENT are derived from its
 * charge-status bit; VOLTAGE_NOW goes through the aux ADC instead.
 */
static int wm831x_backup_get_prop(struct power_supply *psy,
				  enum power_supply_property psp,
				  union power_supply_propval *val)
{
	struct wm831x_backup *devdata = dev_get_drvdata(psy->dev.parent);
	struct wm831x *wm831x = devdata->wm831x;
	int ret = 0;

	ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL);
	if (ret < 0)
		return ret;

	switch (psp) {
	case POWER_SUPPLY_PROP_STATUS:
		if (ret & WM831X_BKUP_CHG_STS)
			val->intval = POWER_SUPPLY_STATUS_CHARGING;
		else
			val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
		break;

	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		ret = wm831x_backup_read_voltage(wm831x, WM831X_AUX_BKUP_BATT,
						 val);
		break;

	case POWER_SUPPLY_PROP_PRESENT:
		/* Presence is inferred from the charge-status bit. */
		if (ret & WM831X_BKUP_CHG_STS)
			val->intval = 1;
		else
			val->intval = 0;
		break;

	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static enum power_supply_property wm831x_backup_props[] = {
	POWER_SUPPLY_PROP_STATUS,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_PRESENT,
};

/*********************************************************************
 *		Initialisation
 *********************************************************************/

/*
 * Probe: allocate per-device state, apply the (optional) platform
 * configuration, pick a unique supply name when several wm831x chips are
 * present, and register the power supply.
 */
static int wm831x_backup_probe(struct platform_device *pdev)
{
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data;
	struct wm831x_backup *devdata;

	devdata = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_backup),
			       GFP_KERNEL);
	if (devdata == NULL)
		return -ENOMEM;

	devdata->wm831x = wm831x;
	platform_set_drvdata(pdev, devdata);

	/* We ignore configuration failures since we can still read
	 * back the status without enabling the charger (which may
	 * already be enabled anyway).
	 */
	wm831x_config_backup(wm831x);

	if (wm831x_pdata && wm831x_pdata->wm831x_num)
		snprintf(devdata->name, sizeof(devdata->name),
			 "wm831x-backup.%d", wm831x_pdata->wm831x_num);
	else
		snprintf(devdata->name, sizeof(devdata->name),
			 "wm831x-backup");

	devdata->backup_desc.name = devdata->name;
	devdata->backup_desc.type = POWER_SUPPLY_TYPE_BATTERY;
	devdata->backup_desc.properties = wm831x_backup_props;
	devdata->backup_desc.num_properties = ARRAY_SIZE(wm831x_backup_props);
	devdata->backup_desc.get_property = wm831x_backup_get_prop;
	devdata->backup = power_supply_register(&pdev->dev,
						&devdata->backup_desc, NULL);

	return PTR_ERR_OR_ZERO(devdata->backup);
}

static int wm831x_backup_remove(struct platform_device *pdev)
{
	struct wm831x_backup *devdata = platform_get_drvdata(pdev);

	power_supply_unregister(devdata->backup);

	return 0;
}

static struct platform_driver wm831x_backup_driver = {
	.probe = wm831x_backup_probe,
	.remove = wm831x_backup_remove,
	.driver = {
		.name = "wm831x-backup",
	},
};

module_platform_driver(wm831x_backup_driver);

MODULE_DESCRIPTION("Backup battery charger driver for WM831x PMICs");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-backup");
gpl-2.0
thehelios/pm-linux-3.8.y
arch/powerpc/platforms/85xx/sbc8548.c
2625
3273
/* * Wind River SBC8548 setup and early boot code. * * Copyright 2007 Wind River Systems Inc. * * By Paul Gortmaker (see MAINTAINERS for contact information) * * Based largely on the MPC8548CDS support - Copyright 2005 Freescale Inc. * * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/major.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/initrd.h> #include <linux/interrupt.h> #include <linux/fsl_devices.h> #include <linux/of_platform.h> #include <asm/pgtable.h> #include <asm/page.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/pci-bridge.h> #include <asm/irq.h> #include <mm/mmu_decl.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" static int sbc_rev; static void __init sbc8548_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } /* Extract the HW Rev from the EPLD on the board */ static int __init sbc8548_hw_rev(void) { struct device_node *np; struct resource res; unsigned int *rev; int board_rev = 0; np = of_find_compatible_node(NULL, NULL, "hw-rev"); if (np == NULL) { printk("No HW-REV found in DTB.\n"); return -ENODEV; } of_address_to_resource(np, 0, &res); of_node_put(np); rev = ioremap(res.start,sizeof(unsigned int)); board_rev = (*rev) >> 28; iounmap(rev); return board_rev; } /* * Setup the architecture */ static void __init sbc8548_setup_arch(void) { if 
(ppc_md.progress) ppc_md.progress("sbc8548_setup_arch()", 0); fsl_pci_assign_primary(); sbc_rev = sbc8548_hw_rev(); } static void sbc8548_show_cpuinfo(struct seq_file *m) { uint pvid, svid, phid1; pvid = mfspr(SPRN_PVR); svid = mfspr(SPRN_SVR); seq_printf(m, "Vendor\t\t: Wind River\n"); seq_printf(m, "Machine\t\t: SBC8548 v%d\n", sbc_rev); seq_printf(m, "PVR\t\t: 0x%x\n", pvid); seq_printf(m, "SVR\t\t: 0x%x\n", svid); /* Display cpu Pll setting */ phid1 = mfspr(SPRN_HID1); seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } machine_arch_initcall(sbc8548, mpc85xx_common_publish_devices); /* * Called very early, device-tree isn't unflattened */ static int __init sbc8548_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "SBC8548"); } define_machine(sbc8548) { .name = "SBC8548", .probe = sbc8548_probe, .setup_arch = sbc8548_setup_arch, .init_IRQ = sbc8548_pic_init, .show_cpuinfo = sbc8548_show_cpuinfo, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
Stane1983/android_kernel_xiaomi_dior_DEPRECATED
drivers/video/msm/mdp_ppp31.c
3649
8511
/* drivers/video/msm/mdp_ppp31.c * * Copyright (C) 2009 The Linux Foundation. All rights reserved. * Copyright (C) 2009 Google Incorporated * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/errno.h> #include <linux/kernel.h> #include <asm/io.h> #include <linux/msm_mdp.h> #include "mdp_hw.h" #include "mdp_ppp.h" #define NUM_COEFFS 32 struct mdp_scale_coeffs { uint16_t c[4][NUM_COEFFS]; }; struct mdp_scale_tbl_info { uint16_t offset; uint32_t set:2; int use_pr; struct mdp_scale_coeffs coeffs; }; enum { MDP_SCALE_PT2TOPT4, MDP_SCALE_PT4TOPT6, MDP_SCALE_PT6TOPT8, MDP_SCALE_PT8TO8, MDP_SCALE_MAX, }; static struct mdp_scale_coeffs mdp_scale_pr_coeffs = { .c = { [0] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, [1] = { 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, [2] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, 511, }, [3] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, }, }; static struct mdp_scale_tbl_info mdp_scale_tbl[MDP_SCALE_MAX] = { [ MDP_SCALE_PT2TOPT4 ] = { .offset = 0, .set = MDP_PPP_SCALE_COEFF_D0_SET, .use_pr = -1, .coeffs.c = { [0] = { 131, 131, 130, 129, 128, 127, 127, 126, 125, 125, 124, 123, 123, 121, 120, 119, 119, 118, 117, 117, 116, 115, 115, 114, 113, 112, 111, 110, 109, 109, 108, 107, }, [1] = { 141, 140, 140, 140, 140, 139, 138, 138, 138, 137, 137, 137, 136, 137, 
137, 137, 136, 136, 136, 135, 135, 135, 134, 134, 134, 134, 134, 133, 133, 132, 132, 132, }, [2] = { 132, 132, 132, 133, 133, 134, 134, 134, 134, 134, 135, 135, 135, 136, 136, 136, 137, 137, 137, 136, 137, 137, 137, 138, 138, 138, 139, 140, 140, 140, 140, 141, }, [3] = { 107, 108, 109, 109, 110, 111, 112, 113, 114, 115, 115, 116, 117, 117, 118, 119, 119, 120, 121, 123, 123, 124, 125, 125, 126, 127, 127, 128, 129, 130, 131, 131, } }, }, [ MDP_SCALE_PT4TOPT6 ] = { .offset = 32, .set = MDP_PPP_SCALE_COEFF_D1_SET, .use_pr = -1, .coeffs.c = { [0] = { 136, 132, 128, 123, 119, 115, 111, 107, 103, 98, 95, 91, 87, 84, 80, 76, 73, 69, 66, 62, 59, 57, 54, 50, 47, 44, 41, 39, 36, 33, 32, 29, }, [1] = { 206, 205, 204, 204, 201, 200, 199, 197, 196, 194, 191, 191, 189, 185, 184, 182, 180, 178, 176, 173, 170, 168, 165, 162, 160, 157, 155, 152, 148, 146, 142, 140, }, [2] = { 140, 142, 146, 148, 152, 155, 157, 160, 162, 165, 168, 170, 173, 176, 178, 180, 182, 184, 185, 189, 191, 191, 194, 196, 197, 199, 200, 201, 204, 204, 205, 206, }, [3] = { 29, 32, 33, 36, 39, 41, 44, 47, 50, 54, 57, 59, 62, 66, 69, 73, 76, 80, 84, 87, 91, 95, 98, 103, 107, 111, 115, 119, 123, 128, 132, 136, }, }, }, [ MDP_SCALE_PT6TOPT8 ] = { .offset = 64, .set = MDP_PPP_SCALE_COEFF_D2_SET, .use_pr = -1, .coeffs.c = { [0] = { 104, 96, 89, 82, 75, 68, 61, 55, 49, 43, 38, 33, 28, 24, 20, 16, 12, 9, 6, 4, 2, 0, -2, -4, -5, -6, -7, -7, -8, -8, -8, -8, }, [1] = { 303, 303, 302, 300, 298, 296, 293, 289, 286, 281, 276, 270, 265, 258, 252, 245, 238, 230, 223, 214, 206, 197, 189, 180, 172, 163, 154, 145, 137, 128, 120, 112, }, [2] = { 112, 120, 128, 137, 145, 154, 163, 172, 180, 189, 197, 206, 214, 223, 230, 238, 245, 252, 258, 265, 270, 276, 281, 286, 289, 293, 296, 298, 300, 302, 303, 303, }, [3] = { -8, -8, -8, -8, -7, -7, -6, -5, -4, -2, 0, 2, 4, 6, 9, 12, 16, 20, 24, 28, 33, 38, 43, 49, 55, 61, 68, 75, 82, 89, 96, 104, }, }, }, [ MDP_SCALE_PT8TO8 ] = { .offset = 96, .set = MDP_PPP_SCALE_COEFF_U1_SET, .use_pr = -1, 
.coeffs.c = { [0] = { 0, -7, -13, -19, -24, -28, -32, -34, -37, -39, -40, -41, -41, -41, -40, -40, -38, -37, -35, -33, -31, -29, -26, -24, -21, -18, -15, -13, -10, -7, -5, -2, }, [1] = { 511, 507, 501, 494, 485, 475, 463, 450, 436, 422, 405, 388, 370, 352, 333, 314, 293, 274, 253, 233, 213, 193, 172, 152, 133, 113, 95, 77, 60, 43, 28, 13, }, [2] = { 0, 13, 28, 43, 60, 77, 95, 113, 133, 152, 172, 193, 213, 233, 253, 274, 294, 314, 333, 352, 370, 388, 405, 422, 436, 450, 463, 475, 485, 494, 501, 507, }, [3] = { 0, -2, -5, -7, -10, -13, -15, -18, -21, -24, -26, -29, -31, -33, -35, -37, -38, -40, -40, -41, -41, -41, -40, -39, -37, -34, -32, -28, -24, -19, -13, -7, }, }, }, }; static void load_table(const struct mdp_info *mdp, int scale, int use_pr) { int i; uint32_t val; struct mdp_scale_coeffs *coeffs; struct mdp_scale_tbl_info *tbl = &mdp_scale_tbl[scale]; if (use_pr == tbl->use_pr) return; tbl->use_pr = use_pr; if (!use_pr) coeffs = &tbl->coeffs; else coeffs = &mdp_scale_pr_coeffs; for (i = 0; i < NUM_COEFFS; ++i) { val = ((coeffs->c[1][i] & 0x3ff) << 16) | (coeffs->c[0][i] & 0x3ff); mdp_writel(mdp, val, MDP_PPP_SCALE_COEFF_LSBn(tbl->offset + i)); val = ((coeffs->c[3][i] & 0x3ff) << 16) | (coeffs->c[2][i] & 0x3ff); mdp_writel(mdp, val, MDP_PPP_SCALE_COEFF_MSBn(tbl->offset + i)); } } #define SCALER_PHASE_BITS 29 static void scale_params(uint32_t dim_in, uint32_t dim_out, uint32_t scaler, uint32_t *phase_init, uint32_t *phase_step) { uint64_t src = dim_in; uint64_t dst = dim_out; uint64_t numer; uint64_t denom; *phase_init = 0; if (dst == 1) { /* if destination is 1 pixel wide, the value of phase_step * is unimportant. 
*/ *phase_step = (uint32_t) (src << SCALER_PHASE_BITS); if (scaler == MDP_PPP_SCALER_FIR) *phase_init = (uint32_t) ((src - 1) << SCALER_PHASE_BITS); return; } if (scaler == MDP_PPP_SCALER_FIR) { numer = (src - 1) << SCALER_PHASE_BITS; denom = dst - 1; /* we want to round up the result*/ numer += denom - 1; } else { numer = src << SCALER_PHASE_BITS; denom = dst; } do_div(numer, denom); *phase_step = (uint32_t) numer; } static int scale_idx(int factor) { int idx; if (factor > 80) idx = MDP_SCALE_PT8TO8; else if (factor > 60) idx = MDP_SCALE_PT6TOPT8; else if (factor > 40) idx = MDP_SCALE_PT4TOPT6; else idx = MDP_SCALE_PT2TOPT4; return idx; } int mdp_ppp_cfg_scale(const struct mdp_info *mdp, struct ppp_regs *regs, struct mdp_rect *src_rect, struct mdp_rect *dst_rect, uint32_t src_format, uint32_t dst_format) { uint32_t x_fac; uint32_t y_fac; uint32_t scaler_x = MDP_PPP_SCALER_FIR; uint32_t scaler_y = MDP_PPP_SCALER_FIR; // Don't use pixel repeat mode, it looks bad int use_pr = 0; int x_idx; int y_idx; if (unlikely(src_rect->w > 2048 || src_rect->h > 2048)) return -ENOTSUPP; x_fac = (dst_rect->w * 100) / src_rect->w; y_fac = (dst_rect->h * 100) / src_rect->h; /* if down-scaling by a factor smaller than 1/4, use M/N */ scaler_x = x_fac <= 25 ? MDP_PPP_SCALER_MN : MDP_PPP_SCALER_FIR; scaler_y = y_fac <= 25 ? 
MDP_PPP_SCALER_MN : MDP_PPP_SCALER_FIR; scale_params(src_rect->w, dst_rect->w, scaler_x, &regs->phasex_init, &regs->phasex_step); scale_params(src_rect->h, dst_rect->h, scaler_y, &regs->phasey_init, &regs->phasey_step); x_idx = scale_idx(x_fac); y_idx = scale_idx(y_fac); load_table(mdp, x_idx, use_pr); load_table(mdp, y_idx, use_pr); regs->scale_cfg = 0; // Enable SVI when source or destination is YUV if (!IS_RGB(src_format) && !IS_RGB(dst_format)) regs->scale_cfg |= (1 << 6); regs->scale_cfg |= (mdp_scale_tbl[x_idx].set << 2) | (mdp_scale_tbl[x_idx].set << 4); regs->scale_cfg |= (scaler_x << 0) | (scaler_y << 1); return 0; } int mdp_ppp_load_blur(const struct mdp_info *mdp) { return -ENOTSUPP; } void mdp_ppp_init_scale(const struct mdp_info *mdp) { int scale; for (scale = 0; scale < MDP_SCALE_MAX; ++scale) load_table(mdp, scale, 0); }
gpl-2.0
gokulnatha/GT-I9500
drivers/input/touchscreen/ads7846.c
4929
34257
/* * ADS7846 based touchscreen and sensor driver * * Copyright (c) 2005 David Brownell * Copyright (c) 2006 Nokia Corporation * Various changes: Imre Deak <imre.deak@nokia.com> * * Using code from: * - corgi_ts.c * Copyright (C) 2004-2005 Richard Purdie * - omap_ts.[hc], ads7846.h, ts_osk.c * Copyright (C) 2002 MontaVista Software * Copyright (C) 2004 Texas Instruments * Copyright (C) 2005 Dirk Behme * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/hwmon.h> #include <linux/init.h> #include <linux/err.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/input.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/pm.h> #include <linux/gpio.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <linux/regulator/consumer.h> #include <linux/module.h> #include <asm/irq.h> /* * This code has been heavily tested on a Nokia 770, and lightly * tested on other ads7846 devices (OSK/Mistral, Lubbock, Spitz). * TSC2046 is just newer ads7846 silicon. * Support for ads7843 tested on Atmel at91sam926x-EK. * Support for ads7845 has only been stubbed in. * Support for Analog Devices AD7873 and AD7843 tested. * * IRQ handling needs a workaround because of a shortcoming in handling * edge triggered IRQs on some platforms like the OMAP1/2. These * platforms don't handle the ARM lazy IRQ disabling properly, thus we * have to maintain our own SW IRQ disabled status. This should be * removed as soon as the affected platform's IRQ handling is fixed. * * App note sbaa036 talks in more detail about accurate sampling... * that ought to help in situations like LCDs inducing noise (which * can also be helped by using synch signals) and more generally. * This driver tries to utilize the measures described in the app * note. 
The strength of filtering can be set in the board-* specific * files. */ #define TS_POLL_DELAY 1 /* ms delay before the first sample */ #define TS_POLL_PERIOD 5 /* ms delay between samples */ /* this driver doesn't aim at the peak continuous sample rate */ #define SAMPLE_BITS (8 /*cmd*/ + 16 /*sample*/ + 2 /* before, after */) struct ts_event { /* * For portability, we can't read 12 bit values using SPI (which * would make the controller deliver them as native byte order u16 * with msbs zeroed). Instead, we read them as two 8-bit values, * *** WHICH NEED BYTESWAPPING *** and range adjustment. */ u16 x; u16 y; u16 z1, z2; bool ignore; u8 x_buf[3]; u8 y_buf[3]; }; /* * We allocate this separately to avoid cache line sharing issues when * driver is used with DMA-based SPI controllers (like atmel_spi) on * systems where main memory is not DMA-coherent (most non-x86 boards). */ struct ads7846_packet { u8 read_x, read_y, read_z1, read_z2, pwrdown; u16 dummy; /* for the pwrdown read */ struct ts_event tc; /* for ads7845 with mpc5121 psc spi we use 3-byte buffers */ u8 read_x_cmd[3], read_y_cmd[3], pwrdown_cmd[3]; }; struct ads7846 { struct input_dev *input; char phys[32]; char name[32]; struct spi_device *spi; struct regulator *reg; #if defined(CONFIG_HWMON) || defined(CONFIG_HWMON_MODULE) struct attribute_group *attr_group; struct device *hwmon; #endif u16 model; u16 vref_mv; u16 vref_delay_usecs; u16 x_plate_ohms; u16 pressure_max; bool swap_xy; bool use_internal; struct ads7846_packet *packet; struct spi_transfer xfer[18]; struct spi_message msg[5]; int msg_count; wait_queue_head_t wait; bool pendown; int read_cnt; int read_rep; int last_read; u16 debounce_max; u16 debounce_tol; u16 debounce_rep; u16 penirq_recheck_delay_usecs; struct mutex lock; bool stopped; /* P: lock */ bool disabled; /* P: lock */ bool suspended; /* P: lock */ int (*filter)(void *data, int data_idx, int *val); void *filter_data; void (*filter_cleanup)(void *data); int (*get_pendown_state)(void); 
int gpio_pendown; void (*wait_for_sync)(void); }; /* leave chip selected when we're done, for quicker re-select? */ #if 0 #define CS_CHANGE(xfer) ((xfer).cs_change = 1) #else #define CS_CHANGE(xfer) ((xfer).cs_change = 0) #endif /*--------------------------------------------------------------------------*/ /* The ADS7846 has touchscreen and other sensors. * Earlier ads784x chips are somewhat compatible. */ #define ADS_START (1 << 7) #define ADS_A2A1A0_d_y (1 << 4) /* differential */ #define ADS_A2A1A0_d_z1 (3 << 4) /* differential */ #define ADS_A2A1A0_d_z2 (4 << 4) /* differential */ #define ADS_A2A1A0_d_x (5 << 4) /* differential */ #define ADS_A2A1A0_temp0 (0 << 4) /* non-differential */ #define ADS_A2A1A0_vbatt (2 << 4) /* non-differential */ #define ADS_A2A1A0_vaux (6 << 4) /* non-differential */ #define ADS_A2A1A0_temp1 (7 << 4) /* non-differential */ #define ADS_8_BIT (1 << 3) #define ADS_12_BIT (0 << 3) #define ADS_SER (1 << 2) /* non-differential */ #define ADS_DFR (0 << 2) /* differential */ #define ADS_PD10_PDOWN (0 << 0) /* low power mode + penirq */ #define ADS_PD10_ADC_ON (1 << 0) /* ADC on */ #define ADS_PD10_REF_ON (2 << 0) /* vREF on + penirq */ #define ADS_PD10_ALL_ON (3 << 0) /* ADC + vREF on */ #define MAX_12BIT ((1<<12)-1) /* leave ADC powered up (disables penirq) between differential samples */ #define READ_12BIT_DFR(x, adc, vref) (ADS_START | ADS_A2A1A0_d_ ## x \ | ADS_12_BIT | ADS_DFR | \ (adc ? ADS_PD10_ADC_ON : 0) | (vref ? 
ADS_PD10_REF_ON : 0)) #define READ_Y(vref) (READ_12BIT_DFR(y, 1, vref)) #define READ_Z1(vref) (READ_12BIT_DFR(z1, 1, vref)) #define READ_Z2(vref) (READ_12BIT_DFR(z2, 1, vref)) #define READ_X(vref) (READ_12BIT_DFR(x, 1, vref)) #define PWRDOWN (READ_12BIT_DFR(y, 0, 0)) /* LAST */ /* single-ended samples need to first power up reference voltage; * we leave both ADC and VREF powered */ #define READ_12BIT_SER(x) (ADS_START | ADS_A2A1A0_ ## x \ | ADS_12_BIT | ADS_SER) #define REF_ON (READ_12BIT_DFR(x, 1, 1)) #define REF_OFF (READ_12BIT_DFR(y, 0, 0)) /* Must be called with ts->lock held */ static void ads7846_stop(struct ads7846 *ts) { if (!ts->disabled && !ts->suspended) { /* Signal IRQ thread to stop polling and disable the handler. */ ts->stopped = true; mb(); wake_up(&ts->wait); disable_irq(ts->spi->irq); } } /* Must be called with ts->lock held */ static void ads7846_restart(struct ads7846 *ts) { if (!ts->disabled && !ts->suspended) { /* Tell IRQ thread that it may poll the device. */ ts->stopped = false; mb(); enable_irq(ts->spi->irq); } } /* Must be called with ts->lock held */ static void __ads7846_disable(struct ads7846 *ts) { ads7846_stop(ts); regulator_disable(ts->reg); /* * We know the chip's in low power mode since we always * leave it that way after every request */ } /* Must be called with ts->lock held */ static void __ads7846_enable(struct ads7846 *ts) { regulator_enable(ts->reg); ads7846_restart(ts); } static void ads7846_disable(struct ads7846 *ts) { mutex_lock(&ts->lock); if (!ts->disabled) { if (!ts->suspended) __ads7846_disable(ts); ts->disabled = true; } mutex_unlock(&ts->lock); } static void ads7846_enable(struct ads7846 *ts) { mutex_lock(&ts->lock); if (ts->disabled) { ts->disabled = false; if (!ts->suspended) __ads7846_enable(ts); } mutex_unlock(&ts->lock); } /*--------------------------------------------------------------------------*/ /* * Non-touchscreen sensors only use single-ended conversions. * The range is GND..vREF. 
The ads7843 and ads7835 must use external vREF; * ads7846 lets that pin be unconnected, to use internal vREF. */ struct ser_req { u8 ref_on; u8 command; u8 ref_off; u16 scratch; struct spi_message msg; struct spi_transfer xfer[6]; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. */ __be16 sample ____cacheline_aligned; }; struct ads7845_ser_req { u8 command[3]; struct spi_message msg; struct spi_transfer xfer[2]; /* * DMA (thus cache coherency maintenance) requires the * transfer buffers to live in their own cache lines. */ u8 sample[3] ____cacheline_aligned; }; static int ads7846_read12_ser(struct device *dev, unsigned command) { struct spi_device *spi = to_spi_device(dev); struct ads7846 *ts = dev_get_drvdata(dev); struct ser_req *req; int status; req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) return -ENOMEM; spi_message_init(&req->msg); /* maybe turn on internal vREF, and let it settle */ if (ts->use_internal) { req->ref_on = REF_ON; req->xfer[0].tx_buf = &req->ref_on; req->xfer[0].len = 1; spi_message_add_tail(&req->xfer[0], &req->msg); req->xfer[1].rx_buf = &req->scratch; req->xfer[1].len = 2; /* for 1uF, settle for 800 usec; no cap, 100 usec. */ req->xfer[1].delay_usecs = ts->vref_delay_usecs; spi_message_add_tail(&req->xfer[1], &req->msg); /* Enable reference voltage */ command |= ADS_PD10_REF_ON; } /* Enable ADC in every case */ command |= ADS_PD10_ADC_ON; /* take sample */ req->command = (u8) command; req->xfer[2].tx_buf = &req->command; req->xfer[2].len = 1; spi_message_add_tail(&req->xfer[2], &req->msg); req->xfer[3].rx_buf = &req->sample; req->xfer[3].len = 2; spi_message_add_tail(&req->xfer[3], &req->msg); /* REVISIT: take a few more samples, and compare ... 
*/ /* converter in low power mode & enable PENIRQ */ req->ref_off = PWRDOWN; req->xfer[4].tx_buf = &req->ref_off; req->xfer[4].len = 1; spi_message_add_tail(&req->xfer[4], &req->msg); req->xfer[5].rx_buf = &req->scratch; req->xfer[5].len = 2; CS_CHANGE(req->xfer[5]); spi_message_add_tail(&req->xfer[5], &req->msg); mutex_lock(&ts->lock); ads7846_stop(ts); status = spi_sync(spi, &req->msg); ads7846_restart(ts); mutex_unlock(&ts->lock); if (status == 0) { /* on-wire is a must-ignore bit, a BE12 value, then padding */ status = be16_to_cpu(req->sample); status = status >> 3; status &= 0x0fff; } kfree(req); return status; } static int ads7845_read12_ser(struct device *dev, unsigned command) { struct spi_device *spi = to_spi_device(dev); struct ads7846 *ts = dev_get_drvdata(dev); struct ads7845_ser_req *req; int status; req = kzalloc(sizeof *req, GFP_KERNEL); if (!req) return -ENOMEM; spi_message_init(&req->msg); req->command[0] = (u8) command; req->xfer[0].tx_buf = req->command; req->xfer[0].rx_buf = req->sample; req->xfer[0].len = 3; spi_message_add_tail(&req->xfer[0], &req->msg); mutex_lock(&ts->lock); ads7846_stop(ts); status = spi_sync(spi, &req->msg); ads7846_restart(ts); mutex_unlock(&ts->lock); if (status == 0) { /* BE12 value, then padding */ status = be16_to_cpu(*((u16 *)&req->sample[1])); status = status >> 3; status &= 0x0fff; } kfree(req); return status; } #if defined(CONFIG_HWMON) || defined(CONFIG_HWMON_MODULE) #define SHOW(name, var, adjust) static ssize_t \ name ## _show(struct device *dev, struct device_attribute *attr, char *buf) \ { \ struct ads7846 *ts = dev_get_drvdata(dev); \ ssize_t v = ads7846_read12_ser(dev, \ READ_12BIT_SER(var)); \ if (v < 0) \ return v; \ return sprintf(buf, "%u\n", adjust(ts, v)); \ } \ static DEVICE_ATTR(name, S_IRUGO, name ## _show, NULL); /* Sysfs conventions report temperatures in millidegrees Celsius. * ADS7846 could use the low-accuracy two-sample scheme, but can't do the high * accuracy scheme without calibration data. 
For now we won't try either; * userspace sees raw sensor values, and must scale/calibrate appropriately. */ static inline unsigned null_adjust(struct ads7846 *ts, ssize_t v) { return v; } SHOW(temp0, temp0, null_adjust) /* temp1_input */ SHOW(temp1, temp1, null_adjust) /* temp2_input */ /* sysfs conventions report voltages in millivolts. We can convert voltages * if we know vREF. userspace may need to scale vAUX to match the board's * external resistors; we assume that vBATT only uses the internal ones. */ static inline unsigned vaux_adjust(struct ads7846 *ts, ssize_t v) { unsigned retval = v; /* external resistors may scale vAUX into 0..vREF */ retval *= ts->vref_mv; retval = retval >> 12; return retval; } static inline unsigned vbatt_adjust(struct ads7846 *ts, ssize_t v) { unsigned retval = vaux_adjust(ts, v); /* ads7846 has a resistor ladder to scale this signal down */ if (ts->model == 7846) retval *= 4; return retval; } SHOW(in0_input, vaux, vaux_adjust) SHOW(in1_input, vbatt, vbatt_adjust) static struct attribute *ads7846_attributes[] = { &dev_attr_temp0.attr, &dev_attr_temp1.attr, &dev_attr_in0_input.attr, &dev_attr_in1_input.attr, NULL, }; static struct attribute_group ads7846_attr_group = { .attrs = ads7846_attributes, }; static struct attribute *ads7843_attributes[] = { &dev_attr_in0_input.attr, &dev_attr_in1_input.attr, NULL, }; static struct attribute_group ads7843_attr_group = { .attrs = ads7843_attributes, }; static struct attribute *ads7845_attributes[] = { &dev_attr_in0_input.attr, NULL, }; static struct attribute_group ads7845_attr_group = { .attrs = ads7845_attributes, }; static int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts) { struct device *hwmon; int err; /* hwmon sensors need a reference voltage */ switch (ts->model) { case 7846: if (!ts->vref_mv) { dev_dbg(&spi->dev, "assuming 2.5V internal vREF\n"); ts->vref_mv = 2500; ts->use_internal = true; } break; case 7845: case 7843: if (!ts->vref_mv) { dev_warn(&spi->dev, 
"external vREF for ADS%d not specified\n", ts->model); return 0; } break; } /* different chips have different sensor groups */ switch (ts->model) { case 7846: ts->attr_group = &ads7846_attr_group; break; case 7845: ts->attr_group = &ads7845_attr_group; break; case 7843: ts->attr_group = &ads7843_attr_group; break; default: dev_dbg(&spi->dev, "ADS%d not recognized\n", ts->model); return 0; } err = sysfs_create_group(&spi->dev.kobj, ts->attr_group); if (err) return err; hwmon = hwmon_device_register(&spi->dev); if (IS_ERR(hwmon)) { sysfs_remove_group(&spi->dev.kobj, ts->attr_group); return PTR_ERR(hwmon); } ts->hwmon = hwmon; return 0; } static void ads784x_hwmon_unregister(struct spi_device *spi, struct ads7846 *ts) { if (ts->hwmon) { sysfs_remove_group(&spi->dev.kobj, ts->attr_group); hwmon_device_unregister(ts->hwmon); } } #else static inline int ads784x_hwmon_register(struct spi_device *spi, struct ads7846 *ts) { return 0; } static inline void ads784x_hwmon_unregister(struct spi_device *spi, struct ads7846 *ts) { } #endif static ssize_t ads7846_pen_down_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ads7846 *ts = dev_get_drvdata(dev); return sprintf(buf, "%u\n", ts->pendown); } static DEVICE_ATTR(pen_down, S_IRUGO, ads7846_pen_down_show, NULL); static ssize_t ads7846_disable_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ads7846 *ts = dev_get_drvdata(dev); return sprintf(buf, "%u\n", ts->disabled); } static ssize_t ads7846_disable_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ads7846 *ts = dev_get_drvdata(dev); unsigned int i; int err; err = kstrtouint(buf, 10, &i); if (err) return err; if (i) ads7846_disable(ts); else ads7846_enable(ts); return count; } static DEVICE_ATTR(disable, 0664, ads7846_disable_show, ads7846_disable_store); static struct attribute *ads784x_attributes[] = { &dev_attr_pen_down.attr, &dev_attr_disable.attr, NULL, }; static struct 
attribute_group ads784x_attr_group = { .attrs = ads784x_attributes, }; /*--------------------------------------------------------------------------*/ static int get_pendown_state(struct ads7846 *ts) { if (ts->get_pendown_state) return ts->get_pendown_state(); return !gpio_get_value(ts->gpio_pendown); } static void null_wait_for_sync(void) { } static int ads7846_debounce_filter(void *ads, int data_idx, int *val) { struct ads7846 *ts = ads; if (!ts->read_cnt || (abs(ts->last_read - *val) > ts->debounce_tol)) { /* Start over collecting consistent readings. */ ts->read_rep = 0; /* * Repeat it, if this was the first read or the read * wasn't consistent enough. */ if (ts->read_cnt < ts->debounce_max) { ts->last_read = *val; ts->read_cnt++; return ADS7846_FILTER_REPEAT; } else { /* * Maximum number of debouncing reached and still * not enough number of consistent readings. Abort * the whole sample, repeat it in the next sampling * period. */ ts->read_cnt = 0; return ADS7846_FILTER_IGNORE; } } else { if (++ts->read_rep > ts->debounce_rep) { /* * Got a good reading for this coordinate, * go for the next one. */ ts->read_cnt = 0; ts->read_rep = 0; return ADS7846_FILTER_OK; } else { /* Read more values that are consistent. */ ts->read_cnt++; return ADS7846_FILTER_REPEAT; } } } static int ads7846_no_filter(void *ads, int data_idx, int *val) { return ADS7846_FILTER_OK; } static int ads7846_get_value(struct ads7846 *ts, struct spi_message *m) { struct spi_transfer *t = list_entry(m->transfers.prev, struct spi_transfer, transfer_list); if (ts->model == 7845) { return be16_to_cpup((__be16 *)&(((char*)t->rx_buf)[1])) >> 3; } else { /* * adjust: on-wire is a must-ignore bit, a BE12 value, then * padding; built from two 8 bit values written msb-first. 
*/ return be16_to_cpup((__be16 *)t->rx_buf) >> 3; } } static void ads7846_update_value(struct spi_message *m, int val) { struct spi_transfer *t = list_entry(m->transfers.prev, struct spi_transfer, transfer_list); *(u16 *)t->rx_buf = val; } static void ads7846_read_state(struct ads7846 *ts) { struct ads7846_packet *packet = ts->packet; struct spi_message *m; int msg_idx = 0; int val; int action; int error; while (msg_idx < ts->msg_count) { ts->wait_for_sync(); m = &ts->msg[msg_idx]; error = spi_sync(ts->spi, m); if (error) { dev_err(&ts->spi->dev, "spi_async --> %d\n", error); packet->tc.ignore = true; return; } /* * Last message is power down request, no need to convert * or filter the value. */ if (msg_idx < ts->msg_count - 1) { val = ads7846_get_value(ts, m); action = ts->filter(ts->filter_data, msg_idx, &val); switch (action) { case ADS7846_FILTER_REPEAT: continue; case ADS7846_FILTER_IGNORE: packet->tc.ignore = true; msg_idx = ts->msg_count - 1; continue; case ADS7846_FILTER_OK: ads7846_update_value(m, val); packet->tc.ignore = false; msg_idx++; break; default: BUG(); } } else { msg_idx++; } } } static void ads7846_report_state(struct ads7846 *ts) { struct ads7846_packet *packet = ts->packet; unsigned int Rt; u16 x, y, z1, z2; /* * ads7846_get_value() does in-place conversion (including byte swap) * from on-the-wire format as part of debouncing to get stable * readings. 
*/ if (ts->model == 7845) { x = *(u16 *)packet->tc.x_buf; y = *(u16 *)packet->tc.y_buf; z1 = 0; z2 = 0; } else { x = packet->tc.x; y = packet->tc.y; z1 = packet->tc.z1; z2 = packet->tc.z2; } /* range filtering */ if (x == MAX_12BIT) x = 0; if (ts->model == 7843) { Rt = ts->pressure_max / 2; } else if (ts->model == 7845) { if (get_pendown_state(ts)) Rt = ts->pressure_max / 2; else Rt = 0; dev_vdbg(&ts->spi->dev, "x/y: %d/%d, PD %d\n", x, y, Rt); } else if (likely(x && z1)) { /* compute touch pressure resistance using equation #2 */ Rt = z2; Rt -= z1; Rt *= x; Rt *= ts->x_plate_ohms; Rt /= z1; Rt = (Rt + 2047) >> 12; } else { Rt = 0; } /* * Sample found inconsistent by debouncing or pressure is beyond * the maximum. Don't report it to user space, repeat at least * once more the measurement */ if (packet->tc.ignore || Rt > ts->pressure_max) { dev_vdbg(&ts->spi->dev, "ignored %d pressure %d\n", packet->tc.ignore, Rt); return; } /* * Maybe check the pendown state before reporting. This discards * false readings when the pen is lifted. */ if (ts->penirq_recheck_delay_usecs) { udelay(ts->penirq_recheck_delay_usecs); if (!get_pendown_state(ts)) Rt = 0; } /* * NOTE: We can't rely on the pressure to determine the pen down * state, even this controller has a pressure sensor. The pressure * value can fluctuate for quite a while after lifting the pen and * in some cases may not even settle at the expected value. * * The only safe way to check for the pen up condition is in the * timer by reading the pen signal state (it's a GPIO _and_ IRQ). 
*/ if (Rt) { struct input_dev *input = ts->input; if (ts->swap_xy) swap(x, y); if (!ts->pendown) { input_report_key(input, BTN_TOUCH, 1); ts->pendown = true; dev_vdbg(&ts->spi->dev, "DOWN\n"); } input_report_abs(input, ABS_X, x); input_report_abs(input, ABS_Y, y); input_report_abs(input, ABS_PRESSURE, ts->pressure_max - Rt); input_sync(input); dev_vdbg(&ts->spi->dev, "%4d/%4d/%4d\n", x, y, Rt); } } static irqreturn_t ads7846_hard_irq(int irq, void *handle) { struct ads7846 *ts = handle; return get_pendown_state(ts) ? IRQ_WAKE_THREAD : IRQ_HANDLED; } static irqreturn_t ads7846_irq(int irq, void *handle) { struct ads7846 *ts = handle; /* Start with a small delay before checking pendown state */ msleep(TS_POLL_DELAY); while (!ts->stopped && get_pendown_state(ts)) { /* pen is down, continue with the measurement */ ads7846_read_state(ts); if (!ts->stopped) ads7846_report_state(ts); wait_event_timeout(ts->wait, ts->stopped, msecs_to_jiffies(TS_POLL_PERIOD)); } if (ts->pendown) { struct input_dev *input = ts->input; input_report_key(input, BTN_TOUCH, 0); input_report_abs(input, ABS_PRESSURE, 0); input_sync(input); ts->pendown = false; dev_vdbg(&ts->spi->dev, "UP\n"); } return IRQ_HANDLED; } #ifdef CONFIG_PM_SLEEP static int ads7846_suspend(struct device *dev) { struct ads7846 *ts = dev_get_drvdata(dev); mutex_lock(&ts->lock); if (!ts->suspended) { if (!ts->disabled) __ads7846_disable(ts); if (device_may_wakeup(&ts->spi->dev)) enable_irq_wake(ts->spi->irq); ts->suspended = true; } mutex_unlock(&ts->lock); return 0; } static int ads7846_resume(struct device *dev) { struct ads7846 *ts = dev_get_drvdata(dev); mutex_lock(&ts->lock); if (ts->suspended) { ts->suspended = false; if (device_may_wakeup(&ts->spi->dev)) disable_irq_wake(ts->spi->irq); if (!ts->disabled) __ads7846_enable(ts); } mutex_unlock(&ts->lock); return 0; } #endif static SIMPLE_DEV_PM_OPS(ads7846_pm, ads7846_suspend, ads7846_resume); static int __devinit ads7846_setup_pendown(struct spi_device *spi, struct 
ads7846 *ts) { struct ads7846_platform_data *pdata = spi->dev.platform_data; int err; /* * REVISIT when the irq can be triggered active-low, or if for some * reason the touchscreen isn't hooked up, we don't need to access * the pendown state. */ if (pdata->get_pendown_state) { ts->get_pendown_state = pdata->get_pendown_state; } else if (gpio_is_valid(pdata->gpio_pendown)) { err = gpio_request_one(pdata->gpio_pendown, GPIOF_IN, "ads7846_pendown"); if (err) { dev_err(&spi->dev, "failed to request/setup pendown GPIO%d: %d\n", pdata->gpio_pendown, err); return err; } ts->gpio_pendown = pdata->gpio_pendown; } else { dev_err(&spi->dev, "no get_pendown_state nor gpio_pendown?\n"); return -EINVAL; } return 0; } /* * Set up the transfers to read touchscreen state; this assumes we * use formula #2 for pressure, not #3. */ static void __devinit ads7846_setup_spi_msg(struct ads7846 *ts, const struct ads7846_platform_data *pdata) { struct spi_message *m = &ts->msg[0]; struct spi_transfer *x = ts->xfer; struct ads7846_packet *packet = ts->packet; int vref = pdata->keep_vref_on; if (ts->model == 7873) { /* * The AD7873 is almost identical to the ADS7846 * keep VREF off during differential/ratiometric * conversion modes. */ ts->model = 7846; vref = 0; } ts->msg_count = 1; spi_message_init(m); m->context = ts; if (ts->model == 7845) { packet->read_y_cmd[0] = READ_Y(vref); packet->read_y_cmd[1] = 0; packet->read_y_cmd[2] = 0; x->tx_buf = &packet->read_y_cmd[0]; x->rx_buf = &packet->tc.y_buf[0]; x->len = 3; spi_message_add_tail(x, m); } else { /* y- still on; turn on only y+ (and ADC) */ packet->read_y = READ_Y(vref); x->tx_buf = &packet->read_y; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.y; x->len = 2; spi_message_add_tail(x, m); } /* * The first sample after switching drivers can be low quality; * optionally discard it, using a second one after the signals * have had enough time to stabilize. 
*/ if (pdata->settle_delay_usecs) { x->delay_usecs = pdata->settle_delay_usecs; x++; x->tx_buf = &packet->read_y; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.y; x->len = 2; spi_message_add_tail(x, m); } ts->msg_count++; m++; spi_message_init(m); m->context = ts; if (ts->model == 7845) { x++; packet->read_x_cmd[0] = READ_X(vref); packet->read_x_cmd[1] = 0; packet->read_x_cmd[2] = 0; x->tx_buf = &packet->read_x_cmd[0]; x->rx_buf = &packet->tc.x_buf[0]; x->len = 3; spi_message_add_tail(x, m); } else { /* turn y- off, x+ on, then leave in lowpower */ x++; packet->read_x = READ_X(vref); x->tx_buf = &packet->read_x; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.x; x->len = 2; spi_message_add_tail(x, m); } /* ... maybe discard first sample ... */ if (pdata->settle_delay_usecs) { x->delay_usecs = pdata->settle_delay_usecs; x++; x->tx_buf = &packet->read_x; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.x; x->len = 2; spi_message_add_tail(x, m); } /* turn y+ off, x- on; we'll use formula #2 */ if (ts->model == 7846) { ts->msg_count++; m++; spi_message_init(m); m->context = ts; x++; packet->read_z1 = READ_Z1(vref); x->tx_buf = &packet->read_z1; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.z1; x->len = 2; spi_message_add_tail(x, m); /* ... maybe discard first sample ... */ if (pdata->settle_delay_usecs) { x->delay_usecs = pdata->settle_delay_usecs; x++; x->tx_buf = &packet->read_z1; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.z1; x->len = 2; spi_message_add_tail(x, m); } ts->msg_count++; m++; spi_message_init(m); m->context = ts; x++; packet->read_z2 = READ_Z2(vref); x->tx_buf = &packet->read_z2; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.z2; x->len = 2; spi_message_add_tail(x, m); /* ... maybe discard first sample ... 
*/ if (pdata->settle_delay_usecs) { x->delay_usecs = pdata->settle_delay_usecs; x++; x->tx_buf = &packet->read_z2; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->tc.z2; x->len = 2; spi_message_add_tail(x, m); } } /* power down */ ts->msg_count++; m++; spi_message_init(m); m->context = ts; if (ts->model == 7845) { x++; packet->pwrdown_cmd[0] = PWRDOWN; packet->pwrdown_cmd[1] = 0; packet->pwrdown_cmd[2] = 0; x->tx_buf = &packet->pwrdown_cmd[0]; x->len = 3; } else { x++; packet->pwrdown = PWRDOWN; x->tx_buf = &packet->pwrdown; x->len = 1; spi_message_add_tail(x, m); x++; x->rx_buf = &packet->dummy; x->len = 2; } CS_CHANGE(*x); spi_message_add_tail(x, m); } static int __devinit ads7846_probe(struct spi_device *spi) { struct ads7846 *ts; struct ads7846_packet *packet; struct input_dev *input_dev; struct ads7846_platform_data *pdata = spi->dev.platform_data; unsigned long irq_flags; int err; if (!spi->irq) { dev_dbg(&spi->dev, "no IRQ?\n"); return -ENODEV; } if (!pdata) { dev_dbg(&spi->dev, "no platform data?\n"); return -ENODEV; } /* don't exceed max specified sample rate */ if (spi->max_speed_hz > (125000 * SAMPLE_BITS)) { dev_dbg(&spi->dev, "f(sample) %d KHz?\n", (spi->max_speed_hz/SAMPLE_BITS)/1000); return -EINVAL; } /* We'd set TX word size 8 bits and RX word size to 13 bits ... except * that even if the hardware can do that, the SPI controller driver * may not. So we stick to very-portable 8 bit words, both RX and TX. 
*/ spi->bits_per_word = 8; spi->mode = SPI_MODE_0; err = spi_setup(spi); if (err < 0) return err; ts = kzalloc(sizeof(struct ads7846), GFP_KERNEL); packet = kzalloc(sizeof(struct ads7846_packet), GFP_KERNEL); input_dev = input_allocate_device(); if (!ts || !packet || !input_dev) { err = -ENOMEM; goto err_free_mem; } dev_set_drvdata(&spi->dev, ts); ts->packet = packet; ts->spi = spi; ts->input = input_dev; ts->vref_mv = pdata->vref_mv; ts->swap_xy = pdata->swap_xy; mutex_init(&ts->lock); init_waitqueue_head(&ts->wait); ts->model = pdata->model ? : 7846; ts->vref_delay_usecs = pdata->vref_delay_usecs ? : 100; ts->x_plate_ohms = pdata->x_plate_ohms ? : 400; ts->pressure_max = pdata->pressure_max ? : ~0; if (pdata->filter != NULL) { if (pdata->filter_init != NULL) { err = pdata->filter_init(pdata, &ts->filter_data); if (err < 0) goto err_free_mem; } ts->filter = pdata->filter; ts->filter_cleanup = pdata->filter_cleanup; } else if (pdata->debounce_max) { ts->debounce_max = pdata->debounce_max; if (ts->debounce_max < 2) ts->debounce_max = 2; ts->debounce_tol = pdata->debounce_tol; ts->debounce_rep = pdata->debounce_rep; ts->filter = ads7846_debounce_filter; ts->filter_data = ts; } else { ts->filter = ads7846_no_filter; } err = ads7846_setup_pendown(spi, ts); if (err) goto err_cleanup_filter; if (pdata->penirq_recheck_delay_usecs) ts->penirq_recheck_delay_usecs = pdata->penirq_recheck_delay_usecs; ts->wait_for_sync = pdata->wait_for_sync ? : null_wait_for_sync; snprintf(ts->phys, sizeof(ts->phys), "%s/input0", dev_name(&spi->dev)); snprintf(ts->name, sizeof(ts->name), "ADS%d Touchscreen", ts->model); input_dev->name = ts->name; input_dev->phys = ts->phys; input_dev->dev.parent = &spi->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, pdata->x_min ? : 0, pdata->x_max ? : MAX_12BIT, 0, 0); input_set_abs_params(input_dev, ABS_Y, pdata->y_min ? 
: 0, pdata->y_max ? : MAX_12BIT, 0, 0); input_set_abs_params(input_dev, ABS_PRESSURE, pdata->pressure_min, pdata->pressure_max, 0, 0); ads7846_setup_spi_msg(ts, pdata); ts->reg = regulator_get(&spi->dev, "vcc"); if (IS_ERR(ts->reg)) { err = PTR_ERR(ts->reg); dev_err(&spi->dev, "unable to get regulator: %d\n", err); goto err_free_gpio; } err = regulator_enable(ts->reg); if (err) { dev_err(&spi->dev, "unable to enable regulator: %d\n", err); goto err_put_regulator; } irq_flags = pdata->irq_flags ? : IRQF_TRIGGER_FALLING; irq_flags |= IRQF_ONESHOT; err = request_threaded_irq(spi->irq, ads7846_hard_irq, ads7846_irq, irq_flags, spi->dev.driver->name, ts); if (err && !pdata->irq_flags) { dev_info(&spi->dev, "trying pin change workaround on irq %d\n", spi->irq); irq_flags |= IRQF_TRIGGER_RISING; err = request_threaded_irq(spi->irq, ads7846_hard_irq, ads7846_irq, irq_flags, spi->dev.driver->name, ts); } if (err) { dev_dbg(&spi->dev, "irq %d busy?\n", spi->irq); goto err_disable_regulator; } err = ads784x_hwmon_register(spi, ts); if (err) goto err_free_irq; dev_info(&spi->dev, "touchscreen, irq %d\n", spi->irq); /* * Take a first sample, leaving nPENIRQ active and vREF off; avoid * the touchscreen, in case it's not connected. 
*/ if (ts->model == 7845) ads7845_read12_ser(&spi->dev, PWRDOWN); else (void) ads7846_read12_ser(&spi->dev, READ_12BIT_SER(vaux)); err = sysfs_create_group(&spi->dev.kobj, &ads784x_attr_group); if (err) goto err_remove_hwmon; err = input_register_device(input_dev); if (err) goto err_remove_attr_group; device_init_wakeup(&spi->dev, pdata->wakeup); return 0; err_remove_attr_group: sysfs_remove_group(&spi->dev.kobj, &ads784x_attr_group); err_remove_hwmon: ads784x_hwmon_unregister(spi, ts); err_free_irq: free_irq(spi->irq, ts); err_disable_regulator: regulator_disable(ts->reg); err_put_regulator: regulator_put(ts->reg); err_free_gpio: if (!ts->get_pendown_state) gpio_free(ts->gpio_pendown); err_cleanup_filter: if (ts->filter_cleanup) ts->filter_cleanup(ts->filter_data); err_free_mem: input_free_device(input_dev); kfree(packet); kfree(ts); return err; } static int __devexit ads7846_remove(struct spi_device *spi) { struct ads7846 *ts = dev_get_drvdata(&spi->dev); device_init_wakeup(&spi->dev, false); sysfs_remove_group(&spi->dev.kobj, &ads784x_attr_group); ads7846_disable(ts); free_irq(ts->spi->irq, ts); input_unregister_device(ts->input); ads784x_hwmon_unregister(spi, ts); regulator_disable(ts->reg); regulator_put(ts->reg); if (!ts->get_pendown_state) { /* * If we are not using specialized pendown method we must * have been relying on gpio we set up ourselves. */ gpio_free(ts->gpio_pendown); } if (ts->filter_cleanup) ts->filter_cleanup(ts->filter_data); kfree(ts->packet); kfree(ts); dev_dbg(&spi->dev, "unregistered touchscreen\n"); return 0; } static struct spi_driver ads7846_driver = { .driver = { .name = "ads7846", .owner = THIS_MODULE, .pm = &ads7846_pm, }, .probe = ads7846_probe, .remove = __devexit_p(ads7846_remove), }; module_spi_driver(ads7846_driver); MODULE_DESCRIPTION("ADS7846 TouchScreen Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:ads7846");
gpl-2.0
dexmc/flo2
drivers/watchdog/sb_wdog.c
7233
8770
/*
 * Watchdog driver for SiByte SB1 SoCs
 *
 * Copyright (C) 2007 OnStor, Inc.
 * Andrew Sharp <andy.sharp@lsi.com>
 *
 * This driver is intended to make the second of two hardware watchdogs
 * on the Sibyte 12XX and 11XX SoCs available to the user.  There are two
 * such devices available on the SoC, but it seems that there isn't an
 * enumeration class for watchdogs in Linux like there is for RTCs.
 * The second is used rather than the first because it uses IRQ 1,
 * thereby avoiding all that IRQ 0 problematic nonsense.
 *
 * I have not tried this driver on a 1480 processor; it might work
 * just well enough to really screw things up.
 *
 * It is a simple timer, and there is an interrupt that is raised the
 * first time the timer expires.  The second time it expires, the chip
 * is reset and there is no way to redirect that NMI.  Which could
 * be problematic in some cases where this chip is sitting on the HT
 * bus and has just taken responsibility for providing a cache block.
 * Since the reset can't be redirected to the external reset pin, it is
 * possible that other HT connected processors might hang and not reset.
 * For Linux, a soft reset would probably be even worse than a hard reset.
 * There you have it.
 *
 * The timer takes 23 bits of a 64 bit register (?) as a count value,
 * and decrements the count every microsecond, for a max value of
 * 0x7fffff usec or about 8.3ish seconds.
 *
 * This watchdog borrows some user semantics from the softdog driver,
 * in that if you close the fd, it leaves the watchdog running, unless
 * you previously wrote a 'V' to the fd, in which case it disables
 * the watchdog when you close the fd like some other drivers.
 *
 * Based on various other watchdog drivers, which are probably all
 * loosely based on something Alan Cox wrote years ago.
 *
 * (c) Copyright 1996 Alan Cox <alan@lxorguk.ukuu.org.uk>,
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 1 or 2 as published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/io.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/reboot.h>
#include <linux/miscdevice.h>
#include <linux/watchdog.h>
#include <linux/interrupt.h>

#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/sb1250_scd.h>

/* Serializes all accesses to the watchdog config/init registers. */
static DEFINE_SPINLOCK(sbwd_lock);

/*
 * set the initial count value of a timer
 *
 * wdog is the iomem address of the cfg register
 *
 * Stops the timer (writes 0 to the cfg register) before loading the
 * 23-bit count into the init register, which sits 0x10 below cfg.
 */
void sbwdog_set(char __iomem *wdog, unsigned long t)
{
	spin_lock(&sbwd_lock);
	__raw_writeb(0, wdog);
	__raw_writeq(t & 0x7fffffUL, wdog - 0x10);
	spin_unlock(&sbwd_lock);
}

/*
 * cause the timer to [re]load it's initial count and start counting
 * all over again
 *
 * wdog is the iomem address of the cfg register
 */
void sbwdog_pet(char __iomem *wdog)
{
	spin_lock(&sbwd_lock);
	__raw_writeb(__raw_readb(wdog) | 1, wdog);
	spin_unlock(&sbwd_lock);
}

static unsigned long sbwdog_gate; /* keeps it to one thread only */
static char __iomem *kern_dog = (char __iomem *)(IO_BASE + (A_SCD_WDOG_CFG_0));
static char __iomem *user_dog = (char __iomem *)(IO_BASE + (A_SCD_WDOG_CFG_1));
static unsigned long timeout = 0x7fffffUL;	/* useconds: 8.3ish secs. */
static int expect_close;	/* set to 42 when a 'V' has been written */

static const struct watchdog_info ident = {
	.options	= WDIOF_CARDRESET | WDIOF_SETTIMEOUT |
					WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
	.identity	= "SiByte Watchdog",
};

/*
 * Allow only a single thread to walk the dog
 */
static int sbwdog_open(struct inode *inode, struct file *file)
{
	nonseekable_open(inode, file);
	if (test_and_set_bit(0, &sbwdog_gate))
		return -EBUSY;
	__module_get(THIS_MODULE);

	/*
	 * Activate the timer
	 */
	sbwdog_set(user_dog, timeout);
	__raw_writeb(1, user_dog);

	return 0;
}

/*
 * Put the dog back in the kennel.
 *
 * Only stops the watchdog on the magic close ('V' written first);
 * otherwise it pets it once more and leaves it running.
 */
static int sbwdog_release(struct inode *inode, struct file *file)
{
	if (expect_close == 42) {
		__raw_writeb(0, user_dog);
		module_put(THIS_MODULE);
	} else {
		pr_crit("%s: Unexpected close, not stopping watchdog!\n",
			ident.identity);
		sbwdog_pet(user_dog);
	}
	clear_bit(0, &sbwdog_gate);
	expect_close = 0;

	return 0;
}

/*
 * 42 - the answer
 *
 * Any write pets the dog; a 'V' anywhere in the data arms the magic
 * close so that the next release actually stops the watchdog.
 */
static ssize_t sbwdog_write(struct file *file, const char __user *data,
		size_t len, loff_t *ppos)
{
	int i;

	if (len) {
		/*
		 * restart the timer
		 */
		expect_close = 0;

		for (i = 0; i != len; i++) {
			char c;

			if (get_user(c, data + i))
				return -EFAULT;
			if (c == 'V')
				expect_close = 42;
		}
		sbwdog_pet(user_dog);
	}

	return len;
}

static long sbwdog_ioctl(struct file *file, unsigned int cmd,
						unsigned long arg)
{
	int ret = -ENOTTY;
	unsigned long time;
	void __user *argp = (void __user *)arg;
	int __user *p = argp;

	switch (cmd) {
	case WDIOC_GETSUPPORT:
		ret = copy_to_user(argp, &ident, sizeof(ident)) ? -EFAULT : 0;
		break;

	case WDIOC_GETSTATUS:
	case WDIOC_GETBOOTSTATUS:
		ret = put_user(0, p);
		break;

	case WDIOC_KEEPALIVE:
		sbwdog_pet(user_dog);
		ret = 0;
		break;

	case WDIOC_SETTIMEOUT:
		/* user supplies seconds; hardware counts microseconds */
		ret = get_user(time, p);
		if (ret)
			break;

		time *= 1000000;
		if (time > 0x7fffffUL) {
			ret = -EINVAL;
			break;
		}
		timeout = time;
		sbwdog_set(user_dog, timeout);
		sbwdog_pet(user_dog);

		/* Fall through - SETTIMEOUT also returns the new timeout */

	case WDIOC_GETTIMEOUT:
		/*
		 * get the remaining count from the ... count register
		 * which is 1*8 before the config register
		 */
		ret = put_user(__raw_readq(user_dog - 8) / 1000000, p);
		break;
	}
	return ret;
}

/*
 * Notifier for system down
 *
 * Stops both watchdogs so a pending timeout can't reset the box while
 * it is halting.
 */
static int sbwdog_notify_sys(struct notifier_block *this, unsigned long code,
								void *erf)
{
	if (code == SYS_DOWN || code == SYS_HALT) {
		/*
		 * sit and sit
		 */
		__raw_writeb(0, user_dog);
		__raw_writeb(0, kern_dog);
	}
	return NOTIFY_DONE;
}

static const struct file_operations sbwdog_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.write		= sbwdog_write,
	.unlocked_ioctl	= sbwdog_ioctl,
	.open		= sbwdog_open,
	.release	= sbwdog_release,
};

static struct miscdevice sbwdog_miscdev = {
	.minor		= WATCHDOG_MINOR,
	.name		= "watchdog",
	.fops		= &sbwdog_fops,
};

static struct notifier_block sbwdog_notifier = {
	.notifier_call	= sbwdog_notify_sys,
};

/*
 * interrupt handler
 *
 * doesn't do a whole lot for user, but oh so cleverly written so kernel
 * code can use it to re-up the watchdog, thereby saving the kernel from
 * having to create and maintain a timer, just to tickle another timer,
 * which is just so wrong.
 *
 * For the user dog it only warns (the second expiry will reset the
 * chip); for any other dog it re-arms the timer by setting bit 0.
 */
irqreturn_t sbwdog_interrupt(int irq, void *addr)
{
	unsigned long wd_init;
	char *wd_cfg_reg = (char *)addr;
	u8 cfg;

	cfg = __raw_readb(wd_cfg_reg);
	/* init register is 8 bytes below cfg; mask to the 23-bit count */
	wd_init = __raw_readq(wd_cfg_reg - 8) & 0x7fffff;

	/*
	 * if it's the second watchdog timer, it's for those users
	 */
	if (wd_cfg_reg == user_dog)
		pr_crit("%s in danger of initiating system reset "
			"in %ld.%01ld seconds\n",
			ident.identity,
			wd_init / 1000000, (wd_init / 100000) % 10);
	else
		cfg |= 1;

	__raw_writeb(cfg, wd_cfg_reg);

	return IRQ_HANDLED;
}

static int __init sbwdog_init(void)
{
	int ret;

	/*
	 * register a reboot notifier
	 */
	ret = register_reboot_notifier(&sbwdog_notifier);
	if (ret) {
		pr_err("%s: cannot register reboot notifier (err=%d)\n",
			ident.identity, ret);
		return ret;
	}

	/*
	 * get the resources
	 */
	ret = request_irq(1, sbwdog_interrupt, IRQF_SHARED,
		ident.identity, (void *)user_dog);
	if (ret) {
		pr_err("%s: failed to request irq 1 - %d\n",
			ident.identity, ret);
		goto out;
	}

	ret = misc_register(&sbwdog_miscdev);
	if (ret == 0) {
		pr_info("%s: timeout is %ld.%ld secs\n",
			ident.identity,
			timeout / 1000000, (timeout / 100000) % 10);
		return 0;
	}
	free_irq(1, (void *)user_dog);
out:
	unregister_reboot_notifier(&sbwdog_notifier);

	return ret;
}

static void __exit sbwdog_exit(void)
{
	misc_deregister(&sbwdog_miscdev);
	free_irq(1, (void *)user_dog);
	unregister_reboot_notifier(&sbwdog_notifier);
}

module_init(sbwdog_init);
module_exit(sbwdog_exit);
MODULE_AUTHOR("Andrew Sharp <andy.sharp@lsi.com>");
MODULE_DESCRIPTION("SiByte Watchdog");

module_param(timeout, ulong, 0);
MODULE_PARM_DESC(timeout,
	"Watchdog timeout in microseconds (max/default 8388607 or 8.3ish secs)");

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);

/*
 * example code that can be put in a platform code area to utilize the
 * first watchdog timer for the kernels own purpose.

void platform_wd_setup(void)
{
	int ret;

	ret = request_irq(1, sbwdog_interrupt, IRQF_SHARED,
		"Kernel Watchdog", IOADDR(A_SCD_WDOG_CFG_0));
	if (ret) {
		pr_crit("Watchdog IRQ zero(0) failed to be requested - %d\n",
			ret);
	}
}
 */
gpl-2.0
rofirrim/gcc-tiny
libgo/runtime/go-unsetenv.c
66
1150
/* go-unsetenv.c -- unset an environment variable from Go.

   Copyright 2015 The Go Authors. All rights reserved.
   Use of this source code is governed by a BSD-style
   license that can be found in the LICENSE file.  */

#include "config.h"

#include <stddef.h>
#include <stdlib.h>

#include "go-alloc.h"
#include "runtime.h"
#include "arch.h"
#include "malloc.h"

/* Unset an environment variable from Go.  This is called by
   syscall.Unsetenv.

   Go strings are not NUL-terminated, so if the backing bytes are not
   already followed by a NUL we take a NUL-terminated copy before
   handing the name to the C library's unsetenv().  */

void unsetenv_c (String) __asm__ (GOSYM_PREFIX "syscall.unsetenv_c");

void
unsetenv_c (String k)
{
  const byte *ks;
  unsigned char *kn;
  intgo len;

  ks = k.str;
  if (ks == NULL)
    ks = (const byte *) "";
  kn = NULL;

#ifdef HAVE_UNSETENV

  /* ks is guaranteed non-NULL here (forced to "" above), so only the
     missing-terminator check is needed.  */
  if (ks[k.len] != 0)
    {
      // Objects that are explicitly freed must be at least 16 bytes in size,
      // so that they are not allocated using tiny alloc.
      len = k.len + 1;
      if (len < TinySize)
	len = TinySize;
      /* NOTE(review): relies on __go_alloc returning zeroed memory so
	 that kn[k.len] is the NUL terminator -- confirm against the
	 runtime allocator.  */
      kn = __go_alloc (len);
      __builtin_memcpy (kn, ks, k.len);
      ks = kn;
    }

  unsetenv ((const char *) ks);

#endif /* HAVE_UNSETENV */

  if (kn != NULL)
    __go_free (kn);
}
gpl-2.0
remakeelectric/openwrt-backfire
target/linux/adm5120/files/arch/mips/adm5120/infineon/infineon.c
66
2414
/*
 * Infineon Reference Boards
 *
 * Copyright (C) 2007-2008 Gabor Juhos <juhosg@openwrt.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 */

#include "infineon.h"

#include <prom/admboot.h>

/* Location of the bootloader's board configuration block in flash. */
#define EASY_CONFIG_OFFSET 0x10000
#define EASY_CONFIG_SIZE 0x1000

#ifdef CONFIG_MTD_PARTITIONS
/* Fixed flash layout shared by the EASY reference boards. */
static struct mtd_partition easy_partitions[] = {
	{
		.name = "admboot",
		.offset = 0,
		.size = 64*1024,
		.mask_flags = MTD_WRITEABLE,	/* bootloader is read-only */
	} , {
		.name = "boardcfg",
		.offset = MTDPART_OFS_APPEND,
		.size = 64*1024,
	} , {
		.name = "firmware",
		.offset = MTDPART_OFS_APPEND,
		.size = MTDPART_SIZ_FULL,
	}
};
#endif /* CONFIG_MTD_PARTITIONS */

/*
 * Derive the Ethernet MAC addresses from the bootloader config block,
 * falling back to a random address if the stored one is missing/invalid.
 */
static __init void easy_setup_mac(void)
{
	u8 mac_base[6];
	int err;

	err = admboot_get_mac_base(EASY_CONFIG_OFFSET,
			EASY_CONFIG_SIZE, mac_base);

	if ((err) || !is_valid_ether_addr(mac_base))
		random_ether_addr(mac_base);

	adm5120_setup_eth_macs(mac_base);
}

/* Flash bank switch callback: A20 line is driven by GPIO3 on PQFP boards. */
static void switch_bank_gpio3(unsigned bank)
{
	switch (bank) {
	case 0:
		gpio_set_value(ADM5120_GPIO_PIN3, 0);
		break;
	case 1:
		gpio_set_value(ADM5120_GPIO_PIN3, 1);
		break;
	}
}

/* Board setup for the PQFP-packaged variant. */
void __init easy_setup_pqfp(void)
{
	/* setup flash A20 line */
	gpio_request(ADM5120_GPIO_PIN3, NULL);
	gpio_direction_output(ADM5120_GPIO_PIN3, 0);
	adm5120_flash0_data.switch_bank = switch_bank_gpio3;

#ifdef CONFIG_MTD_PARTITIONS
	adm5120_flash0_data.nr_parts = ARRAY_SIZE(easy_partitions);
	adm5120_flash0_data.parts = easy_partitions;
#endif /* CONFIG_MTD_PARTITIONS */

	adm5120_add_device_uart(0);
	adm5120_add_device_uart(1);

	adm5120_add_device_flash(0);

	easy_setup_mac();
}

/* Flash bank switch callback: A20 line is driven by GPIO5 on BGA boards. */
static void switch_bank_gpio5(unsigned bank)
{
	switch (bank) {
	case 0:
		gpio_set_value(ADM5120_GPIO_PIN5, 0);
		break;
	case 1:
		gpio_set_value(ADM5120_GPIO_PIN5, 1);
		break;
	}
}

/* Board setup for the BGA-packaged variant; mirrors easy_setup_pqfp()
 * except for the GPIO line used as flash A20. */
void __init easy_setup_bga(void)
{
	/* setup flash A20 line */
	gpio_request(ADM5120_GPIO_PIN5, NULL);
	gpio_direction_output(ADM5120_GPIO_PIN5, 0);
	adm5120_flash0_data.switch_bank = switch_bank_gpio5;

#ifdef CONFIG_MTD_PARTITIONS
	adm5120_flash0_data.nr_parts = ARRAY_SIZE(easy_partitions);
	adm5120_flash0_data.parts = easy_partitions;
#endif /* CONFIG_MTD_PARTITIONS */

	adm5120_add_device_uart(0);
	adm5120_add_device_uart(1);

	adm5120_add_device_flash(0);

	easy_setup_mac();
}
gpl-2.0
lookfiresu123/my_linux-3.13.0
drivers/media/v4l2-core/videobuf2-core.c
66
74558
/*
 * videobuf2-core.c - V4L2 driver helper framework
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *	   Marek Szyprowski <m.szyprowski@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/slab.h>
#include <linux/sched.h>

#include <media/v4l2-dev.h>
#include <media/v4l2-fh.h>
#include <media/v4l2-event.h>
#include <media/videobuf2-core.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2: " fmt, ## arg);		\
	} while (0)

/*
 * Invoke an optional memory-allocator callback; evaluates to 0 when the
 * allocator does not provide the op.
 */
#define call_memop(q, op, args...)					\
	(((q)->mem_ops->op) ?						\
		((q)->mem_ops->op(args)) : 0)

/* Same pattern for optional driver queue-ops callbacks. */
#define call_qop(q, op, args...)					\
	(((q)->ops->op) ? ((q)->ops->op(args)) : 0)

/* Flags that reflect vb2-internal buffer state and must not leak in from
 * userspace; they are cleared/recomputed in __fill_v4l2_buffer(). */
#define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
				 V4L2_BUF_FLAG_PREPARED | \
				 V4L2_BUF_FLAG_TIMESTAMP_MASK)

/**
 * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
 *
 * Allocates every plane of @vb through the queue's memory ops. On failure,
 * already-allocated planes are rolled back and -ENOMEM is returned.
 */
static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	int plane;

	/*
	 * Allocate memory for all planes in this buffer
	 * NOTE: mmapped areas should be page aligned
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);

		mem_priv = call_memop(q, alloc, q->alloc_ctx[plane],
				      size, q->gfp_flags);
		if (IS_ERR_OR_NULL(mem_priv))
			goto free;

		/* Associate allocator private data with this plane */
		vb->planes[plane].mem_priv = mem_priv;
		vb->v4l2_planes[plane].length = q->plane_sizes[plane];
	}

	return 0;
free:
	/* Free already allocated memory if one of the allocations failed */
	for (; plane > 0; --plane) {
		call_memop(q, put, vb->planes[plane - 1].mem_priv);
		vb->planes[plane - 1].mem_priv = NULL;
	}

	return -ENOMEM;
}

/**
 * __vb2_buf_mem_free() - free memory of the given buffer
 */
static void __vb2_buf_mem_free(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		call_memop(q, put, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		dprintk(3, "Freed plane %d of buffer %d\n", plane,
			vb->v4l2_buf.index);
	}
}

/**
 * __vb2_buf_userptr_put() - release userspace memory associated with
 * a USERPTR buffer
 */
static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_memop(q, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
	}
}

/**
 * __vb2_plane_dmabuf_put() - release memory associated with
 * a DMABUF shared plane
 *
 * Unmaps (if mapped), detaches, drops the dma_buf reference and zeroes the
 * plane state. Safe to call on a plane with no memory attached.
 */
static void __vb2_plane_dmabuf_put(struct vb2_queue *q, struct vb2_plane *p)
{
	if (!p->mem_priv)
		return;

	if (p->dbuf_mapped)
		call_memop(q, unmap_dmabuf, p->mem_priv);

	call_memop(q, detach_dmabuf, p->mem_priv);
	dma_buf_put(p->dbuf);
	memset(p, 0, sizeof(*p));
}

/**
 * __vb2_buf_dmabuf_put() - release memory associated with
 * a DMABUF shared buffer
 */
static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	for (plane = 0; plane < vb->num_planes; ++plane)
		__vb2_plane_dmabuf_put(q, &vb->planes[plane]);
}

/**
 * __setup_lengths() - setup initial lengths for every plane in
 * every buffer on the queue
 */
static void __setup_lengths(struct vb2_queue *q, unsigned int n)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane)
			vb->v4l2_planes[plane].length = q->plane_sizes[plane];
	}
}

/**
 * __setup_offsets() - setup unique offsets ("cookies") for every plane in
 * every buffer on the queue
 *
 * The mem_offset values are the tokens userspace passes back to mmap();
 * they continue past any already-allocated buffers so offsets stay unique
 * across CREATE_BUFS calls. Each offset is page aligned.
 */
static void __setup_offsets(struct vb2_queue *q, unsigned int n)
{
	unsigned int buffer, plane;
	struct vb2_buffer *vb;
	unsigned long off;

	if (q->num_buffers) {
		struct v4l2_plane *p;
		/* resume numbering after the last existing plane */
		vb = q->bufs[q->num_buffers - 1];
		p = &vb->v4l2_planes[vb->num_planes - 1];
		off = PAGE_ALIGN(p->m.mem_offset + p->length);
	} else {
		off = 0;
	}

	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		for (plane = 0; plane < vb->num_planes; ++plane) {
			vb->v4l2_planes[plane].m.mem_offset = off;

			dprintk(3, "Buffer %d, plane %d offset 0x%08lx\n",
					buffer, plane, off);

			off += vb->v4l2_planes[plane].length;
			off = PAGE_ALIGN(off);
		}
	}
}

/**
 * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
 * video buffer memory for all buffers/planes on the queue and initializes the
 * queue
 *
 * Returns the number of buffers successfully allocated.
 */
static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
			     unsigned int num_buffers, unsigned int num_planes)
{
	unsigned int buffer;
	struct vb2_buffer *vb;
	int ret;

	for (buffer = 0; buffer < num_buffers; ++buffer) {
		/* Allocate videobuf buffer structures */
		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
		if (!vb) {
			dprintk(1, "Memory alloc for buffer struct failed\n");
			break;
		}

		/* Length stores number of planes for multiplanar buffers */
		if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
			vb->v4l2_buf.length = num_planes;

		vb->state = VB2_BUF_STATE_DEQUEUED;
		vb->vb2_queue = q;
		vb->num_planes = num_planes;
		vb->v4l2_buf.index = q->num_buffers + buffer;
		vb->v4l2_buf.type = q->type;
		vb->v4l2_buf.memory = memory;

		/* Allocate video buffer memory for the MMAP type */
		if (memory == V4L2_MEMORY_MMAP) {
			ret = __vb2_buf_mem_alloc(vb);
			if (ret) {
				dprintk(1, "Failed allocating memory for "
						"buffer %d\n", buffer);
				kfree(vb);
				break;
			}
			/*
			 * Call the driver-provided buffer initialization
			 * callback, if given. An error in initialization
			 * results in queue setup failure.
			 */
			ret = call_qop(q, buf_init, vb);
			if (ret) {
				dprintk(1, "Buffer %d %p initialization"
					" failed\n", buffer, vb);
				__vb2_buf_mem_free(vb);
				kfree(vb);
				break;
			}
		}

		q->bufs[q->num_buffers + buffer] = vb;
	}

	/* 'buffer' is how many we actually managed to set up */
	__setup_lengths(q, buffer);
	if (memory == V4L2_MEMORY_MMAP)
		__setup_offsets(q, buffer);

	dprintk(1, "Allocated %d buffers, %d plane(s) each\n",
			buffer, num_planes);

	return buffer;
}

/**
 * __vb2_free_mem() - release all video buffer memory for a given queue
 */
static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;
	struct vb2_buffer *vb;

	/* only the last 'buffers' entries on the queue are released */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		vb = q->bufs[buffer];
		if (!vb)
			continue;

		/* Free MMAP buffers or release USERPTR buffers */
		if (q->memory == V4L2_MEMORY_MMAP)
			__vb2_buf_mem_free(vb);
		else if (q->memory == V4L2_MEMORY_DMABUF)
			__vb2_buf_dmabuf_put(vb);
		else
			__vb2_buf_userptr_put(vb);
	}
}

/**
 * __vb2_queue_free() - free buffers at the end of the queue - video memory and
 * related information, if no buffers are left return the queue to an
 * uninitialized state. Might be called even if the queue has already been freed.
 */
static void __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
{
	unsigned int buffer;

	/* Call driver-provided cleanup function for each buffer, if provided */
	if (q->ops->buf_cleanup) {
		for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
		     ++buffer) {
			if (NULL == q->bufs[buffer])
				continue;
			q->ops->buf_cleanup(q->bufs[buffer]);
		}
	}

	/* Release video buffer memory */
	__vb2_free_mem(q, buffers);

	/* Free videobuf buffers */
	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
	     ++buffer) {
		kfree(q->bufs[buffer]);
		q->bufs[buffer] = NULL;
	}

	q->num_buffers -= buffers;
	if (!q->num_buffers)
		q->memory = 0;	/* queue is back to the uninitialized state */
	INIT_LIST_HEAD(&q->queued_list);
}

/**
 * __verify_planes_array() - verify that the planes array passed in struct
 * v4l2_buffer from userspace can be safely used
 */
static int __verify_planes_array(struct vb2_buffer *vb,
				 const struct v4l2_buffer *b)
{
	/* single-planar types carry no planes array at all */
	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
		return 0;

	/* Is memory for copying plane information present? */
	if (NULL == b->m.planes) {
		dprintk(1, "Multi-planar buffer passed but "
			   "planes array not provided\n");
		return -EINVAL;
	}

	if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
		dprintk(1, "Incorrect planes array length, "
			   "expected %d, got %d\n", vb->num_planes, b->length);
		return -EINVAL;
	}

	return 0;
}

/**
 * __verify_length() - Verify that the bytesused value for each plane fits in
 * the plane length and that the data offset doesn't exceed the bytesused value.
 */
static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	unsigned int length;
	unsigned int plane;

	/* only OUTPUT buffers carry userspace-supplied payload sizes */
	if (!V4L2_TYPE_IS_OUTPUT(b->type))
		return 0;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		for (plane = 0; plane < vb->num_planes; ++plane) {
			/* for USERPTR the user supplies the plane length too */
			length = (b->memory == V4L2_MEMORY_USERPTR)
			       ? b->m.planes[plane].length
			       : vb->v4l2_planes[plane].length;

			if (b->m.planes[plane].bytesused > length)
				return -EINVAL;
			if (b->m.planes[plane].data_offset > 0 &&
			    b->m.planes[plane].data_offset >=
			    b->m.planes[plane].bytesused)
				return -EINVAL;
		}
	} else {
		length = (b->memory == V4L2_MEMORY_USERPTR)
		       ? b->length : vb->v4l2_planes[0].length;

		if (b->bytesused > length)
			return -EINVAL;
	}

	return 0;
}

/**
 * __buffer_in_use() - return true if the buffer is in use and
 * the queue cannot be freed (by the means of REQBUFS(0)) call
 */
static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
{
	unsigned int plane;
	for (plane = 0; plane < vb->num_planes; ++plane) {
		void *mem_priv = vb->planes[plane].mem_priv;
		/*
		 * If num_users() has not been provided, call_memop
		 * will return 0, apparently nobody cares about this
		 * case anyway. If num_users() returns more than 1,
		 * we are not the only user of the plane's memory.
		 */
		if (mem_priv && call_memop(q, num_users, mem_priv) > 1)
			return true;
	}
	return false;
}

/**
 * __buffers_in_use() - return true if any buffers on the queue are in use and
 * the queue cannot be freed (by the means of REQBUFS(0)) call
 */
static bool __buffers_in_use(struct vb2_queue *q)
{
	unsigned int buffer;
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		if (__buffer_in_use(q, q->bufs[buffer]))
			return true;
	}
	return false;
}

/**
 * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
 * returned to userspace
 */
static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;

	/* Copy back data such as timestamp, flags, etc. */
	memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
	b->reserved2 = vb->v4l2_buf.reserved2;
	b->reserved = vb->v4l2_buf.reserved;

	if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
		/*
		 * Fill in plane-related data if userspace provided an array
		 * for it. The caller has already verified memory and size.
		 */
		b->length = vb->num_planes;
		memcpy(b->m.planes, vb->v4l2_planes,
			b->length * sizeof(struct v4l2_plane));
	} else {
		/*
		 * We use length and offset in v4l2_planes array even for
		 * single-planar buffers, but userspace does not.
		 */
		b->length = vb->v4l2_planes[0].length;
		b->bytesused = vb->v4l2_planes[0].bytesused;
		if (q->memory == V4L2_MEMORY_MMAP)
			b->m.offset = vb->v4l2_planes[0].m.mem_offset;
		else if (q->memory == V4L2_MEMORY_USERPTR)
			b->m.userptr = vb->v4l2_planes[0].m.userptr;
		else if (q->memory == V4L2_MEMORY_DMABUF)
			b->m.fd = vb->v4l2_planes[0].m.fd;
	}

	/*
	 * Clear any buffer state related flags.
	 */
	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
	b->flags |= q->timestamp_type;

	switch (vb->state) {
	case VB2_BUF_STATE_QUEUED:
	case VB2_BUF_STATE_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VB2_BUF_STATE_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		/* fall through */
	case VB2_BUF_STATE_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VB2_BUF_STATE_PREPARED:
		b->flags |= V4L2_BUF_FLAG_PREPARED;
		break;
	case VB2_BUF_STATE_DEQUEUED:
		/* nothing */
		break;
	}

	if (__buffer_in_use(q, vb))
		b->flags |= V4L2_BUF_FLAG_MAPPED;
}

/**
 * vb2_querybuf() - query video buffer information
 * @q:		videobuf queue
 * @b:		buffer struct passed from userspace to vidioc_querybuf handler
 *		in driver
 *
 * Should be called from vidioc_querybuf ioctl handler in driver.
 * This function will verify the passed v4l2_buffer structure and fill the
 * relevant information for the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_querybuf handler in driver.
 */
int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	struct vb2_buffer *vb;
	int ret;

	if (b->type != q->type) {
		dprintk(1, "querybuf: wrong buffer type\n");
		return -EINVAL;
	}

	if (b->index >= q->num_buffers) {
		dprintk(1, "querybuf: buffer index out of range\n");
		return -EINVAL;
	}
	vb = q->bufs[b->index];
	ret = __verify_planes_array(vb, b);
	if (!ret)
		__fill_v4l2_buffer(vb, b);
	return ret;
}
EXPORT_SYMBOL(vb2_querybuf);

/**
 * __verify_userptr_ops() - verify that all memory operations required for
 * USERPTR queue type have been provided
 */
static int __verify_userptr_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
	    !q->mem_ops->put_userptr)
		return -EINVAL;

	return 0;
}

/**
 * __verify_mmap_ops() - verify that all memory operations required for
 * MMAP queue type have been provided
 */
static int __verify_mmap_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
	    !q->mem_ops->put || !q->mem_ops->mmap)
		return -EINVAL;

	return 0;
}

/**
 * __verify_dmabuf_ops() - verify that all memory operations required for
 * DMABUF queue type have been provided
 */
static int __verify_dmabuf_ops(struct vb2_queue *q)
{
	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
	    !q->mem_ops->detach_dmabuf || !q->mem_ops->map_dmabuf ||
	    !q->mem_ops->unmap_dmabuf)
		return -EINVAL;

	return 0;
}

/**
 * __verify_memory_type() - Check whether the memory type and buffer type
 * passed to a buffer operation are compatible with the queue.
 */
static int __verify_memory_type(struct vb2_queue *q,
		enum v4l2_memory memory, enum v4l2_buf_type type)
{
	if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
	    memory != V4L2_MEMORY_DMABUF) {
		dprintk(1, "reqbufs: unsupported memory type\n");
		return -EINVAL;
	}

	if (type != q->type) {
		dprintk(1, "reqbufs: requested type is incorrect\n");
		return -EINVAL;
	}

	/*
	 * Make sure all the required memory ops for given memory type
	 * are available.
	 */
	if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
		dprintk(1, "reqbufs: MMAP for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
		dprintk(1, "reqbufs: USERPTR for current setup unsupported\n");
		return -EINVAL;
	}

	if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
		dprintk(1, "reqbufs: DMABUF for current setup unsupported\n");
		return -EINVAL;
	}

	/*
	 * Place the busy tests at the end: -EBUSY can be ignored when
	 * create_bufs is called with count == 0, but count == 0 should still
	 * do the memory and type validation.
	 */
	if (q->fileio) {
		dprintk(1, "reqbufs: file io in progress\n");
		return -EBUSY;
	}
	return 0;
}

/**
 * __reqbufs() - Initiate streaming
 * @q:		videobuf2 queue
 * @req:	struct passed from userspace to vidioc_reqbufs handler in driver
 *
 * Should be called from vidioc_reqbufs ioctl handler of a driver.
 * This function:
 * 1) verifies streaming parameters passed from the userspace,
 * 2) sets up the queue,
 * 3) negotiates number of buffers and planes per buffer with the driver
 *    to be used during streaming,
 * 4) allocates internal buffer structures (struct vb2_buffer), according to
 *    the agreed parameters,
 * 5) for MMAP memory type, allocates actual video memory, using the
 *    memory handling/allocation routines provided during queue initialization
 *
 * If req->count is 0, all the memory will be freed instead.
 * If the queue has been allocated previously (by a previous vb2_reqbufs) call
 * and the queue is not busy, memory will be reallocated.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_reqbufs handler in driver.
 */
static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	unsigned int num_buffers, allocated_buffers, num_planes = 0;
	int ret;

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming active\n");
		return -EBUSY;
	}

	if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
		/*
		 * We already have buffers allocated, so first check if they
		 * are not in use and can be freed.
		 */
		if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
			dprintk(1, "reqbufs: memory in use, cannot free\n");
			return -EBUSY;
		}

		__vb2_queue_free(q, q->num_buffers);

		/*
		 * In case of REQBUFS(0) return immediately without calling
		 * driver's queue_setup() callback and allocating resources.
		 */
		if (req->count == 0)
			return 0;
	}

	/*
	 * Make sure the requested values and current defaults are sane.
	 */
	num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
	memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
	memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
	q->memory = req->memory;

	/*
	 * Ask the driver how many buffers and planes per buffer it requires.
	 * Driver also sets the size and allocator context for each plane.
	 */
	ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
		       q->plane_sizes, q->alloc_ctx);
	if (ret)
		return ret;

	/* Finally, allocate buffers and video memory */
	ret = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
	if (ret == 0) {
		dprintk(1, "Memory allocation failed\n");
		return -ENOMEM;
	}

	/* __vb2_queue_alloc() returns how many buffers it really set up */
	allocated_buffers = ret;

	/*
	 * Check if driver can handle the allocated number of buffers.
	 */
	if (allocated_buffers < num_buffers) {
		num_buffers = allocated_buffers;

		/* re-negotiate with the driver for the smaller count */
		ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
			       &num_planes, q->plane_sizes, q->alloc_ctx);

		if (!ret && allocated_buffers < num_buffers)
			ret = -ENOMEM;

		/*
		 * Either the driver has accepted a smaller number of buffers,
		 * or .queue_setup() returned an error
		 */
	}

	q->num_buffers = allocated_buffers;

	if (ret < 0) {
		__vb2_queue_free(q, allocated_buffers);
		return ret;
	}

	/*
	 * Return the number of successfully allocated buffers
	 * to the userspace.
	 */
	req->count = allocated_buffers;

	return 0;
}

/**
 * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
 * type values.
 * @q:		videobuf2 queue
 * @req:	struct passed from userspace to vidioc_reqbufs handler in driver
 */
int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
{
	int ret = __verify_memory_type(q, req->memory, req->type);

	return ret ? ret : __reqbufs(q, req);
}
EXPORT_SYMBOL_GPL(vb2_reqbufs);

/**
 * __create_bufs() - Allocate buffers and any required auxiliary structs
 * @q:		videobuf2 queue
 * @create:	creation parameters, passed from userspace to vidioc_create_bufs
 *		handler in driver
 *
 * Should be called from vidioc_create_bufs ioctl handler of a driver.
 * This function:
 * 1) verifies parameter sanity
 * 2) calls the .queue_setup() queue operation
 * 3) performs any necessary memory allocations
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_create_bufs handler in driver.
*/ static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) { unsigned int num_planes = 0, num_buffers, allocated_buffers; int ret; if (q->num_buffers == VIDEO_MAX_FRAME) { dprintk(1, "%s(): maximum number of buffers already allocated\n", __func__); return -ENOBUFS; } if (!q->num_buffers) { memset(q->plane_sizes, 0, sizeof(q->plane_sizes)); memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx)); q->memory = create->memory; } num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers); /* * Ask the driver, whether the requested number of buffers, planes per * buffer and their sizes are acceptable */ ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, &num_planes, q->plane_sizes, q->alloc_ctx); if (ret) return ret; /* Finally, allocate buffers and video memory */ ret = __vb2_queue_alloc(q, create->memory, num_buffers, num_planes); if (ret == 0) { dprintk(1, "Memory allocation failed\n"); return -ENOMEM; } allocated_buffers = ret; /* * Check if driver can handle the so far allocated number of buffers. */ if (ret < num_buffers) { num_buffers = ret; /* * q->num_buffers contains the total number of buffers, that the * queue driver has set up */ ret = call_qop(q, queue_setup, q, &create->format, &num_buffers, &num_planes, q->plane_sizes, q->alloc_ctx); if (!ret && allocated_buffers < num_buffers) ret = -ENOMEM; /* * Either the driver has accepted a smaller number of buffers, * or .queue_setup() returned an error */ } q->num_buffers += allocated_buffers; if (ret < 0) { __vb2_queue_free(q, allocated_buffers); return -ENOMEM; } /* * Return the number of successfully allocated buffers * to the userspace. */ create->count = allocated_buffers; return 0; } /** * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the * memory and type values. 
* @q: videobuf2 queue * @create: creation parameters, passed from userspace to vidioc_create_bufs * handler in driver */ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) { int ret = __verify_memory_type(q, create->memory, create->format.type); create->index = q->num_buffers; if (create->count == 0) return ret != -EBUSY ? ret : 0; return ret ? ret : __create_bufs(q, create); } EXPORT_SYMBOL_GPL(vb2_create_bufs); /** * vb2_plane_vaddr() - Return a kernel virtual address of a given plane * @vb: vb2_buffer to which the plane in question belongs to * @plane_no: plane number for which the address is to be returned * * This function returns a kernel virtual address of a given plane if * such a mapping exist, NULL otherwise. */ void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no) { struct vb2_queue *q = vb->vb2_queue; if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) return NULL; return call_memop(q, vaddr, vb->planes[plane_no].mem_priv); } EXPORT_SYMBOL_GPL(vb2_plane_vaddr); /** * vb2_plane_cookie() - Return allocator specific cookie for the given plane * @vb: vb2_buffer to which the plane in question belongs to * @plane_no: plane number for which the cookie is to be returned * * This function returns an allocator specific cookie for a given plane if * available, NULL otherwise. The allocator should provide some simple static * inline function, which would convert this cookie to the allocator specific * type that can be used directly by the driver to access the buffer. This can * be for example physical address, pointer to scatter list or IOMMU mapping. 
*/ void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no) { struct vb2_queue *q = vb->vb2_queue; if (plane_no > vb->num_planes || !vb->planes[plane_no].mem_priv) return NULL; return call_memop(q, cookie, vb->planes[plane_no].mem_priv); } EXPORT_SYMBOL_GPL(vb2_plane_cookie); /** * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished * @vb: vb2_buffer returned from the driver * @state: either VB2_BUF_STATE_DONE if the operation finished successfully * or VB2_BUF_STATE_ERROR if the operation finished with an error * * This function should be called by the driver after a hardware operation on * a buffer is finished and the buffer may be returned to userspace. The driver * cannot use this buffer anymore until it is queued back to it by videobuf * by the means of buf_queue callback. Only buffers previously queued to the * driver by buf_queue can be passed to this function. */ void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state) { struct vb2_queue *q = vb->vb2_queue; unsigned long flags; unsigned int plane; if (vb->state != VB2_BUF_STATE_ACTIVE) return; if (state != VB2_BUF_STATE_DONE && state != VB2_BUF_STATE_ERROR) return; dprintk(4, "Done processing on buffer %d, state: %d\n", vb->v4l2_buf.index, state); /* sync buffers */ for (plane = 0; plane < vb->num_planes; ++plane) call_memop(q, finish, vb->planes[plane].mem_priv); /* Add the buffer to the done buffers list */ spin_lock_irqsave(&q->done_lock, flags); vb->state = state; list_add_tail(&vb->done_entry, &q->done_list); atomic_dec(&q->queued_count); spin_unlock_irqrestore(&q->done_lock, flags); /* Inform any processes that may be waiting for buffers */ wake_up(&q->done_wq); } EXPORT_SYMBOL_GPL(vb2_buffer_done); /** * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a * v4l2_buffer by the userspace. The caller has already verified that struct * v4l2_buffer has a valid number of planes. 
 */
static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
				struct v4l2_plane *v4l2_planes)
{
	unsigned int plane;

	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
		/* Fill in driver-provided information for OUTPUT types */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			/*
			 * Will have to go up to b->length when API starts
			 * accepting variable number of planes.
			 */
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].bytesused =
					b->m.planes[plane].bytesused;
				v4l2_planes[plane].data_offset =
					b->m.planes[plane].data_offset;
			}
		}

		if (b->memory == V4L2_MEMORY_USERPTR) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.userptr =
					b->m.planes[plane].m.userptr;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
			}
		}
		if (b->memory == V4L2_MEMORY_DMABUF) {
			for (plane = 0; plane < vb->num_planes; ++plane) {
				v4l2_planes[plane].m.fd =
					b->m.planes[plane].m.fd;
				v4l2_planes[plane].length =
					b->m.planes[plane].length;
				v4l2_planes[plane].data_offset =
					b->m.planes[plane].data_offset;
			}
		}
	} else {
		/*
		 * Single-planar buffers do not use planes array,
		 * so fill in relevant v4l2_buffer struct fields instead.
		 * In videobuf we use our internal V4l2_planes struct for
		 * single-planar buffers as well, for simplicity.
		 */
		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
			v4l2_planes[0].bytesused = b->bytesused;
			v4l2_planes[0].data_offset = 0;
		}

		if (b->memory == V4L2_MEMORY_USERPTR) {
			v4l2_planes[0].m.userptr = b->m.userptr;
			v4l2_planes[0].length = b->length;
		}

		if (b->memory == V4L2_MEMORY_DMABUF) {
			v4l2_planes[0].m.fd = b->m.fd;
			v4l2_planes[0].length = b->length;
			v4l2_planes[0].data_offset = 0;
		}
	}

	vb->v4l2_buf.field = b->field;
	vb->v4l2_buf.timestamp = b->timestamp;
	/* vb2-internal state flags are recomputed, never taken from userspace */
	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
}

/**
 * __qbuf_userptr() - handle qbuf of a USERPTR buffer
 */
static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	/* CAPTURE buffers are written by hw, hence "write" mapping */
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);

	/* Copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		/* Skip the plane if already verified */
		if (vb->v4l2_planes[plane].m.userptr &&
		    vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
		    && vb->v4l2_planes[plane].length == planes[plane].length)
			continue;

		dprintk(3, "qbuf: userspace address for plane %d changed, "
				"reacquiring memory\n", plane);

		/* Check if the provided plane buffer is large enough */
		if (planes[plane].length < q->plane_sizes[plane]) {
			dprintk(1, "qbuf: provided buffer size %u is less than "
						"setup size %u for plane %d\n",
						planes[plane].length,
						q->plane_sizes[plane], plane);
			ret = -EINVAL;
			goto err;
		}

		/* Release previously acquired memory if present */
		if (vb->planes[plane].mem_priv)
			call_memop(q, put_userptr, vb->planes[plane].mem_priv);

		vb->planes[plane].mem_priv = NULL;
		vb->v4l2_planes[plane].m.userptr = 0;
		vb->v4l2_planes[plane].length = 0;

		/* Acquire each plane's memory */
		mem_priv = call_memop(q, get_userptr, q->alloc_ctx[plane],
				      planes[plane].m.userptr,
				      planes[plane].length, write);
		if (IS_ERR_OR_NULL(mem_priv)) {
			dprintk(1, "qbuf: failed acquiring userspace "
						"memory for plane %d\n", plane);
			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
			goto err;
		}
		vb->planes[plane].mem_priv = mem_priv;
	}

	/*
	 * Call driver-specific initialization on the newly acquired buffer,
	 * if provided.
	 */
	ret = call_qop(q, buf_init, vb);
	if (ret) {
		dprintk(1, "qbuf: buffer initialization failed\n");
		goto err;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		if (vb->planes[plane].mem_priv)
			call_memop(q, put_userptr, vb->planes[plane].mem_priv);
		vb->planes[plane].mem_priv = NULL;
		vb->v4l2_planes[plane].m.userptr = 0;
		vb->v4l2_planes[plane].length = 0;
	}

	return ret;
}

/**
 * __qbuf_mmap() - handle qbuf of an MMAP buffer
 */
static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	/* memory was allocated at REQBUFS time; only copy metadata */
	__fill_vb2_buffer(vb, b, vb->v4l2_planes);
	return 0;
}

/**
 * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
 */
static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct v4l2_plane planes[VIDEO_MAX_PLANES];
	struct vb2_queue *q = vb->vb2_queue;
	void *mem_priv;
	unsigned int plane;
	int ret;
	int write = !V4L2_TYPE_IS_OUTPUT(q->type);

	/* Verify and copy relevant information provided by the userspace */
	__fill_vb2_buffer(vb, b, planes);

	for (plane = 0; plane < vb->num_planes; ++plane) {
		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);

		if (IS_ERR_OR_NULL(dbuf)) {
			dprintk(1, "qbuf: invalid dmabuf fd for plane %d\n",
				plane);
			ret = -EINVAL;
			goto err;
		}

		/* use DMABUF size if length is not provided */
		if (planes[plane].length == 0)
			planes[plane].length = dbuf->size;

		if (planes[plane].length < planes[plane].data_offset +
		    q->plane_sizes[plane]) {
			ret = -EINVAL;
			goto err;
		}

		/* Skip the plane if already verified */
		if (dbuf == vb->planes[plane].dbuf &&
		    vb->v4l2_planes[plane].length == planes[plane].length) {
			/* drop the reference taken by dma_buf_get() above */
			dma_buf_put(dbuf);
			continue;
		}

		dprintk(1, "qbuf: buffer for plane %d changed\n", plane);

		/* Release previously acquired memory if present */
		__vb2_plane_dmabuf_put(q, &vb->planes[plane]);
		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));

		/* Acquire each plane's memory */
		mem_priv = call_memop(q, attach_dmabuf, q->alloc_ctx[plane],
			dbuf, planes[plane].length, write);
		if (IS_ERR(mem_priv)) {
			dprintk(1, "qbuf: failed to attach dmabuf\n");
			ret = PTR_ERR(mem_priv);
			dma_buf_put(dbuf);
			goto err;
		}

		/* the dbuf reference is now owned by the plane */
		vb->planes[plane].dbuf = dbuf;
		vb->planes[plane].mem_priv = mem_priv;
	}

	/* TODO: This pins the buffer(s) with dma_buf_map_attachment()).. but
	 * really we want to do this just before the DMA, not while queueing
	 * the buffer(s)..
	 */
	for (plane = 0; plane < vb->num_planes; ++plane) {
		ret = call_memop(q, map_dmabuf, vb->planes[plane].mem_priv);
		if (ret) {
			dprintk(1, "qbuf: failed to map dmabuf for plane %d\n",
				plane);
			goto err;
		}
		vb->planes[plane].dbuf_mapped = 1;
	}

	/*
	 * Call driver-specific initialization on the newly acquired buffer,
	 * if provided.
	 */
	ret = call_qop(q, buf_init, vb);
	if (ret) {
		dprintk(1, "qbuf: buffer initialization failed\n");
		goto err;
	}

	/*
	 * Now that everything is in order, copy relevant information
	 * provided by userspace.
	 */
	for (plane = 0; plane < vb->num_planes; ++plane)
		vb->v4l2_planes[plane] = planes[plane];

	return 0;
err:
	/* In case of errors, release planes that were already acquired */
	__vb2_buf_dmabuf_put(vb);

	return ret;
}

/**
 * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
 */
static void __enqueue_in_driver(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int plane;

	vb->state = VB2_BUF_STATE_ACTIVE;
	atomic_inc(&q->queued_count);

	/* sync buffers */
	for (plane = 0; plane < vb->num_planes; ++plane)
		call_memop(q, prepare, vb->planes[plane].mem_priv);

	q->ops->buf_queue(vb);
}

/*
 * __buf_prepare() - validate and prepare a buffer for hardware use,
 * dispatching on the queue's memory model; on success the buffer moves
 * to the PREPARED state.
 */
static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
{
	struct vb2_queue *q = vb->vb2_queue;
	int ret;

	ret = __verify_length(vb, b);
	if (ret < 0) {
		dprintk(1, "%s(): plane parameters verification failed: %d\n",
			__func__, ret);
		return ret;
	}

	switch (q->memory) {
	case V4L2_MEMORY_MMAP:
		ret = __qbuf_mmap(vb, b);
		break;
	case V4L2_MEMORY_USERPTR:
		ret = __qbuf_userptr(vb, b);
		break;
	case V4L2_MEMORY_DMABUF:
		ret = __qbuf_dmabuf(vb, b);
		break;
	default:
		WARN(1, "Invalid queue type\n");
		ret = -EINVAL;
	}

	if (!ret)
		ret = call_qop(q, buf_prepare, vb);
	if (ret)
		dprintk(1, "qbuf: buffer preparation failed: %d\n", ret);
	else
		vb->state = VB2_BUF_STATE_PREPARED;

	return ret;
}

/*
 * vb2_queue_or_prepare_buf() - common validation front-end for
 * VIDIOC_QBUF and VIDIOC_PREPARE_BUF; validates @b, then invokes
 * @handler on the matching vb2_buffer and fills @b back for userspace.
 */
static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
				    const char *opname,
				    int (*handler)(struct vb2_queue *,
						   struct v4l2_buffer *,
						   struct vb2_buffer *))
{
	struct rw_semaphore *mmap_sem = NULL;
	struct vb2_buffer *vb;
	int ret;

	/*
	 * In case of user pointer buffers vb2 allocators need to get direct
	 * access to userspace pages. This requires getting the mmap semaphore
	 * for read access in the current process structure. The same semaphore
	 * is taken before calling mmap operation, while both qbuf/prepare_buf
	 * and mmap are called by the driver or v4l2 core with the driver's lock
	 * held. To avoid an AB-BA deadlock (mmap_sem then driver's lock in mmap
	 * and driver's lock then mmap_sem in qbuf/prepare_buf) the videobuf2
	 * core releases the driver's lock, takes mmap_sem and then takes the
	 * driver's lock again.
	 *
	 * To avoid racing with other vb2 calls, which might be called after
	 * releasing the driver's lock, this operation is performed at the
	 * beginning of qbuf/prepare_buf processing. This way the queue status
	 * is consistent after getting the driver's lock back.
	 */
	if (q->memory == V4L2_MEMORY_USERPTR) {
		mmap_sem = &current->mm->mmap_sem;
		call_qop(q, wait_prepare, q);
		down_read(mmap_sem);
		call_qop(q, wait_finish, q);
	}

	if (q->fileio) {
		dprintk(1, "%s(): file io in progress\n", opname);
		ret = -EBUSY;
		goto unlock;
	}

	if (b->type != q->type) {
		dprintk(1, "%s(): invalid buffer type\n", opname);
		ret = -EINVAL;
		goto unlock;
	}

	if (b->index >= q->num_buffers) {
		dprintk(1, "%s(): buffer index out of range\n", opname);
		ret = -EINVAL;
		goto unlock;
	}

	vb = q->bufs[b->index];
	if (NULL == vb) {
		/* Should never happen */
		dprintk(1, "%s(): buffer is NULL\n", opname);
		ret = -EINVAL;
		goto unlock;
	}

	if (b->memory != q->memory) {
		dprintk(1, "%s(): invalid memory type\n", opname);
		ret = -EINVAL;
		goto unlock;
	}

	ret = __verify_planes_array(vb, b);
	if (ret)
		goto unlock;

	ret = handler(q, b, vb);
	if (ret)
		goto unlock;

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);

	dprintk(1, "%s() of buffer %d succeeded\n", opname, vb->v4l2_buf.index);
unlock:
	if (mmap_sem)
		up_read(mmap_sem);
	return ret;
}

/* handler for vb2_prepare_buf(): only DEQUEUED buffers may be prepared */
static int __vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
			     struct vb2_buffer *vb)
{
	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
		dprintk(1, "%s(): invalid buffer state %d\n", __func__,
			vb->state);
		return -EINVAL;
	}

	return __buf_prepare(vb, b);
}

/**
 * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
 * @q:		videobuf2 queue
 * @b:		buffer structure passed from userspace to vidioc_prepare_buf
 *		handler in driver
 *
 * Should be
called from vidioc_prepare_buf ioctl handler of a driver.
 * This function:
 * 1) verifies the passed buffer,
 * 2) calls buf_prepare callback in the driver (if provided), in which
 *    driver-specific buffer initialization can be performed,
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_prepare_buf handler in driver.
 */
int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	return vb2_queue_or_prepare_buf(q, b, "prepare_buf", __vb2_prepare_buf);
}
EXPORT_SYMBOL_GPL(vb2_prepare_buf);

/*
 * __vb2_qbuf() - qbuf-path handler for vb2_queue_or_prepare_buf(); prepares
 * the buffer first if userspace did not already do so via VIDIOC_PREPARE_BUF.
 */
static int __vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b,
		      struct vb2_buffer *vb)
{
	int ret;

	switch (vb->state) {
	case VB2_BUF_STATE_DEQUEUED:
		ret = __buf_prepare(vb, b);
		if (ret)
			return ret;
		/* fallthrough - a freshly prepared buffer is queued below */
	case VB2_BUF_STATE_PREPARED:
		break;
	default:
		dprintk(1, "qbuf: buffer already in use\n");
		return -EINVAL;
	}

	/*
	 * Add to the queued buffers list, a buffer will stay on it until
	 * dequeued in dqbuf.
	 */
	list_add_tail(&vb->queued_entry, &q->queued_list);
	vb->state = VB2_BUF_STATE_QUEUED;

	/*
	 * If already streaming, give the buffer to driver for processing.
	 * If not, the buffer will be given to driver on next streamon.
	 */
	if (q->streaming)
		__enqueue_in_driver(vb);

	return 0;
}

/**
 * vb2_qbuf() - Queue a buffer from userspace
 * @q:		videobuf2 queue
 * @b:		buffer structure passed from userspace to vidioc_qbuf handler
 *		in driver
 *
 * Should be called from vidioc_qbuf ioctl handler of a driver.
 * This function:
 * 1) verifies the passed buffer,
 * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
 *    which driver-specific buffer initialization can be performed,
 * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
 *    callback for processing.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_qbuf handler in driver.
 */
int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
{
	return vb2_queue_or_prepare_buf(q, b, "qbuf", __vb2_qbuf);
}
EXPORT_SYMBOL_GPL(vb2_qbuf);

/**
 * __vb2_wait_for_done_vb() - wait for a buffer to become available
 * for dequeuing
 *
 * Will sleep if required for nonblocking == false.
 */
static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
{
	/*
	 * All operations on vb_done_list are performed under done_lock
	 * spinlock protection. However, buffers may be removed from
	 * it and returned to userspace only while holding both driver's
	 * lock and the done_lock spinlock. Thus we can be sure that as
	 * long as we hold the driver's lock, the list will remain not
	 * empty if list_empty() check succeeds.
	 */

	for (;;) {
		int ret;

		if (!q->streaming) {
			dprintk(1, "Streaming off, will not wait for buffers\n");
			return -EINVAL;
		}

		if (!list_empty(&q->done_list)) {
			/*
			 * Found a buffer that we were waiting for.
			 */
			break;
		}

		if (nonblocking) {
			dprintk(1, "Nonblocking and no buffers to dequeue, "
								"will not wait\n");
			return -EAGAIN;
		}

		/*
		 * We are streaming and blocking, wait for another buffer to
		 * become ready or for streamoff. Driver's lock is released to
		 * allow streamoff or qbuf to be called while waiting.
		 */
		call_qop(q, wait_prepare, q);

		/*
		 * All locks have been released, it is safe to sleep now.
		 */
		dprintk(3, "Will sleep waiting for buffers\n");
		ret = wait_event_interruptible(q->done_wq,
				!list_empty(&q->done_list) || !q->streaming);

		/*
		 * We need to reevaluate both conditions again after reacquiring
		 * the locks or return an error if one occurred.
		 */
		call_qop(q, wait_finish, q);
		if (ret) {
			dprintk(1, "Sleep was interrupted\n");
			return ret;
		}
	}
	return 0;
}

/**
 * __vb2_get_done_vb() - get a buffer ready for dequeuing
 *
 * Will sleep if required for nonblocking == false.
*/
static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
				struct v4l2_buffer *b, int nonblocking)
{
	unsigned long flags;
	int ret;

	/*
	 * Wait for at least one buffer to become available on the done_list.
	 */
	ret = __vb2_wait_for_done_vb(q, nonblocking);
	if (ret)
		return ret;

	/*
	 * Driver's lock has been held since we last verified that done_list
	 * is not empty, so no need for another list_empty(done_list) check.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
	/*
	 * Only remove the buffer from done_list if v4l2_buffer can handle all
	 * the planes.
	 */
	ret = __verify_planes_array(*vb, b);
	if (!ret)
		list_del(&(*vb)->done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	return ret;
}

/**
 * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
 * @q:		videobuf2 queue
 *
 * This function will wait until all buffers that have been given to the driver
 * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
 * wait_prepare, wait_finish pair. It is intended to be called with all locks
 * taken, for example from stop_streaming() callback.
 */
int vb2_wait_for_all_buffers(struct vb2_queue *q)
{
	if (!q->streaming) {
		dprintk(1, "Streaming off, will not wait for buffers\n");
		return -EINVAL;
	}

	/* queued_count drops to zero as the driver completes its buffers. */
	wait_event(q->done_wq, !atomic_read(&q->queued_count));
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);

/**
 * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
 */
static void __vb2_dqbuf(struct vb2_buffer *vb)
{
	struct vb2_queue *q = vb->vb2_queue;
	unsigned int i;

	/* nothing to do if the buffer is already dequeued */
	if (vb->state == VB2_BUF_STATE_DEQUEUED)
		return;

	vb->state = VB2_BUF_STATE_DEQUEUED;

	/* unmap DMABUF buffer */
	if (q->memory == V4L2_MEMORY_DMABUF)
		for (i = 0; i < vb->num_planes; ++i) {
			if (!vb->planes[i].dbuf_mapped)
				continue;
			call_memop(q, unmap_dmabuf, vb->planes[i].mem_priv);
			vb->planes[i].dbuf_mapped = 0;
		}
}

/**
 * vb2_dqbuf() - Dequeue a buffer to the userspace
 * @q:		videobuf2 queue
 * @b:		buffer structure passed from userspace to vidioc_dqbuf handler
 *		in driver
 * @nonblocking: if true, this call will not sleep waiting for a buffer if no
 *		 buffers ready for dequeuing are present. Normally the driver
 *		 would be passing (file->f_flags & O_NONBLOCK) here
 *
 * Should be called from vidioc_dqbuf ioctl handler of a driver.
 * This function:
 * 1) verifies the passed buffer,
 * 2) calls buf_finish callback in the driver (if provided), in which
 *    driver can perform any additional operations that may be required before
 *    returning the buffer to userspace, such as cache sync,
 * 3) the buffer struct members are filled with relevant information for
 *    the userspace.
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_dqbuf handler in driver.
*/
int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
{
	struct vb2_buffer *vb = NULL;
	int ret;

	if (q->fileio) {
		dprintk(1, "dqbuf: file io in progress\n");
		return -EBUSY;
	}

	if (b->type != q->type) {
		dprintk(1, "dqbuf: invalid buffer type\n");
		return -EINVAL;
	}

	/* May sleep (unless nonblocking) until a buffer is on done_list. */
	ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
	if (ret < 0)
		return ret;

	ret = call_qop(q, buf_finish, vb);
	if (ret) {
		dprintk(1, "dqbuf: buffer finish failed\n");
		return ret;
	}

	switch (vb->state) {
	case VB2_BUF_STATE_DONE:
		dprintk(3, "dqbuf: Returning done buffer\n");
		break;
	case VB2_BUF_STATE_ERROR:
		dprintk(3, "dqbuf: Returning done buffer with errors\n");
		break;
	default:
		dprintk(1, "dqbuf: Invalid buffer state\n");
		return -EINVAL;
	}

	/* Fill buffer information for the userspace */
	__fill_v4l2_buffer(vb, b);
	/* Remove from videobuf queue */
	list_del(&vb->queued_entry);
	/* go back to dequeued state */
	__vb2_dqbuf(vb);

	dprintk(1, "dqbuf of buffer %d, with state %d\n",
			vb->v4l2_buf.index, vb->state);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dqbuf);

/**
 * __vb2_queue_cancel() - cancel and stop (pause) streaming
 *
 * Removes all queued buffers from driver's queue and all buffers queued by
 * userspace from videobuf's queue. Returns to state after reqbufs.
 */
static void __vb2_queue_cancel(struct vb2_queue *q)
{
	unsigned int i;

	/*
	 * Tell driver to stop all transactions and release all queued
	 * buffers.
	 */
	if (q->streaming)
		call_qop(q, stop_streaming, q);
	q->streaming = 0;

	/*
	 * Remove all buffers from videobuf's list...
	 */
	INIT_LIST_HEAD(&q->queued_list);
	/*
	 * ...and done list; userspace will not receive any buffers it
	 * has not already dequeued before initiating cancel.
	 */
	INIT_LIST_HEAD(&q->done_list);
	atomic_set(&q->queued_count, 0);
	wake_up_all(&q->done_wq);

	/*
	 * Reinitialize all buffers for next use.
	 */
	for (i = 0; i < q->num_buffers; ++i)
		__vb2_dqbuf(q->bufs[i]);
}

/**
 * vb2_streamon - start streaming
 * @q:		videobuf2 queue
 * @type:	type argument passed from userspace to vidioc_streamon handler
 *
 * Should be called from vidioc_streamon handler of a driver.
 * This function:
 * 1) verifies current state
 * 2) passes any previously queued buffers to the driver and starts streaming
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_streamon handler in the driver.
 */
int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
{
	struct vb2_buffer *vb;
	int ret;

	if (q->fileio) {
		dprintk(1, "streamon: file io in progress\n");
		return -EBUSY;
	}

	if (type != q->type) {
		dprintk(1, "streamon: invalid stream type\n");
		return -EINVAL;
	}

	if (q->streaming) {
		dprintk(1, "streamon: already streaming\n");
		return -EBUSY;
	}

	/*
	 * If any buffers were queued before streamon,
	 * we can now pass them to driver for processing.
	 */
	list_for_each_entry(vb, &q->queued_list, queued_entry)
		__enqueue_in_driver(vb);

	/*
	 * Let driver notice that streaming state has been enabled.
	 */
	ret = call_qop(q, start_streaming, q, atomic_read(&q->queued_count));
	if (ret) {
		dprintk(1, "streamon: driver refused to start streaming\n");
		__vb2_queue_cancel(q);
		return ret;
	}

	q->streaming = 1;

	dprintk(3, "Streamon successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_streamon);

/**
 * vb2_streamoff - stop streaming
 * @q:		videobuf2 queue
 * @type:	type argument passed from userspace to vidioc_streamoff handler
 *
 * Should be called from vidioc_streamoff handler of a driver.
 * This function:
 * 1) verifies current state,
 * 2) stop streaming and dequeues any queued buffers, including those previously
 *    passed to the driver (after waiting for the driver to finish).
 *
 * This call can be used for pausing playback.
* The return values from this function are intended to be directly returned
 * from vidioc_streamoff handler in the driver
 */
int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
{
	if (q->fileio) {
		dprintk(1, "streamoff: file io in progress\n");
		return -EBUSY;
	}

	if (type != q->type) {
		dprintk(1, "streamoff: invalid stream type\n");
		return -EINVAL;
	}

	if (!q->streaming) {
		dprintk(1, "streamoff: not streaming\n");
		return -EINVAL;
	}

	/*
	 * Cancel will pause streaming and remove all buffers from the driver
	 * and videobuf, effectively returning control over them to userspace.
	 */
	__vb2_queue_cancel(q);

	dprintk(3, "Streamoff successful\n");
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_streamoff);

/**
 * __find_plane_by_offset() - find plane associated with the given offset off
 */
static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
			unsigned int *_buffer, unsigned int *_plane)
{
	struct vb2_buffer *vb;
	unsigned int buffer, plane;

	/*
	 * Go over all buffers and their planes, comparing the given offset
	 * with an offset assigned to each plane. If a match is found,
	 * return its buffer and plane numbers.
	 */
	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
		vb = q->bufs[buffer];

		for (plane = 0; plane < vb->num_planes; ++plane) {
			if (vb->v4l2_planes[plane].m.mem_offset == off) {
				*_buffer = buffer;
				*_plane = plane;
				return 0;
			}
		}
	}

	/* No plane uses this offset "cookie". */
	return -EINVAL;
}

/**
 * vb2_expbuf() - Export a buffer as a file descriptor
 * @q:		videobuf2 queue
 * @eb:		export buffer structure passed from userspace to vidioc_expbuf
 *		handler in driver
 *
 * The return values from this function are intended to be directly returned
 * from vidioc_expbuf handler in driver.
*/
int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
{
	struct vb2_buffer *vb = NULL;
	struct vb2_plane *vb_plane;
	int ret;
	struct dma_buf *dbuf;

	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "Queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	if (!q->mem_ops->get_dmabuf) {
		dprintk(1, "Queue does not support DMA buffer exporting\n");
		return -EINVAL;
	}

	if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
		dprintk(1, "Queue does support only O_CLOEXEC and access mode flags\n");
		return -EINVAL;
	}

	if (eb->type != q->type) {
		/* Message label fixed: this is the expbuf path, not qbuf. */
		dprintk(1, "expbuf: invalid buffer type\n");
		return -EINVAL;
	}

	if (eb->index >= q->num_buffers) {
		dprintk(1, "buffer index out of range\n");
		return -EINVAL;
	}

	vb = q->bufs[eb->index];

	if (eb->plane >= vb->num_planes) {
		dprintk(1, "buffer plane out of range\n");
		return -EINVAL;
	}

	vb_plane = &vb->planes[eb->plane];

	/* Ask the allocator to wrap the plane memory in a dma-buf object. */
	dbuf = call_memop(q, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
	if (IS_ERR_OR_NULL(dbuf)) {
		dprintk(1, "Failed to export buffer %d, plane %d\n",
			eb->index, eb->plane);
		return -EINVAL;
	}

	/* Install a new fd; on failure drop the reference taken above. */
	ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
	if (ret < 0) {
		dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
			eb->index, eb->plane, ret);
		dma_buf_put(dbuf);
		return ret;
	}

	dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
		eb->index, eb->plane, ret);
	eb->fd = ret;
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_expbuf);

/**
 * vb2_mmap() - map video buffers into application address space
 * @q:		videobuf2 queue
 * @vma:	vma passed to the mmap file operation handler in the driver
 *
 * Should be called from mmap file operation handler of a driver.
 * This function maps one plane of one of the available video buffers to
 * userspace. To map whole video memory allocated on reqbufs, this function
 * has to be called once per each plane per each buffer previously allocated.
 *
 * When the userspace application calls mmap, it passes to it an offset returned
 * to it earlier by the means of vidioc_querybuf handler.
That offset acts as
 * a "cookie", which is then used to identify the plane to be mapped.
 * This function finds a plane with a matching offset and a mapping is performed
 * by the means of a provided memory operation.
 *
 * The return values from this function are intended to be directly returned
 * from the mmap handler in driver.
 */
int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
{
	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	int ret;
	unsigned long length;

	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "Queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Check memory area access mode.
	 */
	if (!(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "Invalid vma flags, VM_SHARED needed\n");
		return -EINVAL;
	}
	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
		if (!(vma->vm_flags & VM_WRITE)) {
			dprintk(1, "Invalid vma flags, VM_WRITE needed\n");
			return -EINVAL;
		}
	} else {
		if (!(vma->vm_flags & VM_READ)) {
			dprintk(1, "Invalid vma flags, VM_READ needed\n");
			return -EINVAL;
		}
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	/*
	 * MMAP requires page_aligned buffers.
	 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
	 * so, we need to do the same here.
	 */
	length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
	if (length < (vma->vm_end - vma->vm_start)) {
		dprintk(1, "MMAP invalid, as it would overflow buffer length\n");
		return -EINVAL;
	}

	ret = call_memop(q, mmap, vb->planes[plane].mem_priv, vma);
	if (ret)
		return ret;

	dprintk(3, "Buffer %d, plane %d successfully mapped\n", buffer, plane);
	return 0;
}
EXPORT_SYMBOL_GPL(vb2_mmap);

#ifndef CONFIG_MMU
/*
 * NOMMU counterpart of mmap support: translate the pgoff "cookie" into the
 * kernel virtual address of the matching plane.
 */
unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
				    unsigned long addr,
				    unsigned long len,
				    unsigned long pgoff,
				    unsigned long flags)
{
	unsigned long off = pgoff << PAGE_SHIFT;
	struct vb2_buffer *vb;
	unsigned int buffer, plane;
	int ret;

	if (q->memory != V4L2_MEMORY_MMAP) {
		dprintk(1, "Queue is not currently set up for mmap\n");
		return -EINVAL;
	}

	/*
	 * Find the plane corresponding to the offset passed by userspace.
	 */
	ret = __find_plane_by_offset(q, off, &buffer, &plane);
	if (ret)
		return ret;

	vb = q->bufs[buffer];

	return (unsigned long)vb2_plane_vaddr(vb, plane);
}
EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
#endif

static int __vb2_init_fileio(struct vb2_queue *q, int read);
static int __vb2_cleanup_fileio(struct vb2_queue *q);

/**
 * vb2_poll() - implements poll userspace operation
 * @q:		videobuf2 queue
 * @file:	file argument passed to the poll file operation handler
 * @wait:	wait argument passed to the poll file operation handler
 *
 * This function implements poll file operation handler for a driver.
 * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
 * be informed that the file descriptor of a video device is available for
 * reading.
 * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
 * will be reported as available for writing.
 *
 * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
 * pending events.
 *
 * The return values from this function are intended to be directly returned
 * from poll handler in driver.
*/
unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_buffer *vb = NULL;
	unsigned int res = 0;
	unsigned long flags;

	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			res = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
	}

	/* Nothing more to do if the caller is not polling for I/O. */
	if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
		return res;
	if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
		return res;

	/*
	 * Start file I/O emulator only if streaming API has not been used yet.
	 */
	if (q->num_buffers == 0 && q->fileio == NULL) {
		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM))) {
			if (__vb2_init_fileio(q, 1))
				return res | POLLERR;
		}
		if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM))) {
			if (__vb2_init_fileio(q, 0))
				return res | POLLERR;
			/*
			 * Write to OUTPUT queue can be done immediately.
			 */
			return res | POLLOUT | POLLWRNORM;
		}
	}

	/*
	 * There is nothing to wait for if no buffers have already been queued.
	 */
	if (list_empty(&q->queued_list))
		return res | POLLERR;

	if (list_empty(&q->done_list))
		poll_wait(file, &q->done_wq, wait);

	/*
	 * Take first buffer available for dequeuing.
	 */
	spin_lock_irqsave(&q->done_lock, flags);
	if (!list_empty(&q->done_list))
		vb = list_first_entry(&q->done_list, struct vb2_buffer,
					done_entry);
	spin_unlock_irqrestore(&q->done_lock, flags);

	if (vb && (vb->state == VB2_BUF_STATE_DONE
			|| vb->state == VB2_BUF_STATE_ERROR)) {
		return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
			res | POLLOUT | POLLWRNORM :
			res | POLLIN | POLLRDNORM;
	}
	return res;
}
EXPORT_SYMBOL_GPL(vb2_poll);

/**
 * vb2_queue_init() - initialize a videobuf2 queue
 * @q:		videobuf2 queue; this structure should be allocated in driver
 *
 * The vb2_queue structure should be allocated by the driver. The driver is
 * responsible of clearing it's content and setting initial values for some
 * required entries before calling this function.
 * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
 * to the struct vb2_queue description in include/media/videobuf2-core.h
 * for more information.
 */
int vb2_queue_init(struct vb2_queue *q)
{
	/*
	 * Sanity check
	 */
	if (WARN_ON(!q)			  ||
	    WARN_ON(!q->ops)		  ||
	    WARN_ON(!q->mem_ops)	  ||
	    WARN_ON(!q->type)		  ||
	    WARN_ON(!q->io_modes)	  ||
	    WARN_ON(!q->ops->queue_setup) ||
	    WARN_ON(!q->ops->buf_queue)   ||
	    WARN_ON(q->timestamp_type & ~V4L2_BUF_FLAG_TIMESTAMP_MASK))
		return -EINVAL;

	/* Warn that the driver should choose an appropriate timestamp type */
	WARN_ON(q->timestamp_type == V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);

	INIT_LIST_HEAD(&q->queued_list);
	INIT_LIST_HEAD(&q->done_list);
	spin_lock_init(&q->done_lock);
	init_waitqueue_head(&q->done_wq);

	if (q->buf_struct_size == 0)
		q->buf_struct_size = sizeof(struct vb2_buffer);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_queue_init);

/**
 * vb2_queue_release() - stop streaming, release the queue and free memory
 * @q:		videobuf2 queue
 *
 * This function stops streaming and performs necessary clean ups, including
 * freeing video buffer memory. The driver is responsible for freeing
 * the vb2_queue structure itself.
 */
void vb2_queue_release(struct vb2_queue *q)
{
	__vb2_cleanup_fileio(q);
	__vb2_queue_cancel(q);
	__vb2_queue_free(q, q->num_buffers);
}
EXPORT_SYMBOL_GPL(vb2_queue_release);

/**
 * struct vb2_fileio_buf - buffer context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API.
This structure is used for
 * tracking context related to the buffers.
 */
struct vb2_fileio_buf {
	void *vaddr;		/* kernel mapping of plane 0 */
	unsigned int size;	/* bytes available in the plane */
	unsigned int pos;	/* current read/write position within the plane */
	unsigned int queued:1;	/* buffer is currently queued to vb2 */
};

/**
 * struct vb2_fileio_data - queue context used by file io emulator
 *
 * vb2 provides a compatibility layer and emulator of file io (read and
 * write) calls on top of streaming API. For proper operation it required
 * this structure to save the driver state between each call of the read
 * or write function.
 */
struct vb2_fileio_data {
	struct v4l2_requestbuffers req;
	struct v4l2_buffer b;
	struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
	unsigned int index;	/* buffer used by the next file io call */
	unsigned int q_count;	/* number of buffers queued so far */
	unsigned int dq_count;	/* number of buffers dequeued so far */
	unsigned int flags;	/* VB2_FILEIO_* flags copied from the queue */
};

/**
 * __vb2_init_fileio() - initialize file io emulator
 * @q:		videobuf2 queue
 * @read:	mode selector (1 means read, 0 means write)
 */
static int __vb2_init_fileio(struct vb2_queue *q, int read)
{
	struct vb2_fileio_data *fileio;
	int i, ret;
	unsigned int count = 0;

	/*
	 * Sanity check
	 */
	if ((read && !(q->io_modes & VB2_READ)) ||
	   (!read && !(q->io_modes & VB2_WRITE)))
		BUG();

	/*
	 * Check if device supports mapping buffers to kernel virtual space.
	 */
	if (!q->mem_ops->vaddr)
		return -EBUSY;

	/*
	 * Check if streaming api has not been already activated.
	 */
	if (q->streaming || q->num_buffers > 0)
		return -EBUSY;

	/*
	 * Start with count 1, driver can increase it in queue_setup()
	 */
	count = 1;

	dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
		(read) ? "read" : "write", count, q->io_flags);

	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
	if (fileio == NULL)
		return -ENOMEM;

	fileio->flags = q->io_flags;

	/*
	 * Request buffers and use MMAP type to force driver
	 * to allocate buffers by itself.
	 */
	fileio->req.count = count;
	fileio->req.memory = V4L2_MEMORY_MMAP;
	fileio->req.type = q->type;
	ret = vb2_reqbufs(q, &fileio->req);
	if (ret)
		goto err_kfree;

	/*
	 * Check if plane_count is correct
	 * (multiplane buffers are not supported).
	 */
	if (q->bufs[0]->num_planes != 1) {
		ret = -EBUSY;
		goto err_reqbufs;
	}

	/*
	 * Get kernel address of each buffer.
	 */
	for (i = 0; i < q->num_buffers; i++) {
		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
		if (fileio->bufs[i].vaddr == NULL) {
			ret = -EINVAL;
			goto err_reqbufs;
		}
		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
	}

	/*
	 * Read mode requires pre queuing of all buffers.
	 */
	if (read) {
		/*
		 * Queue all buffers.
		 */
		for (i = 0; i < q->num_buffers; i++) {
			struct v4l2_buffer *b = &fileio->b;
			memset(b, 0, sizeof(*b));
			b->type = q->type;
			b->memory = q->memory;
			b->index = i;
			ret = vb2_qbuf(q, b);
			if (ret)
				goto err_reqbufs;
			fileio->bufs[i].queued = 1;
		}

		/*
		 * Start streaming.
		 */
		ret = vb2_streamon(q, q->type);
		if (ret)
			goto err_reqbufs;
	}

	q->fileio = fileio;

	return ret;

err_reqbufs:
	fileio->req.count = 0;
	vb2_reqbufs(q, &fileio->req);

err_kfree:
	kfree(fileio);
	return ret;
}

/**
 * __vb2_cleanup_fileio() - free resourced used by file io emulator
 * @q:		videobuf2 queue
 */
static int __vb2_cleanup_fileio(struct vb2_queue *q)
{
	struct vb2_fileio_data *fileio = q->fileio;

	if (fileio) {
		/*
		 * Hack fileio context to enable direct calls to vb2 ioctl
		 * interface.
		 */
		q->fileio = NULL;

		vb2_streamoff(q, q->type);
		fileio->req.count = 0;
		vb2_reqbufs(q, &fileio->req);
		kfree(fileio);
		dprintk(3, "file io emulator closed\n");
	}
	return 0;
}

/**
 * __vb2_perform_fileio() - perform a single file io (read or write) operation
 * @q:		videobuf2 queue
 * @data:	pointed to target userspace buffer
 * @count:	number of bytes to read or write
 * @ppos:	file handle position tracking pointer
 * @nonblock:	mode selector (1 means blocking calls, 0 means nonblocking)
 * @read:	access mode selector (1 means read, 0 means write)
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	int ret, index;

	dprintk(3, "file io: mode %s, offset %ld, count %zd, %sblocking\n",
		read ?
"read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	/*
	 * Initialize emulator on first call.
	 */
	if (!q->fileio) {
		ret = __vb2_init_fileio(q, read);
		dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Hack fileio context to enable direct calls to vb2 ioctl interface.
	 * The pointer will be restored before returning from this function.
	 */
	q->fileio = NULL;

	index = fileio->index;
	buf = &fileio->bufs[index];

	/*
	 * Check if we need to dequeue the buffer.
	 */
	if (buf->queued) {
		struct vb2_buffer *vb;

		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		memset(&fileio->b, 0, sizeof(fileio->b));
		fileio->b.type = q->type;
		fileio->b.memory = q->memory;
		fileio->b.index = index;
		ret = vb2_dqbuf(q, &fileio->b, nonblock);
		dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
		if (ret)
			goto end;
		fileio->dq_count += 1;

		/*
		 * Get number of bytes filled by the driver
		 */
		vb = q->bufs[index];
		buf->size = vb2_get_plane_payload(vb, 0);
		buf->queued = 0;
	}

	/*
	 * Limit count on last few bytes of the buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(5, "reducing read count: %zd\n", count);
	}

	/*
	 * Transfer data to userspace.
	 */
	dprintk(3, "file io: copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(3, "file io: error copying data\n");
		ret = -EFAULT;
		goto end;
	}

	/*
	 * Update counters.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue next buffer if required.
	 */
	if (buf->pos == buf->size ||
	   (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
		/*
		 * Check if this is the last buffer to read.
		 */
		if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
		    fileio->dq_count == 1) {
			dprintk(3, "file io: read limit reached\n");
			/*
			 * Restore fileio pointer and release the context.
*/ q->fileio = fileio; return __vb2_cleanup_fileio(q); } /* * Call vb2_qbuf and give buffer to the driver. */ memset(&fileio->b, 0, sizeof(fileio->b)); fileio->b.type = q->type; fileio->b.memory = q->memory; fileio->b.index = index; fileio->b.bytesused = buf->pos; ret = vb2_qbuf(q, &fileio->b); dprintk(5, "file io: vb2_dbuf result: %d\n", ret); if (ret) goto end; /* * Buffer has been queued, update the status */ buf->pos = 0; buf->queued = 1; buf->size = q->bufs[0]->v4l2_planes[0].length; fileio->q_count += 1; /* * Switch to the next buffer */ fileio->index = (index + 1) % q->num_buffers; /* * Start streaming if required. */ if (!read && !q->streaming) { ret = vb2_streamon(q, q->type); if (ret) goto end; } } /* * Return proper number of bytes processed. */ if (ret == 0) ret = count; end: /* * Restore the fileio context and block vb2 ioctl interface. */ q->fileio = fileio; return ret; } size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count, loff_t *ppos, int nonblocking) { return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1); } EXPORT_SYMBOL_GPL(vb2_read); size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count, loff_t *ppos, int nonblocking) { return __vb2_perform_fileio(q, (char __user *) data, count, ppos, nonblocking, 0); } EXPORT_SYMBOL_GPL(vb2_write); /* * The following functions are not part of the vb2 core API, but are helper * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations * and struct vb2_ops. * They contain boilerplate code that most if not all drivers have to do * and so they simplify the driver code. */ /* The queue is busy if there is a owner and you are not that owner. 
*/
static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
{
	return vdev->queue->owner && vdev->queue->owner != file->private_data;
}

/* vb2 ioctl helpers */

int vb2_ioctl_reqbufs(struct file *file, void *priv,
			  struct v4l2_requestbuffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = __verify_memory_type(vdev->queue, p->memory, p->type);

	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = __reqbufs(vdev->queue, p);
	/* If count == 0, then the owner has released all buffers and he
	   is no longer owner of the queue. Otherwise we have a new owner. */
	if (res == 0)
		vdev->queue->owner = p->count ? file->private_data : NULL;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);

int vb2_ioctl_create_bufs(struct file *file, void *priv,
			  struct v4l2_create_buffers *p)
{
	struct video_device *vdev = video_devdata(file);
	int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);

	p->index = vdev->queue->num_buffers;
	/* If count == 0, then just check if memory and type are valid.
	   Any -EBUSY result from __verify_memory_type can be mapped to 0. */
	if (p->count == 0)
		return res != -EBUSY ? res : 0;
	if (res)
		return res;
	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	res = __create_bufs(vdev->queue, p);
	if (res == 0)
		vdev->queue->owner = file->private_data;
	return res;
}
EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);

int vb2_ioctl_prepare_buf(struct file *file, void *priv,
			  struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_prepare_buf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);

int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
	return vb2_querybuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);

int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_qbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);

int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);

int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamon(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);

int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_streamoff(vdev->queue, i);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);

int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
{
	struct video_device *vdev = video_devdata(file);

	if (vb2_queue_is_busy(vdev, file))
		return -EBUSY;
	return vb2_expbuf(vdev->queue, p);
}
EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);

/* v4l2_file_operations helpers */

int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ?
vdev->queue->lock : vdev->lock;
	int err;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	err = vb2_mmap(vdev->queue, vma);
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_mmap);

int vb2_fop_release(struct file *file)
{
	struct video_device *vdev = video_devdata(file);

	/* Only the queue owner's release tears the queue down. */
	if (file->private_data == vdev->queue->owner) {
		vb2_queue_release(vdev->queue);
		vdev->queue->owner = NULL;
	}
	return v4l2_fh_release(file);
}
EXPORT_SYMBOL_GPL(vb2_fop_release);

ssize_t vb2_fop_write(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_write(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	/* Starting file io makes this filehandle the queue owner. */
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_write);

ssize_t vb2_fop_read(struct file *file, char __user *buf,
		size_t count, loff_t *ppos)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int err = -EBUSY;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	if (vb2_queue_is_busy(vdev, file))
		goto exit;
	err = vb2_read(vdev->queue, buf, count, ppos,
		       file->f_flags & O_NONBLOCK);
	/* Starting file io makes this filehandle the queue owner. */
	if (vdev->queue->fileio)
		vdev->queue->owner = file->private_data;
exit:
	if (lock)
		mutex_unlock(lock);
	return err;
}
EXPORT_SYMBOL_GPL(vb2_fop_read);

unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
{
	struct video_device *vdev = video_devdata(file);
	struct vb2_queue *q = vdev->queue;
	struct mutex *lock = q->lock ? q->lock : vdev->lock;
	unsigned long req_events = poll_requested_events(wait);
	unsigned res;
	void *fileio;
	bool must_lock = false;

	/* Try to be smart: only lock if polling might start fileio,
	   otherwise locking will only introduce unwanted delays. */
	if (q->num_buffers == 0 && q->fileio == NULL) {
		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
				(req_events & (POLLIN | POLLRDNORM)))
			must_lock = true;
		else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
				(req_events & (POLLOUT | POLLWRNORM)))
			must_lock = true;
	}

	/* If locking is needed, but this helper doesn't know how, then you
	   shouldn't be using this helper but you should write your own. */
	WARN_ON(must_lock && !lock);

	if (must_lock && lock && mutex_lock_interruptible(lock))
		return POLLERR;

	fileio = q->fileio;

	res = vb2_poll(vdev->queue, file, wait);

	/* If fileio was started, then we have a new queue owner. */
	if (must_lock && !fileio && q->fileio)
		q->owner = file->private_data;

	if (must_lock && lock)
		mutex_unlock(lock);

	return res;
}
EXPORT_SYMBOL_GPL(vb2_fop_poll);

#ifndef CONFIG_MMU
unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct video_device *vdev = video_devdata(file);
	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
	int ret;

	if (lock && mutex_lock_interruptible(lock))
		return -ERESTARTSYS;
	ret = vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
	if (lock)
		mutex_unlock(lock);
	return ret;
}
EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
#endif

/* vb2_ops helpers. Only use if vq->lock is non-NULL.
*/ void vb2_ops_wait_prepare(struct vb2_queue *vq) { mutex_unlock(vq->lock); } EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare); void vb2_ops_wait_finish(struct vb2_queue *vq) { mutex_lock(vq->lock); } EXPORT_SYMBOL_GPL(vb2_ops_wait_finish); MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2"); MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski"); MODULE_LICENSE("GPL");
gpl-2.0
pkirchhofer/nsa325-kernel
drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
66
55974
/* * V4L2 Driver for SuperH Mobile CEU interface * * Copyright (C) 2008 Magnus Damm * * Based on V4L2 Driver for PXA camera host - "pxa_camera.c", * * Copyright (C) 2006, Sascha Hauer, Pengutronix * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/moduleparam.h> #include <linux/of.h> #include <linux/time.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/videodev2.h> #include <linux/pm_runtime.h> #include <linux/sched.h> #include <media/v4l2-async.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/soc_camera.h> #include <media/sh_mobile_ceu.h> #include <media/sh_mobile_csi2.h> #include <media/videobuf2-dma-contig.h> #include <media/v4l2-mediabus.h> #include <media/soc_mediabus.h> #include "soc_scale_crop.h" /* register offsets for sh7722 / sh7723 */ #define CAPSR 0x00 /* Capture start register */ #define CAPCR 0x04 /* Capture control register */ #define CAMCR 0x08 /* Capture interface control register */ #define CMCYR 0x0c /* Capture interface cycle register */ #define CAMOR 0x10 /* Capture interface offset register */ #define CAPWR 0x14 /* Capture interface width register */ #define CAIFR 0x18 /* Capture interface input format register */ #define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */ #define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */ #define CRCNTR 0x28 /* CEU 
register control register */ #define CRCMPR 0x2c /* CEU register forcible control register */ #define CFLCR 0x30 /* Capture filter control register */ #define CFSZR 0x34 /* Capture filter size clip register */ #define CDWDR 0x38 /* Capture destination width register */ #define CDAYR 0x3c /* Capture data address Y register */ #define CDACR 0x40 /* Capture data address C register */ #define CDBYR 0x44 /* Capture data bottom-field address Y register */ #define CDBCR 0x48 /* Capture data bottom-field address C register */ #define CBDSR 0x4c /* Capture bundle destination size register */ #define CFWCR 0x5c /* Firewall operation control register */ #define CLFCR 0x60 /* Capture low-pass filter control register */ #define CDOCR 0x64 /* Capture data output control register */ #define CDDCR 0x68 /* Capture data complexity level register */ #define CDDAR 0x6c /* Capture data complexity level address register */ #define CEIER 0x70 /* Capture event interrupt enable register */ #define CETCR 0x74 /* Capture event flag clear register */ #define CSTSR 0x7c /* Capture status register */ #define CSRTR 0x80 /* Capture software reset register */ #define CDSSR 0x84 /* Capture data size register */ #define CDAYR2 0x90 /* Capture data address Y register 2 */ #define CDACR2 0x94 /* Capture data address C register 2 */ #define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */ #define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */ #undef DEBUG_GEOMETRY #ifdef DEBUG_GEOMETRY #define dev_geo dev_info #else #define dev_geo dev_dbg #endif /* per video frame buffer */ struct sh_mobile_ceu_buffer { struct vb2_buffer vb; /* v4l buffer must be first */ struct list_head queue; }; struct sh_mobile_ceu_dev { struct soc_camera_host ici; /* Asynchronous CSI2 linking */ struct v4l2_async_subdev *csi2_asd; struct v4l2_subdev *csi2_sd; /* Synchronous probing compatibility */ struct platform_device *csi2_pdev; unsigned int irq; void __iomem *base; size_t video_limit; size_t 
buf_total; spinlock_t lock; /* Protects video buffer lists */ struct list_head capture; struct vb2_buffer *active; struct vb2_alloc_ctx *alloc_ctx; struct sh_mobile_ceu_info *pdata; struct completion complete; u32 cflcr; /* static max sizes either from platform data or default */ int max_width; int max_height; enum v4l2_field field; int sequence; unsigned long flags; unsigned int image_mode:1; unsigned int is_16bit:1; unsigned int frozen:1; }; struct sh_mobile_ceu_cam { /* CEU offsets within the camera output, before the CEU scaler */ unsigned int ceu_left; unsigned int ceu_top; /* Client output, as seen by the CEU */ unsigned int width; unsigned int height; /* * User window from S_CROP / G_CROP, produced by client cropping and * scaling, CEU scaling and CEU cropping, mapped back onto the client * input window */ struct v4l2_rect subrect; /* Camera cropping rectangle */ struct v4l2_rect rect; const struct soc_mbus_pixelfmt *extra_fmt; enum v4l2_mbus_pixelcode code; }; static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb) { return container_of(vb, struct sh_mobile_ceu_buffer, vb); } static void ceu_write(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs, u32 data) { iowrite32(data, priv->base + reg_offs); } static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs) { return ioread32(priv->base + reg_offs); } static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev) { int i, success = 0; ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ /* wait CSTSR.CPTON bit */ for (i = 0; i < 1000; i++) { if (!(ceu_read(pcdev, CSTSR) & 1)) { success++; break; } udelay(1); } /* wait CAPSR.CPKIL bit */ for (i = 0; i < 1000; i++) { if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) { success++; break; } udelay(1); } if (2 != success) { dev_warn(pcdev->ici.v4l2_dev.dev, "soft reset time out\n"); return -EIO; } return 0; } /* * Videobuf operations */ /* * .queue_setup() is called to check, whether the driver can accept the * requested number of 
buffers and to fill in plane sizes * for the current frame format if required */ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq, const struct v4l2_format *fmt, unsigned int *count, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; if (fmt) { const struct soc_camera_format_xlate *xlate = soc_camera_xlate_by_fourcc(icd, fmt->fmt.pix.pixelformat); unsigned int bytes_per_line; int ret; if (!xlate) return -EINVAL; ret = soc_mbus_bytes_per_line(fmt->fmt.pix.width, xlate->host_fmt); if (ret < 0) return ret; bytes_per_line = max_t(u32, fmt->fmt.pix.bytesperline, ret); ret = soc_mbus_image_size(xlate->host_fmt, bytes_per_line, fmt->fmt.pix.height); if (ret < 0) return ret; sizes[0] = max_t(u32, fmt->fmt.pix.sizeimage, ret); } else { /* Called from VIDIOC_REQBUFS or in compatibility mode */ sizes[0] = icd->sizeimage; } alloc_ctxs[0] = pcdev->alloc_ctx; if (!vq->num_buffers) pcdev->sequence = 0; if (!*count) *count = 2; /* If *num_planes != 0, we have already verified *count. 
*/ if (pcdev->video_limit && !*num_planes) { size_t size = PAGE_ALIGN(sizes[0]) * *count; if (size + pcdev->buf_total > pcdev->video_limit) *count = (pcdev->video_limit - pcdev->buf_total) / PAGE_ALIGN(sizes[0]); } *num_planes = 1; dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]); return 0; } #define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */ #define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */ #define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */ #define CEU_CEIER_VBP (1 << 20) /* vbp error */ #define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */ #define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP) /* * return value doesn't reflex the success/failure to queue the new buffer, * but rather the status of the previous buffer. */ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev) { struct soc_camera_device *icd = pcdev->ici.icd; dma_addr_t phys_addr_top, phys_addr_bottom; unsigned long top1, top2; unsigned long bottom1, bottom2; u32 status; bool planar; int ret = 0; /* * The hardware is _very_ picky about this sequence. Especially * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge * several not-so-well documented interrupt sources in CETCR. */ ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK); status = ceu_read(pcdev, CETCR); ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC); if (!pcdev->frozen) ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK); ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP); ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW); /* * When a VBP interrupt occurs, a capture end interrupt does not occur * and the image of that frame is not captured correctly. So, soft reset * is needed here. 
*/ if (status & CEU_CEIER_VBP) { sh_mobile_ceu_soft_reset(pcdev); ret = -EIO; } if (pcdev->frozen) { complete(&pcdev->complete); return ret; } if (!pcdev->active) return ret; if (V4L2_FIELD_INTERLACED_BT == pcdev->field) { top1 = CDBYR; top2 = CDBCR; bottom1 = CDAYR; bottom2 = CDACR; } else { top1 = CDAYR; top2 = CDACR; bottom1 = CDBYR; bottom2 = CDBCR; } phys_addr_top = vb2_dma_contig_plane_dma_addr(pcdev->active, 0); switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: planar = true; break; default: planar = false; } ceu_write(pcdev, top1, phys_addr_top); if (V4L2_FIELD_NONE != pcdev->field) { phys_addr_bottom = phys_addr_top + icd->bytesperline; ceu_write(pcdev, bottom1, phys_addr_bottom); } if (planar) { phys_addr_top += icd->bytesperline * icd->user_height; ceu_write(pcdev, top2, phys_addr_top); if (V4L2_FIELD_NONE != pcdev->field) { phys_addr_bottom = phys_addr_top + icd->bytesperline; ceu_write(pcdev, bottom2, phys_addr_bottom); } } ceu_write(pcdev, CAPSR, 0x1); /* start capture */ return ret; } static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb) { struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb); /* Added list head initialization on alloc */ WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb); return 0; } static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb) { struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb); unsigned long size; size = icd->sizeimage; if (vb2_plane_size(vb, 0) < size) { dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n", vb->v4l2_buf.index, vb2_plane_size(vb, 0), size); goto error; } vb2_set_plane_payload(vb, 0, size); dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__, vb, vb2_plane_vaddr(vb, 0), 
vb2_get_plane_payload(vb, 0)); #ifdef DEBUG /* * This can be useful if you want to see if we actually fill * the buffer with something */ if (vb2_plane_vaddr(vb, 0)) memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0)); #endif spin_lock_irq(&pcdev->lock); list_add_tail(&buf->queue, &pcdev->capture); if (!pcdev->active) { /* * Because there were no active buffer at this moment, * we are not interested in the return value of * sh_mobile_ceu_capture here. */ pcdev->active = vb; sh_mobile_ceu_capture(pcdev); } spin_unlock_irq(&pcdev->lock); return; error: vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); } static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb) { struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb); struct sh_mobile_ceu_dev *pcdev = ici->priv; spin_lock_irq(&pcdev->lock); if (pcdev->active == vb) { /* disable capture (release DMA buffer), reset */ ceu_write(pcdev, CAPSR, 1 << 16); pcdev->active = NULL; } /* * Doesn't hurt also if the list is empty, but it hurts, if queuing the * buffer failed, and .buf_init() hasn't been called */ if (buf->queue.next) list_del_init(&buf->queue); pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0)); dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__, pcdev->buf_total); spin_unlock_irq(&pcdev->lock); } static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb) { struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0)); dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__, pcdev->buf_total); /* This is for locking debugging only */ INIT_LIST_HEAD(&to_ceu_vb(vb)->queue); return 0; } static int sh_mobile_ceu_stop_streaming(struct vb2_queue 
*q) { struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct list_head *buf_head, *tmp; spin_lock_irq(&pcdev->lock); pcdev->active = NULL; list_for_each_safe(buf_head, tmp, &pcdev->capture) list_del_init(buf_head); spin_unlock_irq(&pcdev->lock); return sh_mobile_ceu_soft_reset(pcdev); } static struct vb2_ops sh_mobile_ceu_videobuf_ops = { .queue_setup = sh_mobile_ceu_videobuf_setup, .buf_prepare = sh_mobile_ceu_videobuf_prepare, .buf_queue = sh_mobile_ceu_videobuf_queue, .buf_cleanup = sh_mobile_ceu_videobuf_release, .buf_init = sh_mobile_ceu_videobuf_init, .wait_prepare = soc_camera_unlock, .wait_finish = soc_camera_lock, .stop_streaming = sh_mobile_ceu_stop_streaming, }; static irqreturn_t sh_mobile_ceu_irq(int irq, void *data) { struct sh_mobile_ceu_dev *pcdev = data; struct vb2_buffer *vb; int ret; spin_lock(&pcdev->lock); vb = pcdev->active; if (!vb) /* Stale interrupt from a released buffer */ goto out; list_del_init(&to_ceu_vb(vb)->queue); if (!list_empty(&pcdev->capture)) pcdev->active = &list_entry(pcdev->capture.next, struct sh_mobile_ceu_buffer, queue)->vb; else pcdev->active = NULL; ret = sh_mobile_ceu_capture(pcdev); v4l2_get_timestamp(&vb->v4l2_buf.timestamp); if (!ret) { vb->v4l2_buf.field = pcdev->field; vb->v4l2_buf.sequence = pcdev->sequence++; } vb2_buffer_done(vb, ret < 0 ? 
VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE); out: spin_unlock(&pcdev->lock); return IRQ_HANDLED; } static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev) { struct v4l2_subdev *sd; if (pcdev->csi2_sd) return pcdev->csi2_sd; if (pcdev->csi2_asd) { char name[] = "sh-mobile-csi2"; v4l2_device_for_each_subdev(sd, &pcdev->ici.v4l2_dev) if (!strncmp(name, sd->name, sizeof(name) - 1)) { pcdev->csi2_sd = sd; return sd; } } return NULL; } static struct v4l2_subdev *csi2_subdev(struct sh_mobile_ceu_dev *pcdev, struct soc_camera_device *icd) { struct v4l2_subdev *sd = pcdev->csi2_sd; return sd && sd->grp_id == soc_camera_grp_id(icd) ? sd : NULL; } static int sh_mobile_ceu_add_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct v4l2_subdev *csi2_sd = find_csi2(pcdev); int ret; if (csi2_sd) { csi2_sd->grp_id = soc_camera_grp_id(icd); v4l2_set_subdev_hostdata(csi2_sd, icd); } ret = v4l2_subdev_call(csi2_sd, core, s_power, 1); if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV) return ret; /* * -ENODEV is special: either csi2_sd == NULL or the CSI-2 driver * has not found this soc-camera device among its clients */ if (csi2_sd && ret == -ENODEV) csi2_sd->grp_id = 0; dev_info(icd->parent, "SuperH Mobile CEU%s driver attached to camera %d\n", csi2_sd && csi2_sd->grp_id ? 
"/CSI-2" : "", icd->devnum); return 0; } static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct v4l2_subdev *csi2_sd = find_csi2(pcdev); dev_info(icd->parent, "SuperH Mobile CEU driver detached from camera %d\n", icd->devnum); v4l2_subdev_call(csi2_sd, core, s_power, 0); } /* Called with .host_lock held */ static int sh_mobile_ceu_clock_start(struct soc_camera_host *ici) { struct sh_mobile_ceu_dev *pcdev = ici->priv; int ret; pm_runtime_get_sync(ici->v4l2_dev.dev); pcdev->buf_total = 0; ret = sh_mobile_ceu_soft_reset(pcdev); return 0; } /* Called with .host_lock held */ static void sh_mobile_ceu_clock_stop(struct soc_camera_host *ici) { struct sh_mobile_ceu_dev *pcdev = ici->priv; /* disable capture, disable interrupts */ ceu_write(pcdev, CEIER, 0); sh_mobile_ceu_soft_reset(pcdev); /* make sure active buffer is canceled */ spin_lock_irq(&pcdev->lock); if (pcdev->active) { list_del_init(&to_ceu_vb(pcdev->active)->queue); vb2_buffer_done(pcdev->active, VB2_BUF_STATE_ERROR); pcdev->active = NULL; } spin_unlock_irq(&pcdev->lock); pm_runtime_put(ici->v4l2_dev.dev); } /* * See chapter 29.4.12 "Capture Filter Control Register (CFLCR)" * in SH7722 Hardware Manual */ static unsigned int size_dst(unsigned int src, unsigned int scale) { unsigned int mant_pre = scale >> 12; if (!src || !scale) return src; return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) * mant_pre * 4096 / scale + 1; } static u16 calc_scale(unsigned int src, unsigned int *dst) { u16 scale; if (src == *dst) return 0; scale = (src * 4096 / *dst) & ~7; while (scale > 4096 && size_dst(src, scale) < *dst) scale -= 8; *dst = size_dst(src, scale); return scale; } /* rect is guaranteed to not exceed the scaled camera rectangle */ static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct 
sh_mobile_ceu_cam *cam = icd->host_priv; struct sh_mobile_ceu_dev *pcdev = ici->priv; unsigned int height, width, cdwdr_width, in_width, in_height; unsigned int left_offset, top_offset; u32 camor; dev_geo(icd->parent, "Crop %ux%u@%u:%u\n", icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top); left_offset = cam->ceu_left; top_offset = cam->ceu_top; WARN_ON(icd->user_width & 3 || icd->user_height & 3); width = icd->user_width; if (pcdev->image_mode) { in_width = cam->width; if (!pcdev->is_16bit) { in_width *= 2; left_offset *= 2; } } else { unsigned int w_factor; switch (icd->current_fmt->host_fmt->packing) { case SOC_MBUS_PACKING_2X8_PADHI: w_factor = 2; break; default: w_factor = 1; } in_width = cam->width * w_factor; left_offset *= w_factor; } cdwdr_width = icd->bytesperline; height = icd->user_height; in_height = cam->height; if (V4L2_FIELD_NONE != pcdev->field) { height = (height / 2) & ~3; in_height /= 2; top_offset /= 2; cdwdr_width *= 2; } /* CSI2 special configuration */ if (csi2_subdev(pcdev, icd)) { in_width = ((in_width - 2) * 2); left_offset *= 2; } /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */ camor = left_offset | (top_offset << 16); dev_geo(icd->parent, "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor, (in_height << 16) | in_width, (height << 16) | width, cdwdr_width); ceu_write(pcdev, CAMOR, camor); ceu_write(pcdev, CAPWR, (in_height << 16) | in_width); /* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */ ceu_write(pcdev, CFSZR, (height << 16) | width); ceu_write(pcdev, CDWDR, cdwdr_width); } static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev) { u32 capsr = ceu_read(pcdev, CAPSR); ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */ return capsr; } static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr) { unsigned long timeout = jiffies + 10 * HZ; /* * Wait until the end of the current frame. 
It can take a long time, * but if it has been aborted by a CAPSR reset, it shoule exit sooner. */ while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout)) msleep(1); if (time_after(jiffies, timeout)) { dev_err(pcdev->ici.v4l2_dev.dev, "Timeout waiting for frame end! Interface problem?\n"); return; } /* Wait until reset clears, this shall not hang... */ while (ceu_read(pcdev, CAPSR) & (1 << 16)) udelay(10); /* Anything to restore? */ if (capsr & ~(1 << 16)) ceu_write(pcdev, CAPSR, capsr); } /* Find the bus subdevice driver, e.g., CSI2 */ static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev, struct soc_camera_device *icd) { return csi2_subdev(pcdev, icd) ? : soc_camera_to_subdev(icd); } #define CEU_BUS_FLAGS (V4L2_MBUS_MASTER | \ V4L2_MBUS_PCLK_SAMPLE_RISING | \ V4L2_MBUS_HSYNC_ACTIVE_HIGH | \ V4L2_MBUS_HSYNC_ACTIVE_LOW | \ V4L2_MBUS_VSYNC_ACTIVE_HIGH | \ V4L2_MBUS_VSYNC_ACTIVE_LOW | \ V4L2_MBUS_DATA_ACTIVE_HIGH) /* Capture is not running, no interrupts, no locking needed */ static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd); struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; unsigned long value, common_flags = CEU_BUS_FLAGS; u32 capsr = capture_save_reset(pcdev); unsigned int yuv_lineskip; int ret; /* * If the client doesn't implement g_mbus_config, we just use our * platform data */ ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); if (!ret) { common_flags = soc_mbus_config_compatible(&cfg, common_flags); if (!common_flags) return -EINVAL; } else if (ret != -ENOIOCTLCMD) { return ret; } /* Make choises, based on platform preferences */ if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) && (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) { if (pcdev->flags & SH_CEU_FLAG_HSYNC_LOW) common_flags &= 
~V4L2_MBUS_HSYNC_ACTIVE_HIGH; else common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW; } if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) && (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) { if (pcdev->flags & SH_CEU_FLAG_VSYNC_LOW) common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH; else common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW; } cfg.flags = common_flags; ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg); if (ret < 0 && ret != -ENOIOCTLCMD) return ret; if (icd->current_fmt->host_fmt->bits_per_sample > 8) pcdev->is_16bit = 1; else pcdev->is_16bit = 0; ceu_write(pcdev, CRCNTR, 0); ceu_write(pcdev, CRCMPR, 0); value = 0x00000010; /* data fetch by default */ yuv_lineskip = 0x10; switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: /* convert 4:2:2 -> 4:2:0 */ yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */ /* fall-through */ case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: switch (cam->code) { case V4L2_MBUS_FMT_UYVY8_2X8: value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */ break; case V4L2_MBUS_FMT_VYUY8_2X8: value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */ break; case V4L2_MBUS_FMT_YUYV8_2X8: value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */ break; case V4L2_MBUS_FMT_YVYU8_2X8: value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */ break; default: BUG(); } } if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 || icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61) value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */ value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0; value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 
1 << 0 : 0; if (csi2_subdev(pcdev, icd)) /* CSI2 mode */ value |= 3 << 12; else if (pcdev->is_16bit) value |= 1 << 12; else if (pcdev->flags & SH_CEU_FLAG_LOWER_8BIT) value |= 2 << 12; ceu_write(pcdev, CAMCR, value); ceu_write(pcdev, CAPCR, 0x00300000); switch (pcdev->field) { case V4L2_FIELD_INTERLACED_TB: value = 0x101; break; case V4L2_FIELD_INTERLACED_BT: value = 0x102; break; default: value = 0; break; } ceu_write(pcdev, CAIFR, value); sh_mobile_ceu_set_rect(icd); mdelay(1); dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr); ceu_write(pcdev, CFLCR, pcdev->cflcr); /* * A few words about byte order (observed in Big Endian mode) * * In data fetch mode bytes are received in chunks of 8 bytes. * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first) * * The data is however by default written to memory in reverse order: * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte) * * The lowest three bits of CDOCR allows us to do swapping, * using 7 we swap the data bytes to match the incoming order: * D0, D1, D2, D3, D4, D5, D6, D7 */ value = 0x00000007 | yuv_lineskip; ceu_write(pcdev, CDOCR, value); ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */ capture_restore(pcdev, capsr); /* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */ return 0; } static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd, unsigned char buswidth) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd); unsigned long common_flags = CEU_BUS_FLAGS; struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,}; int ret; ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg); if (!ret) common_flags = soc_mbus_config_compatible(&cfg, common_flags); else if (ret != -ENOIOCTLCMD) return ret; if (!common_flags || buswidth > 16) return -EINVAL; return 0; } static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = { { .fourcc = V4L2_PIX_FMT_NV12, 
.name = "NV12", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C, }, { .fourcc = V4L2_PIX_FMT_NV21, .name = "NV21", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_1_5X8, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PLANAR_2Y_C, }, { .fourcc = V4L2_PIX_FMT_NV16, .name = "NV16", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C, }, { .fourcc = V4L2_PIX_FMT_NV61, .name = "NV61", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, .layout = SOC_MBUS_LAYOUT_PLANAR_Y_C, }, }; /* This will be corrected as we get more formats */ static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt) { return fmt->packing == SOC_MBUS_PACKING_NONE || (fmt->bits_per_sample == 8 && fmt->packing == SOC_MBUS_PACKING_1_5X8) || (fmt->bits_per_sample == 8 && fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || (fmt->bits_per_sample > 8 && fmt->packing == SOC_MBUS_PACKING_EXTEND16); } static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl) { return container_of(ctrl->handler, struct soc_camera_device, ctrl_handler); } static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl) { struct soc_camera_device *icd = ctrl_to_icd(ctrl); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; switch (ctrl->id) { case V4L2_CID_SHARPNESS: switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: ceu_write(pcdev, CLFCR, !ctrl->val); return 0; } break; } return -EINVAL; } static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = { .s_ctrl = sh_mobile_ceu_s_ctrl, }; static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device 
*dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct sh_mobile_ceu_dev *pcdev = ici->priv; int ret, k, n; int formats = 0; struct sh_mobile_ceu_cam *cam; enum v4l2_mbus_pixelcode code; const struct soc_mbus_pixelfmt *fmt; ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); if (ret < 0) /* No more formats */ return 0; fmt = soc_mbus_get_fmtdesc(code); if (!fmt) { dev_warn(dev, "unsupported format code #%u: %d\n", idx, code); return 0; } if (!csi2_subdev(pcdev, icd)) { /* Are there any restrictions in the CSI-2 case? */ ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample); if (ret < 0) return 0; } if (!icd->host_priv) { struct v4l2_mbus_framefmt mf; struct v4l2_rect rect; int shift = 0; /* Add our control */ v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops, V4L2_CID_SHARPNESS, 0, 1, 1, 1); if (icd->ctrl_handler.error) return icd->ctrl_handler.error; /* FIXME: subwindow is lost between close / open */ /* Cache current client geometry */ ret = soc_camera_client_g_rect(sd, &rect); if (ret < 0) return ret; /* First time */ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; /* * All currently existing CEU implementations support 2560x1920 * or larger frames. If the sensor is proposing too big a frame, * don't bother with possibly supportred by the CEU larger * sizes, just try VGA multiples. If needed, this can be * adjusted in the future. 
*/ while ((mf.width > pcdev->max_width || mf.height > pcdev->max_height) && shift < 4) { /* Try 2560x1920, 1280x960, 640x480, 320x240 */ mf.width = 2560 >> shift; mf.height = 1920 >> shift; ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), video, s_mbus_fmt, &mf); if (ret < 0) return ret; shift++; } if (shift == 4) { dev_err(dev, "Failed to configure the client below %ux%x\n", mf.width, mf.height); return -EIO; } dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height); cam = kzalloc(sizeof(*cam), GFP_KERNEL); if (!cam) return -ENOMEM; /* We are called with current camera crop, initialise subrect with it */ cam->rect = rect; cam->subrect = rect; cam->width = mf.width; cam->height = mf.height; icd->host_priv = cam; } else { cam = icd->host_priv; } /* Beginning of a pass */ if (!idx) cam->extra_fmt = NULL; switch (code) { case V4L2_MBUS_FMT_UYVY8_2X8: case V4L2_MBUS_FMT_VYUY8_2X8: case V4L2_MBUS_FMT_YUYV8_2X8: case V4L2_MBUS_FMT_YVYU8_2X8: if (cam->extra_fmt) break; /* * Our case is simple so far: for any of the above four camera * formats we add all our four synthesized NV* formats, so, * just marking the device with a single flag suffices. If * the format generation rules are more complex, you would have * to actually hang your already added / counted formats onto * the host_priv pointer and check whether the format you're * going to add now is already there. 
*/ cam->extra_fmt = sh_mobile_ceu_formats; n = ARRAY_SIZE(sh_mobile_ceu_formats); formats += n; for (k = 0; xlate && k < n; k++) { xlate->host_fmt = &sh_mobile_ceu_formats[k]; xlate->code = code; xlate++; dev_dbg(dev, "Providing format %s using code %d\n", sh_mobile_ceu_formats[k].name, code); } break; default: if (!sh_mobile_ceu_packing_supported(fmt)) return 0; } /* Generic pass-through */ formats++; if (xlate) { xlate->host_fmt = fmt; xlate->code = code; xlate++; dev_dbg(dev, "Providing format %s in pass-through mode\n", fmt->name); } return formats; } static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd) { kfree(icd->host_priv); icd->host_priv = NULL; } #define scale_down(size, scale) soc_camera_shift_scale(size, 12, scale) #define calc_generic_scale(in, out) soc_camera_calc_scale(in, 12, out) /* * CEU can scale and crop, but we don't want to waste bandwidth and kill the * framerate by always requesting the maximum image from the client. See * Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of * scaling and cropping algorithms and for the meaning of referenced here steps. 
*/ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd, const struct v4l2_crop *a) { struct v4l2_crop a_writable = *a; const struct v4l2_rect *rect = &a_writable.c; struct device *dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct v4l2_crop cam_crop; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_rect *cam_rect = &cam_crop.c; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct v4l2_mbus_framefmt mf; unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v, out_width, out_height; int interm_width, interm_height; u32 capsr, cflcr; int ret; dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height, rect->left, rect->top); /* During camera cropping its output window can change too, stop CEU */ capsr = capture_save_reset(pcdev); dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr); /* * 1. - 2. Apply iterative camera S_CROP for new input window, read back * actual camera rectangle. */ ret = soc_camera_client_s_crop(sd, &a_writable, &cam_crop, &cam->rect, &cam->subrect); if (ret < 0) return ret; dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n", cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top); /* On success cam_crop contains current camera crop */ /* 3. Retrieve camera output window */ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; if (mf.width > pcdev->max_width || mf.height > pcdev->max_height) return -EINVAL; /* 4. 
Calculate camera scales */ scale_cam_h = calc_generic_scale(cam_rect->width, mf.width); scale_cam_v = calc_generic_scale(cam_rect->height, mf.height); /* Calculate intermediate window */ interm_width = scale_down(rect->width, scale_cam_h); interm_height = scale_down(rect->height, scale_cam_v); if (interm_width < icd->user_width) { u32 new_scale_h; new_scale_h = calc_generic_scale(rect->width, icd->user_width); mf.width = scale_down(cam_rect->width, new_scale_h); } if (interm_height < icd->user_height) { u32 new_scale_v; new_scale_v = calc_generic_scale(rect->height, icd->user_height); mf.height = scale_down(cam_rect->height, new_scale_v); } if (interm_width < icd->user_width || interm_height < icd->user_height) { ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), video, s_mbus_fmt, &mf); if (ret < 0) return ret; dev_geo(dev, "New camera output %ux%u\n", mf.width, mf.height); scale_cam_h = calc_generic_scale(cam_rect->width, mf.width); scale_cam_v = calc_generic_scale(cam_rect->height, mf.height); interm_width = scale_down(rect->width, scale_cam_h); interm_height = scale_down(rect->height, scale_cam_v); } /* Cache camera output window */ cam->width = mf.width; cam->height = mf.height; if (pcdev->image_mode) { out_width = min(interm_width, icd->user_width); out_height = min(interm_height, icd->user_height); } else { out_width = interm_width; out_height = interm_height; } /* * 5. Calculate CEU scales from camera scales from results of (5) and * the user window */ scale_ceu_h = calc_scale(interm_width, &out_width); scale_ceu_v = calc_scale(interm_height, &out_height); dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v); /* Apply CEU scales. 
*/ cflcr = scale_ceu_h | (scale_ceu_v << 16); if (cflcr != pcdev->cflcr) { pcdev->cflcr = cflcr; ceu_write(pcdev, CFLCR, cflcr); } icd->user_width = out_width & ~3; icd->user_height = out_height & ~3; /* Offsets are applied at the CEU scaling filter input */ cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1; cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1; /* 6. Use CEU cropping to crop to the new window. */ sh_mobile_ceu_set_rect(icd); cam->subrect = *rect; dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n", icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top); /* Restore capture. The CE bit can be cleared by the hardware */ if (pcdev->active) capsr |= 1; capture_restore(pcdev, capsr); /* Even if only camera cropping succeeded */ return ret; } static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd, struct v4l2_crop *a) { struct sh_mobile_ceu_cam *cam = icd->host_priv; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->c = cam->subrect; return 0; } /* Similar to set_crop multistage iterative algorithm */ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct device *dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; __u32 pixfmt = pix->pixelformat; const struct soc_camera_format_xlate *xlate; unsigned int ceu_sub_width = pcdev->max_width, ceu_sub_height = pcdev->max_height; u16 scale_v, scale_h; int ret; bool image_mode; enum v4l2_field field; switch (pix->field) { default: pix->field = V4L2_FIELD_NONE; /* fall-through */ case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_NONE: field = pix->field; break; case V4L2_FIELD_INTERLACED: field = V4L2_FIELD_INTERLACED_TB; break; } xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { dev_warn(dev, "Format %x 
not found\n", pixfmt); return -EINVAL; } /* 1.-4. Calculate desired client output geometry */ soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf, 12); mf.field = pix->field; mf.colorspace = pix->colorspace; mf.code = xlate->code; switch (pixfmt) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: image_mode = true; break; default: image_mode = false; } dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code, pix->width, pix->height); dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height); /* 5. - 9. */ ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect, &mf, &ceu_sub_width, &ceu_sub_height, image_mode && V4L2_FIELD_NONE == field, 12); dev_geo(dev, "5-9: client scale return %d\n", ret); /* Done with the camera. Now see if we can improve the result */ dev_geo(dev, "fmt %ux%u, requested %ux%u\n", mf.width, mf.height, pix->width, pix->height); if (ret < 0) return ret; if (mf.code != xlate->code) return -EINVAL; /* 9. Prepare CEU crop */ cam->width = mf.width; cam->height = mf.height; /* 10. Use CEU scaling to scale to the requested user window. 
*/ /* We cannot scale up */ if (pix->width > ceu_sub_width) ceu_sub_width = pix->width; if (pix->height > ceu_sub_height) ceu_sub_height = pix->height; pix->colorspace = mf.colorspace; if (image_mode) { /* Scale pix->{width x height} down to width x height */ scale_h = calc_scale(ceu_sub_width, &pix->width); scale_v = calc_scale(ceu_sub_height, &pix->height); } else { pix->width = ceu_sub_width; pix->height = ceu_sub_height; scale_h = 0; scale_v = 0; } pcdev->cflcr = scale_h | (scale_v << 16); /* * We have calculated CFLCR, the actual configuration will be performed * in sh_mobile_ceu_set_bus_param() */ dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n", ceu_sub_width, scale_h, pix->width, ceu_sub_height, scale_v, pix->height); cam->code = xlate->code; icd->current_fmt = xlate; pcdev->field = field; pcdev->image_mode = image_mode; /* CFSZR requirement */ pix->width &= ~3; pix->height &= ~3; return 0; } #define CEU_CHDW_MAX 8188U /* Maximum line stride */ static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct v4l2_mbus_framefmt mf; __u32 pixfmt = pix->pixelformat; int width, height; int ret; dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n", pixfmt, pix->width, pix->height); xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { xlate = icd->current_fmt; dev_dbg(icd->parent, "Format %x not found, keeping %x\n", pixfmt, xlate->host_fmt->fourcc); pixfmt = xlate->host_fmt->fourcc; pix->pixelformat = pixfmt; pix->colorspace = icd->colorspace; } /* FIXME: calculate using depth and bus width */ /* CFSZR requires height and width to be 4-pixel aligned */ v4l_bound_align_image(&pix->width, 2, pcdev->max_width, 2, &pix->height, 4, pcdev->max_height, 2, 0); width = pix->width; 
height = pix->height; /* limit to sensor capabilities */ mf.width = pix->width; mf.height = pix->height; mf.field = pix->field; mf.code = xlate->code; mf.colorspace = pix->colorspace; ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), video, try_mbus_fmt, &mf); if (ret < 0) return ret; pix->width = mf.width; pix->height = mf.height; pix->field = mf.field; pix->colorspace = mf.colorspace; switch (pixfmt) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: /* FIXME: check against rect_max after converting soc-camera */ /* We can scale precisely, need a bigger image from camera */ if (pix->width < width || pix->height < height) { /* * We presume, the sensor behaves sanely, i.e., if * requested a bigger rectangle, it will not return a * smaller one. */ mf.width = pcdev->max_width; mf.height = pcdev->max_height; ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), video, try_mbus_fmt, &mf); if (ret < 0) { /* Shouldn't actually happen... */ dev_err(icd->parent, "FIXME: client try_fmt() = %d\n", ret); return ret; } } /* We will scale exactly */ if (mf.width > width) pix->width = width; if (mf.height > height) pix->height = height; pix->bytesperline = max(pix->bytesperline, pix->width); pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX); pix->bytesperline &= ~3; break; default: /* Configurable stride isn't supported in pass-through mode. 
*/ pix->bytesperline = 0; } pix->width &= ~3; pix->height &= ~3; pix->sizeimage = 0; dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n", __func__, ret, pix->pixelformat, pix->width, pix->height); return ret; } static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd, const struct v4l2_crop *a) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; u32 out_width = icd->user_width, out_height = icd->user_height; int ret; /* Freeze queue */ pcdev->frozen = 1; /* Wait for frame */ ret = wait_for_completion_interruptible(&pcdev->complete); /* Stop the client */ ret = v4l2_subdev_call(sd, video, s_stream, 0); if (ret < 0) dev_warn(icd->parent, "Client failed to stop the stream: %d\n", ret); else /* Do the crop, if it fails, there's nothing more we can do */ sh_mobile_ceu_set_crop(icd, a); dev_geo(icd->parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height); if (icd->user_width != out_width || icd->user_height != out_height) { struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt.pix = { .width = out_width, .height = out_height, .pixelformat = icd->current_fmt->host_fmt->fourcc, .field = pcdev->field, .colorspace = icd->colorspace, }, }; ret = sh_mobile_ceu_set_fmt(icd, &f); if (!ret && (out_width != f.fmt.pix.width || out_height != f.fmt.pix.height)) ret = -EINVAL; if (!ret) { icd->user_width = out_width & ~3; icd->user_height = out_height & ~3; ret = sh_mobile_ceu_set_bus_param(icd); } } /* Thaw the queue */ pcdev->frozen = 0; spin_lock_irq(&pcdev->lock); sh_mobile_ceu_capture(pcdev); spin_unlock_irq(&pcdev->lock); /* Start the client */ ret = v4l2_subdev_call(sd, video, s_stream, 1); return ret; } static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt) { struct soc_camera_device *icd = file->private_data; return vb2_poll(&icd->vb2_vidq, file, pt); } static int sh_mobile_ceu_querycap(struct 
soc_camera_host *ici, struct v4l2_capability *cap) { strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card)); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; return 0; } static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q, struct soc_camera_device *icd) { q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_USERPTR; q->drv_priv = icd; q->ops = &sh_mobile_ceu_videobuf_ops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer); q->timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC; return vb2_queue_init(q); } static struct soc_camera_host_ops sh_mobile_ceu_host_ops = { .owner = THIS_MODULE, .add = sh_mobile_ceu_add_device, .remove = sh_mobile_ceu_remove_device, .clock_start = sh_mobile_ceu_clock_start, .clock_stop = sh_mobile_ceu_clock_stop, .get_formats = sh_mobile_ceu_get_formats, .put_formats = sh_mobile_ceu_put_formats, .get_crop = sh_mobile_ceu_get_crop, .set_crop = sh_mobile_ceu_set_crop, .set_livecrop = sh_mobile_ceu_set_livecrop, .set_fmt = sh_mobile_ceu_set_fmt, .try_fmt = sh_mobile_ceu_try_fmt, .poll = sh_mobile_ceu_poll, .querycap = sh_mobile_ceu_querycap, .set_bus_param = sh_mobile_ceu_set_bus_param, .init_videobuf2 = sh_mobile_ceu_init_videobuf, }; struct bus_wait { struct notifier_block notifier; struct completion completion; struct device *dev; }; static int bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct bus_wait *wait = container_of(nb, struct bus_wait, notifier); if (wait->dev != dev) return NOTIFY_DONE; switch (action) { case BUS_NOTIFY_UNBOUND_DRIVER: /* Protect from module unloading */ wait_for_completion(&wait->completion); return NOTIFY_OK; } return NOTIFY_DONE; } static int sh_mobile_ceu_probe(struct platform_device *pdev) { struct sh_mobile_ceu_dev *pcdev; struct resource *res; void __iomem *base; unsigned int irq; int err, i; struct bus_wait wait = { .completion = 
COMPLETION_INITIALIZER_ONSTACK(wait.completion), .notifier.notifier_call = bus_notify, }; struct sh_mobile_ceu_companion *csi2; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || (int)irq <= 0) { dev_err(&pdev->dev, "Not enough CEU platform resources.\n"); return -ENODEV; } pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL); if (!pcdev) { dev_err(&pdev->dev, "Could not allocate pcdev\n"); return -ENOMEM; } INIT_LIST_HEAD(&pcdev->capture); spin_lock_init(&pcdev->lock); init_completion(&pcdev->complete); pcdev->pdata = pdev->dev.platform_data; if (!pcdev->pdata && !pdev->dev.of_node) { dev_err(&pdev->dev, "CEU platform data not set.\n"); return -EINVAL; } /* TODO: implement per-device bus flags */ if (pcdev->pdata) { pcdev->max_width = pcdev->pdata->max_width; pcdev->max_height = pcdev->pdata->max_height; pcdev->flags = pcdev->pdata->flags; } if (!pcdev->max_width) { unsigned int v; err = of_property_read_u32(pdev->dev.of_node, "renesas,max-width", &v); if (!err) pcdev->max_width = v; if (!pcdev->max_width) pcdev->max_width = 2560; } if (!pcdev->max_height) { unsigned int v; err = of_property_read_u32(pdev->dev.of_node, "renesas,max-height", &v); if (!err) pcdev->max_height = v; if (!pcdev->max_height) pcdev->max_height = 1920; } base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); pcdev->irq = irq; pcdev->base = base; pcdev->video_limit = 0; /* only enabled if second resource exists */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { err = dma_declare_coherent_memory(&pdev->dev, res->start, res->start, resource_size(res), DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); if (!err) { dev_err(&pdev->dev, "Unable to declare CEU memory.\n"); return -ENXIO; } pcdev->video_limit = resource_size(res); } /* request irq */ err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq, IRQF_DISABLED, dev_name(&pdev->dev), pcdev); if (err) { dev_err(&pdev->dev, "Unable to 
register CEU interrupt.\n"); goto exit_release_mem; } pm_suspend_ignore_children(&pdev->dev, true); pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); pcdev->ici.priv = pcdev; pcdev->ici.v4l2_dev.dev = &pdev->dev; pcdev->ici.nr = pdev->id; pcdev->ici.drv_name = dev_name(&pdev->dev); pcdev->ici.ops = &sh_mobile_ceu_host_ops; pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE; pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(pcdev->alloc_ctx)) { err = PTR_ERR(pcdev->alloc_ctx); goto exit_free_clk; } if (pcdev->pdata && pcdev->pdata->asd_sizes) { struct v4l2_async_subdev **asd; char name[] = "sh-mobile-csi2"; int j; /* * CSI2 interfacing: several groups can use CSI2, pick up the * first one */ asd = pcdev->pdata->asd; for (j = 0; pcdev->pdata->asd_sizes[j]; j++) { for (i = 0; i < pcdev->pdata->asd_sizes[j]; i++, asd++) { dev_dbg(&pdev->dev, "%s(): subdev #%d, type %u\n", __func__, i, (*asd)->bus_type); if ((*asd)->bus_type == V4L2_ASYNC_BUS_PLATFORM && !strncmp(name, (*asd)->match.platform.name, sizeof(name) - 1)) { pcdev->csi2_asd = *asd; break; } } if (pcdev->csi2_asd) break; } pcdev->ici.asd = pcdev->pdata->asd; pcdev->ici.asd_sizes = pcdev->pdata->asd_sizes; } /* Legacy CSI2 interfacing */ csi2 = pcdev->pdata ? pcdev->pdata->csi2 : NULL; if (csi2) { /* * TODO: remove this once all users are converted to * asynchronous CSI2 probing. 
If it has to be kept, csi2 * platform device resources have to be added, using * platform_device_add_resources() */ struct platform_device *csi2_pdev = platform_device_alloc("sh-mobile-csi2", csi2->id); struct sh_csi2_pdata *csi2_pdata = csi2->platform_data; if (!csi2_pdev) { err = -ENOMEM; goto exit_free_ctx; } pcdev->csi2_pdev = csi2_pdev; err = platform_device_add_data(csi2_pdev, csi2_pdata, sizeof(*csi2_pdata)); if (err < 0) goto exit_pdev_put; csi2_pdev->resource = csi2->resource; csi2_pdev->num_resources = csi2->num_resources; err = platform_device_add(csi2_pdev); if (err < 0) goto exit_pdev_put; wait.dev = &csi2_pdev->dev; err = bus_register_notifier(&platform_bus_type, &wait.notifier); if (err < 0) goto exit_pdev_unregister; /* * From this point the driver module will not unload, until * we complete the completion. */ if (!csi2_pdev->dev.driver) { complete(&wait.completion); /* Either too late, or probing failed */ bus_unregister_notifier(&platform_bus_type, &wait.notifier); err = -ENXIO; goto exit_pdev_unregister; } /* * The module is still loaded, in the worst case it is hanging * in device release on our completion. So, _now_ dereferencing * the "owner" is safe! 
*/ err = try_module_get(csi2_pdev->dev.driver->owner); /* Let notifier complete, if it has been locked */ complete(&wait.completion); bus_unregister_notifier(&platform_bus_type, &wait.notifier); if (!err) { err = -ENODEV; goto exit_pdev_unregister; } pcdev->csi2_sd = platform_get_drvdata(csi2_pdev); } err = soc_camera_host_register(&pcdev->ici); if (err) goto exit_csi2_unregister; if (csi2) { err = v4l2_device_register_subdev(&pcdev->ici.v4l2_dev, pcdev->csi2_sd); dev_dbg(&pdev->dev, "%s(): ret(register_subdev) = %d\n", __func__, err); if (err < 0) goto exit_host_unregister; /* v4l2_device_register_subdev() took a reference too */ module_put(pcdev->csi2_sd->owner); } return 0; exit_host_unregister: soc_camera_host_unregister(&pcdev->ici); exit_csi2_unregister: if (csi2) { module_put(pcdev->csi2_pdev->dev.driver->owner); exit_pdev_unregister: platform_device_del(pcdev->csi2_pdev); exit_pdev_put: pcdev->csi2_pdev->resource = NULL; platform_device_put(pcdev->csi2_pdev); } exit_free_ctx: vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); exit_free_clk: pm_runtime_disable(&pdev->dev); exit_release_mem: if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); return err; } static int sh_mobile_ceu_remove(struct platform_device *pdev) { struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); struct sh_mobile_ceu_dev *pcdev = container_of(soc_host, struct sh_mobile_ceu_dev, ici); struct platform_device *csi2_pdev = pcdev->csi2_pdev; soc_camera_host_unregister(soc_host); pm_runtime_disable(&pdev->dev); if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); if (csi2_pdev && csi2_pdev->dev.driver) { struct module *csi2_drv = csi2_pdev->dev.driver->owner; platform_device_del(csi2_pdev); csi2_pdev->resource = NULL; platform_device_put(csi2_pdev); module_put(csi2_drv); } return 0; } static int sh_mobile_ceu_runtime_nop(struct device *dev) { /* Runtime PM 
callback shared between ->runtime_suspend() * and ->runtime_resume(). Simply returns success. * * This driver re-initializes all registers after * pm_runtime_get_sync() anyway so there is no need * to save and restore registers here. */ return 0; } static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { .runtime_suspend = sh_mobile_ceu_runtime_nop, .runtime_resume = sh_mobile_ceu_runtime_nop, }; static const struct of_device_id sh_mobile_ceu_of_match[] = { { .compatible = "renesas,sh-mobile-ceu" }, { } }; MODULE_DEVICE_TABLE(of, sh_mobile_ceu_of_match); static struct platform_driver sh_mobile_ceu_driver = { .driver = { .name = "sh_mobile_ceu", .owner = THIS_MODULE, .pm = &sh_mobile_ceu_dev_pm_ops, .of_match_table = sh_mobile_ceu_of_match, }, .probe = sh_mobile_ceu_probe, .remove = sh_mobile_ceu_remove, }; static int __init sh_mobile_ceu_init(void) { /* Whatever return code */ request_module("sh_mobile_csi2"); return platform_driver_register(&sh_mobile_ceu_driver); } static void __exit sh_mobile_ceu_exit(void) { platform_driver_unregister(&sh_mobile_ceu_driver); } module_init(sh_mobile_ceu_init); module_exit(sh_mobile_ceu_exit); MODULE_DESCRIPTION("SuperH Mobile CEU driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1.0"); MODULE_ALIAS("platform:sh_mobile_ceu");
gpl-2.0
TheSSJ/android_kernel_asus_moorefield
drivers/spi/spidev_info.c
66
2024
/* * SPI debugfs interface for spidev register * * Copyright (C) 2014, Intel Corporation * Authors: Huiquan Zhong <huiquan.zhong@intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/spi/spi.h> static struct spi_board_info spidev_info = { .modalias = "spidev", .max_speed_hz = 1000000, .bus_num = 1, .chip_select = 0, .mode = SPI_MODE_0, }; static int spidev_debug_open(struct inode *inode, struct file *filp) { filp->private_data = inode->i_private; return 0; } static ssize_t spidev_debug_write(struct file *filp, const char __user *ubuf, size_t cnt, loff_t *ppos) { char buf[32]; ssize_t buf_size; char *start = buf; unsigned int bus_num, cs_num; if (*ppos < 0 || !cnt) return -EINVAL; buf_size = min(cnt, (sizeof(buf)-1)); if (copy_from_user(buf, ubuf, buf_size)) return -EFAULT; buf[buf_size] = 0; while (*start == ' ') start++; bus_num = simple_strtoul(start, &start, 10); while (*start == ' ') start++; if (kstrtouint(start, 10, &cs_num)) return -EINVAL; spidev_info.bus_num = bus_num; spidev_info.chip_select = cs_num; spi_register_board_info(&spidev_info, 1); return buf_size; } static const struct file_operations spidev_debug_fops = { .open = spidev_debug_open, .write = spidev_debug_write, .llseek = generic_file_llseek, }; struct dentry *spidev_node; static __init int spidev_debug_init(void) { spidev_node = debugfs_create_file("spidev_node", S_IFREG | S_IWUSR, NULL, NULL, &spidev_debug_fops); if (!spidev_node) { pr_err("Failed to create spidev_node debug file\n"); return -ENOMEM; } return 0; } static __exit void spidev_debug_exit(void) { debugfs_remove(spidev_node); } module_init(spidev_debug_init); module_exit(spidev_debug_exit);
gpl-2.0
mike-dunn/linux-treo680
drivers/mtd/maps/intel_vr_nor.c
322
6957
/* * drivers/mtd/maps/intel_vr_nor.c * * An MTD map driver for a NOR flash bank on the Expansion Bus of the Intel * Vermilion Range chipset. * * The Vermilion Range Expansion Bus supports four chip selects, each of which * has 64MiB of address space. The 2nd BAR of the Expansion Bus PCI Device * is a 256MiB memory region containing the address spaces for all four of the * chip selects, with start addresses hardcoded on 64MiB boundaries. * * This map driver only supports NOR flash on chip select 0. The buswidth * (either 8 bits or 16 bits) is determined by reading the Expansion Bus Timing * and Control Register for Chip Select 0 (EXP_TIMING_CS0). This driver does * not modify the value in the EXP_TIMING_CS0 register except to enable writing * and disable boot acceleration. The timing parameters in the register are * assumed to have been properly initialized by the BIOS. The reset default * timing parameters are maximally conservative (slow), so access to the flash * will be slower than it should be if the BIOS has not initialized the timing * parameters. * * Author: Andy Lowe <alowe@mvista.com> * * 2006 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. 
*/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <linux/mtd/cfi.h> #include <linux/mtd/flashchip.h> #define DRV_NAME "vr_nor" struct vr_nor_mtd { void __iomem *csr_base; struct map_info map; struct mtd_info *info; struct pci_dev *dev; }; /* Expansion Bus Configuration and Status Registers are in BAR 0 */ #define EXP_CSR_MBAR 0 /* Expansion Bus Memory Window is BAR 1 */ #define EXP_WIN_MBAR 1 /* Maximum address space for Chip Select 0 is 64MiB */ #define CS0_SIZE 0x04000000 /* Chip Select 0 is at offset 0 in the Memory Window */ #define CS0_START 0x0 /* Chip Select 0 Timing Register is at offset 0 in CSR */ #define EXP_TIMING_CS0 0x00 #define TIMING_CS_EN (1 << 31) /* Chip Select Enable */ #define TIMING_BOOT_ACCEL_DIS (1 << 8) /* Boot Acceleration Disable */ #define TIMING_WR_EN (1 << 1) /* Write Enable */ #define TIMING_BYTE_EN (1 << 0) /* 8-bit vs 16-bit bus */ #define TIMING_MASK 0x3FFF0000 static void vr_nor_destroy_partitions(struct vr_nor_mtd *p) { mtd_device_unregister(p->info); } static int vr_nor_init_partitions(struct vr_nor_mtd *p) { /* register the flash bank */ /* partition the flash bank */ return mtd_device_parse_register(p->info, NULL, NULL, NULL, 0); } static void vr_nor_destroy_mtd_setup(struct vr_nor_mtd *p) { map_destroy(p->info); } static int vr_nor_mtd_setup(struct vr_nor_mtd *p) { static const char * const probe_types[] = { "cfi_probe", "jedec_probe", NULL }; const char * const *type; for (type = probe_types; !p->info && *type; type++) p->info = do_map_probe(*type, &p->map); if (!p->info) return -ENODEV; p->info->owner = THIS_MODULE; return 0; } static void vr_nor_destroy_maps(struct vr_nor_mtd *p) { unsigned int exp_timing_cs0; /* write-protect the flash bank */ exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0); exp_timing_cs0 &= ~TIMING_WR_EN; writel(exp_timing_cs0, 
p->csr_base + EXP_TIMING_CS0); /* unmap the flash window */ iounmap(p->map.virt); /* unmap the csr window */ iounmap(p->csr_base); } /* * Initialize the map_info structure and map the flash. * Returns 0 on success, nonzero otherwise. */ static int vr_nor_init_maps(struct vr_nor_mtd *p) { unsigned long csr_phys, csr_len; unsigned long win_phys, win_len; unsigned int exp_timing_cs0; int err; csr_phys = pci_resource_start(p->dev, EXP_CSR_MBAR); csr_len = pci_resource_len(p->dev, EXP_CSR_MBAR); win_phys = pci_resource_start(p->dev, EXP_WIN_MBAR); win_len = pci_resource_len(p->dev, EXP_WIN_MBAR); if (!csr_phys || !csr_len || !win_phys || !win_len) return -ENODEV; if (win_len < (CS0_START + CS0_SIZE)) return -ENXIO; p->csr_base = ioremap_nocache(csr_phys, csr_len); if (!p->csr_base) return -ENOMEM; exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0); if (!(exp_timing_cs0 & TIMING_CS_EN)) { dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 " "is disabled.\n"); err = -ENODEV; goto release; } if ((exp_timing_cs0 & TIMING_MASK) == TIMING_MASK) { dev_warn(&p->dev->dev, "Expansion Bus Chip Select 0 " "is configured for maximally slow access times.\n"); } p->map.name = DRV_NAME; p->map.bankwidth = (exp_timing_cs0 & TIMING_BYTE_EN) ? 
1 : 2; p->map.phys = win_phys + CS0_START; p->map.size = CS0_SIZE; p->map.virt = ioremap_nocache(p->map.phys, p->map.size); if (!p->map.virt) { err = -ENOMEM; goto release; } simple_map_init(&p->map); /* Enable writes to flash bank */ exp_timing_cs0 |= TIMING_BOOT_ACCEL_DIS | TIMING_WR_EN; writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0); return 0; release: iounmap(p->csr_base); return err; } static struct pci_device_id vr_nor_pci_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x500D)}, {0,} }; static void vr_nor_pci_remove(struct pci_dev *dev) { struct vr_nor_mtd *p = pci_get_drvdata(dev); vr_nor_destroy_partitions(p); vr_nor_destroy_mtd_setup(p); vr_nor_destroy_maps(p); kfree(p); pci_release_regions(dev); pci_disable_device(dev); } static int vr_nor_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct vr_nor_mtd *p = NULL; unsigned int exp_timing_cs0; int err; err = pci_enable_device(dev); if (err) goto out; err = pci_request_regions(dev, DRV_NAME); if (err) goto disable_dev; p = kzalloc(sizeof(*p), GFP_KERNEL); err = -ENOMEM; if (!p) goto release; p->dev = dev; err = vr_nor_init_maps(p); if (err) goto release; err = vr_nor_mtd_setup(p); if (err) goto destroy_maps; err = vr_nor_init_partitions(p); if (err) goto destroy_mtd_setup; pci_set_drvdata(dev, p); return 0; destroy_mtd_setup: map_destroy(p->info); destroy_maps: /* write-protect the flash bank */ exp_timing_cs0 = readl(p->csr_base + EXP_TIMING_CS0); exp_timing_cs0 &= ~TIMING_WR_EN; writel(exp_timing_cs0, p->csr_base + EXP_TIMING_CS0); /* unmap the flash window */ iounmap(p->map.virt); /* unmap the csr window */ iounmap(p->csr_base); release: kfree(p); pci_release_regions(dev); disable_dev: pci_disable_device(dev); out: return err; } static struct pci_driver vr_nor_pci_driver = { .name = DRV_NAME, .probe = vr_nor_pci_probe, .remove = vr_nor_pci_remove, .id_table = vr_nor_pci_ids, }; module_pci_driver(vr_nor_pci_driver); MODULE_AUTHOR("Andy Lowe"); MODULE_DESCRIPTION("MTD map driver for NOR 
flash on Intel Vermilion Range"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, vr_nor_pci_ids);
gpl-2.0
sricharanaz/venus
drivers/cpufreq/pxa2xx-cpufreq.c
322
13713
/* * Copyright (C) 2002,2003 Intrinsyc Software * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * History: * 31-Jul-2002 : Initial version [FB] * 29-Jan-2003 : added PXA255 support [FB] * 20-Apr-2003 : ported to v2.5 (Dustin McIntire, Sensoria Corp.) * * Note: * This driver may change the memory bus clock rate, but will not do any * platform specific access timing changes... for example if you have flash * memory connected to CS0, you will need to register a platform specific * notifier which will adjust the memory access strobes to maintain a * minimum strobe width. 
* */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/cpufreq.h> #include <linux/err.h> #include <linux/regulator/consumer.h> #include <linux/io.h> #include <mach/pxa2xx-regs.h> #include <mach/smemc.h> #ifdef DEBUG static unsigned int freq_debug; module_param(freq_debug, uint, 0); MODULE_PARM_DESC(freq_debug, "Set the debug messages to on=1/off=0"); #else #define freq_debug 0 #endif static struct regulator *vcc_core; static unsigned int pxa27x_maxfreq; module_param(pxa27x_maxfreq, uint, 0); MODULE_PARM_DESC(pxa27x_maxfreq, "Set the pxa27x maxfreq in MHz" "(typically 624=>pxa270, 416=>pxa271, 520=>pxa272)"); struct pxa_freqs { unsigned int khz; unsigned int membus; unsigned int cccr; unsigned int div2; unsigned int cclkcfg; int vmin; int vmax; }; /* Define the refresh period in mSec for the SDRAM and the number of rows */ #define SDRAM_TREF 64 /* standard 64ms SDRAM */ static unsigned int sdram_rows; #define CCLKCFG_TURBO 0x1 #define CCLKCFG_FCS 0x2 #define CCLKCFG_HALFTURBO 0x4 #define CCLKCFG_FASTBUS 0x8 #define MDREFR_DB2_MASK (MDREFR_K2DB2 | MDREFR_K1DB2) #define MDREFR_DRI_MASK 0xFFF #define MDCNFG_DRAC2(mdcnfg) (((mdcnfg) >> 21) & 0x3) #define MDCNFG_DRAC0(mdcnfg) (((mdcnfg) >> 5) & 0x3) /* * PXA255 definitions */ /* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */ #define CCLKCFG CCLKCFG_TURBO | CCLKCFG_FCS static const struct pxa_freqs pxa255_run_freqs[] = { /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */ {132700, 132700, 0x123, 1, CCLKCFG, -1, -1}, /* 133, 133, 66, 66 */ {199100, 99500, 0x141, 0, CCLKCFG, -1, -1}, /* 199, 199, 99, 99 */ {265400, 132700, 0x143, 1, CCLKCFG, -1, -1}, /* 265, 265, 133, 66 */ {331800, 165900, 0x145, 1, CCLKCFG, -1, -1}, /* 331, 331, 166, 83 */ {398100, 99500, 0x161, 0, CCLKCFG, -1, -1}, /* 398, 398, 196, 99 */ }; /* Use 
the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */ static const struct pxa_freqs pxa255_turbo_freqs[] = { /* CPU MEMBUS CCCR DIV2 CCLKCFG run turbo PXbus SDRAM */ { 99500, 99500, 0x121, 1, CCLKCFG, -1, -1}, /* 99, 99, 50, 50 */ {199100, 99500, 0x221, 0, CCLKCFG, -1, -1}, /* 99, 199, 50, 99 */ {298500, 99500, 0x321, 0, CCLKCFG, -1, -1}, /* 99, 287, 50, 99 */ {298600, 99500, 0x1c1, 0, CCLKCFG, -1, -1}, /* 199, 287, 99, 99 */ {398100, 99500, 0x241, 0, CCLKCFG, -1, -1}, /* 199, 398, 99, 99 */ }; #define NUM_PXA25x_RUN_FREQS ARRAY_SIZE(pxa255_run_freqs) #define NUM_PXA25x_TURBO_FREQS ARRAY_SIZE(pxa255_turbo_freqs) static struct cpufreq_frequency_table pxa255_run_freq_table[NUM_PXA25x_RUN_FREQS+1]; static struct cpufreq_frequency_table pxa255_turbo_freq_table[NUM_PXA25x_TURBO_FREQS+1]; static unsigned int pxa255_turbo_table; module_param(pxa255_turbo_table, uint, 0); MODULE_PARM_DESC(pxa255_turbo_table, "Selects the frequency table (0 = run table, !0 = turbo table)"); /* * PXA270 definitions * * For the PXA27x: * Control variables are A, L, 2N for CCCR; B, HT, T for CLKCFG. * * A = 0 => memory controller clock from table 3-7, * A = 1 => memory controller clock = system bus clock * Run mode frequency = 13 MHz * L * Turbo mode frequency = 13 MHz * L * N * System bus frequency = 13 MHz * L / (B + 1) * * In CCCR: * A = 1 * L = 16 oscillator to run mode ratio * 2N = 6 2 * (turbo mode to run mode ratio) * * In CCLKCFG: * B = 1 Fast bus mode * HT = 0 Half-Turbo mode * T = 1 Turbo mode * * For now, just support some of the combinations in table 3-7 of * PXA27x Processor Family Developer's Manual to simplify frequency * change sequences. */ #define PXA27x_CCCR(A, L, N2) (A << 25 | N2 << 7 | L) #define CCLKCFG2(B, HT, T) \ (CCLKCFG_FCS | \ ((B) ? CCLKCFG_FASTBUS : 0) | \ ((HT) ? CCLKCFG_HALFTURBO : 0) | \ ((T) ? 
CCLKCFG_TURBO : 0)) static struct pxa_freqs pxa27x_freqs[] = { {104000, 104000, PXA27x_CCCR(1, 8, 2), 0, CCLKCFG2(1, 0, 1), 900000, 1705000 }, {156000, 104000, PXA27x_CCCR(1, 8, 3), 0, CCLKCFG2(1, 0, 1), 1000000, 1705000 }, {208000, 208000, PXA27x_CCCR(0, 16, 2), 1, CCLKCFG2(0, 0, 1), 1180000, 1705000 }, {312000, 208000, PXA27x_CCCR(1, 16, 3), 1, CCLKCFG2(1, 0, 1), 1250000, 1705000 }, {416000, 208000, PXA27x_CCCR(1, 16, 4), 1, CCLKCFG2(1, 0, 1), 1350000, 1705000 }, {520000, 208000, PXA27x_CCCR(1, 16, 5), 1, CCLKCFG2(1, 0, 1), 1450000, 1705000 }, {624000, 208000, PXA27x_CCCR(1, 16, 6), 1, CCLKCFG2(1, 0, 1), 1550000, 1705000 } }; #define NUM_PXA27x_FREQS ARRAY_SIZE(pxa27x_freqs) static struct cpufreq_frequency_table pxa27x_freq_table[NUM_PXA27x_FREQS+1]; extern unsigned get_clk_frequency_khz(int info); #ifdef CONFIG_REGULATOR static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq) { int ret = 0; int vmin, vmax; if (!cpu_is_pxa27x()) return 0; vmin = pxa_freq->vmin; vmax = pxa_freq->vmax; if ((vmin == -1) || (vmax == -1)) return 0; ret = regulator_set_voltage(vcc_core, vmin, vmax); if (ret) pr_err("Failed to set vcc_core in [%dmV..%dmV]\n", vmin, vmax); return ret; } static void __init pxa_cpufreq_init_voltages(void) { vcc_core = regulator_get(NULL, "vcc_core"); if (IS_ERR(vcc_core)) { pr_info("Didn't find vcc_core regulator\n"); vcc_core = NULL; } else { pr_info("Found vcc_core regulator\n"); } } #else static int pxa_cpufreq_change_voltage(const struct pxa_freqs *pxa_freq) { return 0; } static void __init pxa_cpufreq_init_voltages(void) { } #endif static void find_freq_tables(struct cpufreq_frequency_table **freq_table, const struct pxa_freqs **pxa_freqs) { if (cpu_is_pxa25x()) { if (!pxa255_turbo_table) { *pxa_freqs = pxa255_run_freqs; *freq_table = pxa255_run_freq_table; } else { *pxa_freqs = pxa255_turbo_freqs; *freq_table = pxa255_turbo_freq_table; } } else if (cpu_is_pxa27x()) { *pxa_freqs = pxa27x_freqs; *freq_table = pxa27x_freq_table; } else { 
BUG(); } } static void pxa27x_guess_max_freq(void) { if (!pxa27x_maxfreq) { pxa27x_maxfreq = 416000; pr_info("PXA CPU 27x max frequency not defined (pxa27x_maxfreq), assuming pxa271 with %dkHz maxfreq\n", pxa27x_maxfreq); } else { pxa27x_maxfreq *= 1000; } } static void init_sdram_rows(void) { uint32_t mdcnfg = __raw_readl(MDCNFG); unsigned int drac2 = 0, drac0 = 0; if (mdcnfg & (MDCNFG_DE2 | MDCNFG_DE3)) drac2 = MDCNFG_DRAC2(mdcnfg); if (mdcnfg & (MDCNFG_DE0 | MDCNFG_DE1)) drac0 = MDCNFG_DRAC0(mdcnfg); sdram_rows = 1 << (11 + max(drac0, drac2)); } static u32 mdrefr_dri(unsigned int freq) { u32 interval = freq * SDRAM_TREF / sdram_rows; return (interval - (cpu_is_pxa27x() ? 31 : 0)) / 32; } static unsigned int pxa_cpufreq_get(unsigned int cpu) { return get_clk_frequency_khz(0); } static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx) { struct cpufreq_frequency_table *pxa_freqs_table; const struct pxa_freqs *pxa_freq_settings; unsigned long flags; unsigned int new_freq_cpu, new_freq_mem; unsigned int unused, preset_mdrefr, postset_mdrefr, cclkcfg; int ret = 0; /* Get the current policy */ find_freq_tables(&pxa_freqs_table, &pxa_freq_settings); new_freq_cpu = pxa_freq_settings[idx].khz; new_freq_mem = pxa_freq_settings[idx].membus; if (freq_debug) pr_debug("Changing CPU frequency to %d Mhz, (SDRAM %d Mhz)\n", new_freq_cpu / 1000, (pxa_freq_settings[idx].div2) ? (new_freq_mem / 2000) : (new_freq_mem / 1000)); if (vcc_core && new_freq_cpu > policy->cur) { ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); if (ret) return ret; } /* Calculate the next MDREFR. If we're slowing down the SDRAM clock * we need to preset the smaller DRI before the change. If we're * speeding up we need to set the larger DRI value after the change. 
*/ preset_mdrefr = postset_mdrefr = __raw_readl(MDREFR); if ((preset_mdrefr & MDREFR_DRI_MASK) > mdrefr_dri(new_freq_mem)) { preset_mdrefr = (preset_mdrefr & ~MDREFR_DRI_MASK); preset_mdrefr |= mdrefr_dri(new_freq_mem); } postset_mdrefr = (postset_mdrefr & ~MDREFR_DRI_MASK) | mdrefr_dri(new_freq_mem); /* If we're dividing the memory clock by two for the SDRAM clock, this * must be set prior to the change. Clearing the divide must be done * after the change. */ if (pxa_freq_settings[idx].div2) { preset_mdrefr |= MDREFR_DB2_MASK; postset_mdrefr |= MDREFR_DB2_MASK; } else { postset_mdrefr &= ~MDREFR_DB2_MASK; } local_irq_save(flags); /* Set new the CCCR and prepare CCLKCFG */ writel(pxa_freq_settings[idx].cccr, CCCR); cclkcfg = pxa_freq_settings[idx].cclkcfg; asm volatile(" \n\ ldr r4, [%1] /* load MDREFR */ \n\ b 2f \n\ .align 5 \n\ 1: \n\ str %3, [%1] /* preset the MDREFR */ \n\ mcr p14, 0, %2, c6, c0, 0 /* set CCLKCFG[FCS] */ \n\ str %4, [%1] /* postset the MDREFR */ \n\ \n\ b 3f \n\ 2: b 1b \n\ 3: nop \n\ " : "=&r" (unused) : "r" (MDREFR), "r" (cclkcfg), "r" (preset_mdrefr), "r" (postset_mdrefr) : "r4", "r5"); local_irq_restore(flags); /* * Even if voltage setting fails, we don't report it, as the frequency * change succeeded. The voltage reduction is not a critical failure, * only power savings will suffer from this. * * Note: if the voltage change fails, and a return value is returned, a * bug is triggered (seems a deadlock). Should anybody find out where, * the "return 0" should become a "return ret". 
*/ if (vcc_core && new_freq_cpu < policy->cur) ret = pxa_cpufreq_change_voltage(&pxa_freq_settings[idx]); return 0; } static int pxa_cpufreq_init(struct cpufreq_policy *policy) { int i; unsigned int freq; struct cpufreq_frequency_table *pxa255_freq_table; const struct pxa_freqs *pxa255_freqs; /* try to guess pxa27x cpu */ if (cpu_is_pxa27x()) pxa27x_guess_max_freq(); pxa_cpufreq_init_voltages(); init_sdram_rows(); /* set default policy and cpuinfo */ policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ /* Generate pxa25x the run cpufreq_frequency_table struct */ for (i = 0; i < NUM_PXA25x_RUN_FREQS; i++) { pxa255_run_freq_table[i].frequency = pxa255_run_freqs[i].khz; pxa255_run_freq_table[i].driver_data = i; } pxa255_run_freq_table[i].frequency = CPUFREQ_TABLE_END; /* Generate pxa25x the turbo cpufreq_frequency_table struct */ for (i = 0; i < NUM_PXA25x_TURBO_FREQS; i++) { pxa255_turbo_freq_table[i].frequency = pxa255_turbo_freqs[i].khz; pxa255_turbo_freq_table[i].driver_data = i; } pxa255_turbo_freq_table[i].frequency = CPUFREQ_TABLE_END; pxa255_turbo_table = !!pxa255_turbo_table; /* Generate the pxa27x cpufreq_frequency_table struct */ for (i = 0; i < NUM_PXA27x_FREQS; i++) { freq = pxa27x_freqs[i].khz; if (freq > pxa27x_maxfreq) break; pxa27x_freq_table[i].frequency = freq; pxa27x_freq_table[i].driver_data = i; } pxa27x_freq_table[i].driver_data = i; pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END; /* * Set the policy's minimum and maximum frequencies from the tables * just constructed. This sets cpuinfo.mxx_freq, min and max. */ if (cpu_is_pxa25x()) { find_freq_tables(&pxa255_freq_table, &pxa255_freqs); pr_info("using %s frequency table\n", pxa255_turbo_table ? 
"turbo" : "run"); cpufreq_table_validate_and_show(policy, pxa255_freq_table); } else if (cpu_is_pxa27x()) { cpufreq_table_validate_and_show(policy, pxa27x_freq_table); } pr_info("frequency change support initialized\n"); return 0; } static struct cpufreq_driver pxa_cpufreq_driver = { .flags = CPUFREQ_NEED_INITIAL_FREQ_CHECK, .verify = cpufreq_generic_frequency_table_verify, .target_index = pxa_set_target, .init = pxa_cpufreq_init, .get = pxa_cpufreq_get, .name = "PXA2xx", }; static int __init pxa_cpu_init(void) { int ret = -ENODEV; if (cpu_is_pxa25x() || cpu_is_pxa27x()) ret = cpufreq_register_driver(&pxa_cpufreq_driver); return ret; } static void __exit pxa_cpu_exit(void) { cpufreq_unregister_driver(&pxa_cpufreq_driver); } MODULE_AUTHOR("Intrinsyc Software Inc."); MODULE_DESCRIPTION("CPU frequency changing driver for the PXA architecture"); MODULE_LICENSE("GPL"); module_init(pxa_cpu_init); module_exit(pxa_cpu_exit);
gpl-2.0
wetek-enigma/linux-wetek-3.14.y
net/netfilter/nft_immediate.c
322
3455
/* * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Development of this code funded by Astaro AG (http://www.astaro.com/) */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netlink.h> #include <linux/netfilter.h> #include <linux/netfilter/nf_tables.h> #include <net/netfilter/nf_tables_core.h> #include <net/netfilter/nf_tables.h> struct nft_immediate_expr { struct nft_data data; enum nft_registers dreg:8; u8 dlen; }; static void nft_immediate_eval(const struct nft_expr *expr, struct nft_data data[NFT_REG_MAX + 1], const struct nft_pktinfo *pkt) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); nft_data_copy(&data[priv->dreg], &priv->data); } static const struct nla_policy nft_immediate_policy[NFTA_IMMEDIATE_MAX + 1] = { [NFTA_IMMEDIATE_DREG] = { .type = NLA_U32 }, [NFTA_IMMEDIATE_DATA] = { .type = NLA_NESTED }, }; static int nft_immediate_init(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nlattr * const tb[]) { struct nft_immediate_expr *priv = nft_expr_priv(expr); struct nft_data_desc desc; int err; if (tb[NFTA_IMMEDIATE_DREG] == NULL || tb[NFTA_IMMEDIATE_DATA] == NULL) return -EINVAL; priv->dreg = ntohl(nla_get_be32(tb[NFTA_IMMEDIATE_DREG])); err = nft_validate_output_register(priv->dreg); if (err < 0) return err; err = nft_data_init(ctx, &priv->data, &desc, tb[NFTA_IMMEDIATE_DATA]); if (err < 0) return err; priv->dlen = desc.len; err = nft_validate_data_load(ctx, priv->dreg, &priv->data, desc.type); if (err < 0) goto err1; return 0; err1: nft_data_uninit(&priv->data, desc.type); return err; } static void nft_immediate_destroy(const struct nft_expr *expr) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); return nft_data_uninit(&priv->data, nft_dreg_to_type(priv->dreg)); } 
static int nft_immediate_dump(struct sk_buff *skb, const struct nft_expr *expr) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); if (nla_put_be32(skb, NFTA_IMMEDIATE_DREG, htonl(priv->dreg))) goto nla_put_failure; return nft_data_dump(skb, NFTA_IMMEDIATE_DATA, &priv->data, nft_dreg_to_type(priv->dreg), priv->dlen); nla_put_failure: return -1; } static int nft_immediate_validate(const struct nft_ctx *ctx, const struct nft_expr *expr, const struct nft_data **data) { const struct nft_immediate_expr *priv = nft_expr_priv(expr); if (priv->dreg == NFT_REG_VERDICT) *data = &priv->data; return 0; } static struct nft_expr_type nft_imm_type; static const struct nft_expr_ops nft_imm_ops = { .type = &nft_imm_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_immediate_expr)), .eval = nft_immediate_eval, .init = nft_immediate_init, .destroy = nft_immediate_destroy, .dump = nft_immediate_dump, .validate = nft_immediate_validate, }; static struct nft_expr_type nft_imm_type __read_mostly = { .name = "immediate", .ops = &nft_imm_ops, .policy = nft_immediate_policy, .maxattr = NFTA_IMMEDIATE_MAX, .owner = THIS_MODULE, }; int __init nft_immediate_module_init(void) { return nft_register_expr(&nft_imm_type); } void nft_immediate_module_exit(void) { nft_unregister_expr(&nft_imm_type); }
gpl-2.0
jamison904/T989_TW_JB
arch/arm/mach-msm/qdsp6v2/audio_evrc.c
322
4988
/* evrc audio output device * * Copyright (C) 2008 Google, Inc. * Copyright (C) 2008 HTC Corporation * Copyright (c) 2011, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include "audio_utils_aio.h" static void q6_audio_evrc_cb(uint32_t opcode, uint32_t token, uint32_t *payload, void *priv) { struct q6audio_aio *audio = (struct q6audio_aio *)priv; pr_debug("%s:opcde = %d token = 0x%x\n", __func__, opcode, token); switch (opcode) { case ASM_DATA_EVENT_WRITE_DONE: case ASM_DATA_EVENT_READ_DONE: case ASM_DATA_CMDRSP_EOS: audio_aio_cb(opcode, token, payload, audio); break; default: pr_debug("%s:Unhandled event = 0x%8x\n", __func__, opcode); break; } } #ifdef CONFIG_DEBUG_FS static const struct file_operations audio_evrc_debug_fops = { .read = audio_aio_debug_read, .open = audio_aio_debug_open, }; #endif static long audio_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct q6audio_aio *audio = file->private_data; int rc = 0; switch (cmd) { case AUDIO_START: { pr_debug("%s[%p]: AUDIO_START session_id[%d]\n", __func__, audio, audio->ac->session); if (audio->feedback == NON_TUNNEL_MODE) { /* Configure PCM output block */ rc = q6asm_enc_cfg_blk_pcm(audio->ac, audio->pcm_cfg.sample_rate, audio->pcm_cfg.channel_count); if (rc < 0) { pr_err("pcm output block config failed\n"); break; } } rc = audio_aio_enable(audio); audio->eos_rsp = 0; audio->eos_flag = 0; if (!rc) { audio->enabled = 1; } else { audio->enabled = 0; pr_err("Audio Start procedure failed rc=%d\n", rc); break; } pr_debug("%s: AUDIO_START 
sessionid[%d]enable[%d]\n", __func__, audio->ac->session, audio->enabled); if (audio->stopped == 1) audio->stopped = 0; break; } default: pr_debug("%s[%p]: Calling utils ioctl\n", __func__, audio); rc = audio->codec_ioctl(file, cmd, arg); } return rc; } static int audio_open(struct inode *inode, struct file *file) { struct q6audio_aio *audio = NULL; int rc = 0; #ifdef CONFIG_DEBUG_FS /* 4 bytes represents decoder number, 1 byte for terminate string */ char name[sizeof "msm_evrc_" + 5]; #endif audio = kzalloc(sizeof(struct q6audio_aio), GFP_KERNEL); if (audio == NULL) { pr_err("Could not allocate memory for aac decode driver\n"); return -ENOMEM; } /* Settings will be re-config at AUDIO_SET_CONFIG, * but at least we need to have initial config */ audio->pcm_cfg.buffer_size = PCM_BUFSZ_MIN; audio->ac = q6asm_audio_client_alloc((app_cb) q6_audio_evrc_cb, (void *)audio); if (!audio->ac) { pr_err("Could not allocate memory for audio client\n"); kfree(audio); return -ENOMEM; } /* open in T/NT mode */ if ((file->f_mode & FMODE_WRITE) && (file->f_mode & FMODE_READ)) { rc = q6asm_open_read_write(audio->ac, FORMAT_LINEAR_PCM, FORMAT_EVRC); if (rc < 0) { pr_err("NT mode Open failed rc=%d\n", rc); rc = -ENODEV; goto fail; } audio->feedback = NON_TUNNEL_MODE; audio->buf_cfg.frames_per_buf = 0x01; audio->buf_cfg.meta_info_enable = 0x01; } else if ((file->f_mode & FMODE_WRITE) && !(file->f_mode & FMODE_READ)) { rc = q6asm_open_write(audio->ac, FORMAT_EVRC); if (rc < 0) { pr_err("T mode Open failed rc=%d\n", rc); rc = -ENODEV; goto fail; } audio->feedback = TUNNEL_MODE; audio->buf_cfg.meta_info_enable = 0x00; } else { pr_err("Not supported mode\n"); rc = -EACCES; goto fail; } rc = audio_aio_open(audio, file); if (IS_ERR_OR_NULL(audio)) { pr_err("%s: audio_aio_open failed\n", __func__); rc = -EACCES; goto fail; } #ifdef CONFIG_DEBUG_FS snprintf(name, sizeof name, "msm_evrc_%04x", audio->ac->session); audio->dentry = debugfs_create_file(name, S_IFREG | S_IRUGO, NULL, (void *)audio, 
&audio_evrc_debug_fops); if (IS_ERR(audio->dentry)) pr_debug("debugfs_create_file failed\n"); #endif pr_info("%s:dec success mode[%d]session[%d]\n", __func__, audio->feedback, audio->ac->session); return rc; fail: q6asm_audio_client_free(audio->ac); kfree(audio); return rc; } static const struct file_operations audio_evrc_fops = { .owner = THIS_MODULE, .open = audio_open, .release = audio_aio_release, .unlocked_ioctl = audio_ioctl, .fsync = audio_aio_fsync, }; struct miscdevice audio_evrc_misc = { .minor = MISC_DYNAMIC_MINOR, .name = "msm_evrc", .fops = &audio_evrc_fops, }; static int __init audio_evrc_init(void) { return misc_register(&audio_evrc_misc); } device_initcall(audio_evrc_init);
gpl-2.0
hexiaolong2008/linux-2.6.32
drivers/media/video/pvrusb2/pvrusb2-main.c
578
4483
/* * * * Copyright (C) 2005 Mike Isely <isely@pobox.com> * Copyright (C) 2004 Aurelien Alleaume <slts@free.fr> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/videodev2.h> #include "pvrusb2-hdw.h" #include "pvrusb2-devattr.h" #include "pvrusb2-context.h" #include "pvrusb2-debug.h" #include "pvrusb2-v4l2.h" #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS #include "pvrusb2-sysfs.h" #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ #define DRIVER_AUTHOR "Mike Isely <isely@pobox.com>" #define DRIVER_DESC "Hauppauge WinTV-PVR-USB2 MPEG2 Encoder/Tuner" #define DRIVER_VERSION "V4L in-tree version" #define DEFAULT_DEBUG_MASK (PVR2_TRACE_ERROR_LEGS| \ PVR2_TRACE_INFO| \ PVR2_TRACE_STD| \ PVR2_TRACE_TOLERANCE| \ PVR2_TRACE_TRAP| \ 0) int pvrusb2_debug = DEFAULT_DEBUG_MASK; module_param_named(debug,pvrusb2_debug,int,S_IRUGO|S_IWUSR); MODULE_PARM_DESC(debug, "Debug trace mask"); #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS static struct pvr2_sysfs_class *class_ptr = NULL; #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ static void pvr_setup_attach(struct pvr2_context *pvr) { /* Create association with v4l layer */ pvr2_v4l2_create(pvr); #ifdef CONFIG_VIDEO_PVRUSB2_DVB /* Create association with dvb layer */ pvr2_dvb_create(pvr); #endif #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS 
pvr2_sysfs_create(pvr,class_ptr); #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ } static int pvr_probe(struct usb_interface *intf, const struct usb_device_id *devid) { struct pvr2_context *pvr; /* Create underlying hardware interface */ pvr = pvr2_context_create(intf,devid,pvr_setup_attach); if (!pvr) { pvr2_trace(PVR2_TRACE_ERROR_LEGS, "Failed to create hdw handler"); return -ENOMEM; } pvr2_trace(PVR2_TRACE_INIT,"pvr_probe(pvr=%p)",pvr); usb_set_intfdata(intf, pvr); return 0; } /* * pvr_disconnect() * */ static void pvr_disconnect(struct usb_interface *intf) { struct pvr2_context *pvr = usb_get_intfdata(intf); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) BEGIN",pvr); usb_set_intfdata (intf, NULL); pvr2_context_disconnect(pvr); pvr2_trace(PVR2_TRACE_INIT,"pvr_disconnect(pvr=%p) DONE",pvr); } static struct usb_driver pvr_driver = { .name = "pvrusb2", .id_table = pvr2_device_table, .probe = pvr_probe, .disconnect = pvr_disconnect }; /* * pvr_init() / pvr_exit() * * This code is run to initialize/exit the driver. 
* */ static int __init pvr_init(void) { int ret; pvr2_trace(PVR2_TRACE_INIT,"pvr_init"); ret = pvr2_context_global_init(); if (ret != 0) { pvr2_trace(PVR2_TRACE_INIT,"pvr_init failure code=%d",ret); return ret; } #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS class_ptr = pvr2_sysfs_class_create(); #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ ret = usb_register(&pvr_driver); if (ret == 0) printk(KERN_INFO "pvrusb2: " DRIVER_VERSION ":" DRIVER_DESC "\n"); if (pvrusb2_debug) printk(KERN_INFO "pvrusb2: Debug mask is %d (0x%x)\n", pvrusb2_debug,pvrusb2_debug); pvr2_trace(PVR2_TRACE_INIT,"pvr_init complete"); return ret; } static void __exit pvr_exit(void) { pvr2_trace(PVR2_TRACE_INIT,"pvr_exit"); usb_deregister(&pvr_driver); #ifdef CONFIG_VIDEO_PVRUSB2_SYSFS pvr2_sysfs_class_destroy(class_ptr); #endif /* CONFIG_VIDEO_PVRUSB2_SYSFS */ pvr2_context_global_done(); pvr2_trace(PVR2_TRACE_INIT,"pvr_exit complete"); } module_init(pvr_init); module_exit(pvr_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); /* Stuff for Emacs to see, in order to encourage consistent editing style: *** Local Variables: *** *** mode: c *** *** fill-column: 70 *** *** tab-width: 8 *** *** c-basic-offset: 8 *** *** End: *** */
gpl-2.0
sudosurootdev/linux
arch/mips/kernel/module.c
1090
7492
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Copyright (C) 2001 Rusty Russell. * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org) * Copyright (C) 2005 Thiemo Seufer */ #undef DEBUG #include <linux/moduleloader.h> #include <linux/elf.h> #include <linux/mm.h> #include <linux/numa.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/jump_label.h> #include <asm/pgtable.h> /* MODULE_START */ struct mips_hi16 { struct mips_hi16 *next; Elf_Addr *addr; Elf_Addr value; }; static LIST_HEAD(dbe_list); static DEFINE_SPINLOCK(dbe_lock); #ifdef MODULE_START void *module_alloc(unsigned long size) { return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END, GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0)); } #endif int apply_r_mips_none(struct module *me, u32 *location, Elf_Addr v) { return 0; } static int apply_r_mips_32_rel(struct module *me, u32 *location, Elf_Addr v) { *location += v; return 0; } static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v) { if (v % 4) { pr_err("module %s: dangerous R_MIPS_26 REL relocation\n", me->name); return -ENOEXEC; } if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { printk(KERN_ERR "module %s: 
relocation overflow\n", me->name); return -ENOEXEC; } *location = (*location & ~0x03ffffff) | ((*location + (v >> 2)) & 0x03ffffff); return 0; } static int apply_r_mips_hi16_rel(struct module *me, u32 *location, Elf_Addr v) { struct mips_hi16 *n; /* * We cannot relocate this one now because we don't know the value of * the carry we need to add. Save the information, and let LO16 do the * actual relocation. */ n = kmalloc(sizeof *n, GFP_KERNEL); if (!n) return -ENOMEM; n->addr = (Elf_Addr *)location; n->value = v; n->next = me->arch.r_mips_hi16_list; me->arch.r_mips_hi16_list = n; return 0; } static void free_relocation_chain(struct mips_hi16 *l) { struct mips_hi16 *next; while (l) { next = l->next; kfree(l); l = next; } } static int apply_r_mips_lo16_rel(struct module *me, u32 *location, Elf_Addr v) { unsigned long insnlo = *location; struct mips_hi16 *l; Elf_Addr val, vallo; /* Sign extend the addend we extract from the lo insn. */ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; if (me->arch.r_mips_hi16_list != NULL) { l = me->arch.r_mips_hi16_list; while (l != NULL) { struct mips_hi16 *next; unsigned long insn; /* * The value for the HI16 had best be the same. */ if (v != l->value) goto out_danger; /* * Do the HI16 relocation. Note that we actually don't * need to know anything about the LO16 itself, except * where to find the low 16 bits of the addend needed * by the LO16. */ insn = *l->addr; val = ((insn & 0xffff) << 16) + vallo; val += v; /* * Account for the sign extension that will happen in * the low bits. */ val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff; insn = (insn & ~0xffff) | val; *l->addr = insn; next = l->next; kfree(l); l = next; } me->arch.r_mips_hi16_list = NULL; } /* * Ok, we're done with the HI16 relocs. Now deal with the LO16. 
*/ val = v + vallo; insnlo = (insnlo & ~0xffff) | (val & 0xffff); *location = insnlo; return 0; out_danger: free_relocation_chain(l); me->arch.r_mips_hi16_list = NULL; pr_err("module %s: dangerous R_MIPS_LO16 REL relocation\n", me->name); return -ENOEXEC; } static int (*reloc_handlers_rel[]) (struct module *me, u32 *location, Elf_Addr v) = { [R_MIPS_NONE] = apply_r_mips_none, [R_MIPS_32] = apply_r_mips_32_rel, [R_MIPS_26] = apply_r_mips_26_rel, [R_MIPS_HI16] = apply_r_mips_hi16_rel, [R_MIPS_LO16] = apply_r_mips_lo16_rel }; int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, unsigned int symindex, unsigned int relsec, struct module *me) { Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr; Elf_Sym *sym; u32 *location; unsigned int i; Elf_Addr v; int res; pr_debug("Applying relocate section %u to %u\n", relsec, sechdrs[relsec].sh_info); me->arch.r_mips_hi16_list = NULL; for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { /* This is where to make the change */ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; /* This is the symbol it is referring to */ sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_MIPS_R_SYM(rel[i]); if (IS_ERR_VALUE(sym->st_value)) { /* Ignore unresolved weak symbol */ if (ELF_ST_BIND(sym->st_info) == STB_WEAK) continue; printk(KERN_WARNING "%s: Unknown symbol %s\n", me->name, strtab + sym->st_name); return -ENOENT; } v = sym->st_value; res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v); if (res) return res; } /* * Normally the hi16 list should be deallocated at this point. A * malformed binary however could contain a series of R_MIPS_HI16 * relocations not followed by a R_MIPS_LO16 relocation. In that * case, free up the list and return an error. */ if (me->arch.r_mips_hi16_list) { free_relocation_chain(me->arch.r_mips_hi16_list); me->arch.r_mips_hi16_list = NULL; return -ENOEXEC; } return 0; } /* Given an address, look for it in the module exception tables. 
*/ const struct exception_table_entry *search_module_dbetables(unsigned long addr) { unsigned long flags; const struct exception_table_entry *e = NULL; struct mod_arch_specific *dbe; spin_lock_irqsave(&dbe_lock, flags); list_for_each_entry(dbe, &dbe_list, dbe_list) { e = search_extable(dbe->dbe_start, dbe->dbe_end - 1, addr); if (e) break; } spin_unlock_irqrestore(&dbe_lock, flags); /* Now, if we found one, we are running inside it now, hence we cannot unload the module, hence no refcnt needed. */ return e; } /* Put in dbe list if necessary. */ int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs, struct module *me) { const Elf_Shdr *s; char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; /* Make jump label nops. */ jump_label_apply_nops(me); INIT_LIST_HEAD(&me->arch.dbe_list); for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { if (strcmp("__dbe_table", secstrings + s->sh_name) != 0) continue; me->arch.dbe_start = (void *)s->sh_addr; me->arch.dbe_end = (void *)s->sh_addr + s->sh_size; spin_lock_irq(&dbe_lock); list_add(&me->arch.dbe_list, &dbe_list); spin_unlock_irq(&dbe_lock); } return 0; } void module_arch_cleanup(struct module *mod) { spin_lock_irq(&dbe_lock); list_del(&mod->arch.dbe_list); spin_unlock_irq(&dbe_lock); }
gpl-2.0
Pauliecoon/android_kernel_moto_shamu
drivers/net/wireless/iwlwifi/iwl-1000.c
2114
4669
/******************************************************************************
 *
 * Copyright(c) 2008 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

/*
 * Device configuration tables for the Intel Wireless-N 1000 and 100 series.
 * This translation unit contains only constant data (firmware version
 * bounds, firmware file-name templates, and per-device iwl_cfg structures);
 * there is no runtime logic here.
 */

#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-csr.h"
#include "iwl-agn-hw.h"

/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 5
#define IWL100_UCODE_API_MAX 5

/* Oldest version we won't warn about */
#define IWL1000_UCODE_API_OK 5
#define IWL100_UCODE_API_OK 5

/* Lowest firmware API version supported */
#define IWL1000_UCODE_API_MIN 1
#define IWL100_UCODE_API_MIN 5

/* EEPROM version */
#define EEPROM_1000_TX_POWER_VERSION	(4)
#define EEPROM_1000_EEPROM_VERSION	(0x15C)

/* Firmware file-name templates, e.g. "iwlwifi-1000-5.ucode". */
#define IWL1000_FW_PRE "iwlwifi-1000-"
#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"

#define IWL100_FW_PRE "iwlwifi-100-"
#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"

/*
 * Base hardware parameters. Note these are shared by the 100-series
 * configs below as well (both device families use the same values here).
 */
static const struct iwl_base_params iwl1000_base_params = {
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
	.shadow_ram_support = false,
	.led_compensation = 51,
	.support_ct_kill_exit = true,
	.plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
	.chain_noise_scale = 1000,
	.wd_timeout = IWL_WATCHDOG_DISABLED,
	.max_event_log_size = 128,
};

/* HT (802.11n) capabilities: 2.4 GHz band only, HT40 allowed. */
static const struct iwl_ht_params iwl1000_ht_params = {
	.ht_greenfield_support = true,
	.use_rts_for_aggregation = true, /* use rts/cts protection */
	.ht40_bands = BIT(IEEE80211_BAND_2GHZ),
};

/* Regulatory band layout in the device EEPROM; no 5 GHz HT40 entry. */
static const struct iwl_eeprom_params iwl1000_eeprom_params = {
	.regulatory_bands = {
		EEPROM_REG_BAND_1_CHANNELS,
		EEPROM_REG_BAND_2_CHANNELS,
		EEPROM_REG_BAND_3_CHANNELS,
		EEPROM_REG_BAND_4_CHANNELS,
		EEPROM_REG_BAND_5_CHANNELS,
		EEPROM_REG_BAND_24_HT40_CHANNELS,
		EEPROM_REGULATORY_BAND_NO_HT40,
	}
};

/* Fields common to every 1000-series iwl_cfg. */
#define IWL_DEVICE_1000						\
	.fw_name_pre = IWL1000_FW_PRE,				\
	.ucode_api_max = IWL1000_UCODE_API_MAX,			\
	.ucode_api_ok = IWL1000_UCODE_API_OK,			\
	.ucode_api_min = IWL1000_UCODE_API_MIN,			\
	.device_family = IWL_DEVICE_FAMILY_1000,		\
	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
	.nvm_ver = EEPROM_1000_EEPROM_VERSION,			\
	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION,		\
	.base_params = &iwl1000_base_params,			\
	.eeprom_params = &iwl1000_eeprom_params,		\
	.led_mode = IWL_LED_BLINK

const struct iwl_cfg iwl1000_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
	IWL_DEVICE_1000,
	.ht_params = &iwl1000_ht_params,
};

/* BG-only variant: identical except no HT parameters. */
const struct iwl_cfg iwl1000_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
	IWL_DEVICE_1000,
};

/*
 * Fields common to every 100-series iwl_cfg. The 100 series reuses the
 * 1000-series EEPROM versions and parameter tables (shared hardware
 * generation); only the firmware name, family, LED mode and RX diversity
 * differ.
 */
#define IWL_DEVICE_100						\
	.fw_name_pre = IWL100_FW_PRE,				\
	.ucode_api_max = IWL100_UCODE_API_MAX,			\
	.ucode_api_ok = IWL100_UCODE_API_OK,			\
	.ucode_api_min = IWL100_UCODE_API_MIN,			\
	.device_family = IWL_DEVICE_FAMILY_100,			\
	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
	.nvm_ver = EEPROM_1000_EEPROM_VERSION,			\
	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION,		\
	.base_params = &iwl1000_base_params,			\
	.eeprom_params = &iwl1000_eeprom_params,		\
	.led_mode = IWL_LED_RF_STATE,				\
	.rx_with_siso_diversity = true

const struct iwl_cfg iwl100_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
	IWL_DEVICE_100,
	.ht_params = &iwl1000_ht_params,
};

const struct iwl_cfg iwl100_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 100 BG",
	IWL_DEVICE_100,
};

/* Advertise the firmware files this driver can request. */
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
gpl-2.0
smipi1/elce2015-tiny-linux
drivers/input/joystick/as5011.c
2370
9381
/*
 * Copyright (c) 2010, 2011 Fabien Marteau <fabien.marteau@armadeus.com>
 * Sponsored by ARMadeus Systems
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Driver for Austria Microsystems joysticks AS5011
 *
 * TODO:
 *	- Power on the chip when open() and power down when close()
 *	- Manage power mode
 */

#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/input.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/input/as5011.h>
#include <linux/slab.h>
#include <linux/module.h>

#define DRIVER_DESC "Driver for Austria Microsystems AS5011 joystick"
#define MODULE_DEVICE_ALIAS "as5011"

MODULE_AUTHOR("Fabien Marteau <fabien.marteau@armadeus.com>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");

/* registers */
#define AS5011_CTRL1		0x76
#define AS5011_CTRL2		0x75
#define AS5011_XP		0x43
#define AS5011_XN		0x44
#define AS5011_YP		0x53
#define AS5011_YN		0x54
#define AS5011_X_REG		0x41
#define AS5011_Y_REG		0x42
#define AS5011_X_RES_INT	0x51
#define AS5011_Y_RES_INT	0x52

/* CTRL1 bits */
#define AS5011_CTRL1_LP_PULSED		0x80
#define AS5011_CTRL1_LP_ACTIVE		0x40
#define AS5011_CTRL1_LP_CONTINUE	0x20
#define AS5011_CTRL1_INT_WUP_EN		0x10
#define AS5011_CTRL1_INT_ACT_EN		0x08
#define AS5011_CTRL1_EXT_CLK_EN		0x04
#define AS5011_CTRL1_SOFT_RST		0x02
#define AS5011_CTRL1_DATA_VALID		0x01

/* CTRL2 bits */
#define AS5011_CTRL2_EXT_SAMPLE_EN	0x08
#define AS5011_CTRL2_RC_BIAS_ON		0x04
#define AS5011_CTRL2_INV_SPINNING	0x02

/* Reported ABS_X/ABS_Y range and input-core filtering parameters. */
#define AS5011_MAX_AXIS		80
#define AS5011_MIN_AXIS		(-80)
#define AS5011_FUZZ		8
#define AS5011_FLAT		40

/* Per-device state, allocated in probe() and freed in remove(). */
struct as5011_device {
	struct input_dev *input_dev;	/* joystick input device */
	struct i2c_client *i2c_client;	/* underlying I2C client */
	unsigned int button_gpio;	/* GPIO sensing the push button */
	unsigned int button_irq;	/* IRQ derived from button_gpio */
	unsigned int axis_irq;		/* chip "motion" IRQ from platform data */
};

/*
 * Write one register over I2C: a single 2-byte message {reg, value}.
 * Returns 0 on success or a negative errno.
 * NOTE(review): I2C_M_IGNORE_NAK means a missing ACK is not reported —
 * presumably required by the chip's protocol; confirm against datasheet.
 */
static int as5011_i2c_write(struct i2c_client *client,
			    uint8_t aregaddr,
			    uint8_t avalue)
{
	uint8_t data[2] = { aregaddr, avalue };
	struct i2c_msg msg = {
		.addr = client->addr,
		.flags = I2C_M_IGNORE_NAK,
		.len = 2,
		.buf = (uint8_t *)data
	};
	int error;

	error = i2c_transfer(client->adapter, &msg, 1);
	return error < 0 ? error : 0;
}

/*
 * Read one register over I2C and sign-extend the 8-bit result into *value.
 * Uses protocol mangling (reversed-direction address bit, no repeated
 * START) as the chip requires; probe() checks the adapter supports this.
 * Returns 0 on success or a negative errno.
 */
static int as5011_i2c_read(struct i2c_client *client,
			   uint8_t aregaddr, signed char *value)
{
	uint8_t data[2] = { aregaddr };
	struct i2c_msg msg_set[2] = {
		{
			.addr = client->addr,
			.flags = I2C_M_REV_DIR_ADDR,
			.len = 1,
			.buf = (uint8_t *)data
		},
		{
			.addr = client->addr,
			.flags = I2C_M_RD | I2C_M_NOSTART,
			.len = 1,
			.buf = (uint8_t *)data
		}
	};
	int error;

	error = i2c_transfer(client->adapter, msg_set, 2);
	if (error < 0)
		return error;

	/* Manual two's-complement sign extension of the raw byte. */
	*value = data[0] & 0x80 ? -1 * (1 + ~data[0]) : data[0];
	return 0;
}

/*
 * Threaded IRQ handler for the push button GPIO (fires on both edges).
 * The GPIO reads low when the button is pressed, hence the negation.
 */
static irqreturn_t as5011_button_interrupt(int irq, void *dev_id)
{
	struct as5011_device *as5011 = dev_id;
	int val = gpio_get_value_cansleep(as5011->button_gpio);

	input_report_key(as5011->input_dev, BTN_JOYSTICK, !val);
	input_sync(as5011->input_dev);

	return IRQ_HANDLED;
}

/*
 * Threaded IRQ handler for the chip's motion interrupt: read both axis
 * registers (which also clears the interrupt source) and report them.
 * A failed read skips reporting but still returns IRQ_HANDLED.
 */
static irqreturn_t as5011_axis_interrupt(int irq, void *dev_id)
{
	struct as5011_device *as5011 = dev_id;
	int error;
	signed char x, y;

	error = as5011_i2c_read(as5011->i2c_client, AS5011_X_RES_INT, &x);
	if (error < 0)
		goto out;

	error = as5011_i2c_read(as5011->i2c_client, AS5011_Y_RES_INT, &y);
	if (error < 0)
		goto out;

	input_report_abs(as5011->input_dev, ABS_X, x);
	input_report_abs(as5011->input_dev, ABS_Y, y);
	input_sync(as5011->input_dev);

out:
	return IRQ_HANDLED;
}

/*
 * Initialize the chip: soft reset, configure power/interrupt mode,
 * program the four axis thresholds from platform data, then read X once
 * to release the interrupt line. Returns 0 or a negative errno.
 */
static int as5011_configure_chip(struct as5011_device *as5011,
				 const struct as5011_platform_data *plat_dat)
{
	struct i2c_client *client = as5011->i2c_client;
	int error;
	signed char value;

	/* chip soft reset */
	error = as5011_i2c_write(client, AS5011_CTRL1,
				 AS5011_CTRL1_SOFT_RST);
	if (error < 0) {
		dev_err(&client->dev, "Soft reset failed\n");
		return error;
	}

	/* settle time after reset before further register access */
	mdelay(10);

	error = as5011_i2c_write(client, AS5011_CTRL1,
				 AS5011_CTRL1_LP_PULSED |
				 AS5011_CTRL1_LP_ACTIVE |
				 AS5011_CTRL1_INT_ACT_EN);
	if (error < 0) {
		dev_err(&client->dev, "Power config failed\n");
		return error;
	}

	error = as5011_i2c_write(client, AS5011_CTRL2,
				 AS5011_CTRL2_INV_SPINNING);
	if (error < 0) {
		dev_err(&client->dev, "Can't invert spinning\n");
		return error;
	}

	/* write threshold */
	error = as5011_i2c_write(client, AS5011_XP, plat_dat->xp);
	if (error < 0) {
		dev_err(&client->dev, "Can't write threshold\n");
		return error;
	}

	error = as5011_i2c_write(client, AS5011_XN, plat_dat->xn);
	if (error < 0) {
		dev_err(&client->dev, "Can't write threshold\n");
		return error;
	}

	error = as5011_i2c_write(client, AS5011_YP, plat_dat->yp);
	if (error < 0) {
		dev_err(&client->dev, "Can't write threshold\n");
		return error;
	}

	error = as5011_i2c_write(client, AS5011_YN, plat_dat->yn);
	if (error < 0) {
		dev_err(&client->dev, "Can't write threshold\n");
		return error;
	}

	/* to free irq gpio in chip */
	error = as5011_i2c_read(client, AS5011_X_RES_INT, &value);
	if (error < 0) {
		dev_err(&client->dev, "Can't read i2c X resolution value\n");
		return error;
	}

	return 0;
}

/*
 * Probe: validate platform data and adapter capabilities, allocate the
 * device and input device, request the button GPIO and both IRQs,
 * configure the chip and register the input device. All acquisitions
 * are unwound in reverse order on failure via the goto ladder.
 */
static int as5011_probe(struct i2c_client *client,
			const struct i2c_device_id *id)
{
	const struct as5011_platform_data *plat_data;
	struct as5011_device *as5011;
	struct input_dev *input_dev;
	int irq;
	int error;

	plat_data = dev_get_platdata(&client->dev);
	if (!plat_data)
		return -EINVAL;

	if (!plat_data->axis_irq) {
		dev_err(&client->dev, "No axis IRQ?\n");
		return -EINVAL;
	}

	/* as5011_i2c_read() relies on NOSTART/mangled transfers */
	if (!i2c_check_functionality(client->adapter,
				     I2C_FUNC_NOSTART |
				     I2C_FUNC_PROTOCOL_MANGLING)) {
		dev_err(&client->dev,
			"need i2c bus that supports protocol mangling\n");
		return -ENODEV;
	}

	as5011 = kmalloc(sizeof(struct as5011_device), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!as5011 || !input_dev) {
		dev_err(&client->dev,
			"Can't allocate memory for device structure\n");
		error = -ENOMEM;
		goto err_free_mem;
	}

	as5011->i2c_client = client;
	as5011->input_dev = input_dev;
	as5011->button_gpio = plat_data->button_gpio;
	as5011->axis_irq = plat_data->axis_irq;

	input_dev->name = "Austria Microsystem as5011 joystick";
	input_dev->id.bustype = BUS_I2C;
	input_dev->dev.parent = &client->dev;

	__set_bit(EV_KEY, input_dev->evbit);
	__set_bit(EV_ABS, input_dev->evbit);
	__set_bit(BTN_JOYSTICK, input_dev->keybit);

	input_set_abs_params(input_dev, ABS_X,
			     AS5011_MIN_AXIS, AS5011_MAX_AXIS,
			     AS5011_FUZZ, AS5011_FLAT);
	input_set_abs_params(as5011->input_dev, ABS_Y,
			     AS5011_MIN_AXIS, AS5011_MAX_AXIS,
			     AS5011_FUZZ, AS5011_FLAT);

	error = gpio_request(as5011->button_gpio, "AS5011 button");
	if (error < 0) {
		dev_err(&client->dev, "Failed to request button gpio\n");
		goto err_free_mem;
	}

	irq = gpio_to_irq(as5011->button_gpio);
	if (irq < 0) {
		dev_err(&client->dev,
			"Failed to get irq number for button gpio\n");
		error = irq;
		goto err_free_button_gpio;
	}

	as5011->button_irq = irq;

	/* threaded IRQs: handlers may sleep (I2C / cansleep GPIO access) */
	error = request_threaded_irq(as5011->button_irq,
				     NULL, as5011_button_interrupt,
				     IRQF_TRIGGER_RISING |
					IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
				     "as5011_button", as5011);
	if (error < 0) {
		dev_err(&client->dev,
			"Can't allocate button irq %d\n", as5011->button_irq);
		goto err_free_button_gpio;
	}

	error = as5011_configure_chip(as5011, plat_data);
	if (error)
		goto err_free_button_irq;

	error = request_threaded_irq(as5011->axis_irq, NULL,
				     as5011_axis_interrupt,
				     plat_data->axis_irqflags | IRQF_ONESHOT,
				     "as5011_joystick", as5011);
	if (error) {
		dev_err(&client->dev,
			"Can't allocate axis irq %d\n", plat_data->axis_irq);
		goto err_free_button_irq;
	}

	error = input_register_device(as5011->input_dev);
	if (error) {
		dev_err(&client->dev, "Failed to register input device\n");
		goto err_free_axis_irq;
	}

	i2c_set_clientdata(client, as5011);

	return 0;

err_free_axis_irq:
	free_irq(as5011->axis_irq, as5011);
err_free_button_irq:
	free_irq(as5011->button_irq, as5011);
err_free_button_gpio:
	gpio_free(as5011->button_gpio);
err_free_mem:
	input_free_device(input_dev);
	kfree(as5011);

	return error;
}

/* Remove: release IRQs, GPIO and input device in reverse probe order. */
static int as5011_remove(struct i2c_client *client)
{
	struct as5011_device *as5011 = i2c_get_clientdata(client);

	free_irq(as5011->axis_irq, as5011);
	free_irq(as5011->button_irq, as5011);
	gpio_free(as5011->button_gpio);

	input_unregister_device(as5011->input_dev);
	kfree(as5011);

	return 0;
}

static const struct i2c_device_id as5011_id[] = {
	{ MODULE_DEVICE_ALIAS, 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, as5011_id);

static struct i2c_driver as5011_driver = {
	.driver = {
		.name = "as5011",
	},
	.probe		= as5011_probe,
	.remove		= as5011_remove,
	.id_table	= as5011_id,
};

module_i2c_driver(as5011_driver);
gpl-2.0
omnirom/android_kernel_moto_shamu
drivers/staging/rtl8192u/ieee80211/arc4.c
2626
2059
/* * Cryptographic API * * ARC4 Cipher Algorithm * * Jon Oberheide <jon@oberheide.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/module.h> #include <linux/init.h> #include "rtl_crypto.h" #define ARC4_MIN_KEY_SIZE 1 #define ARC4_MAX_KEY_SIZE 256 #define ARC4_BLOCK_SIZE 1 struct arc4_ctx { u8 S[256]; u8 x, y; }; static int arc4_set_key(void *ctx_arg, const u8 *in_key, unsigned int key_len, u32 *flags) { struct arc4_ctx *ctx = ctx_arg; int i, j = 0, k = 0; ctx->x = 1; ctx->y = 0; for(i = 0; i < 256; i++) ctx->S[i] = i; for(i = 0; i < 256; i++) { u8 a = ctx->S[i]; j = (j + in_key[k] + a) & 0xff; ctx->S[i] = ctx->S[j]; ctx->S[j] = a; if((unsigned int)++k >= key_len) k = 0; } return 0; } static void arc4_crypt(void *ctx_arg, u8 *out, const u8 *in) { struct arc4_ctx *ctx = ctx_arg; u8 *const S = ctx->S; u8 x = ctx->x; u8 y = ctx->y; u8 a, b; a = S[x]; y = (y + a) & 0xff; b = S[y]; S[x] = b; S[y] = a; x = (x + 1) & 0xff; *out++ = *in ^ S[(a + b) & 0xff]; ctx->x = x; ctx->y = y; } static struct crypto_alg arc4_alg = { .cra_name = "arc4", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = ARC4_BLOCK_SIZE, .cra_ctxsize = sizeof(struct arc4_ctx), .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(arc4_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = ARC4_MIN_KEY_SIZE, .cia_max_keysize = ARC4_MAX_KEY_SIZE, .cia_setkey = arc4_set_key, .cia_encrypt = arc4_crypt, .cia_decrypt = arc4_crypt } } }; static int __init arc4_init(void) { return crypto_register_alg(&arc4_alg); } static void __exit arc4_exit(void) { crypto_unregister_alg(&arc4_alg); } module_init(arc4_init); module_exit(arc4_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("ARC4 Cipher Algorithm"); MODULE_AUTHOR("Jon Oberheide <jon@oberheide.org>");
gpl-2.0
InfinitiveOS-Devices/android_kernel_xiaomi_ferrari
drivers/staging/line6/capture.c
2626
10466
/*
 * Line6 Linux USB driver - 0.9.1beta
 *
 * Copyright (C) 2004-2010 Markus Grabner (grabner@icg.tugraz.at)
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation, version 2.
 *
 */

#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>

#include "audio.h"
#include "capture.h"
#include "driver.h"
#include "pcm.h"
#include "pod.h"

/*
	Find a free URB and submit it.
	A URB slot is "free" when its bit in active_urb_in is clear; the
	whole select-fill-submit sequence runs under lock_audio_in.
	NOTE(review): on usb_submit_urb() failure this logs the error but
	still returns 0 — only "no free URB" yields an error code; confirm
	callers are fine with that.
*/
static int submit_audio_in_urb(struct snd_line6_pcm *line6pcm)
{
	int index;
	unsigned long flags;
	int i, urb_size;
	int ret;
	struct urb *urb_in;

	spin_lock_irqsave(&line6pcm->lock_audio_in, flags);
	index = find_first_zero_bit(&line6pcm->active_urb_in,
				    LINE6_ISO_BUFFERS);

	if (index < 0 || index >= LINE6_ISO_BUFFERS) {
		spin_unlock_irqrestore(&line6pcm->lock_audio_in, flags);
		dev_err(line6pcm->line6->ifcdev, "no free URB found\n");
		return -EINVAL;
	}

	urb_in = line6pcm->urb_audio_in[index];
	urb_size = 0;

	/* lay out LINE6_ISO_PACKETS equal-sized iso packets back to back */
	for (i = 0; i < LINE6_ISO_PACKETS; ++i) {
		struct usb_iso_packet_descriptor *fin =
		    &urb_in->iso_frame_desc[i];
		fin->offset = urb_size;
		fin->length = line6pcm->max_packet_size;
		urb_size += line6pcm->max_packet_size;
	}

	/* each URB slot owns a fixed region of the shared capture buffer */
	urb_in->transfer_buffer =
	    line6pcm->buffer_in +
	    index * LINE6_ISO_PACKETS * line6pcm->max_packet_size;
	urb_in->transfer_buffer_length = urb_size;
	urb_in->context = line6pcm;

	ret = usb_submit_urb(urb_in, GFP_ATOMIC);

	if (ret == 0)
		set_bit(index, &line6pcm->active_urb_in);
	else
		dev_err(line6pcm->line6->ifcdev,
			"URB in #%d submission failed (%d)\n", index, ret);

	spin_unlock_irqrestore(&line6pcm->lock_audio_in, flags);
	return 0;
}

/*
	Submit all currently available capture URBs.
	Returns the first submission error, or 0 on success.
*/
int line6_submit_audio_in_all_urbs(struct snd_line6_pcm *line6pcm)
{
	int ret, i;

	for (i = 0; i < LINE6_ISO_BUFFERS; ++i) {
		ret = submit_audio_in_urb(line6pcm);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/*
	Unlink all currently active capture URBs.
	Asynchronous: marks each active URB in unlink_urb_in (exactly once)
	and calls usb_unlink_urb(); completion happens later in the callback.
*/
void line6_unlink_audio_in_urbs(struct snd_line6_pcm *line6pcm)
{
	unsigned int i;

	for (i = LINE6_ISO_BUFFERS; i--;) {
		if (test_bit(i, &line6pcm->active_urb_in)) {
			if (!test_and_set_bit(i, &line6pcm->unlink_urb_in)) {
				struct urb *u = line6pcm->urb_audio_in[i];

				usb_unlink_urb(u);
			}
		}
	}
}

/*
	Wait until unlinking of all currently active capture URBs has been
	finished. Polls active_urb_in once per jiffy for up to HZ jiffies
	(~1 s) and logs if any URB is still active at timeout.
*/
void line6_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
{
	int timeout = HZ;
	unsigned int i;
	int alive;

	do {
		alive = 0;
		for (i = LINE6_ISO_BUFFERS; i--;) {
			if (test_bit(i, &line6pcm->active_urb_in))
				alive++;
		}
		if (!alive)
			break;
		set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(1);
	} while (--timeout > 0);
	if (alive)
		snd_printk(KERN_ERR "timeout: still %d active urbs..\n",
			   alive);
}

/*
	Unlink all currently active capture URBs, and wait for finishing.
*/
void line6_unlink_wait_clear_audio_in_urbs(struct snd_line6_pcm *line6pcm)
{
	line6_unlink_audio_in_urbs(line6pcm);
	line6_wait_clear_audio_in_urbs(line6pcm);
}

/*
	Copy data into ALSA capture buffer.
	Handles wrap-around at the end of the ring buffer by splitting the
	copy into two chunks, then advances pos_in_done modulo buffer_size.
	fsize is in bytes; positions are in frames.
*/
void line6_capture_copy(struct snd_line6_pcm *line6pcm, char *fbuf, int fsize)
{
	struct snd_pcm_substream *substream =
	    get_substream(line6pcm, SNDRV_PCM_STREAM_CAPTURE);
	struct snd_pcm_runtime *runtime = substream->runtime;
	const int bytes_per_frame = line6pcm->properties->bytes_per_frame;
	int frames = fsize / bytes_per_frame;

	if (runtime == NULL)
		return;

	if (line6pcm->pos_in_done + frames > runtime->buffer_size) {
		/*
		   The transferred area goes over buffer boundary,
		   copy two separate chunks.
		 */
		int len;

		len = runtime->buffer_size - line6pcm->pos_in_done;

		if (len > 0) {
			memcpy(runtime->dma_area +
			       line6pcm->pos_in_done * bytes_per_frame, fbuf,
			       len * bytes_per_frame);
			memcpy(runtime->dma_area, fbuf + len * bytes_per_frame,
			       (frames - len) * bytes_per_frame);
		} else {
			/* this is somewhat paranoid */
			dev_err(line6pcm->line6->ifcdev,
				"driver bug: len = %d\n", len);
		}
	} else {
		/* copy single chunk */
		memcpy(runtime->dma_area +
		       line6pcm->pos_in_done * bytes_per_frame, fbuf, fsize);
	}

	line6pcm->pos_in_done += frames;
	if (line6pcm->pos_in_done >= runtime->buffer_size)
		line6pcm->pos_in_done -= runtime->buffer_size;
}

/*
	Account for newly captured bytes and signal a period boundary to
	ALSA once a full period worth of data has arrived.
*/
void line6_capture_check_period(struct snd_line6_pcm *line6pcm, int length)
{
	struct snd_pcm_substream *substream =
	    get_substream(line6pcm, SNDRV_PCM_STREAM_CAPTURE);

	line6pcm->bytes_in += length;
	if (line6pcm->bytes_in >= line6pcm->period_in) {
		line6pcm->bytes_in %= line6pcm->period_in;
		snd_pcm_period_elapsed(substream);
	}
}

/* Free the intermediate capture buffer (kfree(NULL) is a no-op). */
void line6_free_capture_buffer(struct snd_line6_pcm *line6pcm)
{
	kfree(line6pcm->buffer_in);
	line6pcm->buffer_in = NULL;
}

/*
 * Callback for completed capture URB.
 * Copies received iso packets to the ALSA buffer (and/or stashes them in
 * prev_fbuf for impulse-response measurement), clears the URB's active
 * bit, and resubmits unless the URB was unlinked or hit -EXDEV.
 */
static void audio_in_callback(struct urb *urb)
{
	int i, index, length = 0, shutdown = 0;
	unsigned long flags;

	struct snd_line6_pcm *line6pcm = (struct snd_line6_pcm *)urb->context;

	line6pcm->last_frame_in = urb->start_frame;

	/* find index of URB */
	for (index = 0; index < LINE6_ISO_BUFFERS; ++index)
		if (urb == line6pcm->urb_audio_in[index])
			break;

	spin_lock_irqsave(&line6pcm->lock_audio_in, flags);

	for (i = 0; i < LINE6_ISO_PACKETS; ++i) {
		char *fbuf;
		int fsize;
		struct usb_iso_packet_descriptor *fin = &urb->iso_frame_desc[i];

		if (fin->status == -EXDEV) {
			shutdown = 1;
			break;
		}

		fbuf = urb->transfer_buffer + fin->offset;
		fsize = fin->actual_length;

		if (fsize > line6pcm->max_packet_size) {
			dev_err(line6pcm->line6->ifcdev,
				"driver and/or device bug: packet too large (%d > %d)\n",
				fsize, line6pcm->max_packet_size);
		}

		length += fsize;

		/* the following assumes LINE6_ISO_PACKETS == 1: */
		line6pcm->prev_fbuf = fbuf;
		line6pcm->prev_fsize = fsize;

#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
		if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
#endif
			if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM,
				     &line6pcm->flags) && (fsize > 0))
				line6_capture_copy(line6pcm, fbuf, fsize);
	}

	clear_bit(index, &line6pcm->active_urb_in);

	if (test_and_clear_bit(index, &line6pcm->unlink_urb_in))
		shutdown = 1;

	spin_unlock_irqrestore(&line6pcm->lock_audio_in, flags);

	if (!shutdown) {
		submit_audio_in_urb(line6pcm);

#ifdef CONFIG_LINE6_USB_IMPULSE_RESPONSE
		if (!(line6pcm->flags & LINE6_BITS_PCM_IMPULSE))
#endif
			if (test_bit(LINE6_INDEX_PCM_ALSA_CAPTURE_STREAM,
				     &line6pcm->flags))
				line6_capture_check_period(line6pcm, length);
	}
}

/* open capture callback: constrain rates, install hardware description */
static int snd_line6_capture_open(struct snd_pcm_substream *substream)
{
	int err;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);

	err = snd_pcm_hw_constraint_ratdens(runtime, 0,
					    SNDRV_PCM_HW_PARAM_RATE,
					    (&line6pcm->
					     properties->snd_line6_rates));
	if (err < 0)
		return err;

	runtime->hw = line6pcm->properties->snd_line6_capture_hw;
	return 0;
}

/* close capture callback: nothing to release here */
static int snd_line6_capture_close(struct snd_pcm_substream *substream)
{
	return 0;
}

/*
 * hw_params capture callback: acquire the capture buffer, allocate the
 * ALSA DMA pages and record the period size in bytes.
 */
static int snd_line6_capture_hw_params(struct snd_pcm_substream *substream,
				       struct snd_pcm_hw_params *hw_params)
{
	int ret;
	struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);

	/* -- Florian Demski [FD] */
	/* don't ask me why, but this fixes the bug on my machine */
	if (line6pcm == NULL) {
		if (substream->pcm == NULL)
			return -ENOMEM;
		if (substream->pcm->private_data == NULL)
			return -ENOMEM;
		substream->private_data = substream->pcm->private_data;
		line6pcm = snd_pcm_substream_chip(substream);
	}
	/* -- [FD] end */

	ret = line6_pcm_acquire(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);

	if (ret < 0)
		return ret;

	ret = snd_pcm_lib_malloc_pages(substream,
				       params_buffer_bytes(hw_params));
	if (ret < 0) {
		line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
		return ret;
	}

	line6pcm->period_in = params_period_bytes(hw_params);
	return 0;
}

/* hw_free capture callback: release buffer and DMA pages */
static int snd_line6_capture_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);

	line6_pcm_release(line6pcm, LINE6_BIT_PCM_ALSA_CAPTURE_BUFFER);
	return snd_pcm_lib_free_pages(substream);
}

/*
 * trigger callback: start/stop (and resume/suspend when CONFIG_PM) the
 * capture stream by acquiring/releasing the stream bit.
 */
int snd_line6_capture_trigger(struct snd_line6_pcm *line6pcm, int cmd)
{
	int err;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
#ifdef CONFIG_PM
	case SNDRV_PCM_TRIGGER_RESUME:
#endif
		err = line6_pcm_acquire(line6pcm,
					LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);

		if (err < 0)
			return err;

		break;

	case SNDRV_PCM_TRIGGER_STOP:
#ifdef CONFIG_PM
	case SNDRV_PCM_TRIGGER_SUSPEND:
#endif
		err = line6_pcm_release(line6pcm,
					LINE6_BIT_PCM_ALSA_CAPTURE_STREAM);

		if (err < 0)
			return err;

		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/* capture pointer callback: current hardware position in frames */
static snd_pcm_uframes_t
snd_line6_capture_pointer(struct snd_pcm_substream *substream)
{
	struct snd_line6_pcm *line6pcm = snd_pcm_substream_chip(substream);

	return line6pcm->pos_in_done;
}

/* capture operators */
/*
 * NOTE(review): .trigger points at the shared snd_line6_trigger (defined
 * elsewhere), not snd_line6_capture_trigger above — presumably it
 * dispatches per stream direction; confirm in pcm.c.
 */
struct snd_pcm_ops snd_line6_capture_ops = {
	.open = snd_line6_capture_open,
	.close = snd_line6_capture_close,
	.ioctl = snd_pcm_lib_ioctl,
	.hw_params = snd_line6_capture_hw_params,
	.hw_free = snd_line6_capture_hw_free,
	.prepare = snd_line6_prepare,
	.trigger = snd_line6_trigger,
	.pointer = snd_line6_capture_pointer,
};

/*
 * Allocate the isochronous input URBs and fill in the fields that never
 * change (pipe, interval, completion handler). Returns 0 or -ENOMEM.
 */
int line6_create_audio_in_urbs(struct snd_line6_pcm *line6pcm)
{
	int i;

	/* create audio URBs and fill in constant values: */
	for (i = 0; i < LINE6_ISO_BUFFERS; ++i) {
		struct urb *urb;

		/* URB for audio in: */
		urb = line6pcm->urb_audio_in[i] =
		    usb_alloc_urb(LINE6_ISO_PACKETS, GFP_KERNEL);

		if (urb == NULL) {
			dev_err(line6pcm->line6->ifcdev, "Out of memory\n");
			return -ENOMEM;
		}

		urb->dev = line6pcm->line6->usbdev;
		urb->pipe =
		    usb_rcvisocpipe(line6pcm->line6->usbdev,
				    line6pcm->ep_audio_read &
				    USB_ENDPOINT_NUMBER_MASK);
		urb->transfer_flags = URB_ISO_ASAP;
		urb->start_frame = -1;
		urb->number_of_packets = LINE6_ISO_PACKETS;
		urb->interval = LINE6_ISO_INTERVAL;
		urb->error_count = 0;
		urb->complete = audio_in_callback;
	}

	return 0;
}
gpl-2.0
n1kolaa/android_kernel_lge_msm8974
drivers/net/wireless/rtlwifi/rtl8192cu/rf.c
2882
15015
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. 
* * Larry Finger <Larry.Finger@lwfinger.net> * *****************************************************************************/ #include "../wifi.h" #include "reg.h" #include "def.h" #include "phy.h" #include "rf.h" #include "dm.h" static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw); void rtl92cu_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); switch (bandwidth) { case HT_CHANNEL_WIDTH_20: rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & 0xfffff3ff) | 0x0400); rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[0]); break; case HT_CHANNEL_WIDTH_20_40: rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] & 0xfffff3ff)); rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK, rtlphy->rfreg_chnlval[0]); break; default: RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "unknown bandwidth: %#X\n", bandwidth); break; } } void rtl92cu_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_hal *rtlhal = rtl_hal(rtlpriv); struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 tx_agc[2] = { 0, 0 }, tmpval = 0; bool turbo_scanoff = false; u8 idx1, idx2; u8 *ptr; if (rtlhal->interface == INTF_PCI) { if (rtlefuse->eeprom_regulatory != 0) turbo_scanoff = true; } else { if ((rtlefuse->eeprom_regulatory != 0) || (rtlefuse->external_pa)) turbo_scanoff = true; } if (mac->act_scanning) { tx_agc[RF90_PATH_A] = 0x3f3f3f3f; tx_agc[RF90_PATH_B] = 0x3f3f3f3f; if (turbo_scanoff) { for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { tx_agc[idx1] = ppowerlevel[idx1] | (ppowerlevel[idx1] << 8) | (ppowerlevel[idx1] << 16) | (ppowerlevel[idx1] << 24); if (rtlhal->interface == INTF_USB) { if (tx_agc[idx1] > 0x20 && rtlefuse->external_pa) tx_agc[idx1] = 0x20; } } } } else { if 
(rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_LEVEL1) { tx_agc[RF90_PATH_A] = 0x10101010; tx_agc[RF90_PATH_B] = 0x10101010; } else if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_LEVEL1) { tx_agc[RF90_PATH_A] = 0x00000000; tx_agc[RF90_PATH_B] = 0x00000000; } else{ for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { tx_agc[idx1] = ppowerlevel[idx1] | (ppowerlevel[idx1] << 8) | (ppowerlevel[idx1] << 16) | (ppowerlevel[idx1] << 24); } if (rtlefuse->eeprom_regulatory == 0) { tmpval = (rtlphy->mcs_txpwrlevel_origoffset [0][6]) + (rtlphy->mcs_txpwrlevel_origoffset [0][7] << 8); tx_agc[RF90_PATH_A] += tmpval; tmpval = (rtlphy->mcs_txpwrlevel_origoffset [0][14]) + (rtlphy->mcs_txpwrlevel_origoffset [0][15] << 24); tx_agc[RF90_PATH_B] += tmpval; } } } for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) { ptr = (u8 *) (&(tx_agc[idx1])); for (idx2 = 0; idx2 < 4; idx2++) { if (*ptr > RF6052_MAX_TX_PWR) *ptr = RF6052_MAX_TX_PWR; ptr++; } } tmpval = tx_agc[RF90_PATH_A] & 0xff; rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_A_CCK1_MCS32); tmpval = tx_agc[RF90_PATH_A] >> 8; if (mac->mode == WIRELESS_MODE_B) tmpval = tmpval & 0xff00ffff; rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK11_A_CCK2_11); tmpval = tx_agc[RF90_PATH_B] >> 24; rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK11_A_CCK2_11); tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff; rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval, RTXAGC_B_CCK1_55_MCS32); } static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel, u32 *ofdmbase, u32 *mcsbase) { 
struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u32 powerBase0, powerBase1; u8 legacy_pwrdiff = 0, ht20_pwrdiff = 0; u8 i, powerlevel[2]; for (i = 0; i < 2; i++) { powerlevel[i] = ppowerlevel[i]; legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1]; powerBase0 = powerlevel[i] + legacy_pwrdiff; powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) | (powerBase0 << 8) | powerBase0; *(ofdmbase + i) = powerBase0; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [OFDM power base index rf(%c) = 0x%x]\n", i == 0 ? 'A' : 'B', *(ofdmbase + i)); } for (i = 0; i < 2; i++) { if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) { ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1]; powerlevel[i] += ht20_pwrdiff; } powerBase1 = powerlevel[i]; powerBase1 = (powerBase1 << 24) | (powerBase1 << 16) | (powerBase1 << 8) | powerBase1; *(mcsbase + i) = powerBase1; RTPRINT(rtlpriv, FPHY, PHY_TXPWR, " [MCS power base index rf(%c) = 0x%x]\n", i == 0 ? 'A' : 'B', *(mcsbase + i)); } } static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw, u8 channel, u8 index, u32 *powerBase0, u32 *powerBase1, u32 *p_outwriteval) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw)); u8 i, chnlgroup = 0, pwr_diff_limit[4]; u32 writeVal, customer_limit, rf; for (rf = 0; rf < 2; rf++) { switch (rtlefuse->eeprom_regulatory) { case 0: chnlgroup = 0; writeVal = rtlphy->mcs_txpwrlevel_origoffset [chnlgroup][index + (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "RTK better performance,writeVal(%c) = 0x%x\n", rf == 0 ? 
'A' : 'B', writeVal); break; case 1: if (rtlphy->pwrgroup_cnt == 1) chnlgroup = 0; if (rtlphy->pwrgroup_cnt >= 3) { if (channel <= 3) chnlgroup = 0; else if (channel >= 4 && channel <= 9) chnlgroup = 1; else if (channel > 9) chnlgroup = 2; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) chnlgroup++; else chnlgroup += 4; } writeVal = rtlphy->mcs_txpwrlevel_origoffset [chnlgroup][index + (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Realtek regulatory, 20MHz, writeVal(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; case 2: writeVal = ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Better regulatory,writeVal(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; case 3: chnlgroup = 0; if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 40MHzrf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', rtlefuse->pwrgroup_ht40[rf] [channel - 1]); } else { RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "customer's limit, 20MHz rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', rtlefuse->pwrgroup_ht20[rf] [channel - 1]); } for (i = 0; i < 4; i++) { pwr_diff_limit[i] = (u8) ((rtlphy->mcs_txpwrlevel_origoffset [chnlgroup][index + (rf ? 8 : 0)] & (0x7f << (i * 8))) >> (i * 8)); if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) { if (pwr_diff_limit[i] > rtlefuse->pwrgroup_ht40[rf] [channel - 1]) pwr_diff_limit[i] = rtlefuse-> pwrgroup_ht40[rf] [channel - 1]; } else { if (pwr_diff_limit[i] > rtlefuse->pwrgroup_ht20[rf] [channel - 1]) pwr_diff_limit[i] = rtlefuse->pwrgroup_ht20[rf] [channel - 1]; } } customer_limit = (pwr_diff_limit[3] << 24) | (pwr_diff_limit[2] << 16) | (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer's limit rf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', customer_limit); writeVal = customer_limit + ((index < 2) ? 
powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Customer, writeVal rf(%c)= 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; default: chnlgroup = 0; writeVal = rtlphy->mcs_txpwrlevel_origoffset[chnlgroup] [index + (rf ? 8 : 0)] + ((index < 2) ? powerBase0[rf] : powerBase1[rf]); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "RTK better performance, writeValrf(%c) = 0x%x\n", rf == 0 ? 'A' : 'B', writeVal); break; } if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_LEVEL1) writeVal = 0x14141414; else if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_LEVEL2) writeVal = 0x00000000; if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1) writeVal = writeVal - 0x06060606; else if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT2) writeVal = writeVal; *(p_outwriteval + rf) = writeVal; } } static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw, u8 index, u32 *pValue) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); u16 regoffset_a[6] = { RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24, RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04, RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12 }; u16 regoffset_b[6] = { RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24, RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04, RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12 }; u8 i, rf, pwr_val[4]; u32 writeVal; u16 regoffset; for (rf = 0; rf < 2; rf++) { writeVal = pValue[rf]; for (i = 0; i < 4; i++) { pwr_val[i] = (u8)((writeVal & (0x7f << (i * 8))) >> (i * 8)); if (pwr_val[i] > RF6052_MAX_TX_PWR) pwr_val[i] = RF6052_MAX_TX_PWR; } writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) | (pwr_val[1] << 8) | pwr_val[0]; if (rf == 0) regoffset = regoffset_a[index]; else regoffset = regoffset_b[index]; rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal); RTPRINT(rtlpriv, FPHY, PHY_TXPWR, "Set 0x%x = %08x\n", regoffset, writeVal); if (((get_rf_type(rtlphy) == RF_2T2R) && (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_B_MCS15_MCS12)) || 
((get_rf_type(rtlphy) != RF_2T2R) && (regoffset == RTXAGC_A_MCS07_MCS04 || regoffset == RTXAGC_B_MCS07_MCS04))) { writeVal = pwr_val[3]; if (regoffset == RTXAGC_A_MCS15_MCS12 || regoffset == RTXAGC_A_MCS07_MCS04) regoffset = 0xc90; if (regoffset == RTXAGC_B_MCS15_MCS12 || regoffset == RTXAGC_B_MCS07_MCS04) regoffset = 0xc98; for (i = 0; i < 3; i++) { writeVal = (writeVal > 6) ? (writeVal - 6) : 0; rtl_write_byte(rtlpriv, (u32)(regoffset + i), (u8)writeVal); } } } } void rtl92cu_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw, u8 *ppowerlevel, u8 channel) { u32 writeVal[2], powerBase0[2], powerBase1[2]; u8 index = 0; rtl92c_phy_get_power_base(hw, ppowerlevel, channel, &powerBase0[0], &powerBase1[0]); for (index = 0; index < 6; index++) { _rtl92c_get_txpower_writeval_by_regulatory(hw, channel, index, &powerBase0[0], &powerBase1[0], &writeVal[0]); _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]); } } bool rtl92cu_phy_rf6052_config(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); bool rtstatus = true; u8 b_reg_hwparafile = 1; if (rtlphy->rf_type == RF_1T1R) rtlphy->num_total_rfpath = 1; else rtlphy->num_total_rfpath = 2; if (b_reg_hwparafile == 1) rtstatus = _rtl92c_phy_rf6052_config_parafile(hw); return rtstatus; } static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); struct rtl_phy *rtlphy = &(rtlpriv->phy); u32 u4_regvalue = 0; u8 rfpath; bool rtstatus = true; struct bb_reg_def *pphyreg; for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) { pphyreg = &rtlphy->phyreg_def[rfpath]; switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV); break; case RF90_PATH_B: case RF90_PATH_D: u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16); break; } rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1); udelay(1); rtl_set_bbreg(hw, pphyreg->rfintfo, 
BRFSI_RFENV, 0x1); udelay(1); rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREADDREAALENGTH, 0x0); udelay(1); rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0); udelay(1); switch (rfpath) { case RF90_PATH_A: rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw, (enum radio_path) rfpath); break; case RF90_PATH_B: rtstatus = rtl92cu_phy_config_rf_with_headerfile(hw, (enum radio_path) rfpath); break; case RF90_PATH_C: break; case RF90_PATH_D: break; } switch (rfpath) { case RF90_PATH_A: case RF90_PATH_C: rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV, u4_regvalue); break; case RF90_PATH_B: case RF90_PATH_D: rtl_set_bbreg(hw, pphyreg->rfintfs, BRFSI_RFENV << 16, u4_regvalue); break; } if (!rtstatus) { RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "Radio[%d] Fail!!", rfpath); goto phy_rf_cfg_fail; } } RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, "<---\n"); return rtstatus; phy_rf_cfg_fail: return rtstatus; }
gpl-2.0
Kaik541/kernel_lge_gee
security/keys/compat.c
3394
3710
/* 32-bit compatibility syscall for 64-bit systems
 *
 * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/syscalls.h>
#include <linux/keyctl.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * Instantiate a key with the specified compatibility multipart payload and
 * link the key into the destination keyring if one is given.
 *
 * The caller must have the appropriate instantiation permit set for this to
 * work (see keyctl_assume_authority).  No other permissions are required.
 *
 * The compat iovec array is validated and converted with
 * compat_rw_copy_check_uvector(); a heap-allocated iovec (returned in iov
 * when it doesn't fit in iovstack) is always freed before return.
 *
 * If successful, 0 will be returned.
 */
long compat_keyctl_instantiate_key_iov(
	key_serial_t id,
	const struct compat_iovec __user *_payload_iov,
	unsigned ioc,
	key_serial_t ringid)
{
	struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
	long ret;

	/* No iovec array (or zero entries) means "instantiate with no
	 * payload".  (Was "_payload_iov == 0": don't compare a pointer
	 * against integer literal 0.) */
	if (!_payload_iov || !ioc)
		goto no_payload;

	ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
					   ARRAY_SIZE(iovstack),
					   iovstack, &iov, 1);
	if (ret < 0)
		return ret;
	if (ret == 0)
		goto no_payload_free;	/* total payload length is zero */

	ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);

	if (iov != iovstack)
		kfree(iov);
	return ret;

no_payload_free:
	if (iov != iovstack)
		kfree(iov);
no_payload:
	return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
}

/*
 * The key control system call, 32-bit compatibility version for 64-bit archs
 *
 * This should only be called if the 64-bit arch uses weird pointers in 32-bit
 * mode or doesn't guarantee that the top 32-bits of the argument registers on
 * taking a 32-bit syscall are zero.  If you can, you should call sys_keyctl()
 * directly.
 *
 * Each case simply forwards to the corresponding keyctl_*() implementation,
 * converting 32-bit user pointers with compat_ptr() where an argument is an
 * address rather than an ID/flag value.
 */
asmlinkage long compat_sys_keyctl(u32 option,
				  u32 arg2, u32 arg3, u32 arg4, u32 arg5)
{
	switch (option) {
	case KEYCTL_GET_KEYRING_ID:
		return keyctl_get_keyring_ID(arg2, arg3);

	case KEYCTL_JOIN_SESSION_KEYRING:
		return keyctl_join_session_keyring(compat_ptr(arg2));

	case KEYCTL_UPDATE:
		return keyctl_update_key(arg2, compat_ptr(arg3), arg4);

	case KEYCTL_REVOKE:
		return keyctl_revoke_key(arg2);

	case KEYCTL_DESCRIBE:
		return keyctl_describe_key(arg2, compat_ptr(arg3), arg4);

	case KEYCTL_CLEAR:
		return keyctl_keyring_clear(arg2);

	case KEYCTL_LINK:
		return keyctl_keyring_link(arg2, arg3);

	case KEYCTL_UNLINK:
		return keyctl_keyring_unlink(arg2, arg3);

	case KEYCTL_SEARCH:
		return keyctl_keyring_search(arg2, compat_ptr(arg3),
					     compat_ptr(arg4), arg5);

	case KEYCTL_READ:
		return keyctl_read_key(arg2, compat_ptr(arg3), arg4);

	case KEYCTL_CHOWN:
		return keyctl_chown_key(arg2, arg3, arg4);

	case KEYCTL_SETPERM:
		return keyctl_setperm_key(arg2, arg3);

	case KEYCTL_INSTANTIATE:
		return keyctl_instantiate_key(arg2, compat_ptr(arg3), arg4,
					      arg5);

	case KEYCTL_NEGATE:
		return keyctl_negate_key(arg2, arg3, arg4);

	case KEYCTL_SET_REQKEY_KEYRING:
		return keyctl_set_reqkey_keyring(arg2);

	case KEYCTL_SET_TIMEOUT:
		return keyctl_set_timeout(arg2, arg3);

	case KEYCTL_ASSUME_AUTHORITY:
		return keyctl_assume_authority(arg2);

	case KEYCTL_GET_SECURITY:
		return keyctl_get_security(arg2, compat_ptr(arg3), arg4);

	case KEYCTL_SESSION_TO_PARENT:
		return keyctl_session_to_parent();

	case KEYCTL_REJECT:
		return keyctl_reject_key(arg2, arg3, arg4, arg5);

	case KEYCTL_INSTANTIATE_IOV:
		return compat_keyctl_instantiate_key_iov(
			arg2, compat_ptr(arg3), arg4, arg5);

	default:
		return -EOPNOTSUPP;
	}
}
gpl-2.0
syhost/android_kernel_pantech_ef62l
arch/blackfin/kernel/bfin_dma.c
4418
14236
/* * bfin_dma.c - Blackfin DMA implementation * * Copyright 2004-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/param.h> #include <linux/proc_fs.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <asm/blackfin.h> #include <asm/cacheflush.h> #include <asm/dma.h> #include <asm/uaccess.h> #include <asm/early_printk.h> /* * To make sure we work around 05000119 - we always check DMA_DONE bit, * never the DMA_RUN bit */ struct dma_channel dma_ch[MAX_DMA_CHANNELS]; EXPORT_SYMBOL(dma_ch); static int __init blackfin_dma_init(void) { int i; printk(KERN_INFO "Blackfin DMA Controller\n"); #if ANOMALY_05000480 bfin_write_DMAC_TC_PER(0x0111); #endif for (i = 0; i < MAX_DMA_CHANNELS; i++) { atomic_set(&dma_ch[i].chan_status, 0); dma_ch[i].regs = dma_io_base_addr[i]; } /* Mark MEMDMA Channel 0 as requested since we're using it internally */ request_dma(CH_MEM_STREAM0_DEST, "Blackfin dma_memcpy"); request_dma(CH_MEM_STREAM0_SRC, "Blackfin dma_memcpy"); #if defined(CONFIG_DEB_DMA_URGENT) bfin_write_EBIU_DDRQUE(bfin_read_EBIU_DDRQUE() | DEB1_URGENT | DEB2_URGENT | DEB3_URGENT); #endif return 0; } arch_initcall(blackfin_dma_init); #ifdef CONFIG_PROC_FS static int proc_dma_show(struct seq_file *m, void *v) { int i; for (i = 0; i < MAX_DMA_CHANNELS; ++i) if (dma_channel_active(i)) seq_printf(m, "%2d: %s\n", i, dma_ch[i].device_id); return 0; } static int proc_dma_open(struct inode *inode, struct file *file) { return single_open(file, proc_dma_show, NULL); } static const struct file_operations proc_dma_operations = { .open = proc_dma_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init proc_dma_init(void) { return proc_create("dma", 0, NULL, &proc_dma_operations) != NULL; } late_initcall(proc_dma_init); #endif static void set_dma_peripheral_map(unsigned int channel, 
const char *device_id) { #ifdef CONFIG_BF54x unsigned int per_map; switch (channel) { case CH_UART2_RX: per_map = 0xC << 12; break; case CH_UART2_TX: per_map = 0xD << 12; break; case CH_UART3_RX: per_map = 0xE << 12; break; case CH_UART3_TX: per_map = 0xF << 12; break; default: return; } if (strncmp(device_id, "BFIN_UART", 9) == 0) dma_ch[channel].regs->peripheral_map = per_map; #endif } /** * request_dma - request a DMA channel * * Request the specific DMA channel from the system if it's available. */ int request_dma(unsigned int channel, const char *device_id) { pr_debug("request_dma() : BEGIN\n"); if (device_id == NULL) printk(KERN_WARNING "request_dma(%u): no device_id given\n", channel); #if defined(CONFIG_BF561) && ANOMALY_05000182 if (channel >= CH_IMEM_STREAM0_DEST && channel <= CH_IMEM_STREAM1_DEST) { if (get_cclk() > 500000000) { printk(KERN_WARNING "Request IMDMA failed due to ANOMALY 05000182\n"); return -EFAULT; } } #endif if (atomic_cmpxchg(&dma_ch[channel].chan_status, 0, 1)) { pr_debug("DMA CHANNEL IN USE\n"); return -EBUSY; } set_dma_peripheral_map(channel, device_id); dma_ch[channel].device_id = device_id; dma_ch[channel].irq = 0; /* This is to be enabled by putting a restriction - * you have to request DMA, before doing any operations on * descriptor/channel */ pr_debug("request_dma() : END\n"); return 0; } EXPORT_SYMBOL(request_dma); int set_dma_callback(unsigned int channel, irq_handler_t callback, void *data) { int ret; unsigned int irq; BUG_ON(channel >= MAX_DMA_CHANNELS || !callback || !atomic_read(&dma_ch[channel].chan_status)); irq = channel2irq(channel); ret = request_irq(irq, callback, 0, dma_ch[channel].device_id, data); if (ret) return ret; dma_ch[channel].irq = irq; dma_ch[channel].data = data; return 0; } EXPORT_SYMBOL(set_dma_callback); /** * clear_dma_buffer - clear DMA fifos for specified channel * * Set the Buffer Clear bit in the Configuration register of specific DMA * channel. This will stop the descriptor based DMA operation. 
*/ static void clear_dma_buffer(unsigned int channel) { dma_ch[channel].regs->cfg |= RESTART; SSYNC(); dma_ch[channel].regs->cfg &= ~RESTART; } void free_dma(unsigned int channel) { pr_debug("freedma() : BEGIN\n"); BUG_ON(channel >= MAX_DMA_CHANNELS || !atomic_read(&dma_ch[channel].chan_status)); /* Halt the DMA */ disable_dma(channel); clear_dma_buffer(channel); if (dma_ch[channel].irq) free_irq(dma_ch[channel].irq, dma_ch[channel].data); /* Clear the DMA Variable in the Channel */ atomic_set(&dma_ch[channel].chan_status, 0); pr_debug("freedma() : END\n"); } EXPORT_SYMBOL(free_dma); #ifdef CONFIG_PM # ifndef MAX_DMA_SUSPEND_CHANNELS # define MAX_DMA_SUSPEND_CHANNELS MAX_DMA_CHANNELS # endif int blackfin_dma_suspend(void) { int i; for (i = 0; i < MAX_DMA_CHANNELS; ++i) { if (dma_ch[i].regs->cfg & DMAEN) { printk(KERN_ERR "DMA Channel %d failed to suspend\n", i); return -EBUSY; } if (i < MAX_DMA_SUSPEND_CHANNELS) dma_ch[i].saved_peripheral_map = dma_ch[i].regs->peripheral_map; } #if ANOMALY_05000480 bfin_write_DMAC_TC_PER(0x0); #endif return 0; } void blackfin_dma_resume(void) { int i; for (i = 0; i < MAX_DMA_CHANNELS; ++i) { dma_ch[i].regs->cfg = 0; if (i < MAX_DMA_SUSPEND_CHANNELS) dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map; } #if ANOMALY_05000480 bfin_write_DMAC_TC_PER(0x0111); #endif } #endif /** * blackfin_dma_early_init - minimal DMA init * * Setup a few DMA registers so we can safely do DMA transfers early on in * the kernel booting process. Really this just means using dma_memcpy(). 
*/ void __init blackfin_dma_early_init(void) { early_shadow_stamp(); bfin_write_MDMA_S0_CONFIG(0); bfin_write_MDMA_S1_CONFIG(0); } void __init early_dma_memcpy(void *pdst, const void *psrc, size_t size) { unsigned long dst = (unsigned long)pdst; unsigned long src = (unsigned long)psrc; struct dma_register *dst_ch, *src_ch; early_shadow_stamp(); /* We assume that everything is 4 byte aligned, so include * a basic sanity check */ BUG_ON(dst % 4); BUG_ON(src % 4); BUG_ON(size % 4); src_ch = 0; /* Find an avalible memDMA channel */ while (1) { if (src_ch == (struct dma_register *)MDMA_S0_NEXT_DESC_PTR) { dst_ch = (struct dma_register *)MDMA_D1_NEXT_DESC_PTR; src_ch = (struct dma_register *)MDMA_S1_NEXT_DESC_PTR; } else { dst_ch = (struct dma_register *)MDMA_D0_NEXT_DESC_PTR; src_ch = (struct dma_register *)MDMA_S0_NEXT_DESC_PTR; } if (!bfin_read16(&src_ch->cfg)) break; else if (bfin_read16(&dst_ch->irq_status) & DMA_DONE) { bfin_write16(&src_ch->cfg, 0); break; } } /* Force a sync in case a previous config reset on this channel * occurred. This is needed so subsequent writes to DMA registers * are not spuriously lost/corrupted. 
*/ __builtin_bfin_ssync(); /* Destination */ bfin_write32(&dst_ch->start_addr, dst); bfin_write16(&dst_ch->x_count, size >> 2); bfin_write16(&dst_ch->x_modify, 1 << 2); bfin_write16(&dst_ch->irq_status, DMA_DONE | DMA_ERR); /* Source */ bfin_write32(&src_ch->start_addr, src); bfin_write16(&src_ch->x_count, size >> 2); bfin_write16(&src_ch->x_modify, 1 << 2); bfin_write16(&src_ch->irq_status, DMA_DONE | DMA_ERR); /* Enable */ bfin_write16(&src_ch->cfg, DMAEN | WDSIZE_32); bfin_write16(&dst_ch->cfg, WNR | DI_EN | DMAEN | WDSIZE_32); /* Since we are atomic now, don't use the workaround ssync */ __builtin_bfin_ssync(); } void __init early_dma_memcpy_done(void) { early_shadow_stamp(); while ((bfin_read_MDMA_S0_CONFIG() && !(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) || (bfin_read_MDMA_S1_CONFIG() && !(bfin_read_MDMA_D1_IRQ_STATUS() & DMA_DONE))) continue; bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); bfin_write_MDMA_D1_IRQ_STATUS(DMA_DONE | DMA_ERR); /* * Now that DMA is done, we would normally flush cache, but * i/d cache isn't running this early, so we don't bother, * and just clear out the DMA channel for next time */ bfin_write_MDMA_S0_CONFIG(0); bfin_write_MDMA_S1_CONFIG(0); bfin_write_MDMA_D0_CONFIG(0); bfin_write_MDMA_D1_CONFIG(0); __builtin_bfin_ssync(); } /** * __dma_memcpy - program the MDMA registers * * Actually program MDMA0 and wait for the transfer to finish. Disable IRQs * while programming registers so that everything is fully configured. Wait * for DMA to finish with IRQs enabled. If interrupted, the initial DMA_DONE * check will make sure we don't clobber any existing transfer. */ static void __dma_memcpy(u32 daddr, s16 dmod, u32 saddr, s16 smod, size_t cnt, u32 conf) { static DEFINE_SPINLOCK(mdma_lock); unsigned long flags; spin_lock_irqsave(&mdma_lock, flags); /* Force a sync in case a previous config reset on this channel * occurred. This is needed so subsequent writes to DMA registers * are not spuriously lost/corrupted. 
Do it under irq lock and * without the anomaly version (because we are atomic already). */ __builtin_bfin_ssync(); if (bfin_read_MDMA_S0_CONFIG()) while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) continue; if (conf & DMA2D) { /* For larger bit sizes, we've already divided down cnt so it * is no longer a multiple of 64k. So we have to break down * the limit here so it is a multiple of the incoming size. * There is no limitation here in terms of total size other * than the hardware though as the bits lost in the shift are * made up by MODIFY (== we can hit the whole address space). * X: (2^(16 - 0)) * 1 == (2^(16 - 1)) * 2 == (2^(16 - 2)) * 4 */ u32 shift = abs(dmod) >> 1; size_t ycnt = cnt >> (16 - shift); cnt = 1 << (16 - shift); bfin_write_MDMA_D0_Y_COUNT(ycnt); bfin_write_MDMA_S0_Y_COUNT(ycnt); bfin_write_MDMA_D0_Y_MODIFY(dmod); bfin_write_MDMA_S0_Y_MODIFY(smod); } bfin_write_MDMA_D0_START_ADDR(daddr); bfin_write_MDMA_D0_X_COUNT(cnt); bfin_write_MDMA_D0_X_MODIFY(dmod); bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); bfin_write_MDMA_S0_START_ADDR(saddr); bfin_write_MDMA_S0_X_COUNT(cnt); bfin_write_MDMA_S0_X_MODIFY(smod); bfin_write_MDMA_S0_IRQ_STATUS(DMA_DONE | DMA_ERR); bfin_write_MDMA_S0_CONFIG(DMAEN | conf); bfin_write_MDMA_D0_CONFIG(WNR | DI_EN | DMAEN | conf); spin_unlock_irqrestore(&mdma_lock, flags); SSYNC(); while (!(bfin_read_MDMA_D0_IRQ_STATUS() & DMA_DONE)) if (bfin_read_MDMA_S0_CONFIG()) continue; else return; bfin_write_MDMA_D0_IRQ_STATUS(DMA_DONE | DMA_ERR); bfin_write_MDMA_S0_CONFIG(0); bfin_write_MDMA_D0_CONFIG(0); } /** * _dma_memcpy - translate C memcpy settings into MDMA settings * * Handle all the high level steps before we touch the MDMA registers. So * handle direction, tweaking of sizes, and formatting of addresses. 
*/ static void *_dma_memcpy(void *pdst, const void *psrc, size_t size) { u32 conf, shift; s16 mod; unsigned long dst = (unsigned long)pdst; unsigned long src = (unsigned long)psrc; if (size == 0) return NULL; if (dst % 4 == 0 && src % 4 == 0 && size % 4 == 0) { conf = WDSIZE_32; shift = 2; } else if (dst % 2 == 0 && src % 2 == 0 && size % 2 == 0) { conf = WDSIZE_16; shift = 1; } else { conf = WDSIZE_8; shift = 0; } /* If the two memory regions have a chance of overlapping, make * sure the memcpy still works as expected. Do this by having the * copy run backwards instead. */ mod = 1 << shift; if (src < dst) { mod *= -1; dst += size + mod; src += size + mod; } size >>= shift; if (size > 0x10000) conf |= DMA2D; __dma_memcpy(dst, mod, src, mod, size, conf); return pdst; } /** * dma_memcpy - DMA memcpy under mutex lock * * Do not check arguments before starting the DMA memcpy. Break the transfer * up into two pieces. The first transfer is in multiples of 64k and the * second transfer is the piece smaller than 64k. */ void *dma_memcpy(void *pdst, const void *psrc, size_t size) { unsigned long dst = (unsigned long)pdst; unsigned long src = (unsigned long)psrc; if (bfin_addr_dcacheable(src)) blackfin_dcache_flush_range(src, src + size); if (bfin_addr_dcacheable(dst)) blackfin_dcache_invalidate_range(dst, dst + size); return dma_memcpy_nocache(pdst, psrc, size); } EXPORT_SYMBOL(dma_memcpy); /** * dma_memcpy_nocache - DMA memcpy under mutex lock * - No cache flush/invalidate * * Do not check arguments before starting the DMA memcpy. Break the transfer * up into two pieces. The first transfer is in multiples of 64k and the * second transfer is the piece smaller than 64k. 
*/ void *dma_memcpy_nocache(void *pdst, const void *psrc, size_t size) { size_t bulk, rest; bulk = size & ~0xffff; rest = size - bulk; if (bulk) _dma_memcpy(pdst, psrc, bulk); _dma_memcpy(pdst + bulk, psrc + bulk, rest); return pdst; } EXPORT_SYMBOL(dma_memcpy_nocache); /** * safe_dma_memcpy - DMA memcpy w/argument checking * * Verify arguments are safe before heading to dma_memcpy(). */ void *safe_dma_memcpy(void *dst, const void *src, size_t size) { if (!access_ok(VERIFY_WRITE, dst, size)) return NULL; if (!access_ok(VERIFY_READ, src, size)) return NULL; return dma_memcpy(dst, src, size); } EXPORT_SYMBOL(safe_dma_memcpy); static void _dma_out(unsigned long addr, unsigned long buf, unsigned short len, u16 size, u16 dma_size) { blackfin_dcache_flush_range(buf, buf + len * size); __dma_memcpy(addr, 0, buf, size, len, dma_size); } static void _dma_in(unsigned long addr, unsigned long buf, unsigned short len, u16 size, u16 dma_size) { blackfin_dcache_invalidate_range(buf, buf + len * size); __dma_memcpy(buf, size, addr, 0, len, dma_size); } #define MAKE_DMA_IO(io, bwl, isize, dmasize, cnst) \ void dma_##io##s##bwl(unsigned long addr, cnst void *buf, unsigned short len) \ { \ _dma_##io(addr, (unsigned long)buf, len, isize, WDSIZE_##dmasize); \ } \ EXPORT_SYMBOL(dma_##io##s##bwl) MAKE_DMA_IO(out, b, 1, 8, const); MAKE_DMA_IO(in, b, 1, 8, ); MAKE_DMA_IO(out, w, 2, 16, const); MAKE_DMA_IO(in, w, 2, 16, ); MAKE_DMA_IO(out, l, 4, 32, const); MAKE_DMA_IO(in, l, 4, 32, );
gpl-2.0
OneRom/kernel_moto_shamu
net/ipv6/netfilter/ip6t_rt.c
4930
5921
/* Kernel module to match ROUTING parameters. */

/* (C) 2001-2002 Andras Kis-Szabo <kisza@sch.bme.hu>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/ipv6.h>
#include <linux/types.h>
#include <net/checksum.h>
#include <net/ipv6.h>
#include <asm/byteorder.h>

#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/netfilter_ipv6/ip6t_rt.h>

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Xtables: IPv6 Routing Header match");
MODULE_AUTHOR("Andras Kis-Szabo <kisza@sch.bme.hu>");

/* Returns 1 if the id is matched by the range [min, max], 0 otherwise;
 * the result is XOR-flipped when invert is set. */
static inline bool
segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)
{
	bool r;
	pr_debug("segsleft_match:%c 0x%x <= 0x%x <= 0x%x\n",
		 invert ? '!' : ' ', min, id, max);
	r = (id >= min && id <= max) ^ invert;
	pr_debug(" result %s\n", r ? "PASS" : "FAILED");
	return r;
}

/*
 * Match callback: test the packet's IPv6 Routing extension header against
 * the rule's ip6t_rt criteria (segments-left range, routing type, header
 * length, reserved field, and — for type 0 — the embedded address list,
 * either strictly (exact list) or non-strictly (subsequence)).
 */
static bool rt_mt6(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct ipv6_rt_hdr _route;
	const struct ipv6_rt_hdr *rh;
	const struct ip6t_rt *rtinfo = par->matchinfo;
	unsigned int temp;
	unsigned int ptr = 0;
	unsigned int hdrlen = 0;
	bool ret = false;
	struct in6_addr _addr;
	const struct in6_addr *ap;
	int err;

	/* Locate the routing header; a missing header is a plain non-match,
	 * any other lookup failure drops the packet. */
	err = ipv6_find_hdr(skb, &ptr, NEXTHDR_ROUTING, NULL, NULL);
	if (err < 0) {
		if (err != -ENOENT)
			par->hotdrop = true;
		return false;
	}

	rh = skb_header_pointer(skb, ptr, sizeof(_route), &_route);
	if (rh == NULL) {
		par->hotdrop = true;
		return false;
	}

	hdrlen = ipv6_optlen(rh);
	if (skb->len - ptr < hdrlen) {
		/* Packet smaller than its length field */
		return false;
	}

	pr_debug("IPv6 RT LEN %u %u ", hdrlen, rh->hdrlen);
	pr_debug("TYPE %04X ", rh->type);
	pr_debug("SGS_LEFT %u %02X\n", rh->segments_left, rh->segments_left);

	pr_debug("IPv6 RT segsleft %02X ",
		 segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
				rh->segments_left,
				!!(rtinfo->invflags & IP6T_RT_INV_SGS)));
	pr_debug("type %02X %02X %02X ",
		 rtinfo->rt_type, rh->type,
		 (!(rtinfo->flags & IP6T_RT_TYP) ||
		  ((rtinfo->rt_type == rh->type) ^
		   !!(rtinfo->invflags & IP6T_RT_INV_TYP))));
	pr_debug("len %02X %04X %02X ",
		 rtinfo->hdrlen, hdrlen,
		 !(rtinfo->flags & IP6T_RT_LEN) ||
		  ((rtinfo->hdrlen == hdrlen) ^
		   !!(rtinfo->invflags & IP6T_RT_INV_LEN)));
	pr_debug("res %02X %02X %02X ",
		 rtinfo->flags & IP6T_RT_RES,
		 ((const struct rt0_hdr *)rh)->reserved,
		 !((rtinfo->flags & IP6T_RT_RES) &&
		   (((const struct rt0_hdr *)rh)->reserved)));

	/* Combine segsleft/len/type criteria.  (rh != NULL) is redundant
	 * here — rh was NULL-checked above — kept as-is. */
	ret = (rh != NULL) &&
	      (segsleft_match(rtinfo->segsleft[0], rtinfo->segsleft[1],
			      rh->segments_left,
			      !!(rtinfo->invflags & IP6T_RT_INV_SGS))) &&
	      (!(rtinfo->flags & IP6T_RT_LEN) ||
	       ((rtinfo->hdrlen == hdrlen) ^
		!!(rtinfo->invflags & IP6T_RT_INV_LEN))) &&
	      (!(rtinfo->flags & IP6T_RT_TYP) ||
	       ((rtinfo->rt_type == rh->type) ^
		!!(rtinfo->invflags & IP6T_RT_INV_TYP)));

	if (ret && (rtinfo->flags & IP6T_RT_RES)) {
		const u_int32_t *rp;
		u_int32_t _reserved;
		/* NOTE(review): rp cannot be NULL here since the offset lies
		 * within the hdrlen bound verified above — confirm. */
		rp = skb_header_pointer(skb,
					ptr + offsetof(struct rt0_hdr,
						       reserved),
					sizeof(_reserved),
					&_reserved);

		ret = (*rp == 0);
	}

	pr_debug("#%d ", rtinfo->addrnr);
	if (!(rtinfo->flags & IP6T_RT_FST)) {
		/* No address-list criterion requested. */
		return ret;
	} else if (rtinfo->flags & IP6T_RT_FST_NSTRICT) {
		pr_debug("Not strict ");
		/* Non-strict: every rule address must appear, in order, as a
		 * subsequence of the header's address list. */
		if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
			pr_debug("There isn't enough space\n");
			return false;
		} else {
			unsigned int i = 0;

			pr_debug("#%d ", rtinfo->addrnr);
			for (temp = 0;
			     temp < (unsigned int)((hdrlen - 8) / 16);
			     temp++) {
				ap = skb_header_pointer(skb,
							ptr
							+ sizeof(struct rt0_hdr)
							+ temp * sizeof(_addr),
							sizeof(_addr),
							&_addr);

				BUG_ON(ap == NULL);

				if (ipv6_addr_equal(ap, &rtinfo->addrs[i])) {
					pr_debug("i=%d temp=%d;\n", i, temp);
					i++;
				}
				if (i == rtinfo->addrnr)
					break;
			}
			pr_debug("i=%d #%d\n", i, rtinfo->addrnr);
			if (i == rtinfo->addrnr)
				return ret;
			else
				return false;
		}
	} else {
		pr_debug("Strict ");
		/* Strict: the header's list must equal the rule's list
		 * exactly, both in content and in length. */
		if (rtinfo->addrnr > (unsigned int)((hdrlen - 8) / 16)) {
			pr_debug("There isn't enough space\n");
			return false;
		} else {
			pr_debug("#%d ", rtinfo->addrnr);
			for (temp = 0; temp < rtinfo->addrnr; temp++) {
				ap = skb_header_pointer(skb,
							ptr
							+ sizeof(struct rt0_hdr)
							+ temp * sizeof(_addr),
							sizeof(_addr),
							&_addr);
				BUG_ON(ap == NULL);

				if (!ipv6_addr_equal(ap, &rtinfo->addrs[temp]))
					break;
			}
			pr_debug("temp=%d #%d\n", temp, rtinfo->addrnr);
			if (temp == rtinfo->addrnr &&
			    temp == (unsigned int)((hdrlen - 8) / 16))
				return ret;
			else
				return false;
		}
	}

	return false;
}

/* Validate a rule's match options at rule-insertion time. */
static int rt_mt6_check(const struct xt_mtchk_param *par)
{
	const struct ip6t_rt *rtinfo = par->matchinfo;

	if (rtinfo->invflags & ~IP6T_RT_INV_MASK) {
		pr_debug("unknown flags %X\n", rtinfo->invflags);
		return -EINVAL;
	}
	/* Reserved-field and address-list options only make sense for
	 * (non-inverted) routing type 0. */
	if ((rtinfo->flags & (IP6T_RT_RES | IP6T_RT_FST_MASK)) &&
	    (!(rtinfo->flags & IP6T_RT_TYP) ||
	     (rtinfo->rt_type != 0) ||
	     (rtinfo->invflags & IP6T_RT_INV_TYP))) {
		pr_debug("`--rt-type 0' required before `--rt-0-*'");
		return -EINVAL;
	}

	return 0;
}

static struct xt_match rt_mt6_reg __read_mostly = {
	.name		= "rt",
	.family		= NFPROTO_IPV6,
	.match		= rt_mt6,
	.matchsize	= sizeof(struct ip6t_rt),
	.checkentry	= rt_mt6_check,
	.me		= THIS_MODULE,
};

static int __init rt_mt6_init(void)
{
	return xt_register_match(&rt_mt6_reg);
}

static void __exit rt_mt6_exit(void)
{
	xt_unregister_match(&rt_mt6_reg);
}

module_init(rt_mt6_init);
module_exit(rt_mt6_exit);
gpl-2.0
juldiadia/kernel_stock_g3815
drivers/regulator/dummy.c
4930
2132
/*
 * dummy.c
 *
 * Copyright 2010 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This is useful for systems with mixed controllable and
 * non-controllable regulators, as well as for allowing testing on
 * systems with no controllable regulators.
 */

#include <linux/err.h>
#include <linux/export.h>
#include <linux/platform_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>

#include "dummy.h"

/* Handle to the single dummy regulator device, set at probe time. */
struct regulator_dev *dummy_regulator_rdev;

/* Empty init data and ops: the dummy regulator supports no operations. */
static struct regulator_init_data dummy_initdata;

static struct regulator_ops dummy_ops;

static struct regulator_desc dummy_desc = {
	.name = "dummy",
	.id = -1,
	.type = REGULATOR_VOLTAGE,
	.owner = THIS_MODULE,
	.ops = &dummy_ops,
};

/* Register the dummy regulator and publish its rdev in
 * dummy_regulator_rdev; returns 0 or the regulator_register() error. */
static int __devinit dummy_regulator_probe(struct platform_device *pdev)
{
	int ret;

	dummy_regulator_rdev = regulator_register(&dummy_desc, NULL,
						  &dummy_initdata, NULL, NULL);
	if (IS_ERR(dummy_regulator_rdev)) {
		ret = PTR_ERR(dummy_regulator_rdev);
		pr_err("Failed to register regulator: %d\n", ret);
		return ret;
	}

	return 0;
}

static struct platform_driver dummy_regulator_driver = {
	.probe		= dummy_regulator_probe,
	.driver		= {
		.name		= "reg-dummy",
		.owner		= THIS_MODULE,
	},
};

static struct platform_device *dummy_pdev;

/*
 * Create the "reg-dummy" platform device and register its driver so the
 * probe above runs.  Errors are logged and the partially-created device
 * is released; the function itself returns void, so boot continues
 * regardless.
 */
void __init regulator_dummy_init(void)
{
	int ret;

	dummy_pdev = platform_device_alloc("reg-dummy", -1);
	if (!dummy_pdev) {
		pr_err("Failed to allocate dummy regulator device\n");
		return;
	}

	ret = platform_device_add(dummy_pdev);
	if (ret != 0) {
		pr_err("Failed to register dummy regulator device: %d\n", ret);
		platform_device_put(dummy_pdev);
		return;
	}

	ret = platform_driver_register(&dummy_regulator_driver);
	if (ret != 0) {
		pr_err("Failed to register dummy regulator driver: %d\n", ret);
		platform_device_unregister(dummy_pdev);
	}
}
gpl-2.0
blitzmohit/dragonboard-rtlinux-3.4
arch/sparc/prom/misc_64.c
7234
9073
/*
 * NOTE(review): sparc64 OpenBoot PROM helpers.  Every function here fills
 * an args[] array — service-name string, then what appear to be the input
 * and output cell counts, then the input cells, with the trailing output
 * slots preset to (unsigned long)-1 — and hands it to p1275_cmd_direct().
 * Results are read back out of the same array.  The exact cell semantics
 * are defined by the firmware's P1275 client interface (see asm/oplib.h);
 * left byte-identical because correctness depends on firmware behavior
 * that cannot be verified from this file alone.
 */
/* * misc.c: Miscellaneous prom functions that don't belong * anywhere else. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz) */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/module.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/ldc.h> static int prom_service_exists(const char *service_name) { unsigned long args[5]; args[0] = (unsigned long) "test"; args[1] = 1; args[2] = 1; args[3] = (unsigned long) service_name; args[4] = (unsigned long) -1; p1275_cmd_direct(args); if (args[4]) return 0; return 1; } void prom_sun4v_guest_soft_state(void) { const char *svc = "SUNW,soft-state-supported"; unsigned long args[3]; if (!prom_service_exists(svc)) return; args[0] = (unsigned long) svc; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); } /* Reset and reboot the machine with the command 'bcommand'. */ void prom_reboot(const char *bcommand) { unsigned long args[4]; #ifdef CONFIG_SUN_LDOMS if (ldom_domaining_enabled) ldom_reboot(bcommand); #endif args[0] = (unsigned long) "boot"; args[1] = 1; args[2] = 0; args[3] = (unsigned long) bcommand; p1275_cmd_direct(args); } /* Forth evaluate the expression contained in 'fstring'. */ void prom_feval(const char *fstring) { unsigned long args[5]; if (!fstring || fstring[0] == 0) return; args[0] = (unsigned long) "interpret"; args[1] = 1; args[2] = 1; args[3] = (unsigned long) fstring; args[4] = (unsigned long) -1; p1275_cmd_direct(args); } EXPORT_SYMBOL(prom_feval); #ifdef CONFIG_SMP extern void smp_capture(void); extern void smp_release(void); #endif /* Drop into the prom, with the chance to continue with the 'go' * prom command. 
*/ void prom_cmdline(void) { unsigned long args[3]; unsigned long flags; local_irq_save(flags); #ifdef CONFIG_SMP smp_capture(); #endif args[0] = (unsigned long) "enter"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); #ifdef CONFIG_SMP smp_release(); #endif local_irq_restore(flags); } /* Drop into the prom, but completely terminate the program. * No chance of continuing. */ void notrace prom_halt(void) { unsigned long args[3]; #ifdef CONFIG_SUN_LDOMS if (ldom_domaining_enabled) ldom_power_off(); #endif again: args[0] = (unsigned long) "exit"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); goto again; /* PROM is out to get me -DaveM */ } void prom_halt_power_off(void) { unsigned long args[3]; #ifdef CONFIG_SUN_LDOMS if (ldom_domaining_enabled) ldom_power_off(); #endif args[0] = (unsigned long) "SUNW,power-off"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); /* if nothing else helps, we just halt */ prom_halt(); } /* Get the idprom and stuff it into buffer 'idbuf'. Returns the * format type. 'num_bytes' is the number of bytes that your idbuf * has space for. Returns 0xff on error. 
*/ unsigned char prom_get_idprom(char *idbuf, int num_bytes) { int len; len = prom_getproplen(prom_root_node, "idprom"); if ((len >num_bytes) || (len == -1)) return 0xff; if (!prom_getproperty(prom_root_node, "idprom", idbuf, num_bytes)) return idbuf[0]; return 0xff; } int prom_get_mmu_ihandle(void) { phandle node; int ret; if (prom_mmu_ihandle_cache != 0) return prom_mmu_ihandle_cache; node = prom_finddevice(prom_chosen_path); ret = prom_getint(node, prom_mmu_name); if (ret == -1 || ret == 0) prom_mmu_ihandle_cache = -1; else prom_mmu_ihandle_cache = ret; return ret; } static int prom_get_memory_ihandle(void) { static int memory_ihandle_cache; phandle node; int ret; if (memory_ihandle_cache != 0) return memory_ihandle_cache; node = prom_finddevice("/chosen"); ret = prom_getint(node, "memory"); if (ret == -1 || ret == 0) memory_ihandle_cache = -1; else memory_ihandle_cache = ret; return ret; } /* Load explicit I/D TLB entries. */ static long tlb_load(const char *type, unsigned long index, unsigned long tte_data, unsigned long vaddr) { unsigned long args[9]; args[0] = (unsigned long) prom_callmethod_name; args[1] = 5; args[2] = 1; args[3] = (unsigned long) type; args[4] = (unsigned int) prom_get_mmu_ihandle(); args[5] = vaddr; args[6] = tte_data; args[7] = index; args[8] = (unsigned long) -1; p1275_cmd_direct(args); return (long) args[8]; } long prom_itlb_load(unsigned long index, unsigned long tte_data, unsigned long vaddr) { return tlb_load("SUNW,itlb-load", index, tte_data, vaddr); } long prom_dtlb_load(unsigned long index, unsigned long tte_data, unsigned long vaddr) { return tlb_load("SUNW,dtlb-load", index, tte_data, vaddr); } int prom_map(int mode, unsigned long size, unsigned long vaddr, unsigned long paddr) { unsigned long args[11]; int ret; args[0] = (unsigned long) prom_callmethod_name; args[1] = 7; args[2] = 1; args[3] = (unsigned long) prom_map_name; args[4] = (unsigned int) prom_get_mmu_ihandle(); args[5] = (unsigned int) mode; args[6] = size; args[7] 
= vaddr; args[8] = 0; args[9] = paddr; args[10] = (unsigned long) -1; p1275_cmd_direct(args); ret = (int) args[10]; if (ret == 0) ret = -1; return ret; } void prom_unmap(unsigned long size, unsigned long vaddr) { unsigned long args[7]; args[0] = (unsigned long) prom_callmethod_name; args[1] = 4; args[2] = 0; args[3] = (unsigned long) prom_unmap_name; args[4] = (unsigned int) prom_get_mmu_ihandle(); args[5] = size; args[6] = vaddr; p1275_cmd_direct(args); } /* Set aside physical memory which is not touched or modified * across soft resets. */ int prom_retain(const char *name, unsigned long size, unsigned long align, unsigned long *paddr) { unsigned long args[11]; args[0] = (unsigned long) prom_callmethod_name; args[1] = 5; args[2] = 3; args[3] = (unsigned long) "SUNW,retain"; args[4] = (unsigned int) prom_get_memory_ihandle(); args[5] = align; args[6] = size; args[7] = (unsigned long) name; args[8] = (unsigned long) -1; args[9] = (unsigned long) -1; args[10] = (unsigned long) -1; p1275_cmd_direct(args); if (args[8]) return (int) args[8]; /* Next we get "phys_high" then "phys_low". On 64-bit * the phys_high cell is don't care since the phys_low * cell has the full value. */ *paddr = args[10]; return 0; } /* Get "Unumber" string for the SIMM at the given * memory address. Usually this will be of the form * "Uxxxx" where xxxx is a decimal number which is * etched into the motherboard next to the SIMM slot * in question. */ int prom_getunumber(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { unsigned long args[12]; args[0] = (unsigned long) prom_callmethod_name; args[1] = 7; args[2] = 2; args[3] = (unsigned long) "SUNW,get-unumber"; args[4] = (unsigned int) prom_get_memory_ihandle(); args[5] = buflen; args[6] = (unsigned long) buf; args[7] = 0; args[8] = phys_addr; args[9] = (unsigned int) syndrome_code; args[10] = (unsigned long) -1; args[11] = (unsigned long) -1; p1275_cmd_direct(args); return (int) args[10]; } /* Power management extensions. 
*/ void prom_sleepself(void) { unsigned long args[3]; args[0] = (unsigned long) "SUNW,sleep-self"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); } int prom_sleepsystem(void) { unsigned long args[4]; args[0] = (unsigned long) "SUNW,sleep-system"; args[1] = 0; args[2] = 1; args[3] = (unsigned long) -1; p1275_cmd_direct(args); return (int) args[3]; } int prom_wakeupsystem(void) { unsigned long args[4]; args[0] = (unsigned long) "SUNW,wakeup-system"; args[1] = 0; args[2] = 1; args[3] = (unsigned long) -1; p1275_cmd_direct(args); return (int) args[3]; } #ifdef CONFIG_SMP void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg) { unsigned long args[6]; args[0] = (unsigned long) "SUNW,start-cpu"; args[1] = 3; args[2] = 0; args[3] = (unsigned int) cpunode; args[4] = pc; args[5] = arg; p1275_cmd_direct(args); } void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg) { unsigned long args[6]; args[0] = (unsigned long) "SUNW,start-cpu-by-cpuid"; args[1] = 3; args[2] = 0; args[3] = (unsigned int) cpuid; args[4] = pc; args[5] = arg; p1275_cmd_direct(args); } void prom_stopcpu_cpuid(int cpuid) { unsigned long args[4]; args[0] = (unsigned long) "SUNW,stop-cpu-by-cpuid"; args[1] = 1; args[2] = 0; args[3] = (unsigned int) cpuid; p1275_cmd_direct(args); } void prom_stopself(void) { unsigned long args[3]; args[0] = (unsigned long) "SUNW,stop-self"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); } void prom_idleself(void) { unsigned long args[3]; args[0] = (unsigned long) "SUNW,idle-self"; args[1] = 0; args[2] = 0; p1275_cmd_direct(args); } void prom_resumecpu(int cpunode) { unsigned long args[4]; args[0] = (unsigned long) "SUNW,resume-cpu"; args[1] = 1; args[2] = 0; args[3] = (unsigned int) cpunode; p1275_cmd_direct(args); } #endif
gpl-2.0
Kra1o5/android_kernel_huawei_u8815-gb
arch/mips/bcm63xx/timer.c
12610
4510
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> */ #include <linux/kernel.h> #include <linux/err.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <bcm63xx_cpu.h> #include <bcm63xx_io.h> #include <bcm63xx_timer.h> #include <bcm63xx_regs.h> static DEFINE_RAW_SPINLOCK(timer_reg_lock); static DEFINE_RAW_SPINLOCK(timer_data_lock); static struct clk *periph_clk; static struct timer_data { void (*cb)(void *); void *data; } timer_data[BCM63XX_TIMER_COUNT]; static irqreturn_t timer_interrupt(int irq, void *dev_id) { u32 stat; int i; raw_spin_lock(&timer_reg_lock); stat = bcm_timer_readl(TIMER_IRQSTAT_REG); bcm_timer_writel(stat, TIMER_IRQSTAT_REG); raw_spin_unlock(&timer_reg_lock); for (i = 0; i < BCM63XX_TIMER_COUNT; i++) { if (!(stat & TIMER_IRQSTAT_TIMER_CAUSE(i))) continue; raw_spin_lock(&timer_data_lock); if (!timer_data[i].cb) { raw_spin_unlock(&timer_data_lock); continue; } timer_data[i].cb(timer_data[i].data); raw_spin_unlock(&timer_data_lock); } return IRQ_HANDLED; } int bcm63xx_timer_enable(int id) { u32 reg; unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return -EINVAL; raw_spin_lock_irqsave(&timer_reg_lock, flags); reg = bcm_timer_readl(TIMER_CTLx_REG(id)); reg |= TIMER_CTL_ENABLE_MASK; bcm_timer_writel(reg, TIMER_CTLx_REG(id)); reg = bcm_timer_readl(TIMER_IRQSTAT_REG); reg |= TIMER_IRQSTAT_TIMER_IR_EN(id); bcm_timer_writel(reg, TIMER_IRQSTAT_REG); raw_spin_unlock_irqrestore(&timer_reg_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_timer_enable); int bcm63xx_timer_disable(int id) { u32 reg; unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return -EINVAL; raw_spin_lock_irqsave(&timer_reg_lock, flags); reg = bcm_timer_readl(TIMER_CTLx_REG(id)); reg &= ~TIMER_CTL_ENABLE_MASK; bcm_timer_writel(reg, 
TIMER_CTLx_REG(id)); reg = bcm_timer_readl(TIMER_IRQSTAT_REG); reg &= ~TIMER_IRQSTAT_TIMER_IR_EN(id); bcm_timer_writel(reg, TIMER_IRQSTAT_REG); raw_spin_unlock_irqrestore(&timer_reg_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_timer_disable); int bcm63xx_timer_register(int id, void (*callback)(void *data), void *data) { unsigned long flags; int ret; if (id >= BCM63XX_TIMER_COUNT || !callback) return -EINVAL; ret = 0; raw_spin_lock_irqsave(&timer_data_lock, flags); if (timer_data[id].cb) { ret = -EBUSY; goto out; } timer_data[id].cb = callback; timer_data[id].data = data; out: raw_spin_unlock_irqrestore(&timer_data_lock, flags); return ret; } EXPORT_SYMBOL(bcm63xx_timer_register); void bcm63xx_timer_unregister(int id) { unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return; raw_spin_lock_irqsave(&timer_data_lock, flags); timer_data[id].cb = NULL; raw_spin_unlock_irqrestore(&timer_data_lock, flags); } EXPORT_SYMBOL(bcm63xx_timer_unregister); unsigned int bcm63xx_timer_countdown(unsigned int countdown_us) { return (clk_get_rate(periph_clk) / (1000 * 1000)) * countdown_us; } EXPORT_SYMBOL(bcm63xx_timer_countdown); int bcm63xx_timer_set(int id, int monotonic, unsigned int countdown_us) { u32 reg, countdown; unsigned long flags; if (id >= BCM63XX_TIMER_COUNT) return -EINVAL; countdown = bcm63xx_timer_countdown(countdown_us); if (countdown & ~TIMER_CTL_COUNTDOWN_MASK) return -EINVAL; raw_spin_lock_irqsave(&timer_reg_lock, flags); reg = bcm_timer_readl(TIMER_CTLx_REG(id)); if (monotonic) reg &= ~TIMER_CTL_MONOTONIC_MASK; else reg |= TIMER_CTL_MONOTONIC_MASK; reg &= ~TIMER_CTL_COUNTDOWN_MASK; reg |= countdown; bcm_timer_writel(reg, TIMER_CTLx_REG(id)); raw_spin_unlock_irqrestore(&timer_reg_lock, flags); return 0; } EXPORT_SYMBOL(bcm63xx_timer_set); int bcm63xx_timer_init(void) { int ret, irq; u32 reg; reg = bcm_timer_readl(TIMER_IRQSTAT_REG); reg &= ~TIMER_IRQSTAT_TIMER0_IR_EN; reg &= ~TIMER_IRQSTAT_TIMER1_IR_EN; reg &= ~TIMER_IRQSTAT_TIMER2_IR_EN; 
bcm_timer_writel(reg, TIMER_IRQSTAT_REG); periph_clk = clk_get(NULL, "periph"); if (IS_ERR(periph_clk)) return -ENODEV; irq = bcm63xx_get_irq_number(IRQ_TIMER); ret = request_irq(irq, timer_interrupt, 0, "bcm63xx_timer", NULL); if (ret) { printk(KERN_ERR "bcm63xx_timer: failed to register irq\n"); return ret; } return 0; } arch_initcall(bcm63xx_timer_init);
gpl-2.0
r0j3r/gcc-4.7.4-hardened
gcc/testsuite/gcc.c-torture/unsorted/SFset.c
67
4816
/*
 * NOTE(review): auto-generated GCC torture test (gcc.c-torture/unsorted).
 * Exercises single-float ("SF" mode) stores through every pairing of
 * addressing modes (register, register-indirect, immediate, large
 * immediate, absolute address, address+offset, indexed).  The K&R-style
 * parameter declarations and repetitive structure are the test input
 * itself, so the code is left byte-identical.
 */
#define E0 ((type *)10000000) #define reg0 r0 #define indreg0 (*p0) #define imm0 22 #define limm0 ((type)(int)&glob0) #define adr0 (*E0) #define adrreg0 (p0[10000000]) #define adrx0 (E0[x0]) #define regx0 (p0[x0]) #define E1 ((type *)(11111111 & ~(__alignof__ (type) - 1))) #define reg1 r1 #define indreg1 (*p1) #define imm1 33 #define limm1 ((type)(int)&glob1) #define adr1 (*E1) #define adrreg1 (p1[1111111/4]) #define adrx1 (E1[x1]) #define regx1 (p1[x1]) int glob0, glob1; #define type float reg0reg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {reg0 = reg1; } reg0indreg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {reg0 = indreg1; } reg0imm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {reg0 = imm1; } reg0limm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {reg0 = limm1; } reg0adr1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {reg0 = adr1; } reg0adrreg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {reg0 = adrreg1; } reg0adrx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {reg0 = adrx1; } reg0regx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {reg0 = regx1; } indreg0reg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {indreg0 = reg1; } indreg0indreg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {indreg0 = indreg1; } indreg0imm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {indreg0 = imm1; } indreg0limm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {indreg0 = limm1; } indreg0adr1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {indreg0 = adr1; } indreg0adrreg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {indreg0 = adrreg1; } indreg0adrx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {indreg0 = adrx1; } indreg0regx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {indreg0 = regx1; } adr0reg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adr0 = reg1; } adr0indreg1_set (r0, r1, 
x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adr0 = indreg1; } adr0imm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adr0 = imm1; } adr0limm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adr0 = limm1; } adr0adr1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adr0 = adr1; } adr0adrreg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adr0 = adrreg1; } adr0adrx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adr0 = adrx1; } adr0regx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adr0 = regx1; } adrreg0reg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrreg0 = reg1; } adrreg0indreg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrreg0 = indreg1; } adrreg0imm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrreg0 = imm1; } adrreg0limm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrreg0 = limm1; } adrreg0adr1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrreg0 = adr1; } adrreg0adrreg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrreg0 = adrreg1; } adrreg0adrx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrreg0 = adrx1; } adrreg0regx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrreg0 = regx1; } adrx0reg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrx0 = reg1; } adrx0indreg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrx0 = indreg1; } adrx0imm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrx0 = imm1; } adrx0limm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrx0 = limm1; } adrx0adr1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrx0 = adr1; } adrx0adrreg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrx0 = adrreg1; } adrx0adrx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrx0 = adrx1; } adrx0regx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {adrx0 = regx1; } regx0reg1_set (r0, 
r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {regx0 = reg1; } regx0indreg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {regx0 = indreg1; } regx0imm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {regx0 = imm1; } regx0limm1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {regx0 = limm1; } regx0adr1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {regx0 = adr1; } regx0adrreg1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {regx0 = adrreg1; } regx0adrx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {regx0 = adrx1; } regx0regx1_set (r0, r1, x0, x1, p0, p1) type r0, r1; type *p0, *p1; {regx0 = regx1; }
gpl-2.0
T-Macgnolia/android_kernel_lge_g4stylus-stock
drivers/thermal/supply_lm_core.c
67
58645
/*
 * NOTE(review): MSM "supply limits" thermal mitigation driver.  Inputs
 * (thermal state, online-core count, CPU/GPU/modem voltage-corner states)
 * are folded into step-1 (scm_call into TZ) and step-2 (hotplug + cpufreq
 * requests via devmgr) mitigation decisions; a lockless-looking debug ring
 * buffer records each transition.  The final function in this chunk
 * (read_and_update_mitigation_state) is truncated by the chunk boundary,
 * and the state machine's correctness depends on exact ordering between
 * the scm calls, the devmgr callback and the monitor task completions,
 * so the code is left byte-identical.
 */
/* Copyright (c) 2015, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #define pr_fmt(fmt) "%s:%s " fmt, KBUILD_MODNAME, __func__ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/workqueue.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/err.h> #include <linux/of.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/clk/msm-clk.h> #include <linux/cpu.h> #include <linux/cpu_pm.h> #include <linux/cpumask.h> #include <linux/smp.h> #include <linux/pm_opp.h> #include <linux/of_platform.h> #include <linux/kthread.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/regulator/rpm-smd-regulator.h> #include <clocksource/arm_arch_timer.h> #include <linux/dma-mapping.h> #include <soc/qcom/scm.h> #include <linux/thermal.h> #include <linux/msm_thermal.h> #define CREATE_TRACE_POINTS #define TRACE_SUPPLY_LM #include <trace/trace_thermal.h> #define SUPPLY_LM_NAME_MAX 20 #define SUPPLY_LM_DRIVER_NAME "supply_lm" #define SUPPLY_LM_INTERRUPT "supply_lm_modem-interrupt" #define SUPPLY_LM_MODEM_DATA_OFFSET 0x4 #define MODEM_INTR_CLEAR 0x0 #define MODE_MAX 32 #define CPU_BUF_SIZE 64 #define SUPPLY_LM_GPU_OFF_RATE 19200000 #define NO_DIV_CPU_FREQ_HZ 533333333 #define NO_DIV_CPU_FREQ_KHZ 533333 #define MITIGATION_MAX_DELAY 30 #define SUPPLY_LM_GET_MIT_CMD 2 #define SUPPLY_LM_STEP1_REQ_CMD 3 #define __ATTR_RW(attr) __ATTR(attr, 0644, attr##_show, attr##_store) enum supply_lm_input_device { SUPPLY_LM_THERM_DEVICE, 
SUPPLY_LM_NUM_CORE_DEVICE, SUPPLY_LM_CPU_DEVICE, SUPPLY_LM_MODEM_DEVICE, SUPPLY_LM_GPU_DEVICE, SUPPLY_LM_DEVICE_MAX, }; enum corner_state { CORNER_STATE_OFF, CORNER_STATE_SVS, CORNER_STATE_NOMINAL, CORNER_STATE_TURBO, CORNER_STATE_MAX }; enum modem_corner_state { MODEM_CORNER_STATE_OFF, MODEM_CORNER_STATE_RETENTION, MODEM_CORNER_STATE_LOW_MINUS, MODEM_CORNER_STATE_LOW, MODEM_CORNER_STATE_NOMINAL, MODEM_CORNER_STATE_NOMINAL_PLUS, MODEM_CORNER_STATE_TURBO, MODEM_CORNER_STATE_MAX, }; enum therm_state { THERM_NORMAL, THERM_HOT, THERM_VERY_HOT, MAX_THERM_LEVEL, }; enum supply_lm_dbg_event { SUPPLY_LM_DBG_IO, SUPPLY_LM_DBG_PRE_HPLG, SUPPLY_LM_DBG_PST_HPLG, SUPPLY_LM_DBG_PRE_CFRQ, SUPPLY_LM_DBG_PST_CFRQ, SUPPLY_LM_DBG_STEP1_REL, }; enum debug_mask { SUPPLY_LM_INPUTS = BIT(0), SUPPLY_LM_IO_DATA = BIT(1), SUPPLY_LM_STEP2_MITIGATION_REQUEST = BIT(2), SUPPLY_LM_POST_MITIGATION_REQUEST = BIT(3), }; struct freq_corner_map { unsigned int freq; enum corner_state state; }; union input_device_state { enum corner_state state; enum therm_state therm_state; uint32_t num_cores; }; struct supply_lm_mitigation_data { uint32_t core_offline_req; uint32_t freq_req; uint32_t step1_rel; }; struct supply_lm_input_data { uint32_t num_cores; enum therm_state therm_state; enum corner_state cpu_state; enum corner_state gpu_state; enum corner_state modem_state; }; struct input_device_info; struct device_ops { int (*setup)(struct platform_device *, struct input_device_info *); int (*read_state)(struct input_device_info *, struct supply_lm_input_data *); void (*clean_up)(struct platform_device *, struct input_device_info *); }; struct input_device_info { char device_name[SUPPLY_LM_NAME_MAX]; bool enabled; struct device_ops ops; union input_device_state curr; struct list_head list_ptr; struct mutex lock; void *data; }; struct supply_lm_modem_hw { int irq_num; struct workqueue_struct *isr_wq; struct work_struct isr_work; void *intr_base_reg; }; struct supply_lm_core_data { bool enabled; struct 
platform_device *pdev; int hot_temp_degC; int hot_temp_hyst; int very_hot_temp_degC; int very_hot_temp_hyst; struct platform_device *gpu_pdev; struct clk *gpu_handle; struct supply_lm_modem_hw modem_hw; cpumask_t cpu_idle_mask; uint32_t supply_lm_limited_max_freq; cpumask_t supply_lm_offlined_cpu_mask; bool step2_cpu_freq_initiated; bool step2_hotplug_initiated; bool suspend_in_progress; uint32_t inp_trig_for_mit; struct device_clnt_data *hotplug_handle; struct device_clnt_data *cpufreq_handle; }; struct supply_lm_debugfs_entry { struct dentry *parent; struct dentry *bypass_real_inp; struct dentry *user_input; }; struct supply_lm_debug { cycle_t time; enum supply_lm_dbg_event evt; uint32_t num_cores; /* [31:24]=therm_state, [23:16]=cpu_state * [15:8]=modem_state, [7:0]=gpu_state */ uint32_t hw_state; uint32_t core_offline_req; uint32_t freq_req; uint32_t step1_rel; uint32_t arg; }; static DEFINE_MUTEX(supply_lm_tz_lock); static DEFINE_SPINLOCK(supply_lm_pm_lock); static LIST_HEAD(supply_lm_device_list); static struct supply_lm_core_data *supply_lm_data; static struct input_device_info supply_lm_devices[]; static struct threshold_info *supply_lm_therm_thresh; static struct task_struct *supply_lm_monitor_task; static struct completion supply_lm_notify_complete; static struct completion supply_lm_mitigation_complete; static struct supply_lm_debugfs_entry *supply_lm_debugfs; static u32 supply_lm_bypass_inp; static uint32_t supply_lm_therm_status[MAX_THERM_LEVEL] = {UINT_MAX, 0, 0}; static struct supply_lm_debug *supply_lm_debug; static phys_addr_t supply_lm_debug_phys; static const int num_dbg_elements = 0x100; static uint32_t cpufreq_table_len; static uint32_t gpufreq_table_len; static struct freq_corner_map *cpufreq_corner_map; static struct freq_corner_map *gpufreq_corner_map; static int supply_lm_debug_mask; static bool gpufreq_opp_corner_enabled; static void supply_lm_remove_devices(struct platform_device *pdev); static int supply_lm_devices_init(struct 
platform_device *pdev); module_param_named( debug_mask, supply_lm_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP ); static int apps_corner_map[] = { [RPM_REGULATOR_CORNER_NONE] = CORNER_STATE_OFF, [RPM_REGULATOR_CORNER_SVS_SOC] = CORNER_STATE_SVS, [RPM_REGULATOR_CORNER_NORMAL] = CORNER_STATE_NOMINAL, [RPM_REGULATOR_CORNER_SUPER_TURBO] = CORNER_STATE_TURBO, }; static int modem_corner_map[] = { [MODEM_CORNER_STATE_OFF] = CORNER_STATE_OFF, [MODEM_CORNER_STATE_RETENTION] = CORNER_STATE_OFF, [MODEM_CORNER_STATE_LOW_MINUS] = CORNER_STATE_OFF, [MODEM_CORNER_STATE_LOW] = CORNER_STATE_SVS, [MODEM_CORNER_STATE_NOMINAL] = CORNER_STATE_NOMINAL, [MODEM_CORNER_STATE_NOMINAL_PLUS] = CORNER_STATE_NOMINAL, [MODEM_CORNER_STATE_TURBO] = CORNER_STATE_TURBO, }; static void update_mtgtn_debug_event(enum supply_lm_dbg_event event, struct supply_lm_input_data *inp_data, struct supply_lm_mitigation_data *mtgtn_data, uint32_t arg) { struct supply_lm_debug *dbg; uint32_t idx; static int mtgtn_idx; static DEFINE_SPINLOCK(debug_lock); if (!supply_lm_debug) return; spin_lock(&debug_lock); idx = mtgtn_idx++; dbg = &supply_lm_debug[idx & (num_dbg_elements - 1)]; dbg->evt = event; dbg->time = arch_counter_get_cntpct(); if (inp_data) { dbg->num_cores = inp_data->num_cores; dbg->hw_state = (inp_data->therm_state << 24)| (inp_data->cpu_state << 16)| (inp_data->modem_state << 8)| (inp_data->gpu_state); } else { dbg->num_cores = 0xDECADEED; dbg->hw_state = 0xDECADEED; } if (mtgtn_data) { dbg->core_offline_req = mtgtn_data->core_offline_req; dbg->freq_req = mtgtn_data->freq_req; dbg->step1_rel = mtgtn_data->step1_rel; } else { dbg->core_offline_req = 0xDECADEED; dbg->freq_req = 0xDECADEED; dbg->step1_rel = 0xDECADEED; } dbg->arg = arg; spin_unlock(&debug_lock); } static ssize_t supply_lm_input_write(struct file *fp, const char __user *user_buffer, size_t count, loff_t *position) { int ret = 0; char buf[MODE_MAX]; char *cmp; enum therm_state therm; uint32_t num_cores; enum corner_state cpu; enum 
corner_state gpu; enum corner_state modem; if (copy_from_user(&buf, user_buffer, count)) return -EFAULT; buf[count] = '\0'; cmp = strstrip(buf); ret = sscanf(cmp, "%d %d %d %d %d", (int *)&therm, &num_cores, (int *)&cpu, (int *)&modem, (int *)&gpu); if (ret != SUPPLY_LM_DEVICE_MAX) { pr_err("Invalid user input. ret:%d\n", ret); return -EINVAL; } pr_debug("T%d N%d C%d M%d G%d\n", therm, num_cores, cpu, modem, gpu); supply_lm_devices[SUPPLY_LM_THERM_DEVICE].curr.therm_state = therm; supply_lm_devices[SUPPLY_LM_NUM_CORE_DEVICE].curr.num_cores = num_cores; supply_lm_devices[SUPPLY_LM_CPU_DEVICE].curr.state = cpu; supply_lm_devices[SUPPLY_LM_MODEM_DEVICE].curr.state = modem; supply_lm_devices[SUPPLY_LM_GPU_DEVICE].curr.state = gpu; if (supply_lm_monitor_task && !supply_lm_data->suspend_in_progress) complete(&supply_lm_notify_complete); return count; } static const struct file_operations supply_lm_user_input_ops = { .write = supply_lm_input_write, }; static int create_supply_lm_debugfs(struct platform_device *pdev) { int ret = 0; if (supply_lm_debugfs) return ret; supply_lm_debugfs = devm_kzalloc(&pdev->dev, sizeof(struct supply_lm_debugfs_entry), GFP_KERNEL); if (!supply_lm_debugfs) { ret = -ENOMEM; pr_err("Memory alloc failed. err:%d\n", ret); return ret; } supply_lm_debugfs->parent = debugfs_create_dir(SUPPLY_LM_DRIVER_NAME, NULL); if (IS_ERR(supply_lm_debugfs->parent)) { ret = PTR_ERR(supply_lm_debugfs->parent); pr_err("Error creating debugfs:[%s]. err:%d\n", SUPPLY_LM_DRIVER_NAME, ret); goto create_exit; } supply_lm_debugfs->user_input = debugfs_create_file("user_input", 0600, supply_lm_debugfs->parent, &supply_lm_devices, &supply_lm_user_input_ops); if (IS_ERR(supply_lm_debugfs->user_input)) { ret = PTR_ERR(supply_lm_debugfs->user_input); pr_err("Error creating debugfs:[%s]. 
err:%d\n", "user_input", ret); goto create_exit; } supply_lm_debugfs->bypass_real_inp = debugfs_create_bool("bypass_real_inp", 0600, supply_lm_debugfs->parent, &supply_lm_bypass_inp); if (IS_ERR(supply_lm_debugfs->bypass_real_inp)) { ret = PTR_ERR(supply_lm_debugfs->bypass_real_inp); pr_err("Error creating debugfs:[%s]. err:%d\n", "bypass_real_inp", ret); goto create_exit; } create_exit: if (ret) { debugfs_remove_recursive(supply_lm_debugfs->parent); devm_kfree(&pdev->dev, supply_lm_debugfs); } return ret; } static void clear_all_mitigation(void) { union device_request disable_req; int ret = 0; mutex_lock(&supply_lm_tz_lock); trace_supply_lm_pre_scm(0); ret = scm_call_atomic1(SCM_SVC_PWR, SUPPLY_LM_STEP1_REQ_CMD, 0); trace_supply_lm_post_scm(ret); mutex_unlock(&supply_lm_tz_lock); if (ret < 0) pr_err("scm_call failed\n"); if (supply_lm_data->cpufreq_handle) { disable_req.freq.max_freq = CPUFREQ_MAX_NO_MITIGATION; disable_req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION; devmgr_client_request_mitigation(supply_lm_data->cpufreq_handle, CPUFREQ_MITIGATION_REQ, &disable_req); supply_lm_data->supply_lm_limited_max_freq = CPUFREQ_MAX_NO_MITIGATION; } if (supply_lm_data->hotplug_handle) { HOTPLUG_NO_MITIGATION(&disable_req.offline_mask); devmgr_client_request_mitigation(supply_lm_data->hotplug_handle, HOTPLUG_MITIGATION_REQ, &disable_req); HOTPLUG_NO_MITIGATION( &supply_lm_data->supply_lm_offlined_cpu_mask); } } static ssize_t mode_store(struct kobject *kobj, struct kobj_attribute *attr, const char *buf, size_t count) { int ret = 0; if (!supply_lm_data) return -EPERM; if (!strcmp(buf, "enable")) { if (supply_lm_data->enabled) goto store_exit; /* disable and re-enable all devices */ supply_lm_remove_devices(supply_lm_data->pdev); ret = supply_lm_devices_init(supply_lm_data->pdev); if (ret) return -EINVAL; supply_lm_data->enabled = true; if (supply_lm_monitor_task && !supply_lm_data->suspend_in_progress) complete(&supply_lm_notify_complete); } else if (!strcmp(buf, "disable")) 
{ if (!supply_lm_data->enabled) goto store_exit; supply_lm_remove_devices(supply_lm_data->pdev); supply_lm_data->enabled = false; clear_all_mitigation(); } else { pr_err("write error %s\n", buf); return -EINVAL; } store_exit: return count; } static ssize_t mode_show( struct kobject *kobj, struct kobj_attribute *attr, char *buf) { if (!supply_lm_data) return -EPERM; return snprintf(buf, PAGE_SIZE, "%s\n", supply_lm_data->enabled ? "enabled" : "disabled"); } static struct kobj_attribute supply_lm_dev_attr = __ATTR_RW(mode); static int create_supply_lm_sysfs(void) { int ret = 0; struct kobject *module_kobj = NULL; module_kobj = kset_find_obj(module_kset, KBUILD_MODNAME); if (!module_kobj) { pr_err("cannot find kobject\n"); return -ENOENT; } sysfs_attr_init(&supply_lm_dev_attr.attr); ret = sysfs_create_file(module_kobj, &supply_lm_dev_attr.attr); if (ret) { pr_err( "cannot create mode sysfs kobject attribute. err:%d\n", ret); return ret; } return ret; } static void devmgr_mitigation_callback(struct device_clnt_data *clnt, union device_request *req, void *data) { uint32_t step1_rel = (int)data; int ret = 0; if (!clnt) { pr_err("Invalid client\n"); return; } if (!supply_lm_data->step2_cpu_freq_initiated && !supply_lm_data->step2_hotplug_initiated) { return; } if (supply_lm_debug_mask & SUPPLY_LM_POST_MITIGATION_REQUEST) pr_info( "status:Hotplug:%d freq:%d po_vote:%d input-req:%d\n", supply_lm_data->step2_cpu_freq_initiated, supply_lm_data->step2_hotplug_initiated, step1_rel, supply_lm_data->inp_trig_for_mit); if (supply_lm_data->step2_cpu_freq_initiated && supply_lm_data->cpufreq_handle == clnt) { supply_lm_data->step2_cpu_freq_initiated = false; update_mtgtn_debug_event(SUPPLY_LM_DBG_PST_CFRQ, NULL, NULL, req->freq.max_freq); } else if (supply_lm_data->step2_hotplug_initiated && supply_lm_data->hotplug_handle == clnt) { supply_lm_data->step2_hotplug_initiated = false; update_mtgtn_debug_event(SUPPLY_LM_DBG_PST_HPLG, NULL, NULL, 0); } else { return; } if 
(!supply_lm_data->step2_cpu_freq_initiated && !supply_lm_data->step2_hotplug_initiated) { if (!supply_lm_data->inp_trig_for_mit && ((step1_rel & 1) ^ (step1_rel >> 16))) { update_mtgtn_debug_event(SUPPLY_LM_DBG_STEP1_REL, NULL, NULL, step1_rel); mutex_lock(&supply_lm_tz_lock); trace_supply_lm_pre_scm(step1_rel); ret = scm_call_atomic1(SCM_SVC_PWR, SUPPLY_LM_STEP1_REQ_CMD, step1_rel >> 16); trace_supply_lm_post_scm(ret); mutex_unlock(&supply_lm_tz_lock); if (ret < 0) pr_err("scm_call failed. with ret:%d\n", ret); if (supply_lm_monitor_task) complete(&supply_lm_mitigation_complete); } } } static int supply_lm_devices_read(struct supply_lm_input_data *input) { int ret = 0; struct input_device_info *curr_device = NULL; list_for_each_entry(curr_device, &supply_lm_device_list, list_ptr) { if (!curr_device->ops.read_state) { pr_err("Sensor read not defined for %s\n", curr_device->device_name); ret = -EINVAL; break; } ret = curr_device->ops.read_state(curr_device, input); if (ret) { pr_err("Sensor read failed for %s\n", curr_device->device_name); ret = -EINVAL; break; } } return ret; } static int get_curr_hotplug_request(uint8_t core_num, cpumask_t *cpu_mask, char *buf) { int cpu = 0; cpumask_t offline_mask; HOTPLUG_NO_MITIGATION(&offline_mask); for_each_possible_cpu(cpu) { if (!cpu_online(cpu)) cpumask_set_cpu(cpu, &offline_mask); } if (core_num == cpumask_weight(&offline_mask)) { cpumask_copy(cpu_mask, &offline_mask); } else if (core_num > cpumask_weight(&offline_mask)) { for (cpu = num_possible_cpus() - 1; cpu >= 0; cpu--) { if (!cpu_online(cpu)) continue; cpumask_set_cpu(cpu, &offline_mask); if (core_num == cpumask_weight(&offline_mask)) break; } cpumask_copy(cpu_mask, &offline_mask); } else if (core_num < cpumask_weight(&offline_mask)) { for_each_possible_cpu(cpu) { if (cpu_online(cpu)) continue; if (cpumask_test_cpu(cpu, &offline_mask)) { cpumask_clear_cpu(cpu, &offline_mask); if (core_num == cpumask_weight(&offline_mask)) break; } } cpumask_copy(cpu_mask, 
&offline_mask); } if (SUPPLY_LM_STEP2_MITIGATION_REQUEST & supply_lm_debug_mask) { cpumask_scnprintf(buf, CPU_BUF_SIZE, &offline_mask); pr_info("core req %d offline_mask %s\n", core_num, buf); } return 0; } static int handle_step2_mitigation( struct supply_lm_mitigation_data *mit_state) { int ret = 0; cpumask_t req_cpu_mask; bool hotplug_req = false; bool freq_req = false; union device_request curr_req; char buf[CPU_BUF_SIZE]; HOTPLUG_NO_MITIGATION(&req_cpu_mask); ret = get_curr_hotplug_request(mit_state->core_offline_req, &req_cpu_mask, buf); if (ret) goto step2_exit; if (!cpumask_equal(&supply_lm_data->supply_lm_offlined_cpu_mask, &req_cpu_mask)) { hotplug_req = true; cpumask_copy(&supply_lm_data->supply_lm_offlined_cpu_mask, &req_cpu_mask); } if (supply_lm_data->supply_lm_limited_max_freq != mit_state->freq_req) { freq_req = true; supply_lm_data->supply_lm_limited_max_freq = mit_state->freq_req; } /* Handle hotplug request */ if (supply_lm_data->hotplug_handle && hotplug_req) { if (SUPPLY_LM_STEP2_MITIGATION_REQUEST & supply_lm_debug_mask) pr_info("hotplug request for cpu mask:0x%s\n", buf); update_mtgtn_debug_event(SUPPLY_LM_DBG_PRE_HPLG, NULL, mit_state, 0); supply_lm_data->hotplug_handle->usr_data = (void *)mit_state->step1_rel; supply_lm_data->step2_hotplug_initiated = true; cpumask_copy(&curr_req.offline_mask, &supply_lm_data->supply_lm_offlined_cpu_mask); ret = devmgr_client_request_mitigation( supply_lm_data->hotplug_handle, HOTPLUG_MITIGATION_REQ, &curr_req); if (ret) { pr_err("hotplug request failed. 
err:%d\n", ret); goto step2_exit; } } /* Handle cpufreq request */ if (supply_lm_data->cpufreq_handle && freq_req) { if (SUPPLY_LM_STEP2_MITIGATION_REQUEST & supply_lm_debug_mask) pr_info("cpufreq request for max freq %u\n", supply_lm_data->supply_lm_limited_max_freq); update_mtgtn_debug_event(SUPPLY_LM_DBG_PRE_CFRQ, NULL, mit_state, supply_lm_data->supply_lm_limited_max_freq); supply_lm_data->cpufreq_handle->usr_data = (void *)mit_state->step1_rel; supply_lm_data->step2_cpu_freq_initiated = true; curr_req.freq.max_freq = supply_lm_data->supply_lm_limited_max_freq; curr_req.freq.min_freq = CPUFREQ_MIN_NO_MITIGATION; ret = devmgr_client_request_mitigation( supply_lm_data->cpufreq_handle, CPUFREQ_MITIGATION_REQ, &curr_req); if (ret) { pr_err("cpufreq request failed. err:%d\n", ret); goto step2_exit; } } step2_exit: return ret; } static int read_and_update_mitigation_state(bool step2_req) { int ret = 0; struct supply_lm_input_data input; struct supply_lm_mitigation_data mit_state; enum corner_state req_state; uint32_t core_online_req = num_possible_cpus(); ret = supply_lm_devices_read(&input); if (ret) goto read_exit; /* * Optimize states of cpu, gpu and modem * due to single supply architecture. 
*/ pr_debug("States before OPT T:%d N:%d C:%d G:%d M:%d\n", input.therm_state, input.num_cores, input.cpu_state, input.gpu_state, input.modem_state); req_state = max(input.gpu_state, max(input.cpu_state, input.modem_state)); input.cpu_state = req_state; if (input.gpu_state > CORNER_STATE_OFF) input.gpu_state = req_state; if (input.modem_state > CORNER_STATE_OFF) input.modem_state = req_state; pr_debug("states c:%d g:%d m:%d\n", input.cpu_state, input.gpu_state, input.modem_state); mutex_lock(&supply_lm_tz_lock); trace_supply_lm_pre_scm(input.therm_state | (input.num_cores << 4) | (input.cpu_state << 8) | (input.gpu_state << 12) | (input.modem_state << 16)); ret = scm_call_atomic5_3(SCM_SVC_PWR, SUPPLY_LM_GET_MIT_CMD, input.therm_state, input.num_cores, input.cpu_state, input.modem_state, input.gpu_state, &core_online_req, &mit_state.freq_req, &mit_state.step1_rel); mit_state.core_offline_req = num_possible_cpus() - core_online_req; trace_supply_lm_post_scm(ret); if (ret) { /* log all return variables for debug */ pr_err("scm call error. 
ret:%d O1:%d O2:%d O3:0x%x\n",
			ret, core_online_req, mit_state.freq_req,
			mit_state.step1_rel);
		mutex_unlock(&supply_lm_tz_lock);
		goto read_exit;
	}
	update_mtgtn_debug_event(SUPPLY_LM_DBG_IO, &input, &mit_state, 0);
	mutex_unlock(&supply_lm_tz_lock);

	if (SUPPLY_LM_IO_DATA & supply_lm_debug_mask)
		pr_info(
		"I/O:T:%d N:%d C:%d M:%d G:%d: Hplg:%d Fm:%u Step1_rel:0x%x\n",
		input.therm_state, input.num_cores,
		input.cpu_state, input.modem_state,
		input.gpu_state, mit_state.core_offline_req,
		mit_state.freq_req, mit_state.step1_rel);

	/* Step2 (hotplug + cpufreq) mitigation only when caller asked */
	if (step2_req)
		ret = handle_step2_mitigation(&mit_state);

read_exit:
	return ret;
}

/*
 * Monitor kthread: waits for an input-change notification, re-evaluates
 * the mitigation state with step2 enabled, then blocks (bounded by
 * MITIGATION_MAX_DELAY) for the devmgr completion callback so that
 * mitigation requests stay serialized.
 */
static int supply_lm_monitor(void *data)
{
	while (!kthread_should_stop()) {
		while (wait_for_completion_interruptible(
			&supply_lm_notify_complete) != 0)
			;
		INIT_COMPLETION(supply_lm_notify_complete);

		if (supply_lm_data->enabled) {
			supply_lm_data->inp_trig_for_mit = 0;
			read_and_update_mitigation_state(true);
		}
		/* To serialize mitigation, wait for devmgr callback */
		if (supply_lm_data->step2_cpu_freq_initiated ||
			supply_lm_data->step2_hotplug_initiated) {
			while (wait_for_completion_interruptible_timeout(
				&supply_lm_mitigation_complete,
				msecs_to_jiffies(MITIGATION_MAX_DELAY)) < 0)
				;
			INIT_COMPLETION(supply_lm_mitigation_complete);
		}
	}
	return 0;
}

/* Clean up and unlink every registered input device. */
static void supply_lm_remove_devices(struct platform_device *pdev)
{
	struct input_device_info *curr_device = NULL, *next_device = NULL;

	list_for_each_entry_safe(curr_device, next_device,
		&supply_lm_device_list, list_ptr) {
		curr_device->ops.clean_up(pdev, curr_device);
		list_del(&curr_device->list_ptr);
		pr_debug("Deregistering Sensor:[%s]\n",
			curr_device->device_name);
	}
}

/*
 * Read the modem corner from the shared interrupt register and map it
 * through modem_corner_map; in bypass mode report the cached value
 * without touching hardware.
 */
static int supply_lm_modem_state_read(struct input_device_info *device,
			struct supply_lm_input_data *curr_state)
{
	enum modem_corner_state modem;

	if (supply_lm_bypass_inp) {
		curr_state->modem_state = device->curr.state;
		return 0;
	}
	modem = readl_relaxed(supply_lm_data->modem_hw.intr_base_reg
			+ SUPPLY_LM_MODEM_DATA_OFFSET);
	device->curr.state =
modem_corner_map[modem]; curr_state->modem_state = device->curr.state; return 0; } static void supply_lm_modem_notify(struct work_struct *work) { enum modem_corner_state modem_state; struct input_device_info *modem = &supply_lm_devices[SUPPLY_LM_MODEM_DEVICE]; if (!supply_lm_data->enabled || supply_lm_bypass_inp) goto modem_exit; trace_supply_lm_inp_start_trig(SUPPLY_LM_MODEM_DEVICE, modem->curr.state); modem_state = readl_relaxed(supply_lm_data->modem_hw.intr_base_reg + SUPPLY_LM_MODEM_DATA_OFFSET); if (modem->curr.state == modem_corner_map[modem_state]) goto enable_exit; modem->curr.state = modem_corner_map[modem_state]; if (SUPPLY_LM_INPUTS & supply_lm_debug_mask) pr_info("s1 lm modem interrupt: modem data%d corner%d\n", modem_state, modem->curr.state); read_and_update_mitigation_state(false); supply_lm_data->inp_trig_for_mit |= BIT(SUPPLY_LM_MODEM_DEVICE); if (supply_lm_monitor_task && !supply_lm_data->suspend_in_progress) complete(&supply_lm_notify_complete); enable_exit: enable_irq(supply_lm_data->modem_hw.irq_num); trace_supply_lm_inp_end_trig(SUPPLY_LM_MODEM_DEVICE, modem->curr.state); modem_exit: return; } static void clear_modem_interrupt(void) { writel_relaxed(MODEM_INTR_CLEAR, supply_lm_data->modem_hw.intr_base_reg); } static irqreturn_t supply_lm_modem_handle_isr(int irq, void *data) { struct supply_lm_core_data *supply_lm_data = (struct supply_lm_core_data *)data; clear_modem_interrupt(); disable_irq_nosync(supply_lm_data->modem_hw.irq_num); queue_work(supply_lm_data->modem_hw.isr_wq, &supply_lm_data->modem_hw.isr_work); return IRQ_HANDLED; } static int supply_lm_modem_device_init(struct platform_device *pdev, struct input_device_info *device) { int ret = 0; supply_lm_data->modem_hw.isr_wq = alloc_workqueue("supply_lm_modem_isr_wq", WQ_HIGHPRI, 0); if (!supply_lm_data->modem_hw.isr_wq) { pr_err("Error allocating workqueue\n"); ret = -ENOMEM; goto init_exit; } INIT_WORK(&supply_lm_data->modem_hw.isr_work, supply_lm_modem_notify); 
supply_lm_data->modem_hw.irq_num = platform_get_irq(pdev, 0); if (supply_lm_data->modem_hw.irq_num < 0) { pr_err("Error getting IRQ number. %d\n", supply_lm_data->modem_hw.irq_num); ret = supply_lm_data->modem_hw.irq_num; goto init_exit; } ret = request_irq(supply_lm_data->modem_hw.irq_num, supply_lm_modem_handle_isr, IRQF_TRIGGER_HIGH, SUPPLY_LM_INTERRUPT, supply_lm_data); if (ret) { pr_err("Error getting irq for SUPPLY LM. err:%d\n", ret); goto init_exit; } init_exit: if (ret) supply_lm_remove_devices(pdev); return ret; } static unsigned int get_voltage_from_opp(struct device *dev, unsigned long freq, unsigned int *volt_id) { int ret = 0; struct opp *opp = NULL; rcu_read_lock(); opp = dev_pm_opp_find_freq_exact(dev, freq, true); if (IS_ERR(opp)) { pr_err("failed to find valid OPP for freq: %lu\n", freq); ret = -EINVAL; goto opp_exit; } else { *volt_id = dev_pm_opp_get_voltage(opp); if (!*volt_id) { pr_err("invalid OPP for freq %lu\n", freq); ret = -EINVAL; goto opp_exit; } } opp_exit: rcu_read_unlock(); return ret; } static int supply_lm_hotplug_state_read(struct input_device_info *device, struct supply_lm_input_data *curr_state) { if (!supply_lm_bypass_inp) device->curr.num_cores = num_online_cpus(); curr_state->num_cores = device->curr.num_cores; return 0; } static int supply_lm_cpu_state_read(struct input_device_info *device, struct supply_lm_input_data *curr_state) { curr_state->cpu_state = device->curr.state; return 0; } static int __ref supply_lm_hotplug_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { uint32_t cpu = (uintptr_t)hcpu; if (!supply_lm_data->enabled || supply_lm_bypass_inp) return NOTIFY_OK; if (supply_lm_data->step2_hotplug_initiated) return NOTIFY_OK; trace_supply_lm_inp_start_trig(SUPPLY_LM_NUM_CORE_DEVICE, cpu); if (SUPPLY_LM_INPUTS & supply_lm_debug_mask) pr_info("cpu %d event %ld\n", cpu, action); if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN || action == CPU_DEAD || action == CPU_DEAD_FROZEN) { 
read_and_update_mitigation_state(false); supply_lm_data->inp_trig_for_mit |= BIT(SUPPLY_LM_NUM_CORE_DEVICE); if (supply_lm_monitor_task && !supply_lm_data->suspend_in_progress) complete(&supply_lm_notify_complete); } trace_supply_lm_inp_end_trig(SUPPLY_LM_NUM_CORE_DEVICE, cpu); return NOTIFY_OK; } static struct notifier_block __refdata supply_lm_hotplug_notifier = { .notifier_call = supply_lm_hotplug_callback, }; static int supply_lm_cpufreq_callback(struct notifier_block *nfb, unsigned long event, void *data) { int i = 0; enum corner_state new_corner = CORNER_STATE_MAX; enum corner_state old_corner = CORNER_STATE_MAX; struct cpufreq_freqs *freqs = data; struct input_device_info *cpu = &supply_lm_devices[SUPPLY_LM_CPU_DEVICE]; bool mitigate = false; static unsigned long local_event; static unsigned int new_freq; if (!supply_lm_data->enabled || supply_lm_bypass_inp) return NOTIFY_OK; if ((local_event == event) && (new_freq == freqs->new)) return NOTIFY_OK; trace_supply_lm_inp_start_trig(SUPPLY_LM_CPU_DEVICE, freqs->old); local_event = event; new_freq = freqs->new; for (i = cpufreq_table_len - 1; i >= 0; i--) { if (cpufreq_corner_map[i].freq == freqs->new) new_corner = cpufreq_corner_map[i].state; else if (cpufreq_corner_map[i].freq == freqs->old) old_corner = cpufreq_corner_map[i].state; if (new_corner != CORNER_STATE_MAX && old_corner != CORNER_STATE_MAX) break; } if (new_corner == CORNER_STATE_MAX || old_corner == CORNER_STATE_MAX) goto callback_exit; switch (event) { case CPUFREQ_PRECHANGE: if (new_corner > old_corner) { if (cpu->curr.state == new_corner) break; if (SUPPLY_LM_INPUTS & supply_lm_debug_mask) pr_info( "PRE:old:%u new:%u old_crnr:%d new_crnr:%d\n", freqs->old, freqs->new, old_corner, new_corner); cpu->curr.state = new_corner; if (supply_lm_data->step2_cpu_freq_initiated) break; mitigate = true; } break; case CPUFREQ_POSTCHANGE: if (new_corner < old_corner) { if (cpu->curr.state == new_corner) break; if (SUPPLY_LM_INPUTS & supply_lm_debug_mask) 
pr_info( "POST:old:%u new:%u old_crnr:%d new_crnr:%d\n", freqs->old, freqs->new, old_corner, new_corner); cpu->curr.state = new_corner; if (supply_lm_data->step2_cpu_freq_initiated) break; mitigate = true; } break; default: pr_err("Unsupported event %ld\n", event); break; } if (mitigate) { read_and_update_mitigation_state(false); supply_lm_data->inp_trig_for_mit |= BIT(SUPPLY_LM_CPU_DEVICE); if (supply_lm_monitor_task && !supply_lm_data->suspend_in_progress) complete(&supply_lm_notify_complete); } callback_exit: trace_supply_lm_inp_end_trig(SUPPLY_LM_CPU_DEVICE, freqs->new); return NOTIFY_OK; } static struct notifier_block supply_lm_cpufreq_notifier = { .notifier_call = supply_lm_cpufreq_callback, }; static int create_cpufreq_opp_corner_table(struct device *dev) { int ret = 0, corner = 0; uint32_t i = 0; struct cpufreq_frequency_table *cpufreq_table; struct device *cpu_dev = NULL; if (cpufreq_corner_map) { pr_info("cpufreq corner map is already created\n"); goto fail_exit; } /* using cpu 0 dev since all cpus are in sync */ cpu_dev = get_cpu_device(0); cpufreq_table = cpufreq_frequency_get_table(0); if (!cpufreq_table) { pr_debug("error reading cpufreq table\n"); ret = -EINVAL; goto fail_exit; } while (cpufreq_table[i].frequency != CPUFREQ_TABLE_END) i++; cpufreq_table_len = i - 1; if (cpufreq_table_len < 1) { WARN(1, "CPU0 frequency table length:%d\n", cpufreq_table_len); ret = -EINVAL; goto fail_exit; } cpufreq_corner_map = devm_kzalloc(dev, sizeof(struct freq_corner_map) * cpufreq_table_len, GFP_KERNEL); if (!cpufreq_corner_map) { pr_err("Memory allocation failed\n"); ret = -ENOMEM; goto fail_exit; } i = 0; while (cpufreq_table[i].frequency != CPUFREQ_TABLE_END) { cpufreq_corner_map[i].freq = cpufreq_table[i].frequency; /* Get corner for freq which is not multiplier of 1000 in HZ */ if (cpufreq_table[i].frequency == NO_DIV_CPU_FREQ_KHZ) { ret = get_voltage_from_opp(cpu_dev, NO_DIV_CPU_FREQ_HZ, &corner); if (ret) goto fail_exit; } else { ret = 
get_voltage_from_opp(cpu_dev, cpufreq_table[i].frequency * 1000, &corner); if (ret) goto fail_exit; } cpufreq_corner_map[i].state = apps_corner_map[corner]; i++; } fail_exit: if (ret) { if (cpufreq_corner_map) devm_kfree(dev, cpufreq_corner_map); cpufreq_table_len = 0; } return ret; } static int supply_lm_cpu_device_init(struct platform_device *pdev, struct input_device_info *cpu) { int ret = 0; ret = create_cpufreq_opp_corner_table(&pdev->dev); if (ret) return ret; cpu->curr.state = CORNER_STATE_TURBO; ret = cpufreq_register_notifier(&supply_lm_cpufreq_notifier, CPUFREQ_TRANSITION_NOTIFIER); if (ret) { pr_err("cannot register cpufreq notifier. err:%d\n", ret); return ret; } return ret; } static int supply_lm_hotplug_device_init(struct platform_device *pdev, struct input_device_info *num_core) { if (num_possible_cpus() > 1) register_cpu_notifier(&supply_lm_hotplug_notifier); return 0; } static int supply_lm_gpu_clk_callback(struct notifier_block *nfb, unsigned long event, void *data) { int i; uint8_t new_volt_id = CORNER_STATE_MAX, old_volt_id = CORNER_STATE_MAX; struct msm_clk_notifier_data *clk_data = (struct msm_clk_notifier_data *)data; struct input_device_info *gpu = &supply_lm_devices[SUPPLY_LM_GPU_DEVICE]; bool mitigate = false; trace_supply_lm_inp_start_trig(SUPPLY_LM_GPU_DEVICE, clk_data->old_rate); if (!supply_lm_data->enabled || supply_lm_bypass_inp) goto callback_exit; for (i = 0; i < gpufreq_table_len; i++) { if (clk_data->new_rate == gpufreq_corner_map[i].freq) new_volt_id = gpufreq_corner_map[i].state; if (clk_data->old_rate == gpufreq_corner_map[i].freq) old_volt_id = gpufreq_corner_map[i].state; if (new_volt_id != CORNER_STATE_MAX && old_volt_id != CORNER_STATE_MAX) break; } if (i >= gpufreq_table_len) goto callback_exit; switch (event) { case PRE_RATE_CHANGE: if (new_volt_id > old_volt_id) { if (gpu->curr.state == new_volt_id) break; if (SUPPLY_LM_INPUTS & supply_lm_debug_mask) pr_info( "PRE:old:%lu new:%lu old_vlt:%d new_vlt:%d\n", 
clk_data->old_rate, clk_data->new_rate, old_volt_id, new_volt_id); gpu->curr.state = new_volt_id; mitigate = true; } break; case POST_RATE_CHANGE: if (new_volt_id < old_volt_id) { if (gpu->curr.state == new_volt_id) break; if (SUPPLY_LM_INPUTS & supply_lm_debug_mask) pr_info( "POST:old:%lu new:%lu old_vlt:%d new_vlt:%d\n", clk_data->old_rate, clk_data->new_rate, old_volt_id, new_volt_id); gpu->curr.state = new_volt_id; mitigate = true; } break; default: break; } if (mitigate) { read_and_update_mitigation_state(false); supply_lm_data->inp_trig_for_mit |= BIT(SUPPLY_LM_GPU_DEVICE); if (supply_lm_monitor_task && !supply_lm_data->suspend_in_progress) complete(&supply_lm_notify_complete); } callback_exit: trace_supply_lm_inp_end_trig(SUPPLY_LM_GPU_DEVICE, clk_data->new_rate); return NOTIFY_OK; } static int supply_lm_gpu_state_read(struct input_device_info *device, struct supply_lm_input_data *curr_state) { curr_state->gpu_state = device->curr.state; return 0; } static struct notifier_block supply_lm_gpu_notifier = { .notifier_call = supply_lm_gpu_clk_callback, }; static int get_gpufreq_table_from_devicetree(struct platform_device *pdev) { int ret = 0; int i = 0; struct device_node *child_node = NULL; struct device_node *gpu_pwr = NULL; gpu_pwr = of_find_compatible_node(NULL, NULL, "qcom,gpu-pwrlevels"); if (!gpu_pwr) { pr_err("Unable to find DT for qcom,gpu-pwrlevelsi\n"); ret = -EINVAL; goto read_fail; } gpufreq_table_len = of_get_child_count(gpu_pwr); if (gpufreq_table_len == 0) { pr_err("No gpu power levels nodes\n"); ret = -ENODEV; goto read_fail; } gpufreq_corner_map = devm_kzalloc(&pdev->dev, sizeof(struct freq_corner_map) * gpufreq_table_len, GFP_KERNEL); if (!gpufreq_corner_map) { pr_err("Memory cannot create\n"); ret = -ENOMEM; goto read_fail; } for_each_child_of_node(gpu_pwr, child_node) { if (i >= gpufreq_table_len) { pr_err("Invalid number of child node. 
err:%d\n", i); ret = -EINVAL; goto read_fail; } ret = of_property_read_u32(child_node, "qcom,gpu-freq", &gpufreq_corner_map[i].freq); if (ret) { pr_err("No gpu-freq node read error. err:%d\n", ret); goto read_fail; } i++; } read_fail: if (ret) { if (gpufreq_corner_map) devm_kfree(&pdev->dev, gpufreq_corner_map); } return ret; } static int create_gpufreq_opp_corner_table(void) { int ret = 0, i; uint32_t gpu_state = RPM_REGULATOR_CORNER_NONE; if (!gpufreq_corner_map) { pr_err("gpu frequency table is not initialized\n"); return -EINVAL; } for (i = 0; i < gpufreq_table_len; i++) { if (gpufreq_corner_map[i].freq == SUPPLY_LM_GPU_OFF_RATE) { gpufreq_corner_map[i].state = CORNER_STATE_OFF; continue; } ret = get_voltage_from_opp(&supply_lm_data->gpu_pdev->dev, gpufreq_corner_map[i].freq, &gpu_state); if (ret) { pr_err("Couldn't get corner for gpu freq:%u.ret:%d\n", gpufreq_corner_map[i].freq, ret); return -EINVAL; } gpufreq_corner_map[i].state = apps_corner_map[gpu_state]; } gpufreq_opp_corner_enabled = true; return ret; } static int supply_lm_gpu_device_init(struct platform_device *pdev, struct input_device_info *gpu) { int ret = 0; if (!gpufreq_opp_corner_enabled) { ret = create_gpufreq_opp_corner_table(); if (ret) { pr_err("can't create gpufreq corner table. err:%d\n", ret); return ret; } } ret = msm_clk_notif_register(supply_lm_data->gpu_handle, &supply_lm_gpu_notifier); if (ret) { pr_err("cannot register cpufreq notifier. 
err:%d\n", ret); return ret; } return ret; } static int supply_lm_therm_read(struct input_device_info *device, struct supply_lm_input_data *curr_state) { curr_state->therm_state = device->curr.therm_state; return 0; } static void supply_lm_thermal_notify(struct therm_threshold *trig_sens) { int i = 0; struct input_device_info *therm_sens = &supply_lm_devices[SUPPLY_LM_THERM_DEVICE]; struct threshold_info *trig_thresh = NULL; enum therm_state curr_therm_state = THERM_NORMAL; trace_supply_lm_inp_start_trig(SUPPLY_LM_THERM_DEVICE, therm_sens->curr.therm_state); if (!supply_lm_data->enabled || supply_lm_bypass_inp) goto notify_exit; if (!trig_sens || trig_sens->trip_triggered < 0) goto notify_exit; trig_thresh = trig_sens->parent; mutex_lock(&therm_sens->lock); switch (trig_sens->trip_triggered) { case THERMAL_TRIP_CONFIGURABLE_HI: if (trig_thresh == &supply_lm_therm_thresh[THERM_VERY_HOT - 1]) { if (SUPPLY_LM_INPUTS & supply_lm_debug_mask) pr_info( "sensor:%d triggered very hot thresh for\n", trig_sens->sensor_id); supply_lm_therm_status[THERM_VERY_HOT] |= BIT(trig_sens->sensor_id); } else { if (SUPPLY_LM_INPUTS & supply_lm_debug_mask) pr_info( "sensor:%d triggered hot thresh for\n", trig_sens->sensor_id); supply_lm_therm_status[THERM_HOT] |= BIT(trig_sens->sensor_id); } break; case THERMAL_TRIP_CONFIGURABLE_LOW: if (trig_thresh == &supply_lm_therm_thresh[THERM_VERY_HOT - 1]) { if (SUPPLY_LM_INPUTS & supply_lm_debug_mask) pr_info( "sensor:%d cleared very hot thresh for\n", trig_sens->sensor_id); if (supply_lm_therm_status[THERM_VERY_HOT] & BIT(trig_sens->sensor_id)) supply_lm_therm_status[THERM_VERY_HOT] ^= BIT(trig_sens->sensor_id); } else { if (SUPPLY_LM_INPUTS & supply_lm_debug_mask) pr_info( "sensor:%d cleared hot thresh for\n", trig_sens->sensor_id); if (supply_lm_therm_status[THERM_HOT] & BIT(trig_sens->sensor_id)) supply_lm_therm_status[THERM_HOT] ^= BIT(trig_sens->sensor_id); } break; default: pr_err("Unsupported trip type\n"); mutex_unlock(&therm_sens->lock); 
goto set_and_exit; break; } for (i = MAX_THERM_LEVEL - 1; i >= 0; i--) { if (supply_lm_therm_status[i]) { curr_therm_state = i; break; } } if (i < 0) curr_therm_state = THERM_NORMAL; pr_debug("current state is %d req %d V%d H%d N%d\n", therm_sens->curr.therm_state, curr_therm_state, supply_lm_therm_status[THERM_VERY_HOT], supply_lm_therm_status[THERM_HOT], supply_lm_therm_status[THERM_NORMAL]); if (therm_sens->curr.therm_state == curr_therm_state) { mutex_unlock(&therm_sens->lock); goto set_and_exit; } therm_sens->curr.therm_state = curr_therm_state; mutex_unlock(&therm_sens->lock); read_and_update_mitigation_state(false); supply_lm_data->inp_trig_for_mit |= BIT(SUPPLY_LM_THERM_DEVICE); if (supply_lm_monitor_task && !supply_lm_data->suspend_in_progress) complete(&supply_lm_notify_complete); set_and_exit: sensor_mgr_set_threshold(trig_sens->sensor_id, trig_sens->threshold); notify_exit: trace_supply_lm_inp_end_trig(SUPPLY_LM_THERM_DEVICE, therm_sens->curr.therm_state); return; } static int get_therm_devicetree_data(struct device *dev, char *key, int *hi_temp, int *low_temp) { int ret = 0; int cnt; if (!of_get_property(dev->of_node, key, &cnt) || cnt <= 0) { pr_err("Property %s not defined.\n", key); ret = -ENODEV; goto therm_data_exit; } if (cnt % (sizeof(__be32) * 2)) { pr_err("Invalid number(%d) of entry for %s\n", cnt, key); ret = -EINVAL; goto therm_data_exit; } ret = of_property_read_u32_index(dev->of_node, key, 0, hi_temp); if (ret) { pr_err("Error reading index%d\n", 0); goto therm_data_exit; } ret = of_property_read_u32_index(dev->of_node, key, 1, low_temp); if (ret) { pr_err("Error reading index%d\n", 1); goto therm_data_exit; } therm_data_exit: return ret; } static int initialize_therm_device_state( struct input_device_info *therm_device) { int ret = 0, i; long temp = 0; struct threshold_info *thresh = &supply_lm_therm_thresh[THERM_HOT - 1]; enum therm_state curr_therm_state = THERM_NORMAL; supply_lm_therm_status[THERM_VERY_HOT] = 0; 
supply_lm_therm_status[THERM_HOT] = 0; supply_lm_therm_status[THERM_NORMAL] = UINT_MAX; mutex_lock(&therm_device->lock); for (i = 0; i < thresh->thresh_ct; i++) { ret = sensor_get_temp(thresh->thresh_list[i].sensor_id, &temp); if (ret) { pr_err("Unable to read TSENS sensor:%d. err:%d\n", thresh->thresh_list[i].sensor_id, ret); continue; } if (temp >= supply_lm_data->very_hot_temp_degC) { supply_lm_therm_status[THERM_VERY_HOT] |= BIT(thresh->thresh_list[i].sensor_id); supply_lm_therm_status[THERM_HOT] |= BIT(thresh->thresh_list[i].sensor_id); } else if (temp >= supply_lm_data->hot_temp_degC) { supply_lm_therm_status[THERM_HOT] |= BIT(thresh->thresh_list[i].sensor_id); } } for (i = MAX_THERM_LEVEL - 1; i >= 0; i--) { if (supply_lm_therm_status[i]) { curr_therm_state = i; break; } } therm_device->curr.therm_state = curr_therm_state; mutex_unlock(&therm_device->lock); return 0; } static int supply_lm_therm_device_init(struct platform_device *pdev, struct input_device_info *therm_device) { int ret = 0; struct supply_lm_core_data *supply_lm_data = platform_get_drvdata(pdev); if (!supply_lm_therm_thresh) { supply_lm_therm_thresh = devm_kzalloc(&pdev->dev, sizeof(struct threshold_info) * (MAX_THERM_LEVEL - 1), GFP_KERNEL); if (!supply_lm_therm_thresh) { pr_err("kzalloc failed\n"); ret = -ENOMEM; goto therm_exit; } } ret = sensor_mgr_init_threshold(&pdev->dev, &supply_lm_therm_thresh[THERM_HOT - 1], MONITOR_ALL_TSENS, supply_lm_data->hot_temp_degC, supply_lm_data->hot_temp_hyst, supply_lm_thermal_notify); if (ret) { pr_err( "Error in initializing thresholds for index:%d. err:%d\n", THERM_HOT, ret); goto therm_exit; } ret = sensor_mgr_init_threshold(&pdev->dev, &supply_lm_therm_thresh[THERM_VERY_HOT - 1], MONITOR_ALL_TSENS, supply_lm_data->very_hot_temp_degC, supply_lm_data->very_hot_temp_hyst, supply_lm_thermal_notify); if (ret) { pr_err( "Error in initializing thresholds for index:%d. 
err:%d\n", THERM_VERY_HOT, ret); goto therm_exit; } initialize_therm_device_state(therm_device); ret = sensor_mgr_convert_id_and_set_threshold( &supply_lm_therm_thresh[THERM_HOT - 1]); if (ret) { pr_err("Error in setting thresholds for index:%d. err:%d\n", THERM_HOT, ret); goto therm_exit; } ret = sensor_mgr_convert_id_and_set_threshold( &supply_lm_therm_thresh[THERM_VERY_HOT - 1]); if (ret) { pr_err("Error in setting thresholds for index:%d. err:%d\n", THERM_VERY_HOT, ret); goto therm_exit; } therm_exit: if (ret) { if (supply_lm_therm_thresh) { devm_kfree(&pdev->dev, supply_lm_therm_thresh); supply_lm_therm_thresh = NULL; } } return ret; } static int supply_lm_devices_init(struct platform_device *pdev) { int i = 0; int ret = 0; /* initialize all devices here */ for (i = 0; i < SUPPLY_LM_DEVICE_MAX; i++) { mutex_init(&supply_lm_devices[i].lock); ret = supply_lm_devices[i].ops.setup(pdev, &supply_lm_devices[i]); if (ret) goto device_exit; list_add_tail(&supply_lm_devices[i].list_ptr, &supply_lm_device_list); supply_lm_devices[i].enabled = true; } device_exit: return ret; } static void supply_lm_therm_cleanup(struct platform_device *pdev, struct input_device_info *therm) { if (!therm->enabled) return; if (supply_lm_therm_thresh) { sensor_mgr_remove_threshold(&pdev->dev, &supply_lm_therm_thresh[THERM_VERY_HOT - 1]); sensor_mgr_remove_threshold(&pdev->dev, &supply_lm_therm_thresh[THERM_HOT - 1]); devm_kfree(&pdev->dev, supply_lm_therm_thresh); supply_lm_therm_thresh = NULL; } therm->enabled = false; } static void supply_lm_hotplug_device_exit(struct platform_device *pdev, struct input_device_info *core) { if (!core->enabled) return; unregister_cpu_notifier(&supply_lm_hotplug_notifier); core->enabled = false; } static void supply_lm_cpu_state_device_exit(struct platform_device *pdev, struct input_device_info *cpu) { int ret = 0; if (!cpu->enabled) return; ret = cpufreq_unregister_notifier(&supply_lm_cpufreq_notifier, CPUFREQ_TRANSITION_NOTIFIER); if (ret) { 
pr_err("cannot unregister cpufreq notifier. err:%d\n", ret); return; } if (cpufreq_corner_map) { devm_kfree(&pdev->dev, cpufreq_corner_map); cpufreq_corner_map = NULL; } cpu->enabled = false; } static void supply_lm_gpu_state_device_exit(struct platform_device *pdev, struct input_device_info *gpu) { int ret = 0; if (!gpu->enabled) return; ret = msm_clk_notif_unregister(supply_lm_data->gpu_handle, &supply_lm_gpu_notifier); if (ret) { pr_err("cannot unregister gpu clk notifier. err:%d\n", ret); return; } gpu->enabled = false; } static void supply_lm_modem_state_device_exit(struct platform_device *pdev, struct input_device_info *modem) { struct supply_lm_core_data *supply_lm_data = platform_get_drvdata(pdev); if (!modem->enabled) return; destroy_workqueue(supply_lm_data->modem_hw.isr_wq); free_irq(supply_lm_data->modem_hw.irq_num, supply_lm_data); modem->enabled = false; } static int supply_lm_cpu_pm_notify(struct notifier_block *nb, unsigned long action, void *data) { unsigned int cpu = smp_processor_id(); unsigned int cpu_idle_cnt = 0x0; struct input_device_info *modem = &supply_lm_devices[SUPPLY_LM_MODEM_DEVICE]; enum modem_corner_state modem_state; if (!supply_lm_data->enabled || supply_lm_bypass_inp) return NOTIFY_OK; switch (action) { case CPU_PM_ENTER: spin_lock(&supply_lm_pm_lock); cpumask_set_cpu(cpu, &supply_lm_data->cpu_idle_mask); cpu_idle_cnt = cpumask_weight(&supply_lm_data->cpu_idle_mask); if (cpu_idle_cnt == num_online_cpus()) disable_irq_nosync(supply_lm_data->modem_hw.irq_num); spin_unlock(&supply_lm_pm_lock); break; case CPU_PM_ENTER_FAILED: case CPU_PM_EXIT: spin_lock(&supply_lm_pm_lock); cpu_idle_cnt = cpumask_weight(&supply_lm_data->cpu_idle_mask); if (cpu_idle_cnt == num_online_cpus()) { /* Handle missing interrupt here */ modem_state = readl_relaxed(supply_lm_data->modem_hw.intr_base_reg + SUPPLY_LM_MODEM_DATA_OFFSET); if (modem->curr.state != modem_corner_map[modem_state]) { modem->curr.state = modem_corner_map[modem_state]; if 
(supply_lm_monitor_task && !supply_lm_data->suspend_in_progress) complete(&supply_lm_notify_complete); } enable_irq(supply_lm_data->modem_hw.irq_num); } cpumask_clear_cpu(cpu, &supply_lm_data->cpu_idle_mask); spin_unlock(&supply_lm_pm_lock); break; default: break; } return NOTIFY_OK; } static struct notifier_block supply_lm_cpu_pm_notifier = { .notifier_call = supply_lm_cpu_pm_notify, }; static int supply_lm_devm_ioremap(struct platform_device *pdev, const char *res_name, void __iomem **virt) { struct resource *supply_lm_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); if (!supply_lm_res) { pr_err("error missing config of %s reg-space\n", res_name); return -ENODEV; } *virt = devm_ioremap(&pdev->dev, supply_lm_res->start, resource_size(supply_lm_res)); if (!*virt) { pr_err("error %s ioremap(phy:0x%lx len:0x%lx) failed\n", res_name, (ulong) supply_lm_res->start, (ulong) resource_size(supply_lm_res)); return -ENOMEM; } pr_debug("%s ioremap(phy:0x%lx vir:0x%p len:0x%lx)\n", res_name, (ulong) supply_lm_res->start, *virt, (ulong) resource_size(supply_lm_res)); return 0; } static int opp_clk_get_from_handle(struct platform_device *pdev, const char *phandle, struct platform_device **opp_pdev, struct clk **opp_clk) { int ret = 0; struct device_node *opp_dev_node = NULL; if (!pdev || !phandle || !opp_pdev || !opp_clk) { pr_err("Invalid Input\n"); ret = -EINVAL; goto clk_exit; } opp_dev_node = of_parse_phandle(pdev->dev.of_node, phandle, 0); if (!opp_dev_node) { pr_err("Could not find %s device nodes\n", phandle); ret = -EINVAL; goto clk_exit; } *opp_pdev = of_find_device_by_node(opp_dev_node); if (!*opp_pdev) { pr_err("can't find device for node for %s\n", phandle); ret = -EINVAL; goto clk_exit; } *opp_clk = devm_clk_get(&(*opp_pdev)->dev, "core_clk"); if (IS_ERR(*opp_clk)) { pr_err("Error getting core clk: %lu\n", PTR_ERR(*opp_clk)); ret = PTR_ERR(*opp_clk); goto clk_exit; } clk_exit: if (ret) if (opp_clk) *opp_clk = NULL; return ret; } static int 
supply_lm_get_devicetree_data(struct platform_device *pdev) { int ret = 0; ret = supply_lm_devm_ioremap(pdev, "intr_reg", &supply_lm_data->modem_hw.intr_base_reg); if (ret) goto dev_exit; ret = opp_clk_get_from_handle(pdev, "gpu-dev-opp", &supply_lm_data->gpu_pdev, &supply_lm_data->gpu_handle); if (ret) goto dev_exit; ret = get_gpufreq_table_from_devicetree(pdev); if (ret) goto dev_exit; ret = get_therm_devicetree_data(&pdev->dev, "qcom,supply-lm-hot-temp-range", &supply_lm_data->hot_temp_degC, &supply_lm_data->hot_temp_hyst); if (ret) goto dev_exit; ret = get_therm_devicetree_data(&pdev->dev, "qcom,supply-lm-very-hot-temp-range", &supply_lm_data->very_hot_temp_degC, &supply_lm_data->very_hot_temp_hyst); if (ret) goto dev_exit; dev_exit: if (ret) pr_err("Error reading. err:%d\n", ret); return ret; } static int supply_lm_core_remove(struct platform_device *pdev) { struct supply_lm_core_data *supply_lm_data = platform_get_drvdata(pdev); supply_lm_remove_devices(pdev); if (gpufreq_corner_map) { devm_kfree(&pdev->dev, gpufreq_corner_map); gpufreq_corner_map = NULL; } if (!supply_lm_data) return 0; if (supply_lm_data->modem_hw.intr_base_reg) { iounmap(supply_lm_data->modem_hw.intr_base_reg); supply_lm_data->modem_hw.intr_base_reg = NULL; } /* De-register KTM handle */ if (supply_lm_data->hotplug_handle) { devmgr_unregister_mitigation_client(&pdev->dev, supply_lm_data->hotplug_handle); supply_lm_data->hotplug_handle = NULL; } if (supply_lm_data->cpufreq_handle) { devmgr_unregister_mitigation_client(&pdev->dev, supply_lm_data->cpufreq_handle); supply_lm_data->cpufreq_handle = NULL; } devm_kfree(&pdev->dev, supply_lm_data); supply_lm_data = NULL; return 0; } static void initialize_supply_lm_data(struct supply_lm_core_data *supply_lm) { if (!supply_lm) return; supply_lm->enabled = false; supply_lm->suspend_in_progress = false; supply_lm->step2_cpu_freq_initiated = false; supply_lm->step2_hotplug_initiated = false; supply_lm->supply_lm_limited_max_freq = UINT_MAX; 
supply_lm->supply_lm_offlined_cpu_mask = CPU_MASK_NONE; HOTPLUG_NO_MITIGATION(&supply_lm->supply_lm_offlined_cpu_mask); } static int supply_lm_core_probe(struct platform_device *pdev) { int ret = 0; struct device_clnt_data *handle = NULL; if (supply_lm_data) { pr_err("Reinitializing supply_lm core driver\n"); ret = -EEXIST; goto probe_exit; } supply_lm_data = devm_kzalloc(&pdev->dev, sizeof(struct supply_lm_core_data), GFP_KERNEL); if (!supply_lm_data) { pr_err("kzalloc failed\n"); ret = -ENOMEM; goto probe_exit; } supply_lm_data->pdev = pdev; initialize_supply_lm_data(supply_lm_data); ret = supply_lm_get_devicetree_data(pdev); if (ret) { pr_err("Error getting device tree data. err:%d\n", ret); goto devicetree_exit; } /* Initialize mitigation KTM interface */ if (num_possible_cpus() > 1) { handle = devmgr_register_mitigation_client(&pdev->dev, HOTPLUG_DEVICE, devmgr_mitigation_callback); if (IS_ERR_OR_NULL(handle)) { ret = PTR_ERR(handle); pr_err("Error registering for hotplug. ret:%d\n", ret); goto ktm_handle_exit; } supply_lm_data->hotplug_handle = handle; } handle = devmgr_register_mitigation_client(&pdev->dev, CPU0_DEVICE, devmgr_mitigation_callback); if (IS_ERR_OR_NULL(handle)) { ret = PTR_ERR(handle); pr_err("Error registering for cpufreq. ret:%d\n", ret); goto ktm_handle_exit; } supply_lm_data->cpufreq_handle = handle; platform_set_drvdata(pdev, supply_lm_data); init_completion(&supply_lm_notify_complete); init_completion(&supply_lm_mitigation_complete); supply_lm_monitor_task = kthread_run(supply_lm_monitor, NULL, "supply-lm:monitor"); if (IS_ERR(supply_lm_monitor_task)) { pr_err("Failed to create SUPPLY LM monitor thread. 
err:%ld\n", PTR_ERR(supply_lm_monitor_task)); ret = PTR_ERR(supply_lm_monitor_task); goto ktm_handle_exit; } spin_lock_init(&supply_lm_pm_lock); cpu_pm_register_notifier(&supply_lm_cpu_pm_notifier); ret = create_supply_lm_debugfs(pdev); if (ret) { pr_err("Creating debug_fs failed\n"); goto kthread_exit; } supply_lm_debug = dma_alloc_coherent(&pdev->dev, num_dbg_elements * sizeof(struct supply_lm_debug), &supply_lm_debug_phys, GFP_KERNEL); if (!supply_lm_debug) { pr_err("Debug counter init is failed\n"); ret = -EINVAL; goto debugfs_exit; } ret = create_supply_lm_sysfs(); if (ret) goto debugfs_exit; return ret; debugfs_exit: if (supply_lm_debugfs) { debugfs_remove_recursive(supply_lm_debugfs->parent); devm_kfree(&pdev->dev, supply_lm_debugfs); supply_lm_debugfs = NULL; } kthread_exit: if (supply_lm_monitor_task) kthread_stop(supply_lm_monitor_task); cpu_pm_unregister_notifier(&supply_lm_cpu_pm_notifier); ktm_handle_exit: if (supply_lm_data->hotplug_handle) { devmgr_unregister_mitigation_client(&pdev->dev, supply_lm_data->hotplug_handle); supply_lm_data->hotplug_handle = NULL; } if (supply_lm_data->cpufreq_handle) { devmgr_unregister_mitigation_client(&pdev->dev, supply_lm_data->cpufreq_handle); supply_lm_data->cpufreq_handle = NULL; } devicetree_exit: if (supply_lm_data->modem_hw.intr_base_reg) { iounmap(supply_lm_data->modem_hw.intr_base_reg); supply_lm_data->modem_hw.intr_base_reg = NULL; } if (gpufreq_corner_map) { devm_kfree(&pdev->dev, gpufreq_corner_map); gpufreq_corner_map = NULL; } devm_kfree(&pdev->dev, supply_lm_data); supply_lm_data = NULL; probe_exit: return ret; } static struct input_device_info supply_lm_devices[] = { [SUPPLY_LM_THERM_DEVICE] = { .device_name = "therm_state", .ops = { .setup = supply_lm_therm_device_init, .read_state = supply_lm_therm_read, .clean_up = supply_lm_therm_cleanup, }, }, [SUPPLY_LM_NUM_CORE_DEVICE] = { .device_name = "core_num", .ops = { .setup = supply_lm_hotplug_device_init, .read_state = supply_lm_hotplug_state_read, 
.clean_up = supply_lm_hotplug_device_exit, }, }, [SUPPLY_LM_CPU_DEVICE] = { .device_name = "cpu_state", .ops = { .setup = supply_lm_cpu_device_init, .read_state = supply_lm_cpu_state_read, .clean_up = supply_lm_cpu_state_device_exit, }, }, [SUPPLY_LM_MODEM_DEVICE] = { .device_name = "modem_state", .ops = { .setup = supply_lm_modem_device_init, .read_state = supply_lm_modem_state_read, .clean_up = supply_lm_modem_state_device_exit, }, }, [SUPPLY_LM_GPU_DEVICE] = { .device_name = "gpu_state", .ops = { .setup = supply_lm_gpu_device_init, .read_state = supply_lm_gpu_state_read, .clean_up = supply_lm_gpu_state_device_exit, }, }, }; static int supply_lm_suspend_noirq(struct device *dev) { pr_debug("Suspend in process, disabling step2 mitigation\n"); supply_lm_data->suspend_in_progress = true; return 0; } static int supply_lm_resume_noirq(struct device *dev) { pr_debug("Resuming from suspend, enabling step2 mitigation\n"); supply_lm_data->suspend_in_progress = false; return 0; } static const struct dev_pm_ops supply_lm_pm_ops = { .suspend_noirq = supply_lm_suspend_noirq, .resume_noirq = supply_lm_resume_noirq, }; static struct of_device_id supply_lm_match[] = { { .compatible = "qcom,supply-lm", }, {}, }; static struct platform_driver supply_lm_driver = { .probe = supply_lm_core_probe, .remove = supply_lm_core_remove, .driver = { .name = SUPPLY_LM_DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = supply_lm_match, .pm = &supply_lm_pm_ops, }, }; int __init supply_lm_driver_init(void) { return platform_driver_register(&supply_lm_driver); } static void __exit supply_lm_driver_exit(void) { platform_driver_unregister(&supply_lm_driver); } late_initcall(supply_lm_driver_init); module_exit(supply_lm_driver_exit); MODULE_DESCRIPTION("SUPPLY LM CORE"); MODULE_ALIAS("platform:" SUPPLY_LM_DRIVER_NAME); MODULE_LICENSE("GPL v2");
gpl-2.0
starsoc/linux-star-x7
sound/pci/cs46xx/cs46xx_lib.c
67
107452
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Abramo Bagnara <abramo@alsa-project.org> * Cirrus Logic, Inc. * Routines for control of Cirrus Logic CS461x chips * * KNOWN BUGS: * - Sometimes the SPDIF input DSP tasks get's unsynchronized * and the SPDIF get somewhat "distorcionated", or/and left right channel * are swapped. To get around this problem when it happens, mute and unmute * the SPDIF input mixer control. * - On the Hercules Game Theater XP the amplifier are sometimes turned * off on inadecuate moments which causes distorcions on sound. * * TODO: * - Secondary CODEC on some soundcards * - SPDIF input support for other sample rates then 48khz * - Posibility to mix the SPDIF output with analog sources. * - PCM channels for Center and LFE on secondary codec * * NOTE: with CONFIG_SND_CS46XX_NEW_DSP unset uses old DSP image (which * is default configuration), no SPDIF, no secondary codec, no * multi channel PCM. But known to work. * * FINALLY: A credit to the developers Tom and Jordan * at Cirrus for have helping me out with the DSP, however we * still don't have sufficient documentation and technical * references to be able to implement all fancy feutures * supported by the cs46xx DSP's. * Benny <benny@hostmobility.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/pci.h> #include <linux/pm.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/gameport.h> #include <linux/mutex.h> #include <linux/export.h> #include <sound/core.h> #include <sound/control.h> #include <sound/info.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include "cs46xx.h" #include <asm/io.h> #include "cs46xx_lib.h" #include "dsp_spos.h" static void amp_voyetra(struct snd_cs46xx *chip, int change); #ifdef CONFIG_SND_CS46XX_NEW_DSP static struct snd_pcm_ops snd_cs46xx_playback_rear_ops; static struct snd_pcm_ops snd_cs46xx_playback_indirect_rear_ops; static struct snd_pcm_ops snd_cs46xx_playback_clfe_ops; static struct snd_pcm_ops snd_cs46xx_playback_indirect_clfe_ops; static struct snd_pcm_ops snd_cs46xx_playback_iec958_ops; static struct snd_pcm_ops snd_cs46xx_playback_indirect_iec958_ops; #endif static struct snd_pcm_ops snd_cs46xx_playback_ops; static struct snd_pcm_ops snd_cs46xx_playback_indirect_ops; static struct snd_pcm_ops snd_cs46xx_capture_ops; static struct snd_pcm_ops snd_cs46xx_capture_indirect_ops; static unsigned short snd_cs46xx_codec_read(struct snd_cs46xx *chip, unsigned short reg, int codec_index) { int count; unsigned short result,tmp; u32 offset = 0; if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX && codec_index != CS46XX_SECONDARY_CODEC_INDEX)) return 0xffff; chip->active_ctrl(chip, 1); if (codec_index == CS46XX_SECONDARY_CODEC_INDEX) offset = CS46XX_SECONDARY_CODEC_OFFSET; /* * 1. Write ACCAD = Command Address Register = 46Ch for AC97 register address * 2. Write ACCDA = Command Data Register = 470h for data to write to AC97 * 3. Write ACCTL = Control Register = 460h for initiating the write7---55 * 4. 
Read ACCTL = 460h, DCV should be reset by now and 460h = 17h * 5. if DCV not cleared, break and return error * 6. Read ACSTS = Status Register = 464h, check VSTS bit */ snd_cs46xx_peekBA0(chip, BA0_ACSDA + offset); tmp = snd_cs46xx_peekBA0(chip, BA0_ACCTL); if ((tmp & ACCTL_VFRM) == 0) { snd_printk(KERN_WARNING "cs46xx: ACCTL_VFRM not set 0x%x\n",tmp); snd_cs46xx_pokeBA0(chip, BA0_ACCTL, (tmp & (~ACCTL_ESYN)) | ACCTL_VFRM ); msleep(50); tmp = snd_cs46xx_peekBA0(chip, BA0_ACCTL + offset); snd_cs46xx_pokeBA0(chip, BA0_ACCTL, tmp | ACCTL_ESYN | ACCTL_VFRM ); } /* * Setup the AC97 control registers on the CS461x to send the * appropriate command to the AC97 to perform the read. * ACCAD = Command Address Register = 46Ch * ACCDA = Command Data Register = 470h * ACCTL = Control Register = 460h * set DCV - will clear when process completed * set CRW - Read command * set VFRM - valid frame enabled * set ESYN - ASYNC generation enabled * set RSTN - ARST# inactive, AC97 codec not reset */ snd_cs46xx_pokeBA0(chip, BA0_ACCAD, reg); snd_cs46xx_pokeBA0(chip, BA0_ACCDA, 0); if (codec_index == CS46XX_PRIMARY_CODEC_INDEX) { snd_cs46xx_pokeBA0(chip, BA0_ACCTL,/* clear ACCTL_DCV */ ACCTL_CRW | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_DCV | ACCTL_CRW | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); } else { snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_DCV | ACCTL_TC | ACCTL_CRW | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); } /* * Wait for the read to occur. */ for (count = 0; count < 1000; count++) { /* * First, we want to wait for a short time. */ udelay(10); /* * Now, check to see if the read has completed. * ACCTL = 460h, DCV should be reset by now and 460h = 17h */ if (!(snd_cs46xx_peekBA0(chip, BA0_ACCTL) & ACCTL_DCV)) goto ok1; } snd_printk(KERN_ERR "AC'97 read problem (ACCTL_DCV), reg = 0x%x\n", reg); result = 0xffff; goto end; ok1: /* * Wait for the valid status bit to go active. 
*/ for (count = 0; count < 100; count++) { /* * Read the AC97 status register. * ACSTS = Status Register = 464h * VSTS - Valid Status */ if (snd_cs46xx_peekBA0(chip, BA0_ACSTS + offset) & ACSTS_VSTS) goto ok2; udelay(10); } snd_printk(KERN_ERR "AC'97 read problem (ACSTS_VSTS), codec_index %d, reg = 0x%x\n", codec_index, reg); result = 0xffff; goto end; ok2: /* * Read the data returned from the AC97 register. * ACSDA = Status Data Register = 474h */ #if 0 printk(KERN_DEBUG "e) reg = 0x%x, val = 0x%x, BA0_ACCAD = 0x%x\n", reg, snd_cs46xx_peekBA0(chip, BA0_ACSDA), snd_cs46xx_peekBA0(chip, BA0_ACCAD)); #endif //snd_cs46xx_peekBA0(chip, BA0_ACCAD); result = snd_cs46xx_peekBA0(chip, BA0_ACSDA + offset); end: chip->active_ctrl(chip, -1); return result; } static unsigned short snd_cs46xx_ac97_read(struct snd_ac97 * ac97, unsigned short reg) { struct snd_cs46xx *chip = ac97->private_data; unsigned short val; int codec_index = ac97->num; if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX && codec_index != CS46XX_SECONDARY_CODEC_INDEX)) return 0xffff; val = snd_cs46xx_codec_read(chip, reg, codec_index); return val; } static void snd_cs46xx_codec_write(struct snd_cs46xx *chip, unsigned short reg, unsigned short val, int codec_index) { int count; if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX && codec_index != CS46XX_SECONDARY_CODEC_INDEX)) return; chip->active_ctrl(chip, 1); /* * 1. Write ACCAD = Command Address Register = 46Ch for AC97 register address * 2. Write ACCDA = Command Data Register = 470h for data to write to AC97 * 3. Write ACCTL = Control Register = 460h for initiating the write * 4. Read ACCTL = 460h, DCV should be reset by now and 460h = 07h * 5. if DCV not cleared, break and return error */ /* * Setup the AC97 control registers on the CS461x to send the * appropriate command to the AC97 to perform the read. 
* ACCAD = Command Address Register = 46Ch * ACCDA = Command Data Register = 470h * ACCTL = Control Register = 460h * set DCV - will clear when process completed * reset CRW - Write command * set VFRM - valid frame enabled * set ESYN - ASYNC generation enabled * set RSTN - ARST# inactive, AC97 codec not reset */ snd_cs46xx_pokeBA0(chip, BA0_ACCAD , reg); snd_cs46xx_pokeBA0(chip, BA0_ACCDA , val); snd_cs46xx_peekBA0(chip, BA0_ACCTL); if (codec_index == CS46XX_PRIMARY_CODEC_INDEX) { snd_cs46xx_pokeBA0(chip, BA0_ACCTL, /* clear ACCTL_DCV */ ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_DCV | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); } else { snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_DCV | ACCTL_TC | ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); } for (count = 0; count < 4000; count++) { /* * First, we want to wait for a short time. */ udelay(10); /* * Now, check to see if the write has completed. * ACCTL = 460h, DCV should be reset by now and 460h = 07h */ if (!(snd_cs46xx_peekBA0(chip, BA0_ACCTL) & ACCTL_DCV)) { goto end; } } snd_printk(KERN_ERR "AC'97 write problem, codec_index = %d, reg = 0x%x, val = 0x%x\n", codec_index, reg, val); end: chip->active_ctrl(chip, -1); } static void snd_cs46xx_ac97_write(struct snd_ac97 *ac97, unsigned short reg, unsigned short val) { struct snd_cs46xx *chip = ac97->private_data; int codec_index = ac97->num; if (snd_BUG_ON(codec_index != CS46XX_PRIMARY_CODEC_INDEX && codec_index != CS46XX_SECONDARY_CODEC_INDEX)) return; snd_cs46xx_codec_write(chip, reg, val, codec_index); } /* * Chip initialization */ int snd_cs46xx_download(struct snd_cs46xx *chip, u32 *src, unsigned long offset, unsigned long len) { void __iomem *dst; unsigned int bank = offset >> 16; offset = offset & 0xffff; if (snd_BUG_ON((offset & 3) || (len & 3))) return -EINVAL; dst = chip->region.idx[bank+1].remap_addr + offset; len /= sizeof(u32); /* writel already converts 32-bit value to right endianess */ while (len-- > 0) { writel(*src++, dst); 
dst += sizeof(u32); } return 0; } #ifdef CONFIG_SND_CS46XX_NEW_DSP #include "imgs/cwc4630.h" #include "imgs/cwcasync.h" #include "imgs/cwcsnoop.h" #include "imgs/cwcbinhack.h" #include "imgs/cwcdma.h" int snd_cs46xx_clear_BA1(struct snd_cs46xx *chip, unsigned long offset, unsigned long len) { void __iomem *dst; unsigned int bank = offset >> 16; offset = offset & 0xffff; if (snd_BUG_ON((offset & 3) || (len & 3))) return -EINVAL; dst = chip->region.idx[bank+1].remap_addr + offset; len /= sizeof(u32); /* writel already converts 32-bit value to right endianess */ while (len-- > 0) { writel(0, dst); dst += sizeof(u32); } return 0; } #else /* old DSP image */ #include "cs46xx_image.h" int snd_cs46xx_download_image(struct snd_cs46xx *chip) { int idx, err; unsigned long offset = 0; for (idx = 0; idx < BA1_MEMORY_COUNT; idx++) { if ((err = snd_cs46xx_download(chip, &BA1Struct.map[offset], BA1Struct.memory[idx].offset, BA1Struct.memory[idx].size)) < 0) return err; offset += BA1Struct.memory[idx].size >> 2; } return 0; } #endif /* CONFIG_SND_CS46XX_NEW_DSP */ /* * Chip reset */ static void snd_cs46xx_reset(struct snd_cs46xx *chip) { int idx; /* * Write the reset bit of the SP control register. */ snd_cs46xx_poke(chip, BA1_SPCR, SPCR_RSTSP); /* * Write the control register. */ snd_cs46xx_poke(chip, BA1_SPCR, SPCR_DRQEN); /* * Clear the trap registers. */ for (idx = 0; idx < 8; idx++) { snd_cs46xx_poke(chip, BA1_DREG, DREG_REGID_TRAP_SELECT + idx); snd_cs46xx_poke(chip, BA1_TWPR, 0xFFFF); } snd_cs46xx_poke(chip, BA1_DREG, 0); /* * Set the frame timer to reflect the number of cycles per frame. */ snd_cs46xx_poke(chip, BA1_FRMT, 0xadf); } static int cs46xx_wait_for_fifo(struct snd_cs46xx * chip,int retry_timeout) { u32 i, status = 0; /* * Make sure the previous FIFO write operation has completed. 
*/ for(i = 0; i < 50; i++){ status = snd_cs46xx_peekBA0(chip, BA0_SERBST); if( !(status & SERBST_WBSY) ) break; mdelay(retry_timeout); } if(status & SERBST_WBSY) { snd_printk(KERN_ERR "cs46xx: failure waiting for " "FIFO command to complete\n"); return -EINVAL; } return 0; } static void snd_cs46xx_clear_serial_FIFOs(struct snd_cs46xx *chip) { int idx, powerdown = 0; unsigned int tmp; /* * See if the devices are powered down. If so, we must power them up first * or they will not respond. */ tmp = snd_cs46xx_peekBA0(chip, BA0_CLKCR1); if (!(tmp & CLKCR1_SWCE)) { snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp | CLKCR1_SWCE); powerdown = 1; } /* * We want to clear out the serial port FIFOs so we don't end up playing * whatever random garbage happens to be in them. We fill the sample FIFOS * with zero (silence). */ snd_cs46xx_pokeBA0(chip, BA0_SERBWP, 0); /* * Fill all 256 sample FIFO locations. */ for (idx = 0; idx < 0xFF; idx++) { /* * Make sure the previous FIFO write operation has completed. */ if (cs46xx_wait_for_fifo(chip,1)) { snd_printdd ("failed waiting for FIFO at addr (%02X)\n",idx); if (powerdown) snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp); break; } /* * Write the serial port FIFO index. */ snd_cs46xx_pokeBA0(chip, BA0_SERBAD, idx); /* * Tell the serial port to load the new value into the FIFO location. */ snd_cs46xx_pokeBA0(chip, BA0_SERBCM, SERBCM_WRC); } /* * Now, if we powered up the devices, then power them back down again. * This is kinda ugly, but should never happen. */ if (powerdown) snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp); } static void snd_cs46xx_proc_start(struct snd_cs46xx *chip) { int cnt; /* * Set the frame timer to reflect the number of cycles per frame. */ snd_cs46xx_poke(chip, BA1_FRMT, 0xadf); /* * Turn on the run, run at frame, and DMA enable bits in the local copy of * the SP control register. 
*/ snd_cs46xx_poke(chip, BA1_SPCR, SPCR_RUN | SPCR_RUNFR | SPCR_DRQEN); /* * Wait until the run at frame bit resets itself in the SP control * register. */ for (cnt = 0; cnt < 25; cnt++) { udelay(50); if (!(snd_cs46xx_peek(chip, BA1_SPCR) & SPCR_RUNFR)) break; } if (snd_cs46xx_peek(chip, BA1_SPCR) & SPCR_RUNFR) snd_printk(KERN_ERR "SPCR_RUNFR never reset\n"); } static void snd_cs46xx_proc_stop(struct snd_cs46xx *chip) { /* * Turn off the run, run at frame, and DMA enable bits in the local copy of * the SP control register. */ snd_cs46xx_poke(chip, BA1_SPCR, 0); } /* * Sample rate routines */ #define GOF_PER_SEC 200 static void snd_cs46xx_set_play_sample_rate(struct snd_cs46xx *chip, unsigned int rate) { unsigned long flags; unsigned int tmp1, tmp2; unsigned int phiIncr; unsigned int correctionPerGOF, correctionPerSec; /* * Compute the values used to drive the actual sample rate conversion. * The following formulas are being computed, using inline assembly * since we need to use 64 bit arithmetic to compute the values: * * phiIncr = floor((Fs,in * 2^26) / Fs,out) * correctionPerGOF = floor((Fs,in * 2^26 - Fs,out * phiIncr) / * GOF_PER_SEC) * ulCorrectionPerSec = Fs,in * 2^26 - Fs,out * phiIncr -M * GOF_PER_SEC * correctionPerGOF * * i.e. * * phiIncr:other = dividend:remainder((Fs,in * 2^26) / Fs,out) * correctionPerGOF:correctionPerSec = * dividend:remainder(ulOther / GOF_PER_SEC) */ tmp1 = rate << 16; phiIncr = tmp1 / 48000; tmp1 -= phiIncr * 48000; tmp1 <<= 10; phiIncr <<= 10; tmp2 = tmp1 / 48000; phiIncr += tmp2; tmp1 -= tmp2 * 48000; correctionPerGOF = tmp1 / GOF_PER_SEC; tmp1 -= correctionPerGOF * GOF_PER_SEC; correctionPerSec = tmp1; /* * Fill in the SampleRateConverter control block. 
*/ spin_lock_irqsave(&chip->reg_lock, flags); snd_cs46xx_poke(chip, BA1_PSRC, ((correctionPerSec << 16) & 0xFFFF0000) | (correctionPerGOF & 0xFFFF)); snd_cs46xx_poke(chip, BA1_PPI, phiIncr); spin_unlock_irqrestore(&chip->reg_lock, flags); } static void snd_cs46xx_set_capture_sample_rate(struct snd_cs46xx *chip, unsigned int rate) { unsigned long flags; unsigned int phiIncr, coeffIncr, tmp1, tmp2; unsigned int correctionPerGOF, correctionPerSec, initialDelay; unsigned int frameGroupLength, cnt; /* * We can only decimate by up to a factor of 1/9th the hardware rate. * Correct the value if an attempt is made to stray outside that limit. */ if ((rate * 9) < 48000) rate = 48000 / 9; /* * We can not capture at at rate greater than the Input Rate (48000). * Return an error if an attempt is made to stray outside that limit. */ if (rate > 48000) rate = 48000; /* * Compute the values used to drive the actual sample rate conversion. * The following formulas are being computed, using inline assembly * since we need to use 64 bit arithmetic to compute the values: * * coeffIncr = -floor((Fs,out * 2^23) / Fs,in) * phiIncr = floor((Fs,in * 2^26) / Fs,out) * correctionPerGOF = floor((Fs,in * 2^26 - Fs,out * phiIncr) / * GOF_PER_SEC) * correctionPerSec = Fs,in * 2^26 - Fs,out * phiIncr - * GOF_PER_SEC * correctionPerGOF * initialDelay = ceil((24 * Fs,in) / Fs,out) * * i.e. 
* * coeffIncr = neg(dividend((Fs,out * 2^23) / Fs,in)) * phiIncr:ulOther = dividend:remainder((Fs,in * 2^26) / Fs,out) * correctionPerGOF:correctionPerSec = * dividend:remainder(ulOther / GOF_PER_SEC) * initialDelay = dividend(((24 * Fs,in) + Fs,out - 1) / Fs,out) */ tmp1 = rate << 16; coeffIncr = tmp1 / 48000; tmp1 -= coeffIncr * 48000; tmp1 <<= 7; coeffIncr <<= 7; coeffIncr += tmp1 / 48000; coeffIncr ^= 0xFFFFFFFF; coeffIncr++; tmp1 = 48000 << 16; phiIncr = tmp1 / rate; tmp1 -= phiIncr * rate; tmp1 <<= 10; phiIncr <<= 10; tmp2 = tmp1 / rate; phiIncr += tmp2; tmp1 -= tmp2 * rate; correctionPerGOF = tmp1 / GOF_PER_SEC; tmp1 -= correctionPerGOF * GOF_PER_SEC; correctionPerSec = tmp1; initialDelay = ((48000 * 24) + rate - 1) / rate; /* * Fill in the VariDecimate control block. */ spin_lock_irqsave(&chip->reg_lock, flags); snd_cs46xx_poke(chip, BA1_CSRC, ((correctionPerSec << 16) & 0xFFFF0000) | (correctionPerGOF & 0xFFFF)); snd_cs46xx_poke(chip, BA1_CCI, coeffIncr); snd_cs46xx_poke(chip, BA1_CD, (((BA1_VARIDEC_BUF_1 + (initialDelay << 2)) << 16) & 0xFFFF0000) | 0x80); snd_cs46xx_poke(chip, BA1_CPI, phiIncr); spin_unlock_irqrestore(&chip->reg_lock, flags); /* * Figure out the frame group length for the write back task. Basically, * this is just the factors of 24000 (2^6*3*5^3) that are not present in * the output sample rate. */ frameGroupLength = 1; for (cnt = 2; cnt <= 64; cnt *= 2) { if (((rate / cnt) * cnt) != rate) frameGroupLength *= 2; } if (((rate / 3) * 3) != rate) { frameGroupLength *= 3; } for (cnt = 5; cnt <= 125; cnt *= 5) { if (((rate / cnt) * cnt) != rate) frameGroupLength *= 5; } /* * Fill in the WriteBack control block. 
*/ spin_lock_irqsave(&chip->reg_lock, flags); snd_cs46xx_poke(chip, BA1_CFG1, frameGroupLength); snd_cs46xx_poke(chip, BA1_CFG2, (0x00800000 | frameGroupLength)); snd_cs46xx_poke(chip, BA1_CCST, 0x0000FFFF); snd_cs46xx_poke(chip, BA1_CSPB, ((65536 * rate) / 24000)); snd_cs46xx_poke(chip, (BA1_CSPB + 4), 0x0000FFFF); spin_unlock_irqrestore(&chip->reg_lock, flags); } /* * PCM part */ static void snd_cs46xx_pb_trans_copy(struct snd_pcm_substream *substream, struct snd_pcm_indirect *rec, size_t bytes) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_cs46xx_pcm * cpcm = runtime->private_data; memcpy(cpcm->hw_buf.area + rec->hw_data, runtime->dma_area + rec->sw_data, bytes); } static int snd_cs46xx_playback_transfer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_cs46xx_pcm * cpcm = runtime->private_data; snd_pcm_indirect_playback_transfer(substream, &cpcm->pcm_rec, snd_cs46xx_pb_trans_copy); return 0; } static void snd_cs46xx_cp_trans_copy(struct snd_pcm_substream *substream, struct snd_pcm_indirect *rec, size_t bytes) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; memcpy(runtime->dma_area + rec->sw_data, chip->capt.hw_buf.area + rec->hw_data, bytes); } static int snd_cs46xx_capture_transfer(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); snd_pcm_indirect_capture_transfer(substream, &chip->capt.pcm_rec, snd_cs46xx_cp_trans_copy); return 0; } static snd_pcm_uframes_t snd_cs46xx_playback_direct_pointer(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); size_t ptr; struct snd_cs46xx_pcm *cpcm = substream->runtime->private_data; if (snd_BUG_ON(!cpcm->pcm_channel)) return -ENXIO; #ifdef CONFIG_SND_CS46XX_NEW_DSP ptr = snd_cs46xx_peek(chip, (cpcm->pcm_channel->pcm_reader_scb->address + 2) << 2); #else ptr = snd_cs46xx_peek(chip, 
BA1_PBA); #endif ptr -= cpcm->hw_buf.addr; return ptr >> cpcm->shift; } static snd_pcm_uframes_t snd_cs46xx_playback_indirect_pointer(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); size_t ptr; struct snd_cs46xx_pcm *cpcm = substream->runtime->private_data; #ifdef CONFIG_SND_CS46XX_NEW_DSP if (snd_BUG_ON(!cpcm->pcm_channel)) return -ENXIO; ptr = snd_cs46xx_peek(chip, (cpcm->pcm_channel->pcm_reader_scb->address + 2) << 2); #else ptr = snd_cs46xx_peek(chip, BA1_PBA); #endif ptr -= cpcm->hw_buf.addr; return snd_pcm_indirect_playback_pointer(substream, &cpcm->pcm_rec, ptr); } static snd_pcm_uframes_t snd_cs46xx_capture_direct_pointer(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); size_t ptr = snd_cs46xx_peek(chip, BA1_CBA) - chip->capt.hw_buf.addr; return ptr >> chip->capt.shift; } static snd_pcm_uframes_t snd_cs46xx_capture_indirect_pointer(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); size_t ptr = snd_cs46xx_peek(chip, BA1_CBA) - chip->capt.hw_buf.addr; return snd_pcm_indirect_capture_pointer(substream, &chip->capt.pcm_rec, ptr); } static int snd_cs46xx_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); /*struct snd_pcm_runtime *runtime = substream->runtime;*/ int result = 0; #ifdef CONFIG_SND_CS46XX_NEW_DSP struct snd_cs46xx_pcm *cpcm = substream->runtime->private_data; if (! 
cpcm->pcm_channel) { return -ENXIO; } #endif switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: #ifdef CONFIG_SND_CS46XX_NEW_DSP /* magic value to unmute PCM stream playback volume */ snd_cs46xx_poke(chip, (cpcm->pcm_channel->pcm_reader_scb->address + SCBVolumeCtrl) << 2, 0x80008000); if (cpcm->pcm_channel->unlinked) cs46xx_dsp_pcm_link(chip,cpcm->pcm_channel); if (substream->runtime->periods != CS46XX_FRAGS) snd_cs46xx_playback_transfer(substream); #else spin_lock(&chip->reg_lock); if (substream->runtime->periods != CS46XX_FRAGS) snd_cs46xx_playback_transfer(substream); { unsigned int tmp; tmp = snd_cs46xx_peek(chip, BA1_PCTL); tmp &= 0x0000ffff; snd_cs46xx_poke(chip, BA1_PCTL, chip->play_ctl | tmp); } spin_unlock(&chip->reg_lock); #endif break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: #ifdef CONFIG_SND_CS46XX_NEW_DSP /* magic mute channel */ snd_cs46xx_poke(chip, (cpcm->pcm_channel->pcm_reader_scb->address + SCBVolumeCtrl) << 2, 0xffffffff); if (!cpcm->pcm_channel->unlinked) cs46xx_dsp_pcm_unlink(chip,cpcm->pcm_channel); #else spin_lock(&chip->reg_lock); { unsigned int tmp; tmp = snd_cs46xx_peek(chip, BA1_PCTL); tmp &= 0x0000ffff; snd_cs46xx_poke(chip, BA1_PCTL, tmp); } spin_unlock(&chip->reg_lock); #endif break; default: result = -EINVAL; break; } return result; } static int snd_cs46xx_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); unsigned int tmp; int result = 0; spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: tmp = snd_cs46xx_peek(chip, BA1_CCTL); tmp &= 0xffff0000; snd_cs46xx_poke(chip, BA1_CCTL, chip->capt.ctl | tmp); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: tmp = snd_cs46xx_peek(chip, BA1_CCTL); tmp &= 0xffff0000; snd_cs46xx_poke(chip, BA1_CCTL, tmp); break; default: result = -EINVAL; break; } spin_unlock(&chip->reg_lock); return result; } #ifdef 
CONFIG_SND_CS46XX_NEW_DSP static int _cs46xx_adjust_sample_rate (struct snd_cs46xx *chip, struct snd_cs46xx_pcm *cpcm, int sample_rate) { /* If PCMReaderSCB and SrcTaskSCB not created yet ... */ if ( cpcm->pcm_channel == NULL) { cpcm->pcm_channel = cs46xx_dsp_create_pcm_channel (chip, sample_rate, cpcm, cpcm->hw_buf.addr,cpcm->pcm_channel_id); if (cpcm->pcm_channel == NULL) { snd_printk(KERN_ERR "cs46xx: failed to create virtual PCM channel\n"); return -ENOMEM; } cpcm->pcm_channel->sample_rate = sample_rate; } else /* if sample rate is changed */ if ((int)cpcm->pcm_channel->sample_rate != sample_rate) { int unlinked = cpcm->pcm_channel->unlinked; cs46xx_dsp_destroy_pcm_channel (chip,cpcm->pcm_channel); if ( (cpcm->pcm_channel = cs46xx_dsp_create_pcm_channel (chip, sample_rate, cpcm, cpcm->hw_buf.addr, cpcm->pcm_channel_id)) == NULL) { snd_printk(KERN_ERR "cs46xx: failed to re-create virtual PCM channel\n"); return -ENOMEM; } if (!unlinked) cs46xx_dsp_pcm_link (chip,cpcm->pcm_channel); cpcm->pcm_channel->sample_rate = sample_rate; } return 0; } #endif static int snd_cs46xx_playback_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_cs46xx_pcm *cpcm; int err; #ifdef CONFIG_SND_CS46XX_NEW_DSP struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); int sample_rate = params_rate(hw_params); int period_size = params_period_bytes(hw_params); #endif cpcm = runtime->private_data; #ifdef CONFIG_SND_CS46XX_NEW_DSP if (snd_BUG_ON(!sample_rate)) return -ENXIO; mutex_lock(&chip->spos_mutex); if (_cs46xx_adjust_sample_rate (chip,cpcm,sample_rate)) { mutex_unlock(&chip->spos_mutex); return -ENXIO; } snd_BUG_ON(!cpcm->pcm_channel); if (!cpcm->pcm_channel) { mutex_unlock(&chip->spos_mutex); return -ENXIO; } if (cs46xx_dsp_pcm_channel_set_period (chip,cpcm->pcm_channel,period_size)) { mutex_unlock(&chip->spos_mutex); return -EINVAL; } snd_printdd ("period_size (%d), periods (%d) 
buffer_size(%d)\n", period_size, params_periods(hw_params), params_buffer_bytes(hw_params)); #endif if (params_periods(hw_params) == CS46XX_FRAGS) { if (runtime->dma_area != cpcm->hw_buf.area) snd_pcm_lib_free_pages(substream); runtime->dma_area = cpcm->hw_buf.area; runtime->dma_addr = cpcm->hw_buf.addr; runtime->dma_bytes = cpcm->hw_buf.bytes; #ifdef CONFIG_SND_CS46XX_NEW_DSP if (cpcm->pcm_channel_id == DSP_PCM_MAIN_CHANNEL) { substream->ops = &snd_cs46xx_playback_ops; } else if (cpcm->pcm_channel_id == DSP_PCM_REAR_CHANNEL) { substream->ops = &snd_cs46xx_playback_rear_ops; } else if (cpcm->pcm_channel_id == DSP_PCM_CENTER_LFE_CHANNEL) { substream->ops = &snd_cs46xx_playback_clfe_ops; } else if (cpcm->pcm_channel_id == DSP_IEC958_CHANNEL) { substream->ops = &snd_cs46xx_playback_iec958_ops; } else { snd_BUG(); } #else substream->ops = &snd_cs46xx_playback_ops; #endif } else { if (runtime->dma_area == cpcm->hw_buf.area) { runtime->dma_area = NULL; runtime->dma_addr = 0; runtime->dma_bytes = 0; } if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) { #ifdef CONFIG_SND_CS46XX_NEW_DSP mutex_unlock(&chip->spos_mutex); #endif return err; } #ifdef CONFIG_SND_CS46XX_NEW_DSP if (cpcm->pcm_channel_id == DSP_PCM_MAIN_CHANNEL) { substream->ops = &snd_cs46xx_playback_indirect_ops; } else if (cpcm->pcm_channel_id == DSP_PCM_REAR_CHANNEL) { substream->ops = &snd_cs46xx_playback_indirect_rear_ops; } else if (cpcm->pcm_channel_id == DSP_PCM_CENTER_LFE_CHANNEL) { substream->ops = &snd_cs46xx_playback_indirect_clfe_ops; } else if (cpcm->pcm_channel_id == DSP_IEC958_CHANNEL) { substream->ops = &snd_cs46xx_playback_indirect_iec958_ops; } else { snd_BUG(); } #else substream->ops = &snd_cs46xx_playback_indirect_ops; #endif } #ifdef CONFIG_SND_CS46XX_NEW_DSP mutex_unlock(&chip->spos_mutex); #endif return 0; } static int snd_cs46xx_playback_hw_free(struct snd_pcm_substream *substream) { /*struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);*/ struct 
snd_pcm_runtime *runtime = substream->runtime; struct snd_cs46xx_pcm *cpcm; cpcm = runtime->private_data; /* if play_back open fails, then this function is called and cpcm can actually be NULL here */ if (!cpcm) return -ENXIO; if (runtime->dma_area != cpcm->hw_buf.area) snd_pcm_lib_free_pages(substream); runtime->dma_area = NULL; runtime->dma_addr = 0; runtime->dma_bytes = 0; return 0; } static int snd_cs46xx_playback_prepare(struct snd_pcm_substream *substream) { unsigned int tmp; unsigned int pfie; struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_cs46xx_pcm *cpcm; cpcm = runtime->private_data; #ifdef CONFIG_SND_CS46XX_NEW_DSP if (snd_BUG_ON(!cpcm->pcm_channel)) return -ENXIO; pfie = snd_cs46xx_peek(chip, (cpcm->pcm_channel->pcm_reader_scb->address + 1) << 2 ); pfie &= ~0x0000f03f; #else /* old dsp */ pfie = snd_cs46xx_peek(chip, BA1_PFIE); pfie &= ~0x0000f03f; #endif cpcm->shift = 2; /* if to convert from stereo to mono */ if (runtime->channels == 1) { cpcm->shift--; pfie |= 0x00002000; } /* if to convert from 8 bit to 16 bit */ if (snd_pcm_format_width(runtime->format) == 8) { cpcm->shift--; pfie |= 0x00001000; } /* if to convert to unsigned */ if (snd_pcm_format_unsigned(runtime->format)) pfie |= 0x00008000; /* Never convert byte order when sample stream is 8 bit */ if (snd_pcm_format_width(runtime->format) != 8) { /* convert from big endian to little endian */ if (snd_pcm_format_big_endian(runtime->format)) pfie |= 0x00004000; } memset(&cpcm->pcm_rec, 0, sizeof(cpcm->pcm_rec)); cpcm->pcm_rec.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream); cpcm->pcm_rec.hw_buffer_size = runtime->period_size * CS46XX_FRAGS << cpcm->shift; #ifdef CONFIG_SND_CS46XX_NEW_DSP tmp = snd_cs46xx_peek(chip, (cpcm->pcm_channel->pcm_reader_scb->address) << 2); tmp &= ~0x000003ff; tmp |= (4 << cpcm->shift) - 1; /* playback transaction count register */ snd_cs46xx_poke(chip, 
(cpcm->pcm_channel->pcm_reader_scb->address) << 2, tmp); /* playback format && interrupt enable */ snd_cs46xx_poke(chip, (cpcm->pcm_channel->pcm_reader_scb->address + 1) << 2, pfie | cpcm->pcm_channel->pcm_slot); #else snd_cs46xx_poke(chip, BA1_PBA, cpcm->hw_buf.addr); tmp = snd_cs46xx_peek(chip, BA1_PDTC); tmp &= ~0x000003ff; tmp |= (4 << cpcm->shift) - 1; snd_cs46xx_poke(chip, BA1_PDTC, tmp); snd_cs46xx_poke(chip, BA1_PFIE, pfie); snd_cs46xx_set_play_sample_rate(chip, runtime->rate); #endif return 0; } static int snd_cs46xx_capture_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; #ifdef CONFIG_SND_CS46XX_NEW_DSP cs46xx_dsp_pcm_ostream_set_period (chip, params_period_bytes(hw_params)); #endif if (runtime->periods == CS46XX_FRAGS) { if (runtime->dma_area != chip->capt.hw_buf.area) snd_pcm_lib_free_pages(substream); runtime->dma_area = chip->capt.hw_buf.area; runtime->dma_addr = chip->capt.hw_buf.addr; runtime->dma_bytes = chip->capt.hw_buf.bytes; substream->ops = &snd_cs46xx_capture_ops; } else { if (runtime->dma_area == chip->capt.hw_buf.area) { runtime->dma_area = NULL; runtime->dma_addr = 0; runtime->dma_bytes = 0; } if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) return err; substream->ops = &snd_cs46xx_capture_indirect_ops; } return 0; } static int snd_cs46xx_capture_hw_free(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->dma_area != chip->capt.hw_buf.area) snd_pcm_lib_free_pages(substream); runtime->dma_area = NULL; runtime->dma_addr = 0; runtime->dma_bytes = 0; return 0; } static int snd_cs46xx_capture_prepare(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = 
substream->runtime; snd_cs46xx_poke(chip, BA1_CBA, chip->capt.hw_buf.addr); chip->capt.shift = 2; memset(&chip->capt.pcm_rec, 0, sizeof(chip->capt.pcm_rec)); chip->capt.pcm_rec.sw_buffer_size = snd_pcm_lib_buffer_bytes(substream); chip->capt.pcm_rec.hw_buffer_size = runtime->period_size * CS46XX_FRAGS << 2; snd_cs46xx_set_capture_sample_rate(chip, runtime->rate); return 0; } static irqreturn_t snd_cs46xx_interrupt(int irq, void *dev_id) { struct snd_cs46xx *chip = dev_id; u32 status1; #ifdef CONFIG_SND_CS46XX_NEW_DSP struct dsp_spos_instance * ins = chip->dsp_spos_instance; u32 status2; int i; struct snd_cs46xx_pcm *cpcm = NULL; #endif /* * Read the Interrupt Status Register to clear the interrupt */ status1 = snd_cs46xx_peekBA0(chip, BA0_HISR); if ((status1 & 0x7fffffff) == 0) { snd_cs46xx_pokeBA0(chip, BA0_HICR, HICR_CHGM | HICR_IEV); return IRQ_NONE; } #ifdef CONFIG_SND_CS46XX_NEW_DSP status2 = snd_cs46xx_peekBA0(chip, BA0_HSR0); for (i = 0; i < DSP_MAX_PCM_CHANNELS; ++i) { if (i <= 15) { if ( status1 & (1 << i) ) { if (i == CS46XX_DSP_CAPTURE_CHANNEL) { if (chip->capt.substream) snd_pcm_period_elapsed(chip->capt.substream); } else { if (ins->pcm_channels[i].active && ins->pcm_channels[i].private_data && !ins->pcm_channels[i].unlinked) { cpcm = ins->pcm_channels[i].private_data; snd_pcm_period_elapsed(cpcm->substream); } } } } else { if ( status2 & (1 << (i - 16))) { if (ins->pcm_channels[i].active && ins->pcm_channels[i].private_data && !ins->pcm_channels[i].unlinked) { cpcm = ins->pcm_channels[i].private_data; snd_pcm_period_elapsed(cpcm->substream); } } } } #else /* old dsp */ if ((status1 & HISR_VC0) && chip->playback_pcm) { if (chip->playback_pcm->substream) snd_pcm_period_elapsed(chip->playback_pcm->substream); } if ((status1 & HISR_VC1) && chip->pcm) { if (chip->capt.substream) snd_pcm_period_elapsed(chip->capt.substream); } #endif if ((status1 & HISR_MIDI) && chip->rmidi) { unsigned char c; spin_lock(&chip->reg_lock); while ((snd_cs46xx_peekBA0(chip, 
BA0_MIDSR) & MIDSR_RBE) == 0) { c = snd_cs46xx_peekBA0(chip, BA0_MIDRP); if ((chip->midcr & MIDCR_RIE) == 0) continue; snd_rawmidi_receive(chip->midi_input, &c, 1); } while ((snd_cs46xx_peekBA0(chip, BA0_MIDSR) & MIDSR_TBF) == 0) { if ((chip->midcr & MIDCR_TIE) == 0) break; if (snd_rawmidi_transmit(chip->midi_output, &c, 1) != 1) { chip->midcr &= ~MIDCR_TIE; snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr); break; } snd_cs46xx_pokeBA0(chip, BA0_MIDWP, c); } spin_unlock(&chip->reg_lock); } /* * EOI to the PCI part....reenables interrupts */ snd_cs46xx_pokeBA0(chip, BA0_HICR, HICR_CHGM | HICR_IEV); return IRQ_HANDLED; } static struct snd_pcm_hardware snd_cs46xx_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/ /*SNDRV_PCM_INFO_RESUME*/), .formats = (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_U16_LE | SNDRV_PCM_FMTBIT_U16_BE), .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 5500, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (256 * 1024), .period_bytes_min = CS46XX_MIN_PERIOD_SIZE, .period_bytes_max = CS46XX_MAX_PERIOD_SIZE, .periods_min = CS46XX_FRAGS, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware snd_cs46xx_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER /*|*/ /*SNDRV_PCM_INFO_RESUME*/), .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 5500, .rate_max = 48000, .channels_min = 2, .channels_max = 2, .buffer_bytes_max = (256 * 1024), .period_bytes_min = CS46XX_MIN_PERIOD_SIZE, .period_bytes_max = CS46XX_MAX_PERIOD_SIZE, .periods_min = CS46XX_FRAGS, .periods_max = 1024, .fifo_size = 0, }; #ifdef CONFIG_SND_CS46XX_NEW_DSP static unsigned int period_sizes[] = { 32, 64, 128, 256, 512, 1024, 2048 }; static struct snd_pcm_hw_constraint_list 
hw_constraints_period_sizes = {
	.count = ARRAY_SIZE(period_sizes),
	.list = period_sizes,
	.mask = 0
};
#endif

/* runtime->private_free callback: releases the per-substream state
 * allocated in _cs46xx_playback_open_channel() */
static void snd_cs46xx_pcm_free_substream(struct snd_pcm_runtime *runtime)
{
	kfree(runtime->private_data);
}

/*
 * Common open handler shared by all playback devices.
 *
 * Allocates the per-substream state (struct snd_cs46xx_pcm) plus one
 * page of DMA memory used as the hardware buffer, and records which
 * DSP PCM channel id (front / rear / center-LFE / IEC958) this
 * substream belongs to.  Note cpcm->pcm_channel is left NULL here —
 * the DSP channel object is attached later (it is torn down again in
 * snd_cs46xx_playback_close()).
 *
 * Returns 0 on success or -ENOMEM if either allocation fails.
 */
static int _cs46xx_playback_open_channel (struct snd_pcm_substream *substream,int pcm_channel_id)
{
	struct snd_cs46xx *chip = snd_pcm_substream_chip(substream);
	struct snd_cs46xx_pcm * cpcm;
	struct snd_pcm_runtime *runtime = substream->runtime;

	cpcm = kzalloc(sizeof(*cpcm), GFP_KERNEL);
	if (cpcm == NULL)
		return -ENOMEM;
	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci),
				PAGE_SIZE, &cpcm->hw_buf) < 0) {
		/* DMA page failed: undo the kzalloc before bailing out */
		kfree(cpcm);
		return -ENOMEM;
	}

	runtime->hw = snd_cs46xx_playback;
	runtime->private_data = cpcm;
	runtime->private_free = snd_cs46xx_pcm_free_substream;

	cpcm->substream = substream;
#ifdef CONFIG_SND_CS46XX_NEW_DSP
	mutex_lock(&chip->spos_mutex);
	cpcm->pcm_channel = NULL;
	cpcm->pcm_channel_id = pcm_channel_id;

	/* new-DSP mode only supports the discrete period sizes listed in
	 * period_sizes[] (32..2048 bytes) */
	snd_pcm_hw_constraint_list(runtime, 0,
				   SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
				   &hw_constraints_period_sizes);
	mutex_unlock(&chip->spos_mutex);
#else
	chip->playback_pcm = cpcm; /* HACK */
#endif

	if (chip->accept_valid)
		substream->runtime->hw.info |= SNDRV_PCM_INFO_MMAP_VALID;
	/* bump the chip's activity/power refcount */
	chip->active_ctrl(chip, 1);

	return 0;
}

/* front (main) channel open — thin wrapper over the common helper */
static int snd_cs46xx_playback_open(struct snd_pcm_substream *substream)
{
	snd_printdd("open front channel\n");
	return _cs46xx_playback_open_channel(substream,DSP_PCM_MAIN_CHANNEL);
}

#ifdef CONFIG_SND_CS46XX_NEW_DSP
static int snd_cs46xx_playback_open_rear(struct snd_pcm_substream *substream)
{
	snd_printdd("open rear channel\n");
	return _cs46xx_playback_open_channel(substream,DSP_PCM_REAR_CHANNEL);
}

static int snd_cs46xx_playback_open_clfe(struct snd_pcm_substream *substream)
{
	snd_printdd("open center - LFE channel\n");
	return _cs46xx_playback_open_channel(substream,DSP_PCM_CENTER_LFE_CHANNEL);
}

static int snd_cs46xx_playback_open_iec958(struct snd_pcm_substream *substream)
{
	struct snd_cs46xx *chip =
snd_pcm_substream_chip(substream); snd_printdd("open raw iec958 channel\n"); mutex_lock(&chip->spos_mutex); cs46xx_iec958_pre_open (chip); mutex_unlock(&chip->spos_mutex); return _cs46xx_playback_open_channel(substream,DSP_IEC958_CHANNEL); } static int snd_cs46xx_playback_close(struct snd_pcm_substream *substream); static int snd_cs46xx_playback_close_iec958(struct snd_pcm_substream *substream) { int err; struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); snd_printdd("close raw iec958 channel\n"); err = snd_cs46xx_playback_close(substream); mutex_lock(&chip->spos_mutex); cs46xx_iec958_post_close (chip); mutex_unlock(&chip->spos_mutex); return err; } #endif static int snd_cs46xx_capture_open(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), PAGE_SIZE, &chip->capt.hw_buf) < 0) return -ENOMEM; chip->capt.substream = substream; substream->runtime->hw = snd_cs46xx_capture; if (chip->accept_valid) substream->runtime->hw.info |= SNDRV_PCM_INFO_MMAP_VALID; chip->active_ctrl(chip, 1); #ifdef CONFIG_SND_CS46XX_NEW_DSP snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, &hw_constraints_period_sizes); #endif return 0; } static int snd_cs46xx_playback_close(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; struct snd_cs46xx_pcm * cpcm; cpcm = runtime->private_data; /* when playback_open fails, then cpcm can be NULL */ if (!cpcm) return -ENXIO; #ifdef CONFIG_SND_CS46XX_NEW_DSP mutex_lock(&chip->spos_mutex); if (cpcm->pcm_channel) { cs46xx_dsp_destroy_pcm_channel(chip,cpcm->pcm_channel); cpcm->pcm_channel = NULL; } mutex_unlock(&chip->spos_mutex); #else chip->playback_pcm = NULL; #endif cpcm->substream = NULL; snd_dma_free_pages(&cpcm->hw_buf); chip->active_ctrl(chip, -1); return 0; } static int 
snd_cs46xx_capture_close(struct snd_pcm_substream *substream) { struct snd_cs46xx *chip = snd_pcm_substream_chip(substream); chip->capt.substream = NULL; snd_dma_free_pages(&chip->capt.hw_buf); chip->active_ctrl(chip, -1); return 0; } #ifdef CONFIG_SND_CS46XX_NEW_DSP static struct snd_pcm_ops snd_cs46xx_playback_rear_ops = { .open = snd_cs46xx_playback_open_rear, .close = snd_cs46xx_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cs46xx_playback_hw_params, .hw_free = snd_cs46xx_playback_hw_free, .prepare = snd_cs46xx_playback_prepare, .trigger = snd_cs46xx_playback_trigger, .pointer = snd_cs46xx_playback_direct_pointer, }; static struct snd_pcm_ops snd_cs46xx_playback_indirect_rear_ops = { .open = snd_cs46xx_playback_open_rear, .close = snd_cs46xx_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cs46xx_playback_hw_params, .hw_free = snd_cs46xx_playback_hw_free, .prepare = snd_cs46xx_playback_prepare, .trigger = snd_cs46xx_playback_trigger, .pointer = snd_cs46xx_playback_indirect_pointer, .ack = snd_cs46xx_playback_transfer, }; static struct snd_pcm_ops snd_cs46xx_playback_clfe_ops = { .open = snd_cs46xx_playback_open_clfe, .close = snd_cs46xx_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cs46xx_playback_hw_params, .hw_free = snd_cs46xx_playback_hw_free, .prepare = snd_cs46xx_playback_prepare, .trigger = snd_cs46xx_playback_trigger, .pointer = snd_cs46xx_playback_direct_pointer, }; static struct snd_pcm_ops snd_cs46xx_playback_indirect_clfe_ops = { .open = snd_cs46xx_playback_open_clfe, .close = snd_cs46xx_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cs46xx_playback_hw_params, .hw_free = snd_cs46xx_playback_hw_free, .prepare = snd_cs46xx_playback_prepare, .trigger = snd_cs46xx_playback_trigger, .pointer = snd_cs46xx_playback_indirect_pointer, .ack = snd_cs46xx_playback_transfer, }; static struct snd_pcm_ops snd_cs46xx_playback_iec958_ops = { .open = snd_cs46xx_playback_open_iec958, .close = 
snd_cs46xx_playback_close_iec958, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cs46xx_playback_hw_params, .hw_free = snd_cs46xx_playback_hw_free, .prepare = snd_cs46xx_playback_prepare, .trigger = snd_cs46xx_playback_trigger, .pointer = snd_cs46xx_playback_direct_pointer, }; static struct snd_pcm_ops snd_cs46xx_playback_indirect_iec958_ops = { .open = snd_cs46xx_playback_open_iec958, .close = snd_cs46xx_playback_close_iec958, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cs46xx_playback_hw_params, .hw_free = snd_cs46xx_playback_hw_free, .prepare = snd_cs46xx_playback_prepare, .trigger = snd_cs46xx_playback_trigger, .pointer = snd_cs46xx_playback_indirect_pointer, .ack = snd_cs46xx_playback_transfer, }; #endif static struct snd_pcm_ops snd_cs46xx_playback_ops = { .open = snd_cs46xx_playback_open, .close = snd_cs46xx_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cs46xx_playback_hw_params, .hw_free = snd_cs46xx_playback_hw_free, .prepare = snd_cs46xx_playback_prepare, .trigger = snd_cs46xx_playback_trigger, .pointer = snd_cs46xx_playback_direct_pointer, }; static struct snd_pcm_ops snd_cs46xx_playback_indirect_ops = { .open = snd_cs46xx_playback_open, .close = snd_cs46xx_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cs46xx_playback_hw_params, .hw_free = snd_cs46xx_playback_hw_free, .prepare = snd_cs46xx_playback_prepare, .trigger = snd_cs46xx_playback_trigger, .pointer = snd_cs46xx_playback_indirect_pointer, .ack = snd_cs46xx_playback_transfer, }; static struct snd_pcm_ops snd_cs46xx_capture_ops = { .open = snd_cs46xx_capture_open, .close = snd_cs46xx_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cs46xx_capture_hw_params, .hw_free = snd_cs46xx_capture_hw_free, .prepare = snd_cs46xx_capture_prepare, .trigger = snd_cs46xx_capture_trigger, .pointer = snd_cs46xx_capture_direct_pointer, }; static struct snd_pcm_ops snd_cs46xx_capture_indirect_ops = { .open = snd_cs46xx_capture_open, .close = snd_cs46xx_capture_close, .ioctl = 
snd_pcm_lib_ioctl, .hw_params = snd_cs46xx_capture_hw_params, .hw_free = snd_cs46xx_capture_hw_free, .prepare = snd_cs46xx_capture_prepare, .trigger = snd_cs46xx_capture_trigger, .pointer = snd_cs46xx_capture_indirect_pointer, .ack = snd_cs46xx_capture_transfer, }; #ifdef CONFIG_SND_CS46XX_NEW_DSP #define MAX_PLAYBACK_CHANNELS (DSP_MAX_PCM_CHANNELS - 1) #else #define MAX_PLAYBACK_CHANNELS 1 #endif int __devinit snd_cs46xx_pcm(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "CS46xx", device, MAX_PLAYBACK_CHANNELS, 1, &pcm)) < 0) return err; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cs46xx_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cs46xx_capture_ops); /* global setup */ pcm->info_flags = 0; strcpy(pcm->name, "CS46xx"); chip->pcm = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } #ifdef CONFIG_SND_CS46XX_NEW_DSP int __devinit snd_cs46xx_pcm_rear(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "CS46xx - Rear", device, MAX_PLAYBACK_CHANNELS, 0, &pcm)) < 0) return err; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cs46xx_playback_rear_ops); /* global setup */ pcm->info_flags = 0; strcpy(pcm->name, "CS46xx - Rear"); chip->pcm_rear = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } int __devinit snd_cs46xx_pcm_center_lfe(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "CS46xx - Center LFE", device, MAX_PLAYBACK_CHANNELS, 0, &pcm)) < 0) return err; pcm->private_data = 
chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cs46xx_playback_clfe_ops); /* global setup */ pcm->info_flags = 0; strcpy(pcm->name, "CS46xx - Center LFE"); chip->pcm_center_lfe = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } int __devinit snd_cs46xx_pcm_iec958(struct snd_cs46xx *chip, int device, struct snd_pcm ** rpcm) { struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(chip->card, "CS46xx - IEC958", device, 1, 0, &pcm)) < 0) return err; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_cs46xx_playback_iec958_ops); /* global setup */ pcm->info_flags = 0; strcpy(pcm->name, "CS46xx - IEC958"); chip->pcm_rear = pcm; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 256*1024); if (rpcm) *rpcm = pcm; return 0; } #endif /* * Mixer routines */ static void snd_cs46xx_mixer_free_ac97_bus(struct snd_ac97_bus *bus) { struct snd_cs46xx *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_cs46xx_mixer_free_ac97(struct snd_ac97 *ac97) { struct snd_cs46xx *chip = ac97->private_data; if (snd_BUG_ON(ac97 != chip->ac97[CS46XX_PRIMARY_CODEC_INDEX] && ac97 != chip->ac97[CS46XX_SECONDARY_CODEC_INDEX])) return; if (ac97 == chip->ac97[CS46XX_PRIMARY_CODEC_INDEX]) { chip->ac97[CS46XX_PRIMARY_CODEC_INDEX] = NULL; chip->eapd_switch = NULL; } else chip->ac97[CS46XX_SECONDARY_CODEC_INDEX] = NULL; } static int snd_cs46xx_vol_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 0x7fff; return 0; } static int snd_cs46xx_vol_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value; unsigned int val = snd_cs46xx_peek(chip, 
reg); ucontrol->value.integer.value[0] = 0xffff - (val >> 16); ucontrol->value.integer.value[1] = 0xffff - (val & 0xffff); return 0; } static int snd_cs46xx_vol_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value; unsigned int val = ((0xffff - ucontrol->value.integer.value[0]) << 16 | (0xffff - ucontrol->value.integer.value[1])); unsigned int old = snd_cs46xx_peek(chip, reg); int change = (old != val); if (change) { snd_cs46xx_poke(chip, reg, val); } return change; } #ifdef CONFIG_SND_CS46XX_NEW_DSP static int snd_cs46xx_vol_dac_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = chip->dsp_spos_instance->dac_volume_left; ucontrol->value.integer.value[1] = chip->dsp_spos_instance->dac_volume_right; return 0; } static int snd_cs46xx_vol_dac_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int change = 0; if (chip->dsp_spos_instance->dac_volume_right != ucontrol->value.integer.value[0] || chip->dsp_spos_instance->dac_volume_left != ucontrol->value.integer.value[1]) { cs46xx_dsp_set_dac_volume(chip, ucontrol->value.integer.value[0], ucontrol->value.integer.value[1]); change = 1; } return change; } #if 0 static int snd_cs46xx_vol_iec958_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = chip->dsp_spos_instance->spdif_input_volume_left; ucontrol->value.integer.value[1] = chip->dsp_spos_instance->spdif_input_volume_right; return 0; } static int snd_cs46xx_vol_iec958_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int change = 0; if (chip->dsp_spos_instance->spdif_input_volume_left != 
ucontrol->value.integer.value[0] || chip->dsp_spos_instance->spdif_input_volume_right!= ucontrol->value.integer.value[1]) { cs46xx_dsp_set_iec958_volume (chip, ucontrol->value.integer.value[0], ucontrol->value.integer.value[1]); change = 1; } return change; } #endif #define snd_mixer_boolean_info snd_ctl_boolean_mono_info static int snd_cs46xx_iec958_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value; if (reg == CS46XX_MIXER_SPDIF_OUTPUT_ELEMENT) ucontrol->value.integer.value[0] = (chip->dsp_spos_instance->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED); else ucontrol->value.integer.value[0] = chip->dsp_spos_instance->spdif_status_in; return 0; } static int snd_cs46xx_iec958_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int change, res; switch (kcontrol->private_value) { case CS46XX_MIXER_SPDIF_OUTPUT_ELEMENT: mutex_lock(&chip->spos_mutex); change = (chip->dsp_spos_instance->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED); if (ucontrol->value.integer.value[0] && !change) cs46xx_dsp_enable_spdif_out(chip); else if (change && !ucontrol->value.integer.value[0]) cs46xx_dsp_disable_spdif_out(chip); res = (change != (chip->dsp_spos_instance->spdif_status_out & DSP_SPDIF_STATUS_OUTPUT_ENABLED)); mutex_unlock(&chip->spos_mutex); break; case CS46XX_MIXER_SPDIF_INPUT_ELEMENT: change = chip->dsp_spos_instance->spdif_status_in; if (ucontrol->value.integer.value[0] && !change) { cs46xx_dsp_enable_spdif_in(chip); /* restore volume */ } else if (change && !ucontrol->value.integer.value[0]) cs46xx_dsp_disable_spdif_in(chip); res = (change != chip->dsp_spos_instance->spdif_status_in); break; default: res = -EINVAL; snd_BUG(); /* should never happen ... 
*/ } return res; } static int snd_cs46xx_adc_capture_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); struct dsp_spos_instance * ins = chip->dsp_spos_instance; if (ins->adc_input != NULL) ucontrol->value.integer.value[0] = 1; else ucontrol->value.integer.value[0] = 0; return 0; } static int snd_cs46xx_adc_capture_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); struct dsp_spos_instance * ins = chip->dsp_spos_instance; int change = 0; if (ucontrol->value.integer.value[0] && !ins->adc_input) { cs46xx_dsp_enable_adc_capture(chip); change = 1; } else if (!ucontrol->value.integer.value[0] && ins->adc_input) { cs46xx_dsp_disable_adc_capture(chip); change = 1; } return change; } static int snd_cs46xx_pcm_capture_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); struct dsp_spos_instance * ins = chip->dsp_spos_instance; if (ins->pcm_input != NULL) ucontrol->value.integer.value[0] = 1; else ucontrol->value.integer.value[0] = 0; return 0; } static int snd_cs46xx_pcm_capture_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); struct dsp_spos_instance * ins = chip->dsp_spos_instance; int change = 0; if (ucontrol->value.integer.value[0] && !ins->pcm_input) { cs46xx_dsp_enable_pcm_capture(chip); change = 1; } else if (!ucontrol->value.integer.value[0] && ins->pcm_input) { cs46xx_dsp_disable_pcm_capture(chip); change = 1; } return change; } static int snd_herc_spdif_select_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int val1 = snd_cs46xx_peekBA0(chip, BA0_EGPIODR); if (val1 & EGPIODR_GPOE0) ucontrol->value.integer.value[0] = 1; else ucontrol->value.integer.value[0] = 0; return 0; } /* * 
Game Theatre XP card - EGPIO[0] is used to select SPDIF input optical or coaxial. */ static int snd_herc_spdif_select_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); int val1 = snd_cs46xx_peekBA0(chip, BA0_EGPIODR); int val2 = snd_cs46xx_peekBA0(chip, BA0_EGPIOPTR); if (ucontrol->value.integer.value[0]) { /* optical is default */ snd_cs46xx_pokeBA0(chip, BA0_EGPIODR, EGPIODR_GPOE0 | val1); /* enable EGPIO0 output */ snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR, EGPIOPTR_GPPT0 | val2); /* open-drain on output */ } else { /* coaxial */ snd_cs46xx_pokeBA0(chip, BA0_EGPIODR, val1 & ~EGPIODR_GPOE0); /* disable */ snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR, val2 & ~EGPIOPTR_GPPT0); /* disable */ } /* checking diff from the EGPIO direction register should be enough */ return (val1 != (int)snd_cs46xx_peekBA0(chip, BA0_EGPIODR)); } static int snd_cs46xx_spdif_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_IEC958; uinfo->count = 1; return 0; } static int snd_cs46xx_spdif_default_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); struct dsp_spos_instance * ins = chip->dsp_spos_instance; mutex_lock(&chip->spos_mutex); ucontrol->value.iec958.status[0] = _wrap_all_bits((ins->spdif_csuv_default >> 24) & 0xff); ucontrol->value.iec958.status[1] = _wrap_all_bits((ins->spdif_csuv_default >> 16) & 0xff); ucontrol->value.iec958.status[2] = 0; ucontrol->value.iec958.status[3] = _wrap_all_bits((ins->spdif_csuv_default) & 0xff); mutex_unlock(&chip->spos_mutex); return 0; } static int snd_cs46xx_spdif_default_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx * chip = snd_kcontrol_chip(kcontrol); struct dsp_spos_instance * ins = chip->dsp_spos_instance; unsigned int val; int change; mutex_lock(&chip->spos_mutex); val = ((unsigned 
int)_wrap_all_bits(ucontrol->value.iec958.status[0]) << 24) | ((unsigned int)_wrap_all_bits(ucontrol->value.iec958.status[2]) << 16) | ((unsigned int)_wrap_all_bits(ucontrol->value.iec958.status[3])) | /* left and right validity bit */ (1 << 13) | (1 << 12); change = (unsigned int)ins->spdif_csuv_default != val; ins->spdif_csuv_default = val; if ( !(ins->spdif_status_out & DSP_SPDIF_STATUS_PLAYBACK_OPEN) ) cs46xx_poke_via_dsp (chip,SP_SPDOUT_CSUV,val); mutex_unlock(&chip->spos_mutex); return change; } static int snd_cs46xx_spdif_mask_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.iec958.status[0] = 0xff; ucontrol->value.iec958.status[1] = 0xff; ucontrol->value.iec958.status[2] = 0x00; ucontrol->value.iec958.status[3] = 0xff; return 0; } static int snd_cs46xx_spdif_stream_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); struct dsp_spos_instance * ins = chip->dsp_spos_instance; mutex_lock(&chip->spos_mutex); ucontrol->value.iec958.status[0] = _wrap_all_bits((ins->spdif_csuv_stream >> 24) & 0xff); ucontrol->value.iec958.status[1] = _wrap_all_bits((ins->spdif_csuv_stream >> 16) & 0xff); ucontrol->value.iec958.status[2] = 0; ucontrol->value.iec958.status[3] = _wrap_all_bits((ins->spdif_csuv_stream) & 0xff); mutex_unlock(&chip->spos_mutex); return 0; } static int snd_cs46xx_spdif_stream_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx * chip = snd_kcontrol_chip(kcontrol); struct dsp_spos_instance * ins = chip->dsp_spos_instance; unsigned int val; int change; mutex_lock(&chip->spos_mutex); val = ((unsigned int)_wrap_all_bits(ucontrol->value.iec958.status[0]) << 24) | ((unsigned int)_wrap_all_bits(ucontrol->value.iec958.status[1]) << 16) | ((unsigned int)_wrap_all_bits(ucontrol->value.iec958.status[3])) | /* left and right validity bit */ (1 << 13) | (1 << 12); change = ins->spdif_csuv_stream != val; 
ins->spdif_csuv_stream = val; if ( ins->spdif_status_out & DSP_SPDIF_STATUS_PLAYBACK_OPEN ) cs46xx_poke_via_dsp (chip,SP_SPDOUT_CSUV,val); mutex_unlock(&chip->spos_mutex); return change; } #endif /* CONFIG_SND_CS46XX_NEW_DSP */ static struct snd_kcontrol_new snd_cs46xx_controls[] __devinitdata = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "DAC Volume", .info = snd_cs46xx_vol_info, #ifndef CONFIG_SND_CS46XX_NEW_DSP .get = snd_cs46xx_vol_get, .put = snd_cs46xx_vol_put, .private_value = BA1_PVOL, #else .get = snd_cs46xx_vol_dac_get, .put = snd_cs46xx_vol_dac_put, #endif }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "ADC Volume", .info = snd_cs46xx_vol_info, .get = snd_cs46xx_vol_get, .put = snd_cs46xx_vol_put, #ifndef CONFIG_SND_CS46XX_NEW_DSP .private_value = BA1_CVOL, #else .private_value = (VARIDECIMATE_SCB_ADDR + 0xE) << 2, #endif }, #ifdef CONFIG_SND_CS46XX_NEW_DSP { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "ADC Capture Switch", .info = snd_mixer_boolean_info, .get = snd_cs46xx_adc_capture_get, .put = snd_cs46xx_adc_capture_put }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "DAC Capture Switch", .info = snd_mixer_boolean_info, .get = snd_cs46xx_pcm_capture_get, .put = snd_cs46xx_pcm_capture_put }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH), .info = snd_mixer_boolean_info, .get = snd_cs46xx_iec958_get, .put = snd_cs46xx_iec958_put, .private_value = CS46XX_MIXER_SPDIF_OUTPUT_ELEMENT, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SNDRV_CTL_NAME_IEC958("Input ",NONE,SWITCH), .info = snd_mixer_boolean_info, .get = snd_cs46xx_iec958_get, .put = snd_cs46xx_iec958_put, .private_value = CS46XX_MIXER_SPDIF_INPUT_ELEMENT, }, #if 0 /* Input IEC958 volume does not work for the moment. 
(Benny) */ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = SNDRV_CTL_NAME_IEC958("Input ",NONE,VOLUME), .info = snd_cs46xx_vol_info, .get = snd_cs46xx_vol_iec958_get, .put = snd_cs46xx_vol_iec958_put, .private_value = (ASYNCRX_SCB_ADDR + 0xE) << 2, }, #endif { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,DEFAULT), .info = snd_cs46xx_spdif_info, .get = snd_cs46xx_spdif_default_get, .put = snd_cs46xx_spdif_default_put, }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,MASK), .info = snd_cs46xx_spdif_info, .get = snd_cs46xx_spdif_mask_get, .access = SNDRV_CTL_ELEM_ACCESS_READ }, { .iface = SNDRV_CTL_ELEM_IFACE_PCM, .name = SNDRV_CTL_NAME_IEC958("",PLAYBACK,PCM_STREAM), .info = snd_cs46xx_spdif_info, .get = snd_cs46xx_spdif_stream_get, .put = snd_cs46xx_spdif_stream_put }, #endif }; #ifdef CONFIG_SND_CS46XX_NEW_DSP /* set primary cs4294 codec into Extended Audio Mode */ static int snd_cs46xx_front_dup_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); unsigned short val; val = snd_ac97_read(chip->ac97[CS46XX_PRIMARY_CODEC_INDEX], AC97_CSR_ACMODE); ucontrol->value.integer.value[0] = (val & 0x200) ? 0 : 1; return 0; } static int snd_cs46xx_front_dup_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_cs46xx *chip = snd_kcontrol_chip(kcontrol); return snd_ac97_update_bits(chip->ac97[CS46XX_PRIMARY_CODEC_INDEX], AC97_CSR_ACMODE, 0x200, ucontrol->value.integer.value[0] ? 
0 : 0x200); } static struct snd_kcontrol_new snd_cs46xx_front_dup_ctl = { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Duplicate Front", .info = snd_mixer_boolean_info, .get = snd_cs46xx_front_dup_get, .put = snd_cs46xx_front_dup_put, }; #endif #ifdef CONFIG_SND_CS46XX_NEW_DSP /* Only available on the Hercules Game Theater XP soundcard */ static struct snd_kcontrol_new snd_hercules_controls[] = { { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Optical/Coaxial SPDIF Input Switch", .info = snd_mixer_boolean_info, .get = snd_herc_spdif_select_get, .put = snd_herc_spdif_select_put, }, }; static void snd_cs46xx_codec_reset (struct snd_ac97 * ac97) { unsigned long end_time; int err; /* reset to defaults */ snd_ac97_write(ac97, AC97_RESET, 0); /* set the desired CODEC mode */ if (ac97->num == CS46XX_PRIMARY_CODEC_INDEX) { snd_printdd("cs46xx: CODEC1 mode %04x\n", 0x0); snd_cs46xx_ac97_write(ac97, AC97_CSR_ACMODE, 0x0); } else if (ac97->num == CS46XX_SECONDARY_CODEC_INDEX) { snd_printdd("cs46xx: CODEC2 mode %04x\n", 0x3); snd_cs46xx_ac97_write(ac97, AC97_CSR_ACMODE, 0x3); } else { snd_BUG(); /* should never happen ... */ } udelay(50); /* it's necessary to wait awhile until registers are accessible after RESET */ /* because the PCM or MASTER volume registers can be modified, */ /* the REC_GAIN register is used for tests */ end_time = jiffies + HZ; do { unsigned short ext_mid; /* use preliminary reads to settle the communication */ snd_ac97_read(ac97, AC97_RESET); snd_ac97_read(ac97, AC97_VENDOR_ID1); snd_ac97_read(ac97, AC97_VENDOR_ID2); /* modem? 
*/ ext_mid = snd_ac97_read(ac97, AC97_EXTENDED_MID); if (ext_mid != 0xffff && (ext_mid & 1) != 0) return; /* test if we can write to the record gain volume register */ snd_ac97_write(ac97, AC97_REC_GAIN, 0x8a05); if ((err = snd_ac97_read(ac97, AC97_REC_GAIN)) == 0x8a05) return; msleep(10); } while (time_after_eq(end_time, jiffies)); snd_printk(KERN_ERR "CS46xx secondary codec doesn't respond!\n"); } #endif static int __devinit cs46xx_detect_codec(struct snd_cs46xx *chip, int codec) { int idx, err; struct snd_ac97_template ac97; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.private_free = snd_cs46xx_mixer_free_ac97; ac97.num = codec; if (chip->amplifier_ctrl == amp_voyetra) ac97.scaps = AC97_SCAP_INV_EAPD; if (codec == CS46XX_SECONDARY_CODEC_INDEX) { snd_cs46xx_codec_write(chip, AC97_RESET, 0, codec); udelay(10); if (snd_cs46xx_codec_read(chip, AC97_RESET, codec) & 0x8000) { snd_printdd("snd_cs46xx: seconadry codec not present\n"); return -ENXIO; } } snd_cs46xx_codec_write(chip, AC97_MASTER, 0x8000, codec); for (idx = 0; idx < 100; ++idx) { if (snd_cs46xx_codec_read(chip, AC97_MASTER, codec) == 0x8000) { err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97[codec]); return err; } msleep(10); } snd_printdd("snd_cs46xx: codec %d detection timeout\n", codec); return -ENXIO; } int __devinit snd_cs46xx_mixer(struct snd_cs46xx *chip, int spdif_device) { struct snd_card *card = chip->card; struct snd_ctl_elem_id id; int err; unsigned int idx; static struct snd_ac97_bus_ops ops = { #ifdef CONFIG_SND_CS46XX_NEW_DSP .reset = snd_cs46xx_codec_reset, #endif .write = snd_cs46xx_ac97_write, .read = snd_cs46xx_ac97_read, }; /* detect primary codec */ chip->nr_ac97_codecs = 0; snd_printdd("snd_cs46xx: detecting primary codec\n"); if ((err = snd_ac97_bus(card, 0, &ops, chip, &chip->ac97_bus)) < 0) return err; chip->ac97_bus->private_free = snd_cs46xx_mixer_free_ac97_bus; if (cs46xx_detect_codec(chip, CS46XX_PRIMARY_CODEC_INDEX) < 0) return -ENXIO; 
chip->nr_ac97_codecs = 1; #ifdef CONFIG_SND_CS46XX_NEW_DSP snd_printdd("snd_cs46xx: detecting seconadry codec\n"); /* try detect a secondary codec */ if (! cs46xx_detect_codec(chip, CS46XX_SECONDARY_CODEC_INDEX)) chip->nr_ac97_codecs = 2; #endif /* CONFIG_SND_CS46XX_NEW_DSP */ /* add cs4630 mixer controls */ for (idx = 0; idx < ARRAY_SIZE(snd_cs46xx_controls); idx++) { struct snd_kcontrol *kctl; kctl = snd_ctl_new1(&snd_cs46xx_controls[idx], chip); if (kctl && kctl->id.iface == SNDRV_CTL_ELEM_IFACE_PCM) kctl->id.device = spdif_device; if ((err = snd_ctl_add(card, kctl)) < 0) return err; } /* get EAPD mixer switch (for voyetra hack) */ memset(&id, 0, sizeof(id)); id.iface = SNDRV_CTL_ELEM_IFACE_MIXER; strcpy(id.name, "External Amplifier"); chip->eapd_switch = snd_ctl_find_id(chip->card, &id); #ifdef CONFIG_SND_CS46XX_NEW_DSP if (chip->nr_ac97_codecs == 1) { unsigned int id2 = chip->ac97[CS46XX_PRIMARY_CODEC_INDEX]->id & 0xffff; if (id2 == 0x592b || id2 == 0x592d) { err = snd_ctl_add(card, snd_ctl_new1(&snd_cs46xx_front_dup_ctl, chip)); if (err < 0) return err; snd_ac97_write_cache(chip->ac97[CS46XX_PRIMARY_CODEC_INDEX], AC97_CSR_ACMODE, 0x200); } } /* do soundcard specific mixer setup */ if (chip->mixer_init) { snd_printdd ("calling chip->mixer_init(chip);\n"); chip->mixer_init(chip); } #endif /* turn on amplifier */ chip->amplifier_ctrl(chip, 1); return 0; } /* * RawMIDI interface */ static void snd_cs46xx_midi_reset(struct snd_cs46xx *chip) { snd_cs46xx_pokeBA0(chip, BA0_MIDCR, MIDCR_MRST); udelay(100); snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr); } static int snd_cs46xx_midi_input_open(struct snd_rawmidi_substream *substream) { struct snd_cs46xx *chip = substream->rmidi->private_data; chip->active_ctrl(chip, 1); spin_lock_irq(&chip->reg_lock); chip->uartm |= CS46XX_MODE_INPUT; chip->midcr |= MIDCR_RXE; chip->midi_input = substream; if (!(chip->uartm & CS46XX_MODE_OUTPUT)) { snd_cs46xx_midi_reset(chip); } else { snd_cs46xx_pokeBA0(chip, BA0_MIDCR, 
						   chip->midcr);
	}
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/*
 * Close the raw MIDI input substream: disable the UART receiver and its
 * interrupt, detach the substream, and drop the usage reference taken in
 * the matching open.
 */
static int snd_cs46xx_midi_input_close(struct snd_rawmidi_substream *substream)
{
	struct snd_cs46xx *chip = substream->rmidi->private_data;

	spin_lock_irq(&chip->reg_lock);
	chip->midcr &= ~(MIDCR_RXE | MIDCR_RIE);	/* receiver and RX irq off */
	chip->midi_input = NULL;
	if (!(chip->uartm & CS46XX_MODE_OUTPUT)) {
		/* no output stream open: a full MIDI port reset is safe */
		snd_cs46xx_midi_reset(chip);
	} else {
		/* output still active: only update the control register */
		snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
	}
	chip->uartm &= ~CS46XX_MODE_INPUT;
	spin_unlock_irq(&chip->reg_lock);
	chip->active_ctrl(chip, -1);	/* balances the +1 from open */
	return 0;
}

/*
 * Open the raw MIDI output substream: bump the chip usage count, enable
 * the UART transmitter and remember the substream for later use.
 */
static int snd_cs46xx_midi_output_open(struct snd_rawmidi_substream *substream)
{
	struct snd_cs46xx *chip = substream->rmidi->private_data;

	chip->active_ctrl(chip, 1);

	spin_lock_irq(&chip->reg_lock);
	chip->uartm |= CS46XX_MODE_OUTPUT;
	chip->midcr |= MIDCR_TXE;	/* transmitter on */
	chip->midi_output = substream;
	if (!(chip->uartm & CS46XX_MODE_INPUT)) {
		/* no input stream open: reset brings the port up cleanly */
		snd_cs46xx_midi_reset(chip);
	} else {
		/* input already running: just write the new control value */
		snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
	}
	spin_unlock_irq(&chip->reg_lock);
	return 0;
}

/*
 * Close the raw MIDI output substream: disable the transmitter and its
 * interrupt, detach the substream, and drop the usage reference.
 */
static int snd_cs46xx_midi_output_close(struct snd_rawmidi_substream *substream)
{
	struct snd_cs46xx *chip = substream->rmidi->private_data;

	spin_lock_irq(&chip->reg_lock);
	chip->midcr &= ~(MIDCR_TXE | MIDCR_TIE);	/* transmitter and TX irq off */
	chip->midi_output = NULL;
	if (!(chip->uartm & CS46XX_MODE_INPUT)) {
		snd_cs46xx_midi_reset(chip);
	} else {
		snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
	}
	chip->uartm &= ~CS46XX_MODE_OUTPUT;
	spin_unlock_irq(&chip->reg_lock);
	chip->active_ctrl(chip, -1);	/* balances the +1 from open */
	return 0;
}

/*
 * Enable/disable the RX interrupt according to @up.  Only touches MIDCR
 * when the enable state actually changes, avoiding redundant register
 * writes.
 */
static void snd_cs46xx_midi_input_trigger(struct snd_rawmidi_substream *substream, int up)
{
	unsigned long flags;
	struct snd_cs46xx *chip = substream->rmidi->private_data;

	spin_lock_irqsave(&chip->reg_lock, flags);
	if (up) {
		if ((chip->midcr & MIDCR_RIE) == 0) {
			chip->midcr |= MIDCR_RIE;	/* RX irq on */
			snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
		}
	} else {
		if (chip->midcr & MIDCR_RIE) {
			chip->midcr &= ~MIDCR_RIE;	/* RX irq off */
			snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr);
		}
	}
	spin_unlock_irqrestore(&chip->reg_lock,
flags); } static void snd_cs46xx_midi_output_trigger(struct snd_rawmidi_substream *substream, int up) { unsigned long flags; struct snd_cs46xx *chip = substream->rmidi->private_data; unsigned char byte; spin_lock_irqsave(&chip->reg_lock, flags); if (up) { if ((chip->midcr & MIDCR_TIE) == 0) { chip->midcr |= MIDCR_TIE; /* fill UART FIFO buffer at first, and turn Tx interrupts only if necessary */ while ((chip->midcr & MIDCR_TIE) && (snd_cs46xx_peekBA0(chip, BA0_MIDSR) & MIDSR_TBF) == 0) { if (snd_rawmidi_transmit(substream, &byte, 1) != 1) { chip->midcr &= ~MIDCR_TIE; } else { snd_cs46xx_pokeBA0(chip, BA0_MIDWP, byte); } } snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr); } } else { if (chip->midcr & MIDCR_TIE) { chip->midcr &= ~MIDCR_TIE; snd_cs46xx_pokeBA0(chip, BA0_MIDCR, chip->midcr); } } spin_unlock_irqrestore(&chip->reg_lock, flags); } static struct snd_rawmidi_ops snd_cs46xx_midi_output = { .open = snd_cs46xx_midi_output_open, .close = snd_cs46xx_midi_output_close, .trigger = snd_cs46xx_midi_output_trigger, }; static struct snd_rawmidi_ops snd_cs46xx_midi_input = { .open = snd_cs46xx_midi_input_open, .close = snd_cs46xx_midi_input_close, .trigger = snd_cs46xx_midi_input_trigger, }; int __devinit snd_cs46xx_midi(struct snd_cs46xx *chip, int device, struct snd_rawmidi **rrawmidi) { struct snd_rawmidi *rmidi; int err; if (rrawmidi) *rrawmidi = NULL; if ((err = snd_rawmidi_new(chip->card, "CS46XX", device, 1, 1, &rmidi)) < 0) return err; strcpy(rmidi->name, "CS46XX"); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_cs46xx_midi_output); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_cs46xx_midi_input); rmidi->info_flags |= SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX; rmidi->private_data = chip; chip->rmidi = rmidi; if (rrawmidi) *rrawmidi = NULL; return 0; } /* * gameport interface */ #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) static void 
snd_cs46xx_gameport_trigger(struct gameport *gameport)
{
	struct snd_cs46xx *chip = gameport_get_port_data(gameport);

	if (snd_BUG_ON(!chip))
		return;
	snd_cs46xx_pokeBA0(chip, BA0_JSPT, 0xFF);  //outb(gameport->io, 0xFF);
}

/* Raw-mode read: return the current byte of the JSPT register. */
static unsigned char snd_cs46xx_gameport_read(struct gameport *gameport)
{
	struct snd_cs46xx *chip = gameport_get_port_data(gameport);

	if (snd_BUG_ON(!chip))
		return 0;
	return snd_cs46xx_peekBA0(chip, BA0_JSPT);  //inb(gameport->io);
}

/*
 * Cooked-mode read: fetch the two joystick coordinate registers and the
 * button byte, then report 4 axes and 4 buttons to the gameport core.
 * An all-ones axis value (0xFFFF) is mapped to -1 (axis not present).
 */
static int snd_cs46xx_gameport_cooked_read(struct gameport *gameport, int *axes, int *buttons)
{
	struct snd_cs46xx *chip = gameport_get_port_data(gameport);
	unsigned js1, js2, jst;

	if (snd_BUG_ON(!chip))
		return 0;

	js1 = snd_cs46xx_peekBA0(chip, BA0_JSC1);
	js2 = snd_cs46xx_peekBA0(chip, BA0_JSC2);
	jst = snd_cs46xx_peekBA0(chip, BA0_JSPT);

	*buttons = (~jst >> 4) & 0x0F;	/* inverted button bits live in the high nibble */

	axes[0] = ((js1 & JSC1_Y1V_MASK) >> JSC1_Y1V_SHIFT) & 0xFFFF;
	axes[1] = ((js1 & JSC1_X1V_MASK) >> JSC1_X1V_SHIFT) & 0xFFFF;
	axes[2] = ((js2 & JSC2_Y2V_MASK) >> JSC2_Y2V_SHIFT) & 0xFFFF;
	axes[3] = ((js2 & JSC2_X2V_MASK) >> JSC2_X2V_SHIFT) & 0xFFFF;

	/* note: jst is reused here as a plain loop index */
	for (jst = 0; jst < 4; ++jst)
		if (axes[jst] == 0xFFFF)
			axes[jst] = -1;
	return 0;
}

/* Accept both raw and cooked polling modes; reject anything else. */
static int snd_cs46xx_gameport_open(struct gameport *gameport, int mode)
{
	switch (mode) {
	case GAMEPORT_MODE_COOKED:
		return 0;
	case GAMEPORT_MODE_RAW:
		return 0;
	default:
		return -1;
	}
	return 0;
}

/*
 * Allocate, set up and register the gameport attached to this chip.
 * Returns zero on success or -ENOMEM if the port cannot be allocated.
 */
int __devinit snd_cs46xx_gameport(struct snd_cs46xx *chip)
{
	struct gameport *gp;

	chip->gameport = gp = gameport_allocate_port();
	if (!gp) {
		printk(KERN_ERR "cs46xx: cannot allocate memory for gameport\n");
		return -ENOMEM;
	}

	gameport_set_name(gp, "CS46xx Gameport");
	gameport_set_phys(gp, "pci%s/gameport0", pci_name(chip->pci));
	gameport_set_dev_parent(gp, &chip->pci->dev);
	gameport_set_port_data(gp, chip);

	gp->open = snd_cs46xx_gameport_open;
	gp->read = snd_cs46xx_gameport_read;
	gp->trigger = snd_cs46xx_gameport_trigger;
	gp->cooked_read = snd_cs46xx_gameport_cooked_read;

	snd_cs46xx_pokeBA0(chip, BA0_JSIO, 0xFF); // ?
	snd_cs46xx_pokeBA0(chip, BA0_JSCTL, JSCTL_SP_MEDIUM_SLOW);

	gameport_register_port(gp);
	return 0;
}

/* Unregister and release the gameport if one was created. */
static inline void snd_cs46xx_remove_gameport(struct snd_cs46xx *chip)
{
	if (chip->gameport) {
		gameport_unregister_port(chip->gameport);
		chip->gameport = NULL;
	}
}
#else
/* Gameport support not built in: keep the API but make it a no-op. */
int __devinit snd_cs46xx_gameport(struct snd_cs46xx *chip) { return -ENOSYS; }
static inline void snd_cs46xx_remove_gameport(struct snd_cs46xx *chip) { }
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_PROC_FS

/*
 * proc interface
 */

/*
 * Read handler for the per-region proc entries: copy @count bytes from
 * the ioremapped region at offset @pos to user space.
 * NOTE(review): this treats entry->private_data as a
 * struct snd_cs46xx_region pointer, so whoever creates the entry must
 * store the region there.
 */
static ssize_t snd_cs46xx_io_read(struct snd_info_entry *entry,
				  void *file_private_data,
				  struct file *file, char __user *buf,
				  size_t count, loff_t pos)
{
	struct snd_cs46xx_region *region = entry->private_data;

	if (copy_to_user_fromio(buf, region->remap_addr + pos, count))
		return -EFAULT;
	return count;
}

static struct snd_info_entry_ops snd_cs46xx_proc_io_ops = {
	.read = snd_cs46xx_io_read,
};

/* Create one data proc entry per mapped chip memory region. */
static int __devinit snd_cs46xx_proc_init(struct snd_card *card, struct snd_cs46xx *chip)
{
	struct snd_info_entry *entry;
	int idx;

	for (idx = 0; idx < 5; idx++) {
		struct snd_cs46xx_region *region = &chip->region.idx[idx];
		if (!
snd_card_proc_new(card, region->name, &entry)) { entry->content = SNDRV_INFO_CONTENT_DATA; entry->private_data = chip; entry->c.ops = &snd_cs46xx_proc_io_ops; entry->size = region->size; entry->mode = S_IFREG | S_IRUSR; } } #ifdef CONFIG_SND_CS46XX_NEW_DSP cs46xx_dsp_proc_init(card, chip); #endif return 0; } static int snd_cs46xx_proc_done(struct snd_cs46xx *chip) { #ifdef CONFIG_SND_CS46XX_NEW_DSP cs46xx_dsp_proc_done(chip); #endif return 0; } #else /* !CONFIG_PROC_FS */ #define snd_cs46xx_proc_init(card, chip) #define snd_cs46xx_proc_done(chip) #endif /* * stop the h/w */ static void snd_cs46xx_hw_stop(struct snd_cs46xx *chip) { unsigned int tmp; tmp = snd_cs46xx_peek(chip, BA1_PFIE); tmp &= ~0x0000f03f; tmp |= 0x00000010; snd_cs46xx_poke(chip, BA1_PFIE, tmp); /* playback interrupt disable */ tmp = snd_cs46xx_peek(chip, BA1_CIE); tmp &= ~0x0000003f; tmp |= 0x00000011; snd_cs46xx_poke(chip, BA1_CIE, tmp); /* capture interrupt disable */ /* * Stop playback DMA. */ tmp = snd_cs46xx_peek(chip, BA1_PCTL); snd_cs46xx_poke(chip, BA1_PCTL, tmp & 0x0000ffff); /* * Stop capture DMA. */ tmp = snd_cs46xx_peek(chip, BA1_CCTL); snd_cs46xx_poke(chip, BA1_CCTL, tmp & 0xffff0000); /* * Reset the processor. */ snd_cs46xx_reset(chip); snd_cs46xx_proc_stop(chip); /* * Power down the PLL. */ snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, 0); /* * Turn off the Processor by turning off the software clock enable flag in * the clock control register. 
*/ tmp = snd_cs46xx_peekBA0(chip, BA0_CLKCR1) & ~CLKCR1_SWCE; snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp); } static int snd_cs46xx_free(struct snd_cs46xx *chip) { int idx; if (snd_BUG_ON(!chip)) return -EINVAL; if (chip->active_ctrl) chip->active_ctrl(chip, 1); snd_cs46xx_remove_gameport(chip); if (chip->amplifier_ctrl) chip->amplifier_ctrl(chip, -chip->amplifier); /* force to off */ snd_cs46xx_proc_done(chip); if (chip->region.idx[0].resource) snd_cs46xx_hw_stop(chip); if (chip->irq >= 0) free_irq(chip->irq, chip); if (chip->active_ctrl) chip->active_ctrl(chip, -chip->amplifier); for (idx = 0; idx < 5; idx++) { struct snd_cs46xx_region *region = &chip->region.idx[idx]; if (region->remap_addr) iounmap(region->remap_addr); release_and_free_resource(region->resource); } #ifdef CONFIG_SND_CS46XX_NEW_DSP if (chip->dsp_spos_instance) { cs46xx_dsp_spos_destroy(chip); chip->dsp_spos_instance = NULL; } #endif #ifdef CONFIG_PM kfree(chip->saved_regs); #endif pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_cs46xx_dev_free(struct snd_device *device) { struct snd_cs46xx *chip = device->device_data; return snd_cs46xx_free(chip); } /* * initialize chip */ static int snd_cs46xx_chip_init(struct snd_cs46xx *chip) { int timeout; /* * First, blast the clock control register to zero so that the PLL starts * out in a known state, and blast the master serial port control register * to zero so that the serial ports also start out in a known state. */ snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, 0); snd_cs46xx_pokeBA0(chip, BA0_SERMC1, 0); /* * If we are in AC97 mode, then we must set the part to a host controlled * AC-link. Otherwise, we won't be able to bring up the link. 
*/ #ifdef CONFIG_SND_CS46XX_NEW_DSP snd_cs46xx_pokeBA0(chip, BA0_SERACC, SERACC_HSP | SERACC_CHIP_TYPE_2_0 | SERACC_TWO_CODECS); /* 2.00 dual codecs */ /* snd_cs46xx_pokeBA0(chip, BA0_SERACC, SERACC_HSP | SERACC_CHIP_TYPE_2_0); */ /* 2.00 codec */ #else snd_cs46xx_pokeBA0(chip, BA0_SERACC, SERACC_HSP | SERACC_CHIP_TYPE_1_03); /* 1.03 codec */ #endif /* * Drive the ARST# pin low for a minimum of 1uS (as defined in the AC97 * spec) and then drive it high. This is done for non AC97 modes since * there might be logic external to the CS461x that uses the ARST# line * for a reset. */ snd_cs46xx_pokeBA0(chip, BA0_ACCTL, 0); #ifdef CONFIG_SND_CS46XX_NEW_DSP snd_cs46xx_pokeBA0(chip, BA0_ACCTL2, 0); #endif udelay(50); snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_RSTN); #ifdef CONFIG_SND_CS46XX_NEW_DSP snd_cs46xx_pokeBA0(chip, BA0_ACCTL2, ACCTL_RSTN); #endif /* * The first thing we do here is to enable sync generation. As soon * as we start receiving bit clock, we'll start producing the SYNC * signal. */ snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_ESYN | ACCTL_RSTN); #ifdef CONFIG_SND_CS46XX_NEW_DSP snd_cs46xx_pokeBA0(chip, BA0_ACCTL2, ACCTL_ESYN | ACCTL_RSTN); #endif /* * Now wait for a short while to allow the AC97 part to start * generating bit clock (so we don't try to start the PLL without an * input clock). */ mdelay(10); /* * Set the serial port timing configuration, so that * the clock control circuit gets its clock from the correct place. */ snd_cs46xx_pokeBA0(chip, BA0_SERMC1, SERMC1_PTC_AC97); /* * Write the selected clock control setup to the hardware. Do not turn on * SWCE yet (if requested), so that the devices clocked by the output of * PLL are not clocked until the PLL is stable. */ snd_cs46xx_pokeBA0(chip, BA0_PLLCC, PLLCC_LPF_1050_2780_KHZ | PLLCC_CDR_73_104_MHZ); snd_cs46xx_pokeBA0(chip, BA0_PLLM, 0x3a); snd_cs46xx_pokeBA0(chip, BA0_CLKCR2, CLKCR2_PDIVS_8); /* * Power up the PLL. 
*/ snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, CLKCR1_PLLP); /* * Wait until the PLL has stabilized. */ msleep(100); /* * Turn on clocking of the core so that we can setup the serial ports. */ snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, CLKCR1_PLLP | CLKCR1_SWCE); /* * Enable FIFO Host Bypass */ snd_cs46xx_pokeBA0(chip, BA0_SERBCF, SERBCF_HBP); /* * Fill the serial port FIFOs with silence. */ snd_cs46xx_clear_serial_FIFOs(chip); /* * Set the serial port FIFO pointer to the first sample in the FIFO. */ /* snd_cs46xx_pokeBA0(chip, BA0_SERBSP, 0); */ /* * Write the serial port configuration to the part. The master * enable bit is not set until all other values have been written. */ snd_cs46xx_pokeBA0(chip, BA0_SERC1, SERC1_SO1F_AC97 | SERC1_SO1EN); snd_cs46xx_pokeBA0(chip, BA0_SERC2, SERC2_SI1F_AC97 | SERC1_SO1EN); snd_cs46xx_pokeBA0(chip, BA0_SERMC1, SERMC1_PTC_AC97 | SERMC1_MSPE); #ifdef CONFIG_SND_CS46XX_NEW_DSP snd_cs46xx_pokeBA0(chip, BA0_SERC7, SERC7_ASDI2EN); snd_cs46xx_pokeBA0(chip, BA0_SERC3, 0); snd_cs46xx_pokeBA0(chip, BA0_SERC4, 0); snd_cs46xx_pokeBA0(chip, BA0_SERC5, 0); snd_cs46xx_pokeBA0(chip, BA0_SERC6, 1); #endif mdelay(5); /* * Wait for the codec ready signal from the AC97 codec. */ timeout = 150; while (timeout-- > 0) { /* * Read the AC97 status register to see if we've seen a CODEC READY * signal from the AC97 codec. */ if (snd_cs46xx_peekBA0(chip, BA0_ACSTS) & ACSTS_CRDY) goto ok1; msleep(10); } snd_printk(KERN_ERR "create - never read codec ready from AC'97\n"); snd_printk(KERN_ERR "it is not probably bug, try to use CS4236 driver\n"); return -EIO; ok1: #ifdef CONFIG_SND_CS46XX_NEW_DSP { int count; for (count = 0; count < 150; count++) { /* First, we want to wait for a short time. */ udelay(25); if (snd_cs46xx_peekBA0(chip, BA0_ACSTS2) & ACSTS_CRDY) break; } /* * Make sure CODEC is READY. 
*/ if (!(snd_cs46xx_peekBA0(chip, BA0_ACSTS2) & ACSTS_CRDY)) snd_printdd("cs46xx: never read card ready from secondary AC'97\n"); } #endif /* * Assert the vaid frame signal so that we can start sending commands * to the AC97 codec. */ snd_cs46xx_pokeBA0(chip, BA0_ACCTL, ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); #ifdef CONFIG_SND_CS46XX_NEW_DSP snd_cs46xx_pokeBA0(chip, BA0_ACCTL2, ACCTL_VFRM | ACCTL_ESYN | ACCTL_RSTN); #endif /* * Wait until we've sampled input slots 3 and 4 as valid, meaning that * the codec is pumping ADC data across the AC-link. */ timeout = 150; while (timeout-- > 0) { /* * Read the input slot valid register and see if input slots 3 and * 4 are valid yet. */ if ((snd_cs46xx_peekBA0(chip, BA0_ACISV) & (ACISV_ISV3 | ACISV_ISV4)) == (ACISV_ISV3 | ACISV_ISV4)) goto ok2; msleep(10); } #ifndef CONFIG_SND_CS46XX_NEW_DSP snd_printk(KERN_ERR "create - never read ISV3 & ISV4 from AC'97\n"); return -EIO; #else /* This may happen on a cold boot with a Terratec SiXPack 5.1. Reloading the driver may help, if there's other soundcards with the same problem I would like to know. (Benny) */ snd_printk(KERN_ERR "ERROR: snd-cs46xx: never read ISV3 & ISV4 from AC'97\n"); snd_printk(KERN_ERR " Try reloading the ALSA driver, if you find something\n"); snd_printk(KERN_ERR " broken or not working on your soundcard upon\n"); snd_printk(KERN_ERR " this message please report to alsa-devel@alsa-project.org\n"); return -EIO; #endif ok2: /* * Now, assert valid frame and the slot 3 and 4 valid bits. This will * commense the transfer of digital audio data to the AC97 codec. */ snd_cs46xx_pokeBA0(chip, BA0_ACOSV, ACOSV_SLV3 | ACOSV_SLV4); /* * Power down the DAC and ADC. We will power them up (if) when we need * them. */ /* snd_cs46xx_pokeBA0(chip, BA0_AC97_POWERDOWN, 0x300); */ /* * Turn off the Processor by turning off the software clock enable flag in * the clock control register. 
*/ /* tmp = snd_cs46xx_peekBA0(chip, BA0_CLKCR1) & ~CLKCR1_SWCE; */ /* snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp); */ return 0; } /* * start and load DSP */ static void cs46xx_enable_stream_irqs(struct snd_cs46xx *chip) { unsigned int tmp; snd_cs46xx_pokeBA0(chip, BA0_HICR, HICR_IEV | HICR_CHGM); tmp = snd_cs46xx_peek(chip, BA1_PFIE); tmp &= ~0x0000f03f; snd_cs46xx_poke(chip, BA1_PFIE, tmp); /* playback interrupt enable */ tmp = snd_cs46xx_peek(chip, BA1_CIE); tmp &= ~0x0000003f; tmp |= 0x00000001; snd_cs46xx_poke(chip, BA1_CIE, tmp); /* capture interrupt enable */ } int __devinit snd_cs46xx_start_dsp(struct snd_cs46xx *chip) { unsigned int tmp; /* * Reset the processor. */ snd_cs46xx_reset(chip); /* * Download the image to the processor. */ #ifdef CONFIG_SND_CS46XX_NEW_DSP #if 0 if (cs46xx_dsp_load_module(chip, &cwcemb80_module) < 0) { snd_printk(KERN_ERR "image download error\n"); return -EIO; } #endif if (cs46xx_dsp_load_module(chip, &cwc4630_module) < 0) { snd_printk(KERN_ERR "image download error [cwc4630]\n"); return -EIO; } if (cs46xx_dsp_load_module(chip, &cwcasync_module) < 0) { snd_printk(KERN_ERR "image download error [cwcasync]\n"); return -EIO; } if (cs46xx_dsp_load_module(chip, &cwcsnoop_module) < 0) { snd_printk(KERN_ERR "image download error [cwcsnoop]\n"); return -EIO; } if (cs46xx_dsp_load_module(chip, &cwcbinhack_module) < 0) { snd_printk(KERN_ERR "image download error [cwcbinhack]\n"); return -EIO; } if (cs46xx_dsp_load_module(chip, &cwcdma_module) < 0) { snd_printk(KERN_ERR "image download error [cwcdma]\n"); return -EIO; } if (cs46xx_dsp_scb_and_task_init(chip) < 0) return -EIO; #else /* old image */ if (snd_cs46xx_download_image(chip) < 0) { snd_printk(KERN_ERR "image download error\n"); return -EIO; } /* * Stop playback DMA. */ tmp = snd_cs46xx_peek(chip, BA1_PCTL); chip->play_ctl = tmp & 0xffff0000; snd_cs46xx_poke(chip, BA1_PCTL, tmp & 0x0000ffff); #endif /* * Stop capture DMA. 
*/ tmp = snd_cs46xx_peek(chip, BA1_CCTL); chip->capt.ctl = tmp & 0x0000ffff; snd_cs46xx_poke(chip, BA1_CCTL, tmp & 0xffff0000); mdelay(5); snd_cs46xx_set_play_sample_rate(chip, 8000); snd_cs46xx_set_capture_sample_rate(chip, 8000); snd_cs46xx_proc_start(chip); cs46xx_enable_stream_irqs(chip); #ifndef CONFIG_SND_CS46XX_NEW_DSP /* set the attenuation to 0dB */ snd_cs46xx_poke(chip, BA1_PVOL, 0x80008000); snd_cs46xx_poke(chip, BA1_CVOL, 0x80008000); #endif return 0; } /* * AMP control - null AMP */ static void amp_none(struct snd_cs46xx *chip, int change) { } #ifdef CONFIG_SND_CS46XX_NEW_DSP static int voyetra_setup_eapd_slot(struct snd_cs46xx *chip) { u32 idx, valid_slots,tmp,powerdown = 0; u16 modem_power,pin_config,logic_type; snd_printdd ("cs46xx: cs46xx_setup_eapd_slot()+\n"); /* * See if the devices are powered down. If so, we must power them up first * or they will not respond. */ tmp = snd_cs46xx_peekBA0(chip, BA0_CLKCR1); if (!(tmp & CLKCR1_SWCE)) { snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp | CLKCR1_SWCE); powerdown = 1; } /* * Clear PRA. The Bonzo chip will be used for GPIO not for modem * stuff. */ if(chip->nr_ac97_codecs != 2) { snd_printk (KERN_ERR "cs46xx: cs46xx_setup_eapd_slot() - no secondary codec configured\n"); return -EINVAL; } modem_power = snd_cs46xx_codec_read (chip, AC97_EXTENDED_MSTATUS, CS46XX_SECONDARY_CODEC_INDEX); modem_power &=0xFEFF; snd_cs46xx_codec_write(chip, AC97_EXTENDED_MSTATUS, modem_power, CS46XX_SECONDARY_CODEC_INDEX); /* * Set GPIO pin's 7 and 8 so that they are configured for output. */ pin_config = snd_cs46xx_codec_read (chip, AC97_GPIO_CFG, CS46XX_SECONDARY_CODEC_INDEX); pin_config &=0x27F; snd_cs46xx_codec_write(chip, AC97_GPIO_CFG, pin_config, CS46XX_SECONDARY_CODEC_INDEX); /* * Set GPIO pin's 7 and 8 so that they are compatible with CMOS logic. 
*/ logic_type = snd_cs46xx_codec_read(chip, AC97_GPIO_POLARITY, CS46XX_SECONDARY_CODEC_INDEX); logic_type &=0x27F; snd_cs46xx_codec_write (chip, AC97_GPIO_POLARITY, logic_type, CS46XX_SECONDARY_CODEC_INDEX); valid_slots = snd_cs46xx_peekBA0(chip, BA0_ACOSV); valid_slots |= 0x200; snd_cs46xx_pokeBA0(chip, BA0_ACOSV, valid_slots); if ( cs46xx_wait_for_fifo(chip,1) ) { snd_printdd("FIFO is busy\n"); return -EINVAL; } /* * Fill slots 12 with the correct value for the GPIO pins. */ for(idx = 0x90; idx <= 0x9F; idx++) { /* * Initialize the fifo so that bits 7 and 8 are on. * * Remember that the GPIO pins in bonzo are shifted by 4 bits to * the left. 0x1800 corresponds to bits 7 and 8. */ snd_cs46xx_pokeBA0(chip, BA0_SERBWP, 0x1800); /* * Wait for command to complete */ if ( cs46xx_wait_for_fifo(chip,200) ) { snd_printdd("failed waiting for FIFO at addr (%02X)\n",idx); return -EINVAL; } /* * Write the serial port FIFO index. */ snd_cs46xx_pokeBA0(chip, BA0_SERBAD, idx); /* * Tell the serial port to load the new value into the FIFO location. */ snd_cs46xx_pokeBA0(chip, BA0_SERBCM, SERBCM_WRC); } /* wait for last command to complete */ cs46xx_wait_for_fifo(chip,200); /* * Now, if we powered up the devices, then power them back down again. * This is kinda ugly, but should never happen. 
*/ if (powerdown) snd_cs46xx_pokeBA0(chip, BA0_CLKCR1, tmp); return 0; } #endif /* * Crystal EAPD mode */ static void amp_voyetra(struct snd_cs46xx *chip, int change) { /* Manage the EAPD bit on the Crystal 4297 and the Analog AD1885 */ #ifdef CONFIG_SND_CS46XX_NEW_DSP int old = chip->amplifier; #endif int oval, val; chip->amplifier += change; oval = snd_cs46xx_codec_read(chip, AC97_POWERDOWN, CS46XX_PRIMARY_CODEC_INDEX); val = oval; if (chip->amplifier) { /* Turn the EAPD amp on */ val |= 0x8000; } else { /* Turn the EAPD amp off */ val &= ~0x8000; } if (val != oval) { snd_cs46xx_codec_write(chip, AC97_POWERDOWN, val, CS46XX_PRIMARY_CODEC_INDEX); if (chip->eapd_switch) snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->eapd_switch->id); } #ifdef CONFIG_SND_CS46XX_NEW_DSP if (chip->amplifier && !old) { voyetra_setup_eapd_slot(chip); } #endif } static void hercules_init(struct snd_cs46xx *chip) { /* default: AMP off, and SPDIF input optical */ snd_cs46xx_pokeBA0(chip, BA0_EGPIODR, EGPIODR_GPOE0); snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR, EGPIODR_GPOE0); } /* * Game Theatre XP card - EGPIO[2] is used to enable the external amp. 
*/ static void amp_hercules(struct snd_cs46xx *chip, int change) { int old = chip->amplifier; int val1 = snd_cs46xx_peekBA0(chip, BA0_EGPIODR); int val2 = snd_cs46xx_peekBA0(chip, BA0_EGPIOPTR); chip->amplifier += change; if (chip->amplifier && !old) { snd_printdd ("Hercules amplifier ON\n"); snd_cs46xx_pokeBA0(chip, BA0_EGPIODR, EGPIODR_GPOE2 | val1); /* enable EGPIO2 output */ snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR, EGPIOPTR_GPPT2 | val2); /* open-drain on output */ } else if (old && !chip->amplifier) { snd_printdd ("Hercules amplifier OFF\n"); snd_cs46xx_pokeBA0(chip, BA0_EGPIODR, val1 & ~EGPIODR_GPOE2); /* disable */ snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR, val2 & ~EGPIOPTR_GPPT2); /* disable */ } } static void voyetra_mixer_init (struct snd_cs46xx *chip) { snd_printdd ("initializing Voyetra mixer\n"); /* Enable SPDIF out */ snd_cs46xx_pokeBA0(chip, BA0_EGPIODR, EGPIODR_GPOE0); snd_cs46xx_pokeBA0(chip, BA0_EGPIOPTR, EGPIODR_GPOE0); } static void hercules_mixer_init (struct snd_cs46xx *chip) { #ifdef CONFIG_SND_CS46XX_NEW_DSP unsigned int idx; int err; struct snd_card *card = chip->card; #endif /* set EGPIO to default */ hercules_init(chip); snd_printdd ("initializing Hercules mixer\n"); #ifdef CONFIG_SND_CS46XX_NEW_DSP if (chip->in_suspend) return; for (idx = 0 ; idx < ARRAY_SIZE(snd_hercules_controls); idx++) { struct snd_kcontrol *kctl; kctl = snd_ctl_new1(&snd_hercules_controls[idx], chip); if ((err = snd_ctl_add(card, kctl)) < 0) { printk (KERN_ERR "cs46xx: failed to initialize Hercules mixer (%d)\n",err); break; } } #endif } #if 0 /* * Untested */ static void amp_voyetra_4294(struct snd_cs46xx *chip, int change) { chip->amplifier += change; if (chip->amplifier) { /* Switch the GPIO pins 7 and 8 to open drain */ snd_cs46xx_codec_write(chip, 0x4C, snd_cs46xx_codec_read(chip, 0x4C) & 0xFE7F); snd_cs46xx_codec_write(chip, 0x4E, snd_cs46xx_codec_read(chip, 0x4E) | 0x0180); /* Now wake the AMP (this might be backwards) */ snd_cs46xx_codec_write(chip, 0x54, 
snd_cs46xx_codec_read(chip, 0x54) & ~0x0180); } else { snd_cs46xx_codec_write(chip, 0x54, snd_cs46xx_codec_read(chip, 0x54) | 0x0180); } } #endif /* * Handle the CLKRUN on a thinkpad. We must disable CLKRUN support * whenever we need to beat on the chip. * * The original idea and code for this hack comes from David Kaiser at * Linuxcare. Perhaps one day Crystal will document their chips well * enough to make them useful. */ static void clkrun_hack(struct snd_cs46xx *chip, int change) { u16 control, nval; if (!chip->acpi_port) return; chip->amplifier += change; /* Read ACPI port */ nval = control = inw(chip->acpi_port + 0x10); /* Flip CLKRUN off while running */ if (! chip->amplifier) nval |= 0x2000; else nval &= ~0x2000; if (nval != control) outw(nval, chip->acpi_port + 0x10); } /* * detect intel piix4 */ static void clkrun_init(struct snd_cs46xx *chip) { struct pci_dev *pdev; u8 pp; chip->acpi_port = 0; pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82371AB_3, NULL); if (pdev == NULL) return; /* Not a thinkpad thats for sure */ /* Find the control port */ pci_read_config_byte(pdev, 0x41, &pp); chip->acpi_port = pp << 8; pci_dev_put(pdev); } /* * Card subid table */ struct cs_card_type { u16 vendor; u16 id; char *name; void (*init)(struct snd_cs46xx *); void (*amp)(struct snd_cs46xx *, int); void (*active)(struct snd_cs46xx *, int); void (*mixer_init)(struct snd_cs46xx *); }; static struct cs_card_type __devinitdata cards[] = { { .vendor = 0x1489, .id = 0x7001, .name = "Genius Soundmaker 128 value", /* nothing special */ }, { .vendor = 0x5053, .id = 0x3357, .name = "Voyetra", .amp = amp_voyetra, .mixer_init = voyetra_mixer_init, }, { .vendor = 0x1071, .id = 0x6003, .name = "Mitac MI6020/21", .amp = amp_voyetra, }, /* Hercules Game Theatre XP */ { .vendor = 0x14af, /* Guillemot Corporation */ .id = 0x0050, .name = "Hercules Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, { .vendor = 0x1681, .id = 0x0050, .name = "Hercules 
Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, { .vendor = 0x1681, .id = 0x0051, .name = "Hercules Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, { .vendor = 0x1681, .id = 0x0052, .name = "Hercules Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, { .vendor = 0x1681, .id = 0x0053, .name = "Hercules Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, { .vendor = 0x1681, .id = 0x0054, .name = "Hercules Game Theatre XP", .amp = amp_hercules, .mixer_init = hercules_mixer_init, }, /* Herculess Fortissimo */ { .vendor = 0x1681, .id = 0xa010, .name = "Hercules Gamesurround Fortissimo II", }, { .vendor = 0x1681, .id = 0xa011, .name = "Hercules Gamesurround Fortissimo III 7.1", }, /* Teratec */ { .vendor = 0x153b, .id = 0x112e, .name = "Terratec DMX XFire 1024", }, { .vendor = 0x153b, .id = 0x1136, .name = "Terratec SiXPack 5.1", }, /* Not sure if the 570 needs the clkrun hack */ { .vendor = PCI_VENDOR_ID_IBM, .id = 0x0132, .name = "Thinkpad 570", .init = clkrun_init, .active = clkrun_hack, }, { .vendor = PCI_VENDOR_ID_IBM, .id = 0x0153, .name = "Thinkpad 600X/A20/T20", .init = clkrun_init, .active = clkrun_hack, }, { .vendor = PCI_VENDOR_ID_IBM, .id = 0x1010, .name = "Thinkpad 600E (unsupported)", }, {} /* terminator */ }; /* * APM support */ #ifdef CONFIG_PM static unsigned int saved_regs[] = { BA0_ACOSV, /*BA0_ASER_FADDR,*/ BA0_ASER_MASTER, BA1_PVOL, BA1_CVOL, }; static int snd_cs46xx_suspend(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct snd_card *card = dev_get_drvdata(dev); struct snd_cs46xx *chip = card->private_data; int i, amp_saved; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); chip->in_suspend = 1; snd_pcm_suspend_all(chip->pcm); // chip->ac97_powerdown = snd_cs46xx_codec_read(chip, AC97_POWER_CONTROL); // chip->ac97_general_purpose = snd_cs46xx_codec_read(chip, BA0_AC97_GENERAL_PURPOSE); 
snd_ac97_suspend(chip->ac97[CS46XX_PRIMARY_CODEC_INDEX]); snd_ac97_suspend(chip->ac97[CS46XX_SECONDARY_CODEC_INDEX]); /* save some registers */ for (i = 0; i < ARRAY_SIZE(saved_regs); i++) chip->saved_regs[i] = snd_cs46xx_peekBA0(chip, saved_regs[i]); amp_saved = chip->amplifier; /* turn off amp */ chip->amplifier_ctrl(chip, -chip->amplifier); snd_cs46xx_hw_stop(chip); /* disable CLKRUN */ chip->active_ctrl(chip, -chip->amplifier); chip->amplifier = amp_saved; /* restore the status */ pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, PCI_D3hot); return 0; } static int snd_cs46xx_resume(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct snd_card *card = dev_get_drvdata(dev); struct snd_cs46xx *chip = card->private_data; int amp_saved; #ifdef CONFIG_SND_CS46XX_NEW_DSP int i; #endif unsigned int tmp; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "cs46xx: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); amp_saved = chip->amplifier; chip->amplifier = 0; chip->active_ctrl(chip, 1); /* force to on */ snd_cs46xx_chip_init(chip); snd_cs46xx_reset(chip); #ifdef CONFIG_SND_CS46XX_NEW_DSP cs46xx_dsp_resume(chip); /* restore some registers */ for (i = 0; i < ARRAY_SIZE(saved_regs); i++) snd_cs46xx_pokeBA0(chip, saved_regs[i], chip->saved_regs[i]); #else snd_cs46xx_download_image(chip); #endif #if 0 snd_cs46xx_codec_write(chip, BA0_AC97_GENERAL_PURPOSE, chip->ac97_general_purpose); snd_cs46xx_codec_write(chip, AC97_POWER_CONTROL, chip->ac97_powerdown); mdelay(10); snd_cs46xx_codec_write(chip, BA0_AC97_POWERDOWN, chip->ac97_powerdown); mdelay(5); #endif snd_ac97_resume(chip->ac97[CS46XX_PRIMARY_CODEC_INDEX]); snd_ac97_resume(chip->ac97[CS46XX_SECONDARY_CODEC_INDEX]); /* * Stop capture DMA. 
*/ tmp = snd_cs46xx_peek(chip, BA1_CCTL); chip->capt.ctl = tmp & 0x0000ffff; snd_cs46xx_poke(chip, BA1_CCTL, tmp & 0xffff0000); mdelay(5); /* reset playback/capture */ snd_cs46xx_set_play_sample_rate(chip, 8000); snd_cs46xx_set_capture_sample_rate(chip, 8000); snd_cs46xx_proc_start(chip); cs46xx_enable_stream_irqs(chip); if (amp_saved) chip->amplifier_ctrl(chip, 1); /* turn amp on */ else chip->active_ctrl(chip, -1); /* disable CLKRUN */ chip->amplifier = amp_saved; chip->in_suspend = 0; snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } SIMPLE_DEV_PM_OPS(snd_cs46xx_pm, snd_cs46xx_suspend, snd_cs46xx_resume); #endif /* CONFIG_PM */ /* */ int __devinit snd_cs46xx_create(struct snd_card *card, struct pci_dev * pci, int external_amp, int thinkpad, struct snd_cs46xx ** rchip) { struct snd_cs46xx *chip; int err, idx; struct snd_cs46xx_region *region; struct cs_card_type *cp; u16 ss_card, ss_vendor; static struct snd_device_ops ops = { .dev_free = snd_cs46xx_dev_free, }; *rchip = NULL; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); #ifdef CONFIG_SND_CS46XX_NEW_DSP mutex_init(&chip->spos_mutex); #endif chip->card = card; chip->pci = pci; chip->irq = -1; chip->ba0_addr = pci_resource_start(pci, 0); chip->ba1_addr = pci_resource_start(pci, 1); if (chip->ba0_addr == 0 || chip->ba0_addr == (unsigned long)~0 || chip->ba1_addr == 0 || chip->ba1_addr == (unsigned long)~0) { snd_printk(KERN_ERR "wrong address(es) - ba0 = 0x%lx, ba1 = 0x%lx\n", chip->ba0_addr, chip->ba1_addr); snd_cs46xx_free(chip); return -ENOMEM; } region = &chip->region.name.ba0; strcpy(region->name, "CS46xx_BA0"); region->base = chip->ba0_addr; region->size = CS46XX_BA0_SIZE; region = &chip->region.name.data0; strcpy(region->name, "CS46xx_BA1_data0"); region->base = chip->ba1_addr + BA1_SP_DMEM0; region->size = CS46XX_BA1_DATA0_SIZE; 
region = &chip->region.name.data1; strcpy(region->name, "CS46xx_BA1_data1"); region->base = chip->ba1_addr + BA1_SP_DMEM1; region->size = CS46XX_BA1_DATA1_SIZE; region = &chip->region.name.pmem; strcpy(region->name, "CS46xx_BA1_pmem"); region->base = chip->ba1_addr + BA1_SP_PMEM; region->size = CS46XX_BA1_PRG_SIZE; region = &chip->region.name.reg; strcpy(region->name, "CS46xx_BA1_reg"); region->base = chip->ba1_addr + BA1_SP_REG; region->size = CS46XX_BA1_REG_SIZE; /* set up amp and clkrun hack */ pci_read_config_word(pci, PCI_SUBSYSTEM_VENDOR_ID, &ss_vendor); pci_read_config_word(pci, PCI_SUBSYSTEM_ID, &ss_card); for (cp = &cards[0]; cp->name; cp++) { if (cp->vendor == ss_vendor && cp->id == ss_card) { snd_printdd ("hack for %s enabled\n", cp->name); chip->amplifier_ctrl = cp->amp; chip->active_ctrl = cp->active; chip->mixer_init = cp->mixer_init; if (cp->init) cp->init(chip); break; } } if (external_amp) { snd_printk(KERN_INFO "Crystal EAPD support forced on.\n"); chip->amplifier_ctrl = amp_voyetra; } if (thinkpad) { snd_printk(KERN_INFO "Activating CLKRUN hack for Thinkpad.\n"); chip->active_ctrl = clkrun_hack; clkrun_init(chip); } if (chip->amplifier_ctrl == NULL) chip->amplifier_ctrl = amp_none; if (chip->active_ctrl == NULL) chip->active_ctrl = amp_none; chip->active_ctrl(chip, 1); /* enable CLKRUN */ pci_set_master(pci); for (idx = 0; idx < 5; idx++) { region = &chip->region.idx[idx]; if ((region->resource = request_mem_region(region->base, region->size, region->name)) == NULL) { snd_printk(KERN_ERR "unable to request memory region 0x%lx-0x%lx\n", region->base, region->base + region->size - 1); snd_cs46xx_free(chip); return -EBUSY; } region->remap_addr = ioremap_nocache(region->base, region->size); if (region->remap_addr == NULL) { snd_printk(KERN_ERR "%s ioremap problem\n", region->name); snd_cs46xx_free(chip); return -ENOMEM; } } if (request_irq(pci->irq, snd_cs46xx_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ 
%d\n", pci->irq); snd_cs46xx_free(chip); return -EBUSY; } chip->irq = pci->irq; #ifdef CONFIG_SND_CS46XX_NEW_DSP chip->dsp_spos_instance = cs46xx_dsp_spos_create(chip); if (chip->dsp_spos_instance == NULL) { snd_cs46xx_free(chip); return -ENOMEM; } #endif err = snd_cs46xx_chip_init(chip); if (err < 0) { snd_cs46xx_free(chip); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_cs46xx_free(chip); return err; } snd_cs46xx_proc_init(card, chip); #ifdef CONFIG_PM chip->saved_regs = kmalloc(sizeof(*chip->saved_regs) * ARRAY_SIZE(saved_regs), GFP_KERNEL); if (!chip->saved_regs) { snd_cs46xx_free(chip); return -ENOMEM; } #endif chip->active_ctrl(chip, -1); /* disable CLKRUN */ snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; }
gpl-2.0
jdheiner/SGH-T769_Kernel
kernel/lockdep.c
579
93680
/* * kernel/lockdep.c * * Runtime locking correctness validator * * Started by Ingo Molnar: * * Copyright (C) 2006,2007 Red Hat, Inc., Ingo Molnar <mingo@redhat.com> * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com> * * this code maps all the lock dependencies as they occur in a live kernel * and will warn about the following classes of locking bugs: * * - lock inversion scenarios * - circular lock dependencies * - hardirq/softirq safe/unsafe locking bugs * * Bugs are reported even if the current locking scenario does not cause * any deadlock at this point. * * I.e. if anytime in the past two locks were taken in a different order, * even if it happened for another task, even if those were different * locks (but of the same class as this lock), this code will detect it. * * Thanks to Arjan van de Ven for coming up with the initial idea of * mapping lock dependencies runtime. */ #define DISABLE_BRANCH_PROFILING #include <linux/mutex.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/spinlock.h> #include <linux/kallsyms.h> #include <linux/interrupt.h> #include <linux/stacktrace.h> #include <linux/debug_locks.h> #include <linux/irqflags.h> #include <linux/utsname.h> #include <linux/hash.h> #include <linux/ftrace.h> #include <linux/stringify.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <asm/sections.h> #include "lockdep_internals.h" #define CREATE_TRACE_POINTS #include <trace/events/lock.h> #ifdef CONFIG_PROVE_LOCKING int prove_locking = 1; module_param(prove_locking, int, 0644); #else #define prove_locking 0 #endif #ifdef CONFIG_LOCK_STAT int lock_stat = 1; module_param(lock_stat, int, 0644); #else #define lock_stat 0 #endif /* * lockdep_lock: protects the lockdep graph, the hashes and the * class/list/hash allocators. 
* * This is one of the rare exceptions where it's justified * to use a raw spinlock - we really dont want the spinlock * code to recurse back into the lockdep code... */ static arch_spinlock_t lockdep_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; static int graph_lock(void) { arch_spin_lock(&lockdep_lock); /* * Make sure that if another CPU detected a bug while * walking the graph we dont change it (while the other * CPU is busy printing out stuff with the graph lock * dropped already) */ if (!debug_locks) { arch_spin_unlock(&lockdep_lock); return 0; } /* prevent any recursions within lockdep from causing deadlocks */ current->lockdep_recursion++; return 1; } static inline int graph_unlock(void) { if (debug_locks && !arch_spin_is_locked(&lockdep_lock)) return DEBUG_LOCKS_WARN_ON(1); current->lockdep_recursion--; arch_spin_unlock(&lockdep_lock); return 0; } /* * Turn lock debugging off and return with 0 if it was off already, * and also release the graph lock: */ static inline int debug_locks_off_graph_unlock(void) { int ret = debug_locks_off(); arch_spin_unlock(&lockdep_lock); return ret; } static int lockdep_initialized; unsigned long nr_list_entries; static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; /* * All data structures here are protected by the global debug_lock. * * Mutex key structs only get allocated, once during bootup, and never * get freed - this significantly simplifies the debugging code. 
*/ unsigned long nr_lock_classes; static struct lock_class lock_classes[MAX_LOCKDEP_KEYS]; static inline struct lock_class *hlock_class(struct held_lock *hlock) { if (!hlock->class_idx) { DEBUG_LOCKS_WARN_ON(1); return NULL; } return lock_classes + hlock->class_idx - 1; } #ifdef CONFIG_LOCK_STAT static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], cpu_lock_stats); static inline u64 lockstat_clock(void) { return cpu_clock(smp_processor_id()); } static int lock_point(unsigned long points[], unsigned long ip) { int i; for (i = 0; i < LOCKSTAT_POINTS; i++) { if (points[i] == 0) { points[i] = ip; break; } if (points[i] == ip) break; } return i; } static void lock_time_inc(struct lock_time *lt, u64 time) { if (time > lt->max) lt->max = time; if (time < lt->min || !lt->nr) lt->min = time; lt->total += time; lt->nr++; } static inline void lock_time_add(struct lock_time *src, struct lock_time *dst) { if (!src->nr) return; if (src->max > dst->max) dst->max = src->max; if (src->min < dst->min || !dst->nr) dst->min = src->min; dst->total += src->total; dst->nr += src->nr; } struct lock_class_stats lock_stats(struct lock_class *class) { struct lock_class_stats stats; int cpu, i; memset(&stats, 0, sizeof(struct lock_class_stats)); for_each_possible_cpu(cpu) { struct lock_class_stats *pcs = &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; for (i = 0; i < ARRAY_SIZE(stats.contention_point); i++) stats.contention_point[i] += pcs->contention_point[i]; for (i = 0; i < ARRAY_SIZE(stats.contending_point); i++) stats.contending_point[i] += pcs->contending_point[i]; lock_time_add(&pcs->read_waittime, &stats.read_waittime); lock_time_add(&pcs->write_waittime, &stats.write_waittime); lock_time_add(&pcs->read_holdtime, &stats.read_holdtime); lock_time_add(&pcs->write_holdtime, &stats.write_holdtime); for (i = 0; i < ARRAY_SIZE(stats.bounces); i++) stats.bounces[i] += pcs->bounces[i]; } return stats; } void clear_lock_stats(struct lock_class *class) { int cpu; 
for_each_possible_cpu(cpu) { struct lock_class_stats *cpu_stats = &per_cpu(cpu_lock_stats, cpu)[class - lock_classes]; memset(cpu_stats, 0, sizeof(struct lock_class_stats)); } memset(class->contention_point, 0, sizeof(class->contention_point)); memset(class->contending_point, 0, sizeof(class->contending_point)); } static struct lock_class_stats *get_lock_stats(struct lock_class *class) { return &get_cpu_var(cpu_lock_stats)[class - lock_classes]; } static void put_lock_stats(struct lock_class_stats *stats) { put_cpu_var(cpu_lock_stats); } static void lock_release_holdtime(struct held_lock *hlock) { struct lock_class_stats *stats; u64 holdtime; if (!lock_stat) return; holdtime = lockstat_clock() - hlock->holdtime_stamp; stats = get_lock_stats(hlock_class(hlock)); if (hlock->read) lock_time_inc(&stats->read_holdtime, holdtime); else lock_time_inc(&stats->write_holdtime, holdtime); put_lock_stats(stats); } #else static inline void lock_release_holdtime(struct held_lock *hlock) { } #endif /* * We keep a global list of all lock classes. The list only grows, * never shrinks. The list is only accessed with the lockdep * spinlock lock held. 
*/ LIST_HEAD(all_lock_classes); /* * The lockdep classes are in a hash-table as well, for fast lookup: */ #define CLASSHASH_BITS (MAX_LOCKDEP_KEYS_BITS - 1) #define CLASSHASH_SIZE (1UL << CLASSHASH_BITS) #define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS) #define classhashentry(key) (classhash_table + __classhashfn((key))) static struct list_head classhash_table[CLASSHASH_SIZE]; /* * We put the lock dependency chains into a hash-table as well, to cache * their existence: */ #define CHAINHASH_BITS (MAX_LOCKDEP_CHAINS_BITS-1) #define CHAINHASH_SIZE (1UL << CHAINHASH_BITS) #define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS) #define chainhashentry(chain) (chainhash_table + __chainhashfn((chain))) static struct list_head chainhash_table[CHAINHASH_SIZE]; /* * The hash key of the lock dependency chains is a hash itself too: * it's a hash of all locks taken up to that lock, including that lock. * It's a 64-bit hash, because it's important for the keys to be * unique. */ #define iterate_chain_key(key1, key2) \ (((key1) << MAX_LOCKDEP_KEYS_BITS) ^ \ ((key1) >> (64-MAX_LOCKDEP_KEYS_BITS)) ^ \ (key2)) void lockdep_off(void) { current->lockdep_recursion++; } EXPORT_SYMBOL(lockdep_off); void lockdep_on(void) { current->lockdep_recursion--; } EXPORT_SYMBOL(lockdep_on); /* * Debugging switches: */ #define VERBOSE 0 #define VERY_VERBOSE 0 #if VERBOSE # define HARDIRQ_VERBOSE 1 # define SOFTIRQ_VERBOSE 1 # define RECLAIM_VERBOSE 1 #else # define HARDIRQ_VERBOSE 0 # define SOFTIRQ_VERBOSE 0 # define RECLAIM_VERBOSE 0 #endif #if VERBOSE || HARDIRQ_VERBOSE || SOFTIRQ_VERBOSE || RECLAIM_VERBOSE /* * Quick filtering for interesting events: */ static int class_filter(struct lock_class *class) { #if 0 /* Example */ if (class->name_version == 1 && !strcmp(class->name, "lockname")) return 1; if (class->name_version == 1 && !strcmp(class->name, "&struct->lockfield")) return 1; #endif /* Filter everything else. 
1 would be to allow everything else */ return 0; } #endif static int verbose(struct lock_class *class) { #if VERBOSE return class_filter(class); #endif return 0; } /* * Stack-trace: tightly packed array of stack backtrace * addresses. Protected by the graph_lock. */ unsigned long nr_stack_trace_entries; static unsigned long stack_trace[MAX_STACK_TRACE_ENTRIES]; static int save_trace(struct stack_trace *trace) { trace->nr_entries = 0; trace->max_entries = MAX_STACK_TRACE_ENTRIES - nr_stack_trace_entries; trace->entries = stack_trace + nr_stack_trace_entries; trace->skip = 3; save_stack_trace(trace); /* * Some daft arches put -1 at the end to indicate its a full trace. * * <rant> this is buggy anyway, since it takes a whole extra entry so a * complete trace that maxes out the entries provided will be reported * as incomplete, friggin useless </rant> */ if (trace->nr_entries != 0 && trace->entries[trace->nr_entries-1] == ULONG_MAX) trace->nr_entries--; trace->max_entries = trace->nr_entries; nr_stack_trace_entries += trace->nr_entries; if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) { if (!debug_locks_off_graph_unlock()) return 0; printk("BUG: MAX_STACK_TRACE_ENTRIES too low!\n"); printk("turning off the locking correctness validator.\n"); dump_stack(); return 0; } return 1; } unsigned int nr_hardirq_chains; unsigned int nr_softirq_chains; unsigned int nr_process_chains; unsigned int max_lockdep_depth; #ifdef CONFIG_DEBUG_LOCKDEP /* * We cannot printk in early bootup code. Not even early_printk() * might work. So we mark any initialization errors and printk * about it later on, in lockdep_info(). 
*/ static int lockdep_init_error; static unsigned long lockdep_init_trace_data[20]; static struct stack_trace lockdep_init_trace = { .max_entries = ARRAY_SIZE(lockdep_init_trace_data), .entries = lockdep_init_trace_data, }; /* * Various lockdep statistics: */ DEFINE_PER_CPU(struct lockdep_stats, lockdep_stats); #endif /* * Locking printouts: */ #define __USAGE(__STATE) \ [LOCK_USED_IN_##__STATE] = "IN-"__stringify(__STATE)"-W", \ [LOCK_ENABLED_##__STATE] = __stringify(__STATE)"-ON-W", \ [LOCK_USED_IN_##__STATE##_READ] = "IN-"__stringify(__STATE)"-R",\ [LOCK_ENABLED_##__STATE##_READ] = __stringify(__STATE)"-ON-R", static const char *usage_str[] = { #define LOCKDEP_STATE(__STATE) __USAGE(__STATE) #include "lockdep_states.h" #undef LOCKDEP_STATE [LOCK_USED] = "INITIAL USE", }; const char * __get_key_name(struct lockdep_subclass_key *key, char *str) { return kallsyms_lookup((unsigned long)key, NULL, NULL, NULL, str); } static inline unsigned long lock_flag(enum lock_usage_bit bit) { return 1UL << bit; } static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit) { char c = '.'; if (class->usage_mask & lock_flag(bit + 2)) c = '+'; if (class->usage_mask & lock_flag(bit)) { c = '-'; if (class->usage_mask & lock_flag(bit + 2)) c = '?'; } return c; } void get_usage_chars(struct lock_class *class, char usage[LOCK_USAGE_CHARS]) { int i = 0; #define LOCKDEP_STATE(__STATE) \ usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE); \ usage[i++] = get_usage_char(class, LOCK_USED_IN_##__STATE##_READ); #include "lockdep_states.h" #undef LOCKDEP_STATE usage[i] = '\0'; } static void print_lock_name(struct lock_class *class) { char str[KSYM_NAME_LEN], usage[LOCK_USAGE_CHARS]; const char *name; get_usage_chars(class, usage); name = class->name; if (!name) { name = __get_key_name(class->key, str); printk(" (%s", name); } else { printk(" (%s", name); if (class->name_version > 1) printk("#%d", class->name_version); if (class->subclass) printk("/%d", class->subclass); 
} printk("){%s}", usage); } static void print_lockdep_cache(struct lockdep_map *lock) { const char *name; char str[KSYM_NAME_LEN]; name = lock->name; if (!name) name = __get_key_name(lock->key->subkeys, str); printk("%s", name); } static void print_lock(struct held_lock *hlock) { print_lock_name(hlock_class(hlock)); printk(", at: "); print_ip_sym(hlock->acquire_ip); } static void lockdep_print_held_locks(struct task_struct *curr) { int i, depth = curr->lockdep_depth; if (!depth) { printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr)); return; } printk("%d lock%s held by %s/%d:\n", depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr)); for (i = 0; i < depth; i++) { printk(" #%d: ", i); print_lock(curr->held_locks + i); } } static void print_kernel_version(void) { printk("%s %.*s\n", init_utsname()->release, (int)strcspn(init_utsname()->version, " "), init_utsname()->version); } static int very_verbose(struct lock_class *class) { #if VERY_VERBOSE return class_filter(class); #endif return 0; } /* * Is this the address of a static object: */ static int static_obj(void *obj) { unsigned long start = (unsigned long) &_stext, end = (unsigned long) &_end, addr = (unsigned long) obj; /* * static variable? */ if ((addr >= start) && (addr < end)) return 1; if (arch_is_kernel_data(addr)) return 1; /* * in-kernel percpu var? */ if (is_kernel_percpu_address(addr)) return 1; /* * module static or percpu var? 
*/ return is_module_address(addr) || is_module_percpu_address(addr); } /* * To make lock name printouts unique, we calculate a unique * class->name_version generation counter: */ static int count_matching_names(struct lock_class *new_class) { struct lock_class *class; int count = 0; if (!new_class->name) return 0; list_for_each_entry(class, &all_lock_classes, lock_entry) { if (new_class->key - new_class->subclass == class->key) return class->name_version; if (class->name && !strcmp(class->name, new_class->name)) count = max(count, class->name_version); } return count + 1; } /* * Register a lock's class in the hash-table, if the class is not present * yet. Otherwise we look it up. We cache the result in the lock object * itself, so actual lookup of the hash should be once per lock object. */ static inline struct lock_class * look_up_lock_class(struct lockdep_map *lock, unsigned int subclass) { struct lockdep_subclass_key *key; struct list_head *hash_head; struct lock_class *class; #ifdef CONFIG_DEBUG_LOCKDEP /* * If the architecture calls into lockdep before initializing * the hashes then we'll warn about it later. (we cannot printk * right now) */ if (unlikely(!lockdep_initialized)) { lockdep_init(); lockdep_init_error = 1; save_stack_trace(&lockdep_init_trace); } #endif /* * Static locks do not have their class-keys yet - for them the key * is the lock object itself: */ if (unlikely(!lock->key)) lock->key = (void *)lock; /* * NOTE: the class-key must be unique. For dynamic locks, a static * lock_class_key variable is passed in through the mutex_init() * (or spin_lock_init()) call - which acts as the key. For static * locks we use the lock object itself as the key. 
*/ BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lockdep_map)); key = lock->key->subkeys + subclass; hash_head = classhashentry(key); /* * We can walk the hash lockfree, because the hash only * grows, and we are careful when adding entries to the end: */ list_for_each_entry(class, hash_head, hash_entry) { if (class->key == key) { WARN_ON_ONCE(class->name != lock->name); return class; } } return NULL; } /* * Register a lock's class in the hash-table, if the class is not present * yet. Otherwise we look it up. We cache the result in the lock object * itself, so actual lookup of the hash should be once per lock object. */ static inline struct lock_class * register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) { struct lockdep_subclass_key *key; struct list_head *hash_head; struct lock_class *class; unsigned long flags; class = look_up_lock_class(lock, subclass); if (likely(class)) return class; /* * Debug-check: all keys must be persistent! */ if (!static_obj(lock->key)) { debug_locks_off(); printk("INFO: trying to register non-static key.\n"); printk("the code is fine but needs lockdep annotation.\n"); printk("turning off the locking correctness validator.\n"); dump_stack(); return NULL; } key = lock->key->subkeys + subclass; hash_head = classhashentry(key); raw_local_irq_save(flags); if (!graph_lock()) { raw_local_irq_restore(flags); return NULL; } /* * We have to do the hash-walk again, to avoid races * with another CPU: */ list_for_each_entry(class, hash_head, hash_entry) if (class->key == key) goto out_unlock_set; /* * Allocate a new key from the static array, and add it to * the hash: */ if (nr_lock_classes >= MAX_LOCKDEP_KEYS) { if (!debug_locks_off_graph_unlock()) { raw_local_irq_restore(flags); return NULL; } raw_local_irq_restore(flags); printk("BUG: MAX_LOCKDEP_KEYS too low!\n"); printk("turning off the locking correctness validator.\n"); dump_stack(); return NULL; } class = lock_classes + nr_lock_classes++; 
debug_atomic_inc(nr_unused_locks); class->key = key; class->name = lock->name; class->subclass = subclass; INIT_LIST_HEAD(&class->lock_entry); INIT_LIST_HEAD(&class->locks_before); INIT_LIST_HEAD(&class->locks_after); class->name_version = count_matching_names(class); /* * We use RCU's safe list-add method to make * parallel walking of the hash-list safe: */ list_add_tail_rcu(&class->hash_entry, hash_head); /* * Add it to the global list of classes: */ list_add_tail_rcu(&class->lock_entry, &all_lock_classes); if (verbose(class)) { graph_unlock(); raw_local_irq_restore(flags); printk("\nnew class %p: %s", class->key, class->name); if (class->name_version > 1) printk("#%d", class->name_version); printk("\n"); dump_stack(); raw_local_irq_save(flags); if (!graph_lock()) { raw_local_irq_restore(flags); return NULL; } } out_unlock_set: graph_unlock(); raw_local_irq_restore(flags); if (!subclass || force) lock->class_cache = class; if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass)) return NULL; return class; } #ifdef CONFIG_PROVE_LOCKING /* * Allocate a lockdep entry. 
(assumes the graph_lock held, returns * with NULL on failure) */ static struct lock_list *alloc_list_entry(void) { if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) { if (!debug_locks_off_graph_unlock()) return NULL; printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n"); printk("turning off the locking correctness validator.\n"); dump_stack(); return NULL; } return list_entries + nr_list_entries++; } /* * Add a new dependency to the head of the list: */ static int add_lock_to_list(struct lock_class *class, struct lock_class *this, struct list_head *head, unsigned long ip, int distance, struct stack_trace *trace) { struct lock_list *entry; /* * Lock not present yet - get a new dependency struct and * add it to the list: */ entry = alloc_list_entry(); if (!entry) return 0; entry->class = this; entry->distance = distance; entry->trace = *trace; /* * Since we never remove from the dependency list, the list can * be walked lockless by other CPUs, it's only allocation * that must be protected by the spinlock. But this also means * we must make new entries visible only once writes to the * entry become visible - hence the RCU op: */ list_add_tail_rcu(&entry->entry, head); return 1; } /* * For good efficiency of modular, we use power of 2 */ #define MAX_CIRCULAR_QUEUE_SIZE 4096UL #define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1) /* * The circular_queue and helpers is used to implement the * breadth-first search(BFS)algorithem, by which we can build * the shortest path from the next lock to be acquired to the * previous held lock if there is a circular between them. 
*/ struct circular_queue { unsigned long element[MAX_CIRCULAR_QUEUE_SIZE]; unsigned int front, rear; }; static struct circular_queue lock_cq; unsigned int max_bfs_queue_depth; static unsigned int lockdep_dependency_gen_id; static inline void __cq_init(struct circular_queue *cq) { cq->front = cq->rear = 0; lockdep_dependency_gen_id++; } static inline int __cq_empty(struct circular_queue *cq) { return (cq->front == cq->rear); } static inline int __cq_full(struct circular_queue *cq) { return ((cq->rear + 1) & CQ_MASK) == cq->front; } static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem) { if (__cq_full(cq)) return -1; cq->element[cq->rear] = elem; cq->rear = (cq->rear + 1) & CQ_MASK; return 0; } static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem) { if (__cq_empty(cq)) return -1; *elem = cq->element[cq->front]; cq->front = (cq->front + 1) & CQ_MASK; return 0; } static inline unsigned int __cq_get_elem_count(struct circular_queue *cq) { return (cq->rear - cq->front) & CQ_MASK; } static inline void mark_lock_accessed(struct lock_list *lock, struct lock_list *parent) { unsigned long nr; nr = lock - list_entries; WARN_ON(nr >= nr_list_entries); lock->parent = parent; lock->class->dep_gen_id = lockdep_dependency_gen_id; } static inline unsigned long lock_accessed(struct lock_list *lock) { unsigned long nr; nr = lock - list_entries; WARN_ON(nr >= nr_list_entries); return lock->class->dep_gen_id == lockdep_dependency_gen_id; } static inline struct lock_list *get_lock_parent(struct lock_list *child) { return child->parent; } static inline int get_lock_depth(struct lock_list *child) { int depth = 0; struct lock_list *parent; while ((parent = get_lock_parent(child))) { child = parent; depth++; } return depth; } static int __bfs(struct lock_list *source_entry, void *data, int (*match)(struct lock_list *entry, void *data), struct lock_list **target_entry, int forward) { struct lock_list *entry; struct list_head *head; struct 
/*
 * NOTE(review): this chunk begins mid-way through __bfs() -- the function
 * header (source_entry, data, match, target_entry, forward) and the
 * declarations of 'entry' and 'head', plus the "struct" keyword of the
 * declaration below, all lie above this chunk.  __bfs() walks the lock
 * dependency graph breadth-first from source_entry, in the direction
 * selected by 'forward', looking for a node that satisfies match().
 * Returns 0 on match (node in *target_entry), 1 on no match,
 * -1 on queue overflow, -2 on a corrupted (classless) entry.
 */
circular_queue *cq = &lock_cq;
	int ret = 1;

	if (match(source_entry, data)) {
		*target_entry = source_entry;
		ret = 0;
		goto exit;
	}

	if (forward)
		head = &source_entry->class->locks_after;
	else
		head = &source_entry->class->locks_before;

	if (list_empty(head))
		goto exit;

	__cq_init(cq);
	__cq_enqueue(cq, (unsigned long)source_entry);

	while (!__cq_empty(cq)) {
		struct lock_list *lock;

		__cq_dequeue(cq, (unsigned long *)&lock);

		/* a dequeued entry without a class means graph corruption */
		if (!lock->class) {
			ret = -2;
			goto exit;
		}

		if (forward)
			head = &lock->class->locks_after;
		else
			head = &lock->class->locks_before;

		list_for_each_entry(entry, head, entry) {
			if (!lock_accessed(entry)) {
				unsigned int cq_depth;
				mark_lock_accessed(entry, lock);
				if (match(entry, data)) {
					*target_entry = entry;
					ret = 0;
					goto exit;
				}

				if (__cq_enqueue(cq, (unsigned long)entry)) {
					/* circular queue full: abort search */
					ret = -1;
					goto exit;
				}
				/* track the deepest queue use, for statistics */
				cq_depth = __cq_get_elem_count(cq);
				if (max_bfs_queue_depth < cq_depth)
					max_bfs_queue_depth = cq_depth;
			}
		}
	}
exit:
	return ret;
}

/* BFS in the locks_after (dependency) direction. */
static inline int __bfs_forwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 1);
}

/* BFS in the locks_before (dependee) direction. */
static inline int __bfs_backwards(struct lock_list *src_entry,
			void *data,
			int (*match)(struct lock_list *entry, void *data),
			struct lock_list **target_entry)
{
	return __bfs(src_entry, data, match, target_entry, 0);
}

/*
 * Recursive, forwards-direction lock-dependency checking, used for
 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
 * checking.
 */

/*
 * Print a dependency chain entry (this is only done when a deadlock
 * has been detected):
 */
static noinline int
print_circular_bug_entry(struct lock_list *target, int depth)
{
	if (debug_locks_silent)
		return 0;
	printk("\n-> #%u", depth);
	print_lock_name(target->class);
	printk(":\n");
	print_stack_trace(&target->trace, 6);

	return 0;
}

/*
 * When a circular dependency is detected, print the
 * header first:
 */
static noinline int
print_circular_bug_header(struct lock_list *entry, unsigned int depth,
			struct held_lock *check_src,
			struct held_lock *check_tgt)
{
	struct task_struct *curr = current;

	if (debug_locks_silent)
		return 0;

	printk("\n=======================================================\n");
	printk(  "[ INFO: possible circular locking dependency detected ]\n");
	print_kernel_version();
	printk(  "-------------------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(check_src);
	printk("\nbut task is already holding lock:\n");
	print_lock(check_tgt);
	printk("\nwhich lock already depends on the new lock.\n\n");
	printk("\nthe existing dependency chain (in reverse order) is:\n");

	print_circular_bug_entry(entry, depth);

	return 0;
}

/* __bfs() match function: true when the entry is of the given class. */
static inline int class_equal(struct lock_list *entry, void *data)
{
	return entry->class == data;
}

/*
 * Report a circular dependency: walk the BFS parent pointers back from
 * the target and print the whole offending chain.  Always returns 0.
 */
static noinline int print_circular_bug(struct lock_list *this,
				struct lock_list *target,
				struct held_lock *check_src,
				struct held_lock *check_tgt)
{
	struct task_struct *curr = current;
	struct lock_list *parent;
	int depth;

	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	if (!save_trace(&this->trace))
		return 0;

	depth = get_lock_depth(target);

	print_circular_bug_header(target, depth, check_src, check_tgt);

	parent = get_lock_parent(target);

	while (parent) {
		print_circular_bug_entry(parent, --depth);
		parent = get_lock_parent(parent);
	}

	printk("\nother info that might help us debug this:\n\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/* Report an internal BFS failure (overflow/corruption); returns 0. */
static noinline int print_bfs_bug(int ret)
{
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN(1, "lockdep bfs error:%d\n", ret);

	return 0;
}

/* __bfs() match function that never matches; counts visited entries. */
static int noop_count(struct lock_list *entry, void *data)
{
	(*(unsigned long *)data)++;
	return 0;
}

/* Count all forward dependencies reachable from 'this'. */
unsigned long __lockdep_count_forward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

/* Locked wrapper around __lockdep_count_forward_deps(). */
unsigned long lockdep_count_forward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_forward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

/* Count all backward dependencies reachable from 'this'. */
unsigned long __lockdep_count_backward_deps(struct lock_list *this)
{
	unsigned long count = 0;
	struct lock_list *uninitialized_var(target_entry);

	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);

	return count;
}

/* Locked wrapper around __lockdep_count_backward_deps(). */
unsigned long lockdep_count_backward_deps(struct lock_class *class)
{
	unsigned long ret, flags;
	struct lock_list this;

	this.parent = NULL;
	this.class = class;

	local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	ret = __lockdep_count_backward_deps(&this);
	arch_spin_unlock(&lockdep_lock);
	local_irq_restore(flags);

	return ret;
}

/*
 * Prove that the dependency graph starting at <entry> can not
 * lead to <target>. Print an error and return 0 if it does.
 */
static noinline int
check_noncircular(struct lock_list *root, struct lock_class *target,
		struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_cyclic_checks);

	result = __bfs_forwards(root, target, class_equal, target_entry);

	return result;
}

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
/*
 * Forwards and backwards subgraph searching, for the purposes of
 * proving that two subgraphs can be connected by a new dependency
 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
 */

/* __bfs() match function: true when the class has the given usage bit. */
static inline int usage_match(struct lock_list *entry, void *bit)
{
	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
}

/*
 * Find a node in the forwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_forwards_checks);

	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

/*
 * Find a node in the backwards-direction dependency sub-graph starting
 * at @root->class that matches @bit.
 *
 * Return 0 if such a node exists in the subgraph, and put that node
 * into *@target_entry.
 *
 * Return 1 otherwise and keep *@target_entry unchanged.
 * Return <0 on error.
 */
static int
find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
			struct lock_list **target_entry)
{
	int result;

	debug_atomic_inc(nr_find_usage_backwards_checks);

	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);

	return result;
}

/*
 * Print the name, ops count, usage bits (with the stack trace that set
 * each bit) and key address of a lock class, indented by 'depth'.
 */
static void print_lock_class_header(struct lock_class *class, int depth)
{
	int bit;

	printk("%*s->", depth, "");
	print_lock_name(class);
	printk(" ops: %lu", class->ops);
	printk(" {\n");

	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
		if (class->usage_mask & (1 << bit)) {
			int len = depth;

			len += printk("%*s %s", depth, "", usage_str[bit]);
			len += printk(" at:\n");
			print_stack_trace(class->usage_traces + bit, len);
		}
	}
	printk("%*s }\n", depth, "");

	printk("%*s ... key at: ",depth,"");
	print_ip_sym((unsigned long)class->key);
}

/*
 * printk the shortest lock dependencies from @start to @end in reverse order:
 */
static void __used
print_shortest_lock_dependencies(struct lock_list *leaf,
				struct lock_list *root)
{
	struct lock_list *entry = leaf;
	int depth;

	/*compute depth from generated tree by BFS*/
	depth = get_lock_depth(leaf);

	do {
		print_lock_class_header(entry->class, depth);
		printk("%*s ... acquired at:\n", depth, "");
		print_stack_trace(&entry->trace, 2);
		printk("\n");

		/* a depth-0 entry that isn't the root means a corrupt tree */
		if (depth == 0 && (entry != root)) {
			printk("lockdep:%s bad BFS generated tree\n", __func__);
			break;
		}

		entry = get_lock_parent(entry);
		depth--;
	} while (entry && (depth >= 0));

	return;
}

/*
 * Report an irq-safe -> irq-unsafe dependency: print both offending
 * locks, the traces where they acquired their irq states, and the
 * shortest dependency paths connecting them.  Always returns 0.
 */
static int
print_bad_irq_dependency(struct task_struct *curr,
			 struct lock_list *prev_root,
			 struct lock_list *next_root,
			 struct lock_list *backwards_entry,
			 struct lock_list *forwards_entry,
			 struct held_lock *prev,
			 struct held_lock *next,
			 enum lock_usage_bit bit1,
			 enum lock_usage_bit bit2,
			 const char *irqclass)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n======================================================\n");
	printk(  "[ INFO: %s-safe -> %s-unsafe lock order detected ]\n",
		irqclass, irqclass);
	print_kernel_version();
	printk(  "------------------------------------------------------\n");
	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] is trying to acquire:\n",
		curr->comm, task_pid_nr(curr),
		curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT,
		curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT,
		curr->hardirqs_enabled,
		curr->softirqs_enabled);
	print_lock(next);
	printk("\nand this task is already holding:\n");
	print_lock(prev);
	printk("which would create a new lock dependency:\n");
	print_lock_name(hlock_class(prev));
	printk(" ->");
	print_lock_name(hlock_class(next));
	printk("\n");

	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
		irqclass);
	print_lock_name(backwards_entry->class);
	printk("\n... which became %s-irq-safe at:\n", irqclass);

	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);

	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
	print_lock_name(forwards_entry->class);
	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
	printk("...");

	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);

	printk("\nother info that might help us debug this:\n\n");
	lockdep_print_held_locks(curr);

	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
	printk(" and the holding lock:\n");
	if (!save_trace(&prev_root->trace))
		return 0;
	print_shortest_lock_dependencies(backwards_entry, prev_root);

	printk("\nthe dependencies between the lock to be acquired");
	printk(" and %s-irq-unsafe lock:\n", irqclass);
	if (!save_trace(&next_root->trace))
		return 0;
	print_shortest_lock_dependencies(forwards_entry, next_root);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Check that adding prev -> next does not connect a lock that has
 * 'bit_backwards' usage in prev's backwards subgraph with a lock that
 * has 'bit_forwards' usage in next's forwards subgraph.  Returns 1 if
 * no such connection exists, 0 (after reporting) if one does, and
 * propagates BFS errors via print_bfs_bug().
 */
static int
check_usage(struct task_struct *curr, struct held_lock *prev,
	    struct held_lock *next, enum lock_usage_bit bit_backwards,
	    enum lock_usage_bit bit_forwards, const char *irqclass)
{
	int ret;
	struct lock_list this, that;
	struct lock_list *uninitialized_var(target_entry);
	struct lock_list *uninitialized_var(target_entry1);

	this.parent = NULL;

	this.class = hlock_class(prev);
	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	that.parent = NULL;
	that.class = hlock_class(next);
	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_bad_irq_dependency(curr, &this, &that,
			target_entry, target_entry1,
			prev, next,
			bit_backwards, bit_forwards, irqclass);
}

/* Printable names for each lockdep state, write and read variants. */
static const char *state_names[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE),
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static const char *state_rnames[] = {
#define LOCKDEP_STATE(__STATE) \
	__stringify(__STATE)"-READ",
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

/* bit 0 of a usage bit selects the read variant; bits 2+ the state. */
static inline const char *state_name(enum lock_usage_bit bit)
{
	return (bit & 1) ? state_rnames[bit >> 2] : state_names[bit >> 2];
}

static int exclusive_bit(int new_bit)
{
	/*
	 * USED_IN
	 * USED_IN_READ
	 * ENABLED
	 * ENABLED_READ
	 *
	 * bit 0 - write/read
	 * bit 1 - used_in/enabled
	 * bit 2+  state
	 */

	int state = new_bit & ~3;
	int dir = new_bit & 2;

	/*
	 * keep state, bit flip the direction and strip read.
	 */
	return state | (dir ^ 2);
}

/*
 * Validate the prev -> next dependency against one irq state 'bit',
 * for both the plain and the _READ variant of that state.
 */
static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
			   struct held_lock *next, enum lock_usage_bit bit)
{
	/*
	 * Prove that the new dependency does not connect a hardirq-safe
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	bit++; /* _READ */

	/*
	 * Prove that the new dependency does not connect a hardirq-safe-read
	 * lock with a hardirq-unsafe lock - to achieve this we search
	 * the backwards-subgraph starting at <prev>, and the
	 * forwards-subgraph starting at <next>:
	 */
	if (!check_usage(curr, prev, next, bit,
			   exclusive_bit(bit), state_name(bit)))
		return 0;

	return 1;
}

/* Run check_irq_usage() for every lockdep irq state. */
static int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
#define LOCKDEP_STATE(__STATE)						\
	if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE))	\
		return 0;
#include "lockdep_states.h"
#undef LOCKDEP_STATE

	return 1;
}

/* Account the new chain to the statistics bucket of its irq context. */
static void inc_chains(void)
{
	if (current->hardirq_context)
		nr_hardirq_chains++;
	else {
		if (current->softirq_context)
			nr_softirq_chains++;
		else
			nr_process_chains++;
	}
}

#else

static inline int
check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
		struct held_lock *next)
{
	return 1;
}

static inline void inc_chains(void)
{
	nr_process_chains++;
}

#endif

/* Report a same-class recursive locking attempt; always returns 0. */
static int
print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
		   struct held_lock *next)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n=============================================\n");
	printk(  "[ INFO: possible recursive locking detected ]\n");
	print_kernel_version();
	printk(  "---------------------------------------------\n");
	printk("%s/%d is trying to acquire lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(next);
	printk("\nbut task is already holding lock:\n");
	print_lock(prev);

	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Check whether we are holding such a class already.
 *
 * (Note that this has to be done separately, because the graph cannot
 * detect such classes of deadlocks.)
 *
 * Returns: 0 on deadlock detected, 1 on OK, 2 on recursive read
 */
static int
check_deadlock(struct task_struct *curr, struct held_lock *next,
	       struct lockdep_map *next_instance, int read)
{
	struct held_lock *prev;
	struct held_lock *nest = NULL;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		prev = curr->held_locks + i;

		if (prev->instance == next->nest_lock)
			nest = prev;

		if (hlock_class(prev) != hlock_class(next))
			continue;

		/*
		 * Allow read-after-read recursion of the same
		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
		 */
		if ((read == 2) && prev->read)
			return 2;

		/*
		 * We're holding the nest_lock, which serializes this lock's
		 * nesting behaviour.
		 */
		if (nest)
			return 2;

		return print_deadlock_bug(curr, prev, next);
	}
	return 1;
}

/*
 * There was a chain-cache miss, and we are about to add a new dependency
 * to a previous lock. We recursively validate the following rules:
 *
 *  - would the adding of the <prev> -> <next> dependency create a
 *    circular dependency in the graph? [== circular deadlock]
 *
 *  - does the new prev->next dependency connect any hardirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    hardirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with hardirq contexts]
 *
 *  - does the new prev->next dependency connect any softirq-safe lock
 *    (in the full backwards-subgraph starting at <prev>) with any
 *    softirq-unsafe lock (in the full forwards-subgraph starting at
 *    <next>)? [== illegal lock inversion with softirq contexts]
 *
 * any of these scenarios could lead to a deadlock.
 *
 * Then if all the validations pass, we add the forwards and backwards
 * dependency.
 */
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
	       struct held_lock *next, int distance, int trylock_loop)
{
	struct lock_list *entry;
	int ret;
	struct lock_list this;
	struct lock_list *uninitialized_var(target_entry);
	/*
	 * Static variable, serialized by the graph_lock().
	 *
	 * We use this static variable to save the stack trace in case
	 * we call into this function multiple times due to encountering
	 * trylocks in the held lock stack.
	 */
	static struct stack_trace trace;

	/*
	 * Prove that the new <prev> -> <next> dependency would not
	 * create a circular dependency in the graph. (We do this by
	 * forward-recursing into the graph starting at <next>, and
	 * checking whether we can reach <prev>.)
	 *
	 * We are using global variables to control the recursion, to
	 * keep the stackframe size of the recursive functions low:
	 */
	this.class = hlock_class(next);
	this.parent = NULL;
	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
	if (unlikely(!ret))
		return print_circular_bug(&this, target_entry, next, prev);
	else if (unlikely(ret < 0))
		return print_bfs_bug(ret);

	if (!check_prev_add_irq(curr, prev, next))
		return 0;

	/*
	 * For recursive read-locks we do all the dependency checks,
	 * but we don't store read-triggered dependencies (only
	 * write-triggered dependencies). This ensures that only the
	 * write-side dependencies matter, and that if for example a
	 * write-lock never takes any other locks, then the reads are
	 * equivalent to a NOP.
	 */
	if (next->read == 2 || prev->read == 2)
		return 1;
	/*
	 * Is the <prev> -> <next> dependency already present?
	 *
	 * (this may occur even though this is a new chain: consider
	 *  e.g. the L1 -> L2 -> L3 -> L4 and the L5 -> L1 -> L2 -> L3
	 *  chains - the second one will be new, but L1 already has
	 *  L2 added to its dependency list, due to the first chain.)
	 */
	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
		if (entry->class == hlock_class(next)) {
			if (distance == 1)
				entry->distance = 1;
			return 2;
		}
	}

	/* only save the (expensive) trace once per trylock loop */
	if (!trylock_loop && !save_trace(&trace))
		return 0;

	/*
	 * Ok, all validations passed, add the new lock
	 * to the previous lock's dependency list:
	 */
	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
			       &hlock_class(prev)->locks_after,
			       next->acquire_ip, distance, &trace);

	if (!ret)
		return 0;

	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
			       &hlock_class(next)->locks_before,
			       next->acquire_ip, distance, &trace);
	if (!ret)
		return 0;

	/*
	 * Debugging printouts:
	 */
	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
		graph_unlock();
		printk("\n new dependency: ");
		print_lock_name(hlock_class(prev));
		printk(" => ");
		print_lock_name(hlock_class(next));
		printk("\n");
		dump_stack();
		return graph_lock();
	}
	return 1;
}

/*
 * Add the dependency to all directly-previous locks that are 'relevant'.
 * The ones that are relevant are (in increasing distance from curr):
 * all consecutive trylock entries and the final non-trylock entry - or
 * the end of this context's lock-chain - whichever comes first.
 */
static int
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
	int depth = curr->lockdep_depth;
	int trylock_loop = 0;
	struct held_lock *hlock;

	/*
	 * Debugging checks.
	 *
	 * Depth must not be zero for a non-head lock:
	 */
	if (!depth)
		goto out_bug;
	/*
	 * At least two relevant locks must exist for this
	 * to be a head:
	 */
	if (curr->held_locks[depth].irq_context !=
			curr->held_locks[depth-1].irq_context)
		goto out_bug;

	for (;;) {
		int distance = curr->lockdep_depth - depth + 1;
		hlock = curr->held_locks + depth-1;
		/*
		 * Only non-recursive-read entries get new dependencies
		 * added:
		 */
		if (hlock->read != 2) {
			if (!check_prev_add(curr, hlock, next,
						distance, trylock_loop))
				return 0;
			/*
			 * Stop after the first non-trylock entry,
			 * as non-trylock entries have added their
			 * own direct dependencies already, so this
			 * lock is connected to them indirectly:
			 */
			if (!hlock->trylock)
				break;
		}
		depth--;
		/*
		 * End of lock-stack?
		 */
		if (!depth)
			break;
		/*
		 * Stop the search if we cross into another context:
		 */
		if (curr->held_locks[depth].irq_context !=
				curr->held_locks[depth-1].irq_context)
			break;
		trylock_loop = 1;
	}
	return 1;
out_bug:
	if (!debug_locks_off_graph_unlock())
		return 0;

	WARN_ON(1);

	return 0;
}

/* Static storage for validated lock chains and their per-lock entries. */
unsigned long nr_lock_chains;
struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
int nr_chain_hlocks;
static u16 chain_hlocks[MAX_LOCKDEP_CHAIN_HLOCKS];

/* Return the i-th lock class recorded in a cached chain. */
struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
{
	return lock_classes + chain_hlocks[chain->base + i];
}

/*
 * Look up a dependency chain. If the key is not present yet then
 * add it and return 1 - in this case the new dependency chain is
 * validated. If the key is already hashed, return 0.
 * (On return with 1 graph_lock is held.)
 */
static inline int lookup_chain_cache(struct task_struct *curr,
				     struct held_lock *hlock,
				     u64 chain_key)
{
	struct lock_class *class = hlock_class(hlock);
	struct list_head *hash_head = chainhashentry(chain_key);
	struct lock_chain *chain;
	struct held_lock *hlock_curr, *hlock_next;
	int i, j, n, cn;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return 0;
	/*
	 * We can walk it lock-free, because entries only get added
	 * to the hash:
	 */
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
cache_hit:
			debug_atomic_inc(chain_lookup_hits);
			if (very_verbose(class))
				printk("\nhash chain already cached, key: "
					"%016Lx tail class: [%p] %s\n",
					(unsigned long long)chain_key,
					class->key, class->name);
			return 0;
		}
	}
	if (very_verbose(class))
		printk("\nnew hash chain, key: %016Lx tail class: [%p] %s\n",
			(unsigned long long)chain_key, class->key, class->name);
	/*
	 * Allocate a new chain entry from the static array, and add
	 * it to the hash:
	 */
	if (!graph_lock())
		return 0;
	/*
	 * We have to walk the chain again locked - to avoid duplicates:
	 */
	list_for_each_entry(chain, hash_head, entry) {
		if (chain->chain_key == chain_key) {
			graph_unlock();
			goto cache_hit;
		}
	}
	if (unlikely(nr_lock_chains >= MAX_LOCKDEP_CHAINS)) {
		if (!debug_locks_off_graph_unlock())
			return 0;
		printk("BUG: MAX_LOCKDEP_CHAINS too low!\n");
		printk("turning off the locking correctness validator.\n");
		dump_stack();
		return 0;
	}
	chain = lock_chains + nr_lock_chains++;
	chain->chain_key = chain_key;
	chain->irq_context = hlock->irq_context;
	/* Find the first held_lock of current chain */
	hlock_next = hlock;
	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
		hlock_curr = curr->held_locks + i;
		if (hlock_curr->irq_context != hlock_next->irq_context)
			break;
		hlock_next = hlock;
	}
	i++;
	chain->depth = curr->lockdep_depth + 1 - i;
	/* reserve chain->depth slots in chain_hlocks via lock-free cmpxchg */
	cn = nr_chain_hlocks;
	while (cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS) {
		n = cmpxchg(&nr_chain_hlocks, cn, cn + chain->depth);
		if (n == cn)
			break;
		cn = n;
	}
	if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
		chain->base = cn;
		for (j = 0; j < chain->depth - 1; j++, i++) {
			int lock_id = curr->held_locks[i].class_idx - 1;
			chain_hlocks[chain->base + j] = lock_id;
		}
		chain_hlocks[chain->base + j] = class - lock_classes;
	}
	list_add_tail_rcu(&chain->entry, hash_head);
	debug_atomic_inc(chain_lookup_misses);
	inc_chains();

	return 1;
}

/*
 * Validate the chain ending at 'hlock': on a chain-cache miss, run the
 * full deadlock and dependency checks and record the new dependencies.
 * Returns 0 when a problem was found (validator turned off), 1 otherwise.
 */
static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
		struct held_lock *hlock, int chain_head, u64 chain_key)
{
	/*
	 * Trylock needs to maintain the stack of held locks, but it
	 * does not add new dependencies, because trylock can be done
	 * in any order.
	 *
	 * We look up the chain_key and do the O(N^2) check and update of
	 * the dependencies only if this is a new dependency chain.
	 * (If lookup_chain_cache() returns with 1 it acquires
	 * graph_lock for us)
	 */
	if (!hlock->trylock && (hlock->check == 2) &&
	    lookup_chain_cache(curr, hlock, chain_key)) {
		/*
		 * Check whether last held lock:
		 *
		 * - is irq-safe, if this lock is irq-unsafe
		 * - is softirq-safe, if this lock is hardirq-unsafe
		 *
		 * And check whether the new lock's dependency graph
		 * could lead back to the previous lock.
		 *
		 * any of these scenarios could lead to a deadlock. If
		 * all validations pass, add the new dependency.
		 */
		int ret = check_deadlock(curr, hlock, lock, hlock->read);

		if (!ret)
			return 0;
		/*
		 * Mark recursive read, as we jump over it when
		 * building dependencies (just like we jump over
		 * trylock entries):
		 */
		if (ret == 2)
			hlock->read = 2;
		/*
		 * Add dependency only if this lock is not the head
		 * of the chain, and if it's not a secondary read-lock:
		 */
		if (!chain_head && ret != 2)
			if (!check_prevs_add(curr, hlock))
				return 0;
		graph_unlock();
	} else
		/* after lookup_chain_cache(): */
		if (unlikely(!debug_locks))
			return 0;

	return 1;
}
#else
static inline int validate_chain(struct task_struct *curr,
	       	struct lockdep_map *lock, struct held_lock *hlock,
		int chain_head, u64 chain_key)
{
	return 1;
}
#endif

/*
 * We are building curr_chain_key incrementally, so double-check
 * it from scratch, to make sure that it's done correctly:
 */
static void check_chain_key(struct task_struct *curr)
{
#ifdef CONFIG_DEBUG_LOCKDEP
	struct held_lock *hlock, *prev_hlock = NULL;
	unsigned int i, id;
	u64 chain_key = 0;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;
		if (chain_key != hlock->prev_chain_key) {
			debug_locks_off();
			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
				curr->lockdep_depth, i,
				(unsigned long long)chain_key,
				(unsigned long long)hlock->prev_chain_key);
			return;
		}
		id = hlock->class_idx - 1;
		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
			return;
		/* crossing into another irq context restarts the key */
		if (prev_hlock && (prev_hlock->irq_context !=
							hlock->irq_context))
			chain_key = 0;
		chain_key = iterate_chain_key(chain_key, id);
		prev_hlock = hlock;
	}
	if (chain_key != curr->curr_chain_key) {
		debug_locks_off();
		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
			curr->lockdep_depth, i,
			(unsigned long long)chain_key,
			(unsigned long long)curr->curr_chain_key);
	}
#endif
}

/* Report an inconsistent {prev_bit} -> {new_bit} usage; returns 0. */
static int
print_usage_bug(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n=================================\n");
	printk(  "[ INFO: inconsistent lock state ]\n");
	print_kernel_version();
	printk(  "---------------------------------\n");

	printk("inconsistent {%s} -> {%s} usage.\n",
		usage_str[prev_bit], usage_str[new_bit]);

	printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
		curr->comm, task_pid_nr(curr),
		trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
		trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
		trace_hardirqs_enabled(curr),
		trace_softirqs_enabled(curr));

	print_lock(this);

	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);

	print_irqtrace_events(curr);
	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Print out an error if an invalid bit is set:
 */
static inline int
valid_state(struct task_struct *curr, struct held_lock *this,
	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
{
	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
		return print_usage_bug(curr, this, bad_bit, new_bit);
	return 1;
}

static int mark_lock(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit new_bit);

#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)

/*
 * print irq inversion bug:
 */
static int
print_irq_inversion_bug(struct task_struct *curr,
			struct lock_list *root, struct lock_list *other,
			struct held_lock *this, int forwards,
			const char *irqclass)
{
	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
		return 0;

	printk("\n=========================================================\n");
	printk(  "[ INFO: possible irq lock inversion dependency detected ]\n");
	print_kernel_version();
	printk(  "---------------------------------------------------------\n");
	printk("%s/%d just changed the state of lock:\n",
		curr->comm, task_pid_nr(curr));
	print_lock(this);
	if (forwards)
		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
	else
		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
	print_lock_name(other->class);
	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");

	printk("\nother info that might help us debug this:\n");
	lockdep_print_held_locks(curr);

	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
	if (!save_trace(&root->trace))
		return 0;
	print_shortest_lock_dependencies(other, root);

	printk("\nstack backtrace:\n");
	dump_stack();

	return 0;
}

/*
 * Prove that in the forwards-direction subgraph starting at <this>
 * there is no lock matching <mask>:
 */
static int
check_usage_forwards(struct task_struct *curr, struct held_lock *this,
		     enum lock_usage_bit bit, const char *irqclass)
{
	int ret;
	struct lock_list root;
	struct lock_list *uninitialized_var(target_entry);

	root.parent = NULL;
	root.class = hlock_class(this);
	ret = find_usage_forwards(&root, bit, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, &root, target_entry,
					this, 1, irqclass);
}

/*
 * Prove that in the backwards-direction subgraph starting at <this>
 * there is no lock matching <mask>:
 */
static int
check_usage_backwards(struct task_struct *curr, struct held_lock *this,
		      enum lock_usage_bit bit, const char *irqclass)
{
	int ret;
	struct lock_list root;
	struct lock_list *uninitialized_var(target_entry);

	root.parent = NULL;
	root.class = hlock_class(this);
	ret = find_usage_backwards(&root, bit, &target_entry);
	if (ret < 0)
		return print_bfs_bug(ret);
	if (ret == 1)
		return ret;

	return print_irq_inversion_bug(curr, &root, target_entry,
					this, 0, irqclass);
}

/* Dump the task's recent hardirq/softirq enable/disable events. */
void print_irqtrace_events(struct task_struct *curr)
{
	printk("irq event stamp: %u\n", curr->irq_events);
	printk("hardirqs last  enabled at (%u): ", curr->hardirq_enable_event);
	print_ip_sym(curr->hardirq_enable_ip);
	printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event);
	print_ip_sym(curr->hardirq_disable_ip);
	printk("softirqs last  enabled at (%u): ", curr->softirq_enable_event);
	print_ip_sym(curr->softirq_enable_ip);
	printk("softirqs last disabled at (%u): ", curr->softirq_disable_event);
	print_ip_sym(curr->softirq_disable_ip);
}

/* Per-state verbosity hooks, selected by compile-time *_VERBOSE flags. */
static int HARDIRQ_verbose(struct lock_class *class)
{
#if HARDIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int SOFTIRQ_verbose(struct lock_class *class)
{
#if SOFTIRQ_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

static int RECLAIM_FS_verbose(struct lock_class *class)
{
#if RECLAIM_VERBOSE
	return class_filter(class);
#endif
	return 0;
}

#define STRICT_READ_CHECKS	1

static int (*state_verbose_f[])(struct lock_class *class) = {
#define LOCKDEP_STATE(__STATE) \
	__STATE##_verbose,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

static inline int state_verbose(enum lock_usage_bit bit,
				struct lock_class *class)
{
	return state_verbose_f[bit >> 2](class);
}

typedef int (*check_usage_f)(struct task_struct *, struct held_lock *,
			     enum lock_usage_bit bit, const char *name);

/*
 * Validate an irq usage-bit transition for 'this' lock: check both the
 * lock's own conflicting states and its dependency subgraphs.
 * Returns 0 on failure, 1 on success, 2 on success with verbose printout.
 */
static int
mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	int excl_bit = exclusive_bit(new_bit);
	int read = new_bit & 1;
	int dir = new_bit & 2;

	/*
	 * mark USED_IN has to look forwards -- to ensure no dependency
	 * has ENABLED state, which would allow recursion deadlocks.
	 *
	 * mark ENABLED has to look backwards -- to ensure no dependee
	 * has USED_IN state, which, again, would allow  recursion deadlocks.
	 */
	check_usage_f usage = dir ?
		check_usage_backwards : check_usage_forwards;

	/*
	 * Validate that this particular lock does not have conflicting
	 * usage states.
	 */
	if (!valid_state(curr, this, new_bit, excl_bit))
		return 0;

	/*
	 * Validate that the lock dependencies don't have conflicting usage
	 * states.
	 */
	if ((!read || !dir || STRICT_READ_CHECKS) &&
			!usage(curr, this, excl_bit, state_name(new_bit & ~1)))
		return 0;

	/*
	 * Check for read in write conflicts
	 */
	if (!read) {
		if (!valid_state(curr, this, new_bit, excl_bit + 1))
			return 0;

		if (STRICT_READ_CHECKS &&
			!usage(curr, this, excl_bit + 1,
				state_name(new_bit + 1)))
			return 0;
	}

	if (state_verbose(new_bit, hlock_class(this)))
		return 2;

	return 1;
}

enum mark_type {
#define LOCKDEP_STATE(__STATE)	__STATE,
#include "lockdep_states.h"
#undef LOCKDEP_STATE
};

/*
 * Mark all held locks with a usage bit:
 */
static int
mark_held_locks(struct task_struct *curr, enum mark_type mark)
{
	enum lock_usage_bit usage_bit;
	struct held_lock *hlock;
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		hlock = curr->held_locks + i;

		usage_bit = 2 + (mark << 2); /* ENABLED */
		if (hlock->read)
			usage_bit += 1; /* READ */

		BUG_ON(usage_bit >= LOCK_USAGE_STATES);

		if (!mark_lock(curr, hlock, usage_bit))
			return 0;
	}

	return 1;
}

/*
 * Debugging helper: via this flag we know that we are in
 * 'early bootup code', and will warn about any invalid irqs-on event:
 */
static int early_boot_irqs_enabled;

void early_boot_irqs_off(void)
{
	early_boot_irqs_enabled = 0;
}

void early_boot_irqs_on(void)
{
	early_boot_irqs_enabled = 1;
}

/*
 * Hardirqs will be enabled:
 */
void trace_hardirqs_on_caller(unsigned long ip)
{
	struct task_struct *curr = current;

	time_hardirqs_on(CALLER_ADDR0, ip);

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	if (DEBUG_LOCKS_WARN_ON(unlikely(!early_boot_irqs_enabled)))
		return;

	if (unlikely(curr->hardirqs_enabled)) {
		/*
		 * Neither irq nor preemption are disabled here
		 * so this is racy by nature but losing one hit
		 * in a stat is not a big deal.
		 */
		__debug_atomic_inc(redundant_hardirqs_on);
		return;
	}
	/* we'll do an OFF -> ON transition: */
	curr->hardirqs_enabled = 1;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;
	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
		return;
	/*
	 * We are going to turn hardirqs on, so set the
	 * usage bit for all held locks:
	 */
	if (!mark_held_locks(curr, HARDIRQ))
		return;
	/*
	 * If we have softirqs enabled, then set the usage
	 * bit for all held locks. (disabled hardirqs prevented
	 * this bit from being set before)
	 */
	if (curr->softirqs_enabled)
		if (!mark_held_locks(curr, SOFTIRQ))
			return;

	curr->hardirq_enable_ip = ip;
	curr->hardirq_enable_event = ++curr->irq_events;
	debug_atomic_inc(hardirqs_on_events);
}
EXPORT_SYMBOL(trace_hardirqs_on_caller);

void trace_hardirqs_on(void)
{
	trace_hardirqs_on_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_on);

/*
 * Hardirqs were disabled:
 */
void trace_hardirqs_off_caller(unsigned long ip)
{
	struct task_struct *curr = current;

	time_hardirqs_off(CALLER_ADDR0, ip);

	if (unlikely(!debug_locks || current->lockdep_recursion))
		return;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->hardirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->hardirqs_enabled = 0;
		curr->hardirq_disable_ip = ip;
		curr->hardirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(hardirqs_off_events);
	} else
		debug_atomic_inc(redundant_hardirqs_off);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);

void trace_hardirqs_off(void)
{
	trace_hardirqs_off_caller(CALLER_ADDR0);
}
EXPORT_SYMBOL(trace_hardirqs_off);

/*
 * Softirqs will be enabled:
 */
void trace_softirqs_on(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks))
		return;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		debug_atomic_inc(redundant_softirqs_on);
		return;
	}

	/*
	 * We'll do an OFF -> ON transition:
	 */
	curr->softirqs_enabled = 1;
	curr->softirq_enable_ip = ip;
	curr->softirq_enable_event = ++curr->irq_events;
	debug_atomic_inc(softirqs_on_events);
	/*
	 * We are going to turn softirqs on, so set the
	 * usage bit for all held locks, if hardirqs are
	 * enabled too:
	 */
	if (curr->hardirqs_enabled)
		mark_held_locks(curr, SOFTIRQ);
}

/*
 * Softirqs were disabled:
 */
void trace_softirqs_off(unsigned long ip)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks))
		return;

	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
		return;

	if (curr->softirqs_enabled) {
		/*
		 * We have done an ON -> OFF transition:
		 */
		curr->softirqs_enabled = 0;
		curr->softirq_disable_ip = ip;
		curr->softirq_disable_event = ++curr->irq_events;
		debug_atomic_inc(softirqs_off_events);
		DEBUG_LOCKS_WARN_ON(!softirq_count());
	} else
		debug_atomic_inc(redundant_softirqs_off);
}

/*
 * Mark held locks as used in FS-reclaim context, when the allocation
 * can recurse into __GFP_FS reclaim.
 */
static void __lockdep_trace_alloc(gfp_t gfp_mask, unsigned long flags)
{
	struct task_struct *curr = current;

	if (unlikely(!debug_locks))
		return;

	/* no reclaim without waiting on it */
	if (!(gfp_mask & __GFP_WAIT))
		return;

	/* this guy won't enter reclaim */
	if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC))
		return;

	/* We're only interested __GFP_FS allocations for now */
	if (!(gfp_mask & __GFP_FS))
		return;

	if (DEBUG_LOCKS_WARN_ON(irqs_disabled_flags(flags)))
		return;

	mark_held_locks(curr, RECLAIM_FS);
}

static void check_flags(unsigned long flags);

/* Recursion-guarded entry point for allocation-context tracking. */
void lockdep_trace_alloc(gfp_t gfp_mask)
{
	unsigned long flags;

	if (unlikely(current->lockdep_recursion))
		return;

	raw_local_irq_save(flags);
	check_flags(flags);
	current->lockdep_recursion = 1;
	__lockdep_trace_alloc(gfp_mask, flags);
	current->lockdep_recursion = 0;
	raw_local_irq_restore(flags);
}

/*
 * Mark the usage bits that follow from the current irq context and
 * irq-enable state of a freshly acquired lock.  Returns 0 when a
 * mark_lock() call reported a problem, 1 otherwise.
 */
static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
{
	/*
	 * If non-trylock use in a hardirq or softirq context, then
	 * mark the lock as used in these contexts:
	 */
	if (!hlock->trylock) {
		if (hlock->read) {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_HARDIRQ_READ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock,
						LOCK_USED_IN_SOFTIRQ_READ))
					return 0;
		} else {
			if (curr->hardirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
					return 0;
			if (curr->softirq_context)
				if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
					return 0;
		}
	}
	if (!hlock->hardirqs_off) {
		if (hlock->read) {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ_READ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ_READ))
					return 0;
		} else {
			if (!mark_lock(curr, hlock,
					LOCK_ENABLED_HARDIRQ))
				return 0;
			if (curr->softirqs_enabled)
				if (!mark_lock(curr, hlock,
						LOCK_ENABLED_SOFTIRQ))
					return 0;
		}
	}

	/*
	 * We reuse the irq context infrastructure more broadly as a general
	 * context checking code. This tests GFP_FS recursion (a lock taken
	 * during reclaim for a GFP_FS allocation is held over a GFP_FS
	 * allocation).
	 */
	if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) {
		if (hlock->read) {
			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ))
					return 0;
		} else {
			if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS))
					return 0;
		}
	}

	return 1;
}

/*
 * Record the irq context of a new held lock; returns 1 when the
 * context differs from the previous held lock's (chain boundary).
 */
static int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	unsigned int depth = curr->lockdep_depth;

	/*
	 * Keep track of points where we cross into an interrupt context:
	 */
	hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
				curr->softirq_context;
	if (depth) {
		struct held_lock *prev_hlock;

		prev_hlock = curr->held_locks + depth-1;
		/*
		 * If we cross into another context, reset the
		 * hash key (this also prevents the checking and the
		 * adding of the dependency to 'prev'):
		 */
		if (prev_hlock->irq_context != hlock->irq_context)
			return 1;
	}
	return 0;
}

#else

static inline
int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
		enum lock_usage_bit new_bit)
{
	WARN_ON(1);
	return 1;
}

static inline int mark_irqflags(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 1;
}

static inline int separate_irq_context(struct task_struct *curr,
		struct held_lock *hlock)
{
	return 0;
}

void lockdep_trace_alloc(gfp_t gfp_mask)
{
}

#endif

/*
 * Mark a lock with a usage bit, and validate the state transition:
 *
 * NOTE(review): this function is cut off at the end of this chunk --
 * its closing "return ret;" and final brace lie below.
 */
static int mark_lock(struct task_struct *curr, struct held_lock *this,
			     enum lock_usage_bit new_bit)
{
	unsigned int new_mask = 1 << new_bit, ret = 1;

	/*
	 * If already set then do not dirty the cacheline,
	 * nor do any checks:
	 */
	if (likely(hlock_class(this)->usage_mask & new_mask))
		return 1;

	if (!graph_lock())
		return 0;
	/*
	 * Make sure we didn't race:
	 */
	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
		graph_unlock();
		return 1;
	}

	hlock_class(this)->usage_mask |= new_mask;

	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
		return 0;

	switch (new_bit) {
#define LOCKDEP_STATE(__STATE)			\
	case LOCK_USED_IN_##__STATE:		\
	case LOCK_USED_IN_##__STATE##_READ:	\
	case LOCK_ENABLED_##__STATE:		\
	case LOCK_ENABLED_##__STATE##_READ:
#include "lockdep_states.h"
#undef LOCKDEP_STATE
		ret = mark_lock_irq(curr, this, new_bit);
		if (!ret)
			return 0;
		break;
	case LOCK_USED:
		debug_atomic_dec(nr_unused_locks);
		break;
	default:
		if (!debug_locks_off_graph_unlock())
			return 0;
		WARN_ON(1);
		return 0;
	}

	graph_unlock();

	/*
	 * We must printk outside of the graph_lock:
	 */
	if (ret == 2) {
		printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
		print_lock(this);
		print_irqtrace_events(curr);
		dump_stack();
	}
return ret; } /* * Initialize a lock instance's lock-class mapping info: */ void lockdep_init_map(struct lockdep_map *lock, const char *name, struct lock_class_key *key, int subclass) { lock->class_cache = NULL; #ifdef CONFIG_LOCK_STAT lock->cpu = raw_smp_processor_id(); #endif if (DEBUG_LOCKS_WARN_ON(!name)) { lock->name = "NULL"; return; } lock->name = name; if (DEBUG_LOCKS_WARN_ON(!key)) return; /* * Sanity check, the lock-class key must be persistent: */ if (!static_obj(key)) { printk("BUG: key %p not in .data!\n", key); DEBUG_LOCKS_WARN_ON(1); return; } lock->key = key; if (unlikely(!debug_locks)) return; if (subclass) register_lock_class(lock, subclass, 1); } EXPORT_SYMBOL_GPL(lockdep_init_map); struct lock_class_key __lockdep_no_validate__; /* * This gets called for every mutex_lock*()/spin_lock*() operation. * We maintain the dependency maps and validate the locking attempt: */ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, int hardirqs_off, struct lockdep_map *nest_lock, unsigned long ip, int references) { struct task_struct *curr = current; struct lock_class *class = NULL; struct held_lock *hlock; unsigned int depth, id; int chain_head = 0; int class_idx; u64 chain_key; if (!prove_locking) check = 1; if (unlikely(!debug_locks)) return 0; if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return 0; if (unlikely(subclass >= MAX_LOCKDEP_SUBCLASSES)) { debug_locks_off(); printk("BUG: MAX_LOCKDEP_SUBCLASSES too low!\n"); printk("turning off the locking correctness validator.\n"); dump_stack(); return 0; } if (lock->key == &__lockdep_no_validate__) check = 1; if (!subclass) class = lock->class_cache; /* * Not cached yet or subclass? 
*/ if (unlikely(!class)) { class = register_lock_class(lock, subclass, 0); if (!class) return 0; } atomic_inc((atomic_t *)&class->ops); if (very_verbose(class)) { printk("\nacquire class [%p] %s", class->key, class->name); if (class->name_version > 1) printk("#%d", class->name_version); printk("\n"); dump_stack(); } /* * Add the lock to the list of currently held locks. * (we dont increase the depth just yet, up until the * dependency checks are done) */ depth = curr->lockdep_depth; if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) return 0; class_idx = class - lock_classes + 1; if (depth) { hlock = curr->held_locks + depth - 1; if (hlock->class_idx == class_idx && nest_lock) { if (hlock->references) hlock->references++; else hlock->references = 2; return 1; } } hlock = curr->held_locks + depth; if (DEBUG_LOCKS_WARN_ON(!class)) return 0; hlock->class_idx = class_idx; hlock->acquire_ip = ip; hlock->instance = lock; hlock->nest_lock = nest_lock; hlock->trylock = trylock; hlock->read = read; hlock->check = check; hlock->hardirqs_off = !!hardirqs_off; hlock->references = references; #ifdef CONFIG_LOCK_STAT hlock->waittime_stamp = 0; hlock->holdtime_stamp = lockstat_clock(); #endif if (check == 2 && !mark_irqflags(curr, hlock)) return 0; /* mark it as used: */ if (!mark_lock(curr, hlock, LOCK_USED)) return 0; /* * Calculate the chain hash: it's the combined hash of all the * lock keys along the dependency chain. We save the hash value * at every step so that we can get the current hash easily * after unlock. The chain hash is then used to cache dependency * results. * * The 'key ID' is what is the most compact key value to drive * the hash, not class->key. 
*/ id = class - lock_classes; if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) return 0; chain_key = curr->curr_chain_key; if (!depth) { if (DEBUG_LOCKS_WARN_ON(chain_key != 0)) return 0; chain_head = 1; } hlock->prev_chain_key = chain_key; if (separate_irq_context(curr, hlock)) { chain_key = 0; chain_head = 1; } chain_key = iterate_chain_key(chain_key, id); if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) return 0; curr->curr_chain_key = chain_key; curr->lockdep_depth++; check_chain_key(curr); #ifdef CONFIG_DEBUG_LOCKDEP if (unlikely(!debug_locks)) return 0; #endif if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { debug_locks_off(); printk("BUG: MAX_LOCK_DEPTH too low!\n"); printk("turning off the locking correctness validator.\n"); dump_stack(); return 0; } if (unlikely(curr->lockdep_depth > max_lockdep_depth)) max_lockdep_depth = curr->lockdep_depth; return 1; } static int print_unlock_inbalance_bug(struct task_struct *curr, struct lockdep_map *lock, unsigned long ip) { if (!debug_locks_off()) return 0; if (debug_locks_silent) return 0; printk("\n=====================================\n"); printk( "[ BUG: bad unlock balance detected! 
]\n"); printk( "-------------------------------------\n"); printk("%s/%d is trying to release lock (", curr->comm, task_pid_nr(curr)); print_lockdep_cache(lock); printk(") at:\n"); print_ip_sym(ip); printk("but there are no more locks to release!\n"); printk("\nother info that might help us debug this:\n"); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); dump_stack(); return 0; } /* * Common debugging checks for both nested and non-nested unlock: */ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock, unsigned long ip) { if (unlikely(!debug_locks)) return 0; if (DEBUG_LOCKS_WARN_ON(!irqs_disabled())) return 0; if (curr->lockdep_depth <= 0) return print_unlock_inbalance_bug(curr, lock, ip); return 1; } static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock) { if (hlock->instance == lock) return 1; if (hlock->references) { struct lock_class *class = lock->class_cache; if (!class) class = look_up_lock_class(lock, 0); if (DEBUG_LOCKS_WARN_ON(!class)) return 0; if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock)) return 0; if (hlock->class_idx == class - lock_classes + 1) return 1; } return 0; } static int __lock_set_class(struct lockdep_map *lock, const char *name, struct lock_class_key *key, unsigned int subclass, unsigned long ip) { struct task_struct *curr = current; struct held_lock *hlock, *prev_hlock; struct lock_class *class; unsigned int depth; int i; depth = curr->lockdep_depth; if (DEBUG_LOCKS_WARN_ON(!depth)) return 0; prev_hlock = NULL; for (i = depth-1; i >= 0; i--) { hlock = curr->held_locks + i; /* * We must not cross into another context: */ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) break; if (match_held_lock(hlock, lock)) goto found_it; prev_hlock = hlock; } return print_unlock_inbalance_bug(curr, lock, ip); found_it: lockdep_init_map(lock, name, key, 0); class = register_lock_class(lock, subclass, 0); hlock->class_idx = class - lock_classes + 1; curr->lockdep_depth = i; 
curr->curr_chain_key = hlock->prev_chain_key; for (; i < depth; i++) { hlock = curr->held_locks + i; if (!__lock_acquire(hlock->instance, hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, hlock->references)) return 0; } if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) return 0; return 1; } /* * Remove the lock to the list of currently held locks in a * potentially non-nested (out of order) manner. This is a * relatively rare operation, as all the unlock APIs default * to nested mode (which uses lock_release()): */ static int lock_release_non_nested(struct task_struct *curr, struct lockdep_map *lock, unsigned long ip) { struct held_lock *hlock, *prev_hlock; unsigned int depth; int i; /* * Check whether the lock exists in the current stack * of held locks: */ depth = curr->lockdep_depth; if (DEBUG_LOCKS_WARN_ON(!depth)) return 0; prev_hlock = NULL; for (i = depth-1; i >= 0; i--) { hlock = curr->held_locks + i; /* * We must not cross into another context: */ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) break; if (match_held_lock(hlock, lock)) goto found_it; prev_hlock = hlock; } return print_unlock_inbalance_bug(curr, lock, ip); found_it: if (hlock->instance == lock) lock_release_holdtime(hlock); if (hlock->references) { hlock->references--; if (hlock->references) { /* * We had, and after removing one, still have * references, the current lock stack is still * valid. We're done! */ return 1; } } /* * We have the right lock to unlock, 'hlock' points to it. 
* Now we remove it from the stack, and add back the other * entries (if any), recalculating the hash along the way: */ curr->lockdep_depth = i; curr->curr_chain_key = hlock->prev_chain_key; for (i++; i < depth; i++) { hlock = curr->held_locks + i; if (!__lock_acquire(hlock->instance, hlock_class(hlock)->subclass, hlock->trylock, hlock->read, hlock->check, hlock->hardirqs_off, hlock->nest_lock, hlock->acquire_ip, hlock->references)) return 0; } if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) return 0; return 1; } /* * Remove the lock to the list of currently held locks - this gets * called on mutex_unlock()/spin_unlock*() (or on a failed * mutex_lock_interruptible()). This is done for unlocks that nest * perfectly. (i.e. the current top of the lock-stack is unlocked) */ static int lock_release_nested(struct task_struct *curr, struct lockdep_map *lock, unsigned long ip) { struct held_lock *hlock; unsigned int depth; /* * Pop off the top of the lock stack: */ depth = curr->lockdep_depth - 1; hlock = curr->held_locks + depth; /* * Is the unlock non-nested: */ if (hlock->instance != lock || hlock->references) return lock_release_non_nested(curr, lock, ip); curr->lockdep_depth--; if (DEBUG_LOCKS_WARN_ON(!depth && (hlock->prev_chain_key != 0))) return 0; curr->curr_chain_key = hlock->prev_chain_key; lock_release_holdtime(hlock); #ifdef CONFIG_DEBUG_LOCKDEP hlock->prev_chain_key = 0; hlock->class_idx = 0; hlock->acquire_ip = 0; hlock->irq_context = 0; #endif return 1; } /* * Remove the lock to the list of currently held locks - this gets * called on mutex_unlock()/spin_unlock*() (or on a failed * mutex_lock_interruptible()). This is done for unlocks that nest * perfectly. (i.e. 
the current top of the lock-stack is unlocked) */ static void __lock_release(struct lockdep_map *lock, int nested, unsigned long ip) { struct task_struct *curr = current; if (!check_unlock(curr, lock, ip)) return; if (nested) { if (!lock_release_nested(curr, lock, ip)) return; } else { if (!lock_release_non_nested(curr, lock, ip)) return; } check_chain_key(curr); } static int __lock_is_held(struct lockdep_map *lock) { struct task_struct *curr = current; int i; for (i = 0; i < curr->lockdep_depth; i++) { struct held_lock *hlock = curr->held_locks + i; if (match_held_lock(hlock, lock)) return 1; } return 0; } /* * Check whether we follow the irq-flags state precisely: */ static void check_flags(unsigned long flags) { #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP) && \ defined(CONFIG_TRACE_IRQFLAGS) if (!debug_locks) return; if (irqs_disabled_flags(flags)) { if (DEBUG_LOCKS_WARN_ON(current->hardirqs_enabled)) { printk("possible reason: unannotated irqs-off.\n"); } } else { if (DEBUG_LOCKS_WARN_ON(!current->hardirqs_enabled)) { printk("possible reason: unannotated irqs-on.\n"); } } /* * We dont accurately track softirq state in e.g. 
* hardirq contexts (such as on 4KSTACKS), so only * check if not in hardirq contexts: */ if (!hardirq_count()) { if (softirq_count()) DEBUG_LOCKS_WARN_ON(current->softirqs_enabled); else DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled); } if (!debug_locks) print_irqtrace_events(current); #endif } void lock_set_class(struct lockdep_map *lock, const char *name, struct lock_class_key *key, unsigned int subclass, unsigned long ip) { unsigned long flags; if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); current->lockdep_recursion = 1; check_flags(flags); if (__lock_set_class(lock, name, key, subclass, ip)) check_chain_key(current); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lock_set_class); /* * We are not always called with irqs disabled - do that here, * and also avoid lockdep recursion: */ void lock_acquire(struct lockdep_map *lock, unsigned int subclass, int trylock, int read, int check, struct lockdep_map *nest_lock, unsigned long ip) { unsigned long flags; if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip); __lock_acquire(lock, subclass, trylock, read, check, irqs_disabled_flags(flags), nest_lock, ip, 0); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lock_acquire); void lock_release(struct lockdep_map *lock, int nested, unsigned long ip) { unsigned long flags; if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; trace_lock_release(lock, ip); __lock_release(lock, nested, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lock_release); int lock_is_held(struct lockdep_map *lock) { unsigned long flags; int ret = 0; if (unlikely(current->lockdep_recursion)) return ret; raw_local_irq_save(flags); 
check_flags(flags); current->lockdep_recursion = 1; ret = __lock_is_held(lock); current->lockdep_recursion = 0; raw_local_irq_restore(flags); return ret; } EXPORT_SYMBOL_GPL(lock_is_held); void lockdep_set_current_reclaim_state(gfp_t gfp_mask) { current->lockdep_reclaim_gfp = gfp_mask; } void lockdep_clear_current_reclaim_state(void) { current->lockdep_reclaim_gfp = 0; } #ifdef CONFIG_LOCK_STAT static int print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, unsigned long ip) { if (!debug_locks_off()) return 0; if (debug_locks_silent) return 0; printk("\n=================================\n"); printk( "[ BUG: bad contention detected! ]\n"); printk( "---------------------------------\n"); printk("%s/%d is trying to contend lock (", curr->comm, task_pid_nr(curr)); print_lockdep_cache(lock); printk(") at:\n"); print_ip_sym(ip); printk("but there are no locks held!\n"); printk("\nother info that might help us debug this:\n"); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); dump_stack(); return 0; } static void __lock_contended(struct lockdep_map *lock, unsigned long ip) { struct task_struct *curr = current; struct held_lock *hlock, *prev_hlock; struct lock_class_stats *stats; unsigned int depth; int i, contention_point, contending_point; depth = curr->lockdep_depth; if (DEBUG_LOCKS_WARN_ON(!depth)) return; prev_hlock = NULL; for (i = depth-1; i >= 0; i--) { hlock = curr->held_locks + i; /* * We must not cross into another context: */ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) break; if (match_held_lock(hlock, lock)) goto found_it; prev_hlock = hlock; } print_lock_contention_bug(curr, lock, ip); return; found_it: if (hlock->instance != lock) return; hlock->waittime_stamp = lockstat_clock(); contention_point = lock_point(hlock_class(hlock)->contention_point, ip); contending_point = lock_point(hlock_class(hlock)->contending_point, lock->ip); stats = get_lock_stats(hlock_class(hlock)); if (contention_point < 
LOCKSTAT_POINTS) stats->contention_point[contention_point]++; if (contending_point < LOCKSTAT_POINTS) stats->contending_point[contending_point]++; if (lock->cpu != smp_processor_id()) stats->bounces[bounce_contended + !!hlock->read]++; put_lock_stats(stats); } static void __lock_acquired(struct lockdep_map *lock, unsigned long ip) { struct task_struct *curr = current; struct held_lock *hlock, *prev_hlock; struct lock_class_stats *stats; unsigned int depth; u64 now, waittime = 0; int i, cpu; depth = curr->lockdep_depth; if (DEBUG_LOCKS_WARN_ON(!depth)) return; prev_hlock = NULL; for (i = depth-1; i >= 0; i--) { hlock = curr->held_locks + i; /* * We must not cross into another context: */ if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) break; if (match_held_lock(hlock, lock)) goto found_it; prev_hlock = hlock; } print_lock_contention_bug(curr, lock, _RET_IP_); return; found_it: if (hlock->instance != lock) return; cpu = smp_processor_id(); if (hlock->waittime_stamp) { now = lockstat_clock(); waittime = now - hlock->waittime_stamp; hlock->holdtime_stamp = now; } trace_lock_acquired(lock, ip); stats = get_lock_stats(hlock_class(hlock)); if (waittime) { if (hlock->read) lock_time_inc(&stats->read_waittime, waittime); else lock_time_inc(&stats->write_waittime, waittime); } if (lock->cpu != cpu) stats->bounces[bounce_acquired + !!hlock->read]++; put_lock_stats(stats); lock->cpu = cpu; lock->ip = ip; } void lock_contended(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; if (unlikely(!lock_stat)) return; if (unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; trace_lock_contended(lock, ip); __lock_contended(lock, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lock_contended); void lock_acquired(struct lockdep_map *lock, unsigned long ip) { unsigned long flags; if (unlikely(!lock_stat)) return; if 
(unlikely(current->lockdep_recursion)) return; raw_local_irq_save(flags); check_flags(flags); current->lockdep_recursion = 1; __lock_acquired(lock, ip); current->lockdep_recursion = 0; raw_local_irq_restore(flags); } EXPORT_SYMBOL_GPL(lock_acquired); #endif /* * Used by the testsuite, sanitize the validator state * after a simulated failure: */ void lockdep_reset(void) { unsigned long flags; int i; raw_local_irq_save(flags); current->curr_chain_key = 0; current->lockdep_depth = 0; current->lockdep_recursion = 0; memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock)); nr_hardirq_chains = 0; nr_softirq_chains = 0; nr_process_chains = 0; debug_locks = 1; for (i = 0; i < CHAINHASH_SIZE; i++) INIT_LIST_HEAD(chainhash_table + i); raw_local_irq_restore(flags); } static void zap_class(struct lock_class *class) { int i; /* * Remove all dependencies this lock is * involved in: */ for (i = 0; i < nr_list_entries; i++) { if (list_entries[i].class == class) list_del_rcu(&list_entries[i].entry); } /* * Unhash the class and remove it from the all_lock_classes list: */ list_del_rcu(&class->hash_entry); list_del_rcu(&class->lock_entry); class->key = NULL; } static inline int within(const void *addr, void *start, unsigned long size) { return addr >= start && addr < start + size; } void lockdep_free_key_range(void *start, unsigned long size) { struct lock_class *class, *next; struct list_head *head; unsigned long flags; int i; int locked; raw_local_irq_save(flags); locked = graph_lock(); /* * Unhash all classes that were created by this module: */ for (i = 0; i < CLASSHASH_SIZE; i++) { head = classhash_table + i; if (list_empty(head)) continue; list_for_each_entry_safe(class, next, head, hash_entry) { if (within(class->key, start, size)) zap_class(class); else if (within(class->name, start, size)) zap_class(class); } } if (locked) graph_unlock(); raw_local_irq_restore(flags); } void lockdep_reset_lock(struct lockdep_map *lock) { struct lock_class *class, *next; 
struct list_head *head; unsigned long flags; int i, j; int locked; raw_local_irq_save(flags); /* * Remove all classes this lock might have: */ for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) { /* * If the class exists we look it up and zap it: */ class = look_up_lock_class(lock, j); if (class) zap_class(class); } /* * Debug check: in the end all mapped classes should * be gone. */ locked = graph_lock(); for (i = 0; i < CLASSHASH_SIZE; i++) { head = classhash_table + i; if (list_empty(head)) continue; list_for_each_entry_safe(class, next, head, hash_entry) { if (unlikely(class == lock->class_cache)) { if (debug_locks_off_graph_unlock()) WARN_ON(1); goto out_restore; } } } if (locked) graph_unlock(); out_restore: raw_local_irq_restore(flags); } void lockdep_init(void) { int i; /* * Some architectures have their own start_kernel() * code which calls lockdep_init(), while we also * call lockdep_init() from the start_kernel() itself, * and we want to initialize the hashes only once: */ if (lockdep_initialized) return; for (i = 0; i < CLASSHASH_SIZE; i++) INIT_LIST_HEAD(classhash_table + i); for (i = 0; i < CHAINHASH_SIZE; i++) INIT_LIST_HEAD(chainhash_table + i); lockdep_initialized = 1; } void __init lockdep_info(void) { printk("Lock dependency validator: Copyright (c) 2006 Red Hat, Inc., Ingo Molnar\n"); printk("... MAX_LOCKDEP_SUBCLASSES: %lu\n", MAX_LOCKDEP_SUBCLASSES); printk("... MAX_LOCK_DEPTH: %lu\n", MAX_LOCK_DEPTH); printk("... MAX_LOCKDEP_KEYS: %lu\n", MAX_LOCKDEP_KEYS); printk("... CLASSHASH_SIZE: %lu\n", CLASSHASH_SIZE); printk("... MAX_LOCKDEP_ENTRIES: %lu\n", MAX_LOCKDEP_ENTRIES); printk("... MAX_LOCKDEP_CHAINS: %lu\n", MAX_LOCKDEP_CHAINS); printk("... 
CHAINHASH_SIZE: %lu\n", CHAINHASH_SIZE); printk(" memory used by lock dependency info: %lu kB\n", (sizeof(struct lock_class) * MAX_LOCKDEP_KEYS + sizeof(struct list_head) * CLASSHASH_SIZE + sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + sizeof(struct list_head) * CHAINHASH_SIZE #ifdef CONFIG_PROVE_LOCKING + sizeof(struct circular_queue) #endif ) / 1024 ); printk(" per task-struct memory footprint: %lu bytes\n", sizeof(struct held_lock) * MAX_LOCK_DEPTH); #ifdef CONFIG_DEBUG_LOCKDEP if (lockdep_init_error) { printk("WARNING: lockdep init error! Arch code didn't call lockdep_init() early enough?\n"); printk("Call stack leading to lockdep invocation was:\n"); print_stack_trace(&lockdep_init_trace, 0); } #endif } static void print_freed_lock_bug(struct task_struct *curr, const void *mem_from, const void *mem_to, struct held_lock *hlock) { if (!debug_locks_off()) return; if (debug_locks_silent) return; printk("\n=========================\n"); printk( "[ BUG: held lock freed! 
]\n"); printk( "-------------------------\n"); printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n", curr->comm, task_pid_nr(curr), mem_from, mem_to-1); print_lock(hlock); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); dump_stack(); } static inline int not_in_range(const void* mem_from, unsigned long mem_len, const void* lock_from, unsigned long lock_len) { return lock_from + lock_len <= mem_from || mem_from + mem_len <= lock_from; } /* * Called when kernel memory is freed (or unmapped), or if a lock * is destroyed or reinitialized - this code checks whether there is * any held lock in the memory range of <from> to <to>: */ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len) { struct task_struct *curr = current; struct held_lock *hlock; unsigned long flags; int i; if (unlikely(!debug_locks)) return; local_irq_save(flags); for (i = 0; i < curr->lockdep_depth; i++) { hlock = curr->held_locks + i; if (not_in_range(mem_from, mem_len, hlock->instance, sizeof(*hlock->instance))) continue; print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); break; } local_irq_restore(flags); } EXPORT_SYMBOL_GPL(debug_check_no_locks_freed); static void print_held_locks_bug(struct task_struct *curr) { if (!debug_locks_off()) return; if (debug_locks_silent) return; printk("\n=====================================\n"); printk( "[ BUG: lock held at task exit time! 
]\n"); printk( "-------------------------------------\n"); printk("%s/%d is exiting with locks still held!\n", curr->comm, task_pid_nr(curr)); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); dump_stack(); } void debug_check_no_locks_held(struct task_struct *task) { if (unlikely(task->lockdep_depth > 0)) print_held_locks_bug(task); } void debug_show_all_locks(void) { struct task_struct *g, *p; int count = 10; int unlock = 1; if (unlikely(!debug_locks)) { printk("INFO: lockdep is turned off.\n"); return; } printk("\nShowing all locks held in the system:\n"); /* * Here we try to get the tasklist_lock as hard as possible, * if not successful after 2 seconds we ignore it (but keep * trying). This is to enable a debug printout even if a * tasklist_lock-holding task deadlocks or crashes. */ retry: if (!read_trylock(&tasklist_lock)) { if (count == 10) printk("hm, tasklist_lock locked, retrying... "); if (count) { count--; printk(" #%d", 10-count); mdelay(200); goto retry; } printk(" ignoring it.\n"); unlock = 0; } else { if (count != 10) printk(KERN_CONT " locked it.\n"); } do_each_thread(g, p) { /* * It's not reliable to print a task's held locks * if it's not sleeping (or if it's not the current * task): */ if (p->state == TASK_RUNNING && p != current) continue; if (p->lockdep_depth) lockdep_print_held_locks(p); if (!unlock) if (read_trylock(&tasklist_lock)) unlock = 1; } while_each_thread(g, p); printk("\n"); printk("=============================================\n\n"); if (unlock) read_unlock(&tasklist_lock); } EXPORT_SYMBOL_GPL(debug_show_all_locks); /* * Careful: only use this function if you are sure that * the task cannot run in parallel! 
*/ void __debug_show_held_locks(struct task_struct *task) { if (unlikely(!debug_locks)) { printk("INFO: lockdep is turned off.\n"); return; } lockdep_print_held_locks(task); } EXPORT_SYMBOL_GPL(__debug_show_held_locks); void debug_show_held_locks(struct task_struct *task) { __debug_show_held_locks(task); } EXPORT_SYMBOL_GPL(debug_show_held_locks); void lockdep_sys_exit(void) { struct task_struct *curr = current; if (unlikely(curr->lockdep_depth)) { if (!debug_locks_off()) return; printk("\n================================================\n"); printk( "[ BUG: lock held when returning to user space! ]\n"); printk( "------------------------------------------------\n"); printk("%s/%d is leaving the kernel with locks still held!\n", curr->comm, curr->pid); lockdep_print_held_locks(curr); } } void lockdep_rcu_dereference(const char *file, const int line) { struct task_struct *curr = current; #ifndef CONFIG_PROVE_RCU_REPEATEDLY if (!debug_locks_off()) return; #endif /* #ifdef CONFIG_PROVE_RCU_REPEATEDLY */ /* Note: the following can be executed concurrently, so be careful. */ printk("\n===================================================\n"); printk( "[ INFO: suspicious rcu_dereference_check() usage. ]\n"); printk( "---------------------------------------------------\n"); printk("%s:%d invoked rcu_dereference_check() without protection!\n", file, line); printk("\nother info that might help us debug this:\n\n"); printk("\nrcu_scheduler_active = %d, debug_locks = %d\n", rcu_scheduler_active, debug_locks); lockdep_print_held_locks(curr); printk("\nstack backtrace:\n"); dump_stack(); } EXPORT_SYMBOL_GPL(lockdep_rcu_dereference);
gpl-2.0
Hui-Zhi/gpu_cgroup_kernel
arch/mips/sibyte/common/cfe.c
1347
8426
/* * Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/linkage.h> #include <linux/mm.h> #include <linux/blkdev.h> #include <linux/bootmem.h> #include <linux/pm.h> #include <linux/smp.h> #include <asm/bootinfo.h> #include <asm/reboot.h> #include <asm/sibyte/board.h> #include <asm/smp-ops.h> #include <asm/fw/cfe/cfe_api.h> #include <asm/fw/cfe/cfe_error.h> /* Max ram addressable in 32-bit segments */ #ifdef CONFIG_64BIT #define MAX_RAM_SIZE (~0ULL) #else #ifdef CONFIG_HIGHMEM #ifdef CONFIG_PHYS_ADDR_T_64BIT #define MAX_RAM_SIZE (~0ULL) #else #define MAX_RAM_SIZE (0xffffffffULL) #endif #else #define MAX_RAM_SIZE (0x1fffffffULL) #endif #endif #define SIBYTE_MAX_MEM_REGIONS 8 phys_addr_t board_mem_region_addrs[SIBYTE_MAX_MEM_REGIONS]; phys_addr_t board_mem_region_sizes[SIBYTE_MAX_MEM_REGIONS]; unsigned int board_mem_region_count; int cfe_cons_handle; #ifdef CONFIG_BLK_DEV_INITRD extern unsigned long initrd_start, initrd_end; #endif static void __noreturn cfe_linux_exit(void *arg) { int warm = *(int *)arg; if (smp_processor_id()) { static int reboot_smp; /* Don't repeat the process from another CPU */ if (!reboot_smp) { /* Get CPU 0 to do the cfe_exit */ reboot_smp = 1; smp_call_function(cfe_linux_exit, 
arg, 0); } } else { printk("Passing control back to CFE...\n"); cfe_exit(warm, 0); printk("cfe_exit returned??\n"); } while (1); } static void __noreturn cfe_linux_restart(char *command) { static const int zero; cfe_linux_exit((void *)&zero); } static void __noreturn cfe_linux_halt(void) { static const int one = 1; cfe_linux_exit((void *)&one); } static __init void prom_meminit(void) { u64 addr, size, type; /* regardless of PHYS_ADDR_T_64BIT */ int mem_flags = 0; unsigned int idx; int rd_flag; #ifdef CONFIG_BLK_DEV_INITRD unsigned long initrd_pstart; unsigned long initrd_pend; initrd_pstart = CPHYSADDR(initrd_start); initrd_pend = CPHYSADDR(initrd_end); if (initrd_start && ((initrd_pstart > MAX_RAM_SIZE) || (initrd_pend > MAX_RAM_SIZE))) { panic("initrd out of addressable memory"); } #endif /* INITRD */ for (idx = 0; cfe_enummem(idx, mem_flags, &addr, &size, &type) != CFE_ERR_NOMORE; idx++) { rd_flag = 0; if (type == CFE_MI_AVAILABLE) { /* * See if this block contains (any portion of) the * ramdisk */ #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) { if ((initrd_pstart > addr) && (initrd_pstart < (addr + size))) { add_memory_region(addr, initrd_pstart - addr, BOOT_MEM_RAM); rd_flag = 1; } if ((initrd_pend > addr) && (initrd_pend < (addr + size))) { add_memory_region(initrd_pend, (addr + size) - initrd_pend, BOOT_MEM_RAM); rd_flag = 1; } } #endif if (!rd_flag) { if (addr > MAX_RAM_SIZE) continue; if (addr+size > MAX_RAM_SIZE) size = MAX_RAM_SIZE - (addr+size) + 1; /* * memcpy/__copy_user prefetch, which * will cause a bus error for * KSEG/KUSEG addrs not backed by RAM. * Hence, reserve some padding for the * prefetch distance. */ if (size > 512) size -= 512; add_memory_region(addr, size, BOOT_MEM_RAM); } board_mem_region_addrs[board_mem_region_count] = addr; board_mem_region_sizes[board_mem_region_count] = size; board_mem_region_count++; if (board_mem_region_count == SIBYTE_MAX_MEM_REGIONS) { /* * Too many regions. 
Need to configure more */ while(1); } } } #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) { add_memory_region(initrd_pstart, initrd_pend - initrd_pstart, BOOT_MEM_RESERVED); } #endif } #ifdef CONFIG_BLK_DEV_INITRD static int __init initrd_setup(char *str) { char rdarg[64]; int idx; char *tmp, *endptr; unsigned long initrd_size; /* Make a copy of the initrd argument so we can smash it up here */ for (idx = 0; idx < sizeof(rdarg)-1; idx++) { if (!str[idx] || (str[idx] == ' ')) break; rdarg[idx] = str[idx]; } rdarg[idx] = 0; str = rdarg; /* *Initrd location comes in the form "<hex size of ramdisk in bytes>@<location in memory>" * e.g. initrd=3abfd@80010000. This is set up by the loader. */ for (tmp = str; *tmp != '@'; tmp++) { if (!*tmp) { goto fail; } } *tmp = 0; tmp++; if (!*tmp) { goto fail; } initrd_size = simple_strtoul(str, &endptr, 16); if (*endptr) { *(tmp-1) = '@'; goto fail; } *(tmp-1) = '@'; initrd_start = simple_strtoul(tmp, &endptr, 16); if (*endptr) { goto fail; } initrd_end = initrd_start + initrd_size; printk("Found initrd of %lx@%lx\n", initrd_size, initrd_start); return 1; fail: printk("Bad initrd argument. 
Disabling initrd\n"); initrd_start = 0; initrd_end = 0; return 1; } #endif extern struct plat_smp_ops sb_smp_ops; extern struct plat_smp_ops bcm1480_smp_ops; /* * prom_init is called just after the cpu type is determined, from setup_arch() */ void __init prom_init(void) { uint64_t cfe_ept, cfe_handle; unsigned int cfe_eptseal; int argc = fw_arg0; char **envp = (char **) fw_arg2; int *prom_vec = (int *) fw_arg3; _machine_restart = cfe_linux_restart; _machine_halt = cfe_linux_halt; pm_power_off = cfe_linux_halt; /* * Check if a loader was used; if NOT, the 4 arguments are * what CFE gives us (handle, 0, EPT and EPTSEAL) */ if (argc < 0) { cfe_handle = (uint64_t)(long)argc; cfe_ept = (long)envp; cfe_eptseal = (uint32_t)(unsigned long)prom_vec; } else { if ((int32_t)(long)prom_vec < 0) { /* * Old loader; all it gives us is the handle, * so use the "known" entrypoint and assume * the seal. */ cfe_handle = (uint64_t)(long)prom_vec; cfe_ept = (uint64_t)((int32_t)0x9fc00500); cfe_eptseal = CFE_EPTSEAL; } else { /* * Newer loaders bundle the handle/ept/eptseal * Note: prom_vec is in the loader's useg * which is still alive in the TLB. */ cfe_handle = (uint64_t)((int32_t *)prom_vec)[0]; cfe_ept = (uint64_t)((int32_t *)prom_vec)[2]; cfe_eptseal = (unsigned int)((uint32_t *)prom_vec)[3]; } } if (cfe_eptseal != CFE_EPTSEAL) { /* too early for panic to do any good */ printk("CFE's entrypoint seal doesn't match. Spinning."); while (1) ; } cfe_init(cfe_handle, cfe_ept); /* * Get the handle for (at least) prom_putchar, possibly for * boot console */ cfe_cons_handle = cfe_getstdhandle(CFE_STDHANDLE_CONSOLE); if (cfe_getenv("LINUX_CMDLINE", arcs_cmdline, COMMAND_LINE_SIZE) < 0) { if (argc >= 0) { /* The loader should have set the command line */ /* too early for panic to do any good */ printk("LINUX_CMDLINE not defined in cfe."); while (1) ; } } #ifdef CONFIG_BLK_DEV_INITRD { char *ptr; /* Need to find out early whether we've got an initrd. 
So scan the list looking now */ for (ptr = arcs_cmdline; *ptr; ptr++) { while (*ptr == ' ') { ptr++; } if (!strncmp(ptr, "initrd=", 7)) { initrd_setup(ptr+7); break; } else { while (*ptr && (*ptr != ' ')) { ptr++; } } } } #endif /* CONFIG_BLK_DEV_INITRD */ /* Not sure this is needed, but it's the safe way. */ arcs_cmdline[COMMAND_LINE_SIZE-1] = 0; prom_meminit(); #if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250) register_smp_ops(&sb_smp_ops); #endif #if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80) register_smp_ops(&bcm1480_smp_ops); #endif } void __init prom_free_prom_memory(void) { /* Not sure what I'm supposed to do here. Nothing, I think */ } void prom_putchar(char c) { int ret; while ((ret = cfe_write(cfe_cons_handle, &c, 1)) == 0) ; }
gpl-2.0
X-ROM/android_kernel_motorola_msm8226
drivers/staging/winbond/wbusb.c
1603
23379
/* * Copyright 2008 Pavel Machek <pavel@ucw.cz> * * Distribute under GPLv2. * * The original driver was written by: * Jeff Lee <YY_Lee@issc.com.tw> * * and was adapted to the 2.6 kernel by: * Costantino Leandro (Rxart Desktop) <le_costantino@pixartargentina.com.ar> */ #include <net/mac80211.h> #include <linux/usb.h> #include <linux/module.h> #include "core.h" #include "mds_f.h" #include "mto.h" #include "wbhal.h" #include "wb35reg_f.h" #include "wb35tx_f.h" #include "wb35rx_f.h" MODULE_DESCRIPTION("IS89C35 802.11bg WLAN USB Driver"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1"); static const struct usb_device_id wb35_table[] = { { USB_DEVICE(0x0416, 0x0035) }, { USB_DEVICE(0x18E8, 0x6201) }, { USB_DEVICE(0x18E8, 0x6206) }, { USB_DEVICE(0x18E8, 0x6217) }, { USB_DEVICE(0x18E8, 0x6230) }, { USB_DEVICE(0x18E8, 0x6233) }, { USB_DEVICE(0x1131, 0x2035) }, { 0, } }; MODULE_DEVICE_TABLE(usb, wb35_table); static struct ieee80211_rate wbsoft_rates[] = { { .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, }; static struct ieee80211_channel wbsoft_channels[] = { { .center_freq = 2412 }, }; static struct ieee80211_supported_band wbsoft_band_2GHz = { .channels = wbsoft_channels, .n_channels = ARRAY_SIZE(wbsoft_channels), .bitrates = wbsoft_rates, .n_bitrates = ARRAY_SIZE(wbsoft_rates), }; static void hal_set_beacon_period(struct hw_data *pHwData, u16 beacon_period) { u32 tmp; if (pHwData->SurpriseRemove) return; pHwData->BeaconPeriod = beacon_period; tmp = pHwData->BeaconPeriod << 16; tmp |= pHwData->ProbeDelay; Wb35Reg_Write(pHwData, 0x0848, tmp); } static int wbsoft_add_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { struct wbsoft_priv *priv = dev->priv; hal_set_beacon_period(&priv->sHwData, vif->bss_conf.beacon_int); return 0; } static void wbsoft_remove_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { printk("wbsoft_remove interface called\n"); } static void wbsoft_stop(struct ieee80211_hw *hw) { printk(KERN_INFO "%s called\n", 
__func__); } static int wbsoft_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats) { printk(KERN_INFO "%s called\n", __func__); return 0; } static u64 wbsoft_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { return netdev_hw_addr_list_count(mc_list); } static void wbsoft_configure_filter(struct ieee80211_hw *dev, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { unsigned int new_flags; new_flags = 0; if (*total_flags & FIF_PROMISC_IN_BSS) new_flags |= FIF_PROMISC_IN_BSS; else if ((*total_flags & FIF_ALLMULTI) || (multicast > 32)) new_flags |= FIF_ALLMULTI; dev->flags &= ~IEEE80211_HW_RX_INCLUDES_FCS; *total_flags = new_flags; } static void wbsoft_tx(struct ieee80211_hw *dev, struct sk_buff *skb) { struct wbsoft_priv *priv = dev->priv; if (priv->sMlmeFrame.IsInUsed != PACKET_FREE_TO_USE) { priv->sMlmeFrame.wNumTxMMPDUDiscarded++; kfree_skb(skb); return; } priv->sMlmeFrame.IsInUsed = PACKET_COME_FROM_MLME; priv->sMlmeFrame.pMMPDU = skb->data; priv->sMlmeFrame.DataType = FRAME_TYPE_802_11_MANAGEMENT; priv->sMlmeFrame.len = skb->len; priv->sMlmeFrame.wNumTxMMPDU++; /* * H/W will enter power save by set the register. S/W don't send null * frame with PWRMgt bit enbled to enter power save now. 
*/ Mds_Tx(priv); } static int wbsoft_start(struct ieee80211_hw *dev) { struct wbsoft_priv *priv = dev->priv; priv->enabled = true; return 0; } static void hal_set_radio_mode(struct hw_data *pHwData, unsigned char radio_off) { struct wb35_reg *reg = &pHwData->reg; if (pHwData->SurpriseRemove) return; if (radio_off) { /* disable Baseband receive off */ pHwData->CurrentRadioSw = 1; /* off */ reg->M24_MacControl &= 0xffffffbf; } else { pHwData->CurrentRadioSw = 0; /* on */ reg->M24_MacControl |= 0x00000040; } Wb35Reg_Write(pHwData, 0x0824, reg->M24_MacControl); } static void hal_set_current_channel_ex(struct hw_data *pHwData, struct chan_info channel) { struct wb35_reg *reg = &pHwData->reg; if (pHwData->SurpriseRemove) return; printk("Going to channel: %d/%d\n", channel.band, channel.ChanNo); RFSynthesizer_SwitchingChannel(pHwData, channel); /* Switch channel */ pHwData->Channel = channel.ChanNo; pHwData->band = channel.band; pr_debug("Set channel is %d, band =%d\n", pHwData->Channel, pHwData->band); reg->M28_MacControl &= ~0xff; /* Clean channel information field */ reg->M28_MacControl |= channel.ChanNo; Wb35Reg_WriteWithCallbackValue(pHwData, 0x0828, reg->M28_MacControl, (s8 *) &channel, sizeof(struct chan_info)); } static void hal_set_current_channel(struct hw_data *pHwData, struct chan_info channel) { hal_set_current_channel_ex(pHwData, channel); } static void hal_set_accept_broadcast(struct hw_data *pHwData, u8 enable) { struct wb35_reg *reg = &pHwData->reg; if (pHwData->SurpriseRemove) return; reg->M00_MacControl &= ~0x02000000; /* The HW value */ if (enable) reg->M00_MacControl |= 0x02000000; /* The HW value */ Wb35Reg_Write(pHwData, 0x0800, reg->M00_MacControl); } /* For wep key error detection, we need to accept broadcast packets to be received temporary. 
*/ static void hal_set_accept_promiscuous(struct hw_data *pHwData, u8 enable) { struct wb35_reg *reg = &pHwData->reg; if (pHwData->SurpriseRemove) return; if (enable) { reg->M00_MacControl |= 0x00400000; Wb35Reg_Write(pHwData, 0x0800, reg->M00_MacControl); } else { reg->M00_MacControl &= ~0x00400000; Wb35Reg_Write(pHwData, 0x0800, reg->M00_MacControl); } } static void hal_set_accept_multicast(struct hw_data *pHwData, u8 enable) { struct wb35_reg *reg = &pHwData->reg; if (pHwData->SurpriseRemove) return; reg->M00_MacControl &= ~0x01000000; /* The HW value */ if (enable) reg->M00_MacControl |= 0x01000000; /* The HW value */ Wb35Reg_Write(pHwData, 0x0800, reg->M00_MacControl); } static void hal_set_accept_beacon(struct hw_data *pHwData, u8 enable) { struct wb35_reg *reg = &pHwData->reg; if (pHwData->SurpriseRemove) return; if (!enable) /* Due to SME and MLME are not suitable for 35 */ return; reg->M00_MacControl &= ~0x04000000; /* The HW value */ if (enable) reg->M00_MacControl |= 0x04000000; /* The HW value */ Wb35Reg_Write(pHwData, 0x0800, reg->M00_MacControl); } static int wbsoft_config(struct ieee80211_hw *dev, u32 changed) { struct wbsoft_priv *priv = dev->priv; struct chan_info ch; printk("wbsoft_config called\n"); /* Should use channel_num, or something, as that is already pre-translated */ ch.band = 1; ch.ChanNo = 1; hal_set_current_channel(&priv->sHwData, ch); hal_set_accept_broadcast(&priv->sHwData, 1); hal_set_accept_promiscuous(&priv->sHwData, 1); hal_set_accept_multicast(&priv->sHwData, 1); hal_set_accept_beacon(&priv->sHwData, 1); hal_set_radio_mode(&priv->sHwData, 0); return 0; } static u64 wbsoft_get_tsf(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { printk("wbsoft_get_tsf called\n"); return 0; } static const struct ieee80211_ops wbsoft_ops = { .tx = wbsoft_tx, .start = wbsoft_start, .stop = wbsoft_stop, .add_interface = wbsoft_add_interface, .remove_interface = wbsoft_remove_interface, .config = wbsoft_config, .prepare_multicast = 
wbsoft_prepare_multicast, .configure_filter = wbsoft_configure_filter, .get_stats = wbsoft_get_stats, .get_tsf = wbsoft_get_tsf, }; static void hal_set_ethernet_address(struct hw_data *pHwData, u8 *current_address) { u32 ltmp[2]; if (pHwData->SurpriseRemove) return; memcpy(pHwData->CurrentMacAddress, current_address, ETH_ALEN); ltmp[0] = cpu_to_le32(*(u32 *) pHwData->CurrentMacAddress); ltmp[1] = cpu_to_le32(*(u32 *) (pHwData->CurrentMacAddress + 4)) & 0xffff; Wb35Reg_BurstWrite(pHwData, 0x03e8, ltmp, 2, AUTO_INCREMENT); } static void hal_get_permanent_address(struct hw_data *pHwData, u8 *pethernet_address) { if (pHwData->SurpriseRemove) return; memcpy(pethernet_address, pHwData->PermanentMacAddress, 6); } static void hal_stop(struct hw_data *pHwData) { struct wb35_reg *reg = &pHwData->reg; pHwData->Wb35Rx.rx_halt = 1; Wb35Rx_stop(pHwData); pHwData->Wb35Tx.tx_halt = 1; Wb35Tx_stop(pHwData); reg->D00_DmaControl &= ~0xc0000000; /* Tx Off, Rx Off */ Wb35Reg_Write(pHwData, 0x0400, reg->D00_DmaControl); } static unsigned char hal_idle(struct hw_data *pHwData) { struct wb35_reg *reg = &pHwData->reg; if (!pHwData->SurpriseRemove && reg->EP0vm_state != VM_STOP) return false; return true; } u8 hal_get_antenna_number(struct hw_data *pHwData) { struct wb35_reg *reg = &pHwData->reg; if ((reg->BB2C & BIT(11)) == 0) return 0; else return 1; } /* 0 : radio on; 1: radio off */ static u8 hal_get_hw_radio_off(struct hw_data *pHwData) { struct wb35_reg *reg = &pHwData->reg; if (pHwData->SurpriseRemove) return 1; /* read the bit16 of register U1B0 */ Wb35Reg_Read(pHwData, 0x3b0, &reg->U1B0); if ((reg->U1B0 & 0x00010000)) { pHwData->CurrentRadioHw = 1; return 1; } else { pHwData->CurrentRadioHw = 0; return 0; } } static u8 LED_GRAY[20] = { 0, 3, 4, 6, 8, 10, 11, 12, 13, 14, 15, 14, 13, 12, 11, 10, 8, 6, 4, 2 }; static u8 LED_GRAY2[30] = { 7, 8, 9, 10, 11, 12, 13, 14, 15, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 14, 13, 12, 11, 10, 9, 8 }; static void hal_led_control(unsigned long 
data) { struct wbsoft_priv *adapter = (struct wbsoft_priv *)data; struct hw_data *pHwData = &adapter->sHwData; struct wb35_reg *reg = &pHwData->reg; u32 LEDSet = (pHwData->SoftwareSet & HAL_LED_SET_MASK) >> HAL_LED_SET_SHIFT; u32 TimeInterval = 500, ltmp, ltmp2; ltmp = 0; if (pHwData->SurpriseRemove) return; if (pHwData->LED_control) { ltmp2 = pHwData->LED_control & 0xff; if (ltmp2 == 5) { /* 5 is WPS mode */ TimeInterval = 100; ltmp2 = (pHwData->LED_control >> 8) & 0xff; switch (ltmp2) { case 1: /* [0.2 On][0.1 Off]... */ pHwData->LED_Blinking %= 3; ltmp = 0x1010; /* Led 1 & 0 Green and Red */ if (pHwData->LED_Blinking == 2) /* Turn off */ ltmp = 0; break; case 2: /* [0.1 On][0.1 Off]... */ pHwData->LED_Blinking %= 2; ltmp = 0x0010; /* Led 0 red color */ if (pHwData->LED_Blinking) /* Turn off */ ltmp = 0; break; case 3: /* [0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.1 On][0.1 Off][0.5 Off]... */ pHwData->LED_Blinking %= 15; ltmp = 0x0010; /* Led 0 red color */ if ((pHwData->LED_Blinking >= 9) || (pHwData->LED_Blinking % 2)) /* Turn off 0.6 sec */ ltmp = 0; break; case 4: /* [300 On][ off ] */ ltmp = 0x1000; /* Led 1 Green color */ if (pHwData->LED_Blinking >= 3000) ltmp = 0; /* led maybe on after 300sec * 32bit counter overlap. */ break; } pHwData->LED_Blinking++; reg->U1BC_LEDConfigure = ltmp; if (LEDSet != 7) { /* Only 111 mode has 2 LEDs on PCB. */ reg->U1BC_LEDConfigure |= (ltmp & 0xff) << 8; /* Copy LED result to each LED control register */ reg->U1BC_LEDConfigure |= (ltmp & 0xff00) >> 8; } Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); } } else if (pHwData->CurrentRadioSw || pHwData->CurrentRadioHw) { /* If radio off */ if (reg->U1BC_LEDConfigure & 0x1010) { reg->U1BC_LEDConfigure &= ~0x1010; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); } } else { switch (LEDSet) { case 4: /* [100] Only 1 Led be placed on PCB and use pin 21 of IC. 
Use LED_0 for showing */ if (!pHwData->LED_LinkOn) { /* Blink only if not Link On */ /* Blinking if scanning is on progress */ if (pHwData->LED_Scanning) { if (pHwData->LED_Blinking == 0) { reg->U1BC_LEDConfigure |= 0x10; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 On */ pHwData->LED_Blinking = 1; TimeInterval = 300; } else { reg->U1BC_LEDConfigure &= ~0x10; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */ pHwData->LED_Blinking = 0; TimeInterval = 300; } } else { /* Turn Off LED_0 */ if (reg->U1BC_LEDConfigure & 0x10) { reg->U1BC_LEDConfigure &= ~0x10; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */ } } } else { /* Turn On LED_0 */ if ((reg->U1BC_LEDConfigure & 0x10) == 0) { reg->U1BC_LEDConfigure |= 0x10; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */ } } break; case 6: /* [110] Only 1 Led be placed on PCB and use pin 21 of IC. Use LED_0 for showing */ if (!pHwData->LED_LinkOn) { /* Blink only if not Link On */ /* Blinking if scanning is on progress */ if (pHwData->LED_Scanning) { if (pHwData->LED_Blinking == 0) { reg->U1BC_LEDConfigure &= ~0xf; reg->U1BC_LEDConfigure |= 0x10; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 On */ pHwData->LED_Blinking = 1; TimeInterval = 300; } else { reg->U1BC_LEDConfigure &= ~0x1f; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */ pHwData->LED_Blinking = 0; TimeInterval = 300; } } else { /* Gray blinking if in disconnect state and not scanning */ ltmp = reg->U1BC_LEDConfigure; reg->U1BC_LEDConfigure &= ~0x1f; if (LED_GRAY2[(pHwData->LED_Blinking % 30)]) { reg->U1BC_LEDConfigure |= 0x10; reg->U1BC_LEDConfigure |= LED_GRAY2[(pHwData->LED_Blinking % 30)]; } pHwData->LED_Blinking++; if (reg->U1BC_LEDConfigure != ltmp) Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */ TimeInterval = 100; } } else { /* Turn On LED_0 */ if ((reg->U1BC_LEDConfigure & 0x10) == 0) { 
reg->U1BC_LEDConfigure |= 0x10; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_0 Off */ } } break; case 5: /* [101] Only 1 Led be placed on PCB and use LED_1 for showing */ if (!pHwData->LED_LinkOn) { /* Blink only if not Link On */ /* Blinking if scanning is on progress */ if (pHwData->LED_Scanning) { if (pHwData->LED_Blinking == 0) { reg->U1BC_LEDConfigure |= 0x1000; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_1 On */ pHwData->LED_Blinking = 1; TimeInterval = 300; } else { reg->U1BC_LEDConfigure &= ~0x1000; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_1 Off */ pHwData->LED_Blinking = 0; TimeInterval = 300; } } else { /* Turn Off LED_1 */ if (reg->U1BC_LEDConfigure & 0x1000) { reg->U1BC_LEDConfigure &= ~0x1000; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_1 Off */ } } } else { /* Is transmitting/receiving ?? */ if ((adapter->RxByteCount != pHwData->RxByteCountLast) || (adapter->TxByteCount != pHwData->TxByteCountLast)) { if ((reg->U1BC_LEDConfigure & 0x3000) != 0x3000) { reg->U1BC_LEDConfigure |= 0x3000; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_1 On */ } /* Update variable */ pHwData->RxByteCountLast = adapter->RxByteCount; pHwData->TxByteCountLast = adapter->TxByteCount; TimeInterval = 200; } else { /* Turn On LED_1 and blinking if transmitting/receiving */ if ((reg->U1BC_LEDConfigure & 0x3000) != 0x1000) { reg->U1BC_LEDConfigure &= ~0x3000; reg->U1BC_LEDConfigure |= 0x1000; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); /* LED_1 On */ } } } break; default: /* Default setting. 2 LED be placed on PCB. 
LED_0: Link On LED_1 Active */ if ((reg->U1BC_LEDConfigure & 0x3000) != 0x3000) { reg->U1BC_LEDConfigure |= 0x3000; /* LED_1 is always on and event enable */ Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); } if (pHwData->LED_Blinking) { /* Gray blinking */ reg->U1BC_LEDConfigure &= ~0x0f; reg->U1BC_LEDConfigure |= 0x10; reg->U1BC_LEDConfigure |= LED_GRAY[(pHwData->LED_Blinking - 1) % 20]; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); pHwData->LED_Blinking += 2; if (pHwData->LED_Blinking < 40) TimeInterval = 100; else { pHwData->LED_Blinking = 0; /* Stop blinking */ reg->U1BC_LEDConfigure &= ~0x0f; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); } break; } if (pHwData->LED_LinkOn) { if (!(reg->U1BC_LEDConfigure & 0x10)) { /* Check the LED_0 */ /* Try to turn ON LED_0 after gray blinking */ reg->U1BC_LEDConfigure |= 0x10; pHwData->LED_Blinking = 1; /* Start blinking */ TimeInterval = 50; } } else { if (reg->U1BC_LEDConfigure & 0x10) { /* Check the LED_0 */ reg->U1BC_LEDConfigure &= ~0x10; Wb35Reg_Write(pHwData, 0x03bc, reg->U1BC_LEDConfigure); } } break; } } pHwData->time_count += TimeInterval; Wb35Tx_CurrentTime(adapter, pHwData->time_count); pHwData->LEDTimer.expires = jiffies + msecs_to_jiffies(TimeInterval); add_timer(&pHwData->LEDTimer); } static int hal_init_hardware(struct ieee80211_hw *hw) { struct wbsoft_priv *priv = hw->priv; struct hw_data *pHwData = &priv->sHwData; u16 SoftwareSet; pHwData->MaxReceiveLifeTime = DEFAULT_MSDU_LIFE_TIME; pHwData->FragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD; if (!Wb35Reg_initial(pHwData)) goto error_reg_destroy; if (!Wb35Tx_initial(pHwData)) goto error_tx_destroy; if (!Wb35Rx_initial(pHwData)) goto error_rx_destroy; init_timer(&pHwData->LEDTimer); pHwData->LEDTimer.function = hal_led_control; pHwData->LEDTimer.data = (unsigned long)priv; pHwData->LEDTimer.expires = jiffies + msecs_to_jiffies(1000); add_timer(&pHwData->LEDTimer); SoftwareSet = hal_software_set(pHwData); Wb35Rx_start(hw); 
Wb35Tx_EP2VM_start(priv); return 0; error_rx_destroy: Wb35Rx_destroy(pHwData); error_tx_destroy: Wb35Tx_destroy(pHwData); error_reg_destroy: Wb35Reg_destroy(pHwData); pHwData->SurpriseRemove = 1; return -EINVAL; } static int wb35_hw_init(struct ieee80211_hw *hw) { struct wbsoft_priv *priv = hw->priv; struct hw_data *pHwData = &priv->sHwData; u8 EEPROM_region; u8 HwRadioOff; u8 *pMacAddr2; u8 *pMacAddr; int err; pHwData->phy_type = RF_DECIDE_BY_INF; priv->Mds.TxRTSThreshold = DEFAULT_RTSThreshold; priv->Mds.TxFragmentThreshold = DEFAULT_FRAGMENT_THRESHOLD; priv->sLocalPara.region_INF = REGION_AUTO; priv->sLocalPara.TxRateMode = RATE_AUTO; priv->sLocalPara.bMacOperationMode = MODE_802_11_BG; priv->sLocalPara.MTUsize = MAX_ETHERNET_PACKET_SIZE; priv->sLocalPara.bPreambleMode = AUTO_MODE; priv->sLocalPara.bWepKeyError = false; priv->sLocalPara.bToSelfPacketReceived = false; priv->sLocalPara.WepKeyDetectTimerCount = 2 * 100; /* 2 seconds */ priv->sLocalPara.RadioOffStatus.boSwRadioOff = false; err = hal_init_hardware(hw); if (err) goto error; EEPROM_region = hal_get_region_from_EEPROM(pHwData); if (EEPROM_region != REGION_AUTO) priv->sLocalPara.region = EEPROM_region; else { if (priv->sLocalPara.region_INF != REGION_AUTO) priv->sLocalPara.region = priv->sLocalPara.region_INF; else priv->sLocalPara.region = REGION_USA; /* default setting */ } Mds_initial(priv); /* * If no user-defined address in the registry, use the address * "burned" on the NIC instead. 
*/ pMacAddr = priv->sLocalPara.ThisMacAddress; pMacAddr2 = priv->sLocalPara.PermanentAddress; /* Reading ethernet address from EEPROM */ hal_get_permanent_address(pHwData, priv->sLocalPara.PermanentAddress); if (memcmp(pMacAddr, "\x00\x00\x00\x00\x00\x00", MAC_ADDR_LENGTH) == 0) memcpy(pMacAddr, pMacAddr2, MAC_ADDR_LENGTH); else { /* Set the user define MAC address */ hal_set_ethernet_address(pHwData, priv->sLocalPara.ThisMacAddress); } priv->sLocalPara.bAntennaNo = hal_get_antenna_number(pHwData); pr_debug("Driver init, antenna no = %d\n", priv->sLocalPara.bAntennaNo); hal_get_hw_radio_off(pHwData); /* Waiting for HAL setting OK */ while (!hal_idle(pHwData)) msleep(10); MTO_Init(priv); HwRadioOff = hal_get_hw_radio_off(pHwData); priv->sLocalPara.RadioOffStatus.boHwRadioOff = !!HwRadioOff; hal_set_radio_mode(pHwData, (unsigned char)(priv->sLocalPara.RadioOffStatus. boSwRadioOff || priv->sLocalPara.RadioOffStatus. boHwRadioOff)); /* Notify hal that the driver is ready now. */ hal_driver_init_OK(pHwData) = 1; error: return err; } static int wb35_probe(struct usb_interface *intf, const struct usb_device_id *id_table) { struct usb_device *udev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *endpoint; struct usb_host_interface *interface; struct ieee80211_hw *dev; struct wbsoft_priv *priv; int nr, err; u32 ltmp; usb_get_dev(udev); /* Check the device if it already be opened */ nr = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x01, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0, 0x400, &ltmp, 4, HZ * 100); if (nr < 0) { err = nr; goto error; } /* Is already initialized? 
*/ ltmp = cpu_to_le32(ltmp); if (ltmp) { err = -EBUSY; goto error; } dev = ieee80211_alloc_hw(sizeof(*priv), &wbsoft_ops); if (!dev) { err = -ENOMEM; goto error; } priv = dev->priv; priv->sHwData.udev = udev; interface = intf->cur_altsetting; endpoint = &interface->endpoint[0].desc; if (endpoint[2].wMaxPacketSize == 512) printk("[w35und] Working on USB 2.0\n"); err = wb35_hw_init(dev); if (err) goto error_free_hw; SET_IEEE80211_DEV(dev, &udev->dev); { struct hw_data *pHwData = &priv->sHwData; unsigned char dev_addr[MAX_ADDR_LEN]; hal_get_permanent_address(pHwData, dev_addr); SET_IEEE80211_PERM_ADDR(dev, dev_addr); } dev->extra_tx_headroom = 12; /* FIXME */ dev->flags = IEEE80211_HW_SIGNAL_UNSPEC; dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); dev->channel_change_time = 1000; dev->max_signal = 100; dev->queues = 1; dev->wiphy->bands[IEEE80211_BAND_2GHZ] = &wbsoft_band_2GHz; err = ieee80211_register_hw(dev); if (err) goto error_free_hw; usb_set_intfdata(intf, dev); return 0; error_free_hw: ieee80211_free_hw(dev); error: usb_put_dev(udev); return err; } static void hal_halt(struct hw_data *pHwData) { del_timer_sync(&pHwData->LEDTimer); /* XXX: Wait for Timer DPC exit. 
*/ msleep(100); Wb35Rx_destroy(pHwData); Wb35Tx_destroy(pHwData); Wb35Reg_destroy(pHwData); } static void wb35_hw_halt(struct wbsoft_priv *adapter) { /* Turn off Rx and Tx hardware ability */ hal_stop(&adapter->sHwData); pr_debug("[w35und] Hal_stop O.K.\n"); /* Waiting Irp completed */ msleep(100); hal_halt(&adapter->sHwData); } static void wb35_disconnect(struct usb_interface *intf) { struct ieee80211_hw *hw = usb_get_intfdata(intf); struct wbsoft_priv *priv = hw->priv; wb35_hw_halt(priv); ieee80211_stop_queues(hw); ieee80211_unregister_hw(hw); ieee80211_free_hw(hw); usb_set_intfdata(intf, NULL); usb_put_dev(interface_to_usbdev(intf)); } static struct usb_driver wb35_driver = { .name = "w35und", .id_table = wb35_table, .probe = wb35_probe, .disconnect = wb35_disconnect, }; module_usb_driver(wb35_driver);
gpl-2.0
omnirom/android_kernel_moto_shamu
arch/arm/mach-kirkwood/board-ns2.c
2115
1025
/* * Copyright 2012 (C), Simon Guinot <simon.guinot@sequanux.org> * * arch/arm/mach-kirkwood/board-ns2.c * * LaCie Network Space v2 board (and parents) initialization for drivers * not converted to flattened device tree yet. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mv643xx_eth.h> #include <linux/of.h> #include "common.h" static struct mv643xx_eth_platform_data ns2_ge00_data = { .phy_addr = MV643XX_ETH_PHY_ADDR(8), }; void __init ns2_init(void) { /* * Basic setup. Needs to be called early. */ if (of_machine_is_compatible("lacie,cloudbox") || of_machine_is_compatible("lacie,netspace_lite_v2") || of_machine_is_compatible("lacie,netspace_mini_v2")) ns2_ge00_data.phy_addr = MV643XX_ETH_PHY_ADDR(0); kirkwood_ge00_init(&ns2_ge00_data); }
gpl-2.0
ArtisteHsu/jetson-tk1-r21.3-kernel
drivers/mtd/nand/sm_common.c
2115
4618
/* * Copyright © 2009 - Maxim Levitsky * Common routines & support for xD format * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/mtd/nand.h> #include <linux/module.h> #include <linux/sizes.h> #include "sm_common.h" static struct nand_ecclayout nand_oob_sm = { .eccbytes = 6, .eccpos = {8, 9, 10, 13, 14, 15}, .oobfree = { {.offset = 0 , .length = 4}, /* reserved */ {.offset = 6 , .length = 2}, /* LBA1 */ {.offset = 11, .length = 2} /* LBA2 */ } }; /* NOTE: This layout is is not compatabable with SmartMedia, */ /* because the 256 byte devices have page depenent oob layout */ /* However it does preserve the bad block markers */ /* If you use smftl, it will bypass this and work correctly */ /* If you not, then you break SmartMedia compliance anyway */ static struct nand_ecclayout nand_oob_sm_small = { .eccbytes = 3, .eccpos = {0, 1, 2}, .oobfree = { {.offset = 3 , .length = 2}, /* reserved */ {.offset = 6 , .length = 2}, /* LBA1 */ } }; static int sm_block_markbad(struct mtd_info *mtd, loff_t ofs) { struct mtd_oob_ops ops; struct sm_oob oob; int ret, error = 0; memset(&oob, -1, SM_OOB_SIZE); oob.block_status = 0x0F; /* As long as this function is called on erase block boundaries it will work correctly for 256 byte nand */ ops.mode = MTD_OPS_PLACE_OOB; ops.ooboffs = 0; ops.ooblen = mtd->oobsize; ops.oobbuf = (void *)&oob; ops.datbuf = NULL; ret = mtd_write_oob(mtd, ofs, &ops); if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) { printk(KERN_NOTICE "sm_common: can't mark sector at %i as bad\n", (int)ofs); error = -EIO; } else mtd->ecc_stats.badblocks++; return error; } static struct nand_flash_dev nand_smartmedia_flash_ids[] = { LEGACY_ID_NAND("SmartMedia 2MiB 3,3V ROM", 0x5d, 2, SZ_8K, NAND_ROM), LEGACY_ID_NAND("SmartMedia 4MiB 3,3V", 0xe3, 4, SZ_8K, 0), LEGACY_ID_NAND("SmartMedia 4MiB 3,3/5V", 
0xe5, 4, SZ_8K, 0), LEGACY_ID_NAND("SmartMedia 4MiB 5V", 0x6b, 4, SZ_8K, 0), LEGACY_ID_NAND("SmartMedia 4MiB 3,3V ROM", 0xd5, 4, SZ_8K, NAND_ROM), LEGACY_ID_NAND("SmartMedia 8MiB 3,3V", 0xe6, 8, SZ_8K, 0), LEGACY_ID_NAND("SmartMedia 8MiB 3,3V ROM", 0xd6, 8, SZ_8K, NAND_ROM), LEGACY_ID_NAND("SmartMedia 16MiB 3,3V", 0x73, 16, SZ_16K, 0), LEGACY_ID_NAND("SmartMedia 16MiB 3,3V ROM", 0x57, 16, SZ_16K, NAND_ROM), LEGACY_ID_NAND("SmartMedia 32MiB 3,3V", 0x75, 32, SZ_16K, 0), LEGACY_ID_NAND("SmartMedia 32MiB 3,3V ROM", 0x58, 32, SZ_16K, NAND_ROM), LEGACY_ID_NAND("SmartMedia 64MiB 3,3V", 0x76, 64, SZ_16K, 0), LEGACY_ID_NAND("SmartMedia 64MiB 3,3V ROM", 0xd9, 64, SZ_16K, NAND_ROM), LEGACY_ID_NAND("SmartMedia 128MiB 3,3V", 0x79, 128, SZ_16K, 0), LEGACY_ID_NAND("SmartMedia 128MiB 3,3V ROM", 0xda, 128, SZ_16K, NAND_ROM), LEGACY_ID_NAND("SmartMedia 256MiB 3, 3V", 0x71, 256, SZ_16K, 0), LEGACY_ID_NAND("SmartMedia 256MiB 3,3V ROM", 0x5b, 256, SZ_16K, NAND_ROM), {NULL} }; static struct nand_flash_dev nand_xd_flash_ids[] = { LEGACY_ID_NAND("xD 16MiB 3,3V", 0x73, 16, SZ_16K, 0), LEGACY_ID_NAND("xD 32MiB 3,3V", 0x75, 32, SZ_16K, 0), LEGACY_ID_NAND("xD 64MiB 3,3V", 0x76, 64, SZ_16K, 0), LEGACY_ID_NAND("xD 128MiB 3,3V", 0x79, 128, SZ_16K, 0), LEGACY_ID_NAND("xD 256MiB 3,3V", 0x71, 256, SZ_16K, NAND_BROKEN_XD), LEGACY_ID_NAND("xD 512MiB 3,3V", 0xdc, 512, SZ_16K, NAND_BROKEN_XD), LEGACY_ID_NAND("xD 1GiB 3,3V", 0xd3, 1024, SZ_16K, NAND_BROKEN_XD), LEGACY_ID_NAND("xD 2GiB 3,3V", 0xd5, 2048, SZ_16K, NAND_BROKEN_XD), {NULL} }; int sm_register_device(struct mtd_info *mtd, int smartmedia) { struct nand_chip *chip = mtd->priv; int ret; chip->options |= NAND_SKIP_BBTSCAN; /* Scan for card properties */ ret = nand_scan_ident(mtd, 1, smartmedia ? 
nand_smartmedia_flash_ids : nand_xd_flash_ids); if (ret) return ret; /* Bad block marker position */ chip->badblockpos = 0x05; chip->badblockbits = 7; chip->block_markbad = sm_block_markbad; /* ECC layout */ if (mtd->writesize == SM_SECTOR_SIZE) chip->ecc.layout = &nand_oob_sm; else if (mtd->writesize == SM_SMALL_PAGE) chip->ecc.layout = &nand_oob_sm_small; else return -ENODEV; ret = nand_scan_tail(mtd); if (ret) return ret; return mtd_device_register(mtd, NULL, 0); } EXPORT_SYMBOL_GPL(sm_register_device); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); MODULE_DESCRIPTION("Common SmartMedia/xD functions");
gpl-2.0
xjljian/android_kernel_huawei_msm8916
drivers/gpu/drm/via/via_drv.c
2371
3609
/* * Copyright 1998-2003 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2003 S3 Graphics, Inc. All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * VIA, S3 GRAPHICS, AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include <linux/module.h> #include <drm/drmP.h> #include <drm/via_drm.h> #include "via_drv.h" #include <drm/drm_pciids.h> static int via_driver_open(struct drm_device *dev, struct drm_file *file) { struct via_file_private *file_priv; DRM_DEBUG_DRIVER("\n"); file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL); if (!file_priv) return -ENOMEM; file->driver_priv = file_priv; INIT_LIST_HEAD(&file_priv->obj_list); return 0; } void via_driver_postclose(struct drm_device *dev, struct drm_file *file) { struct via_file_private *file_priv = file->driver_priv; kfree(file_priv); } static struct pci_device_id pciidlist[] = { viadrv_PCI_IDS }; static const struct file_operations via_driver_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, #ifdef CONFIG_COMPAT .compat_ioctl = drm_compat_ioctl, #endif .llseek = noop_llseek, }; static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, .load = via_driver_load, .unload = via_driver_unload, .open = via_driver_open, .preclose = via_reclaim_buffers_locked, .postclose = via_driver_postclose, .context_dtor = via_final_context, .get_vblank_counter = via_get_vblank_counter, .enable_vblank = via_enable_vblank, .disable_vblank = via_disable_vblank, .irq_preinstall = via_driver_irq_preinstall, .irq_postinstall = via_driver_irq_postinstall, .irq_uninstall = via_driver_irq_uninstall, .irq_handler = via_driver_irq_handler, .dma_quiescent = via_driver_dma_quiescent, .lastclose = via_lastclose, .ioctls = via_ioctls, .fops = &via_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; static struct pci_driver via_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, }; static int __init via_init(void) { driver.num_ioctls = via_max_ioctl; via_init_command_verifier(); 
return drm_pci_init(&driver, &via_pci_driver); } static void __exit via_exit(void) { drm_pci_exit(&driver, &via_pci_driver); } module_init(via_init); module_exit(via_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights");
gpl-2.0
OtherCrashOverride/linux
arch/arm/mach-msm/board-trout-mmc.c
2883
4626
/* linux/arch/arm/mach-msm/board-trout-mmc.c ** Author: Brian Swetland <swetland@google.com> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/mmc/host.h> #include <linux/mmc/sdio_ids.h> #include <linux/err.h> #include <linux/debugfs.h> #include <asm/gpio.h> #include <asm/io.h> #include <mach/vreg.h> #include <mach/mmc.h> #include "devices.h" #include "board-trout.h" #include "proc_comm.h" #define DEBUG_SDSLOT_VDD 1 /* ---- COMMON ---- */ static void config_gpio_table(uint32_t *table, int len) { int n; unsigned id; for(n = 0; n < len; n++) { id = table[n]; msm_proc_comm(PCOM_RPC_GPIO_TLMM_CONFIG_EX, &id, 0); } } /* ---- SDCARD ---- */ static uint32_t sdcard_on_gpio_table[] = { PCOM_GPIO_CFG(62, 2, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_8MA), /* CLK */ PCOM_GPIO_CFG(63, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* CMD */ PCOM_GPIO_CFG(64, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* DAT3 */ PCOM_GPIO_CFG(65, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_8MA), /* DAT2 */ PCOM_GPIO_CFG(66, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT1 */ PCOM_GPIO_CFG(67, 2, GPIO_OUTPUT, GPIO_PULL_UP, GPIO_4MA), /* DAT0 */ }; static uint32_t sdcard_off_gpio_table[] = { PCOM_GPIO_CFG(62, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CLK */ PCOM_GPIO_CFG(63, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* CMD */ PCOM_GPIO_CFG(64, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT3 */ PCOM_GPIO_CFG(65, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT2 */ PCOM_GPIO_CFG(66, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT1 */ PCOM_GPIO_CFG(67, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_4MA), /* DAT0 */ }; static uint opt_disable_sdcard; static int __init trout_disablesdcard_setup(char *str) { int cal = simple_strtol(str, NULL, 0); opt_disable_sdcard = cal; return 1; } __setup("board_trout.disable_sdcard=", trout_disablesdcard_setup); static struct vreg *vreg_sdslot; /* SD slot power */ struct mmc_vdd_xlat { int mask; int level; }; static struct 
mmc_vdd_xlat mmc_vdd_table[] = { { MMC_VDD_165_195, 1800 }, { MMC_VDD_20_21, 2050 }, { MMC_VDD_21_22, 2150 }, { MMC_VDD_22_23, 2250 }, { MMC_VDD_23_24, 2350 }, { MMC_VDD_24_25, 2450 }, { MMC_VDD_25_26, 2550 }, { MMC_VDD_26_27, 2650 }, { MMC_VDD_27_28, 2750 }, { MMC_VDD_28_29, 2850 }, { MMC_VDD_29_30, 2950 }, }; static unsigned int sdslot_vdd = 0xffffffff; static unsigned int sdslot_vreg_enabled; static uint32_t trout_sdslot_switchvdd(struct device *dev, unsigned int vdd) { int i, rc; BUG_ON(!vreg_sdslot); if (vdd == sdslot_vdd) return 0; sdslot_vdd = vdd; if (vdd == 0) { #if DEBUG_SDSLOT_VDD printk("%s: Disabling SD slot power\n", __func__); #endif config_gpio_table(sdcard_off_gpio_table, ARRAY_SIZE(sdcard_off_gpio_table)); vreg_disable(vreg_sdslot); sdslot_vreg_enabled = 0; return 0; } if (!sdslot_vreg_enabled) { rc = vreg_enable(vreg_sdslot); if (rc) { printk(KERN_ERR "%s: Error enabling vreg (%d)\n", __func__, rc); } config_gpio_table(sdcard_on_gpio_table, ARRAY_SIZE(sdcard_on_gpio_table)); sdslot_vreg_enabled = 1; } for (i = 0; i < ARRAY_SIZE(mmc_vdd_table); i++) { if (mmc_vdd_table[i].mask == (1 << vdd)) { #if DEBUG_SDSLOT_VDD printk("%s: Setting level to %u\n", __func__, mmc_vdd_table[i].level); #endif rc = vreg_set_level(vreg_sdslot, mmc_vdd_table[i].level); if (rc) { printk(KERN_ERR "%s: Error setting vreg level (%d)\n", __func__, rc); } return 0; } } printk(KERN_ERR "%s: Invalid VDD %d specified\n", __func__, vdd); return 0; } static unsigned int trout_sdslot_status(struct device *dev) { unsigned int status; status = (unsigned int) gpio_get_value(TROUT_GPIO_SDMC_CD_N); return (!status); } #define TROUT_MMC_VDD MMC_VDD_165_195 | MMC_VDD_20_21 | MMC_VDD_21_22 \ | MMC_VDD_22_23 | MMC_VDD_23_24 | MMC_VDD_24_25 \ | MMC_VDD_25_26 | MMC_VDD_26_27 | MMC_VDD_27_28 \ | MMC_VDD_28_29 | MMC_VDD_29_30 static struct msm_mmc_platform_data trout_sdslot_data = { .ocr_mask = TROUT_MMC_VDD, .status = trout_sdslot_status, .translate_vdd = trout_sdslot_switchvdd, }; int __init 
trout_init_mmc(unsigned int sys_rev) { sdslot_vreg_enabled = 0; vreg_sdslot = vreg_get(0, "gp6"); if (IS_ERR(vreg_sdslot)) return PTR_ERR(vreg_sdslot); irq_set_irq_wake(TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 1); if (!opt_disable_sdcard) msm_add_sdcc(2, &trout_sdslot_data, TROUT_GPIO_TO_INT(TROUT_GPIO_SDMC_CD_N), 0); else printk(KERN_INFO "trout: SD-Card interface disabled\n"); return 0; }
gpl-2.0
mathkid95/linux_lg_jb
arch/blackfin/mach-bf527/boards/ezkit.c
4419
31801
/* * Copyright 2004-2009 Analog Devices Inc. * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/export.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/i2c.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/usb/musb.h> #include <linux/leds.h> #include <linux/input.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/reboot.h> #include <asm/nand.h> #include <asm/portmux.h> #include <asm/dpmc.h> #include <linux/spi/ad7877.h> #include <asm/bfin_sport.h> /* * Name the Board for the /proc/cpuinfo */ #ifdef CONFIG_BFIN527_EZKIT_V2 const char bfin_board_name[] = "ADI BF527-EZKIT V2"; #else const char bfin_board_name[] = "ADI BF527-EZKIT"; #endif /* * Driver needs to know address, irq and flag pin. */ #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) #include <linux/usb/isp1760.h> static struct resource bfin_isp1760_resources[] = { [0] = { .start = 0x203C0000, .end = 0x203C0000 + 0x000fffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ, }, }; static struct isp1760_platform_data isp1760_priv = { .is_isp1761 = 0, .bus_width_16 = 1, .port1_otg = 0, .analog_oc = 0, .dack_polarity_high = 0, .dreq_polarity_high = 0, }; static struct platform_device bfin_isp1760_device = { .name = "isp1760", .id = 0, .dev = { .platform_data = &isp1760_priv, }, .num_resources = ARRAY_SIZE(bfin_isp1760_resources), .resource = bfin_isp1760_resources, }; #endif #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) static struct resource musb_resources[] = { [0] = { .start = 0xffc03800, .end = 0xffc03cff, .flags = IORESOURCE_MEM, }, [1] = { /* general IRQ */ .start = IRQ_USB_INT0, .end = IRQ_USB_INT0, .flags = 
IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, .name = "mc" }, [2] = { /* DMA IRQ */ .start = IRQ_USB_DMA, .end = IRQ_USB_DMA, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, .name = "dma" }, }; static struct musb_hdrc_config musb_config = { .multipoint = 0, .dyn_fifo = 0, .soft_con = 1, .dma = 1, .num_eps = 8, .dma_channels = 8, .gpio_vrsel = GPIO_PG13, /* Some custom boards need to be active low, just set it to "0" * if it is the case. */ .gpio_vrsel_active = 1, .clkin = 24, /* musb CLKIN in MHZ */ }; static struct musb_hdrc_platform_data musb_plat = { #if defined(CONFIG_USB_MUSB_OTG) .mode = MUSB_OTG, #elif defined(CONFIG_USB_MUSB_HDRC_HCD) .mode = MUSB_HOST, #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) .mode = MUSB_PERIPHERAL, #endif .config = &musb_config, }; static u64 musb_dmamask = ~(u32)0; static struct platform_device musb_device = { .name = "musb-blackfin", .id = 0, .dev = { .dma_mask = &musb_dmamask, .coherent_dma_mask = 0xffffffff, .platform_data = &musb_plat, }, .num_resources = ARRAY_SIZE(musb_resources), .resource = musb_resources, }; #endif #if defined(CONFIG_FB_BFIN_T350MCQB) || defined(CONFIG_FB_BFIN_T350MCQB_MODULE) static struct resource bf52x_t350mcqb_resources[] = { { .start = IRQ_PPI_ERROR, .end = IRQ_PPI_ERROR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bf52x_t350mcqb_device = { .name = "bfin-t350mcqb", .id = -1, .num_resources = ARRAY_SIZE(bf52x_t350mcqb_resources), .resource = bf52x_t350mcqb_resources, }; #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) #include <asm/bfin-lq035q1.h> static struct bfin_lq035q1fb_disp_info bfin_lq035q1_data = { .mode = LQ035_NORM | LQ035_RGB | LQ035_RL | LQ035_TB, .ppi_mode = USE_RGB565_8_BIT_PPI, }; static struct resource bfin_lq035q1_resources[] = { { .start = IRQ_PPI_ERROR, .end = IRQ_PPI_ERROR, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_lq035q1_device = { .name = "bfin-lq035q1", .id = -1, .num_resources = 
ARRAY_SIZE(bfin_lq035q1_resources), .resource = bfin_lq035q1_resources, .dev = { .platform_data = &bfin_lq035q1_data, }, }; #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition ezkit_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux kernel(nor)", .size = 0x1C0000, .offset = MTDPART_OFS_APPEND, }, { .name = "file system(nor)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct physmap_flash_data ezkit_flash_data = { .width = 2, .parts = ezkit_partitions, .nr_parts = ARRAY_SIZE(ezkit_partitions), }; static struct resource ezkit_flash_resource = { .start = 0x20000000, .end = 0x203fffff, .flags = IORESOURCE_MEM, }; static struct platform_device ezkit_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &ezkit_flash_data, }, .num_resources = 1, .resource = &ezkit_flash_resource, }; #endif #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) static struct mtd_partition partition_info[] = { { .name = "bootloader(nand)", .offset = 0, .size = 0x40000, }, { .name = "linux kernel(nand)", .offset = MTDPART_OFS_APPEND, .size = 4 * 1024 * 1024, }, { .name = "file system(nand)", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL, }, }; static struct bf5xx_nand_platform bf5xx_nand_platform = { .data_width = NFC_NWIDTH_8, .partitions = partition_info, .nr_partitions = ARRAY_SIZE(partition_info), .rd_dly = 3, .wr_dly = 3, }; static struct resource bf5xx_nand_resources[] = { { .start = NFC_CTL, .end = NFC_DATA_RD + 2, .flags = IORESOURCE_MEM, }, { .start = CH_NFC, .end = CH_NFC, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bf5xx_nand_device = { .name = "bf5xx-nand", .id = 0, .num_resources = ARRAY_SIZE(bf5xx_nand_resources), .resource = bf5xx_nand_resources, .dev = { .platform_data = &bf5xx_nand_platform, }, }; #endif #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) static 
struct resource bfin_pcmcia_cf_resources[] = { { .start = 0x20310000, /* IO PORT */ .end = 0x20312000, .flags = IORESOURCE_MEM, }, { .start = 0x20311000, /* Attribute Memory */ .end = 0x20311FFF, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF4, .end = IRQ_PF4, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, { .start = 6, /* Card Detect PF6 */ .end = 6, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_pcmcia_cf_device = { .name = "bfin_cf_pcmcia", .id = -1, .num_resources = ARRAY_SIZE(bfin_pcmcia_cf_resources), .resource = bfin_pcmcia_cf_resources, }; #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) #include <linux/smc91x.h> static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { { .name = "smc91x-regs", .start = 0x20300300, .end = 0x20300300 + 16, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &smc91x_info, }, }; #endif #if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) static struct resource dm9000_resources[] = { [0] = { .start = 0x203FB800, .end = 0x203FB800 + 1, .flags = IORESOURCE_MEM, }, [1] = { .start = 0x203FB800 + 4, .end = 0x203FB800 + 5, .flags = IORESOURCE_MEM, }, [2] = { .start = IRQ_PF9, .end = IRQ_PF9, .flags = (IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHEDGE), }, }; static struct platform_device dm9000_device = { .name = "dm9000", .id = -1, .num_resources = ARRAY_SIZE(dm9000_resources), .resource = dm9000_resources, }; #endif #if defined(CONFIG_BFIN_MAC) || 
defined(CONFIG_BFIN_MAC_MODULE) #include <linux/bfin_mac.h> static const unsigned short bfin_mac_peripherals[] = P_RMII0; static struct bfin_phydev_platform_data bfin_phydev_data[] = { { .addr = 1, .irq = IRQ_MAC_PHYINT, }, }; static struct bfin_mii_bus_platform_data bfin_mii_bus_data = { .phydev_number = 1, .phydev_data = bfin_phydev_data, .phy_mode = PHY_INTERFACE_MODE_RMII, .mac_peripherals = bfin_mac_peripherals, }; static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", .dev = { .platform_data = &bfin_mii_bus_data, } }; static struct platform_device bfin_mac_device = { .name = "bfin_mac", .dev = { .platform_data = &bfin_mii_bus, } }; #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) static struct resource net2272_bfin_resources[] = { { .start = 0x20300000, .end = 0x20300000 + 0x100, .flags = IORESOURCE_MEM, }, { .start = 1, .flags = IORESOURCE_BUS, }, { .start = IRQ_PF7, .end = IRQ_PF7, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device net2272_bfin_device = { .name = "net2272", .id = -1, .num_resources = ARRAY_SIZE(net2272_bfin_resources), .resource = net2272_bfin_resources, }; #endif #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00040000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "m25p16", }; /* SPI flash chip (m25p64) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ }; #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) static struct bfin5xx_spi_chip mmc_spi_chip_info = { .enable_dma = 0, }; #endif #if 
defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) static const struct ad7877_platform_data bfin_ad7877_ts_info = { .model = 7877, .vref_delay_usecs = 50, /* internal, no capacitor */ .x_plate_ohms = 419, .y_plate_ohms = 486, .pressure_max = 1000, .pressure_min = 0, .stopacq_polarity = 1, .first_conversion_delay = 3, .acquisition_time = 1, .averaging = 1, .pen_down_acc_interval = 1, }; #endif #if defined(CONFIG_TOUCHSCREEN_AD7879) || defined(CONFIG_TOUCHSCREEN_AD7879_MODULE) #include <linux/spi/ad7879.h> static const struct ad7879_platform_data bfin_ad7879_ts_info = { .model = 7879, /* Model = AD7879 */ .x_plate_ohms = 620, /* 620 Ohm from the touch datasheet */ .pressure_max = 10000, .pressure_min = 0, .first_conversion_delay = 3, /* wait 512us before do a first conversion */ .acquisition_time = 1, /* 4us acquisition time per sample */ .median = 2, /* do 8 measurements */ .averaging = 1, /* take the average of 4 middle samples */ .pen_down_acc_interval = 255, /* 9.4 ms */ .gpio_export = 0, /* Export GPIO to gpiolib */ }; #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) || \ defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) static const u16 bfin_snd_pin[][7] = { {P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0, 0}, {P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, P_SPORT1_TFS, 0}, }; static struct bfin_snd_platform_data bfin_snd_data[] = { { .pin_req = &bfin_snd_pin[0][0], }, { .pin_req = &bfin_snd_pin[1][0], }, }; #define BFIN_SND_RES(x) \ [x] = { \ { \ .start = SPORT##x##_TCR1, \ .end = SPORT##x##_TCR1, \ .flags = IORESOURCE_MEM \ }, \ { \ .start = CH_SPORT##x##_RX, \ .end = CH_SPORT##x##_RX, \ .flags = IORESOURCE_DMA, \ }, \ { \ .start = CH_SPORT##x##_TX, \ .end = CH_SPORT##x##_TX, \ .flags = IORESOURCE_DMA, \ }, \ { \ .start = IRQ_SPORT##x##_ERROR, \ .end = IRQ_SPORT##x##_ERROR, \ .flags = IORESOURCE_IRQ, \ } \ } static 
struct resource bfin_snd_resources[][4] = { BFIN_SND_RES(0), BFIN_SND_RES(1), }; #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) static struct platform_device bfin_i2s_pcm = { .name = "bfin-i2s-pcm-audio", .id = -1, }; #endif #if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) static struct platform_device bfin_tdm_pcm = { .name = "bfin-tdm-pcm-audio", .id = -1, }; #endif #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) static struct platform_device bfin_ac97_pcm = { .name = "bfin-ac97-pcm-audio", .id = -1, }; #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) static struct platform_device bfin_i2s = { .name = "bfin-i2s", .id = CONFIG_SND_BF5XX_SPORT_NUM, .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]), .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM], .dev = { .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM], }, }; #endif #if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) static struct platform_device bfin_tdm = { .name = "bfin-tdm", .id = CONFIG_SND_BF5XX_SPORT_NUM, .num_resources = ARRAY_SIZE(bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM]), .resource = bfin_snd_resources[CONFIG_SND_BF5XX_SPORT_NUM], .dev = { .platform_data = &bfin_snd_data[CONFIG_SND_BF5XX_SPORT_NUM], }, }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) \ || defined(CONFIG_MTD_M25P80_MODULE) { /* the modalias must be the same as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 1, /* Framework chip select. 
On STAMP537 it is SPISSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) { .modalias = "ad183x", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 4, .platform_data = "ad1836", .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 3, .controller_data = &mmc_spi_chip_info, .mode = SPI_MODE_0, }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7877) || defined(CONFIG_TOUCHSCREEN_AD7877_MODULE) { .modalias = "ad7877", .platform_data = &bfin_ad7877_ts_info, .irq = IRQ_PF8, .max_speed_hz = 12500000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 2, }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7879_SPI) || defined(CONFIG_TOUCHSCREEN_AD7879_SPI_MODULE) { .modalias = "ad7879", .platform_data = &bfin_ad7879_ts_info, .irq = IRQ_PF8, .max_speed_hz = 5000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 3, .mode = SPI_CPHA | SPI_CPOL, }, #endif #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, }, #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) { .modalias = "bfin-lq035q1-spi", .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 7, .mode = SPI_CPHA | SPI_CPOL, }, #endif }; #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; /* SPI (0) */ static struct 
resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_TX, .end = IRQ_UART0_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_TX, .end = IRQ_UART1_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = CH_UART1_RX, .flags = 
IORESOURCE_DMA, }, #ifdef CONFIG_BFIN_UART1_CTSRTS { /* CTS pin */ .start = GPIO_PF9, .end = GPIO_PF9, .flags = IORESOURCE_IO, }, { /* RTS pin */ .start = GPIO_PF10, .end = GPIO_PF10, .flags = IORESOURCE_IO, }, #endif }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static struct resource bfin_twi0_resource[] = { [0] = { .start = TWI0_REGBASE, .end = TWI0_REGBASE, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TWI, .end = IRQ_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device i2c_bfin_twi_device = { .name = "i2c-bfin-twi", .id = 0, .num_resources = ARRAY_SIZE(bfin_twi0_resource), .resource = 
bfin_twi0_resource, }; #endif #if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) #include <linux/mfd/adp5520.h> /* * ADP5520/5501 LEDs Data */ static struct led_info adp5520_leds[] = { { .name = "adp5520-led1", .default_trigger = "none", .flags = FLAG_ID_ADP5520_LED1_ADP5501_LED0 | ADP5520_LED_OFFT_600ms, }, }; static struct adp5520_leds_platform_data adp5520_leds_data = { .num_leds = ARRAY_SIZE(adp5520_leds), .leds = adp5520_leds, .fade_in = ADP5520_FADE_T_600ms, .fade_out = ADP5520_FADE_T_600ms, .led_on_time = ADP5520_LED_ONT_600ms, }; /* * ADP5520 Keypad Data */ static const unsigned short adp5520_keymap[ADP5520_KEYMAPSIZE] = { [ADP5520_KEY(3, 3)] = KEY_1, [ADP5520_KEY(2, 3)] = KEY_2, [ADP5520_KEY(1, 3)] = KEY_3, [ADP5520_KEY(0, 3)] = KEY_UP, [ADP5520_KEY(3, 2)] = KEY_4, [ADP5520_KEY(2, 2)] = KEY_5, [ADP5520_KEY(1, 2)] = KEY_6, [ADP5520_KEY(0, 2)] = KEY_DOWN, [ADP5520_KEY(3, 1)] = KEY_7, [ADP5520_KEY(2, 1)] = KEY_8, [ADP5520_KEY(1, 1)] = KEY_9, [ADP5520_KEY(0, 1)] = KEY_DOT, [ADP5520_KEY(3, 0)] = KEY_BACKSPACE, [ADP5520_KEY(2, 0)] = KEY_0, [ADP5520_KEY(1, 0)] = KEY_HELP, [ADP5520_KEY(0, 0)] = KEY_ENTER, }; static struct adp5520_keys_platform_data adp5520_keys_data = { .rows_en_mask = ADP5520_ROW_R3 | ADP5520_ROW_R2 | ADP5520_ROW_R1 | ADP5520_ROW_R0, .cols_en_mask = ADP5520_COL_C3 | ADP5520_COL_C2 | ADP5520_COL_C1 | ADP5520_COL_C0, .keymap = adp5520_keymap, .keymapsize = ARRAY_SIZE(adp5520_keymap), .repeat = 0, }; /* * ADP5520/5501 Multifunction Device Init Data */ static struct adp5520_platform_data adp5520_pdev_data = { .leds = &adp5520_leds_data, .keys = &adp5520_keys_data, }; #endif static struct i2c_board_info __initdata bfin_i2c_board_info[] = { #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { I2C_BOARD_INFO("pcf8574_lcd", 0x22), }, #endif #if defined(CONFIG_INPUT_PCF8574) || defined(CONFIG_INPUT_PCF8574_MODULE) { I2C_BOARD_INFO("pcf8574_keypad", 0x27), .irq = IRQ_PF8, }, #endif #if defined(CONFIG_FB_BFIN_7393) 
|| defined(CONFIG_FB_BFIN_7393_MODULE) { I2C_BOARD_INFO("bfin-adv7393", 0x2B), }, #endif #if defined(CONFIG_TOUCHSCREEN_AD7879_I2C) || defined(CONFIG_TOUCHSCREEN_AD7879_I2C_MODULE) { I2C_BOARD_INFO("ad7879", 0x2C), .irq = IRQ_PF8, .platform_data = (void *)&bfin_ad7879_ts_info, }, #endif #if defined(CONFIG_PMIC_ADP5520) || defined(CONFIG_PMIC_ADP5520_MODULE) { I2C_BOARD_INFO("pmic-adp5520", 0x32), .irq = IRQ_PF9, .platform_data = (void *)&adp5520_pdev_data, }, #endif #if defined(CONFIG_SND_SOC_SSM2602) || defined(CONFIG_SND_SOC_SSM2602_MODULE) { I2C_BOARD_INFO("ssm2602", 0x1b), }, #endif #if defined(CONFIG_BFIN_TWI_LCD) || defined(CONFIG_BFIN_TWI_LCD_MODULE) { I2C_BOARD_INFO("ad5252", 0x2f), }, #endif #if defined(CONFIG_SND_SOC_ADAU1373) || defined(CONFIG_SND_SOC_ADAU1373_MODULE) { I2C_BOARD_INFO("adau1373", 0x1A), }, #endif }; #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART static struct resource bfin_sport0_uart_resources[] = { { .start = SPORT0_TCR1, .end = SPORT0_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT0_RX, .end = IRQ_SPORT0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport0_peripherals[] = { P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 }; static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), .resource = bfin_sport0_uart_resources, .dev = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { .start = SPORT1_TCR1, .end = SPORT1_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT1_RX, .end = IRQ_SPORT1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = 
IORESOURCE_IRQ, }, }; static unsigned short bfin_sport1_peripherals[] = { P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), .resource = bfin_sport1_uart_resources, .dev = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include <linux/gpio_keys.h> static struct gpio_keys_button bfin_gpio_keys_table[] = { {BTN_0, GPIO_PG0, 1, "gpio-keys: BTN0"}, {BTN_1, GPIO_PG13, 1, "gpio-keys: BTN1"}, }; static struct gpio_keys_platform_data bfin_gpio_keys_data = { .buttons = bfin_gpio_keys_table, .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table), }; static struct platform_device bfin_device_gpiokeys = { .name = "gpio-keys", .dev = { .platform_data = &bfin_gpio_keys_data, }, }; #endif #if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) #include <asm/bfin_rotary.h> static struct bfin_rotary_platform_data bfin_rotary_data = { /*.rotary_up_key = KEY_UP,*/ /*.rotary_down_key = KEY_DOWN,*/ .rotary_rel_code = REL_WHEEL, .rotary_button_key = KEY_ENTER, .debounce = 10, /* 0..17 */ .mode = ROT_QUAD_ENC | ROT_DEBE, }; static struct resource bfin_rotary_resources[] = { { .start = IRQ_CNT, .end = IRQ_CNT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_rotary_device = { .name = "bfin-rotary", .id = -1, .num_resources = ARRAY_SIZE(bfin_rotary_resources), .resource = bfin_rotary_resources, .dev = { .platform_data = &bfin_rotary_data, }, }; #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_100, 400000000), VRPAIR(VLEV_105, 426000000), VRPAIR(VLEV_110, 500000000), VRPAIR(VLEV_115, 533000000), VRPAIR(VLEV_120, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize 
= ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; static struct platform_device *stamp_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_MTD_NAND_BF5XX) || defined(CONFIG_MTD_NAND_BF5XX_MODULE) &bf5xx_nand_device, #endif #if defined(CONFIG_BFIN_CFPCMCIA) || defined(CONFIG_BFIN_CFPCMCIA_MODULE) &bfin_pcmcia_cf_device, #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) &bfin_isp1760_device, #endif #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) &musb_device, #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) &smc91x_device, #endif #if defined(CONFIG_DM9000) || defined(CONFIG_DM9000_MODULE) &dm9000_device, #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) &bfin_mii_bus, &bfin_mac_device, #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) &net2272_bfin_device, #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_FB_BFIN_T350MCQB) || defined(CONFIG_FB_BFIN_T350MCQB_MODULE) &bf52x_t350mcqb_device, #endif #if defined(CONFIG_FB_BFIN_LQ035Q1) || defined(CONFIG_FB_BFIN_LQ035Q1_MODULE) &bfin_lq035q1_device, #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 &bfin_sir1_device, #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) &i2c_bfin_twi_device, #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || 
defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, #endif #if defined(CONFIG_INPUT_BFIN_ROTARY) || defined(CONFIG_INPUT_BFIN_ROTARY_MODULE) &bfin_rotary_device, #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) &ezkit_flash_device, #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) &bfin_i2s_pcm, #endif #if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) &bfin_tdm_pcm, #endif #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) &bfin_ac97_pcm, #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) &bfin_i2s, #endif #if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) &bfin_tdm, #endif }; static int __init ezkit_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); i2c_register_board_info(0, bfin_i2c_board_info, ARRAY_SIZE(bfin_i2c_board_info)); platform_add_devices(stamp_devices, ARRAY_SIZE(stamp_devices)); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); return 0; } arch_initcall(ezkit_init); static struct platform_device *ezkit_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(ezkit_early_devices, 
ARRAY_SIZE(ezkit_early_devices)); } void native_machine_restart(char *cmd) { /* workaround reboot hang when booting from SPI */ if ((bfin_read_SYSCR() & 0x7) == 0x3) bfin_reset_boot_spi_cs(P_DEFAULT_BOOT_SPI_CS); } int bfin_get_ether_addr(char *addr) { /* the MAC is stored in OTP memory page 0xDF */ u32 ret; u64 otp_mac; u32 (*otp_read)(u32 page, u32 flags, u64 *page_content) = (void *)0xEF00001A; ret = otp_read(0xDF, 0x00, &otp_mac); if (!(ret & 0x1)) { char *otp_mac_p = (char *)&otp_mac; for (ret = 0; ret < 6; ++ret) addr[ret] = otp_mac_p[5 - ret]; } return 0; } EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
ISTweak/android_kernel_sony_blue_hayabusa
arch/powerpc/sysdev/xics/xics-common.c
4419
10435
/* * Copyright 2011 IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/types.h> #include <linux/threads.h> #include <linux/kernel.h> #include <linux/irq.h> #include <linux/debugfs.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/seq_file.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <asm/prom.h> #include <asm/io.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/irq.h> #include <asm/errno.h> #include <asm/rtas.h> #include <asm/xics.h> #include <asm/firmware.h> /* Globals common to all ICP/ICS implementations */ const struct icp_ops *icp_ops; unsigned int xics_default_server = 0xff; unsigned int xics_default_distrib_server = 0; unsigned int xics_interrupt_server_size = 8; DEFINE_PER_CPU(struct xics_cppr, xics_cppr); struct irq_domain *xics_host; static LIST_HEAD(ics_list); void xics_update_irq_servers(void) { int i, j; struct device_node *np; u32 ilen; const u32 *ireg; u32 hcpuid; /* Find the server numbers for the boot cpu. */ np = of_get_cpu_node(boot_cpuid, NULL); BUG_ON(!np); hcpuid = get_hard_smp_processor_id(boot_cpuid); xics_default_server = xics_default_distrib_server = hcpuid; pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server); ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); if (!ireg) { of_node_put(np); return; } i = ilen / sizeof(int); /* Global interrupt distribution server is specified in the last * entry of "ibm,ppc-interrupt-gserver#s" property. 
Get the last * entry fom this property for current boot cpu id and use it as * default distribution server */ for (j = 0; j < i; j += 2) { if (ireg[j] == hcpuid) { xics_default_distrib_server = ireg[j+1]; break; } } pr_devel("xics: xics_default_distrib_server = 0x%x\n", xics_default_distrib_server); of_node_put(np); } /* GIQ stuff, currently only supported on RTAS setups, will have * to be sorted properly for bare metal */ void xics_set_cpu_giq(unsigned int gserver, unsigned int join) { #ifdef CONFIG_PPC_RTAS int index; int status; if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) return; index = (1UL << xics_interrupt_server_size) - 1 - gserver; status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", GLOBAL_INTERRUPT_QUEUE, index, join, status); #endif } void xics_setup_cpu(void) { icp_ops->set_priority(LOWEST_PRIORITY); xics_set_cpu_giq(xics_default_distrib_server, 1); } void xics_mask_unknown_vec(unsigned int vec) { struct ics *ics; pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec); list_for_each_entry(ics, &ics_list, link) ics->mask_unknown(ics, vec); } #ifdef CONFIG_SMP static void xics_request_ipi(void) { unsigned int ipi; ipi = irq_create_mapping(xics_host, XICS_IPI); BUG_ON(ipi == NO_IRQ); /* * IPIs are marked IRQF_PERCPU. The handler was set in map. 
*/ BUG_ON(request_irq(ipi, icp_ops->ipi_action, IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL)); } int __init xics_smp_probe(void) { /* Setup cause_ipi callback based on which ICP is used */ smp_ops->cause_ipi = icp_ops->cause_ipi; /* Register all the IPIs */ xics_request_ipi(); return cpumask_weight(cpu_possible_mask); } #endif /* CONFIG_SMP */ void xics_teardown_cpu(void) { struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); /* * we have to reset the cppr index to 0 because we're * not going to return from the IPI */ os_cppr->index = 0; icp_ops->set_priority(0); icp_ops->teardown_cpu(); } void xics_kexec_teardown_cpu(int secondary) { xics_teardown_cpu(); icp_ops->flush_ipi(); /* * Some machines need to have at least one cpu in the GIQ, * so leave the master cpu in the group. */ if (secondary) xics_set_cpu_giq(xics_default_distrib_server, 0); } #ifdef CONFIG_HOTPLUG_CPU /* Interrupts are disabled. */ void xics_migrate_irqs_away(void) { int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); unsigned int irq, virq; struct irq_desc *desc; /* If we used to be the default server, move to the new "boot_cpuid" */ if (hw_cpu == xics_default_server) xics_update_irq_servers(); /* Reject any interrupt that was queued to us... */ icp_ops->set_priority(0); /* Remove ourselves from the global interrupt queue */ xics_set_cpu_giq(xics_default_distrib_server, 0); /* Allow IPIs again... */ icp_ops->set_priority(DEFAULT_PRIORITY); for_each_irq_desc(virq, desc) { struct irq_chip *chip; long server; unsigned long flags; struct ics *ics; /* We can't set affinity on ISA interrupts */ if (virq < NUM_ISA_INTERRUPTS) continue; /* We only need to migrate enabled IRQS */ if (!desc->action) continue; if (desc->irq_data.domain != xics_host) continue; irq = desc->irq_data.hwirq; /* We need to get IPIs still. 
*/ if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) continue; chip = irq_desc_get_chip(desc); if (!chip || !chip->irq_set_affinity) continue; raw_spin_lock_irqsave(&desc->lock, flags); /* Locate interrupt server */ server = -1; ics = irq_get_chip_data(virq); if (ics) server = ics->get_server(ics, irq); if (server < 0) { printk(KERN_ERR "%s: Can't find server for irq %d\n", __func__, irq); goto unlock; } /* We only support delivery to all cpus or to one cpu. * The irq has to be migrated only in the single cpu * case. */ if (server != hw_cpu) goto unlock; /* This is expected during cpu offline. */ if (cpu_online(cpu)) pr_warning("IRQ %u affinity broken off cpu %u\n", virq, cpu); /* Reset affinity to all cpus */ raw_spin_unlock_irqrestore(&desc->lock, flags); irq_set_affinity(virq, cpu_all_mask); continue; unlock: raw_spin_unlock_irqrestore(&desc->lock, flags); } } #endif /* CONFIG_HOTPLUG_CPU */ #ifdef CONFIG_SMP /* * For the moment we only implement delivery to all cpus or one cpu. * * If the requested affinity is cpu_all_mask, we set global affinity. * If not we set it to the first cpu in the mask, even if multiple cpus * are set. This is so things like irqbalance (which set core and package * wide affinities) do the right thing. * * We need to fix this to implement support for the links */ int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, unsigned int strict_check) { if (!distribute_irqs) return xics_default_server; if (!cpumask_subset(cpu_possible_mask, cpumask)) { int server = cpumask_first_and(cpu_online_mask, cpumask); if (server < nr_cpu_ids) return get_hard_smp_processor_id(server); if (strict_check) return -1; } /* * Workaround issue with some versions of JS20 firmware that * deliver interrupts to cpus which haven't been started. This * happens when using the maxcpus= boot option. 
*/ if (cpumask_equal(cpu_online_mask, cpu_present_mask)) return xics_default_distrib_server; return xics_default_server; } #endif /* CONFIG_SMP */ static int xics_host_match(struct irq_domain *h, struct device_node *node) { struct ics *ics; list_for_each_entry(ics, &ics_list, link) if (ics->host_match(ics, node)) return 1; return 0; } /* Dummies */ static void xics_ipi_unmask(struct irq_data *d) { } static void xics_ipi_mask(struct irq_data *d) { } static struct irq_chip xics_ipi_chip = { .name = "XICS", .irq_eoi = NULL, /* Patched at init time */ .irq_mask = xics_ipi_mask, .irq_unmask = xics_ipi_unmask, }; static int xics_host_map(struct irq_domain *h, unsigned int virq, irq_hw_number_t hw) { struct ics *ics; pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); /* Insert the interrupt mapping into the radix tree for fast lookup */ irq_radix_revmap_insert(xics_host, virq, hw); /* They aren't all level sensitive but we just don't really know */ irq_set_status_flags(virq, IRQ_LEVEL); /* Don't call into ICS for IPIs */ if (hw == XICS_IPI) { irq_set_chip_and_handler(virq, &xics_ipi_chip, handle_percpu_irq); return 0; } /* Let the ICS setup the chip data */ list_for_each_entry(ics, &ics_list, link) if (ics->map(ics, virq) == 0) return 0; return -EINVAL; } static int xics_host_xlate(struct irq_domain *h, struct device_node *ct, const u32 *intspec, unsigned int intsize, irq_hw_number_t *out_hwirq, unsigned int *out_flags) { /* Current xics implementation translates everything * to level. It is not technically right for MSIs but this * is irrelevant at this point. 
We might get smarter in the future */ *out_hwirq = intspec[0]; *out_flags = IRQ_TYPE_LEVEL_LOW; return 0; } static struct irq_domain_ops xics_host_ops = { .match = xics_host_match, .map = xics_host_map, .xlate = xics_host_xlate, }; static void __init xics_init_host(void) { xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL); BUG_ON(xics_host == NULL); irq_set_default_host(xics_host); } void __init xics_register_ics(struct ics *ics) { list_add(&ics->link, &ics_list); } static void __init xics_get_server_size(void) { struct device_node *np; const u32 *isize; /* We fetch the interrupt server size from the first ICS node * we find if any */ np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics"); if (!np) return; isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); if (!isize) return; xics_interrupt_server_size = *isize; of_node_put(np); } void __init xics_init(void) { int rc = -1; /* Fist locate ICP */ if (firmware_has_feature(FW_FEATURE_LPAR)) rc = icp_hv_init(); if (rc < 0) rc = icp_native_init(); if (rc < 0) { pr_warning("XICS: Cannot find a Presentation Controller !\n"); return; } /* Copy get_irq callback over to ppc_md */ ppc_md.get_irq = icp_ops->get_irq; /* Patch up IPI chip EOI */ xics_ipi_chip.irq_eoi = icp_ops->eoi; /* Now locate ICS */ rc = ics_rtas_init(); if (rc < 0) rc = ics_opal_init(); if (rc < 0) pr_warning("XICS: Cannot find a Source Controller !\n"); /* Initialize common bits */ xics_get_server_size(); xics_update_irq_servers(); xics_init_host(); xics_setup_cpu(); }
gpl-2.0
aditisstillalive/android_kernel_lge_hammerhead
fs/cifs/dns_resolve.c
5699
2856
/* * fs/cifs/dns_resolve.c * * Copyright (c) 2007 Igor Mammedov * Author(s): Igor Mammedov (niallain@gmail.com) * Steve French (sfrench@us.ibm.com) * Wang Lei (wang840925@gmail.com) * David Howells (dhowells@redhat.com) * * Contains the CIFS DFS upcall routines used for hostname to * IP address translation. * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/slab.h> #include <linux/dns_resolver.h> #include "dns_resolve.h" #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" /** * dns_resolve_server_name_to_ip - Resolve UNC server name to ip address. * @unc: UNC path specifying the server * @ip_addr: Where to return the IP address. * * The IP address will be returned in string form, and the caller is * responsible for freeing it. * * Returns length of result on success, -ve on error. 
*/ int dns_resolve_server_name_to_ip(const char *unc, char **ip_addr) { struct sockaddr_storage ss; const char *hostname, *sep; char *name; int len, rc; if (!ip_addr || !unc) return -EINVAL; len = strlen(unc); if (len < 3) { cFYI(1, "%s: unc is too short: %s", __func__, unc); return -EINVAL; } /* Discount leading slashes for cifs */ len -= 2; hostname = unc + 2; /* Search for server name delimiter */ sep = memchr(hostname, '\\', len); if (sep) len = sep - hostname; else cFYI(1, "%s: probably server name is whole unc: %s", __func__, unc); /* Try to interpret hostname as an IPv4 or IPv6 address */ rc = cifs_convert_address((struct sockaddr *)&ss, hostname, len); if (rc > 0) goto name_is_IP_address; /* Perform the upcall */ rc = dns_query(NULL, hostname, len, NULL, ip_addr, NULL); if (rc < 0) cFYI(1, "%s: unable to resolve: %*.*s", __func__, len, len, hostname); else cFYI(1, "%s: resolved: %*.*s to %s", __func__, len, len, hostname, *ip_addr); return rc; name_is_IP_address: name = kmalloc(len + 1, GFP_KERNEL); if (!name) return -ENOMEM; memcpy(name, hostname, len); name[len] = 0; cFYI(1, "%s: unc is IP, skipping dns upcall: %s", __func__, name); *ip_addr = name; return 0; }
gpl-2.0