repo_name
string
path
string
copies
string
size
string
content
string
license
string
NamelessRom/android_kernel_nvidia_shieldtablet
drivers/media/dvb-core/dvb_frontend.c
2249
74346
/* * dvb_frontend.c: DVB frontend tuning interface/thread * * * Copyright (C) 1999-2001 Ralph Metzler * Marcus Metzler * Holger Waechtler * for convergence integrated media GmbH * * Copyright (C) 2004 Andrew de Quincey (tuning thread cleanup) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * Or, point your browser to http://www.gnu.org/copyleft/gpl.html */ /* Enables DVBv3 compatibility bits at the headers */ #define __DVB_CORE__ #include <linux/string.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/semaphore.h> #include <linux/module.h> #include <linux/list.h> #include <linux/freezer.h> #include <linux/jiffies.h> #include <linux/kthread.h> #include <asm/processor.h> #include "dvb_frontend.h" #include "dvbdev.h" #include <linux/dvb/version.h> static int dvb_frontend_debug; static int dvb_shutdown_timeout; static int dvb_force_auto_inversion; static int dvb_override_tune_delay; static int dvb_powerdown_on_sleep = 1; static int dvb_mfe_wait_time = 5; module_param_named(frontend_debug, dvb_frontend_debug, int, 0644); MODULE_PARM_DESC(frontend_debug, "Turn on/off frontend core debugging (default:off)."); module_param(dvb_shutdown_timeout, int, 0644); MODULE_PARM_DESC(dvb_shutdown_timeout, "wait <shutdown_timeout> seconds after close() 
before suspending hardware"); module_param(dvb_force_auto_inversion, int, 0644); MODULE_PARM_DESC(dvb_force_auto_inversion, "0: normal (default), 1: INVERSION_AUTO forced always"); module_param(dvb_override_tune_delay, int, 0644); MODULE_PARM_DESC(dvb_override_tune_delay, "0: normal (default), >0 => delay in milliseconds to wait for lock after a tune attempt"); module_param(dvb_powerdown_on_sleep, int, 0644); MODULE_PARM_DESC(dvb_powerdown_on_sleep, "0: do not power down, 1: turn LNB voltage off on sleep (default)"); module_param(dvb_mfe_wait_time, int, 0644); MODULE_PARM_DESC(dvb_mfe_wait_time, "Wait up to <mfe_wait_time> seconds on open() for multi-frontend to become available (default:5 seconds)"); #define FESTATE_IDLE 1 #define FESTATE_RETUNE 2 #define FESTATE_TUNING_FAST 4 #define FESTATE_TUNING_SLOW 8 #define FESTATE_TUNED 16 #define FESTATE_ZIGZAG_FAST 32 #define FESTATE_ZIGZAG_SLOW 64 #define FESTATE_DISEQC 128 #define FESTATE_ERROR 256 #define FESTATE_WAITFORLOCK (FESTATE_TUNING_FAST | FESTATE_TUNING_SLOW | FESTATE_ZIGZAG_FAST | FESTATE_ZIGZAG_SLOW | FESTATE_DISEQC) #define FESTATE_SEARCHING_FAST (FESTATE_TUNING_FAST | FESTATE_ZIGZAG_FAST) #define FESTATE_SEARCHING_SLOW (FESTATE_TUNING_SLOW | FESTATE_ZIGZAG_SLOW) #define FESTATE_LOSTLOCK (FESTATE_ZIGZAG_FAST | FESTATE_ZIGZAG_SLOW) #define FE_ALGO_HW 1 /* * FESTATE_IDLE. No tuning parameters have been supplied and the loop is idling. * FESTATE_RETUNE. Parameters have been supplied, but we have not yet performed the first tune. * FESTATE_TUNING_FAST. Tuning parameters have been supplied and fast zigzag scan is in progress. * FESTATE_TUNING_SLOW. Tuning parameters have been supplied. Fast zigzag failed, so we're trying again, but slower. * FESTATE_TUNED. The frontend has successfully locked on. * FESTATE_ZIGZAG_FAST. The lock has been lost, and a fast zigzag has been initiated to try and regain it. * FESTATE_ZIGZAG_SLOW. The lock has been lost. Fast zigzag has been failed, so we're trying again, but slower. 
* FESTATE_DISEQC. A DISEQC command has just been issued. * FESTATE_WAITFORLOCK. When we're waiting for a lock. * FESTATE_SEARCHING_FAST. When we're searching for a signal using a fast zigzag scan. * FESTATE_SEARCHING_SLOW. When we're searching for a signal using a slow zigzag scan. * FESTATE_LOSTLOCK. When the lock has been lost, and we're searching it again. */ #define DVB_FE_NO_EXIT 0 #define DVB_FE_NORMAL_EXIT 1 #define DVB_FE_DEVICE_REMOVED 2 static DEFINE_MUTEX(frontend_mutex); struct dvb_frontend_private { /* thread/frontend values */ struct dvb_device *dvbdev; struct dvb_frontend_parameters parameters_out; struct dvb_fe_events events; struct semaphore sem; struct list_head list_head; wait_queue_head_t wait_queue; struct task_struct *thread; unsigned long release_jiffies; unsigned int exit; unsigned int wakeup; fe_status_t status; unsigned long tune_mode_flags; unsigned int delay; unsigned int reinitialise; int tone; int voltage; /* swzigzag values */ unsigned int state; unsigned int bending; int lnb_drift; unsigned int inversion; unsigned int auto_step; unsigned int auto_sub_step; unsigned int started_auto_step; unsigned int min_delay; unsigned int max_drift; unsigned int step_size; int quality; unsigned int check_wrapped; enum dvbfe_search algo_status; }; static void dvb_frontend_wakeup(struct dvb_frontend *fe); static int dtv_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p_out); static int dtv_property_legacy_params_sync(struct dvb_frontend *fe, struct dvb_frontend_parameters *p); static bool has_get_frontend(struct dvb_frontend *fe) { return fe->ops.get_frontend != NULL; } /* * Due to DVBv3 API calls, a delivery system should be mapped into one of * the 4 DVBv3 delivery systems (FE_QPSK, FE_QAM, FE_OFDM or FE_ATSC), * otherwise, a DVBv3 call will fail. 
*/ enum dvbv3_emulation_type { DVBV3_UNKNOWN, DVBV3_QPSK, DVBV3_QAM, DVBV3_OFDM, DVBV3_ATSC, }; static enum dvbv3_emulation_type dvbv3_type(u32 delivery_system) { switch (delivery_system) { case SYS_DVBC_ANNEX_A: case SYS_DVBC_ANNEX_C: return DVBV3_QAM; case SYS_DVBS: case SYS_DVBS2: case SYS_TURBO: case SYS_ISDBS: case SYS_DSS: return DVBV3_QPSK; case SYS_DVBT: case SYS_DVBT2: case SYS_ISDBT: case SYS_DTMB: return DVBV3_OFDM; case SYS_ATSC: case SYS_ATSCMH: case SYS_DVBC_ANNEX_B: return DVBV3_ATSC; case SYS_UNDEFINED: case SYS_ISDBC: case SYS_DVBH: case SYS_DAB: default: /* * Doesn't know how to emulate those types and/or * there's no frontend driver from this type yet * with some emulation code, so, we're not sure yet how * to handle them, or they're not compatible with a DVBv3 call. */ return DVBV3_UNKNOWN; } } static void dvb_frontend_add_event(struct dvb_frontend *fe, fe_status_t status) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_fe_events *events = &fepriv->events; struct dvb_frontend_event *e; int wp; dev_dbg(fe->dvb->device, "%s:\n", __func__); if ((status & FE_HAS_LOCK) && has_get_frontend(fe)) dtv_get_frontend(fe, &fepriv->parameters_out); mutex_lock(&events->mtx); wp = (events->eventw + 1) % MAX_EVENT; if (wp == events->eventr) { events->overflow = 1; events->eventr = (events->eventr + 1) % MAX_EVENT; } e = &events->events[events->eventw]; e->status = status; e->parameters = fepriv->parameters_out; events->eventw = wp; mutex_unlock(&events->mtx); wake_up_interruptible (&events->wait_queue); } static int dvb_frontend_get_event(struct dvb_frontend *fe, struct dvb_frontend_event *event, int flags) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_fe_events *events = &fepriv->events; dev_dbg(fe->dvb->device, "%s:\n", __func__); if (events->overflow) { events->overflow = 0; return -EOVERFLOW; } if (events->eventw == events->eventr) { int ret; if (flags & O_NONBLOCK) return -EWOULDBLOCK; up(&fepriv->sem); ret = 
wait_event_interruptible (events->wait_queue, events->eventw != events->eventr); if (down_interruptible (&fepriv->sem)) return -ERESTARTSYS; if (ret < 0) return ret; } mutex_lock(&events->mtx); *event = events->events[events->eventr]; events->eventr = (events->eventr + 1) % MAX_EVENT; mutex_unlock(&events->mtx); return 0; } static void dvb_frontend_clear_events(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_fe_events *events = &fepriv->events; mutex_lock(&events->mtx); events->eventr = events->eventw; mutex_unlock(&events->mtx); } static void dvb_frontend_init(struct dvb_frontend *fe) { dev_dbg(fe->dvb->device, "%s: initialising adapter %i frontend %i (%s)...\n", __func__, fe->dvb->num, fe->id, fe->ops.info.name); if (fe->ops.init) fe->ops.init(fe); if (fe->ops.tuner_ops.init) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); fe->ops.tuner_ops.init(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } } void dvb_frontend_reinitialise(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; fepriv->reinitialise = 1; dvb_frontend_wakeup(fe); } EXPORT_SYMBOL(dvb_frontend_reinitialise); static void dvb_frontend_swzigzag_update_delay(struct dvb_frontend_private *fepriv, int locked) { int q2; struct dvb_frontend *fe = fepriv->dvbdev->priv; dev_dbg(fe->dvb->device, "%s:\n", __func__); if (locked) (fepriv->quality) = (fepriv->quality * 220 + 36*256) / 256; else (fepriv->quality) = (fepriv->quality * 220 + 0) / 256; q2 = fepriv->quality - 128; q2 *= q2; fepriv->delay = fepriv->min_delay + q2 * HZ / (128*128); } /** * Performs automatic twiddling of frontend parameters. * * @param fe The frontend concerned. * @param check_wrapped Checks if an iteration has completed. DO NOT SET ON THE FIRST ATTEMPT * @returns Number of complete iterations that have been performed. 
*/ static int dvb_frontend_swzigzag_autotune(struct dvb_frontend *fe, int check_wrapped) { int autoinversion; int ready = 0; int fe_set_err = 0; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache, tmp; int original_inversion = c->inversion; u32 original_frequency = c->frequency; /* are we using autoinversion? */ autoinversion = ((!(fe->ops.info.caps & FE_CAN_INVERSION_AUTO)) && (c->inversion == INVERSION_AUTO)); /* setup parameters correctly */ while(!ready) { /* calculate the lnb_drift */ fepriv->lnb_drift = fepriv->auto_step * fepriv->step_size; /* wrap the auto_step if we've exceeded the maximum drift */ if (fepriv->lnb_drift > fepriv->max_drift) { fepriv->auto_step = 0; fepriv->auto_sub_step = 0; fepriv->lnb_drift = 0; } /* perform inversion and +/- zigzag */ switch(fepriv->auto_sub_step) { case 0: /* try with the current inversion and current drift setting */ ready = 1; break; case 1: if (!autoinversion) break; fepriv->inversion = (fepriv->inversion == INVERSION_OFF) ? INVERSION_ON : INVERSION_OFF; ready = 1; break; case 2: if (fepriv->lnb_drift == 0) break; fepriv->lnb_drift = -fepriv->lnb_drift; ready = 1; break; case 3: if (fepriv->lnb_drift == 0) break; if (!autoinversion) break; fepriv->inversion = (fepriv->inversion == INVERSION_OFF) ? 
INVERSION_ON : INVERSION_OFF; fepriv->lnb_drift = -fepriv->lnb_drift; ready = 1; break; default: fepriv->auto_step++; fepriv->auto_sub_step = -1; /* it'll be incremented to 0 in a moment */ break; } if (!ready) fepriv->auto_sub_step++; } /* if this attempt would hit where we started, indicate a complete * iteration has occurred */ if ((fepriv->auto_step == fepriv->started_auto_step) && (fepriv->auto_sub_step == 0) && check_wrapped) { return 1; } dev_dbg(fe->dvb->device, "%s: drift:%i inversion:%i auto_step:%i " \ "auto_sub_step:%i started_auto_step:%i\n", __func__, fepriv->lnb_drift, fepriv->inversion, fepriv->auto_step, fepriv->auto_sub_step, fepriv->started_auto_step); /* set the frontend itself */ c->frequency += fepriv->lnb_drift; if (autoinversion) c->inversion = fepriv->inversion; tmp = *c; if (fe->ops.set_frontend) fe_set_err = fe->ops.set_frontend(fe); *c = tmp; if (fe_set_err < 0) { fepriv->state = FESTATE_ERROR; return fe_set_err; } c->frequency = original_frequency; c->inversion = original_inversion; fepriv->auto_sub_step++; return 0; } static void dvb_frontend_swzigzag(struct dvb_frontend *fe) { fe_status_t s = 0; int retval = 0; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache, tmp; /* if we've got no parameters, just keep idling */ if (fepriv->state & FESTATE_IDLE) { fepriv->delay = 3*HZ; fepriv->quality = 0; return; } /* in SCAN mode, we just set the frontend when asked and leave it alone */ if (fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT) { if (fepriv->state & FESTATE_RETUNE) { tmp = *c; if (fe->ops.set_frontend) retval = fe->ops.set_frontend(fe); *c = tmp; if (retval < 0) fepriv->state = FESTATE_ERROR; else fepriv->state = FESTATE_TUNED; } fepriv->delay = 3*HZ; fepriv->quality = 0; return; } /* get the frontend status */ if (fepriv->state & FESTATE_RETUNE) { s = 0; } else { if (fe->ops.read_status) fe->ops.read_status(fe, &s); if (s != fepriv->status) { 
dvb_frontend_add_event(fe, s); fepriv->status = s; } } /* if we're not tuned, and we have a lock, move to the TUNED state */ if ((fepriv->state & FESTATE_WAITFORLOCK) && (s & FE_HAS_LOCK)) { dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK); fepriv->state = FESTATE_TUNED; /* if we're tuned, then we have determined the correct inversion */ if ((!(fe->ops.info.caps & FE_CAN_INVERSION_AUTO)) && (c->inversion == INVERSION_AUTO)) { c->inversion = fepriv->inversion; } return; } /* if we are tuned already, check we're still locked */ if (fepriv->state & FESTATE_TUNED) { dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK); /* we're tuned, and the lock is still good... */ if (s & FE_HAS_LOCK) { return; } else { /* if we _WERE_ tuned, but now don't have a lock */ fepriv->state = FESTATE_ZIGZAG_FAST; fepriv->started_auto_step = fepriv->auto_step; fepriv->check_wrapped = 0; } } /* don't actually do anything if we're in the LOSTLOCK state, * the frontend is set to FE_CAN_RECOVER, and the max_drift is 0 */ if ((fepriv->state & FESTATE_LOSTLOCK) && (fe->ops.info.caps & FE_CAN_RECOVER) && (fepriv->max_drift == 0)) { dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK); return; } /* don't do anything if we're in the DISEQC state, since this * might be someone with a motorized dish controlled by DISEQC. * If its actually a re-tune, there will be a SET_FRONTEND soon enough. */ if (fepriv->state & FESTATE_DISEQC) { dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK); return; } /* if we're in the RETUNE state, set everything up for a brand * new scan, keeping the current inversion setting, as the next * tune is _very_ likely to require the same */ if (fepriv->state & FESTATE_RETUNE) { fepriv->lnb_drift = 0; fepriv->auto_step = 0; fepriv->auto_sub_step = 0; fepriv->started_auto_step = 0; fepriv->check_wrapped = 0; } /* fast zigzag. 
*/ if ((fepriv->state & FESTATE_SEARCHING_FAST) || (fepriv->state & FESTATE_RETUNE)) { fepriv->delay = fepriv->min_delay; /* perform a tune */ retval = dvb_frontend_swzigzag_autotune(fe, fepriv->check_wrapped); if (retval < 0) { return; } else if (retval) { /* OK, if we've run out of trials at the fast speed. * Drop back to slow for the _next_ attempt */ fepriv->state = FESTATE_SEARCHING_SLOW; fepriv->started_auto_step = fepriv->auto_step; return; } fepriv->check_wrapped = 1; /* if we've just retuned, enter the ZIGZAG_FAST state. * This ensures we cannot return from an * FE_SET_FRONTEND ioctl before the first frontend tune * occurs */ if (fepriv->state & FESTATE_RETUNE) { fepriv->state = FESTATE_TUNING_FAST; } } /* slow zigzag */ if (fepriv->state & FESTATE_SEARCHING_SLOW) { dvb_frontend_swzigzag_update_delay(fepriv, s & FE_HAS_LOCK); /* Note: don't bother checking for wrapping; we stay in this * state until we get a lock */ dvb_frontend_swzigzag_autotune(fe, 0); } } static int dvb_frontend_is_exiting(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; if (fepriv->exit != DVB_FE_NO_EXIT) return 1; if (fepriv->dvbdev->writers == 1) if (time_after_eq(jiffies, fepriv->release_jiffies + dvb_shutdown_timeout * HZ)) return 1; return 0; } static int dvb_frontend_should_wakeup(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; if (fepriv->wakeup) { fepriv->wakeup = 0; return 1; } return dvb_frontend_is_exiting(fe); } static void dvb_frontend_wakeup(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; fepriv->wakeup = 1; wake_up_interruptible(&fepriv->wait_queue); } static int dvb_frontend_thread(void *data) { struct dvb_frontend *fe = data; struct dvb_frontend_private *fepriv = fe->frontend_priv; fe_status_t s; enum dvbfe_algo algo; bool re_tune = false; bool semheld = false; dev_dbg(fe->dvb->device, "%s:\n", __func__); fepriv->check_wrapped = 0; fepriv->quality = 0; 
fepriv->delay = 3*HZ; fepriv->status = 0; fepriv->wakeup = 0; fepriv->reinitialise = 0; dvb_frontend_init(fe); set_freezable(); while (1) { up(&fepriv->sem); /* is locked when we enter the thread... */ restart: wait_event_interruptible_timeout(fepriv->wait_queue, dvb_frontend_should_wakeup(fe) || kthread_should_stop() || freezing(current), fepriv->delay); if (kthread_should_stop() || dvb_frontend_is_exiting(fe)) { /* got signal or quitting */ if (!down_interruptible(&fepriv->sem)) semheld = true; fepriv->exit = DVB_FE_NORMAL_EXIT; break; } if (try_to_freeze()) goto restart; if (down_interruptible(&fepriv->sem)) break; if (fepriv->reinitialise) { dvb_frontend_init(fe); if (fe->ops.set_tone && fepriv->tone != -1) fe->ops.set_tone(fe, fepriv->tone); if (fe->ops.set_voltage && fepriv->voltage != -1) fe->ops.set_voltage(fe, fepriv->voltage); fepriv->reinitialise = 0; } /* do an iteration of the tuning loop */ if (fe->ops.get_frontend_algo) { algo = fe->ops.get_frontend_algo(fe); switch (algo) { case DVBFE_ALGO_HW: dev_dbg(fe->dvb->device, "%s: Frontend ALGO = DVBFE_ALGO_HW\n", __func__); if (fepriv->state & FESTATE_RETUNE) { dev_dbg(fe->dvb->device, "%s: Retune requested, FESTATE_RETUNE\n", __func__); re_tune = true; fepriv->state = FESTATE_TUNED; } else { re_tune = false; } if (fe->ops.tune) fe->ops.tune(fe, re_tune, fepriv->tune_mode_flags, &fepriv->delay, &s); if (s != fepriv->status && !(fepriv->tune_mode_flags & FE_TUNE_MODE_ONESHOT)) { dev_dbg(fe->dvb->device, "%s: state changed, adding current state\n", __func__); dvb_frontend_add_event(fe, s); fepriv->status = s; } break; case DVBFE_ALGO_SW: dev_dbg(fe->dvb->device, "%s: Frontend ALGO = DVBFE_ALGO_SW\n", __func__); dvb_frontend_swzigzag(fe); break; case DVBFE_ALGO_CUSTOM: dev_dbg(fe->dvb->device, "%s: Frontend ALGO = DVBFE_ALGO_CUSTOM, state=%d\n", __func__, fepriv->state); if (fepriv->state & FESTATE_RETUNE) { dev_dbg(fe->dvb->device, "%s: Retune requested, FESTAT_RETUNE\n", __func__); fepriv->state = 
FESTATE_TUNED; } /* Case where we are going to search for a carrier * User asked us to retune again for some reason, possibly * requesting a search with a new set of parameters */ if (fepriv->algo_status & DVBFE_ALGO_SEARCH_AGAIN) { if (fe->ops.search) { fepriv->algo_status = fe->ops.search(fe); /* We did do a search as was requested, the flags are * now unset as well and has the flags wrt to search. */ } else { fepriv->algo_status &= ~DVBFE_ALGO_SEARCH_AGAIN; } } /* Track the carrier if the search was successful */ if (fepriv->algo_status != DVBFE_ALGO_SEARCH_SUCCESS) { fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN; fepriv->delay = HZ / 2; } dtv_property_legacy_params_sync(fe, &fepriv->parameters_out); fe->ops.read_status(fe, &s); if (s != fepriv->status) { dvb_frontend_add_event(fe, s); /* update event list */ fepriv->status = s; if (!(s & FE_HAS_LOCK)) { fepriv->delay = HZ / 10; fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN; } else { fepriv->delay = 60 * HZ; } } break; default: dev_dbg(fe->dvb->device, "%s: UNDEFINED ALGO !\n", __func__); break; } } else { dvb_frontend_swzigzag(fe); } } if (dvb_powerdown_on_sleep) { if (fe->ops.set_voltage) fe->ops.set_voltage(fe, SEC_VOLTAGE_OFF); if (fe->ops.tuner_ops.sleep) { if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 1); fe->ops.tuner_ops.sleep(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } if (fe->ops.sleep) fe->ops.sleep(fe); } fepriv->thread = NULL; if (kthread_should_stop()) fepriv->exit = DVB_FE_DEVICE_REMOVED; else fepriv->exit = DVB_FE_NO_EXIT; mb(); if (semheld) up(&fepriv->sem); dvb_frontend_wakeup(fe); return 0; } static void dvb_frontend_stop(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; dev_dbg(fe->dvb->device, "%s:\n", __func__); fepriv->exit = DVB_FE_NORMAL_EXIT; mb(); if (!fepriv->thread) return; kthread_stop(fepriv->thread); sema_init(&fepriv->sem, 1); fepriv->state = FESTATE_IDLE; /* paranoia check in case a signal arrived */ if 
(fepriv->thread) dev_warn(fe->dvb->device, "dvb_frontend_stop: warning: thread %p won't exit\n", fepriv->thread); } s32 timeval_usec_diff(struct timeval lasttime, struct timeval curtime) { return ((curtime.tv_usec < lasttime.tv_usec) ? 1000000 - lasttime.tv_usec + curtime.tv_usec : curtime.tv_usec - lasttime.tv_usec); } EXPORT_SYMBOL(timeval_usec_diff); static inline void timeval_usec_add(struct timeval *curtime, u32 add_usec) { curtime->tv_usec += add_usec; if (curtime->tv_usec >= 1000000) { curtime->tv_usec -= 1000000; curtime->tv_sec++; } } /* * Sleep until gettimeofday() > waketime + add_usec * This needs to be as precise as possible, but as the delay is * usually between 2ms and 32ms, it is done using a scheduled msleep * followed by usleep (normally a busy-wait loop) for the remainder */ void dvb_frontend_sleep_until(struct timeval *waketime, u32 add_usec) { struct timeval lasttime; s32 delta, newdelta; timeval_usec_add(waketime, add_usec); do_gettimeofday(&lasttime); delta = timeval_usec_diff(lasttime, *waketime); if (delta > 2500) { msleep((delta - 1500) / 1000); do_gettimeofday(&lasttime); newdelta = timeval_usec_diff(lasttime, *waketime); delta = (newdelta > delta) ? 
0 : newdelta; } if (delta > 0) udelay(delta); } EXPORT_SYMBOL(dvb_frontend_sleep_until); static int dvb_frontend_start(struct dvb_frontend *fe) { int ret; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct task_struct *fe_thread; dev_dbg(fe->dvb->device, "%s:\n", __func__); if (fepriv->thread) { if (fepriv->exit == DVB_FE_NO_EXIT) return 0; else dvb_frontend_stop (fe); } if (signal_pending(current)) return -EINTR; if (down_interruptible (&fepriv->sem)) return -EINTR; fepriv->state = FESTATE_IDLE; fepriv->exit = DVB_FE_NO_EXIT; fepriv->thread = NULL; mb(); fe_thread = kthread_run(dvb_frontend_thread, fe, "kdvb-ad-%i-fe-%i", fe->dvb->num,fe->id); if (IS_ERR(fe_thread)) { ret = PTR_ERR(fe_thread); dev_warn(fe->dvb->device, "dvb_frontend_start: failed to start kthread (%d)\n", ret); up(&fepriv->sem); return ret; } fepriv->thread = fe_thread; return 0; } static void dvb_frontend_get_frequency_limits(struct dvb_frontend *fe, u32 *freq_min, u32 *freq_max) { *freq_min = max(fe->ops.info.frequency_min, fe->ops.tuner_ops.info.frequency_min); if (fe->ops.info.frequency_max == 0) *freq_max = fe->ops.tuner_ops.info.frequency_max; else if (fe->ops.tuner_ops.info.frequency_max == 0) *freq_max = fe->ops.info.frequency_max; else *freq_max = min(fe->ops.info.frequency_max, fe->ops.tuner_ops.info.frequency_max); if (*freq_min == 0 || *freq_max == 0) dev_warn(fe->dvb->device, "DVB: adapter %i frontend %u frequency limits undefined - fix the driver\n", fe->dvb->num, fe->id); } static int dvb_frontend_check_parameters(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; u32 freq_min; u32 freq_max; /* range check: frequency */ dvb_frontend_get_frequency_limits(fe, &freq_min, &freq_max); if ((freq_min && c->frequency < freq_min) || (freq_max && c->frequency > freq_max)) { dev_warn(fe->dvb->device, "DVB: adapter %i frontend %i frequency %u out of range (%u..%u)\n", fe->dvb->num, fe->id, c->frequency, freq_min, freq_max); return -EINVAL; } /* 
range check: symbol rate */ switch (c->delivery_system) { case SYS_DVBS: case SYS_DVBS2: case SYS_TURBO: case SYS_DVBC_ANNEX_A: case SYS_DVBC_ANNEX_C: if ((fe->ops.info.symbol_rate_min && c->symbol_rate < fe->ops.info.symbol_rate_min) || (fe->ops.info.symbol_rate_max && c->symbol_rate > fe->ops.info.symbol_rate_max)) { dev_warn(fe->dvb->device, "DVB: adapter %i frontend %i symbol rate %u out of range (%u..%u)\n", fe->dvb->num, fe->id, c->symbol_rate, fe->ops.info.symbol_rate_min, fe->ops.info.symbol_rate_max); return -EINVAL; } default: break; } return 0; } static int dvb_frontend_clear_cache(struct dvb_frontend *fe) { struct dtv_frontend_properties *c = &fe->dtv_property_cache; int i; u32 delsys; delsys = c->delivery_system; memset(c, 0, offsetof(struct dtv_frontend_properties, strength)); c->delivery_system = delsys; c->state = DTV_CLEAR; dev_dbg(fe->dvb->device, "%s: Clearing cache for delivery system %d\n", __func__, c->delivery_system); c->transmission_mode = TRANSMISSION_MODE_AUTO; c->bandwidth_hz = 0; /* AUTO */ c->guard_interval = GUARD_INTERVAL_AUTO; c->hierarchy = HIERARCHY_AUTO; c->symbol_rate = 0; c->code_rate_HP = FEC_AUTO; c->code_rate_LP = FEC_AUTO; c->fec_inner = FEC_AUTO; c->rolloff = ROLLOFF_AUTO; c->voltage = SEC_VOLTAGE_OFF; c->sectone = SEC_TONE_OFF; c->pilot = PILOT_AUTO; c->isdbt_partial_reception = 0; c->isdbt_sb_mode = 0; c->isdbt_sb_subchannel = 0; c->isdbt_sb_segment_idx = 0; c->isdbt_sb_segment_count = 0; c->isdbt_layer_enabled = 0; for (i = 0; i < 3; i++) { c->layer[i].fec = FEC_AUTO; c->layer[i].modulation = QAM_AUTO; c->layer[i].interleaving = 0; c->layer[i].segment_count = 0; } c->stream_id = NO_STREAM_ID_FILTER; switch (c->delivery_system) { case SYS_DVBS: case SYS_DVBS2: case SYS_TURBO: c->modulation = QPSK; /* implied for DVB-S in legacy API */ c->rolloff = ROLLOFF_35;/* implied for DVB-S */ break; case SYS_ATSC: c->modulation = VSB_8; break; default: c->modulation = QAM_AUTO; break; } c->lna = LNA_AUTO; return 0; } #define 
_DTV_CMD(n, s, b) \ [n] = { \ .name = #n, \ .cmd = n, \ .set = s,\ .buffer = b \ } static struct dtv_cmds_h dtv_cmds[DTV_MAX_COMMAND + 1] = { _DTV_CMD(DTV_TUNE, 1, 0), _DTV_CMD(DTV_CLEAR, 1, 0), /* Set */ _DTV_CMD(DTV_FREQUENCY, 1, 0), _DTV_CMD(DTV_BANDWIDTH_HZ, 1, 0), _DTV_CMD(DTV_MODULATION, 1, 0), _DTV_CMD(DTV_INVERSION, 1, 0), _DTV_CMD(DTV_DISEQC_MASTER, 1, 1), _DTV_CMD(DTV_SYMBOL_RATE, 1, 0), _DTV_CMD(DTV_INNER_FEC, 1, 0), _DTV_CMD(DTV_VOLTAGE, 1, 0), _DTV_CMD(DTV_TONE, 1, 0), _DTV_CMD(DTV_PILOT, 1, 0), _DTV_CMD(DTV_ROLLOFF, 1, 0), _DTV_CMD(DTV_DELIVERY_SYSTEM, 1, 0), _DTV_CMD(DTV_HIERARCHY, 1, 0), _DTV_CMD(DTV_CODE_RATE_HP, 1, 0), _DTV_CMD(DTV_CODE_RATE_LP, 1, 0), _DTV_CMD(DTV_GUARD_INTERVAL, 1, 0), _DTV_CMD(DTV_TRANSMISSION_MODE, 1, 0), _DTV_CMD(DTV_INTERLEAVING, 1, 0), _DTV_CMD(DTV_ISDBT_PARTIAL_RECEPTION, 1, 0), _DTV_CMD(DTV_ISDBT_SOUND_BROADCASTING, 1, 0), _DTV_CMD(DTV_ISDBT_SB_SUBCHANNEL_ID, 1, 0), _DTV_CMD(DTV_ISDBT_SB_SEGMENT_IDX, 1, 0), _DTV_CMD(DTV_ISDBT_SB_SEGMENT_COUNT, 1, 0), _DTV_CMD(DTV_ISDBT_LAYER_ENABLED, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERA_FEC, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERA_MODULATION, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERA_SEGMENT_COUNT, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERA_TIME_INTERLEAVING, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERB_FEC, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERB_MODULATION, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERB_SEGMENT_COUNT, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERB_TIME_INTERLEAVING, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERC_FEC, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERC_MODULATION, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERC_SEGMENT_COUNT, 1, 0), _DTV_CMD(DTV_ISDBT_LAYERC_TIME_INTERLEAVING, 1, 0), _DTV_CMD(DTV_STREAM_ID, 1, 0), _DTV_CMD(DTV_DVBT2_PLP_ID_LEGACY, 1, 0), _DTV_CMD(DTV_LNA, 1, 0), /* Get */ _DTV_CMD(DTV_DISEQC_SLAVE_REPLY, 0, 1), _DTV_CMD(DTV_API_VERSION, 0, 0), _DTV_CMD(DTV_ENUM_DELSYS, 0, 0), _DTV_CMD(DTV_ATSCMH_PARADE_ID, 1, 0), _DTV_CMD(DTV_ATSCMH_RS_FRAME_ENSEMBLE, 1, 0), _DTV_CMD(DTV_ATSCMH_FIC_VER, 0, 0), _DTV_CMD(DTV_ATSCMH_NOG, 0, 0), _DTV_CMD(DTV_ATSCMH_TNOG, 0, 0), 
_DTV_CMD(DTV_ATSCMH_SGN, 0, 0), _DTV_CMD(DTV_ATSCMH_PRC, 0, 0), _DTV_CMD(DTV_ATSCMH_RS_FRAME_MODE, 0, 0), _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_PRI, 0, 0), _DTV_CMD(DTV_ATSCMH_RS_CODE_MODE_SEC, 0, 0), _DTV_CMD(DTV_ATSCMH_SCCC_BLOCK_MODE, 0, 0), _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_A, 0, 0), _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_B, 0, 0), _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_C, 0, 0), _DTV_CMD(DTV_ATSCMH_SCCC_CODE_MODE_D, 0, 0), /* Statistics API */ _DTV_CMD(DTV_STAT_SIGNAL_STRENGTH, 0, 0), _DTV_CMD(DTV_STAT_CNR, 0, 0), _DTV_CMD(DTV_STAT_PRE_ERROR_BIT_COUNT, 0, 0), _DTV_CMD(DTV_STAT_PRE_TOTAL_BIT_COUNT, 0, 0), _DTV_CMD(DTV_STAT_POST_ERROR_BIT_COUNT, 0, 0), _DTV_CMD(DTV_STAT_POST_TOTAL_BIT_COUNT, 0, 0), _DTV_CMD(DTV_STAT_ERROR_BLOCK_COUNT, 0, 0), _DTV_CMD(DTV_STAT_TOTAL_BLOCK_COUNT, 0, 0), }; static void dtv_property_dump(struct dvb_frontend *fe, struct dtv_property *tvp) { int i; if (tvp->cmd <= 0 || tvp->cmd > DTV_MAX_COMMAND) { dev_warn(fe->dvb->device, "%s: tvp.cmd = 0x%08x undefined\n", __func__, tvp->cmd); return; } dev_dbg(fe->dvb->device, "%s: tvp.cmd = 0x%08x (%s)\n", __func__, tvp->cmd, dtv_cmds[tvp->cmd].name); if (dtv_cmds[tvp->cmd].buffer) { dev_dbg(fe->dvb->device, "%s: tvp.u.buffer.len = 0x%02x\n", __func__, tvp->u.buffer.len); for(i = 0; i < tvp->u.buffer.len; i++) dev_dbg(fe->dvb->device, "%s: tvp.u.buffer.data[0x%02x] = 0x%02x\n", __func__, i, tvp->u.buffer.data[i]); } else { dev_dbg(fe->dvb->device, "%s: tvp.u.data = 0x%08x\n", __func__, tvp->u.data); } } /* Synchronise the legacy tuning parameters into the cache, so that demodulator * drivers can use a single set_frontend tuning function, regardless of whether * it's being used for the legacy or new API, reducing code and complexity. 
*/ static int dtv_property_cache_sync(struct dvb_frontend *fe, struct dtv_frontend_properties *c, const struct dvb_frontend_parameters *p) { c->frequency = p->frequency; c->inversion = p->inversion; switch (dvbv3_type(c->delivery_system)) { case DVBV3_QPSK: dev_dbg(fe->dvb->device, "%s: Preparing QPSK req\n", __func__); c->symbol_rate = p->u.qpsk.symbol_rate; c->fec_inner = p->u.qpsk.fec_inner; break; case DVBV3_QAM: dev_dbg(fe->dvb->device, "%s: Preparing QAM req\n", __func__); c->symbol_rate = p->u.qam.symbol_rate; c->fec_inner = p->u.qam.fec_inner; c->modulation = p->u.qam.modulation; break; case DVBV3_OFDM: dev_dbg(fe->dvb->device, "%s: Preparing OFDM req\n", __func__); switch (p->u.ofdm.bandwidth) { case BANDWIDTH_10_MHZ: c->bandwidth_hz = 10000000; break; case BANDWIDTH_8_MHZ: c->bandwidth_hz = 8000000; break; case BANDWIDTH_7_MHZ: c->bandwidth_hz = 7000000; break; case BANDWIDTH_6_MHZ: c->bandwidth_hz = 6000000; break; case BANDWIDTH_5_MHZ: c->bandwidth_hz = 5000000; break; case BANDWIDTH_1_712_MHZ: c->bandwidth_hz = 1712000; break; case BANDWIDTH_AUTO: c->bandwidth_hz = 0; } c->code_rate_HP = p->u.ofdm.code_rate_HP; c->code_rate_LP = p->u.ofdm.code_rate_LP; c->modulation = p->u.ofdm.constellation; c->transmission_mode = p->u.ofdm.transmission_mode; c->guard_interval = p->u.ofdm.guard_interval; c->hierarchy = p->u.ofdm.hierarchy_information; break; case DVBV3_ATSC: dev_dbg(fe->dvb->device, "%s: Preparing ATSC req\n", __func__); c->modulation = p->u.vsb.modulation; if (c->delivery_system == SYS_ATSCMH) break; if ((c->modulation == VSB_8) || (c->modulation == VSB_16)) c->delivery_system = SYS_ATSC; else c->delivery_system = SYS_DVBC_ANNEX_B; break; case DVBV3_UNKNOWN: dev_err(fe->dvb->device, "%s: doesn't know how to handle a DVBv3 call to delivery system %i\n", __func__, c->delivery_system); return -EINVAL; } return 0; } /* Ensure the cached values are set correctly in the frontend * legacy tuning structures, for the advanced tuning API. 
*/ static int dtv_property_legacy_params_sync(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { const struct dtv_frontend_properties *c = &fe->dtv_property_cache; p->frequency = c->frequency; p->inversion = c->inversion; switch (dvbv3_type(c->delivery_system)) { case DVBV3_UNKNOWN: dev_err(fe->dvb->device, "%s: doesn't know how to handle a DVBv3 call to delivery system %i\n", __func__, c->delivery_system); return -EINVAL; case DVBV3_QPSK: dev_dbg(fe->dvb->device, "%s: Preparing QPSK req\n", __func__); p->u.qpsk.symbol_rate = c->symbol_rate; p->u.qpsk.fec_inner = c->fec_inner; break; case DVBV3_QAM: dev_dbg(fe->dvb->device, "%s: Preparing QAM req\n", __func__); p->u.qam.symbol_rate = c->symbol_rate; p->u.qam.fec_inner = c->fec_inner; p->u.qam.modulation = c->modulation; break; case DVBV3_OFDM: dev_dbg(fe->dvb->device, "%s: Preparing OFDM req\n", __func__); switch (c->bandwidth_hz) { case 10000000: p->u.ofdm.bandwidth = BANDWIDTH_10_MHZ; break; case 8000000: p->u.ofdm.bandwidth = BANDWIDTH_8_MHZ; break; case 7000000: p->u.ofdm.bandwidth = BANDWIDTH_7_MHZ; break; case 6000000: p->u.ofdm.bandwidth = BANDWIDTH_6_MHZ; break; case 5000000: p->u.ofdm.bandwidth = BANDWIDTH_5_MHZ; break; case 1712000: p->u.ofdm.bandwidth = BANDWIDTH_1_712_MHZ; break; case 0: default: p->u.ofdm.bandwidth = BANDWIDTH_AUTO; } p->u.ofdm.code_rate_HP = c->code_rate_HP; p->u.ofdm.code_rate_LP = c->code_rate_LP; p->u.ofdm.constellation = c->modulation; p->u.ofdm.transmission_mode = c->transmission_mode; p->u.ofdm.guard_interval = c->guard_interval; p->u.ofdm.hierarchy_information = c->hierarchy; break; case DVBV3_ATSC: dev_dbg(fe->dvb->device, "%s: Preparing VSB req\n", __func__); p->u.vsb.modulation = c->modulation; break; } return 0; } /** * dtv_get_frontend - calls a callback for retrieving DTV parameters * @fe: struct dvb_frontend pointer * @c: struct dtv_frontend_properties pointer (DVBv5 cache) * @p_out struct dvb_frontend_parameters pointer (DVBv3 FE struct) * * This routine 
calls either the DVBv3 or DVBv5 get_frontend call. * If c is not null, it will update the DVBv5 cache struct pointed by it. * If p_out is not null, it will update the DVBv3 params pointed by it. */ static int dtv_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p_out) { int r; if (fe->ops.get_frontend) { r = fe->ops.get_frontend(fe); if (unlikely(r < 0)) return r; if (p_out) dtv_property_legacy_params_sync(fe, p_out); return 0; } /* As everything is in cache, get_frontend fops are always supported */ return 0; } static int dvb_frontend_ioctl_legacy(struct file *file, unsigned int cmd, void *parg); static int dvb_frontend_ioctl_properties(struct file *file, unsigned int cmd, void *parg); static int dtv_property_process_get(struct dvb_frontend *fe, const struct dtv_frontend_properties *c, struct dtv_property *tvp, struct file *file) { int r, ncaps; switch(tvp->cmd) { case DTV_ENUM_DELSYS: ncaps = 0; while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) { tvp->u.buffer.data[ncaps] = fe->ops.delsys[ncaps]; ncaps++; } tvp->u.buffer.len = ncaps; break; case DTV_FREQUENCY: tvp->u.data = c->frequency; break; case DTV_MODULATION: tvp->u.data = c->modulation; break; case DTV_BANDWIDTH_HZ: tvp->u.data = c->bandwidth_hz; break; case DTV_INVERSION: tvp->u.data = c->inversion; break; case DTV_SYMBOL_RATE: tvp->u.data = c->symbol_rate; break; case DTV_INNER_FEC: tvp->u.data = c->fec_inner; break; case DTV_PILOT: tvp->u.data = c->pilot; break; case DTV_ROLLOFF: tvp->u.data = c->rolloff; break; case DTV_DELIVERY_SYSTEM: tvp->u.data = c->delivery_system; break; case DTV_VOLTAGE: tvp->u.data = c->voltage; break; case DTV_TONE: tvp->u.data = c->sectone; break; case DTV_API_VERSION: tvp->u.data = (DVB_API_VERSION << 8) | DVB_API_VERSION_MINOR; break; case DTV_CODE_RATE_HP: tvp->u.data = c->code_rate_HP; break; case DTV_CODE_RATE_LP: tvp->u.data = c->code_rate_LP; break; case DTV_GUARD_INTERVAL: tvp->u.data = c->guard_interval; break; case DTV_TRANSMISSION_MODE: 
tvp->u.data = c->transmission_mode; break; case DTV_HIERARCHY: tvp->u.data = c->hierarchy; break; case DTV_INTERLEAVING: tvp->u.data = c->interleaving; break; /* ISDB-T Support here */ case DTV_ISDBT_PARTIAL_RECEPTION: tvp->u.data = c->isdbt_partial_reception; break; case DTV_ISDBT_SOUND_BROADCASTING: tvp->u.data = c->isdbt_sb_mode; break; case DTV_ISDBT_SB_SUBCHANNEL_ID: tvp->u.data = c->isdbt_sb_subchannel; break; case DTV_ISDBT_SB_SEGMENT_IDX: tvp->u.data = c->isdbt_sb_segment_idx; break; case DTV_ISDBT_SB_SEGMENT_COUNT: tvp->u.data = c->isdbt_sb_segment_count; break; case DTV_ISDBT_LAYER_ENABLED: tvp->u.data = c->isdbt_layer_enabled; break; case DTV_ISDBT_LAYERA_FEC: tvp->u.data = c->layer[0].fec; break; case DTV_ISDBT_LAYERA_MODULATION: tvp->u.data = c->layer[0].modulation; break; case DTV_ISDBT_LAYERA_SEGMENT_COUNT: tvp->u.data = c->layer[0].segment_count; break; case DTV_ISDBT_LAYERA_TIME_INTERLEAVING: tvp->u.data = c->layer[0].interleaving; break; case DTV_ISDBT_LAYERB_FEC: tvp->u.data = c->layer[1].fec; break; case DTV_ISDBT_LAYERB_MODULATION: tvp->u.data = c->layer[1].modulation; break; case DTV_ISDBT_LAYERB_SEGMENT_COUNT: tvp->u.data = c->layer[1].segment_count; break; case DTV_ISDBT_LAYERB_TIME_INTERLEAVING: tvp->u.data = c->layer[1].interleaving; break; case DTV_ISDBT_LAYERC_FEC: tvp->u.data = c->layer[2].fec; break; case DTV_ISDBT_LAYERC_MODULATION: tvp->u.data = c->layer[2].modulation; break; case DTV_ISDBT_LAYERC_SEGMENT_COUNT: tvp->u.data = c->layer[2].segment_count; break; case DTV_ISDBT_LAYERC_TIME_INTERLEAVING: tvp->u.data = c->layer[2].interleaving; break; /* Multistream support */ case DTV_STREAM_ID: case DTV_DVBT2_PLP_ID_LEGACY: tvp->u.data = c->stream_id; break; /* ATSC-MH */ case DTV_ATSCMH_FIC_VER: tvp->u.data = fe->dtv_property_cache.atscmh_fic_ver; break; case DTV_ATSCMH_PARADE_ID: tvp->u.data = fe->dtv_property_cache.atscmh_parade_id; break; case DTV_ATSCMH_NOG: tvp->u.data = fe->dtv_property_cache.atscmh_nog; break; case 
DTV_ATSCMH_TNOG: tvp->u.data = fe->dtv_property_cache.atscmh_tnog; break; case DTV_ATSCMH_SGN: tvp->u.data = fe->dtv_property_cache.atscmh_sgn; break; case DTV_ATSCMH_PRC: tvp->u.data = fe->dtv_property_cache.atscmh_prc; break; case DTV_ATSCMH_RS_FRAME_MODE: tvp->u.data = fe->dtv_property_cache.atscmh_rs_frame_mode; break; case DTV_ATSCMH_RS_FRAME_ENSEMBLE: tvp->u.data = fe->dtv_property_cache.atscmh_rs_frame_ensemble; break; case DTV_ATSCMH_RS_CODE_MODE_PRI: tvp->u.data = fe->dtv_property_cache.atscmh_rs_code_mode_pri; break; case DTV_ATSCMH_RS_CODE_MODE_SEC: tvp->u.data = fe->dtv_property_cache.atscmh_rs_code_mode_sec; break; case DTV_ATSCMH_SCCC_BLOCK_MODE: tvp->u.data = fe->dtv_property_cache.atscmh_sccc_block_mode; break; case DTV_ATSCMH_SCCC_CODE_MODE_A: tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_a; break; case DTV_ATSCMH_SCCC_CODE_MODE_B: tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_b; break; case DTV_ATSCMH_SCCC_CODE_MODE_C: tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_c; break; case DTV_ATSCMH_SCCC_CODE_MODE_D: tvp->u.data = fe->dtv_property_cache.atscmh_sccc_code_mode_d; break; case DTV_LNA: tvp->u.data = c->lna; break; /* Fill quality measures */ case DTV_STAT_SIGNAL_STRENGTH: tvp->u.st = c->strength; break; case DTV_STAT_CNR: tvp->u.st = c->cnr; break; case DTV_STAT_PRE_ERROR_BIT_COUNT: tvp->u.st = c->pre_bit_error; break; case DTV_STAT_PRE_TOTAL_BIT_COUNT: tvp->u.st = c->pre_bit_count; break; case DTV_STAT_POST_ERROR_BIT_COUNT: tvp->u.st = c->post_bit_error; break; case DTV_STAT_POST_TOTAL_BIT_COUNT: tvp->u.st = c->post_bit_count; break; case DTV_STAT_ERROR_BLOCK_COUNT: tvp->u.st = c->block_error; break; case DTV_STAT_TOTAL_BLOCK_COUNT: tvp->u.st = c->block_count; break; default: dev_dbg(fe->dvb->device, "%s: FE property %d doesn't exist\n", __func__, tvp->cmd); return -EINVAL; } /* Allow the frontend to override outgoing properties */ if (fe->ops.get_property) { r = fe->ops.get_property(fe, tvp); if (r < 0) 
			return r;
	}

	dtv_property_dump(fe, tvp);

	return 0;
}

/* Forward declaration: used below by the DTV_TUNE property handler */
static int dtv_set_frontend(struct dvb_frontend *fe);

/*
 * is_dvbv3_delsys() - true if @delsys is one of the four delivery systems
 * that the legacy DVBv3 API can represent directly
 * (DVB-T, DVB-C Annex A, DVB-S, ATSC).
 */
static bool is_dvbv3_delsys(u32 delsys)
{
	bool status;

	status = (delsys == SYS_DVBT) || (delsys == SYS_DVBC_ANNEX_A) ||
		 (delsys == SYS_DVBS) || (delsys == SYS_ATSC);

	return status;
}

/**
 * emulate_delivery_system - emulate a DVBv5 delivery system with a DVBv3 type
 * @fe:			struct frontend;
 * @delsys:			DVBv5 type that will be used for emulation
 *
 * Provides emulation for delivery systems that are compatible with the old
 * DVBv3 call. Among its usages, it provides support for ISDB-T, and allows
 * using a DVB-S2 only frontend just like it were a DVB-S, if the frontend
 * parameters are compatible with DVB-S spec.
 */
static int emulate_delivery_system(struct dvb_frontend *fe, u32 delsys)
{
	int i;
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;

	c->delivery_system = delsys;

	/*
	 * If the call is for ISDB-T, put it into full-seg, auto mode, TV
	 */
	if (c->delivery_system == SYS_ISDBT) {
		dev_dbg(fe->dvb->device,
			"%s: Using defaults for SYS_ISDBT\n",
			__func__);

		if (!c->bandwidth_hz)
			c->bandwidth_hz = 6000000;

		c->isdbt_partial_reception = 0;
		c->isdbt_sb_mode = 0;
		c->isdbt_sb_subchannel = 0;
		c->isdbt_sb_segment_idx = 0;
		c->isdbt_sb_segment_count = 0;
		/* 7 == 0b111: presumably enables all three layers (A/B/C),
		 * matching the 3-entry layer[] loop below — TODO confirm */
		c->isdbt_layer_enabled = 7;
		for (i = 0; i < 3; i++) {
			c->layer[i].fec = FEC_AUTO;
			c->layer[i].modulation = QAM_AUTO;
			c->layer[i].interleaving = 0;
			c->layer[i].segment_count = 0;
		}
	}
	dev_dbg(fe->dvb->device, "%s: change delivery system on cache to %d\n",
		__func__, c->delivery_system);

	return 0;
}

/**
 * dvbv5_set_delivery_system - Sets the delivery system for a DVBv5 API call
 * @fe:			frontend struct
 * @desired_system:	delivery system requested by the user
 *
 * A DVBv5 call know what's the desired system it wants. So, set it.
 *
 * There are, however, a few known issues with early DVBv5 applications that
 * are also handled by this logic:
 *
 * 1) Some early apps use SYS_UNDEFINED as the desired delivery system.
* This is an API violation, but, as we don't want to break userspace, * convert it to the first supported delivery system. * 2) Some apps might be using a DVBv5 call in a wrong way, passing, for * example, SYS_DVBT instead of SYS_ISDBT. This is because early usage of * ISDB-T provided backward compat with DVB-T. */ static int dvbv5_set_delivery_system(struct dvb_frontend *fe, u32 desired_system) { int ncaps; u32 delsys = SYS_UNDEFINED; struct dtv_frontend_properties *c = &fe->dtv_property_cache; enum dvbv3_emulation_type type; /* * It was reported that some old DVBv5 applications were * filling delivery_system with SYS_UNDEFINED. If this happens, * assume that the application wants to use the first supported * delivery system. */ if (desired_system == SYS_UNDEFINED) desired_system = fe->ops.delsys[0]; /* * This is a DVBv5 call. So, it likely knows the supported * delivery systems. So, check if the desired delivery system is * supported */ ncaps = 0; while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) { if (fe->ops.delsys[ncaps] == desired_system) { c->delivery_system = desired_system; dev_dbg(fe->dvb->device, "%s: Changing delivery system to %d\n", __func__, desired_system); return 0; } ncaps++; } /* * The requested delivery system isn't supported. Maybe userspace * is requesting a DVBv3 compatible delivery system. 
* * The emulation only works if the desired system is one of the * delivery systems supported by DVBv3 API */ if (!is_dvbv3_delsys(desired_system)) { dev_dbg(fe->dvb->device, "%s: Delivery system %d not supported.\n", __func__, desired_system); return -EINVAL; } type = dvbv3_type(desired_system); /* * Get the last non-DVBv3 delivery system that has the same type * of the desired system */ ncaps = 0; while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) { if (dvbv3_type(fe->ops.delsys[ncaps]) == type) delsys = fe->ops.delsys[ncaps]; ncaps++; } /* There's nothing compatible with the desired delivery system */ if (delsys == SYS_UNDEFINED) { dev_dbg(fe->dvb->device, "%s: Delivery system %d not supported on emulation mode.\n", __func__, desired_system); return -EINVAL; } dev_dbg(fe->dvb->device, "%s: Using delivery system %d emulated as if it were %d\n", __func__, delsys, desired_system); return emulate_delivery_system(fe, desired_system); } /** * dvbv3_set_delivery_system - Sets the delivery system for a DVBv3 API call * @fe: frontend struct * * A DVBv3 call doesn't know what's the desired system it wants. It also * doesn't allow to switch between different types. Due to that, userspace * should use DVBv5 instead. * However, in order to avoid breaking userspace API, limited backward * compatibility support is provided. * * There are some delivery systems that are incompatible with DVBv3 calls. * * This routine should work fine for frontends that support just one delivery * system. * * For frontends that support multiple frontends: * 1) It defaults to use the first supported delivery system. There's an * userspace application that allows changing it at runtime; * * 2) If the current delivery system is not compatible with DVBv3, it gets * the first one that it is compatible. 
* * NOTE: in order for this to work with applications like Kaffeine that * uses a DVBv5 call for DVB-S2 and a DVBv3 call to go back to * DVB-S, drivers that support both DVB-S and DVB-S2 should have the * SYS_DVBS entry before the SYS_DVBS2, otherwise it won't switch back * to DVB-S. */ static int dvbv3_set_delivery_system(struct dvb_frontend *fe) { int ncaps; u32 delsys = SYS_UNDEFINED; struct dtv_frontend_properties *c = &fe->dtv_property_cache; /* If not set yet, defaults to the first supported delivery system */ if (c->delivery_system == SYS_UNDEFINED) c->delivery_system = fe->ops.delsys[0]; /* * Trivial case: just use the current one, if it already a DVBv3 * delivery system */ if (is_dvbv3_delsys(c->delivery_system)) { dev_dbg(fe->dvb->device, "%s: Using delivery system to %d\n", __func__, c->delivery_system); return 0; } /* * Seek for the first delivery system that it is compatible with a * DVBv3 standard */ ncaps = 0; while (fe->ops.delsys[ncaps] && ncaps < MAX_DELSYS) { if (dvbv3_type(fe->ops.delsys[ncaps]) != DVBV3_UNKNOWN) { delsys = fe->ops.delsys[ncaps]; break; } ncaps++; } if (delsys == SYS_UNDEFINED) { dev_dbg(fe->dvb->device, "%s: Couldn't find a delivery system that works with FE_SET_FRONTEND\n", __func__); return -EINVAL; } return emulate_delivery_system(fe, delsys); } static int dtv_property_process_set(struct dvb_frontend *fe, struct dtv_property *tvp, struct file *file) { int r = 0; struct dtv_frontend_properties *c = &fe->dtv_property_cache; /* Allow the frontend to validate incoming properties */ if (fe->ops.set_property) { r = fe->ops.set_property(fe, tvp); if (r < 0) return r; } switch(tvp->cmd) { case DTV_CLEAR: /* * Reset a cache of data specific to the frontend here. This does * not effect hardware. */ dvb_frontend_clear_cache(fe); break; case DTV_TUNE: /* interpret the cache of data, build either a traditional frontend * tunerequest so we can pass validation in the FE_SET_FRONTEND * ioctl. 
*/ c->state = tvp->cmd; dev_dbg(fe->dvb->device, "%s: Finalised property cache\n", __func__); r = dtv_set_frontend(fe); break; case DTV_FREQUENCY: c->frequency = tvp->u.data; break; case DTV_MODULATION: c->modulation = tvp->u.data; break; case DTV_BANDWIDTH_HZ: c->bandwidth_hz = tvp->u.data; break; case DTV_INVERSION: c->inversion = tvp->u.data; break; case DTV_SYMBOL_RATE: c->symbol_rate = tvp->u.data; break; case DTV_INNER_FEC: c->fec_inner = tvp->u.data; break; case DTV_PILOT: c->pilot = tvp->u.data; break; case DTV_ROLLOFF: c->rolloff = tvp->u.data; break; case DTV_DELIVERY_SYSTEM: r = dvbv5_set_delivery_system(fe, tvp->u.data); break; case DTV_VOLTAGE: c->voltage = tvp->u.data; r = dvb_frontend_ioctl_legacy(file, FE_SET_VOLTAGE, (void *)c->voltage); break; case DTV_TONE: c->sectone = tvp->u.data; r = dvb_frontend_ioctl_legacy(file, FE_SET_TONE, (void *)c->sectone); break; case DTV_CODE_RATE_HP: c->code_rate_HP = tvp->u.data; break; case DTV_CODE_RATE_LP: c->code_rate_LP = tvp->u.data; break; case DTV_GUARD_INTERVAL: c->guard_interval = tvp->u.data; break; case DTV_TRANSMISSION_MODE: c->transmission_mode = tvp->u.data; break; case DTV_HIERARCHY: c->hierarchy = tvp->u.data; break; case DTV_INTERLEAVING: c->interleaving = tvp->u.data; break; /* ISDB-T Support here */ case DTV_ISDBT_PARTIAL_RECEPTION: c->isdbt_partial_reception = tvp->u.data; break; case DTV_ISDBT_SOUND_BROADCASTING: c->isdbt_sb_mode = tvp->u.data; break; case DTV_ISDBT_SB_SUBCHANNEL_ID: c->isdbt_sb_subchannel = tvp->u.data; break; case DTV_ISDBT_SB_SEGMENT_IDX: c->isdbt_sb_segment_idx = tvp->u.data; break; case DTV_ISDBT_SB_SEGMENT_COUNT: c->isdbt_sb_segment_count = tvp->u.data; break; case DTV_ISDBT_LAYER_ENABLED: c->isdbt_layer_enabled = tvp->u.data; break; case DTV_ISDBT_LAYERA_FEC: c->layer[0].fec = tvp->u.data; break; case DTV_ISDBT_LAYERA_MODULATION: c->layer[0].modulation = tvp->u.data; break; case DTV_ISDBT_LAYERA_SEGMENT_COUNT: c->layer[0].segment_count = tvp->u.data; break; case 
DTV_ISDBT_LAYERA_TIME_INTERLEAVING: c->layer[0].interleaving = tvp->u.data; break; case DTV_ISDBT_LAYERB_FEC: c->layer[1].fec = tvp->u.data; break; case DTV_ISDBT_LAYERB_MODULATION: c->layer[1].modulation = tvp->u.data; break; case DTV_ISDBT_LAYERB_SEGMENT_COUNT: c->layer[1].segment_count = tvp->u.data; break; case DTV_ISDBT_LAYERB_TIME_INTERLEAVING: c->layer[1].interleaving = tvp->u.data; break; case DTV_ISDBT_LAYERC_FEC: c->layer[2].fec = tvp->u.data; break; case DTV_ISDBT_LAYERC_MODULATION: c->layer[2].modulation = tvp->u.data; break; case DTV_ISDBT_LAYERC_SEGMENT_COUNT: c->layer[2].segment_count = tvp->u.data; break; case DTV_ISDBT_LAYERC_TIME_INTERLEAVING: c->layer[2].interleaving = tvp->u.data; break; /* Multistream support */ case DTV_STREAM_ID: case DTV_DVBT2_PLP_ID_LEGACY: c->stream_id = tvp->u.data; break; /* ATSC-MH */ case DTV_ATSCMH_PARADE_ID: fe->dtv_property_cache.atscmh_parade_id = tvp->u.data; break; case DTV_ATSCMH_RS_FRAME_ENSEMBLE: fe->dtv_property_cache.atscmh_rs_frame_ensemble = tvp->u.data; break; case DTV_LNA: c->lna = tvp->u.data; if (fe->ops.set_lna) r = fe->ops.set_lna(fe); break; default: return -EINVAL; } return r; } static int dvb_frontend_ioctl(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_frontend_private *fepriv = fe->frontend_priv; int err = -EOPNOTSUPP; dev_dbg(fe->dvb->device, "%s: (%d)\n", __func__, _IOC_NR(cmd)); if (down_interruptible(&fepriv->sem)) return -ERESTARTSYS; if (fepriv->exit != DVB_FE_NO_EXIT) { up(&fepriv->sem); return -ENODEV; } if ((file->f_flags & O_ACCMODE) == O_RDONLY && (_IOC_DIR(cmd) != _IOC_READ || cmd == FE_GET_EVENT || cmd == FE_DISEQC_RECV_SLAVE_REPLY)) { up(&fepriv->sem); return -EPERM; } if ((cmd == FE_SET_PROPERTY) || (cmd == FE_GET_PROPERTY)) err = dvb_frontend_ioctl_properties(file, cmd, parg); else { c->state = DTV_UNDEFINED; err = 
dvb_frontend_ioctl_legacy(file, cmd, parg); } up(&fepriv->sem); return err; } static int dvb_frontend_ioctl_properties(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; int err = 0; struct dtv_properties *tvps = NULL; struct dtv_property *tvp = NULL; int i; dev_dbg(fe->dvb->device, "%s:\n", __func__); if(cmd == FE_SET_PROPERTY) { tvps = (struct dtv_properties __user *)parg; dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num); dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props); /* Put an arbitrary limit on the number of messages that can * be sent at once */ if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS)) return -EINVAL; tvp = kmalloc(tvps->num * sizeof(struct dtv_property), GFP_KERNEL); if (!tvp) { err = -ENOMEM; goto out; } if (copy_from_user(tvp, tvps->props, tvps->num * sizeof(struct dtv_property))) { err = -EFAULT; goto out; } for (i = 0; i < tvps->num; i++) { err = dtv_property_process_set(fe, tvp + i, file); if (err < 0) goto out; (tvp + i)->result = err; } if (c->state == DTV_TUNE) dev_dbg(fe->dvb->device, "%s: Property cache is full, tuning\n", __func__); } else if(cmd == FE_GET_PROPERTY) { tvps = (struct dtv_properties __user *)parg; dev_dbg(fe->dvb->device, "%s: properties.num = %d\n", __func__, tvps->num); dev_dbg(fe->dvb->device, "%s: properties.props = %p\n", __func__, tvps->props); /* Put an arbitrary limit on the number of messages that can * be sent at once */ if ((tvps->num == 0) || (tvps->num > DTV_IOCTL_MAX_MSGS)) return -EINVAL; tvp = kmalloc(tvps->num * sizeof(struct dtv_property), GFP_KERNEL); if (!tvp) { err = -ENOMEM; goto out; } if (copy_from_user(tvp, tvps->props, tvps->num * sizeof(struct dtv_property))) { err = -EFAULT; goto out; } /* * Fills the cache out struct with the 
cache contents, plus * the data retrieved from get_frontend, if the frontend * is not idle. Otherwise, returns the cached content */ if (fepriv->state != FESTATE_IDLE) { err = dtv_get_frontend(fe, NULL); if (err < 0) goto out; } for (i = 0; i < tvps->num; i++) { err = dtv_property_process_get(fe, c, tvp + i, file); if (err < 0) goto out; (tvp + i)->result = err; } if (copy_to_user(tvps->props, tvp, tvps->num * sizeof(struct dtv_property))) { err = -EFAULT; goto out; } } else err = -EOPNOTSUPP; out: kfree(tvp); return err; } static int dtv_set_frontend(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; struct dvb_frontend_tune_settings fetunesettings; u32 rolloff = 0; if (dvb_frontend_check_parameters(fe) < 0) return -EINVAL; /* * Initialize output parameters to match the values given by * the user. FE_SET_FRONTEND triggers an initial frontend event * with status = 0, which copies output parameters to userspace. */ dtv_property_legacy_params_sync(fe, &fepriv->parameters_out); /* * Be sure that the bandwidth will be filled for all * non-satellite systems, as tuners need to know what * low pass/Nyquist half filter should be applied, in * order to avoid inter-channel noise. * * ISDB-T and DVB-T/T2 already sets bandwidth. * ATSC and DVB-C don't set, so, the core should fill it. * * On DVB-C Annex A and C, the bandwidth is a function of * the roll-off and symbol rate. Annex B defines different * roll-off factors depending on the modulation. Fortunately, * Annex B is only used with 6MHz, so there's no need to * calculate it. * * While not officially supported, a side effect of handling it at * the cache level is that a program could retrieve the bandwidth * via DTV_BANDWIDTH_HZ, which may be useful for test programs. 
*/ switch (c->delivery_system) { case SYS_ATSC: case SYS_DVBC_ANNEX_B: c->bandwidth_hz = 6000000; break; case SYS_DVBC_ANNEX_A: rolloff = 115; break; case SYS_DVBC_ANNEX_C: rolloff = 113; break; default: break; } if (rolloff) c->bandwidth_hz = (c->symbol_rate * rolloff) / 100; /* force auto frequency inversion if requested */ if (dvb_force_auto_inversion) c->inversion = INVERSION_AUTO; /* * without hierarchical coding code_rate_LP is irrelevant, * so we tolerate the otherwise invalid FEC_NONE setting */ if (c->hierarchy == HIERARCHY_NONE && c->code_rate_LP == FEC_NONE) c->code_rate_LP = FEC_AUTO; /* get frontend-specific tuning settings */ memset(&fetunesettings, 0, sizeof(struct dvb_frontend_tune_settings)); if (fe->ops.get_tune_settings && (fe->ops.get_tune_settings(fe, &fetunesettings) == 0)) { fepriv->min_delay = (fetunesettings.min_delay_ms * HZ) / 1000; fepriv->max_drift = fetunesettings.max_drift; fepriv->step_size = fetunesettings.step_size; } else { /* default values */ switch (c->delivery_system) { case SYS_DVBS: case SYS_DVBS2: case SYS_ISDBS: case SYS_TURBO: case SYS_DVBC_ANNEX_A: case SYS_DVBC_ANNEX_C: fepriv->min_delay = HZ / 20; fepriv->step_size = c->symbol_rate / 16000; fepriv->max_drift = c->symbol_rate / 2000; break; case SYS_DVBT: case SYS_DVBT2: case SYS_ISDBT: case SYS_DTMB: fepriv->min_delay = HZ / 20; fepriv->step_size = fe->ops.info.frequency_stepsize * 2; fepriv->max_drift = (fe->ops.info.frequency_stepsize * 2) + 1; break; default: /* * FIXME: This sounds wrong! if freqency_stepsize is * defined by the frontend, why not use it??? 
*/ fepriv->min_delay = HZ / 20; fepriv->step_size = 0; /* no zigzag */ fepriv->max_drift = 0; break; } } if (dvb_override_tune_delay > 0) fepriv->min_delay = (dvb_override_tune_delay * HZ) / 1000; fepriv->state = FESTATE_RETUNE; /* Request the search algorithm to search */ fepriv->algo_status |= DVBFE_ALGO_SEARCH_AGAIN; dvb_frontend_clear_events(fe); dvb_frontend_add_event(fe, 0); dvb_frontend_wakeup(fe); fepriv->status = 0; return 0; } static int dvb_frontend_ioctl_legacy(struct file *file, unsigned int cmd, void *parg) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dtv_frontend_properties *c = &fe->dtv_property_cache; int err = -EOPNOTSUPP; switch (cmd) { case FE_GET_INFO: { struct dvb_frontend_info* info = parg; memcpy(info, &fe->ops.info, sizeof(struct dvb_frontend_info)); dvb_frontend_get_frequency_limits(fe, &info->frequency_min, &info->frequency_max); /* * Associate the 4 delivery systems supported by DVBv3 * API with their DVBv5 counterpart. For the other standards, * use the closest type, assuming that it would hopefully * work with a DVBv3 application. * It should be noticed that, on multi-frontend devices with * different types (terrestrial and cable, for example), * a pure DVBv3 application won't be able to use all delivery * systems. Yet, changing the DVBv5 cache to the other delivery * system should be enough for making it work. 
*/ switch (dvbv3_type(c->delivery_system)) { case DVBV3_QPSK: info->type = FE_QPSK; break; case DVBV3_ATSC: info->type = FE_ATSC; break; case DVBV3_QAM: info->type = FE_QAM; break; case DVBV3_OFDM: info->type = FE_OFDM; break; default: dev_err(fe->dvb->device, "%s: doesn't know how to handle a DVBv3 call to delivery system %i\n", __func__, c->delivery_system); fe->ops.info.type = FE_OFDM; } dev_dbg(fe->dvb->device, "%s: current delivery system on cache: %d, V3 type: %d\n", __func__, c->delivery_system, fe->ops.info.type); /* Force the CAN_INVERSION_AUTO bit on. If the frontend doesn't * do it, it is done for it. */ info->caps |= FE_CAN_INVERSION_AUTO; err = 0; break; } case FE_READ_STATUS: { fe_status_t* status = parg; /* if retune was requested but hasn't occurred yet, prevent * that user get signal state from previous tuning */ if (fepriv->state == FESTATE_RETUNE || fepriv->state == FESTATE_ERROR) { err=0; *status = 0; break; } if (fe->ops.read_status) err = fe->ops.read_status(fe, status); break; } case FE_READ_BER: if (fe->ops.read_ber) { if (fepriv->thread) err = fe->ops.read_ber(fe, (__u32 *) parg); else err = -EAGAIN; } break; case FE_READ_SIGNAL_STRENGTH: if (fe->ops.read_signal_strength) { if (fepriv->thread) err = fe->ops.read_signal_strength(fe, (__u16 *) parg); else err = -EAGAIN; } break; case FE_READ_SNR: if (fe->ops.read_snr) { if (fepriv->thread) err = fe->ops.read_snr(fe, (__u16 *) parg); else err = -EAGAIN; } break; case FE_READ_UNCORRECTED_BLOCKS: if (fe->ops.read_ucblocks) { if (fepriv->thread) err = fe->ops.read_ucblocks(fe, (__u32 *) parg); else err = -EAGAIN; } break; case FE_DISEQC_RESET_OVERLOAD: if (fe->ops.diseqc_reset_overload) { err = fe->ops.diseqc_reset_overload(fe); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_DISEQC_SEND_MASTER_CMD: if (fe->ops.diseqc_send_master_cmd) { err = fe->ops.diseqc_send_master_cmd(fe, (struct dvb_diseqc_master_cmd*) parg); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; 
case FE_DISEQC_SEND_BURST: if (fe->ops.diseqc_send_burst) { err = fe->ops.diseqc_send_burst(fe, (fe_sec_mini_cmd_t) parg); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_SET_TONE: if (fe->ops.set_tone) { err = fe->ops.set_tone(fe, (fe_sec_tone_mode_t) parg); fepriv->tone = (fe_sec_tone_mode_t) parg; fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_SET_VOLTAGE: if (fe->ops.set_voltage) { err = fe->ops.set_voltage(fe, (fe_sec_voltage_t) parg); fepriv->voltage = (fe_sec_voltage_t) parg; fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_DISHNETWORK_SEND_LEGACY_CMD: if (fe->ops.dishnetwork_send_legacy_command) { err = fe->ops.dishnetwork_send_legacy_command(fe, (unsigned long) parg); fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } else if (fe->ops.set_voltage) { /* * NOTE: This is a fallback condition. Some frontends * (stv0299 for instance) take longer than 8msec to * respond to a set_voltage command. Those switches * need custom routines to switch properly. For all * other frontends, the following should work ok. * Dish network legacy switches (as used by Dish500) * are controlled by sending 9-bit command words * spaced 8msec apart. * the actual command word is switch/port dependent * so it is up to the userspace application to send * the right command. 
* The command must always start with a '0' after * initialization, so parg is 8 bits and does not * include the initialization or start bit */ unsigned long swcmd = ((unsigned long) parg) << 1; struct timeval nexttime; struct timeval tv[10]; int i; u8 last = 1; if (dvb_frontend_debug) printk("%s switch command: 0x%04lx\n", __func__, swcmd); do_gettimeofday(&nexttime); if (dvb_frontend_debug) tv[0] = nexttime; /* before sending a command, initialize by sending * a 32ms 18V to the switch */ fe->ops.set_voltage(fe, SEC_VOLTAGE_18); dvb_frontend_sleep_until(&nexttime, 32000); for (i = 0; i < 9; i++) { if (dvb_frontend_debug) do_gettimeofday(&tv[i + 1]); if ((swcmd & 0x01) != last) { /* set voltage to (last ? 13V : 18V) */ fe->ops.set_voltage(fe, (last) ? SEC_VOLTAGE_13 : SEC_VOLTAGE_18); last = (last) ? 0 : 1; } swcmd = swcmd >> 1; if (i != 8) dvb_frontend_sleep_until(&nexttime, 8000); } if (dvb_frontend_debug) { printk("%s(%d): switch delay (should be 32k followed by all 8k\n", __func__, fe->dvb->num); for (i = 1; i < 10; i++) printk("%d: %d\n", i, timeval_usec_diff(tv[i-1] , tv[i])); } err = 0; fepriv->state = FESTATE_DISEQC; fepriv->status = 0; } break; case FE_DISEQC_RECV_SLAVE_REPLY: if (fe->ops.diseqc_recv_slave_reply) err = fe->ops.diseqc_recv_slave_reply(fe, (struct dvb_diseqc_slave_reply*) parg); break; case FE_ENABLE_HIGH_LNB_VOLTAGE: if (fe->ops.enable_high_lnb_voltage) err = fe->ops.enable_high_lnb_voltage(fe, (long) parg); break; case FE_SET_FRONTEND: err = dvbv3_set_delivery_system(fe); if (err) break; err = dtv_property_cache_sync(fe, c, parg); if (err) break; err = dtv_set_frontend(fe); break; case FE_GET_EVENT: err = dvb_frontend_get_event (fe, parg, file->f_flags); break; case FE_GET_FRONTEND: err = dtv_get_frontend(fe, parg); break; case FE_SET_FRONTEND_TUNE_MODE: fepriv->tune_mode_flags = (unsigned long) parg; err = 0; break; } return err; } static unsigned int dvb_frontend_poll(struct file *file, struct poll_table_struct *wait) { struct dvb_device 
*dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; dev_dbg_ratelimited(fe->dvb->device, "%s:\n", __func__); poll_wait (file, &fepriv->events.wait_queue, wait); if (fepriv->events.eventw != fepriv->events.eventr) return (POLLIN | POLLRDNORM | POLLPRI); return 0; } static int dvb_frontend_open(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; struct dvb_adapter *adapter = fe->dvb; int ret; dev_dbg(fe->dvb->device, "%s:\n", __func__); if (fepriv->exit == DVB_FE_DEVICE_REMOVED) return -ENODEV; if (adapter->mfe_shared) { mutex_lock (&adapter->mfe_lock); if (adapter->mfe_dvbdev == NULL) adapter->mfe_dvbdev = dvbdev; else if (adapter->mfe_dvbdev != dvbdev) { struct dvb_device *mfedev = adapter->mfe_dvbdev; struct dvb_frontend *mfe = mfedev->priv; struct dvb_frontend_private *mfepriv = mfe->frontend_priv; int mferetry = (dvb_mfe_wait_time << 1); mutex_unlock (&adapter->mfe_lock); while (mferetry-- && (mfedev->users != -1 || mfepriv->thread != NULL)) { if(msleep_interruptible(500)) { if(signal_pending(current)) return -EINTR; } } mutex_lock (&adapter->mfe_lock); if(adapter->mfe_dvbdev != dvbdev) { mfedev = adapter->mfe_dvbdev; mfe = mfedev->priv; mfepriv = mfe->frontend_priv; if (mfedev->users != -1 || mfepriv->thread != NULL) { mutex_unlock (&adapter->mfe_lock); return -EBUSY; } adapter->mfe_dvbdev = dvbdev; } } } if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) { if ((ret = fe->ops.ts_bus_ctrl(fe, 1)) < 0) goto err0; /* If we took control of the bus, we need to force reinitialization. This is because many ts_bus_ctrl() functions strobe the RESET pin on the demod, and if the frontend thread already exists then the dvb_init() routine won't get called (which is what usually does initial register configuration). 
*/ fepriv->reinitialise = 1; } if ((ret = dvb_generic_open (inode, file)) < 0) goto err1; if ((file->f_flags & O_ACCMODE) != O_RDONLY) { /* normal tune mode when opened R/W */ fepriv->tune_mode_flags &= ~FE_TUNE_MODE_ONESHOT; fepriv->tone = -1; fepriv->voltage = -1; ret = dvb_frontend_start (fe); if (ret) goto err2; /* empty event queue */ fepriv->events.eventr = fepriv->events.eventw = 0; } if (adapter->mfe_shared) mutex_unlock (&adapter->mfe_lock); return ret; err2: dvb_generic_release(inode, file); err1: if (dvbdev->users == -1 && fe->ops.ts_bus_ctrl) fe->ops.ts_bus_ctrl(fe, 0); err0: if (adapter->mfe_shared) mutex_unlock (&adapter->mfe_lock); return ret; } static int dvb_frontend_release(struct inode *inode, struct file *file) { struct dvb_device *dvbdev = file->private_data; struct dvb_frontend *fe = dvbdev->priv; struct dvb_frontend_private *fepriv = fe->frontend_priv; int ret; dev_dbg(fe->dvb->device, "%s:\n", __func__); if ((file->f_flags & O_ACCMODE) != O_RDONLY) { fepriv->release_jiffies = jiffies; mb(); } ret = dvb_generic_release (inode, file); if (dvbdev->users == -1) { wake_up(&fepriv->wait_queue); if (fepriv->exit != DVB_FE_NO_EXIT) wake_up(&dvbdev->wait_queue); if (fe->ops.ts_bus_ctrl) fe->ops.ts_bus_ctrl(fe, 0); } return ret; } static const struct file_operations dvb_frontend_fops = { .owner = THIS_MODULE, .unlocked_ioctl = dvb_generic_ioctl, .poll = dvb_frontend_poll, .open = dvb_frontend_open, .release = dvb_frontend_release, .llseek = noop_llseek, }; int dvb_frontend_suspend(struct dvb_frontend *fe) { int ret = 0; dev_dbg(fe->dvb->device, "%s: adap=%d fe=%d\n", __func__, fe->dvb->num, fe->id); if (fe->ops.tuner_ops.sleep) ret = fe->ops.tuner_ops.sleep(fe); if (fe->ops.sleep) ret = fe->ops.sleep(fe); return ret; } EXPORT_SYMBOL(dvb_frontend_suspend); int dvb_frontend_resume(struct dvb_frontend *fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; int ret = 0; dev_dbg(fe->dvb->device, "%s: adap=%d fe=%d\n", __func__, fe->dvb->num, 
fe->id); if (fe->ops.init) ret = fe->ops.init(fe); if (fe->ops.tuner_ops.init) ret = fe->ops.tuner_ops.init(fe); fepriv->state = FESTATE_RETUNE; dvb_frontend_wakeup(fe); return ret; } EXPORT_SYMBOL(dvb_frontend_resume); int dvb_register_frontend(struct dvb_adapter* dvb, struct dvb_frontend* fe) { struct dvb_frontend_private *fepriv; static const struct dvb_device dvbdev_template = { .users = ~0, .writers = 1, .readers = (~0)-1, .fops = &dvb_frontend_fops, .kernel_ioctl = dvb_frontend_ioctl }; dev_dbg(dvb->device, "%s:\n", __func__); if (mutex_lock_interruptible(&frontend_mutex)) return -ERESTARTSYS; fe->frontend_priv = kzalloc(sizeof(struct dvb_frontend_private), GFP_KERNEL); if (fe->frontend_priv == NULL) { mutex_unlock(&frontend_mutex); return -ENOMEM; } fepriv = fe->frontend_priv; sema_init(&fepriv->sem, 1); init_waitqueue_head (&fepriv->wait_queue); init_waitqueue_head (&fepriv->events.wait_queue); mutex_init(&fepriv->events.mtx); fe->dvb = dvb; fepriv->inversion = INVERSION_OFF; dev_info(fe->dvb->device, "DVB: registering adapter %i frontend %i (%s)...\n", fe->dvb->num, fe->id, fe->ops.info.name); dvb_register_device (fe->dvb, &fepriv->dvbdev, &dvbdev_template, fe, DVB_DEVICE_FRONTEND); /* * Initialize the cache to the proper values according with the * first supported delivery system (ops->delsys[0]) */ fe->dtv_property_cache.delivery_system = fe->ops.delsys[0]; dvb_frontend_clear_cache(fe); mutex_unlock(&frontend_mutex); return 0; } EXPORT_SYMBOL(dvb_register_frontend); int dvb_unregister_frontend(struct dvb_frontend* fe) { struct dvb_frontend_private *fepriv = fe->frontend_priv; dev_dbg(fe->dvb->device, "%s:\n", __func__); mutex_lock(&frontend_mutex); dvb_frontend_stop (fe); mutex_unlock(&frontend_mutex); if (fepriv->dvbdev->users < -1) wait_event(fepriv->dvbdev->wait_queue, fepriv->dvbdev->users==-1); mutex_lock(&frontend_mutex); dvb_unregister_device (fepriv->dvbdev); /* fe is invalid now */ kfree(fepriv); mutex_unlock(&frontend_mutex); return 0; } 
EXPORT_SYMBOL(dvb_unregister_frontend);

#ifdef CONFIG_MEDIA_ATTACH
/*
 * Detach a frontend and drop the module references taken when the
 * sub-drivers were attached via dvb_attach() (CONFIG_MEDIA_ATTACH).
 * Each release callback is invoked and then symbol_put_addr() drops
 * the refcount on the module that provided it.
 */
void dvb_frontend_detach(struct dvb_frontend* fe)
{
	void *ptr;

	if (fe->ops.release_sec) {
		fe->ops.release_sec(fe);
		symbol_put_addr(fe->ops.release_sec);
	}
	if (fe->ops.tuner_ops.release) {
		fe->ops.tuner_ops.release(fe);
		symbol_put_addr(fe->ops.tuner_ops.release);
	}
	if (fe->ops.analog_ops.release) {
		fe->ops.analog_ops.release(fe);
		symbol_put_addr(fe->ops.analog_ops.release);
	}
	/* ops.release may clear itself inside the callback, so the
	 * address is saved first and symbol_put_addr() uses the copy. */
	ptr = (void*)fe->ops.release;
	if (ptr) {
		fe->ops.release(fe);
		symbol_put_addr(ptr);
	}
}
#else
/*
 * Detach a frontend without module refcounting: just run each
 * sub-driver's release callback in the same order as above.
 */
void dvb_frontend_detach(struct dvb_frontend* fe)
{
	if (fe->ops.release_sec)
		fe->ops.release_sec(fe);
	if (fe->ops.tuner_ops.release)
		fe->ops.tuner_ops.release(fe);
	if (fe->ops.analog_ops.release)
		fe->ops.analog_ops.release(fe);
	if (fe->ops.release)
		fe->ops.release(fe);
}
#endif
EXPORT_SYMBOL(dvb_frontend_detach);
gpl-2.0
djmax81/Hani_Kernel_Exynos5433
drivers/watchdog/of_xilinx_wdt.c
2249
9849
/* * of_xilinx_wdt.c 1.01 A Watchdog Device Driver for Xilinx xps_timebase_wdt * * (C) Copyright 2011 (Alejandro Cabrera <aldaya@gmail.com>) * * ----------------------- * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * ----------------------- * 30-May-2011 Alejandro Cabrera <aldaya@gmail.com> * - If "xlnx,wdt-enable-once" wasn't found on device tree the * module will use CONFIG_WATCHDOG_NOWAYOUT * - If the device tree parameters ("clock-frequency" and * "xlnx,wdt-interval") wasn't found the driver won't * know the wdt reset interval */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/watchdog.h> #include <linux/io.h> #include <linux/uaccess.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_address.h> /* Register offsets for the Wdt device */ #define XWT_TWCSR0_OFFSET 0x0 /* Control/Status Register0 */ #define XWT_TWCSR1_OFFSET 0x4 /* Control/Status Register1 */ #define XWT_TBR_OFFSET 0x8 /* Timebase Register Offset */ /* Control/Status Register Masks */ #define XWT_CSR0_WRS_MASK 0x00000008 /* Reset status */ #define XWT_CSR0_WDS_MASK 0x00000004 /* Timer state */ #define XWT_CSR0_EWDT1_MASK 0x00000002 /* Enable bit 1 */ /* Control/Status Register 0/1 bits */ #define XWT_CSRX_EWDT2_MASK 0x00000001 /* Enable bit 2 */ /* SelfTest constants */ #define XWT_MAX_SELFTEST_LOOP_COUNT 0x00010000 #define XWT_TIMER_FAILED 0xFFFFFFFF #define WATCHDOG_NAME "Xilinx Watchdog" #define PFX WATCHDOG_NAME ": " struct xwdt_device { struct resource res; void __iomem *base; u32 nowayout; u32 wdt_interval; u32 boot_status; }; static struct xwdt_device xdev; static u32 timeout; 
static u32 control_status_reg; static u8 expect_close; static u8 no_timeout; static unsigned long driver_open; static DEFINE_SPINLOCK(spinlock); static void xwdt_start(void) { spin_lock(&spinlock); /* Clean previous status and enable the watchdog timer */ control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET); control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK); iowrite32((control_status_reg | XWT_CSR0_EWDT1_MASK), xdev.base + XWT_TWCSR0_OFFSET); iowrite32(XWT_CSRX_EWDT2_MASK, xdev.base + XWT_TWCSR1_OFFSET); spin_unlock(&spinlock); } static void xwdt_stop(void) { spin_lock(&spinlock); control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET); iowrite32((control_status_reg & ~XWT_CSR0_EWDT1_MASK), xdev.base + XWT_TWCSR0_OFFSET); iowrite32(0, xdev.base + XWT_TWCSR1_OFFSET); spin_unlock(&spinlock); pr_info("Stopped!\n"); } static void xwdt_keepalive(void) { spin_lock(&spinlock); control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET); control_status_reg |= (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK); iowrite32(control_status_reg, xdev.base + XWT_TWCSR0_OFFSET); spin_unlock(&spinlock); } static void xwdt_get_status(int *status) { int new_status; spin_lock(&spinlock); control_status_reg = ioread32(xdev.base + XWT_TWCSR0_OFFSET); new_status = ((control_status_reg & (XWT_CSR0_WRS_MASK | XWT_CSR0_WDS_MASK)) != 0); spin_unlock(&spinlock); *status = 0; if (new_status & 1) *status |= WDIOF_CARDRESET; } static u32 xwdt_selftest(void) { int i; u32 timer_value1; u32 timer_value2; spin_lock(&spinlock); timer_value1 = ioread32(xdev.base + XWT_TBR_OFFSET); timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET); for (i = 0; ((i <= XWT_MAX_SELFTEST_LOOP_COUNT) && (timer_value2 == timer_value1)); i++) { timer_value2 = ioread32(xdev.base + XWT_TBR_OFFSET); } spin_unlock(&spinlock); if (timer_value2 != timer_value1) return ~XWT_TIMER_FAILED; else return XWT_TIMER_FAILED; } static int xwdt_open(struct inode *inode, struct file *file) { /* Only one process can handle the 
wdt at a time */ if (test_and_set_bit(0, &driver_open)) return -EBUSY; /* Make sure that the module are always loaded...*/ if (xdev.nowayout) __module_get(THIS_MODULE); xwdt_start(); pr_info("Started...\n"); return nonseekable_open(inode, file); } static int xwdt_release(struct inode *inode, struct file *file) { if (expect_close == 42) { xwdt_stop(); } else { pr_crit("Unexpected close, not stopping watchdog!\n"); xwdt_keepalive(); } clear_bit(0, &driver_open); expect_close = 0; return 0; } /* * xwdt_write: * @file: file handle to the watchdog * @buf: buffer to write (unused as data does not matter here * @count: count of bytes * @ppos: pointer to the position to write. No seeks allowed * * A write to a watchdog device is defined as a keepalive signal. Any * write of data will do, as we don't define content meaning. */ static ssize_t xwdt_write(struct file *file, const char __user *buf, size_t len, loff_t *ppos) { if (len) { if (!xdev.nowayout) { size_t i; /* In case it was set long ago */ expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, buf + i)) return -EFAULT; if (c == 'V') expect_close = 42; } } xwdt_keepalive(); } return len; } static const struct watchdog_info ident = { .options = WDIOF_MAGICCLOSE | WDIOF_KEEPALIVEPING, .firmware_version = 1, .identity = WATCHDOG_NAME, }; /* * xwdt_ioctl: * @file: file handle to the device * @cmd: watchdog command * @arg: argument pointer * * The watchdog API defines a common set of functions for all watchdogs * according to their available features. */ static long xwdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int status; union { struct watchdog_info __user *ident; int __user *i; } uarg; uarg.i = (int __user *)arg; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(uarg.ident, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETBOOTSTATUS: return put_user(xdev.boot_status, uarg.i); case WDIOC_GETSTATUS: xwdt_get_status(&status); return put_user(status, uarg.i); case WDIOC_KEEPALIVE: xwdt_keepalive(); return 0; case WDIOC_GETTIMEOUT: if (no_timeout) return -ENOTTY; else return put_user(timeout, uarg.i); default: return -ENOTTY; } } static const struct file_operations xwdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = xwdt_write, .open = xwdt_open, .release = xwdt_release, .unlocked_ioctl = xwdt_ioctl, }; static struct miscdevice xwdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &xwdt_fops, }; static int xwdt_probe(struct platform_device *pdev) { int rc; u32 *tmptr; u32 *pfreq; no_timeout = 0; pfreq = (u32 *)of_get_property(pdev->dev.of_node, "clock-frequency", NULL); if (pfreq == NULL) { pr_warn("The watchdog clock frequency cannot be obtained!\n"); no_timeout = 1; } rc = of_address_to_resource(pdev->dev.of_node, 0, &xdev.res); if (rc) { pr_warn("invalid address!\n"); return rc; } tmptr = (u32 *)of_get_property(pdev->dev.of_node, "xlnx,wdt-interval", NULL); if (tmptr == NULL) { pr_warn("Parameter \"xlnx,wdt-interval\" not found in device tree!\n"); no_timeout = 1; } else { xdev.wdt_interval = *tmptr; } tmptr = (u32 *)of_get_property(pdev->dev.of_node, "xlnx,wdt-enable-once", NULL); if (tmptr == NULL) { pr_warn("Parameter \"xlnx,wdt-enable-once\" not found in device tree!\n"); xdev.nowayout = WATCHDOG_NOWAYOUT; } /* * Twice of the 2^wdt_interval / freq because the first wdt overflow is * ignored (interrupt), reset is only generated at second wdt overflow */ if (!no_timeout) timeout = 2 * ((1<<xdev.wdt_interval) / *pfreq); if (!request_mem_region(xdev.res.start, xdev.res.end - xdev.res.start + 1, WATCHDOG_NAME)) { rc = -ENXIO; pr_err("memory request failure!\n"); goto err_out; } xdev.base = ioremap(xdev.res.start, xdev.res.end - xdev.res.start + 1); if (xdev.base == NULL) { rc = -ENOMEM; pr_err("ioremap failure!\n"); goto 
release_mem; } rc = xwdt_selftest(); if (rc == XWT_TIMER_FAILED) { pr_err("SelfTest routine error!\n"); goto unmap_io; } xwdt_get_status(&xdev.boot_status); rc = misc_register(&xwdt_miscdev); if (rc) { pr_err("cannot register miscdev on minor=%d (err=%d)\n", xwdt_miscdev.minor, rc); goto unmap_io; } if (no_timeout) pr_info("driver loaded (timeout=? sec, nowayout=%d)\n", xdev.nowayout); else pr_info("driver loaded (timeout=%d sec, nowayout=%d)\n", timeout, xdev.nowayout); expect_close = 0; clear_bit(0, &driver_open); return 0; unmap_io: iounmap(xdev.base); release_mem: release_mem_region(xdev.res.start, resource_size(&xdev.res)); err_out: return rc; } static int xwdt_remove(struct platform_device *dev) { misc_deregister(&xwdt_miscdev); iounmap(xdev.base); release_mem_region(xdev.res.start, resource_size(&xdev.res)); return 0; } /* Match table for of_platform binding */ static struct of_device_id xwdt_of_match[] = { { .compatible = "xlnx,xps-timebase-wdt-1.01.a", }, {}, }; MODULE_DEVICE_TABLE(of, xwdt_of_match); static struct platform_driver xwdt_driver = { .probe = xwdt_probe, .remove = xwdt_remove, .driver = { .owner = THIS_MODULE, .name = WATCHDOG_NAME, .of_match_table = xwdt_of_match, }, }; module_platform_driver(xwdt_driver); MODULE_AUTHOR("Alejandro Cabrera <aldaya@gmail.com>"); MODULE_DESCRIPTION("Xilinx Watchdog driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
gpl-2.0
Ander-Alvarez/CoffeeKernel
drivers/uio/uio_pdrv_genirq.c
2249
7483
/*
 * drivers/uio/uio_pdrv_genirq.c
 *
 * Userspace I/O platform driver with generic IRQ handling code.
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Based on uio_pdrv.c by Uwe Kleine-Koenig,
 * Copyright (C) 2008 by Digi International Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */

#include <linux/platform_device.h>
#include <linux/uio_driver.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/stringify.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_address.h>

#define DRIVER_NAME "uio_pdrv_genirq"

struct uio_pdrv_genirq_platdata {
	struct uio_info *uioinfo;
	spinlock_t lock;
	unsigned long flags;
	struct platform_device *pdev;
};

/* Called for each open() of the UIO node. */
static int uio_pdrv_genirq_open(struct uio_info *info, struct inode *inode)
{
	struct uio_pdrv_genirq_platdata *pdata = info->priv;
	struct device *dev = &pdata->pdev->dev;

	/* Wait until the Runtime PM code has woken up the device */
	pm_runtime_get_sync(dev);
	return 0;
}

static int uio_pdrv_genirq_release(struct uio_info *info, struct inode *inode)
{
	struct uio_pdrv_genirq_platdata *priv = info->priv;

	/* Tell the Runtime PM code that the device has become idle */
	pm_runtime_put_sync(&priv->pdev->dev);
	return 0;
}

static irqreturn_t uio_pdrv_genirq_handler(int irq, struct uio_info *dev_info)
{
	struct uio_pdrv_genirq_platdata *priv = dev_info->priv;

	/* Just disable the interrupt in the interrupt controller, and
	 * remember the state so we can allow user space to enable it later.
*/ if (!test_and_set_bit(0, &priv->flags)) disable_irq_nosync(irq); return IRQ_HANDLED; } static int uio_pdrv_genirq_irqcontrol(struct uio_info *dev_info, s32 irq_on) { struct uio_pdrv_genirq_platdata *priv = dev_info->priv; unsigned long flags; /* Allow user space to enable and disable the interrupt * in the interrupt controller, but keep track of the * state to prevent per-irq depth damage. * * Serialize this operation to support multiple tasks. */ spin_lock_irqsave(&priv->lock, flags); if (irq_on) { if (test_and_clear_bit(0, &priv->flags)) enable_irq(dev_info->irq); } else { if (!test_and_set_bit(0, &priv->flags)) disable_irq(dev_info->irq); } spin_unlock_irqrestore(&priv->lock, flags); return 0; } static int uio_pdrv_genirq_probe(struct platform_device *pdev) { struct uio_info *uioinfo = pdev->dev.platform_data; struct uio_pdrv_genirq_platdata *priv; struct uio_mem *uiomem; int ret = -EINVAL; int i; if (pdev->dev.of_node) { int irq; /* alloc uioinfo for one device */ uioinfo = kzalloc(sizeof(*uioinfo), GFP_KERNEL); if (!uioinfo) { ret = -ENOMEM; dev_err(&pdev->dev, "unable to kmalloc\n"); goto bad2; } uioinfo->name = pdev->dev.of_node->name; uioinfo->version = "devicetree"; /* Multiple IRQs are not supported */ irq = platform_get_irq(pdev, 0); if (irq == -ENXIO) uioinfo->irq = UIO_IRQ_NONE; else uioinfo->irq = irq; } if (!uioinfo || !uioinfo->name || !uioinfo->version) { dev_err(&pdev->dev, "missing platform_data\n"); goto bad0; } if (uioinfo->handler || uioinfo->irqcontrol || uioinfo->irq_flags & IRQF_SHARED) { dev_err(&pdev->dev, "interrupt configuration error\n"); goto bad0; } priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { ret = -ENOMEM; dev_err(&pdev->dev, "unable to kmalloc\n"); goto bad0; } priv->uioinfo = uioinfo; spin_lock_init(&priv->lock); priv->flags = 0; /* interrupt is enabled to begin with */ priv->pdev = pdev; if (!uioinfo->irq) { ret = platform_get_irq(pdev, 0); if (ret < 0) { dev_err(&pdev->dev, "failed to get IRQ\n"); goto bad0; } 
uioinfo->irq = ret; } uiomem = &uioinfo->mem[0]; for (i = 0; i < pdev->num_resources; ++i) { struct resource *r = &pdev->resource[i]; if (r->flags != IORESOURCE_MEM) continue; if (uiomem >= &uioinfo->mem[MAX_UIO_MAPS]) { dev_warn(&pdev->dev, "device has more than " __stringify(MAX_UIO_MAPS) " I/O memory resources.\n"); break; } uiomem->memtype = UIO_MEM_PHYS; uiomem->addr = r->start; uiomem->size = resource_size(r); uiomem->name = r->name; ++uiomem; } while (uiomem < &uioinfo->mem[MAX_UIO_MAPS]) { uiomem->size = 0; ++uiomem; } /* This driver requires no hardware specific kernel code to handle * interrupts. Instead, the interrupt handler simply disables the * interrupt in the interrupt controller. User space is responsible * for performing hardware specific acknowledge and re-enabling of * the interrupt in the interrupt controller. * * Interrupt sharing is not supported. */ uioinfo->handler = uio_pdrv_genirq_handler; uioinfo->irqcontrol = uio_pdrv_genirq_irqcontrol; uioinfo->open = uio_pdrv_genirq_open; uioinfo->release = uio_pdrv_genirq_release; uioinfo->priv = priv; /* Enable Runtime PM for this device: * The device starts in suspended state to allow the hardware to be * turned off by default. The Runtime PM bus code should power on the * hardware and enable clocks at open(). 
*/ pm_runtime_enable(&pdev->dev); ret = uio_register_device(&pdev->dev, priv->uioinfo); if (ret) { dev_err(&pdev->dev, "unable to register uio device\n"); goto bad1; } platform_set_drvdata(pdev, priv); return 0; bad1: kfree(priv); pm_runtime_disable(&pdev->dev); bad0: /* kfree uioinfo for OF */ if (pdev->dev.of_node) kfree(uioinfo); bad2: return ret; } static int uio_pdrv_genirq_remove(struct platform_device *pdev) { struct uio_pdrv_genirq_platdata *priv = platform_get_drvdata(pdev); uio_unregister_device(priv->uioinfo); pm_runtime_disable(&pdev->dev); priv->uioinfo->handler = NULL; priv->uioinfo->irqcontrol = NULL; /* kfree uioinfo for OF */ if (pdev->dev.of_node) kfree(priv->uioinfo); kfree(priv); return 0; } static int uio_pdrv_genirq_runtime_nop(struct device *dev) { /* Runtime PM callback shared between ->runtime_suspend() * and ->runtime_resume(). Simply returns success. * * In this driver pm_runtime_get_sync() and pm_runtime_put_sync() * are used at open() and release() time. This allows the * Runtime PM code to turn off power to the device while the * device is unused, ie before open() and after release(). * * This Runtime PM callback does not need to save or restore * any registers since user space is responsbile for hardware * register reinitialization after open(). 
*/ return 0; } static const struct dev_pm_ops uio_pdrv_genirq_dev_pm_ops = { .runtime_suspend = uio_pdrv_genirq_runtime_nop, .runtime_resume = uio_pdrv_genirq_runtime_nop, }; #ifdef CONFIG_OF static const struct of_device_id uio_of_genirq_match[] = { { /* empty for now */ }, }; MODULE_DEVICE_TABLE(of, uio_of_genirq_match); #else # define uio_of_genirq_match NULL #endif static struct platform_driver uio_pdrv_genirq = { .probe = uio_pdrv_genirq_probe, .remove = uio_pdrv_genirq_remove, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .pm = &uio_pdrv_genirq_dev_pm_ops, .of_match_table = uio_of_genirq_match, }, }; module_platform_driver(uio_pdrv_genirq); MODULE_AUTHOR("Magnus Damm"); MODULE_DESCRIPTION("Userspace I/O platform driver with generic IRQ handling"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRIVER_NAME);
gpl-2.0
olegsvs/android_kernel_ark_benefit_m7
arch/sh/kernel/cpu/sh2a/setup-sh7264.c
2505
16813
/* * SH7264 Setup * * Copyright (C) 2012 Renesas Electronics Europe Ltd * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/serial_sci.h> #include <linux/usb/r8a66597.h> #include <linux/sh_timer.h> #include <linux/io.h> enum { UNUSED = 0, /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, PINT0, PINT1, PINT2, PINT3, PINT4, PINT5, PINT6, PINT7, DMAC0, DMAC1, DMAC2, DMAC3, DMAC4, DMAC5, DMAC6, DMAC7, DMAC8, DMAC9, DMAC10, DMAC11, DMAC12, DMAC13, DMAC14, DMAC15, USB, VDC3, CMT0, CMT1, BSC, WDT, MTU0_ABCD, MTU0_VEF, MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU, MTU3_ABCD, MTU3_TCI3V, MTU4_ABCD, MTU4_TCI4V, PWMT1, PWMT2, ADC_ADI, SSIF0, SSII1, SSII2, SSII3, RSPDIF, IIC30, IIC31, IIC32, IIC33, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI, SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI, SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI, SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI, SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI, SIO_FIFO, RSPIC0, RSPIC1, RCAN0, RCAN1, IEBC, CD_ROMD, NFMC, SDHI, RTC, SRCC0, SRCC1, DCOMU, OFFI, IFEI, /* interrupt groups */ PINT, SCIF0, SCIF1, SCIF2, SCIF3, SCIF4, SCIF5, SCIF6, SCIF7, }; static struct intc_vect vectors[] __initdata = { INTC_IRQ(IRQ0, 64), INTC_IRQ(IRQ1, 65), INTC_IRQ(IRQ2, 66), INTC_IRQ(IRQ3, 67), INTC_IRQ(IRQ4, 68), INTC_IRQ(IRQ5, 69), INTC_IRQ(IRQ6, 70), INTC_IRQ(IRQ7, 71), INTC_IRQ(PINT0, 80), INTC_IRQ(PINT1, 81), INTC_IRQ(PINT2, 82), INTC_IRQ(PINT3, 83), INTC_IRQ(PINT4, 84), INTC_IRQ(PINT5, 85), INTC_IRQ(PINT6, 86), INTC_IRQ(PINT7, 87), INTC_IRQ(DMAC0, 108), INTC_IRQ(DMAC0, 109), INTC_IRQ(DMAC1, 112), INTC_IRQ(DMAC1, 113), INTC_IRQ(DMAC2, 116), INTC_IRQ(DMAC2, 117), 
INTC_IRQ(DMAC3, 120), INTC_IRQ(DMAC3, 121), INTC_IRQ(DMAC4, 124), INTC_IRQ(DMAC4, 125), INTC_IRQ(DMAC5, 128), INTC_IRQ(DMAC5, 129), INTC_IRQ(DMAC6, 132), INTC_IRQ(DMAC6, 133), INTC_IRQ(DMAC7, 136), INTC_IRQ(DMAC7, 137), INTC_IRQ(DMAC8, 140), INTC_IRQ(DMAC8, 141), INTC_IRQ(DMAC9, 144), INTC_IRQ(DMAC9, 145), INTC_IRQ(DMAC10, 148), INTC_IRQ(DMAC10, 149), INTC_IRQ(DMAC11, 152), INTC_IRQ(DMAC11, 153), INTC_IRQ(DMAC12, 156), INTC_IRQ(DMAC12, 157), INTC_IRQ(DMAC13, 160), INTC_IRQ(DMAC13, 161), INTC_IRQ(DMAC14, 164), INTC_IRQ(DMAC14, 165), INTC_IRQ(DMAC15, 168), INTC_IRQ(DMAC15, 169), INTC_IRQ(USB, 170), INTC_IRQ(VDC3, 171), INTC_IRQ(VDC3, 172), INTC_IRQ(VDC3, 173), INTC_IRQ(VDC3, 174), INTC_IRQ(CMT0, 175), INTC_IRQ(CMT1, 176), INTC_IRQ(BSC, 177), INTC_IRQ(WDT, 178), INTC_IRQ(MTU0_ABCD, 179), INTC_IRQ(MTU0_ABCD, 180), INTC_IRQ(MTU0_ABCD, 181), INTC_IRQ(MTU0_ABCD, 182), INTC_IRQ(MTU0_VEF, 183), INTC_IRQ(MTU0_VEF, 184), INTC_IRQ(MTU0_VEF, 185), INTC_IRQ(MTU1_AB, 186), INTC_IRQ(MTU1_AB, 187), INTC_IRQ(MTU1_VU, 188), INTC_IRQ(MTU1_VU, 189), INTC_IRQ(MTU2_AB, 190), INTC_IRQ(MTU2_AB, 191), INTC_IRQ(MTU2_VU, 192), INTC_IRQ(MTU2_VU, 193), INTC_IRQ(MTU3_ABCD, 194), INTC_IRQ(MTU3_ABCD, 195), INTC_IRQ(MTU3_ABCD, 196), INTC_IRQ(MTU3_ABCD, 197), INTC_IRQ(MTU3_TCI3V, 198), INTC_IRQ(MTU4_ABCD, 199), INTC_IRQ(MTU4_ABCD, 200), INTC_IRQ(MTU4_ABCD, 201), INTC_IRQ(MTU4_ABCD, 202), INTC_IRQ(MTU4_TCI4V, 203), INTC_IRQ(PWMT1, 204), INTC_IRQ(PWMT2, 205), INTC_IRQ(ADC_ADI, 206), INTC_IRQ(SSIF0, 207), INTC_IRQ(SSIF0, 208), INTC_IRQ(SSIF0, 209), INTC_IRQ(SSII1, 210), INTC_IRQ(SSII1, 211), INTC_IRQ(SSII2, 212), INTC_IRQ(SSII2, 213), INTC_IRQ(SSII3, 214), INTC_IRQ(SSII3, 215), INTC_IRQ(RSPDIF, 216), INTC_IRQ(IIC30, 217), INTC_IRQ(IIC30, 218), INTC_IRQ(IIC30, 219), INTC_IRQ(IIC30, 220), INTC_IRQ(IIC30, 221), INTC_IRQ(IIC31, 222), INTC_IRQ(IIC31, 223), INTC_IRQ(IIC31, 224), INTC_IRQ(IIC31, 225), INTC_IRQ(IIC31, 226), INTC_IRQ(IIC32, 227), INTC_IRQ(IIC32, 228), INTC_IRQ(IIC32, 229), INTC_IRQ(IIC32, 230), 
INTC_IRQ(IIC32, 231), INTC_IRQ(SCIF0_BRI, 232), INTC_IRQ(SCIF0_ERI, 233), INTC_IRQ(SCIF0_RXI, 234), INTC_IRQ(SCIF0_TXI, 235), INTC_IRQ(SCIF1_BRI, 236), INTC_IRQ(SCIF1_ERI, 237), INTC_IRQ(SCIF1_RXI, 238), INTC_IRQ(SCIF1_TXI, 239), INTC_IRQ(SCIF2_BRI, 240), INTC_IRQ(SCIF2_ERI, 241), INTC_IRQ(SCIF2_RXI, 242), INTC_IRQ(SCIF2_TXI, 243), INTC_IRQ(SCIF3_BRI, 244), INTC_IRQ(SCIF3_ERI, 245), INTC_IRQ(SCIF3_RXI, 246), INTC_IRQ(SCIF3_TXI, 247), INTC_IRQ(SCIF4_BRI, 248), INTC_IRQ(SCIF4_ERI, 249), INTC_IRQ(SCIF4_RXI, 250), INTC_IRQ(SCIF4_TXI, 251), INTC_IRQ(SCIF5_BRI, 252), INTC_IRQ(SCIF5_ERI, 253), INTC_IRQ(SCIF5_RXI, 254), INTC_IRQ(SCIF5_TXI, 255), INTC_IRQ(SCIF6_BRI, 256), INTC_IRQ(SCIF6_ERI, 257), INTC_IRQ(SCIF6_RXI, 258), INTC_IRQ(SCIF6_TXI, 259), INTC_IRQ(SCIF7_BRI, 260), INTC_IRQ(SCIF7_ERI, 261), INTC_IRQ(SCIF7_RXI, 262), INTC_IRQ(SCIF7_TXI, 263), INTC_IRQ(SIO_FIFO, 264), INTC_IRQ(RSPIC0, 265), INTC_IRQ(RSPIC0, 266), INTC_IRQ(RSPIC0, 267), INTC_IRQ(RSPIC1, 268), INTC_IRQ(RSPIC1, 269), INTC_IRQ(RSPIC1, 270), INTC_IRQ(RCAN0, 271), INTC_IRQ(RCAN0, 272), INTC_IRQ(RCAN0, 273), INTC_IRQ(RCAN0, 274), INTC_IRQ(RCAN0, 275), INTC_IRQ(RCAN1, 276), INTC_IRQ(RCAN1, 277), INTC_IRQ(RCAN1, 278), INTC_IRQ(RCAN1, 279), INTC_IRQ(RCAN1, 280), INTC_IRQ(IEBC, 281), INTC_IRQ(CD_ROMD, 282), INTC_IRQ(CD_ROMD, 283), INTC_IRQ(CD_ROMD, 284), INTC_IRQ(CD_ROMD, 285), INTC_IRQ(CD_ROMD, 286), INTC_IRQ(CD_ROMD, 287), INTC_IRQ(NFMC, 288), INTC_IRQ(NFMC, 289), INTC_IRQ(NFMC, 290), INTC_IRQ(NFMC, 291), INTC_IRQ(SDHI, 292), INTC_IRQ(SDHI, 293), INTC_IRQ(SDHI, 294), INTC_IRQ(RTC, 296), INTC_IRQ(RTC, 297), INTC_IRQ(RTC, 298), INTC_IRQ(SRCC0, 299), INTC_IRQ(SRCC0, 300), INTC_IRQ(SRCC0, 301), INTC_IRQ(SRCC0, 302), INTC_IRQ(SRCC0, 303), INTC_IRQ(SRCC1, 304), INTC_IRQ(SRCC1, 305), INTC_IRQ(SRCC1, 306), INTC_IRQ(SRCC1, 307), INTC_IRQ(SRCC1, 308), INTC_IRQ(DCOMU, 310), INTC_IRQ(DCOMU, 311), INTC_IRQ(DCOMU, 312), }; static struct intc_group groups[] __initdata = { INTC_GROUP(PINT, PINT0, PINT1, PINT2, PINT3, PINT4, 
PINT5, PINT6, PINT7), INTC_GROUP(SCIF0, SCIF0_BRI, SCIF0_ERI, SCIF0_RXI, SCIF0_TXI), INTC_GROUP(SCIF1, SCIF1_BRI, SCIF1_ERI, SCIF1_RXI, SCIF1_TXI), INTC_GROUP(SCIF2, SCIF2_BRI, SCIF2_ERI, SCIF2_RXI, SCIF2_TXI), INTC_GROUP(SCIF3, SCIF3_BRI, SCIF3_ERI, SCIF3_RXI, SCIF3_TXI), INTC_GROUP(SCIF4, SCIF4_BRI, SCIF4_ERI, SCIF4_RXI, SCIF4_TXI), INTC_GROUP(SCIF5, SCIF5_BRI, SCIF5_ERI, SCIF5_RXI, SCIF5_TXI), INTC_GROUP(SCIF6, SCIF6_BRI, SCIF6_ERI, SCIF6_RXI, SCIF6_TXI), INTC_GROUP(SCIF7, SCIF7_BRI, SCIF7_ERI, SCIF7_RXI, SCIF7_TXI), }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xfffe0818, 0, 16, 4, /* IPR01 */ { IRQ0, IRQ1, IRQ2, IRQ3 } }, { 0xfffe081a, 0, 16, 4, /* IPR02 */ { IRQ4, IRQ5, IRQ6, IRQ7 } }, { 0xfffe0820, 0, 16, 4, /* IPR05 */ { PINT, 0, 0, 0 } }, { 0xfffe0c00, 0, 16, 4, /* IPR06 */ { DMAC0, DMAC1, DMAC2, DMAC3 } }, { 0xfffe0c02, 0, 16, 4, /* IPR07 */ { DMAC4, DMAC5, DMAC6, DMAC7 } }, { 0xfffe0c04, 0, 16, 4, /* IPR08 */ { DMAC8, DMAC9, DMAC10, DMAC11 } }, { 0xfffe0c06, 0, 16, 4, /* IPR09 */ { DMAC12, DMAC13, DMAC14, DMAC15 } }, { 0xfffe0c08, 0, 16, 4, /* IPR10 */ { USB, VDC3, CMT0, CMT1 } }, { 0xfffe0c0a, 0, 16, 4, /* IPR11 */ { BSC, WDT, MTU0_ABCD, MTU0_VEF } }, { 0xfffe0c0c, 0, 16, 4, /* IPR12 */ { MTU1_AB, MTU1_VU, MTU2_AB, MTU2_VU } }, { 0xfffe0c0e, 0, 16, 4, /* IPR13 */ { MTU3_ABCD, MTU3_TCI3V, MTU4_ABCD, MTU4_TCI4V } }, { 0xfffe0c10, 0, 16, 4, /* IPR14 */ { PWMT1, PWMT2, ADC_ADI, 0 } }, { 0xfffe0c12, 0, 16, 4, /* IPR15 */ { SSIF0, SSII1, SSII2, SSII3 } }, { 0xfffe0c14, 0, 16, 4, /* IPR16 */ { RSPDIF, IIC30, IIC31, IIC32 } }, { 0xfffe0c16, 0, 16, 4, /* IPR17 */ { SCIF0, SCIF1, SCIF2, SCIF3 } }, { 0xfffe0c18, 0, 16, 4, /* IPR18 */ { SCIF4, SCIF5, SCIF6, SCIF7 } }, { 0xfffe0c1a, 0, 16, 4, /* IPR19 */ { SIO_FIFO, 0, RSPIC0, RSPIC1, } }, { 0xfffe0c1c, 0, 16, 4, /* IPR20 */ { RCAN0, RCAN1, IEBC, CD_ROMD } }, { 0xfffe0c1e, 0, 16, 4, /* IPR21 */ { NFMC, SDHI, RTC, 0 } }, { 0xfffe0c20, 0, 16, 4, /* IPR22 */ { SRCC0, SRCC1, 0, DCOMU } }, }; static 
struct intc_mask_reg mask_registers[] __initdata = { { 0xfffe0808, 0, 16, /* PINTER */ { 0, 0, 0, 0, 0, 0, 0, 0, PINT7, PINT6, PINT5, PINT4, PINT3, PINT2, PINT1, PINT0 } }, }; static DECLARE_INTC_DESC(intc_desc, "sh7264", vectors, groups, mask_registers, prio_registers, NULL); static struct plat_sci_port scif0_platform_data = { .mapbase = 0xfffe8000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 233, 234, 235, 232 }, .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; static struct plat_sci_port scif1_platform_data = { .mapbase = 0xfffe8800, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 237, 238, 239, 236 }, .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif1_device = { .name = "sh-sci", .id = 1, .dev = { .platform_data = &scif1_platform_data, }, }; static struct plat_sci_port scif2_platform_data = { .mapbase = 0xfffe9000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 241, 242, 243, 240 }, .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif2_device = { .name = "sh-sci", .id = 2, .dev = { .platform_data = &scif2_platform_data, }, }; static struct plat_sci_port scif3_platform_data = { .mapbase = 0xfffe9800, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 245, 246, 247, 244 }, .regtype = SCIx_SH2_SCIF_FIFODATA_REGTYPE, }; static struct platform_device scif3_device = { .name = "sh-sci", .id = 3, .dev = { 
.platform_data = &scif3_platform_data,
	},
};

/*
 * SCIF4..SCIF7 serial ports: one plat_sci_port descriptor plus one
 * "sh-sci" platform device each.  The ports differ only in register
 * base address, IRQ numbers and device id.
 */
static struct plat_sci_port scif4_platform_data = {
	.mapbase	= 0xfffea000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
			  SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 249, 250, 251, 248 },	/* ERI, RXI, TXI, BRI */
	.regtype	= SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};

static struct platform_device scif4_device = {
	.name		= "sh-sci",
	.id		= 4,
	.dev		= {
		.platform_data	= &scif4_platform_data,
	},
};

static struct plat_sci_port scif5_platform_data = {
	.mapbase	= 0xfffea800,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
			  SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 253, 254, 255, 252 },
	.regtype	= SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};

static struct platform_device scif5_device = {
	.name		= "sh-sci",
	.id		= 5,
	.dev		= {
		.platform_data	= &scif5_platform_data,
	},
};

static struct plat_sci_port scif6_platform_data = {
	.mapbase	= 0xfffeb000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
			  SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 257, 258, 259, 256 },
	.regtype	= SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};

static struct platform_device scif6_device = {
	.name		= "sh-sci",
	.id		= 6,
	.dev		= {
		.platform_data	= &scif6_platform_data,
	},
};

static struct plat_sci_port scif7_platform_data = {
	.mapbase	= 0xfffeb800,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RIE | SCSCR_TIE | SCSCR_RE | SCSCR_TE |
			  SCSCR_REIE | SCSCR_TOIE,
	.scbrr_algo_id	= SCBRR_ALGO_2,
	.type		= PORT_SCIF,
	.irqs		= { 261, 262, 263, 260 },
	.regtype	= SCIx_SH2_SCIF_FIFODATA_REGTYPE,
};

static struct platform_device scif7_device = {
	.name		= "sh-sci",
	.id		= 7,
	.dev		= {
		.platform_data	= &scif7_platform_data,
	},
};

/*
 * CMT (compare match timer) channels 0 and 1: used as clockevent
 * sources only; the clocksource rating is 0 on both channels.
 * NOTE(review): unlike cmt1_platform_data below, no .name is set
 * here -- confirm whether that is intentional.
 */
static struct sh_timer_config cmt0_platform_data = {
	.channel_offset		= 0x02,
	.timer_bit		= 0,
	.clockevent_rating	= 125,
	.clocksource_rating	= 0, /* disabled due to code generation issues */
};

static struct resource cmt0_resources[] = {
	[0] = {
		.name	= "CMT0",
		.start	= 0xfffec002,
		.end	= 0xfffec007,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 175,		/* CMT0 compare match IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device cmt0_device = {
	.name		= "sh_cmt",
	.id		= 0,
	.dev		= {
		.platform_data	= &cmt0_platform_data,
	},
	.resource	= cmt0_resources,
	.num_resources	= ARRAY_SIZE(cmt0_resources),
};

static struct sh_timer_config cmt1_platform_data = {
	.name			= "CMT1",
	.channel_offset		= 0x08,
	.timer_bit		= 1,
	.clockevent_rating	= 125,
	.clocksource_rating	= 0, /* disabled due to code generation issues */
};

static struct resource cmt1_resources[] = {
	[0] = {
		.name	= "CMT1",
		.start	= 0xfffec008,
		.end	= 0xfffec00d,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 176,		/* CMT1 compare match IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device cmt1_device = {
	.name		= "sh_cmt",
	.id		= 1,
	.dev		= {
		.platform_data	= &cmt1_platform_data,
	},
	.resource	= cmt1_resources,
	.num_resources	= ARRAY_SIZE(cmt1_resources),
};

/*
 * MTU2 (multi-function timer pulse unit) channels 0 and 1, clockevent
 * rating 200 (preferred over the CMT channels).  The negative
 * channel_offset values place the channel registers below the mapped
 * resource base.
 */
static struct sh_timer_config mtu2_0_platform_data = {
	.name			= "MTU2_0",
	.channel_offset		= -0x80,
	.timer_bit		= 0,
	.clockevent_rating	= 200,
};

static struct resource mtu2_0_resources[] = {
	[0] = {
		.name	= "MTU2_0",
		.start	= 0xfffe4300,
		.end	= 0xfffe4326,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 179,		/* MTU2 channel 0 IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mtu2_0_device = {
	.name		= "sh_mtu2",
	.id		= 0,
	.dev		= {
		.platform_data	= &mtu2_0_platform_data,
	},
	.resource	= mtu2_0_resources,
	.num_resources	= ARRAY_SIZE(mtu2_0_resources),
};

static struct sh_timer_config mtu2_1_platform_data = {
	.name			= "MTU2_1",
	.channel_offset		= -0x100,
	.timer_bit		= 1,
	.clockevent_rating	= 200,
};

static struct resource mtu2_1_resources[] = {
	[0] = {
		.name	= "MTU2_1",
		.start	= 0xfffe4380,
		.end	= 0xfffe4390,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 186,		/* MTU2 channel 1 IRQ */
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mtu2_1_device = {
	.name		= "sh_mtu2",
	.id		= 1,
	.dev		= {
		.platform_data	= &mtu2_1_platform_data,
	},
	.resource	= mtu2_1_resources,
	.num_resources	= ARRAY_SIZE(mtu2_1_resources),
};

/* On-chip RTC: single shared interrupt for period/carry/alarm. */
static struct resource rtc_resources[] = {
	[0] = {
		.start	= 0xfffe6000,
		.end	= 0xfffe6000 + 0x30 - 1,
		.flags	= IORESOURCE_IO,
	},
	[1] = {
		/* Shared Period/Carry/Alarm IRQ */
		.start	= 296,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device rtc_device = {
	.name		= "sh-rtc",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(rtc_resources),
	.resource	= rtc_resources,
};

/* USB Host */

/*
 * Port power hook for the on-chip r8a66597 USB host controller.
 * NOTE(review): writes a fixed value regardless of 'port'/'power' --
 * apparently a one-time controller setup; confirm against the
 * hardware manual.
 */
static void usb_port_power(int port, int power)
{
	__raw_writew(0x200, 0xffffc0c2); /* Initialise UACS25 */
}

static struct r8a66597_platdata r8a66597_data = {
	.on_chip	= 1,
	.endian		= 1,
	.port_power	= usb_port_power,
};

static struct resource r8a66597_usb_host_resources[] = {
	[0] = {
		.start	= 0xffffc000,
		.end	= 0xffffc0e4,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= 170,
		.end	= 170,
		.flags	= IORESOURCE_IRQ | IRQF_TRIGGER_LOW,
	},
};

static struct platform_device r8a66597_usb_host_device = {
	.name		= "r8a66597_hcd",
	.id		= 0,
	.dev		= {
		.dma_mask		= NULL,		/* not use dma */
		.coherent_dma_mask	= 0xffffffff,
		.platform_data		= &r8a66597_data,
	},
	.num_resources	= ARRAY_SIZE(r8a66597_usb_host_resources),
	.resource	= r8a66597_usb_host_resources,
};

/* All on-chip devices registered at arch_initcall time. */
static struct platform_device *sh7264_devices[] __initdata = {
	&scif0_device,
	&scif1_device,
	&scif2_device,
	&scif3_device,
	&scif4_device,
	&scif5_device,
	&scif6_device,
	&scif7_device,
	&cmt0_device,
	&cmt1_device,
	&mtu2_0_device,
	&mtu2_1_device,
	&rtc_device,
	&r8a66597_usb_host_device,
};

/* Register all SH7264 on-chip platform devices. */
static int __init sh7264_devices_setup(void)
{
	return platform_add_devices(sh7264_devices,
				    ARRAY_SIZE(sh7264_devices));
}
arch_initcall(sh7264_devices_setup);

/* Hook up the on-chip interrupt controller. */
void __init plat_irq_setup(void)
{
	register_intc_controller(&intc_desc);
}

/*
 * Subset of devices needed before the normal initcalls run
 * (early console and system timers; no RTC, no USB).
 */
static struct platform_device *sh7264_early_devices[] __initdata = {
	&scif0_device,
	&scif1_device,
	&scif2_device,
	&scif3_device,
	&scif4_device,
	&scif5_device,
	&scif6_device,
	&scif7_device,
	&cmt0_device,
	&cmt1_device,
	&mtu2_0_device,
	&mtu2_1_device,
};

/* Register the early (pre-initcall) platform devices. */
void __init plat_early_device_setup(void)
{
	early_platform_add_devices(sh7264_early_devices,
				   ARRAY_SIZE(sh7264_early_devices));
}
gpl-2.0
cameron581/lge-kernel-gproj
arch/m68k/platform/coldfire/reset.c
4553
1152
/* * reset.c -- common ColdFire SoC reset support * * (C) Copyright 2012, Greg Ungerer <gerg@uclinux.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <asm/machdep.h> #include <asm/coldfire.h> #include <asm/mcfsim.h> /* * There are 2 common methods amongst the ColdFure parts for reseting * the CPU. But there are couple of exceptions, the 5272 and the 547x * have something completely special to them, and we let their specific * subarch code handle them. */ #ifdef MCFSIM_SYPCR static void mcf_cpu_reset(void) { local_irq_disable(); /* Set watchdog to soft reset, and enabled */ __raw_writeb(0xc0, MCF_MBAR + MCFSIM_SYPCR); for (;;) /* wait for watchdog to timeout */; } #endif #ifdef MCF_RCR static void mcf_cpu_reset(void) { local_irq_disable(); __raw_writeb(MCF_RCR_SWRESET, MCF_RCR); } #endif static int __init mcf_setup_reset(void) { mach_reset = mcf_cpu_reset; return 0; } arch_initcall(mcf_setup_reset);
gpl-2.0
klabit87/jflte_vzw_of1
drivers/isdn/hardware/eicon/capifunc.c
4809
31342
/* $Id: capifunc.c,v 1.61.4.7 2005/02/11 19:40:25 armin Exp $
 *
 * ISDN interface module for Eicon active cards DIVA.
 * CAPI Interface common functions
 *
 * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
 * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

#include "platform.h"
#include "os_capi.h"
#include "di_defs.h"
#include "capi20.h"
#include "divacapi.h"
#include "divasync.h"
#include "capifunc.h"

#define DBG_MINIMUM  (DL_LOG + DL_FTL + DL_ERR)
#define DBG_DEFAULT  (DBG_MINIMUM + DL_XLOG + DL_REG)

/* Global adapter/application tables, allocated in init_main_structs(). */
DIVA_CAPI_ADAPTER *adapter = (DIVA_CAPI_ADAPTER *) NULL;
APPL *application = (APPL *) NULL;
byte max_appl = MAX_APPL;
byte max_adapter = 0;
/* Scratch message used to remap the controller number before api_put(). */
static CAPI_MSG *mapped_msg = (CAPI_MSG *) NULL;

byte UnMapController(byte);
char DRIVERRELEASE_CAPI[32];

extern void AutomaticLaw(DIVA_CAPI_ADAPTER *);
extern void callback(ENTITY *);
extern word api_remove_start(void);
extern word CapiRelease(word);
extern word CapiRegister(word);
extern word api_put(APPL *, CAPI_MSG *);

/* api_lock serializes all access to the adapter/application/LI tables. */
static diva_os_spin_lock_t api_lock;

static LIST_HEAD(cards);	/* list of attached diva_card structures */

static dword notify_handle;	/* handle for DIDD adapter notifications */
static void DIRequest(ENTITY *e);
static DESCRIPTOR MAdapter;	/* maintenance (debug) adapter descriptor */
static DESCRIPTOR DAdapter;	/* main DIDD adapter descriptor */
/* ControllerMap[card->Id] == CAPI controller number of that card. */
static byte ControllerMap[MAX_DESCRIPTORS + 1];

static void diva_register_appl(struct capi_ctr *, __u16,
			       capi_register_params *);
static void diva_release_appl(struct capi_ctr *, __u16);
static char *diva_procinfo(struct capi_ctr *);
static u16 diva_send_message(struct capi_ctr *,
			     diva_os_message_buffer_s *);
extern void diva_os_set_controller_struct(struct capi_ctr *);

extern void DIVA_DIDD_Read(DESCRIPTOR *, int);

/*
 * debug
 */
static void no_printf(unsigned char *, ...);
#include "debuglib.c"

/*
 * Forward a printf-style debug message to the maintenance driver,
 * either via the IRQ-safe hook or the legacy hook -- whichever is set.
 */
static void xlog(char *x, ...)
{
#ifndef DIVA_NO_DEBUGLIB
	va_list ap;
	if (myDriverDebugHandle.dbgMask & DL_XLOG) {
		va_start(ap, x);
		if (myDriverDebugHandle.dbg_irq) {
			myDriverDebugHandle.dbg_irq(myDriverDebugHandle.id,
						    DLI_XLOG, x, ap);
		} else if (myDriverDebugHandle.dbg_old) {
			myDriverDebugHandle.dbg_old(myDriverDebugHandle.id,
						    x, ap);
		}
		va_end(ap);
	}
#endif
}

/*
 * info for proc: report the controller's serial number string.
 */
static char *diva_procinfo(struct capi_ctr *ctrl)
{
	return (ctrl->serial);
}

/*
 * stop debugging: deregister from the maintenance driver and
 * redirect dprintf to the no-op sink.
 */
static void stop_dbg(void)
{
	DbgDeregister();
	memset(&MAdapter, 0, sizeof(MAdapter));
	dprintf = no_printf;
}

/*
 * dummy debug function (used when no maintenance driver is present)
 */
static void no_printf(unsigned char *x, ...)
{
}

/*
 * Controller mapping: translate an external CAPI controller number
 * into the internal card Id.  Bit 7 (external controller flag) is
 * masked off for the lookup and restored in the result.  If no match
 * is found, the requested number is remembered in ControllerMap[0]
 * and 0 is returned as the mapped value.
 */
byte MapController(byte Controller)
{
	byte i;
	byte MappedController = 0;
	byte ctrl = Controller & 0x7f;	/* mask external controller bit off */

	for (i = 1; i < max_adapter + 1; i++) {
		if (ctrl == ControllerMap[i]) {
			MappedController = (byte) i;
			break;
		}
	}
	if (i > max_adapter) {
		ControllerMap[0] = ctrl;
		MappedController = 0;
	}
	return (MappedController | (Controller & 0x80));	/* put back external controller bit */
}

/*
 * Controller unmapping: inverse of MapController() -- translate an
 * internal card Id back to the external controller number,
 * preserving bit 7.
 */
byte UnMapController(byte MappedController)
{
	byte Controller;
	byte ctrl = MappedController & 0x7f;	/* mask external controller bit off */

	if (ctrl <= max_adapter) {
		Controller = ControllerMap[ctrl];
	} else {
		Controller = 0;
	}

	return (Controller | (MappedController & 0x80));	/* put back external controller bit */
}

/*
 * find a new free id: first adapter slot with Id == 0, returned
 * 1-based.  NOTE(review): if every slot is taken this returns
 * MAX_DESCRIPTORS + 1, which callers index as adapter[Id - 1] --
 * verify callers can never exceed MAX_DESCRIPTORS adapters.
 */
static int find_free_id(void)
{
	int num = 0;
	DIVA_CAPI_ADAPTER *a;

	while (num < MAX_DESCRIPTORS) {
		a = &adapter[num];
		if (!a->Id)
			break;
		num++;
	}
	return (num + 1);
}

/*
 * find a card structure by controller number; returns NULL when the
 * card is not found or is currently being removed.
 */
static diva_card *find_card_by_ctrl(word controller)
{
	struct list_head *tmp;
	diva_card *card;

	list_for_each(tmp, &cards) {
		card = list_entry(tmp, diva_card, list);
		if (ControllerMap[card->Id] == controller) {
			if (card->remove_in_progress)
				card = NULL;
			return (card);
		}
	}
	return (diva_card *) 0;
}

/*
 * Buffer RX/TX
 */

/* Mark transmit buffer 'ref' as in use; the opaque handle is the index. */
void *TransmitBufferSet(APPL *appl, dword ref)
{
	appl->xbuffer_used[ref] = true;
	DBG_PRV1(("%d:xbuf_used(%d)", appl->Id, ref + 1))
	return (void *)(long)ref;
}

/* Resolve a transmit buffer handle to its data pointer. */
void *TransmitBufferGet(APPL *appl, void *p)
{
	if (appl->xbuffer_internal[(dword)(long)p])
		return appl->xbuffer_internal[(dword)(long)p];

	return appl->xbuffer_ptr[(dword)(long)p];
}

/* Release the transmit buffer identified by handle 'p'. */
void TransmitBufferFree(APPL *appl, void *p)
{
	appl->xbuffer_used[(dword)(long)p] = false;
	DBG_PRV1(("%d:xbuf_free(%d)", appl->Id, ((dword)(long)p) + 1))
}

/* Return receive buffer slot 'Num' (slots are MaxDataLength apart). */
void *ReceiveBufferGet(APPL *appl, int Num)
{
	return &appl->ReceiveBuffer[Num * appl->MaxDataLength];
}

/*
 * api_remove_start/complete for cleanup
 */
void api_remove_complete(void)
{
	DBG_PRV1(("api_remove_complete"))
}

/*
 * main function called by message.c
 *
 * Build a CAPI message for application 'appl' from the printf-like
 * 'format' ('b'yte, 'w'ord, 'd'word, 's'tring), copy it (plus any
 * DATA_B3 payload) into a freshly allocated message buffer and hand
 * it to the CAPI layer of the owning card.  Drops the message if no
 * buffer is available or the controller cannot be found.
 */
void sendf(APPL *appl, word command, dword Id, word Number, byte *format, ...)
{
	word i, j;
	word length = 12, dlength = 0;	/* header is 12 bytes */
	byte *write;
	CAPI_MSG msg;
	byte *string = NULL;
	va_list ap;
	diva_os_message_buffer_s *dmb;
	diva_card *card = NULL;
	dword tmp;

	if (!appl)
		return;

	DBG_PRV1(("sendf(a=%d,cmd=%x,format=%s)",
		  appl->Id, command, (byte *) format))

	PUT_WORD(&msg.header.appl_id, appl->Id);
	PUT_WORD(&msg.header.command, command);
	/* 0x82xx messages get a fresh sequence number from the APPL */
	if ((byte) (command >> 8) == 0x82)
		Number = appl->Number++;
	PUT_WORD(&msg.header.number, Number);

	PUT_DWORD(&msg.header.controller, Id);
	write = (byte *)&msg;
	write += 12;

	va_start(ap, format);
	for (i = 0; format[i]; i++) {
		switch (format[i]) {
		case 'b':
			tmp = va_arg(ap, dword);
			*(byte *) write = (byte) (tmp & 0xff);
			write += 1;
			length += 1;
			break;
		case 'w':
			tmp = va_arg(ap, dword);
			PUT_WORD(write, (tmp & 0xffff));
			write += 2;
			length += 2;
			break;
		case 'd':
			tmp = va_arg(ap, dword);
			PUT_DWORD(write, tmp);
			write += 4;
			length += 4;
			break;
		case 's':
		case 'S':
			/* length-prefixed string: byte 0 is the length */
			string = va_arg(ap, byte *);
			length += string[0] + 1;
			for (j = 0; j <= string[0]; j++)
				*write++ = string[j];
			break;
		}
	}
	va_end(ap);

	PUT_WORD(&msg.header.length, length);
	msg.header.controller = UnMapController(msg.header.controller);

	if (command == _DATA_B3_I)
		dlength = GET_WORD(((byte *)&msg.info.data_b3_ind.Data_Length));

	if (!(dmb = diva_os_alloc_message_buffer(length + dlength,
						 (void **) &write))) {
		DBG_ERR(("sendf: alloc_message_buffer failed, incoming msg dropped."))
		return;
	}

	/* copy msg header to sk_buff */
	memcpy(write, (byte *)&msg, length);

	/* if DATA_B3_IND, copy data too */
	if (command == _DATA_B3_I) {
		dword data = GET_DWORD(&msg.info.data_b3_ind.Data);
		memcpy(write + length, (void *)(long)data, dlength);
	}

#ifndef DIVA_NO_DEBUGLIB
	if (myDriverDebugHandle.dbgMask & DL_XLOG) {
		switch (command) {
		default:
			xlog("\x00\x02", &msg, 0x81, length);
			break;
		case _DATA_B3_R | CONFIRM:
			if (myDriverDebugHandle.dbgMask & DL_BLK)
				xlog("\x00\x02", &msg, 0x81, length);
			break;
		case _DATA_B3_I:
			if (myDriverDebugHandle.dbgMask & DL_BLK) {
				xlog("\x00\x02", &msg, 0x81, length);
				for (i = 0; i < dlength; i += 256) {
					DBG_BLK((((char *)(long)GET_DWORD(&msg.info.data_b3_ind.Data)) + i,
						 ((dlength - i) < 256) ? (dlength - i) : 256))
					if (!(myDriverDebugHandle.dbgMask & DL_PRV0))
						break; /* not more if not explicitly requested */
				}
			}
			break;
		}
	}
#endif

	/* find the card structure for this controller */
	if (!(card = find_card_by_ctrl(write[8] & 0x7f))) {
		DBG_ERR(("sendf - controller %d not found, incoming msg dropped",
			 write[8] & 0x7f))
		diva_os_free_message_buffer(dmb);
		return;
	}

	/* send capi msg to capi layer */
	capi_ctr_handle_message(&card->capi_ctrl, appl->Id, dmb);
}

/*
 * cleanup adapter
 *
 * Remove adapter 'id' from the line-interconnect (LI) tables by
 * compacting li_config_table over its channel range, fix up the
 * li_base of the following adapters, queue the adapter's memory on
 * 'free_mem_q' (to be freed outside the spinlock) and clear the slot.
 */
static void clean_adapter(int id, struct list_head *free_mem_q)
{
	DIVA_CAPI_ADAPTER *a;
	int i, k;

	a = &adapter[id];

	k = li_total_channels - a->li_channels;
	if (k == 0) {
		/* last adapter: release the whole LI table */
		if (li_config_table) {
			list_add((struct list_head *)li_config_table,
				 free_mem_q);
			li_config_table = NULL;
		}
	} else {
		if (a->li_base < k) {
			/* close the gap left by this adapter's channels */
			memmove(&li_config_table[a->li_base],
				&li_config_table[a->li_base + a->li_channels],
				(k - a->li_base) * sizeof(LI_CONFIG));
			for (i = 0; i < k; i++) {
				memmove(&li_config_table[i].flag_table[a->li_base],
					&li_config_table[i].flag_table[a->li_base + a->li_channels],
					k - a->li_base);
				memmove(&li_config_table[i].coef_table[a->li_base],
					&li_config_table[i].coef_table[a->li_base + a->li_channels],
					k - a->li_base);
			}
		}
	}
	li_total_channels = k;

	for (i = id; i < max_adapter; i++) {
		if (adapter[i].request)
			adapter[i].li_base -= a->li_channels;
	}

	if (a->plci)
		list_add((struct list_head *)a->plci, free_mem_q);

	memset(a, 0x00, sizeof(DIVA_CAPI_ADAPTER));

	/* shrink max_adapter past trailing empty slots */
	while ((max_adapter != 0) && !adapter[max_adapter - 1].request)
		max_adapter--;
}

/*
 * remove a card, but ensures consistent state of LI tables
 * in the time adapter is removed
 */
static void divacapi_remove_card(DESCRIPTOR *d)
{
	diva_card *card = NULL;
	diva_os_spin_lock_magic_t old_irql;
	LIST_HEAD(free_mem_q);
	struct list_head *link;
	struct list_head *tmp;

	/*
	 * Set "remove in progress flag".
	 * Ensures that there is no call from sendf to CAPI in
	 * the time CAPI controller is about to be removed.
	 */
	diva_os_enter_spin_lock(&api_lock, &old_irql, "remove card");
	list_for_each(tmp, &cards) {
		card = list_entry(tmp, diva_card, list);
		if (card->d.request == d->request) {
			card->remove_in_progress = 1;
			list_del(tmp);
			break;
		}
	}
	diva_os_leave_spin_lock(&api_lock, &old_irql, "remove card");

	if (card) {
		/*
		 * Detach CAPI. Sendf cannot call to CAPI any more.
		 * After detach no call to send_message() is done too.
		 */
		detach_capi_ctr(&card->capi_ctrl);

		/*
		 * Now get API lock (to ensure stable state of LI tables)
		 * and update the adapter map/LI table.
		 */
		diva_os_enter_spin_lock(&api_lock, &old_irql, "remove card");

		clean_adapter(card->Id - 1, &free_mem_q);
		DBG_TRC(("DelAdapterMap (%d) -> (%d)",
			 ControllerMap[card->Id], card->Id))
		ControllerMap[card->Id] = 0;
		DBG_TRC(("adapter remove, max_adapter=%d", max_adapter));
		diva_os_leave_spin_lock(&api_lock, &old_irql, "remove card");

		/* After releasing the lock, we can free the memory */
		diva_os_free(0, card);
	}

	/* free queued memory areas */
	list_for_each_safe(link, tmp, &free_mem_q) {
		list_del(link);
		diva_os_free(0, link);
	}
}

/*
 * remove cards
 *
 * Each removal drops the lock (divacapi_remove_card takes it itself),
 * so the list walk restarts from scratch after every removal.
 */
static void divacapi_remove_cards(void)
{
	DESCRIPTOR d;
	struct list_head *tmp;
	diva_card *card;
	diva_os_spin_lock_magic_t old_irql;

rescan:
	diva_os_enter_spin_lock(&api_lock, &old_irql, "remove cards");
	list_for_each(tmp, &cards) {
		card = list_entry(tmp, diva_card, list);
		diva_os_leave_spin_lock(&api_lock, &old_irql, "remove cards");
		d.request = card->d.request;
		divacapi_remove_card(&d);
		goto rescan;
	}
	diva_os_leave_spin_lock(&api_lock, &old_irql, "remove cards");
}

/*
 * sync_callback: run the IDI callback under the API lock.
 */
static void sync_callback(ENTITY *e)
{
	diva_os_spin_lock_magic_t old_irql;

	DBG_TRC(("cb:Id=%x,Rc=%x,Ind=%x", e->Id, e->Rc, e->Ind))

	diva_os_enter_spin_lock(&api_lock, &old_irql, "sync_callback");
	callback(e);
	diva_os_leave_spin_lock(&api_lock, &old_irql, "sync_callback");
}

/*
 * add a new card
 *
 * Allocate and initialise a diva_card for DIDD descriptor 'd',
 * attach it as a CAPI controller, allocate its PLCI array, grow the
 * LI table to make room for its channels, wait (bounded) for
 * AutomaticLaw to settle and finally report the controller ready.
 * Returns 1 on success, 0 on any allocation/attach failure.
 */
static int diva_add_card(DESCRIPTOR *d)
{
	int k = 0, i = 0;
	diva_os_spin_lock_magic_t old_irql;
	diva_card *card = NULL;
	struct capi_ctr *ctrl = NULL;
	DIVA_CAPI_ADAPTER *a = NULL;
	IDI_SYNC_REQ sync_req;
	char serial[16];
	void *mem_to_free;
	LI_CONFIG *new_li_config_table;
	int j;

	if (!(card = (diva_card *) diva_os_malloc(0, sizeof(diva_card)))) {
		DBG_ERR(("diva_add_card: failed to allocate card struct."))
		return (0);
	}
	memset((char *) card, 0x00, sizeof(diva_card));
	memcpy(&card->d, d, sizeof(DESCRIPTOR));
	sync_req.GetName.Req = 0;
	sync_req.GetName.Rc = IDI_SYNC_REQ_GET_NAME;
	card->d.request((ENTITY *)&sync_req);
	strlcpy(card->name, sync_req.GetName.name, sizeof(card->name));
	ctrl = &card->capi_ctrl;
	/* NOTE(review): unbounded strcpy -- assumes ctrl->name is at
	 * least as large as card->name; confirm the struct sizes. */
	strcpy(ctrl->name, card->name);
	ctrl->register_appl = diva_register_appl;
	ctrl->release_appl = diva_release_appl;
	ctrl->send_message = diva_send_message;
	ctrl->procinfo = diva_procinfo;
	ctrl->driverdata = card;
	diva_os_set_controller_struct(ctrl);

	if (attach_capi_ctr(ctrl)) {
		DBG_ERR(("diva_add_card: failed to attach controller."))
		diva_os_free(0, card);
		return (0);
	}

	diva_os_enter_spin_lock(&api_lock, &old_irql, "find id");
	card->Id = find_free_id();
	diva_os_leave_spin_lock(&api_lock, &old_irql, "find id");

	strlcpy(ctrl->manu, M_COMPANY, sizeof(ctrl->manu));
	ctrl->version.majorversion = 2;
	ctrl->version.minorversion = 0;
	ctrl->version.majormanuversion = DRRELMAJOR;
	ctrl->version.minormanuversion = DRRELMINOR;
	sync_req.GetSerial.Req = 0;
	sync_req.GetSerial.Rc = IDI_SYNC_REQ_GET_SERIAL;
	sync_req.GetSerial.serial = 0;
	card->d.request((ENTITY *)&sync_req);
	/* top byte of the serial selects a "serial-suffix" numbering */
	/* NOTE(review): "%ld" assumes dword is long-sized -- verify on
	 * 64-bit builds. */
	if ((i = ((sync_req.GetSerial.serial & 0xff000000) >> 24))) {
		sprintf(serial, "%ld-%d",
			sync_req.GetSerial.serial & 0x00ffffff, i + 1);
	} else {
		sprintf(serial, "%ld", sync_req.GetSerial.serial);
	}
	serial[CAPI_SERIAL_LEN - 1] = 0;
	strlcpy(ctrl->serial, serial, sizeof(ctrl->serial));

	a = &adapter[card->Id - 1];
	card->adapter = a;
	a->os_card = card;
	ControllerMap[card->Id] = (byte) (ctrl->cnr);

	DBG_TRC(("AddAdapterMap (%d) -> (%d)", ctrl->cnr, card->Id))

	sync_req.xdi_capi_prms.Req = 0;
	sync_req.xdi_capi_prms.Rc = IDI_SYNC_REQ_XDI_GET_CAPI_PARAMS;
	sync_req.xdi_capi_prms.info.structure_length =
		sizeof(diva_xdi_get_capi_parameters_t);
	card->d.request((ENTITY *)&sync_req);
	a->flag_dynamic_l1_down =
		sync_req.xdi_capi_prms.info.flag_dynamic_l1_down;
	a->group_optimization_enabled =
		sync_req.xdi_capi_prms.info.group_optimization_enabled;
	a->request = DIRequest;	/* card->d.request; */
	a->max_plci = card->d.channels + 30;
	a->max_listen = (card->d.channels > 2) ? 8 : 2;
	if (!(a->plci =
	      (PLCI *) diva_os_malloc(0, sizeof(PLCI) * a->max_plci))) {
		DBG_ERR(("diva_add_card: failed alloc plci struct."))
		memset(a, 0, sizeof(DIVA_CAPI_ADAPTER));
		return (0);
	}
	memset(a->plci, 0, sizeof(PLCI) * a->max_plci);

	/* wire every PLCI's signalling and network-layer entities
	 * back to this adapter via sync_callback */
	for (k = 0; k < a->max_plci; k++) {
		a->Id = (byte) card->Id;
		a->plci[k].Sig.callback = sync_callback;
		a->plci[k].Sig.XNum = 1;
		a->plci[k].Sig.X = a->plci[k].XData;
		a->plci[k].Sig.user[0] = (word) (card->Id - 1);
		a->plci[k].Sig.user[1] = (word) k;
		a->plci[k].NL.callback = sync_callback;
		a->plci[k].NL.XNum = 1;
		a->plci[k].NL.X = a->plci[k].XData;
		a->plci[k].NL.user[0] = (word) ((card->Id - 1) | 0x8000);
		a->plci[k].NL.user[1] = (word) k;
		a->plci[k].adapter = a;
	}

	a->profile.Number = card->Id;
	a->profile.Channels = card->d.channels;
	if (card->d.features & DI_FAX3) {
		a->profile.Global_Options = 0x71;
		if (card->d.features & DI_CODEC)
			a->profile.Global_Options |= 0x6;
#if IMPLEMENT_DTMF
		a->profile.Global_Options |= 0x8;
#endif				/* IMPLEMENT_DTMF */
		a->profile.Global_Options |= 0x80;	/* Line Interconnect */
#if IMPLEMENT_ECHO_CANCELLER
		a->profile.Global_Options |= 0x100;
#endif				/* IMPLEMENT_ECHO_CANCELLER */
		a->profile.B1_Protocols = 0xdf;
		a->profile.B2_Protocols = 0x1fdb;
		a->profile.B3_Protocols = 0xb7;
		a->manufacturer_features = MANUFACTURER_FEATURE_HARDDTMF;
	} else {
		a->profile.Global_Options = 0x71;
		if (card->d.features & DI_CODEC)
			a->profile.Global_Options |= 0x2;
		a->profile.B1_Protocols = 0x43;
		a->profile.B2_Protocols = 0x1f0f;
		a->profile.B3_Protocols = 0x07;
		a->manufacturer_features = 0;
	}

	a->li_pri = (a->profile.Channels > 2);
	a->li_channels = a->li_pri ? MIXER_CHANNELS_PRI : MIXER_CHANNELS_BRI;
	/* li_base = end of the last active adapter's LI channel range */
	a->li_base = 0;
	for (i = 0; &adapter[i] != a; i++) {
		if (adapter[i].request)
			a->li_base = adapter[i].li_base + adapter[i].li_channels;
	}
	k = li_total_channels + a->li_channels;
	/* one allocation holds k LI_CONFIGs plus a flag and a coef row
	 * (each padded to a 4-byte multiple) per channel */
	new_li_config_table =
		(LI_CONFIG *) diva_os_malloc(0, ((k * sizeof(LI_CONFIG) + 3) & ~3) + (2 * k) * ((k + 3) & ~3));
	if (new_li_config_table == NULL) {
		DBG_ERR(("diva_add_card: failed alloc li_config table."))
		memset(a, 0, sizeof(DIVA_CAPI_ADAPTER));
		return (0);
	}

	/* Prevent access to line interconnect table in process update */
	diva_os_enter_spin_lock(&api_lock, &old_irql, "add card");

	/* rebuild the LI table: rows/columns in [li_base, li_base +
	 * li_channels) belong to the new adapter and start zeroed; the
	 * rest is copied from the old table (j tracks the old row) */
	j = 0;
	for (i = 0; i < k; i++) {
		if ((i >= a->li_base) && (i < a->li_base + a->li_channels))
			memset(&new_li_config_table[i], 0, sizeof(LI_CONFIG));
		else
			memcpy(&new_li_config_table[i], &li_config_table[j],
			       sizeof(LI_CONFIG));
		new_li_config_table[i].flag_table =
			((byte *) new_li_config_table) + (((k * sizeof(LI_CONFIG) + 3) & ~3) + (2 * i) * ((k + 3) & ~3));
		new_li_config_table[i].coef_table =
			((byte *) new_li_config_table) + (((k * sizeof(LI_CONFIG) + 3) & ~3) + (2 * i + 1) * ((k + 3) & ~3));
		if ((i >= a->li_base) && (i < a->li_base + a->li_channels)) {
			new_li_config_table[i].adapter = a;
			memset(&new_li_config_table[i].flag_table[0], 0, k);
			memset(&new_li_config_table[i].coef_table[0], 0, k);
		} else {
			if (a->li_base != 0) {
				memcpy(&new_li_config_table[i].flag_table[0],
				       &li_config_table[j].flag_table[0],
				       a->li_base);
				memcpy(&new_li_config_table[i].coef_table[0],
				       &li_config_table[j].coef_table[0],
				       a->li_base);
			}
			memset(&new_li_config_table[i].flag_table[a->li_base], 0, a->li_channels);
			memset(&new_li_config_table[i].coef_table[a->li_base], 0, a->li_channels);
			if (a->li_base + a->li_channels < k) {
				memcpy(&new_li_config_table[i].flag_table[a->li_base + a->li_channels],
				       &li_config_table[j].flag_table[a->li_base],
				       k - (a->li_base + a->li_channels));
				memcpy(&new_li_config_table[i].coef_table[a->li_base + a->li_channels],
				       &li_config_table[j].coef_table[a->li_base],
				       k - (a->li_base + a->li_channels));
			}
			j++;
		}
	}
	li_total_channels = k;
	mem_to_free = li_config_table;	/* old table freed after unlock */
	li_config_table = new_li_config_table;
	for (i = card->Id; i < max_adapter; i++) {
		if (adapter[i].request)
			adapter[i].li_base += a->li_channels;
	}

	if (a == &adapter[max_adapter])
		max_adapter++;

	list_add(&(card->list), &cards);
	AutomaticLaw(a);

	diva_os_leave_spin_lock(&api_lock, &old_irql, "add card");

	if (mem_to_free) {
		diva_os_free(0, mem_to_free);
	}

	/* poll (max ~30 * 10ms) until automatic-law negotiation settles */
	i = 0;
	while (i++ < 30) {
		if (a->automatic_law > 3)
			break;
		diva_os_sleep(10);
	}

	/* profile information */
	PUT_WORD(&ctrl->profile.nbchannel, card->d.channels);
	ctrl->profile.goptions = a->profile.Global_Options;
	ctrl->profile.support1 = a->profile.B1_Protocols;
	ctrl->profile.support2 = a->profile.B2_Protocols;
	ctrl->profile.support3 = a->profile.B3_Protocols;
	/* manufacturer profile information */
	ctrl->profile.manu[0] = a->man_profile.private_options;
	ctrl->profile.manu[1] = a->man_profile.rtp_primary_payloads;
	ctrl->profile.manu[2] = a->man_profile.rtp_additional_payloads;
	ctrl->profile.manu[3] = 0;
	ctrl->profile.manu[4] = 0;

	capi_ctr_ready(ctrl);

	DBG_TRC(("adapter added, max_adapter=%d", max_adapter));
	return (1);
}

/*
 * register appl
 *
 * CAPI_REGISTER handler: validate the parameters, allocate one
 * contiguous memory area holding all per-application buffers
 * (carved up below), then initialise the APPL slot under the API
 * lock and register the application with the CAPI core.
 */
static void diva_register_appl(struct capi_ctr *ctrl, __u16 appl,
			       capi_register_params *rp)
{
	APPL *this;
	word bnum, xnum;
	int i = 0;
	unsigned char *p;
	void *DataNCCI, *DataFlags, *ReceiveBuffer, *xbuffer_used;
	void **xbuffer_ptr, **xbuffer_internal;
	diva_os_spin_lock_magic_t old_irql;
	unsigned int mem_len;
	int nconn = rp->level3cnt;

	if (diva_os_in_irq()) {
		DBG_ERR(("CAPI_REGISTER - in irq context !"))
		return;
	}

	DBG_TRC(("application register Id=%d", appl))

	if (appl > MAX_APPL) {
		DBG_ERR(("CAPI_REGISTER - appl.Id exceeds MAX_APPL"))
		return;
	}

	/* negative level3cnt means "per B-channel" (CAPI convention) */
	if (nconn <= 0)
		nconn = ctrl->profile.nbchannel * -nconn;

	if (nconn == 0)
		nconn = ctrl->profile.nbchannel;

	DBG_LOG(("CAPI_REGISTER - Id = %d", appl))
	DBG_LOG((" MaxLogicalConnections = %d(%d)", nconn, rp->level3cnt))
	DBG_LOG((" MaxBDataBuffers = %d", rp->datablkcnt))
	DBG_LOG((" MaxBDataLength = %d", rp->datablklen))

	if (nconn < 1 ||
	    nconn > 255 || rp->datablklen < 80 ||
	    rp->datablklen > 2150 || rp->datablkcnt > 255) {
		DBG_ERR(("CAPI_REGISTER - invalid parameters"))
		return;
	}

	if (application[appl - 1].Id == appl) {
		DBG_LOG(("CAPI_REGISTER - appl already registered"))
		return;	/* appl already registered */
	}

	/* alloc memory */
	bnum = nconn * rp->datablkcnt;
	xnum = nconn * MAX_DATA_B3;

	mem_len = bnum * sizeof(word);		/* DataNCCI */
	mem_len += bnum * sizeof(word);		/* DataFlags */
	mem_len += bnum * rp->datablklen;	/* ReceiveBuffer */
	mem_len += xnum;			/* xbuffer_used */
	mem_len += xnum * sizeof(void *);	/* xbuffer_ptr */
	mem_len += xnum * sizeof(void *);	/* xbuffer_internal */
	mem_len += xnum * rp->datablklen;	/* xbuffer_ptr[xnum] */

	DBG_LOG((" Allocated Memory = %d", mem_len))
	if (!(p = diva_os_malloc(0, mem_len))) {
		DBG_ERR(("CAPI_REGISTER - memory allocation failed"))
		return;
	}
	memset(p, 0, mem_len);

	/* carve the single allocation into the individual buffers */
	DataNCCI = (void *)p;
	p += bnum * sizeof(word);
	DataFlags = (void *)p;
	p += bnum * sizeof(word);
	ReceiveBuffer = (void *)p;
	p += bnum * rp->datablklen;
	xbuffer_used = (void *)p;
	p += xnum;
	xbuffer_ptr = (void **)p;
	p += xnum * sizeof(void *);
	xbuffer_internal = (void **)p;
	p += xnum * sizeof(void *);
	for (i = 0; i < xnum; i++) {
		xbuffer_ptr[i] = (void *)p;
		p += rp->datablklen;
	}

	/* initialize application data */
	diva_os_enter_spin_lock(&api_lock, &old_irql, "register_appl");

	this = &application[appl - 1];
	memset(this, 0, sizeof(APPL));

	this->Id = appl;

	for (i = 0; i < max_adapter; i++) {
		adapter[i].CIP_Mask[appl - 1] = 0;
	}

	this->queue_size = 1000;

	this->MaxNCCI = (byte) nconn;
	this->MaxNCCIData = (byte) rp->datablkcnt;
	this->MaxBuffer = bnum;
	this->MaxDataLength = rp->datablklen;

	this->DataNCCI = DataNCCI;
	this->DataFlags = DataFlags;
	this->ReceiveBuffer = ReceiveBuffer;
	this->xbuffer_used = xbuffer_used;
	this->xbuffer_ptr = xbuffer_ptr;
	this->xbuffer_internal = xbuffer_internal;
	for (i = 0; i < xnum; i++) {
		this->xbuffer_ptr[i] = xbuffer_ptr[i];
	}

	CapiRegister(this->Id);

	diva_os_leave_spin_lock(&api_lock, &old_irql, "register_appl");
}

/*
 * release appl
 *
 * CAPI_RELEASE handler: under the API lock, release the application
 * and detach its buffer block (DataNCCI is the base pointer of the
 * single allocation made in diva_register_appl), then free the block
 * outside the lock.
 */
static void diva_release_appl(struct capi_ctr *ctrl, __u16 appl)
{
	diva_os_spin_lock_magic_t old_irql;
	APPL *this = &application[appl - 1];
	void *mem_to_free = NULL;

	DBG_TRC(("application %d(%d) cleanup", this->Id, appl))

	if (diva_os_in_irq()) {
		DBG_ERR(("CAPI_RELEASE - in irq context !"))
		return;
	}

	diva_os_enter_spin_lock(&api_lock, &old_irql, "release_appl");
	if (this->Id) {
		CapiRelease(this->Id);
		mem_to_free = this->DataNCCI;
		this->DataNCCI = NULL;
		this->Id = 0;
	}
	diva_os_leave_spin_lock(&api_lock, &old_irql, "release_appl");

	if (mem_to_free)
		diva_os_free(0, mem_to_free);
}

/*
 * send message
 *
 * CAPI_PUT_MESSAGE handler: validate the message, copy DATA_B3
 * payloads into a free transmit buffer, remap the controller number
 * via the shared 'mapped_msg' scratch message and hand it to
 * api_put().  The message buffer is freed only on success; on error
 * the CAPI error code is returned and the caller keeps ownership.
 */
static u16 diva_send_message(struct capi_ctr *ctrl,
			     diva_os_message_buffer_s *dmb)
{
	int i = 0;
	word ret = 0;
	diva_os_spin_lock_magic_t old_irql;
	CAPI_MSG *msg = (CAPI_MSG *) DIVA_MESSAGE_BUFFER_DATA(dmb);
	APPL *this = &application[GET_WORD(&msg->header.appl_id) - 1];
	diva_card *card = ctrl->driverdata;
	__u32 length = DIVA_MESSAGE_BUFFER_LEN(dmb);
	word clength = GET_WORD(&msg->header.length);
	word command = GET_WORD(&msg->header.command);
	u16 retval = CAPI_NOERROR;

	if (diva_os_in_irq()) {
		DBG_ERR(("CAPI_SEND_MSG - in irq context !"))
		return CAPI_REGOSRESOURCEERR;
	}
	DBG_PRV1(("Write - appl = %d, cmd = 0x%x", this->Id, command))

	if (card->remove_in_progress) {
		DBG_ERR(("CAPI_SEND_MSG - remove in progress!"))
		return CAPI_REGOSRESOURCEERR;
	}

	diva_os_enter_spin_lock(&api_lock, &old_irql, "send message");

	if (!this->Id) {
		diva_os_leave_spin_lock(&api_lock, &old_irql, "send message");
		return CAPI_ILLAPPNR;
	}

	/* patch controller number */
	msg->header.controller = ControllerMap[card->Id]
		| (msg->header.controller & 0x80);	/* preserve external controller bit */

	switch (command) {
	default:
		xlog("\x00\x02", msg, 0x80, clength);
		break;

	case _DATA_B3_I | RESPONSE:
#ifndef DIVA_NO_DEBUGLIB
		if (myDriverDebugHandle.dbgMask & DL_BLK)
			xlog("\x00\x02", msg, 0x80, clength);
#endif
		break;

	case _DATA_B3_R:
#ifndef DIVA_NO_DEBUGLIB
		if (myDriverDebugHandle.dbgMask & DL_BLK)
			xlog("\x00\x02", msg, 0x80, clength);
#endif
		if (clength == 24)
			clength = 22;	/* workaround for PPcom bug */
		/* header is always 22 */
		if (GET_WORD(&msg->info.data_b3_req.Data_Length) >
		    this->MaxDataLength
		    || GET_WORD(&msg->info.data_b3_req.Data_Length) >
		    (length - clength)) {
			DBG_ERR(("Write - invalid message size"))
			retval = CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
			goto write_end;
		}

		/* find a free transmit buffer slot */
		for (i = 0; i < (MAX_DATA_B3 * this->MaxNCCI)
			     && this->xbuffer_used[i]; i++);
		if (i == (MAX_DATA_B3 * this->MaxNCCI)) {
			DBG_ERR(("Write - too many data pending"))
			retval = CAPI_SENDQUEUEFULL;
			goto write_end;
		}
		msg->info.data_b3_req.Data = i;

		this->xbuffer_internal[i] = NULL;
		memcpy(this->xbuffer_ptr[i], &((__u8 *) msg)[clength],
		       GET_WORD(&msg->info.data_b3_req.Data_Length));

#ifndef DIVA_NO_DEBUGLIB
		if ((myDriverDebugHandle.dbgMask & DL_BLK)
		    && (myDriverDebugHandle.dbgMask & DL_XLOG)) {
			int j;
			for (j = 0;
			     j < GET_WORD(&msg->info.data_b3_req.Data_Length);
			     j += 256) {
				DBG_BLK((((char *) this->xbuffer_ptr[i]) + j,
					 ((GET_WORD(&msg->info.data_b3_req.Data_Length) - j) < 256) ? (GET_WORD(&msg->info.data_b3_req.Data_Length) - j) : 256))
				if (!(myDriverDebugHandle.dbgMask & DL_PRV0))
					break;	/* not more if not explicitly requested */
			}
		}
#endif
		break;
	}

	/* remap the controller via the shared scratch message
	 * (mapped_msg access is serialized by api_lock) */
	memcpy(mapped_msg, msg, (__u32) clength);
	mapped_msg->header.controller = MapController(mapped_msg->header.controller);
	mapped_msg->header.length = clength;
	mapped_msg->header.command = command;
	mapped_msg->header.number = GET_WORD(&msg->header.number);

	ret = api_put(this, mapped_msg);
	switch (ret) {
	case 0:
		break;
	case _BAD_MSG:
		DBG_ERR(("Write - bad message"))
		retval = CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
		break;
	case _QUEUE_FULL:
		DBG_ERR(("Write - queue full"))
		retval = CAPI_SENDQUEUEFULL;
		break;
	default:
		DBG_ERR(("Write - api_put returned unknown error"))
		retval = CAPI_UNKNOWNNOTPAR;
		break;
	}

write_end:
	diva_os_leave_spin_lock(&api_lock, &old_irql, "send message");
	if (retval == CAPI_NOERROR)
		diva_os_free_message_buffer(dmb);
	return retval;
}

/*
 * cards request function: forward an IDI request to the card,
 * flagging the channel in the flow-control skip table when the
 * request targets the entity currently flow-controlled.
 */
static void DIRequest(ENTITY *e)
{
	DIVA_CAPI_ADAPTER *a = &(adapter[(byte) e->user[0]]);
	diva_card *os_card = (diva_card *) a->os_card;

	if (e->Req && (a->FlowControlIdTable[e->ReqCh] == e->Id)) {
		a->FlowControlSkipTable[e->ReqCh] = 1;
	}

	(*(os_card->d.request)) (e);
}

/*
 * callback function from didd
 *
 * Dispatch DIDD adapter change notifications: maintenance adapter
 * arrival/removal toggles debugging; IDI adapters (type 1..15) are
 * added or removed as CAPI controllers.
 */
static void didd_callback(void *context, DESCRIPTOR *adapter, int removal)
{
	if (adapter->type == IDI_DADAPTER) {
		DBG_ERR(("Notification about IDI_DADAPTER change ! Oops."));
		return;
	} else if (adapter->type == IDI_DIMAINT) {
		if (removal) {
			stop_dbg();
		} else {
			memcpy(&MAdapter, adapter, sizeof(MAdapter));
			dprintf = (DIVA_DI_PRINTF) MAdapter.request;
			DbgRegister("CAPI20", DRIVERRELEASE_CAPI, DBG_DEFAULT);
		}
	} else if ((adapter->type > 0) && (adapter->type < 16)) {	/* IDI Adapter */
		if (removal) {
			divacapi_remove_card(adapter);
		} else {
			diva_add_card(adapter);
		}
	}
	return;
}

/*
 * connect to didd
 *
 * Read the DIDD descriptor table, hook up the maintenance (debug)
 * adapter if present, register for adapter change notifications with
 * the D-adapter and add all already-present IDI adapters.  Returns
 * non-zero only when the D-adapter was found.
 */
static int divacapi_connect_didd(void)
{
	int x = 0;
	int dadapter = 0;
	IDI_SYNC_REQ req;
	DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];

	DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));

	for (x = 0; x < MAX_DESCRIPTORS; x++) {
		if (DIDD_Table[x].type == IDI_DIMAINT) {
			/* MAINT found */
			memcpy(&MAdapter, &DIDD_Table[x], sizeof(DAdapter));
			dprintf = (DIVA_DI_PRINTF) MAdapter.request;
			DbgRegister("CAPI20", DRIVERRELEASE_CAPI, DBG_DEFAULT);
			break;
		}
	}
	for (x = 0; x < MAX_DESCRIPTORS; x++) {
		if (DIDD_Table[x].type == IDI_DADAPTER) {
			/* DADAPTER found */
			dadapter = 1;
			memcpy(&DAdapter, &DIDD_Table[x], sizeof(DAdapter));
			req.didd_notify.e.Req = 0;
			req.didd_notify.e.Rc =
				IDI_SYNC_REQ_DIDD_REGISTER_ADAPTER_NOTIFY;
			req.didd_notify.info.callback = (void *)didd_callback;
			req.didd_notify.info.context = NULL;
			DAdapter.request((ENTITY *)&req);
			if (req.didd_notify.e.Rc != 0xff) {
				stop_dbg();
				return (0);
			}
			notify_handle = req.didd_notify.info.handle;
		} else if ((DIDD_Table[x].type > 0)
			   && (DIDD_Table[x].type < 16)) {	/* IDI Adapter found */
			diva_add_card(&DIDD_Table[x]);
		}
	}

	if (!dadapter) {
		stop_dbg();
	}

	return (dadapter);
}

/*
 * disconnect from didd: stop debugging and deregister the adapter
 * change notification registered in divacapi_connect_didd().
 */
static void divacapi_disconnect_didd(void)
{
	IDI_SYNC_REQ req;

	stop_dbg();

	req.didd_notify.e.Req = 0;
	req.didd_notify.e.Rc = IDI_SYNC_REQ_DIDD_REMOVE_ADAPTER_NOTIFY;
	req.didd_notify.info.handle = notify_handle;
	DAdapter.request((ENTITY *)&req);
}

/*
 * we do not provide date/time here,
 * the application should do this.
*/ int fax_head_line_time(char *buffer) { return (0); } /* * init (alloc) main structures */ static int DIVA_INIT_FUNCTION init_main_structs(void) { if (!(mapped_msg = (CAPI_MSG *) diva_os_malloc(0, MAX_MSG_SIZE))) { DBG_ERR(("init: failed alloc mapped_msg.")) return 0; } if (!(adapter = diva_os_malloc(0, sizeof(DIVA_CAPI_ADAPTER) * MAX_DESCRIPTORS))) { DBG_ERR(("init: failed alloc adapter struct.")) diva_os_free(0, mapped_msg); return 0; } memset(adapter, 0, sizeof(DIVA_CAPI_ADAPTER) * MAX_DESCRIPTORS); if (!(application = diva_os_malloc(0, sizeof(APPL) * MAX_APPL))) { DBG_ERR(("init: failed alloc application struct.")) diva_os_free(0, mapped_msg); diva_os_free(0, adapter); return 0; } memset(application, 0, sizeof(APPL) * MAX_APPL); return (1); } /* * remove (free) main structures */ static void remove_main_structs(void) { if (application) diva_os_free(0, application); if (adapter) diva_os_free(0, adapter); if (mapped_msg) diva_os_free(0, mapped_msg); } /* * api_remove_start */ static void do_api_remove_start(void) { diva_os_spin_lock_magic_t old_irql; int ret = 1, count = 100; do { diva_os_enter_spin_lock(&api_lock, &old_irql, "api remove start"); ret = api_remove_start(); diva_os_leave_spin_lock(&api_lock, &old_irql, "api remove start"); diva_os_sleep(10); } while (ret && count--); if (ret) DBG_ERR(("could not remove signaling ID's")) } /* * init */ int DIVA_INIT_FUNCTION init_capifunc(void) { diva_os_initialize_spin_lock(&api_lock, "capifunc"); memset(ControllerMap, 0, MAX_DESCRIPTORS + 1); max_adapter = 0; if (!init_main_structs()) { DBG_ERR(("init: failed to init main structs.")) diva_os_destroy_spin_lock(&api_lock, "capifunc"); return (0); } if (!divacapi_connect_didd()) { DBG_ERR(("init: failed to connect to DIDD.")) do_api_remove_start(); divacapi_remove_cards(); remove_main_structs(); diva_os_destroy_spin_lock(&api_lock, "capifunc"); return (0); } return (1); } /* * finit */ void DIVA_EXIT_FUNCTION finit_capifunc(void) { do_api_remove_start(); 
divacapi_disconnect_didd(); divacapi_remove_cards(); remove_main_structs(); diva_os_destroy_spin_lock(&api_lock, "capifunc"); }
gpl-2.0
boa19861105/Butterfly-Kernel
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
4809
68773
/******************************************************************************* Intel 10 Gigabit PCI Express Linux driver Copyright(c) 1999 - 2012 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include <linux/pci.h> #include <linux/delay.h> #include <linux/sched.h> #include "ixgbe.h" #include "ixgbe_phy.h" #include "ixgbe_mbx.h" #define IXGBE_82599_MAX_TX_QUEUES 128 #define IXGBE_82599_MAX_RX_QUEUES 128 #define IXGBE_82599_RAR_ENTRIES 128 #define IXGBE_82599_MC_TBL_SIZE 128 #define IXGBE_82599_VFT_TBL_SIZE 128 #define IXGBE_82599_RX_PB_SIZE 512 static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); static s32 
ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete); static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete); static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; /* enable the laser control functions for SFP+ fiber */ if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) { mac->ops.disable_tx_laser = &ixgbe_disable_tx_laser_multispeed_fiber; mac->ops.enable_tx_laser = &ixgbe_enable_tx_laser_multispeed_fiber; mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; } else { mac->ops.disable_tx_laser = NULL; mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; } if (hw->phy.multispeed_fiber) { /* Set up dual speed SFP+ support */ mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; } else { if ((mac->ops.get_media_type(hw) == ixgbe_media_type_backplane) && (hw->phy.smart_speed == ixgbe_smart_speed_auto || hw->phy.smart_speed == ixgbe_smart_speed_on) && !ixgbe_verify_lesm_fw_enabled_82599(hw)) mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; else mac->ops.setup_link = &ixgbe_setup_mac_link_82599; } } static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) { s32 ret_val = 0; u32 reg_anlp1 = 0; u32 i = 0; u16 list_offset, data_offset, data_value; if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { ixgbe_init_mac_link_ops_82599(hw); hw->phy.ops.reset = NULL; ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, &data_offset); if (ret_val != 0) goto setup_sfp_out; /* PHY config will finish before releasing the semaphore */ ret_val = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); if (ret_val != 0) { 
ret_val = IXGBE_ERR_SWFW_SYNC; goto setup_sfp_out; } hw->eeprom.ops.read(hw, ++data_offset, &data_value); while (data_value != 0xffff) { IXGBE_WRITE_REG(hw, IXGBE_CORECTL, data_value); IXGBE_WRITE_FLUSH(hw); hw->eeprom.ops.read(hw, ++data_offset, &data_value); } /* Release the semaphore */ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); /* * Delay obtaining semaphore again to allow FW access, * semaphore_delay is in ms usleep_range needs us. */ usleep_range(hw->eeprom.semaphore_delay * 1000, hw->eeprom.semaphore_delay * 2000); /* Now restart DSP by setting Restart_AN and clearing LMS */ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw, IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) | IXGBE_AUTOC_AN_RESTART)); /* Wait for AN to leave state 0 */ for (i = 0; i < 10; i++) { usleep_range(4000, 8000); reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1); if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK) break; } if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) { hw_dbg(hw, "sfp module setup not complete\n"); ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; goto setup_sfp_out; } /* Restart DSP by setting Restart_AN and return to SFI mode */ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL | IXGBE_AUTOC_AN_RESTART)); } setup_sfp_out: return ret_val; } static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; ixgbe_init_mac_link_ops_82599(hw); mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); return 0; } /** * ixgbe_init_phy_ops_82599 - PHY/SFP specific init * @hw: pointer to hardware structure * * Initialize any function pointers that were not able to be * set during get_invariants because the PHY/SFP type was * not known. Perform the SFP init if necessary. 
* **/ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) { struct ixgbe_mac_info *mac = &hw->mac; struct ixgbe_phy_info *phy = &hw->phy; s32 ret_val = 0; /* Identify the PHY or SFP module */ ret_val = phy->ops.identify(hw); /* Setup function pointers based on detected SFP module and speeds */ ixgbe_init_mac_link_ops_82599(hw); /* If copper media, overwrite with copper function pointers */ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { mac->ops.setup_link = &ixgbe_setup_copper_link_82599; mac->ops.get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic; } /* Set necessary function pointers based on phy type */ switch (hw->phy.type) { case ixgbe_phy_tn: phy->ops.check_link = &ixgbe_check_phy_link_tnx; phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; phy->ops.get_firmware_version = &ixgbe_get_phy_firmware_version_tnx; break; default: break; } return ret_val; } /** * ixgbe_get_link_capabilities_82599 - Determines link capabilities * @hw: pointer to hardware structure * @speed: pointer to link speed * @negotiation: true when autoneg or autotry is enabled * * Determines the link capabilities by reading the AUTOC register. **/ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, ixgbe_link_speed *speed, bool *negotiation) { s32 status = 0; u32 autoc = 0; /* Determine 1G link capabilities off of SFP+ type */ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1) { *speed = IXGBE_LINK_SPEED_1GB_FULL; *negotiation = true; goto out; } /* * Determine link capabilities based on the stored value of AUTOC, * which represents EEPROM defaults. If AUTOC value has not been * stored, use the current register value. 
*/ if (hw->mac.orig_link_settings_stored) autoc = hw->mac.orig_autoc; else autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); switch (autoc & IXGBE_AUTOC_LMS_MASK) { case IXGBE_AUTOC_LMS_1G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; *negotiation = false; break; case IXGBE_AUTOC_LMS_10G_LINK_NO_AN: *speed = IXGBE_LINK_SPEED_10GB_FULL; *negotiation = false; break; case IXGBE_AUTOC_LMS_1G_AN: *speed = IXGBE_LINK_SPEED_1GB_FULL; *negotiation = true; break; case IXGBE_AUTOC_LMS_10G_SERIAL: *speed = IXGBE_LINK_SPEED_10GB_FULL; *negotiation = false; break; case IXGBE_AUTOC_LMS_KX4_KX_KR: case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN: *speed = IXGBE_LINK_SPEED_UNKNOWN; if (autoc & IXGBE_AUTOC_KR_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; *negotiation = true; break; case IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII: *speed = IXGBE_LINK_SPEED_100_FULL; if (autoc & IXGBE_AUTOC_KR_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX4_SUPP) *speed |= IXGBE_LINK_SPEED_10GB_FULL; if (autoc & IXGBE_AUTOC_KX_SUPP) *speed |= IXGBE_LINK_SPEED_1GB_FULL; *negotiation = true; break; case IXGBE_AUTOC_LMS_SGMII_1G_100M: *speed = IXGBE_LINK_SPEED_1GB_FULL | IXGBE_LINK_SPEED_100_FULL; *negotiation = false; break; default: status = IXGBE_ERR_LINK_SETUP; goto out; break; } if (hw->phy.multispeed_fiber) { *speed |= IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL; *negotiation = true; } out: return status; } /** * ixgbe_get_media_type_82599 - Get media type * @hw: pointer to hardware structure * * Returns the media type (fiber, copper, backplane) **/ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) { enum ixgbe_media_type media_type; /* Detect if there is a copper PHY attached. 
*/ switch (hw->phy.type) { case ixgbe_phy_cu_unknown: case ixgbe_phy_tn: media_type = ixgbe_media_type_copper; goto out; default: break; } switch (hw->device_id) { case IXGBE_DEV_ID_82599_KX4: case IXGBE_DEV_ID_82599_KX4_MEZZ: case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: case IXGBE_DEV_ID_82599_KR: case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: case IXGBE_DEV_ID_82599_XAUI_LOM: /* Default device ID is mezzanine card KX/KX4 */ media_type = ixgbe_media_type_backplane; break; case IXGBE_DEV_ID_82599_SFP: case IXGBE_DEV_ID_82599_SFP_FCOE: case IXGBE_DEV_ID_82599_SFP_EM: case IXGBE_DEV_ID_82599_SFP_SF2: case IXGBE_DEV_ID_82599_SFP_SF_QP: case IXGBE_DEV_ID_82599EN_SFP: media_type = ixgbe_media_type_fiber; break; case IXGBE_DEV_ID_82599_CX4: media_type = ixgbe_media_type_cx4; break; case IXGBE_DEV_ID_82599_T3_LOM: media_type = ixgbe_media_type_copper; break; case IXGBE_DEV_ID_82599_LS: media_type = ixgbe_media_type_fiber_lco; break; default: media_type = ixgbe_media_type_unknown; break; } out: return media_type; } /** * ixgbe_start_mac_link_82599 - Setup MAC link settings * @hw: pointer to hardware structure * @autoneg_wait_to_complete: true when waiting for completion is needed * * Configures link settings based on values in the ixgbe_hw struct. * Restarts the link. Performs autonegotiation if needed. 
**/ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, bool autoneg_wait_to_complete) { u32 autoc_reg; u32 links_reg; u32 i; s32 status = 0; /* Restart link */ autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); autoc_reg |= IXGBE_AUTOC_AN_RESTART; IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc_reg); /* Only poll for autoneg to complete if specified to do so */ if (autoneg_wait_to_complete) { if ((autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_KX_KR || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN || (autoc_reg & IXGBE_AUTOC_LMS_MASK) == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) { links_reg = 0; /* Just in case Autoneg time = 0 */ for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) { links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); if (links_reg & IXGBE_LINKS_KX_AN_COMP) break; msleep(100); } if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; hw_dbg(hw, "Autoneg did not complete.\n"); } } } /* Add delay to filter out noises during initial link setup */ msleep(50); return status; } /** * ixgbe_disable_tx_laser_multispeed_fiber - Disable Tx laser * @hw: pointer to hardware structure * * The base drivers may require better control over SFP+ module * PHY states. This includes selectively shutting down the Tx * laser on the PHY, effectively halting physical link. **/ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); /* Disable tx laser; allow 100us to go dark per spec */ esdp_reg |= IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); udelay(100); } /** * ixgbe_enable_tx_laser_multispeed_fiber - Enable Tx laser * @hw: pointer to hardware structure * * The base drivers may require better control over SFP+ module * PHY states. This includes selectively turning on the Tx * laser on the PHY, effectively starting physical link. 
**/ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); /* Enable tx laser; allow 100ms to light up */ esdp_reg &= ~IXGBE_ESDP_SDP3; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); msleep(100); } /** * ixgbe_flap_tx_laser_multispeed_fiber - Flap Tx laser * @hw: pointer to hardware structure * * When the driver changes the link speeds that it can support, * it sets autotry_restart to true to indicate that we need to * initiate a new autotry session with the link partner. To do * so, we set the speed then disable and re-enable the tx laser, to * alert the link partner that it also needs to restart autotry on its * end. This is consistent with true clause 37 autoneg, which also * involves a loss of signal. **/ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) { if (hw->mac.autotry_restart) { ixgbe_disable_tx_laser_multispeed_fiber(hw); ixgbe_enable_tx_laser_multispeed_fiber(hw); hw->mac.autotry_restart = false; } } /** * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. **/ static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) { s32 status = 0; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; u32 speedcnt = 0; u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); u32 i = 0; bool link_up = false; bool negotiation; /* Mask off requested but non-supported speeds */ status = hw->mac.ops.get_link_capabilities(hw, &link_speed, &negotiation); if (status != 0) return status; speed &= link_speed; /* * Try each speed one by one, highest priority first. 
We do this in * software because 10gb fiber doesn't support speed autonegotiation. */ if (speed & IXGBE_LINK_SPEED_10GB_FULL) { speedcnt++; highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; /* If we already have link at this speed, just jump out */ status = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (status != 0) return status; if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) goto out; /* Set the module link speed */ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); /* Allow module to change analog characteristics (1G->10G) */ msleep(40); status = ixgbe_setup_mac_link_82599(hw, IXGBE_LINK_SPEED_10GB_FULL, autoneg, autoneg_wait_to_complete); if (status != 0) return status; /* Flap the tx laser if it has not already been done */ hw->mac.ops.flap_tx_laser(hw); /* * Wait for the controller to acquire link. Per IEEE 802.3ap, * Section 73.10.2, we may have to wait up to 500ms if KR is * attempted. 82599 uses the same timing for 10g SFI. 
*/ for (i = 0; i < 5; i++) { /* Wait for the link partner to also set speed */ msleep(100); /* If we have link, just jump out */ status = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (status != 0) return status; if (link_up) goto out; } } if (speed & IXGBE_LINK_SPEED_1GB_FULL) { speedcnt++; if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; /* If we already have link at this speed, just jump out */ status = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (status != 0) return status; if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) goto out; /* Set the module link speed */ esdp_reg &= ~IXGBE_ESDP_SDP5; esdp_reg |= IXGBE_ESDP_SDP5_DIR; IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); IXGBE_WRITE_FLUSH(hw); /* Allow module to change analog characteristics (10G->1G) */ msleep(40); status = ixgbe_setup_mac_link_82599(hw, IXGBE_LINK_SPEED_1GB_FULL, autoneg, autoneg_wait_to_complete); if (status != 0) return status; /* Flap the tx laser if it has not already been done */ hw->mac.ops.flap_tx_laser(hw); /* Wait for the link partner to also set speed */ msleep(100); /* If we have link, just jump out */ status = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (status != 0) return status; if (link_up) goto out; } /* * We didn't get link. Configure back to the highest speed we tried, * (if there was more than one). We call ourselves back with just the * single highest speed that the user requested. 
*/ if (speedcnt > 1) status = ixgbe_setup_mac_link_multispeed_fiber(hw, highest_link_speed, autoneg, autoneg_wait_to_complete); out: /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; return status; } /** * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed * @hw: pointer to hardware structure * @speed: new link speed * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed * * Implements the Intel SmartSpeed algorithm. **/ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg, bool autoneg_wait_to_complete) { s32 status = 0; ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; s32 i, j; bool link_up = false; u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* Set autoneg_advertised value based on input link speed */ hw->phy.autoneg_advertised = 0; if (speed & IXGBE_LINK_SPEED_10GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; if (speed & IXGBE_LINK_SPEED_1GB_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; if (speed & IXGBE_LINK_SPEED_100_FULL) hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; /* * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the * autoneg advertisement if link is unable to be established at the * highest negotiated rate. This can sometimes happen due to integrity * issues with the physical media connection. */ /* First, try to get link with full advertisement */ hw->phy.smart_speed_active = false; for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, autoneg_wait_to_complete); if (status != 0) goto out; /* * Wait for the controller to acquire link. 
Per IEEE 802.3ap, * Section 73.10.2, we may have to wait up to 500ms if KR is * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per * Table 9 in the AN MAS. */ for (i = 0; i < 5; i++) { mdelay(100); /* If we have link, just jump out */ status = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (status != 0) goto out; if (link_up) goto out; } } /* * We didn't get link. If we advertised KR plus one of KX4/KX * (or BX4/BX), then disable KR and try again. */ if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) || ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0)) goto out; /* Turn SmartSpeed on to disable KR support */ hw->phy.smart_speed_active = true; status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, autoneg_wait_to_complete); if (status != 0) goto out; /* * Wait for the controller to acquire link. 600ms will allow for * the AN link_fail_inhibit_timer as well for multiple cycles of * parallel detect, both 10g and 1g. This allows for the maximum * connect attempts as defined in the AN MAS table 73-7. */ for (i = 0; i < 6; i++) { mdelay(100); /* If we have link, just jump out */ status = hw->mac.ops.check_link(hw, &link_speed, &link_up, false); if (status != 0) goto out; if (link_up) goto out; } /* We didn't get link. Turn SmartSpeed back off. */ hw->phy.smart_speed_active = false; status = ixgbe_setup_mac_link_82599(hw, speed, autoneg, autoneg_wait_to_complete); out: if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) hw_dbg(hw, "Smartspeed has downgraded the link speed from " "the maximum advertised\n"); return status; } /** * ixgbe_setup_mac_link_82599 - Set MAC link speed * @hw: pointer to hardware structure * @speed: new link speed * @autoneg: true if autonegotiation enabled * @autoneg_wait_to_complete: true when waiting for completion is needed * * Set the link speed in the AUTOC register and restarts link. 
 **/
static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
				      ixgbe_link_speed speed,
				      bool autoneg,
				      bool autoneg_wait_to_complete)
{
	s32 status = 0;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 start_autoc = autoc;	/* snapshot: only rewrite AUTOC if it changes */
	u32 orig_autoc = 0;
	u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 links_reg;
	u32 i;
	ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;

	/* Check to see if speed passed in is supported. */
	status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
						   &autoneg);
	if (status != 0)
		goto out;

	/* Clamp the requested speed to what the hardware can actually do */
	speed &= link_capabilities;

	if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
		status = IXGBE_ERR_LINK_SETUP;
		goto out;
	}

	/* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
	if (hw->mac.orig_link_settings_stored)
		orig_autoc = hw->mac.orig_autoc;
	else
		orig_autoc = autoc;

	if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
	    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
		/* Set KX4/KX/KR support according to speed requested */
		autoc &= ~(IXGBE_AUTOC_KX4_KX_SUPP_MASK | IXGBE_AUTOC_KR_SUPP);
		if (speed & IXGBE_LINK_SPEED_10GB_FULL)
			if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
				autoc |= IXGBE_AUTOC_KX4_SUPP;
		/* NOTE(review): per C grammar this KR check is NOT nested
		 * under the 10GB test above (no braces) - it runs
		 * unconditionally. KR is suppressed while SmartSpeed is
		 * actively downgrading the link.
		 */
		if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
		    (hw->phy.smart_speed_active == false))
			autoc |= IXGBE_AUTOC_KR_SUPP;
		if (speed & IXGBE_LINK_SPEED_1GB_FULL)
			autoc |= IXGBE_AUTOC_KX_SUPP;
	} else if ((pma_pmd_1g == IXGBE_AUTOC_1G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_1G_LINK_NO_AN ||
		    link_mode == IXGBE_AUTOC_LMS_1G_AN)) {
		/* Switch from 1G SFI to 10G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_10GB_FULL) &&
		    (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			autoc |= IXGBE_AUTOC_LMS_10G_SERIAL;
		}
	} else if ((pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI) &&
		   (link_mode == IXGBE_AUTOC_LMS_10G_SERIAL)) {
		/* Switch from 10G SFI to 1G SFI if requested */
		if ((speed == IXGBE_LINK_SPEED_1GB_FULL) &&
		    (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) {
			autoc &= ~IXGBE_AUTOC_LMS_MASK;
			if (autoneg)
				autoc |= IXGBE_AUTOC_LMS_1G_AN;
			else
				autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN;
		}
	}

	if (autoc != start_autoc) {
		/* Restart link */
		autoc |= IXGBE_AUTOC_AN_RESTART;
		IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);

		/* Only poll for autoneg to complete if specified to do so */
		if (autoneg_wait_to_complete) {
			if (link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN ||
			    link_mode == IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII) {
				links_reg = 0; /*Just in case Autoneg time=0*/
				for (i = 0; i < IXGBE_AUTO_NEG_TIME; i++) {
					links_reg =
					       IXGBE_READ_REG(hw, IXGBE_LINKS);
					if (links_reg & IXGBE_LINKS_KX_AN_COMP)
						break;
					msleep(100);
				}
				if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) {
					status =
						IXGBE_ERR_AUTONEG_NOT_COMPLETE;
					hw_dbg(hw, "Autoneg did not "
					       "complete.\n");
				}
			}
		}

		/* Add delay to filter out noises during initial link setup */
		msleep(50);
	}

out:
	return status;
}

/**
 * ixgbe_setup_copper_link_82599 - Set the PHY autoneg advertised field
 * @hw: pointer to hardware structure
 * @speed: new link speed
 * @autoneg: true if autonegotiation enabled
 * @autoneg_wait_to_complete: true if waiting is needed to complete
 *
 * Restarts link on PHY and MAC based on settings passed in.
 **/
static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
					 ixgbe_link_speed speed,
					 bool autoneg,
					 bool autoneg_wait_to_complete)
{
	s32 status;

	/* Setup the PHY according to input speed */
	status = hw->phy.ops.setup_link_speed(hw, speed, autoneg,
					      autoneg_wait_to_complete);
	/* Set up MAC */
	ixgbe_start_mac_link_82599(hw, autoneg_wait_to_complete);

	return status;
}

/**
 * ixgbe_reset_hw_82599 - Perform hardware reset
 * @hw: pointer to hardware structure
 *
 * Resets the hardware by resetting the transmit and receive units, masks
 * and clears all interrupts, perform a PHY reset, and perform a link (MAC)
 * reset.
 **/
static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
{
	ixgbe_link_speed link_speed;
	s32 status;
	u32 ctrl, i, autoc, autoc2;
	bool link_up = false;

	/* Call adapter stop to disable tx/rx and clear interrupts */
	status = hw->mac.ops.stop_adapter(hw);
	if (status != 0)
		goto reset_hw_out;

	/* flush pending Tx transactions */
	ixgbe_clear_tx_pending(hw);

	/* PHY ops must be identified and initialized prior to reset */

	/* Identify PHY and related function pointers */
	status = hw->phy.ops.init(hw);

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Setup SFP module if there is one present. */
	if (hw->phy.sfp_setup_needed) {
		status = hw->mac.ops.setup_sfp(hw);
		/* one-shot flag: the module only needs setup once per insert */
		hw->phy.sfp_setup_needed = false;
	}

	if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
		goto reset_hw_out;

	/* Reset PHY */
	if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
		hw->phy.ops.reset(hw);

mac_reset_top:
	/*
	 * Issue global reset to the MAC. Needs to be SW reset if link is up.
	 * If link reset is used when link is up, it might reset the PHY when
	 * mng is using it. If link is down or the flag to force full link
	 * reset is set, then perform link reset.
	 */
	ctrl = IXGBE_CTRL_LNK_RST;
	if (!hw->force_full_reset) {
		hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
		if (link_up)
			ctrl = IXGBE_CTRL_RST;
	}

	ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL);
	IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll for reset bit to self-clear indicating reset is complete */
	for (i = 0; i < 10; i++) {
		udelay(1);
		ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
		if (!(ctrl & IXGBE_CTRL_RST_MASK))
			break;
	}

	if (ctrl & IXGBE_CTRL_RST_MASK) {
		status = IXGBE_ERR_RESET_FAILED;
		hw_dbg(hw, "Reset polling failed to complete.\n");
	}

	msleep(50);

	/*
	 * Double resets are required for recovery from certain error
	 * conditions.  Between resets, it is necessary to stall to allow time
	 * for any pending HW events to complete.  We use 1usec since that is
	 * what is needed for ixgbe_disable_pcie_master().  The second reset
	 * then clears out any effects of those events.
	 */
	if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) {
		hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED;
		goto mac_reset_top;
	}

	/*
	 * Store the original AUTOC/AUTOC2 values if they have not been
	 * stored off yet.  Otherwise restore the stored original
	 * values since the reset operation sets back to defaults.
	 */
	autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	if (hw->mac.orig_link_settings_stored == false) {
		hw->mac.orig_autoc = autoc;
		hw->mac.orig_autoc2 = autoc2;
		hw->mac.orig_link_settings_stored = true;
	} else {
		if (autoc != hw->mac.orig_autoc)
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
					IXGBE_AUTOC_AN_RESTART));

		if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
		    (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
			autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
			autoc2 |= (hw->mac.orig_autoc2 &
				   IXGBE_AUTOC2_UPPER_MASK);
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
		}
	}

	/* Store the permanent mac address */
	hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);

	/*
	 * Store MAC address from RAR0, clear receive address registers, and
	 * clear the multicast table.  Also reset num_rar_entries to 128,
	 * since we modify this value when programming the SAN MAC address.
	 */
	hw->mac.num_rar_entries = 128;
	hw->mac.ops.init_rx_addrs(hw);

	/* Store the permanent SAN mac address */
	hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);

	/* Add the SAN MAC address to the RAR only if it's a valid address */
	if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
		hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
				    hw->mac.san_addr, 0, IXGBE_RAH_AV);

		/* Reserve the last RAR for the SAN MAC address */
		hw->mac.num_rar_entries--;
	}

	/* Store the alternative WWNN/WWPN prefix */
	hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
				   &hw->mac.wwpn_prefix);

reset_hw_out:
	return status;
}

/**
 * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables.
 * @hw: pointer to hardware structure
 **/
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
{
	int i;
	u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL);
	fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE;

	/*
	 * Before starting reinitialization process,
	 * FDIRCMD.CMD must be zero.
	 */
	for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) {
		if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
		      IXGBE_FDIRCMD_CMD_MASK))
			break;
		udelay(10);
	}
	if (i >= IXGBE_FDIRCMD_CMD_POLL) {
		hw_dbg(hw, "Flow Director previous command isn't complete, "
		       "aborting table re-initialization.\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0);
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * 82599 adapters flow director init flow cannot be restarted,
	 * Workaround 82599 silicon errata by performing the following steps
	 * before re-writing the FDIRCTRL control register with the same value.
	 * - write 1 to bit 8 of FDIRCMD register &
	 * - write 0 to bit 8 of FDIRCMD register
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) |
			 IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
			(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) &
			 ~IXGBE_FDIRCMD_CLEARHT));
	IXGBE_WRITE_FLUSH(hw);
	/*
	 * Clear FDIR Hash register to clear any leftover hashes
	 * waiting to be programmed.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, 0x00);
	IXGBE_WRITE_FLUSH(hw);

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);

	/* Poll init-done after we write FDIRCTRL register */
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
		    IXGBE_FDIRCTRL_INIT_DONE)
			break;
		udelay(10);
	}
	if (i >= IXGBE_FDIR_INIT_DONE_POLL) {
		hw_dbg(hw, "Flow Director Signature poll time exceeded!\n");
		return IXGBE_ERR_FDIR_REINIT_FAILED;
	}

	/* Clear FDIR statistics registers (read to clear) */
	IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRFSTAT);
	IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
	IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
	IXGBE_READ_REG(hw, IXGBE_FDIRLEN);

	return 0;
}

/**
 * ixgbe_fdir_enable_82599 - Initialize Flow Director control registers
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register
 **/
static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	int i;

	/* Prime the keys for hashing */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);

	/*
	 * Poll init-done after we write the register.  Estimated times:
	 *      10G: PBALLOC = 11b, timing is 60us
	 *       1G: PBALLOC = 11b, timing is 600us
	 *     100M: PBALLOC = 11b, timing is 6ms
	 *
	 *     Multiple these timings by 4 if under full Rx load
	 *
	 * So we'll poll for IXGBE_FDIR_INIT_DONE_POLL times, sleeping for
	 * 1 msec per poll time.  If we're at line rate and drop to 100M, then
	 * this might not finish in our poll time, but we can live with that
	 * for now.
	 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCTRL, fdirctrl);
	IXGBE_WRITE_FLUSH(hw);
	for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) {
		if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) &
				   IXGBE_FDIRCTRL_INIT_DONE)
			break;
		usleep_range(1000, 2000);
	}

	if (i >= IXGBE_FDIR_INIT_DONE_POLL)
		hw_dbg(hw, "Flow Director poll time exceeded!\n");
}

/**
 * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	/*
	 * Continue setup of fdirctrl register bits:
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 filters are left
	 */
	fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return 0;
}

/**
 * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters
 * @hw: pointer to hardware structure
 * @fdirctrl: value to write to flow director control register, initially
 *	      contains just the value of the Rx packet buffer allocation
 **/
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl)
{
	/*
	 * Continue setup of fdirctrl register bits:
	 *  Turn perfect match filtering on
	 *  Report hash in RSS field of Rx wb descriptor
	 *  Initialize the drop queue
	 *  Move the flexible bytes to use the ethertype - shift 6 words
	 *  Set the maximum length per hash bucket to 0xA filters
	 *  Send interrupt when 64 (0x4 * 16) filters are left
	 */
	fdirctrl |= IXGBE_FDIRCTRL_PERFECT_MATCH |
		    IXGBE_FDIRCTRL_REPORT_STATUS |
		    (IXGBE_FDIR_DROP_QUEUE << IXGBE_FDIRCTRL_DROP_Q_SHIFT) |
		    (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT) |
		    (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) |
		    (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT);

	/* write hashes and fdirctrl register, poll for completion */
	ixgbe_fdir_enable_82599(hw, fdirctrl);

	return 0;
}

/*
 * These defines allow us to quickly generate all of the necessary instructions
 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
 * for values 0 through 15
 */
#define IXGBE_ATR_COMMON_HASH_KEY \
		(IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
/* NOTE(review): the trailing ';' after while (0) defeats the usual
 * do-while(0) swallow-the-semicolon idiom; harmless at the current call
 * sites (all used as standalone statements) but a latent hazard in
 * un-braced if/else bodies.
 */
#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
do { \
	u32 n = (_n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
		common_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
		bucket_hash ^= lo_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
		sig_hash ^= lo_hash_dword << (16 - n); \
	if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
		common_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
		bucket_hash ^= hi_hash_dword >> n; \
	else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
		sig_hash ^= hi_hash_dword << (16 - n); \
} while (0);

/**
 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
 * @input: unique input dword (flow type / vm pool / vlan id)
 * @common: compressed common input dword
 *
 * This function is almost identical to the function above but contains
 * several optimizations such as unwinding all of the loops, letting the
 * compiler work out all of the conditional ifs since the keys are static
 * defines, and computing two keys at once since the hashed dword stream
 * will be the same for both keys.
**/ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common) { u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; /* record the flow_vm_vlan bits as they are a key part to the hash */ flow_vm_vlan = ntohl(input.dword); /* generate common hash dword */ hi_hash_dword = ntohl(common.dword); /* low dword is word swapped version of common */ lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); /* apply flow ID/VM pool/VLAN ID bits to hash words */ hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); /* Process bits 0 and 16 */ IXGBE_COMPUTE_SIG_HASH_ITERATION(0); /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to * delay this because bit 0 of the stream should not be processed * so we do not add the vlan until after bit 0 was processed */ lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); /* Process remaining 30 bit of the key */ IXGBE_COMPUTE_SIG_HASH_ITERATION(1); IXGBE_COMPUTE_SIG_HASH_ITERATION(2); IXGBE_COMPUTE_SIG_HASH_ITERATION(3); IXGBE_COMPUTE_SIG_HASH_ITERATION(4); IXGBE_COMPUTE_SIG_HASH_ITERATION(5); IXGBE_COMPUTE_SIG_HASH_ITERATION(6); IXGBE_COMPUTE_SIG_HASH_ITERATION(7); IXGBE_COMPUTE_SIG_HASH_ITERATION(8); IXGBE_COMPUTE_SIG_HASH_ITERATION(9); IXGBE_COMPUTE_SIG_HASH_ITERATION(10); IXGBE_COMPUTE_SIG_HASH_ITERATION(11); IXGBE_COMPUTE_SIG_HASH_ITERATION(12); IXGBE_COMPUTE_SIG_HASH_ITERATION(13); IXGBE_COMPUTE_SIG_HASH_ITERATION(14); IXGBE_COMPUTE_SIG_HASH_ITERATION(15); /* combine common_hash result with signature and bucket hashes */ bucket_hash ^= common_hash; bucket_hash &= IXGBE_ATR_HASH_MASK; sig_hash ^= common_hash << 16; sig_hash &= IXGBE_ATR_HASH_MASK << 16; /* return completed signature hash */ return sig_hash ^ bucket_hash; } /** * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter * @hw: pointer to hardware structure * @input: unique input dword * @common: compressed common input dword * @queue: 
queue index to direct traffic to **/ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, union ixgbe_atr_hash_dword input, union ixgbe_atr_hash_dword common, u8 queue) { u64 fdirhashcmd; u32 fdircmd; /* * Get the flow_type in order to program FDIRCMD properly * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */ switch (input.formatted.flow_type) { case IXGBE_ATR_FLOW_TYPE_TCPV4: case IXGBE_ATR_FLOW_TYPE_UDPV4: case IXGBE_ATR_FLOW_TYPE_SCTPV4: case IXGBE_ATR_FLOW_TYPE_TCPV6: case IXGBE_ATR_FLOW_TYPE_UDPV6: case IXGBE_ATR_FLOW_TYPE_SCTPV6: break; default: hw_dbg(hw, " Error on flow type input\n"); return IXGBE_ERR_CONFIG; } /* configure FDIRCMD register */ fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; /* * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. */ fdirhashcmd = (u64)fdircmd << 32; fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); return 0; } #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ do { \ u32 n = (_n); \ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \ bucket_hash ^= lo_hash_dword >> n; \ if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \ bucket_hash ^= hi_hash_dword >> n; \ } while (0); /** * ixgbe_atr_compute_perfect_hash_82599 - Compute the perfect filter hash * @atr_input: input bitstream to compute the hash on * @input_mask: mask for the input bitstream * * This function serves two main purposes. First it applys the input_mask * to the atr_input resulting in a cleaned up atr_input data stream. * Secondly it computes the hash and stores it in the bkt_hash field at * the end of the input byte stream. 
 * This way it will be available for
 * future use without needing to recompute the hash.
 **/
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *input_mask)
{
	u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
	u32 bucket_hash = 0;

	/* Apply masks to input data */
	input->dword_stream[0]  &= input_mask->dword_stream[0];
	input->dword_stream[1]  &= input_mask->dword_stream[1];
	input->dword_stream[2]  &= input_mask->dword_stream[2];
	input->dword_stream[3]  &= input_mask->dword_stream[3];
	input->dword_stream[4]  &= input_mask->dword_stream[4];
	input->dword_stream[5]  &= input_mask->dword_stream[5];
	input->dword_stream[6]  &= input_mask->dword_stream[6];
	input->dword_stream[7]  &= input_mask->dword_stream[7];
	input->dword_stream[8]  &= input_mask->dword_stream[8];
	input->dword_stream[9]  &= input_mask->dword_stream[9];
	input->dword_stream[10] &= input_mask->dword_stream[10];

	/* record the flow_vm_vlan bits as they are a key part to the hash */
	flow_vm_vlan = ntohl(input->dword_stream[0]);

	/* generate common hash dword */
	hi_hash_dword = ntohl(input->dword_stream[1] ^
			      input->dword_stream[2] ^
			      input->dword_stream[3] ^
			      input->dword_stream[4] ^
			      input->dword_stream[5] ^
			      input->dword_stream[6] ^
			      input->dword_stream[7] ^
			      input->dword_stream[8] ^
			      input->dword_stream[9] ^
			      input->dword_stream[10]);

	/* low dword is word swapped version of common */
	lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);

	/* apply flow ID/VM pool/VLAN ID bits to hash words */
	hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);

	/* Process bits 0 and 16 */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(0);

	/*
	 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
	 * delay this because bit 0 of the stream should not be processed
	 * so we do not add the vlan until after bit 0 was processed
	 */
	lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);

	/* Process remaining 30 bit of the key */
	IXGBE_COMPUTE_BKT_HASH_ITERATION(1);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(2);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(3);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(4);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(5);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(6);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(7);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(8);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(9);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(10);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(11);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(12);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(13);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(14);
	IXGBE_COMPUTE_BKT_HASH_ITERATION(15);

	/*
	 * Limit hash to 13 bits since max bucket count is 8K.
	 * Store result at the end of the input stream.
	 */
	input->formatted.bkt_hash = bucket_hash & 0x1FFF;
}

/**
 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
 * @input_mask: mask to be bit swapped
 *
 * The source and destination port masks for flow director are bit swapped
 * in that bit 15 effects bit 0, 14 effects 1, 13, 2 etc.  In order to
 * generate a correctly swapped value we need to bit swap the mask and that
 * is what is accomplished by this function.
 **/
static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask)
{
	/* dst port mask occupies the upper 16 bits, src port the lower 16 */
	u32 mask = ntohs(input_mask->formatted.dst_port);
	mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
	mask |= ntohs(input_mask->formatted.src_port);
	/* classic O(log n) bit reversal: swap 1-, 2-, 4-, then 8-bit groups */
	mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
	mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
	mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
	return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
}

/*
 * These two macros are meant to address the fact that we have registers
 * that are either all or in part big-endian.  As a result on big-endian
 * systems we will end up byte swapping the value to little-endian before
 * it is byte swapped again and written to the hardware in the original
 * big-endian format.
 */
#define IXGBE_STORE_AS_BE32(_value) \
	(((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
	 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))

#define IXGBE_WRITE_REG_BE32(a, reg, value) \
	IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))

#define IXGBE_STORE_AS_BE16(_value) \
	ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8))

s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask)
{
	/* mask IPv6 since it is currently not supported */
	u32 fdirm = IXGBE_FDIRM_DIPv6;
	u32 fdirtcpm;

	/*
	 * Program the relevant mask registers.  If src/dst_port or src/dst_addr
	 * are zero, then assume a full mask for that field.  Also assume that
	 * a VLAN of 0 is unspecified, so mask that out as well.  L4type
	 * cannot be masked out in this implementation.
	 *
	 * This also assumes IPv4 only.  IPv6 masking isn't supported at this
	 * point in time.
	 */

	/* verify bucket hash is cleared on hash generation */
	if (input_mask->formatted.bkt_hash)
		hw_dbg(hw, " bucket hash should always be 0 in mask\n");

	/* Program FDIRM and verify partial masks */
	switch (input_mask->formatted.vm_pool & 0x7F) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_POOL;
		/* fall through */
	case 0x7F:
		break;
	default:
		hw_dbg(hw, " Error on vm pool mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flow_type & IXGBE_ATR_L4TYPE_MASK) {
	case 0x0:
		fdirm |= IXGBE_FDIRM_L4P;
		if (input_mask->formatted.dst_port ||
		    input_mask->formatted.src_port) {
			hw_dbg(hw, " Error on src/dst port mask\n");
			return IXGBE_ERR_CONFIG;
		}
		/* fall through */
	case IXGBE_ATR_L4TYPE_MASK:
		break;
	default:
		hw_dbg(hw, " Error on flow type mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) {
	case 0x0000:
		/* mask VLAN ID, fall through to mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANID;
	case 0x0FFF:
		/* mask VLAN priority */
		fdirm |= IXGBE_FDIRM_VLANP;
		break;
	case 0xE000:
		/* mask VLAN ID only, fall through */
		fdirm |= IXGBE_FDIRM_VLANID;
	case 0xEFFF:
		/* no VLAN fields masked */
		break;
	default:
		hw_dbg(hw, " Error on VLAN mask\n");
		return IXGBE_ERR_CONFIG;
	}

	switch (input_mask->formatted.flex_bytes & 0xFFFF) {
	case 0x0000:
		/* Mask Flex Bytes, fall through */
		fdirm |= IXGBE_FDIRM_FLEX;
	case 0xFFFF:
		break;
	default:
		hw_dbg(hw, " Error on flexible byte mask\n");
		return IXGBE_ERR_CONFIG;
	}

	/* Now mask VM pool and destination IPv6 - bits 5 and 2 */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);

	/* store the TCP/UDP port masks, bit reversed from port layout */
	fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask);

	/* write both the same so that UDP and TCP use the same mask */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);

	/* store source and destination IP masks (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
			     ~input_mask->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
			     ~input_mask->formatted.dst_ip[0]);

	return 0;
}

s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id, u8 queue)
{
	u32 fdirport, fdirvlan, fdirhash, fdircmd;

	/* currently IPv6 is not supported, must be programmed with 0 */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0),
			     input->formatted.src_ip[0]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1),
			     input->formatted.src_ip[1]);
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2),
			     input->formatted.src_ip[2]);

	/* record the source address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);

	/* record the first 32 bits of the destination address (big-endian) */
	IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);

	/* record source and destination port (little-endian)*/
	fdirport = ntohs(input->formatted.dst_port);
	fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
	fdirport |= ntohs(input->formatted.src_port);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);

	/* record vlan (little-endian) and flex_bytes(big-endian) */
	fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes);
	fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
	fdirvlan |= ntohs(input->formatted.vlan_id);
	IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/*
	 * flush all previous writes to make certain registers are
	 * programmed prior to issuing the command
	 */
	IXGBE_WRITE_FLUSH(hw);

	/* configure FDIRCMD register */
	fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
		  IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
	if (queue == IXGBE_FDIR_DROP_QUEUE)
		fdircmd |= IXGBE_FDIRCMD_DROP;
	fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
	fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
	fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT;

	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);

	return 0;
}

s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					  union ixgbe_atr_input *input,
					  u16 soft_id)
{
	u32 fdirhash;
	u32 fdircmd = 0;
	u32 retry_count;
	s32 err = 0;

	/* configure FDIRHASH register */
	fdirhash = input->formatted.bkt_hash;
	fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
	IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);

	/* flush hash to HW */
	IXGBE_WRITE_FLUSH(hw);

	/* Query if filter is present */
	IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT);

	for (retry_count = 10; retry_count; retry_count--) {
		/* allow 10us for query to process */
		udelay(10);
		/* verify query completed successfully */
		fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD);
		if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK))
			break;
	}

	if (!retry_count)
		err = IXGBE_ERR_FDIR_REINIT_FAILED;

	/* if filter exists in hardware then remove it */
	if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) {
		IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
		IXGBE_WRITE_FLUSH(hw);
		IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD,
				IXGBE_FDIRCMD_CMD_REMOVE_FLOW);
	}

	return err;
}

/**
 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
 * @hw: pointer to
 *      hardware structure
 * @reg: analog register to read
 * @val: read value
 *
 * Performs read operation to Omer analog register specified.
 **/
static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val)
{
	u32 core_ctl;

	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD |
			(reg << 8));
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);
	core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL);
	*val = (u8)core_ctl;

	return 0;
}

/**
 * ixgbe_write_analog_reg8_82599 - Writes 8 bit Omer analog register
 * @hw: pointer to hardware structure
 * @reg: atlas register to write
 * @val: value to write
 *
 * Performs write operation to Omer analog register specified.
 **/
static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val)
{
	u32 core_ctl;

	core_ctl = (reg << 8) | val;
	IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl);
	IXGBE_WRITE_FLUSH(hw);
	udelay(10);

	return 0;
}

/**
 * ixgbe_start_hw_82599 - Prepare hardware for Tx/Rx
 * @hw: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function
 * and the generation start_hw function.
 * Then performs revision-specific operations, if any.
 **/
static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
{
	s32 ret_val = 0;

	ret_val = ixgbe_start_hw_generic(hw);
	if (ret_val != 0)
		goto out;

	ret_val = ixgbe_start_hw_gen2(hw);
	if (ret_val != 0)
		goto out;

	/* We need to run link autotry after the driver loads */
	hw->mac.autotry_restart = true;
	hw->mac.rx_pb_size = IXGBE_82599_RX_PB_SIZE;

	if (ret_val == 0)
		ret_val = ixgbe_verify_fw_version_82599(hw);
out:
	return ret_val;
}

/**
 * ixgbe_identify_phy_82599 - Get physical layer module
 * @hw: pointer to hardware structure
 *
 * Determines the physical layer module found on the current adapter.
 * If PHY already detected, maintains current PHY type in hw struct,
 * otherwise executes the PHY detection routine.
 **/
static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
{
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;

	/* Detect PHY if not unknown - returns success if already detected. */
	status = ixgbe_identify_phy_generic(hw);
	if (status != 0) {
		/* 82599 10GBASE-T requires an external PHY */
		if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
			goto out;
		else
			status = ixgbe_identify_sfp_module_generic(hw);
	}

	/* Set PHY type none if no PHY detected */
	if (hw->phy.type == ixgbe_phy_unknown) {
		hw->phy.type = ixgbe_phy_none;
		status = 0;
	}

	/* Return error if SFP module has been detected but is not supported */
	if (hw->phy.type == ixgbe_phy_sfp_unsupported)
		status = IXGBE_ERR_SFP_NOT_SUPPORTED;

out:
	return status;
}

/**
 * ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
 * @hw: pointer to hardware structure
 *
 * Determines physical layer capabilities of the current configuration.
 **/
static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
{
	u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
	u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
	u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
	u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
	u16 ext_ability = 0;
	u8 comp_codes_10g = 0;
	u8 comp_codes_1g = 0;

	hw->phy.ops.identify(hw);

	/* Copper PHYs report their abilities via MDIO extended ability bits */
	switch (hw->phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_cu_unknown:
		hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
				     &ext_ability);
		if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
		if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
		if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
			physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
		goto out;
	default:
		break;
	}

	/* Otherwise derive the layer from the AUTOC link mode selection */
	switch (autoc & IXGBE_AUTOC_LMS_MASK) {
	case IXGBE_AUTOC_LMS_1G_AN:
	case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
		if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
			    IXGBE_PHYSICAL_LAYER_1000BASE_BX;
			goto out;
		} else
			/* SFI mode so read SFP module */
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
		if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
		goto out;
		break;
	case IXGBE_AUTOC_LMS_10G_SERIAL:
		if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
			goto out;
		} else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
			goto sfp_check;
		break;
	case IXGBE_AUTOC_LMS_KX4_KX_KR:
	case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
		if (autoc & IXGBE_AUTOC_KX_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
		if (autoc & IXGBE_AUTOC_KX4_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
		if (autoc & IXGBE_AUTOC_KR_SUPP)
			physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
		goto out;
		break;
	default:
		goto out;
		break;
	}

sfp_check:
	/* SFP check must be done last since DA modules are sometimes used to
	 * test KR mode -  we need to id KR mode correctly before SFP module.
	 * Call identify_sfp because the pluggable module may have changed */
	hw->phy.ops.identify_sfp(hw);
	if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
		goto out;

	switch (hw->phy.type) {
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
		break;
	case ixgbe_phy_sfp_ftl_active:
	case ixgbe_phy_sfp_active_unknown:
		physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
		break;
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		hw->phy.ops.read_i2c_eeprom(hw,
		      IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
		hw->phy.ops.read_i2c_eeprom(hw,
		      IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
		if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
		else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
		else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
			physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
		break;
	default:
		break;
	}

out:
	return physical_layer;
}

/**
 * ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
 * @hw: pointer to hardware structure
 * @regval: register value to write to RXCTRL
 *
 * Enables the Rx DMA unit for 82599
 **/
static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval)
{
	/*
	 * Workaround for 82599 silicon errata when enabling the Rx datapath.
	 * If traffic is incoming before we enable the Rx unit, it could hang
	 * the Rx DMA unit.  Therefore, make sure the security engine is
	 * completely disabled prior to enabling the Rx unit.
	 */
	hw->mac.ops.disable_rx_buff(hw);

	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval);

	hw->mac.ops.enable_rx_buff(hw);

	return 0;
}

/**
 * ixgbe_verify_fw_version_82599 - verify fw version for 82599
 * @hw: pointer to hardware structure
 *
 * Verifies that installed the firmware version is 0.6 or higher
 * for SFI devices. All 82599 SFI devices should have version 0.6 or higher.
* * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or * if the FW version is not supported. **/ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) { s32 status = IXGBE_ERR_EEPROM_VERSION; u16 fw_offset, fw_ptp_cfg_offset; u16 fw_version = 0; /* firmware check is only necessary for SFI devices */ if (hw->phy.media_type != ixgbe_media_type_fiber) { status = 0; goto fw_version_out; } /* get the offset to the Firmware Module block */ hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); if ((fw_offset == 0) || (fw_offset == 0xFFFF)) goto fw_version_out; /* get the offset to the Pass Through Patch Configuration block */ hw->eeprom.ops.read(hw, (fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), &fw_ptp_cfg_offset); if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) goto fw_version_out; /* get the firmware version */ hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4), &fw_version); if (fw_version > 0x5) status = 0; fw_version_out: return status; } /** * ixgbe_verify_lesm_fw_enabled_82599 - Checks LESM FW module state. * @hw: pointer to hardware structure * * Returns true if the LESM FW module is present and enabled. Otherwise * returns false. Smart Speed must be disabled if LESM FW module is enabled. 
**/ static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) { bool lesm_enabled = false; u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; s32 status; /* get the offset to the Firmware Module block */ status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); if ((status != 0) || (fw_offset == 0) || (fw_offset == 0xFFFF)) goto out; /* get the offset to the LESM Parameters block */ status = hw->eeprom.ops.read(hw, (fw_offset + IXGBE_FW_LESM_PARAMETERS_PTR), &fw_lesm_param_offset); if ((status != 0) || (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) goto out; /* get the lesm state word */ status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + IXGBE_FW_LESM_STATE_1), &fw_lesm_state); if ((status == 0) && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) lesm_enabled = true; out: return lesm_enabled; } /** * ixgbe_read_eeprom_buffer_82599 - Read EEPROM word(s) using * fastest available method * * @hw: pointer to hardware structure * @offset: offset of word in EEPROM to read * @words: number of words * @data: word(s) read from the EEPROM * * Retrieves 16 bit word(s) read from EEPROM **/ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, u16 words, u16 *data) { struct ixgbe_eeprom_info *eeprom = &hw->eeprom; s32 ret_val = IXGBE_ERR_CONFIG; /* * If EEPROM is detected and can be addressed using 14 bits, * use EERD otherwise use bit bang */ if ((eeprom->type == ixgbe_eeprom_spi) && (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)) ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words, data); else ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words, data); return ret_val; } /** * ixgbe_read_eeprom_82599 - Read EEPROM word using * fastest available method * * @hw: pointer to hardware structure * @offset: offset of word in the EEPROM to read * @data: word read from the EEPROM * * Reads a 16 bit word from the EEPROM **/ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, u16 offset, u16 *data) { 
struct ixgbe_eeprom_info *eeprom = &hw->eeprom; s32 ret_val = IXGBE_ERR_CONFIG; /* * If EEPROM is detected and can be addressed using 14 bits, * use EERD otherwise use bit bang */ if ((eeprom->type == ixgbe_eeprom_spi) && (offset <= IXGBE_EERD_MAX_ADDR)) ret_val = ixgbe_read_eerd_generic(hw, offset, data); else ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data); return ret_val; } static struct ixgbe_mac_operations mac_ops_82599 = { .init_hw = &ixgbe_init_hw_generic, .reset_hw = &ixgbe_reset_hw_82599, .start_hw = &ixgbe_start_hw_82599, .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, .get_media_type = &ixgbe_get_media_type_82599, .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599, .enable_rx_dma = &ixgbe_enable_rx_dma_82599, .disable_rx_buff = &ixgbe_disable_rx_buff_generic, .enable_rx_buff = &ixgbe_enable_rx_buff_generic, .get_mac_addr = &ixgbe_get_mac_addr_generic, .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, .get_device_caps = &ixgbe_get_device_caps_generic, .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, .stop_adapter = &ixgbe_stop_adapter_generic, .get_bus_info = &ixgbe_get_bus_info_generic, .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, .setup_link = &ixgbe_setup_mac_link_82599, .set_rxpba = &ixgbe_set_rxpba_generic, .check_link = &ixgbe_check_mac_link_generic, .get_link_capabilities = &ixgbe_get_link_capabilities_82599, .led_on = &ixgbe_led_on_generic, .led_off = &ixgbe_led_off_generic, .blink_led_start = &ixgbe_blink_led_start_generic, .blink_led_stop = &ixgbe_blink_led_stop_generic, .set_rar = &ixgbe_set_rar_generic, .clear_rar = &ixgbe_clear_rar_generic, .set_vmdq = &ixgbe_set_vmdq_generic, .clear_vmdq = &ixgbe_clear_vmdq_generic, .init_rx_addrs = &ixgbe_init_rx_addrs_generic, .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, .enable_mc = &ixgbe_enable_mc_generic, .disable_mc = 
&ixgbe_disable_mc_generic, .clear_vfta = &ixgbe_clear_vfta_generic, .set_vfta = &ixgbe_set_vfta_generic, .fc_enable = &ixgbe_fc_enable_generic, .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, .init_uta_tables = &ixgbe_init_uta_tables_generic, .setup_sfp = &ixgbe_setup_sfp_modules_82599, .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, .release_swfw_sync = &ixgbe_release_swfw_sync, }; static struct ixgbe_eeprom_operations eeprom_ops_82599 = { .init_params = &ixgbe_init_eeprom_params_generic, .read = &ixgbe_read_eeprom_82599, .read_buffer = &ixgbe_read_eeprom_buffer_82599, .write = &ixgbe_write_eeprom_generic, .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, .update_checksum = &ixgbe_update_eeprom_checksum_generic, }; static struct ixgbe_phy_operations phy_ops_82599 = { .identify = &ixgbe_identify_phy_82599, .identify_sfp = &ixgbe_identify_sfp_module_generic, .init = &ixgbe_init_phy_ops_82599, .reset = &ixgbe_reset_phy_generic, .read_reg = &ixgbe_read_phy_reg_generic, .write_reg = &ixgbe_write_phy_reg_generic, .setup_link = &ixgbe_setup_phy_link_generic, .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, .read_i2c_byte = &ixgbe_read_i2c_byte_generic, .write_i2c_byte = &ixgbe_write_i2c_byte_generic, .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, .check_overtemp = &ixgbe_tn_check_overtemp, }; struct ixgbe_info ixgbe_82599_info = { .mac = ixgbe_mac_82599EB, .get_invariants = &ixgbe_get_invariants_82599, .mac_ops = &mac_ops_82599, .eeprom_ops = &eeprom_ops_82599, .phy_ops = &phy_ops_82599, .mbx_ops = &mbx_ops_generic, };
gpl-2.0
ZdrowyGosciu/kernel_lge_g2_msm8974
drivers/media/radio/radio-rtrack2.c
4809
3371
/* * RadioTrack II driver * Copyright 1998 Ben Pfaff * * Based on RadioTrack I/RadioReveal (C) 1997 M. Kirkwood * Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk> * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> */ #include <linux/module.h> /* Modules */ #include <linux/init.h> /* Initdata */ #include <linux/ioport.h> /* request_region */ #include <linux/delay.h> /* udelay */ #include <linux/videodev2.h> /* kernel radio structs */ #include <linux/mutex.h> #include <linux/io.h> /* outb, outb_p */ #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include "radio-isa.h" MODULE_AUTHOR("Ben Pfaff"); MODULE_DESCRIPTION("A driver for the RadioTrack II radio card."); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1.99"); #ifndef CONFIG_RADIO_RTRACK2_PORT #define CONFIG_RADIO_RTRACK2_PORT -1 #endif #define RTRACK2_MAX 2 static int io[RTRACK2_MAX] = { [0] = CONFIG_RADIO_RTRACK2_PORT, [1 ... (RTRACK2_MAX - 1)] = -1 }; static int radio_nr[RTRACK2_MAX] = { [0 ... 
(RTRACK2_MAX - 1)] = -1 }; module_param_array(io, int, NULL, 0444); MODULE_PARM_DESC(io, "I/O addresses of the RadioTrack card (0x20f or 0x30f)"); module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(radio_nr, "Radio device numbers"); static struct radio_isa_card *rtrack2_alloc(void) { return kzalloc(sizeof(struct radio_isa_card), GFP_KERNEL); } static void zero(struct radio_isa_card *isa) { outb_p(1, isa->io); outb_p(3, isa->io); outb_p(1, isa->io); } static void one(struct radio_isa_card *isa) { outb_p(5, isa->io); outb_p(7, isa->io); outb_p(5, isa->io); } static int rtrack2_s_frequency(struct radio_isa_card *isa, u32 freq) { int i; freq = freq / 200 + 856; outb_p(0xc8, isa->io); outb_p(0xc9, isa->io); outb_p(0xc9, isa->io); for (i = 0; i < 10; i++) zero(isa); for (i = 14; i >= 0; i--) if (freq & (1 << i)) one(isa); else zero(isa); outb_p(0xc8, isa->io); if (!v4l2_ctrl_g_ctrl(isa->mute)) outb_p(0, isa->io); return 0; } static u32 rtrack2_g_signal(struct radio_isa_card *isa) { /* bit set = no signal present */ return (inb(isa->io) & 2) ? 
0 : 0xffff; } static int rtrack2_s_mute_volume(struct radio_isa_card *isa, bool mute, int vol) { outb(mute, isa->io); return 0; } static const struct radio_isa_ops rtrack2_ops = { .alloc = rtrack2_alloc, .s_mute_volume = rtrack2_s_mute_volume, .s_frequency = rtrack2_s_frequency, .g_signal = rtrack2_g_signal, }; static const int rtrack2_ioports[] = { 0x20f, 0x30f }; static struct radio_isa_driver rtrack2_driver = { .driver = { .match = radio_isa_match, .probe = radio_isa_probe, .remove = radio_isa_remove, .driver = { .name = "radio-rtrack2", }, }, .io_params = io, .radio_nr_params = radio_nr, .io_ports = rtrack2_ioports, .num_of_io_ports = ARRAY_SIZE(rtrack2_ioports), .region_size = 4, .card = "AIMSlab RadioTrack II", .ops = &rtrack2_ops, .has_stereo = true, }; static int __init rtrack2_init(void) { return isa_register_driver(&rtrack2_driver.driver, RTRACK2_MAX); } static void __exit rtrack2_exit(void) { isa_unregister_driver(&rtrack2_driver.driver); } module_init(rtrack2_init); module_exit(rtrack2_exit);
gpl-2.0
armani-dev/android_kernel_xiaomi_armani
drivers/scsi/isci/host.c
4809
89583
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <linux/circ_buf.h> #include <linux/device.h> #include <scsi/sas.h> #include "host.h" #include "isci.h" #include "port.h" #include "probe_roms.h" #include "remote_device.h" #include "request.h" #include "scu_completion_codes.h" #include "scu_event_codes.h" #include "registers.h" #include "scu_remote_node_context.h" #include "scu_task_context.h" #define SCU_CONTEXT_RAM_INIT_STALL_TIME 200 #define smu_max_ports(dcc_value) \ (\ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_MASK) \ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_LP_SHIFT) + 1 \ ) #define smu_max_task_contexts(dcc_value) \ (\ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_MASK) \ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_TC_SHIFT) + 1 \ ) #define smu_max_rncs(dcc_value) \ (\ (((dcc_value) & SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_MASK) \ >> SMU_DEVICE_CONTEXT_CAPACITY_MAX_RNC_SHIFT) + 1 \ ) #define SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT 100 /** * * * The number of milliseconds to wait while a given phy is consuming power * before allowing another set of phys to consume power. Ultimately, this will * be specified by OEM parameter. 
*/ #define SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL 500 /** * NORMALIZE_PUT_POINTER() - * * This macro will normalize the completion queue put pointer so its value can * be used as an array inde */ #define NORMALIZE_PUT_POINTER(x) \ ((x) & SMU_COMPLETION_QUEUE_PUT_POINTER_MASK) /** * NORMALIZE_EVENT_POINTER() - * * This macro will normalize the completion queue event entry so its value can * be used as an index. */ #define NORMALIZE_EVENT_POINTER(x) \ (\ ((x) & SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_MASK) \ >> SMU_COMPLETION_QUEUE_GET_EVENT_POINTER_SHIFT \ ) /** * NORMALIZE_GET_POINTER() - * * This macro will normalize the completion queue get pointer so its value can * be used as an index into an array */ #define NORMALIZE_GET_POINTER(x) \ ((x) & SMU_COMPLETION_QUEUE_GET_POINTER_MASK) /** * NORMALIZE_GET_POINTER_CYCLE_BIT() - * * This macro will normalize the completion queue cycle pointer so it matches * the completion queue cycle bit */ #define NORMALIZE_GET_POINTER_CYCLE_BIT(x) \ ((SMU_CQGR_CYCLE_BIT & (x)) << (31 - SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT)) /** * COMPLETION_QUEUE_CYCLE_BIT() - * * This macro will return the cycle bit of the completion queue entry */ #define COMPLETION_QUEUE_CYCLE_BIT(x) ((x) & 0x80000000) /* Init the state machine and call the state entry function (if any) */ void sci_init_sm(struct sci_base_state_machine *sm, const struct sci_base_state *state_table, u32 initial_state) { sci_state_transition_t handler; sm->initial_state_id = initial_state; sm->previous_state_id = initial_state; sm->current_state_id = initial_state; sm->state_table = state_table; handler = sm->state_table[initial_state].enter_state; if (handler) handler(sm); } /* Call the state exit fn, update the current state, call the state entry fn */ void sci_change_state(struct sci_base_state_machine *sm, u32 next_state) { sci_state_transition_t handler; handler = sm->state_table[sm->current_state_id].exit_state; if (handler) handler(sm); sm->previous_state_id = 
sm->current_state_id; sm->current_state_id = next_state; handler = sm->state_table[sm->current_state_id].enter_state; if (handler) handler(sm); } static bool sci_controller_completion_queue_has_entries(struct isci_host *ihost) { u32 get_value = ihost->completion_queue_get; u32 get_index = get_value & SMU_COMPLETION_QUEUE_GET_POINTER_MASK; if (NORMALIZE_GET_POINTER_CYCLE_BIT(get_value) == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index])) return true; return false; } static bool sci_controller_isr(struct isci_host *ihost) { if (sci_controller_completion_queue_has_entries(ihost)) { return true; } else { /* * we have a spurious interrupt it could be that we have already * emptied the completion queue from a previous interrupt */ writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); /* * There is a race in the hardware that could cause us not to be notified * of an interrupt completion if we do not take this step. We will mask * then unmask the interrupts so if there is another interrupt pending * the clearing of the interrupt source we get the next interrupt message. */ writel(0xFF000000, &ihost->smu_registers->interrupt_mask); writel(0, &ihost->smu_registers->interrupt_mask); } return false; } irqreturn_t isci_msix_isr(int vec, void *data) { struct isci_host *ihost = data; if (sci_controller_isr(ihost)) tasklet_schedule(&ihost->completion_tasklet); return IRQ_HANDLED; } static bool sci_controller_error_isr(struct isci_host *ihost) { u32 interrupt_status; interrupt_status = readl(&ihost->smu_registers->interrupt_status); interrupt_status &= (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND); if (interrupt_status != 0) { /* * There is an error interrupt pending so let it through and handle * in the callback */ return true; } /* * There is a race in the hardware that could cause us not to be notified * of an interrupt completion if we do not take this step. 
We will mask * then unmask the error interrupts so if there was another interrupt * pending we will be notified. * Could we write the value of (SMU_ISR_QUEUE_ERROR | SMU_ISR_QUEUE_SUSPEND)? */ writel(0xff, &ihost->smu_registers->interrupt_mask); writel(0, &ihost->smu_registers->interrupt_mask); return false; } static void sci_controller_task_completion(struct isci_host *ihost, u32 ent) { u32 index = SCU_GET_COMPLETION_INDEX(ent); struct isci_request *ireq = ihost->reqs[index]; /* Make sure that we really want to process this IO request */ if (test_bit(IREQ_ACTIVE, &ireq->flags) && ireq->io_tag != SCI_CONTROLLER_INVALID_IO_TAG && ISCI_TAG_SEQ(ireq->io_tag) == ihost->io_request_sequence[index]) /* Yep this is a valid io request pass it along to the * io request handler */ sci_io_request_tc_completion(ireq, ent); } static void sci_controller_sdma_completion(struct isci_host *ihost, u32 ent) { u32 index; struct isci_request *ireq; struct isci_remote_device *idev; index = SCU_GET_COMPLETION_INDEX(ent); switch (scu_get_command_request_type(ent)) { case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_TC: ireq = ihost->reqs[index]; dev_warn(&ihost->pdev->dev, "%s: %x for io request %p\n", __func__, ent, ireq); /* @todo For a post TC operation we need to fail the IO * request */ break; case SCU_CONTEXT_COMMAND_REQUEST_TYPE_DUMP_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_OTHER_RNC: case SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_RNC: idev = ihost->device_table[index]; dev_warn(&ihost->pdev->dev, "%s: %x for device %p\n", __func__, ent, idev); /* @todo For a port RNC operation we need to fail the * device */ break; default: dev_warn(&ihost->pdev->dev, "%s: unknown completion type %x\n", __func__, ent); break; } } static void sci_controller_unsolicited_frame(struct isci_host *ihost, u32 ent) { u32 index; u32 frame_index; struct scu_unsolicited_frame_header *frame_header; struct isci_phy *iphy; struct isci_remote_device *idev; enum sci_status 
result = SCI_FAILURE; frame_index = SCU_GET_FRAME_INDEX(ent); frame_header = ihost->uf_control.buffers.array[frame_index].header; ihost->uf_control.buffers.array[frame_index].state = UNSOLICITED_FRAME_IN_USE; if (SCU_GET_FRAME_ERROR(ent)) { /* * / @todo If the IAF frame or SIGNATURE FIS frame has an error will * / this cause a problem? We expect the phy initialization will * / fail if there is an error in the frame. */ sci_controller_release_frame(ihost, frame_index); return; } if (frame_header->is_address_frame) { index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; result = sci_phy_frame_handler(iphy, frame_index); } else { index = SCU_GET_COMPLETION_INDEX(ent); if (index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) { /* * This is a signature fis or a frame from a direct attached SATA * device that has not yet been created. In either case forwared * the frame to the PE and let it take care of the frame data. */ index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; result = sci_phy_frame_handler(iphy, frame_index); } else { if (index < ihost->remote_node_entries) idev = ihost->device_table[index]; else idev = NULL; if (idev != NULL) result = sci_remote_device_frame_handler(idev, frame_index); else sci_controller_release_frame(ihost, frame_index); } } if (result != SCI_SUCCESS) { /* * / @todo Is there any reason to report some additional error message * / when we get this failure notifiction? */ } } static void sci_controller_event_completion(struct isci_host *ihost, u32 ent) { struct isci_remote_device *idev; struct isci_request *ireq; struct isci_phy *iphy; u32 index; index = SCU_GET_COMPLETION_INDEX(ent); switch (scu_get_event_type(ent)) { case SCU_EVENT_TYPE_SMU_COMMAND_ERROR: /* / @todo The driver did something wrong and we need to fix the condtion. 
*/ dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received SMU command error " "0x%x\n", __func__, ihost, ent); break; case SCU_EVENT_TYPE_SMU_PCQ_ERROR: case SCU_EVENT_TYPE_SMU_ERROR: case SCU_EVENT_TYPE_FATAL_MEMORY_ERROR: /* * / @todo This is a hardware failure and its likely that we want to * / reset the controller. */ dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received fatal controller " "event 0x%x\n", __func__, ihost, ent); break; case SCU_EVENT_TYPE_TRANSPORT_ERROR: ireq = ihost->reqs[index]; sci_io_request_event_handler(ireq, ent); break; case SCU_EVENT_TYPE_PTX_SCHEDULE_EVENT: switch (scu_get_event_specifier(ent)) { case SCU_EVENT_SPECIFIC_SMP_RESPONSE_NO_PE: case SCU_EVENT_SPECIFIC_TASK_TIMEOUT: ireq = ihost->reqs[index]; if (ireq != NULL) sci_io_request_event_handler(ireq, ent); else dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " "event 0x%x for io request object " "that doesnt exist.\n", __func__, ihost, ent); break; case SCU_EVENT_SPECIFIC_IT_NEXUS_TIMEOUT: idev = ihost->device_table[index]; if (idev != NULL) sci_remote_device_event_handler(idev, ent); else dev_warn(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received " "event 0x%x for remote device object " "that doesnt exist.\n", __func__, ihost, ent); break; } break; case SCU_EVENT_TYPE_BROADCAST_CHANGE: /* * direct the broadcast change event to the phy first and then let * the phy redirect the broadcast change to the port object */ case SCU_EVENT_TYPE_ERR_CNT_EVENT: /* * direct error counter event to the phy object since that is where * we get the event notification. This is a type 4 event. 
*/ case SCU_EVENT_TYPE_OSSP_EVENT: index = SCU_GET_PROTOCOL_ENGINE_INDEX(ent); iphy = &ihost->phys[index]; sci_phy_event_handler(iphy, ent); break; case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: case SCU_EVENT_TYPE_RNC_OPS_MISC: if (index < ihost->remote_node_entries) { idev = ihost->device_table[index]; if (idev != NULL) sci_remote_device_event_handler(idev, ent); } else dev_err(&ihost->pdev->dev, "%s: SCIC Controller 0x%p received event 0x%x " "for remote device object 0x%0x that doesnt " "exist.\n", __func__, ihost, ent, index); break; default: dev_warn(&ihost->pdev->dev, "%s: SCIC Controller received unknown event code %x\n", __func__, ent); break; } } static void sci_controller_process_completions(struct isci_host *ihost) { u32 completion_count = 0; u32 ent; u32 get_index; u32 get_cycle; u32 event_get; u32 event_cycle; dev_dbg(&ihost->pdev->dev, "%s: completion queue begining get:0x%08x\n", __func__, ihost->completion_queue_get); /* Get the component parts of the completion queue */ get_index = NORMALIZE_GET_POINTER(ihost->completion_queue_get); get_cycle = SMU_CQGR_CYCLE_BIT & ihost->completion_queue_get; event_get = NORMALIZE_EVENT_POINTER(ihost->completion_queue_get); event_cycle = SMU_CQGR_EVENT_CYCLE_BIT & ihost->completion_queue_get; while ( NORMALIZE_GET_POINTER_CYCLE_BIT(get_cycle) == COMPLETION_QUEUE_CYCLE_BIT(ihost->completion_queue[get_index]) ) { completion_count++; ent = ihost->completion_queue[get_index]; /* increment the get pointer and check for rollover to toggle the cycle bit */ get_cycle ^= ((get_index+1) & SCU_MAX_COMPLETION_QUEUE_ENTRIES) << (SMU_COMPLETION_QUEUE_GET_CYCLE_BIT_SHIFT - SCU_MAX_COMPLETION_QUEUE_SHIFT); get_index = (get_index+1) & (SCU_MAX_COMPLETION_QUEUE_ENTRIES-1); dev_dbg(&ihost->pdev->dev, "%s: completion queue entry:0x%08x\n", __func__, ent); switch (SCU_GET_COMPLETION_TYPE(ent)) { case SCU_COMPLETION_TYPE_TASK: sci_controller_task_completion(ihost, ent); break; case SCU_COMPLETION_TYPE_SDMA: 
sci_controller_sdma_completion(ihost, ent); break; case SCU_COMPLETION_TYPE_UFI: sci_controller_unsolicited_frame(ihost, ent); break; case SCU_COMPLETION_TYPE_EVENT: sci_controller_event_completion(ihost, ent); break; case SCU_COMPLETION_TYPE_NOTIFY: { event_cycle ^= ((event_get+1) & SCU_MAX_EVENTS) << (SMU_COMPLETION_QUEUE_GET_EVENT_CYCLE_BIT_SHIFT - SCU_MAX_EVENTS_SHIFT); event_get = (event_get+1) & (SCU_MAX_EVENTS-1); sci_controller_event_completion(ihost, ent); break; } default: dev_warn(&ihost->pdev->dev, "%s: SCIC Controller received unknown " "completion type %x\n", __func__, ent); break; } } /* Update the get register if we completed one or more entries */ if (completion_count > 0) { ihost->completion_queue_get = SMU_CQGR_GEN_BIT(ENABLE) | SMU_CQGR_GEN_BIT(EVENT_ENABLE) | event_cycle | SMU_CQGR_GEN_VAL(EVENT_POINTER, event_get) | get_cycle | SMU_CQGR_GEN_VAL(POINTER, get_index); writel(ihost->completion_queue_get, &ihost->smu_registers->completion_queue_get); } dev_dbg(&ihost->pdev->dev, "%s: completion queue ending get:0x%08x\n", __func__, ihost->completion_queue_get); } static void sci_controller_error_handler(struct isci_host *ihost) { u32 interrupt_status; interrupt_status = readl(&ihost->smu_registers->interrupt_status); if ((interrupt_status & SMU_ISR_QUEUE_SUSPEND) && sci_controller_completion_queue_has_entries(ihost)) { sci_controller_process_completions(ihost); writel(SMU_ISR_QUEUE_SUSPEND, &ihost->smu_registers->interrupt_status); } else { dev_err(&ihost->pdev->dev, "%s: status: %#x\n", __func__, interrupt_status); sci_change_state(&ihost->sm, SCIC_FAILED); return; } /* If we dont process any completions I am not sure that we want to do this. * We are in the middle of a hardware fault and should probably be reset. 
*/ writel(0, &ihost->smu_registers->interrupt_mask); } irqreturn_t isci_intx_isr(int vec, void *data) { irqreturn_t ret = IRQ_NONE; struct isci_host *ihost = data; if (sci_controller_isr(ihost)) { writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status); tasklet_schedule(&ihost->completion_tasklet); ret = IRQ_HANDLED; } else if (sci_controller_error_isr(ihost)) { spin_lock(&ihost->scic_lock); sci_controller_error_handler(ihost); spin_unlock(&ihost->scic_lock); ret = IRQ_HANDLED; } return ret; } irqreturn_t isci_error_isr(int vec, void *data) { struct isci_host *ihost = data; if (sci_controller_error_isr(ihost)) sci_controller_error_handler(ihost); return IRQ_HANDLED; } /** * isci_host_start_complete() - This function is called by the core library, * through the ISCI Module, to indicate controller start status. * @isci_host: This parameter specifies the ISCI host object * @completion_status: This parameter specifies the completion status from the * core library. * */ static void isci_host_start_complete(struct isci_host *ihost, enum sci_status completion_status) { if (completion_status != SCI_SUCCESS) dev_info(&ihost->pdev->dev, "controller start timed out, continuing...\n"); isci_host_change_state(ihost, isci_ready); clear_bit(IHOST_START_PENDING, &ihost->flags); wake_up(&ihost->eventq); } int isci_host_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct sas_ha_struct *ha = SHOST_TO_SAS_HA(shost); struct isci_host *ihost = ha->lldd_ha; if (test_bit(IHOST_START_PENDING, &ihost->flags)) return 0; sas_drain_work(ha); dev_dbg(&ihost->pdev->dev, "%s: ihost->status = %d, time = %ld\n", __func__, isci_host_get_state(ihost), time); return 1; } /** * sci_controller_get_suggested_start_timeout() - This method returns the * suggested sci_controller_start() timeout amount. The user is free to * use any timeout value, but this method provides the suggested minimum * start timeout value. 
The returned value is based upon empirical
 * information determined as a result of interoperability testing.
 * @controller: the handle to the controller object for which to return the
 *    suggested start timeout.
 *
 * This method returns the number of milliseconds for the suggested start
 * operation timeout.
 */
static u32 sci_controller_get_suggested_start_timeout(struct isci_host *ihost)
{
	/* Validate the user supplied parameters. */
	if (!ihost)
		return 0;

	/*
	 * The suggested minimum timeout value for a controller start operation:
	 *
	 *     Signature FIS Timeout
	 *   + Phy Start Timeout
	 *   + Number of Phy Spin Up Intervals
	 *   ---------------------------------
	 *   Number of milliseconds for the controller start operation.
	 *
	 * NOTE: The number of phy spin up intervals will be equivalent
	 *       to the number of phys divided by the number phys allowed
	 *       per interval - 1 (once OEM parameters are supported).
	 *       Currently we assume only 1 phy per interval.
	 */
	return SCIC_SDS_SIGNATURE_FIS_TIMEOUT
		+ SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT
		+ ((SCI_MAX_PHYS - 1) * SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
}

/* Unmask every interrupt source in the SMU interrupt mask register. */
static void sci_controller_enable_interrupts(struct isci_host *ihost)
{
	BUG_ON(ihost->smu_registers == NULL);
	writel(0, &ihost->smu_registers->interrupt_mask);
}

/* Mask all interrupt sources; used before hardware setup/teardown sequences. */
void sci_controller_disable_interrupts(struct isci_host *ihost)
{
	BUG_ON(ihost->smu_registers == NULL);
	writel(0xffffffff, &ihost->smu_registers->interrupt_mask);
}

/*
 * Turn on the hardware port task scheduler by setting the ETM and PTSG
 * enable bits (read-modify-write of the PEG0 PTSG control register).
 */
static void sci_controller_enable_port_task_scheduler(struct isci_host *ihost)
{
	u32 port_task_scheduler_value;

	port_task_scheduler_value =
		readl(&ihost->scu_registers->peg0.ptsg.control);
	port_task_scheduler_value |=
		(SCU_PTSGCR_GEN_BIT(ETM_ENABLE) |
		 SCU_PTSGCR_GEN_BIT(PTSG_ENABLE));
	writel(port_task_scheduler_value,
	       &ihost->scu_registers->peg0.ptsg.control);
}

/*
 * Assign the full range of task contexts [0, task_context_entries - 1] to
 * physical function 0 and enable range checking on the assignment.
 */
static void sci_controller_assign_task_entries(struct isci_host *ihost)
{
	u32 task_assignment;

	/*
	 * Assign all the TCs to function 0
	 * TODO: Do we actually need to read this register to write it back?
	 */
	task_assignment =
		readl(&ihost->smu_registers->task_context_assignment[0]);

	task_assignment |= (SMU_TCA_GEN_VAL(STARTING, 0)) |
		(SMU_TCA_GEN_VAL(ENDING,  ihost->task_context_entries - 1)) |
		(SMU_TCA_GEN_BIT(RANGE_CHECK_ENABLE));

	writel(task_assignment,
	       &ihost->smu_registers->task_context_assignment[0]);
}

/*
 * Program the SMU completion queue: set queue/event limits, reset the get
 * and put pointers to slot 0, enable the queue, and invalidate every entry
 * via the cycle bit so stale entries are never consumed at startup.
 */
static void sci_controller_initialize_completion_queue(struct isci_host *ihost)
{
	u32 index;
	u32 completion_queue_control_value;
	u32 completion_queue_get_value;
	u32 completion_queue_put_value;

	ihost->completion_queue_get = 0;

	completion_queue_control_value =
		(SMU_CQC_QUEUE_LIMIT_SET(SCU_MAX_COMPLETION_QUEUE_ENTRIES - 1) |
		 SMU_CQC_EVENT_LIMIT_SET(SCU_MAX_EVENTS - 1));

	writel(completion_queue_control_value,
	       &ihost->smu_registers->completion_queue_control);

	/* Set the completion queue get pointer and enable the queue */
	completion_queue_get_value = (
		(SMU_CQGR_GEN_VAL(POINTER, 0))
		| (SMU_CQGR_GEN_VAL(EVENT_POINTER, 0))
		| (SMU_CQGR_GEN_BIT(ENABLE))
		| (SMU_CQGR_GEN_BIT(EVENT_ENABLE))
		);

	writel(completion_queue_get_value,
	       &ihost->smu_registers->completion_queue_get);

	/* Set the completion queue put pointer */
	completion_queue_put_value = (
		(SMU_CQPR_GEN_VAL(POINTER, 0))
		| (SMU_CQPR_GEN_VAL(EVENT_POINTER, 0))
		);

	writel(completion_queue_put_value,
	       &ihost->smu_registers->completion_queue_put);

	/* Initialize the cycle bit of the completion queue entries */
	for (index = 0; index < SCU_MAX_COMPLETION_QUEUE_ENTRIES; index++) {
		/*
		 * If get.cycle_bit != completion_queue.cycle_bit
		 * its not a valid completion queue entry
		 * so at system start all entries are invalid
		 */
		ihost->completion_queue[index] = 0x80000000;
	}
}

/*
 * Program the SDMA unsolicited frame queue: write the queue size, then
 * reset and enable the get pointer and reset the put pointer.
 */
static void sci_controller_initialize_unsolicited_frame_queue(struct isci_host *ihost)
{
	u32 frame_queue_control_value;
	u32 frame_queue_get_value;
	u32 frame_queue_put_value;

	/* Write the queue size */
	frame_queue_control_value =
		SCU_UFQC_GEN_VAL(QUEUE_SIZE, SCU_MAX_UNSOLICITED_FRAMES);

	writel(frame_queue_control_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_queue_control);

	/* Setup the get pointer for the unsolicited frame queue */
	frame_queue_get_value = (
		SCU_UFQGP_GEN_VAL(POINTER, 0)
		| SCU_UFQGP_GEN_BIT(ENABLE_BIT)
		);

	writel(frame_queue_get_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
	/* Setup the put pointer for the unsolicited frame queue */
	frame_queue_put_value = SCU_UFQPP_GEN_VAL(POINTER, 0);
	writel(frame_queue_put_value,
	       &ihost->scu_registers->sdma.unsolicited_frame_put_pointer);
}

/*
 * Move the controller state machine from STARTING to READY and notify the
 * waiting host start path.  A no-op in any state other than SCIC_STARTING.
 */
static void sci_controller_transition_to_ready(struct isci_host *ihost,
					       enum sci_status status)
{
	if (ihost->sm.current_state_id == SCIC_STARTING) {
		/*
		 * We move into the ready state, because some of the phys/ports
		 * may be up and operational.
		 */
		sci_change_state(&ihost->sm, SCIC_READY);

		isci_host_start_complete(ihost, status);
	}
}

/*
 * Return true if the phy is in SCI_PHY_STARTING or any of its
 * sub-states, i.e. link bring-up is still in progress.
 */
static bool is_phy_starting(struct isci_phy *iphy)
{
	enum sci_phy_states state;

	state = iphy->sm.current_state_id;
	switch (state) {
	case SCI_PHY_STARTING:
	case SCI_PHY_SUB_INITIAL:
	case SCI_PHY_SUB_AWAIT_SAS_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_IAF_UF:
	case SCI_PHY_SUB_AWAIT_SAS_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_POWER:
	case SCI_PHY_SUB_AWAIT_SATA_PHY_EN:
	case SCI_PHY_SUB_AWAIT_SATA_SPEED_EN:
	case SCI_PHY_SUB_AWAIT_SIG_FIS_UF:
	case SCI_PHY_SUB_FINAL:
		return true;
	default:
		return false;
	}
}

/**
 * sci_controller_start_next_phy - start phy
 * @scic: controller
 *
 * If all the phys have been started, then attempt to transition the
 * controller to the READY state and inform the user
 * (sci_cb_controller_start_complete()).
*/
static enum sci_status sci_controller_start_next_phy(struct isci_host *ihost)
{
	struct sci_oem_params *oem = &ihost->oem_parameters;
	struct isci_phy *iphy;
	enum sci_status status;

	status = SCI_SUCCESS;

	/* A phy start is already outstanding; wait for its timer/completion. */
	if (ihost->phy_startup_timer_pending)
		return status;

	if (ihost->next_phy_to_start >= SCI_MAX_PHYS) {
		/* All phys have been given a start attempt; decide readiness. */
		bool is_controller_start_complete = true;
		u32 state;
		u8 index;

		for (index = 0; index < SCI_MAX_PHYS; index++) {
			iphy = &ihost->phys[index];
			state = iphy->sm.current_state_id;

			/* Phys not assigned to a real port don't gate readiness. */
			if (!phy_get_non_dummy_port(iphy))
				continue;

			/* The controller start operation is complete iff:
			 * - all links have been given an opportunity to start
			 * - have no indication of a connected device
			 * - have an indication of a connected device and it has
			 *   finished the link training process.
			 */
			if ((iphy->is_in_link_training == false && state == SCI_PHY_INITIAL) ||
			    (iphy->is_in_link_training == false && state == SCI_PHY_STOPPED) ||
			    (iphy->is_in_link_training == true && is_phy_starting(iphy)) ||
			    (ihost->port_agent.phy_ready_mask != ihost->port_agent.phy_configured_mask)) {
				is_controller_start_complete = false;
				break;
			}
		}

		/*
		 * The controller has successfully finished the start process.
		 * Inform the SCI Core user and transition to the READY state.
		 */
		if (is_controller_start_complete == true) {
			sci_controller_transition_to_ready(ihost, SCI_SUCCESS);
			sci_del_timer(&ihost->phy_timer);
			ihost->phy_startup_timer_pending = false;
		}
	} else {
		iphy = &ihost->phys[ihost->next_phy_to_start];

		if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
			if (phy_get_non_dummy_port(iphy) == NULL) {
				ihost->next_phy_to_start++;

				/* Caution recursion ahead be forwarned
				 *
				 * The PHY was never added to a PORT in MPC mode
				 * so start the next phy in sequence This phy
				 * will never go link up and will not draw power
				 * the OEM parameters either configured the phy
				 * incorrectly for the PORT or it was never
				 * assigned to a PORT
				 */
				return sci_controller_start_next_phy(ihost);
			}
		}

		status = sci_phy_start(iphy);

		if (status == SCI_SUCCESS) {
			sci_mod_timer(&ihost->phy_timer,
				      SCIC_SDS_CONTROLLER_PHY_START_TIMEOUT);
			ihost->phy_startup_timer_pending = true;
		} else {
			/* NOTE(review): this message says "stop" but we are in
			 * the START path — looks like a copy/paste from
			 * sci_controller_stop_phys(); verify before relying on
			 * the log text.
			 */
			dev_warn(&ihost->pdev->dev,
				 "%s: Controller stop operation failed "
				 "to stop phy %d because of status "
				 "%d.\n",
				 __func__,
				 ihost->phys[ihost->next_phy_to_start].phy_index,
				 status);
		}

		ihost->next_phy_to_start++;
	}

	return status;
}

/*
 * Timer callback fired when a phy start attempt timed out.  Under the
 * controller lock, clear the pending flag and keep kicking the next phy.
 *
 * NOTE(review): the loop retries until SCI_SUCCESS; if
 * sci_controller_start_next_phy() persistently fails this spins with the
 * lock held — presumably failure here is transient; confirm.
 */
static void phy_startup_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), phy_timer);
	unsigned long flags;
	enum sci_status status;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	ihost->phy_startup_timer_pending = false;

	do {
		status = sci_controller_start_next_phy(ihost);
	} while (status != SCI_SUCCESS);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

/* Number of task contexts currently in flight (circular-buffer occupancy). */
static u16 isci_tci_active(struct isci_host *ihost)
{
	return CIRC_CNT(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}

/*
 * Bring the controller from INITIALIZED to STARTING: build the TCi and RNi
 * free pools, program the scheduler and queues with interrupts masked,
 * start all ports, kick off phy startup, and arm the start timeout timer.
 * Returns SCI_FAILURE_INVALID_STATE outside of SCIC_INITIALIZED.
 */
static enum sci_status sci_controller_start(struct isci_host *ihost,
					    u32 timeout)
{
	enum sci_status result;
	u16 index;

	if (ihost->sm.current_state_id != SCIC_INITIALIZED) {
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller start operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	/* Build the TCi free pool */
	BUILD_BUG_ON(SCI_MAX_IO_REQUESTS > 1 << sizeof(ihost->tci_pool[0]) * 8);
	ihost->tci_head = 0;
	ihost->tci_tail = 0;
	for (index = 0; index < ihost->task_context_entries; index++)
		isci_tci_free(ihost, index);

	/* Build the RNi free pool */
	sci_remote_node_table_initialize(&ihost->available_remote_nodes,
					 ihost->remote_node_entries);

	/*
	 * Before anything else lets make sure we will not be
	 * interrupted by the hardware.
	 */
	sci_controller_disable_interrupts(ihost);

	/* Enable the port task scheduler */
	sci_controller_enable_port_task_scheduler(ihost);

	/* Assign all the task entries to ihost physical function */
	sci_controller_assign_task_entries(ihost);

	/* Now initialize the completion queue */
	sci_controller_initialize_completion_queue(ihost);

	/* Initialize the unsolicited frame queue for use */
	sci_controller_initialize_unsolicited_frame_queue(ihost);

	/* Start all of the ports on this controller */
	for (index = 0; index < ihost->logical_port_entries; index++) {
		struct isci_port *iport = &ihost->ports[index];

		result = sci_port_start(iport);
		if (result)
			return result;
	}

	sci_controller_start_next_phy(ihost);

	sci_mod_timer(&ihost->timer, timeout);

	sci_change_state(&ihost->sm, SCIC_STARTING);

	return SCI_SUCCESS;
}

/*
 * libsas scan_start hook: mark start pending, then start the controller
 * and enable interrupts under the controller lock.
 */
void isci_host_scan_start(struct Scsi_Host *shost)
{
	struct isci_host *ihost = SHOST_TO_SAS_HA(shost)->lldd_ha;
	unsigned long tmo = sci_controller_get_suggested_start_timeout(ihost);

	set_bit(IHOST_START_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_start(ihost, tmo);
	sci_controller_enable_interrupts(ihost);
	spin_unlock_irq(&ihost->scic_lock);
}

/* Mark the host stopped, mask interrupts, and wake anyone in wait_for_stop(). */
static void isci_host_stop_complete(struct isci_host *ihost, enum sci_status completion_status)
{
	isci_host_change_state(ihost, isci_stopped);
	sci_controller_disable_interrupts(ihost);
	clear_bit(IHOST_STOP_PENDING, &ihost->flags);
	wake_up(&ihost->eventq);
}

/*
 * Drain the hardware completion queue, then acknowledge the completion
 * interrupt.  The mask write/clear pair re-arms the interrupt line.
 */
static void sci_controller_completion_handler(struct isci_host *ihost)
{
	/* Empty out the completion queue */
	if (sci_controller_completion_queue_has_entries(ihost))
		sci_controller_process_completions(ihost);

	/* Clear the interrupt and enable all interrupts again */
	writel(SMU_ISR_COMPLETION, &ihost->smu_registers->interrupt_status);
	/* Could we write the value of SMU_ISR_COMPLETION? */
	writel(0xFF000000, &ihost->smu_registers->interrupt_mask);
	writel(0, &ihost->smu_registers->interrupt_mask);
}

/**
 * isci_host_completion_routine() - This function is the delayed service
 *    routine that calls the sci core library's completion handler. It's
 *    scheduled as a tasklet from the interrupt service routine when interrupts
 *    in use, or set as the timeout function in polled mode.
 * @data: This parameter specifies the ISCI host object
 *
 */
static void isci_host_completion_routine(unsigned long data)
{
	struct isci_host *ihost = (struct isci_host *)data;
	struct list_head    completed_request_list;
	struct list_head    errored_request_list;
	struct list_head    *current_position;
	struct list_head    *next_position;
	struct isci_request *request;
	struct isci_request *next_request;
	struct sas_task     *task;
	u16 active;

	INIT_LIST_HEAD(&completed_request_list);
	INIT_LIST_HEAD(&errored_request_list);

	spin_lock_irq(&ihost->scic_lock);

	sci_controller_completion_handler(ihost);

	/* Take the lists of completed I/Os from the host. */
	list_splice_init(&ihost->requests_to_complete,
			 &completed_request_list);

	/* Take the list of errored I/Os from the host. */
	list_splice_init(&ihost->requests_to_errorback,
			 &errored_request_list);

	spin_unlock_irq(&ihost->scic_lock);

	/* Process any completions in the lists.
	 */
	list_for_each_safe(current_position, next_position,
			   &completed_request_list) {

		request = list_entry(current_position, struct isci_request,
				     completed_node);
		task = isci_request_access_task(request);

		/* Normal notification (task_done) */
		dev_dbg(&ihost->pdev->dev,
			"%s: Normal - request/task = %p/%p\n",
			__func__,
			request,
			task);

		/* Return the task to libsas */
		if (task != NULL) {

			task->lldd_task = NULL;
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED)) {

				/* If the task is already in the abort path,
				* the task_done callback cannot be called.
				*/
				task->task_done(task);
			}
		}

		spin_lock_irq(&ihost->scic_lock);
		isci_free_tag(ihost, request->io_tag);
		spin_unlock_irq(&ihost->scic_lock);
	}
	list_for_each_entry_safe(request, next_request, &errored_request_list,
				 completed_node) {

		task = isci_request_access_task(request);

		/* Use sas_task_abort */
		dev_warn(&ihost->pdev->dev,
			 "%s: Error - request/task = %p/%p\n",
			 __func__,
			 request,
			 task);

		if (task != NULL) {

			/* Put the task into the abort path if it's not there
			 * already.
			 */
			if (!(task->task_state_flags & SAS_TASK_STATE_ABORTED))
				sas_task_abort(task);

		} else {
			/* This is a case where the request has completed with a
			 * status such that it needed further target servicing,
			 * but the sas_task reference has already been removed
			 * from the request.  Since it was errored, it was not
			 * being aborted, so there is nothing to do except free
			 * it.
			 */

			spin_lock_irq(&ihost->scic_lock);
			/* Remove the request from the remote device's list
			 * of pending requests.
			 */
			list_del_init(&request->dev_node);
			isci_free_tag(ihost, request->io_tag);
			spin_unlock_irq(&ihost->scic_lock);
		}
	}

	/* the coalesence timeout doubles at each encoding step, so
	 * update it based on the ilog2 value of the outstanding requests
	 */
	active = isci_tci_active(ihost);
	writel(SMU_ICC_GEN_VAL(NUMBER, active) |
	       SMU_ICC_GEN_VAL(TIMER, ISCI_COALESCE_BASE + ilog2(active)),
	       &ihost->smu_registers->interrupt_coalesce_control);
}

/**
 * sci_controller_stop() - This method will stop an individual controller
 *    object.This method will invoke the associated user callback upon
 *    completion.  The completion callback is called when the following
 *    conditions are met: -# the method return status is SCI_SUCCESS. -# the
 *    controller has been quiesced. This method will ensure that all IO
 *    requests are quiesced, phys are stopped, and all additional operation by
 *    the hardware is halted.
 * @controller: the handle to the controller object to stop.
 * @timeout: This parameter specifies the number of milliseconds in which the
 *    stop operation should complete.
 *
 * The controller must be in the STARTED or STOPPED state. Indicate if the
 * controller stop method succeeded or failed in some way. SCI_SUCCESS if the
 * stop operation successfully began. SCI_WARNING_ALREADY_IN_STATE if the
 * controller is already in the STOPPED state. SCI_FAILURE_INVALID_STATE if the
 * controller is not either in the STARTED or STOPPED states.
 */
static enum sci_status sci_controller_stop(struct isci_host *ihost, u32 timeout)
{
	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller stop operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	sci_mod_timer(&ihost->timer, timeout);
	sci_change_state(&ihost->sm, SCIC_STOPPING);
	return SCI_SUCCESS;
}

/**
 * sci_controller_reset() - This method will reset the supplied core
 *    controller regardless of the state of said controller.  This operation is
 *    considered destructive.
In other words, all current operations are wiped
 *    out.  No IO completions for outstanding devices occur.  Outstanding IO
 *    requests are not aborted or completed at the actual remote device.
 * @controller: the handle to the controller object to reset.
 *
 * Indicate if the controller reset method succeeded or failed in some way.
 * SCI_SUCCESS if the reset operation successfully started. SCI_FATAL_ERROR if
 * the controller reset operation is unable to complete.
 */
static enum sci_status sci_controller_reset(struct isci_host *ihost)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_RESET:
	case SCIC_READY:
	case SCIC_STOPPED:
	case SCIC_FAILED:
		/*
		 * The reset operation is not a graceful cleanup, just
		 * perform the state transition.
		 */
		sci_change_state(&ihost->sm, SCIC_RESETTING);
		return SCI_SUCCESS;
	default:
		dev_warn(&ihost->pdev->dev,
			 "SCIC Controller reset operation requested in "
			 "invalid state\n");
		return SCI_FAILURE_INVALID_STATE;
	}
}

/*
 * Tear down the host: disable SGPIO outputs, stop all remote devices,
 * request a graceful controller stop and wait for it, then hard-reset the
 * controller and synchronously cancel every outstanding timer.
 */
void isci_host_deinit(struct isci_host *ihost)
{
	int i;

	/* disable output data selects */
	for (i = 0; i < isci_gpio_count(ihost); i++)
		writel(SGPIO_HW_CONTROL,
		       &ihost->scu_registers->peg0.sgpio.output_data_select[i]);

	isci_host_change_state(ihost, isci_stopping);
	for (i = 0; i < SCI_MAX_PORTS; i++) {
		struct isci_port *iport = &ihost->ports[i];
		struct isci_remote_device *idev, *d;

		list_for_each_entry_safe(idev, d, &iport->remote_dev_list, node) {
			if (test_bit(IDEV_ALLOCATED, &idev->flags))
				isci_remote_device_stop(ihost, idev);
		}
	}

	set_bit(IHOST_STOP_PENDING, &ihost->flags);

	spin_lock_irq(&ihost->scic_lock);
	sci_controller_stop(ihost, SCIC_CONTROLLER_STOP_TIMEOUT);
	spin_unlock_irq(&ihost->scic_lock);

	wait_for_stop(ihost);

	/* disable sgpio: where the above wait should give time for the
	 * enclosure to sample the gpios going inactive
	 */
	writel(0, &ihost->scu_registers->peg0.sgpio.interface_control);

	sci_controller_reset(ihost);

	/* Cancel any/all outstanding port timers */
	for (i = 0; i < ihost->logical_port_entries; i++) {
		struct isci_port *iport = &ihost->ports[i];
		del_timer_sync(&iport->timer.timer);
	}

	/* Cancel any/all outstanding phy timers */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct isci_phy *iphy = &ihost->phys[i];
		del_timer_sync(&iphy->sata_timer.timer);
	}

	del_timer_sync(&ihost->port_agent.timer.timer);

	del_timer_sync(&ihost->power_control.timer.timer);

	del_timer_sync(&ihost->timer.timer);

	del_timer_sync(&ihost->phy_timer.timer);
}

/* MMIO base of this controller's SCU register set within the PCI BAR. */
static void __iomem *scu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SCU_BAR * 2] + SCI_SCU_BAR_SIZE * id;
}

/* MMIO base of this controller's SMU register set within the PCI BAR. */
static void __iomem *smu_base(struct isci_host *isci_host)
{
	struct pci_dev *pdev = isci_host->pdev;
	int id = isci_host->id;

	return pcim_iomap_table(pdev)[SCI_SMU_BAR * 2] + SCI_SMU_BAR_SIZE * id;
}

/*
 * Populate the user parameter block from the driver's module parameters
 * (phy_gen, the *_to timeouts and max_concurr_spinup are defined at file
 * scope).  Alignment insertion frequencies are fixed — not exported.
 */
static void isci_user_parameters_get(struct sci_user_parameters *u)
{
	int i;

	for (i = 0; i < SCI_MAX_PHYS; i++) {
		struct sci_phy_user_params *u_phy = &u->phys[i];

		u_phy->max_speed_generation = phy_gen;

		/* we are not exporting these for now */
		u_phy->align_insertion_frequency = 0x7f;
		u_phy->in_connection_align_insertion_frequency = 0xff;
		u_phy->notify_enable_spin_up_insertion_frequency = 0x33;
	}

	u->stp_inactivity_timeout = stp_inactive_to;
	u->ssp_inactivity_timeout = ssp_inactive_to;
	u->stp_max_occupancy_timeout = stp_max_occ_to;
	u->ssp_max_occupancy_timeout = ssp_max_occ_to;
	u->no_outbound_task_timeout = no_outbound_task_to;
	u->max_concurr_spinup = max_concurr_spinup;
}

/* SCIC_INITIAL state entry: immediately advance to SCIC_RESET. */
static void sci_controller_initial_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_change_state(&ihost->sm, SCIC_RESET);
}

/* SCIC_STARTING state exit: the start timeout timer is no longer needed. */
static inline void sci_controller_starting_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}

#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS 853
#define INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS 1280
#define INTERRUPT_COALESCE_TIMEOUT_MAX_US                    2700000
#define INTERRUPT_COALESCE_NUMBER_MAX                        256
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN                7
#define INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX                28

/**
 * sci_controller_set_interrupt_coalescence() - This method allows the user to
 *    configure the interrupt coalescence.
 * @controller: This parameter represents the handle to the controller object
 *    for which its interrupt coalesce register is overridden.
 * @coalesce_number: Used to control the number of entries in the Completion
 *    Queue before an interrupt is generated. If the number of entries exceed
 *    this number, an interrupt will be generated. The valid range of the input
 *    is [0, 256]. A setting of 0 results in coalescing being disabled.
 * @coalesce_timeout: Timeout value in microseconds. The valid range of the
 *    input is [0, 2700000] . A setting of 0 is allowed and results in no
 *    interrupt coalescing timeout.
 *
 * Indicate if the user successfully set the interrupt coalesce parameters.
 * SCI_SUCCESS The user successfully updated the interrutp coalescence.
 * SCI_FAILURE_INVALID_PARAMETER_VALUE The user input value is out of range.
 */
static enum sci_status
sci_controller_set_interrupt_coalescence(struct isci_host *ihost,
					 u32 coalesce_number,
					 u32 coalesce_timeout)
{
	u8 timeout_encode = 0;
	u32 min = 0;
	u32 max = 0;

	/* Check if the input parameters fall in the range. */
	if (coalesce_number > INTERRUPT_COALESCE_NUMBER_MAX)
		return SCI_FAILURE_INVALID_PARAMETER_VALUE;

	/*
	 *  Defined encoding for interrupt coalescing timeout:
	 *              Value   Min      Max     Units
	 *              -----   ---      ---     -----
	 *              0       -        -       Disabled
	 *              1       13.3     20.0    ns
	 *              2       26.7     40.0
	 *              3       53.3     80.0
	 *              4       106.7    160.0
	 *              5       213.3    320.0
	 *              6       426.7    640.0
	 *              7       853.3    1280.0
	 *              8       1.7      2.6     us
	 *              9       3.4      5.1
	 *              10      6.8      10.2
	 *              11      13.7     20.5
	 *              12      27.3     41.0
	 *              13      54.6     81.9
	 *              14      109.2    163.8
	 *              15      218.5    327.7
	 *              16      436.9    655.4
	 *              17      873.8    1310.7
	 *              18      1.7      2.6     ms
	 *              19      3.5      5.2
	 *              20      7.0      10.5
	 *              21      14.0     21.0
	 *              22      28.0     41.9
	 *              23      55.9     83.9
	 *              24      111.8    167.8
	 *              25      223.7    335.5
	 *              26      447.4    671.1
	 *              27      894.8    1342.2
	 *              28      1.8      2.7     s
	 *              Others Undefined */

	/*
	 * Use the table above to decide the encode of interrupt coalescing timeout
	 * value for register writing. */
	if (coalesce_timeout == 0)
		timeout_encode = 0;
	else {
		/* make the timeout value in unit of (10 ns). */
		coalesce_timeout = coalesce_timeout * 100;
		min = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_LOWER_BOUND_NS / 10;
		max = INTERRUPT_COALESCE_TIMEOUT_BASE_RANGE_UPPER_BOUND_NS / 10;

		/* get the encode of timeout for register writing. */
		for (timeout_encode = INTERRUPT_COALESCE_TIMEOUT_ENCODE_MIN;
		      timeout_encode <= INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX;
		      timeout_encode++) {
			if (min <= coalesce_timeout &&  max > coalesce_timeout)
				break;
			else if (coalesce_timeout >= max && coalesce_timeout < min * 2
				 && coalesce_timeout <= INTERRUPT_COALESCE_TIMEOUT_MAX_US * 100) {
				/* Between two encodings: pick the nearer bound. */
				if ((coalesce_timeout - max) < (2 * min - coalesce_timeout))
					break;
				else {
					timeout_encode++;
					break;
				}
			} else {
				/* Each encoding step doubles the range. */
				max = max * 2;
				min = min * 2;
			}
		}

		if (timeout_encode == INTERRUPT_COALESCE_TIMEOUT_ENCODE_MAX + 1)
			/* the value is out of range. */
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;
	}

	writel(SMU_ICC_GEN_VAL(NUMBER, coalesce_number) |
	       SMU_ICC_GEN_VAL(TIMER, timeout_encode),
	       &ihost->smu_registers->interrupt_coalesce_control);

	ihost->interrupt_coalesce_number = (u16)coalesce_number;
	ihost->interrupt_coalesce_timeout = coalesce_timeout / 100;

	return SCI_SUCCESS;
}

/*
 * SCIC_READY entry: enable SCU clock gating for power saving and disable
 * interrupt coalescence (number = 0, timeout = 0) as the default.
 */
static void sci_controller_ready_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);
	u32 val;

	/* enable clock gating for power control of the scu unit */
	val = readl(&ihost->smu_registers->clock_gating_control);
	val &= ~(SMU_CGUCR_GEN_BIT(REGCLK_ENABLE) |
		 SMU_CGUCR_GEN_BIT(TXCLK_ENABLE) |
		 SMU_CGUCR_GEN_BIT(XCLK_ENABLE));
	val |= SMU_CGUCR_GEN_BIT(IDLE_ENABLE);
	writel(val, &ihost->smu_registers->clock_gating_control);

	/* set the default interrupt coalescence number and timeout value. */
	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}

/* SCIC_READY exit: make sure interrupt coalescence is off. */
static void sci_controller_ready_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* disable interrupt coalescence. */
	sci_controller_set_interrupt_coalescence(ihost, 0, 0);
}

/*
 * Stop every phy; SCI_FAILURE_INVALID_STATE from an individual phy is
 * tolerated (already stopped), any other failure marks the whole
 * operation SCI_FAILURE but continues with the remaining phys.
 */
static enum sci_status sci_controller_stop_phys(struct isci_host *ihost)
{
	u32 index;
	enum sci_status status;
	enum sci_status phy_status;

	status = SCI_SUCCESS;

	for (index = 0; index < SCI_MAX_PHYS; index++) {
		phy_status = sci_phy_stop(&ihost->phys[index]);

		if (phy_status != SCI_SUCCESS &&
		    phy_status != SCI_FAILURE_INVALID_STATE) {
			status = SCI_FAILURE;

			dev_warn(&ihost->pdev->dev,
				 "%s: Controller stop operation failed to stop "
				 "phy %d because of status %d.\n",
				 __func__,
				 ihost->phys[index].phy_index, phy_status);
		}
	}

	return status;
}

/* Stop every logical port; same tolerate-invalid-state policy as phys. */
static enum sci_status sci_controller_stop_ports(struct isci_host *ihost)
{
	u32 index;
	enum sci_status port_status;
	enum sci_status status = SCI_SUCCESS;

	for (index = 0; index < ihost->logical_port_entries; index++) {
		struct isci_port *iport = &ihost->ports[index];

		port_status = sci_port_stop(iport);

		if ((port_status != SCI_SUCCESS) &&
		    (port_status != SCI_FAILURE_INVALID_STATE)) {
			status = SCI_FAILURE;

			dev_warn(&ihost->pdev->dev,
				 "%s: Controller stop operation failed to "
				 "stop port %d because of status %d.\n",
				 __func__,
				 iport->logical_port_index,
				 port_status);
		}
	}

	return status;
}

/*
 * Stop every remote device in the device table.
 *
 * NOTE(review): individual failures are only logged; `status` is never
 * downgraded from SCI_SUCCESS (unlike stop_phys/stop_ports) — verify
 * whether that asymmetry is intentional.
 */
static enum sci_status sci_controller_stop_devices(struct isci_host *ihost)
{
	u32 index;
	enum sci_status status;
	enum sci_status device_status;

	status = SCI_SUCCESS;

	for (index = 0; index < ihost->remote_node_entries; index++) {
		if (ihost->device_table[index] != NULL) {
			/* / @todo What timeout value do we want to provide to this request? */
			device_status = sci_remote_device_stop(ihost->device_table[index], 0);

			if ((device_status != SCI_SUCCESS) &&
			    (device_status != SCI_FAILURE_INVALID_STATE)) {
				dev_warn(&ihost->pdev->dev,
					 "%s: Controller stop operation failed "
					 "to stop device 0x%p because of "
					 "status %d.\n",
					 __func__,
					 ihost->device_table[index], device_status);
			}
		}
	}

	return status;
}

/* SCIC_STOPPING entry: quiesce phys, ports and devices. */
static void sci_controller_stopping_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	/* Stop all of the components for this controller */
	sci_controller_stop_phys(ihost);
	sci_controller_stop_ports(ihost);
	sci_controller_stop_devices(ihost);
}

/* SCIC_STOPPING exit: the stop timeout timer is no longer needed. */
static void sci_controller_stopping_state_exit(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_del_timer(&ihost->timer);
}

/*
 * Hard-reset the SCU: mask interrupts, hit soft reset, wait, then clear the
 * completion queue and unsolicited frame queue get pointers.
 */
static void sci_controller_reset_hardware(struct isci_host *ihost)
{
	/* Disable interrupts so we dont take any spurious interrupts */
	sci_controller_disable_interrupts(ihost);

	/* Reset the SCU */
	writel(0xFFFFFFFF, &ihost->smu_registers->soft_reset_control);

	/* Delay for 1ms to before clearing the CQP and UFQPR. */
	udelay(1000);

	/* The write to the CQGR clears the CQP */
	writel(0x00000000, &ihost->smu_registers->completion_queue_get);

	/* The write to the UFQGP clears the UFQPR */
	writel(0, &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}

/* SCIC_RESETTING entry: reset the hardware and fall back to SCIC_RESET. */
static void sci_controller_resetting_state_enter(struct sci_base_state_machine *sm)
{
	struct isci_host *ihost = container_of(sm, typeof(*ihost), sm);

	sci_controller_reset_hardware(ihost);
	sci_change_state(&ihost->sm, SCIC_RESET);
}

/* Controller state machine table: enter/exit handlers per SCIC_* state. */
static const struct sci_base_state sci_controller_state_table[] = {
	[SCIC_INITIAL] = {
		.enter_state = sci_controller_initial_state_enter,
	},
	[SCIC_RESET] = {},
	[SCIC_INITIALIZING] = {},
	[SCIC_INITIALIZED] = {},
	[SCIC_STARTING] = {
		.exit_state  = sci_controller_starting_state_exit,
	},
	[SCIC_READY] = {
		.enter_state = sci_controller_ready_state_enter,
		.exit_state  = sci_controller_ready_state_exit,
	},
	[SCIC_RESETTING] = {
		.enter_state = sci_controller_resetting_state_enter,
	},
	[SCIC_STOPPING] = {
		.enter_state = sci_controller_stopping_state_enter,
		.exit_state = sci_controller_stopping_state_exit,
	},
	[SCIC_STOPPED] = {},
	[SCIC_FAILED] = {}
};

/*
 * Fill in conservative OEM/user parameter defaults; platform firmware /
 * OROM values override these later.
 */
static void sci_controller_set_default_config_parameters(struct isci_host *ihost)
{
	/* these defaults are overridden by the platform / firmware */
	u16 index;

	/* Default to APC mode. */
	ihost->oem_parameters.controller.mode_type = SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE;

	/* Default to APC mode. */
	ihost->oem_parameters.controller.max_concurr_spin_up = 1;

	/* Default to no SSC operation. */
	ihost->oem_parameters.controller.do_enable_ssc = false;

	/* Default to short cables on all phys. */
	ihost->oem_parameters.controller.cable_selection_mask = 0;

	/* Initialize all of the port parameter information to narrow ports. */
	for (index = 0; index < SCI_MAX_PORTS; index++) {
		ihost->oem_parameters.ports[index].phy_mask = 0;
	}

	/* Initialize all of the phy parameter information. */
	for (index = 0; index < SCI_MAX_PHYS; index++) {
		/* Default to 3G (i.e. Gen 2). */
		ihost->user_parameters.phys[index].max_speed_generation =
			SCIC_SDS_PARM_GEN2_SPEED;

		/* the frequencies cannot be 0 */
		ihost->user_parameters.phys[index].align_insertion_frequency = 0x7f;
		ihost->user_parameters.phys[index].in_connection_align_insertion_frequency = 0xff;
		ihost->user_parameters.phys[index].notify_enable_spin_up_insertion_frequency = 0x33;

		/*
		 * Previous Vitesse based expanders had a arbitration issue that
		 * is worked around by having the upper 32-bits of SAS address
		 * with a value greater then the Vitesse company identifier.
		 * Hence, usage of 0x5FCFFFFF. */
		ihost->oem_parameters.phys[index].sas_address.low = 0x1 + ihost->id;
		ihost->oem_parameters.phys[index].sas_address.high = 0x5FCFFFFF;
	}

	ihost->user_parameters.stp_inactivity_timeout = 5;
	ihost->user_parameters.ssp_inactivity_timeout = 5;
	ihost->user_parameters.stp_max_occupancy_timeout = 5;
	ihost->user_parameters.ssp_max_occupancy_timeout = 20;
	ihost->user_parameters.no_outbound_task_timeout = 2;
}

/*
 * Controller start/stop timeout handler: a timeout during STARTING still
 * transitions to READY (with SCI_FAILURE_TIMEOUT); during STOPPING the
 * controller is marked FAILED.
 */
static void controller_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), timer);
	struct sci_base_state_machine *sm = &ihost->sm;
	unsigned long flags;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	if (sm->current_state_id == SCIC_STARTING)
		sci_controller_transition_to_ready(ihost, SCI_FAILURE_TIMEOUT);
	else if (sm->current_state_id == SCIC_STOPPING) {
		sci_change_state(sm, SCIC_FAILED);
		isci_host_stop_complete(ihost, SCI_FAILURE_TIMEOUT);
	} else	/* / @todo Now what do we want to do in this case? */
		dev_err(&ihost->pdev->dev,
			"%s: Controller timer fired when controller was not "
			"in a state being timed.\n",
			__func__);

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

/*
 * Construct the controller object: initialize the state machine, record the
 * register bases, and construct the ports (plus one extra dummy port at
 * index SCI_MAX_PHYS — note the deliberate use of `i` after the loop) and
 * the phys.  Ends by resetting the controller to a known state.
 */
static enum sci_status sci_controller_construct(struct isci_host *ihost,
						void __iomem *scu_base,
						void __iomem *smu_base)
{
	u8 i;

	sci_init_sm(&ihost->sm, sci_controller_state_table, SCIC_INITIAL);

	ihost->scu_registers = scu_base;
	ihost->smu_registers = smu_base;

	sci_port_configuration_agent_construct(&ihost->port_agent);

	/* Construct the ports for this controller */
	for (i = 0; i < SCI_MAX_PORTS; i++)
		sci_port_construct(&ihost->ports[i], i, ihost);
	sci_port_construct(&ihost->ports[i], SCIC_SDS_DUMMY_PORT, ihost);

	/* Construct the phys for this controller */
	for (i = 0; i < SCI_MAX_PHYS; i++) {
		/* Add all the PHYs to the dummy port */
		sci_phy_construct(&ihost->phys[i],
				  &ihost->ports[SCI_MAX_PORTS], i);
	}

	ihost->invalid_phy_mask = 0;

	sci_init_timer(&ihost->timer, controller_timeout);

	/* Initialize the User and OEM parameters to default values. */
	sci_controller_set_default_config_parameters(ihost);

	return sci_controller_reset(ihost);
}

/*
 * Validate OEM parameters (from platform firmware / OROM): per-port phy
 * masks, non-zero SAS addresses, a consistent APC/MPC configuration,
 * spin-up limits, and (for ROM >= 1.1) the SSC spread-level encodings.
 * Returns 0 on success, -EINVAL on any violation.
 */
int sci_oem_parameters_validate(struct sci_oem_params *oem, u8 version)
{
	int i;

	for (i = 0; i < SCI_MAX_PORTS; i++)
		if (oem->ports[i].phy_mask > SCIC_SDS_PARM_PHY_MASK_MAX)
			return -EINVAL;

	for (i = 0; i < SCI_MAX_PHYS; i++)
		if (oem->phys[i].sas_address.high == 0 &&
		    oem->phys[i].sas_address.low == 0)
			return -EINVAL;

	if (oem->controller.mode_type == SCIC_PORT_AUTOMATIC_CONFIGURATION_MODE) {
		/* In APC mode no port may have an explicit phy mask. */
		for (i = 0; i < SCI_MAX_PHYS; i++)
			if (oem->ports[i].phy_mask != 0)
				return -EINVAL;
	} else if (oem->controller.mode_type == SCIC_PORT_MANUAL_CONFIGURATION_MODE) {
		/* In MPC mode at least one phy must be assigned to a port. */
		u8 phy_mask = 0;

		for (i = 0; i < SCI_MAX_PHYS; i++)
			phy_mask |= oem->ports[i].phy_mask;

		if (phy_mask == 0)
			return -EINVAL;
	} else
		return -EINVAL;

	if (oem->controller.max_concurr_spin_up > MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT ||
	    oem->controller.max_concurr_spin_up < 1)
		return -EINVAL;

	if (oem->controller.do_enable_ssc) {
		if (version < ISCI_ROM_VER_1_1 && oem->controller.do_enable_ssc != 1)
			return -EINVAL;

		if (version >= ISCI_ROM_VER_1_1) {
			u8 test = oem->controller.ssc_sata_tx_spread_level;

			switch (test) {
			case 0:
			case 2:
			case 3:
			case 6:
			case 7:
				break;
			default:
				return -EINVAL;
			}

			test = oem->controller.ssc_sas_tx_spread_level;
			if (oem->controller.ssc_sas_tx_type == 0) {
				switch (test) {
				case 0:
				case 2:
				case 3:
					break;
				default:
					return -EINVAL;
				}
			} else if (oem->controller.ssc_sas_tx_type == 1) {
				switch (test) {
				case 0:
				case 3:
				case 6:
					break;
				default:
					return -EINVAL;
				}
			}
		}
	}

	return 0;
}

/*
 * Validate and accept the current OEM parameters.  Only legal before the
 * controller leaves the initialization states.
 */
static enum sci_status sci_oem_parameters_set(struct isci_host *ihost)
{
	u32 state = ihost->sm.current_state_id;
	struct isci_pci_info *pci_info = to_pci_info(ihost->pdev);

	if (state == SCIC_RESET ||
	    state == SCIC_INITIALIZING ||
	    state == SCIC_INITIALIZED) {
		u8 oem_version = pci_info->orom ? pci_info->orom->hdr.version :
			ISCI_ROM_VER_1_0;

		if (sci_oem_parameters_validate(&ihost->oem_parameters,
						oem_version))
			return SCI_FAILURE_INVALID_PARAMETER_VALUE;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INVALID_STATE;
}

/*
 * Effective concurrent spin-up limit: the user (module) parameter wins over
 * the OEM value; both are clamped to MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT.
 */
static u8 max_spin_up(struct isci_host *ihost)
{
	if (ihost->user_parameters.max_concurr_spinup)
		return min_t(u8, ihost->user_parameters.max_concurr_spinup,
			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
	else
		return min_t(u8, ihost->oem_parameters.controller.max_concurr_spin_up,
			     MAX_CONCURRENT_DEVICE_SPIN_UP_COUNT);
}

/*
 * Power-control interval timer: grant power to up to max_spin_up() waiting
 * phys per interval; any sibling phy sharing the same attached SAS address
 * is granted in the same pass (one spin-up covers the whole wide link).
 */
static void power_control_timeout(unsigned long data)
{
	struct sci_timer *tmr = (struct sci_timer *)data;
	struct isci_host *ihost = container_of(tmr, typeof(*ihost), power_control.timer);
	struct isci_phy *iphy;
	unsigned long flags;
	u8 i;

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (tmr->cancel)
		goto done;

	ihost->power_control.phys_granted_power = 0;

	if (ihost->power_control.phys_waiting == 0) {
		ihost->power_control.timer_started = false;
		goto done;
	}

	for (i = 0; i < SCI_MAX_PHYS; i++) {

		if (ihost->power_control.phys_waiting == 0)
			break;

		iphy = ihost->power_control.requesters[i];
		if (iphy == NULL)
			continue;

		if (ihost->power_control.phys_granted_power >= max_spin_up(ihost))
			break;

		ihost->power_control.requesters[i] = NULL;
		ihost->power_control.phys_waiting--;
		ihost->power_control.phys_granted_power++;
		sci_phy_consume_power_handler(iphy);

		if (iphy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS) {
			u8 j;

			for (j = 0; j < SCI_MAX_PHYS; j++) {
				struct isci_phy *requester = ihost->power_control.requesters[j];

				/*
				 * Search the power_control queue to see if there are other phys
				 * attached to the same remote device. If found, take all of
				 * them out of await_sas_power state.
				 */
				if (requester != NULL && requester != iphy) {
					u8 other = memcmp(requester->frame_rcvd.iaf.sas_addr,
							  iphy->frame_rcvd.iaf.sas_addr,
							  sizeof(requester->frame_rcvd.iaf.sas_addr));

					if (other == 0) {
						ihost->power_control.requesters[j] = NULL;
						ihost->power_control.phys_waiting--;
						sci_phy_consume_power_handler(requester);
					}
				}
			}
		}
	}

	/*
	 * It doesn't matter if the power list is empty, we need to start the
	 * timer in case another phy becomes ready.
	 */
	sci_mod_timer(tmr, SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
	ihost->power_control.timer_started = true;

done:
	spin_unlock_irqrestore(&ihost->scic_lock, flags);
}

/*
 * Request spin-up power for @iphy.  Grant immediately when under the
 * concurrent spin-up limit, or when a sibling phy with the same attached
 * SAS address is already READY (the wide link has already been powered);
 * otherwise queue the phy for the next power-control interval.
 */
void sci_controller_power_control_queue_insert(struct isci_host *ihost,
					       struct isci_phy *iphy)
{
	BUG_ON(iphy == NULL);

	if (ihost->power_control.phys_granted_power < max_spin_up(ihost)) {
		ihost->power_control.phys_granted_power++;
		sci_phy_consume_power_handler(iphy);

		/*
		 * stop and start the power_control timer. When the timer fires, the
		 * no_of_phys_granted_power will be set to 0
		 */
		if (ihost->power_control.timer_started)
			sci_del_timer(&ihost->power_control.timer);

		sci_mod_timer(&ihost->power_control.timer,
				 SCIC_SDS_CONTROLLER_POWER_CONTROL_INTERVAL);
		ihost->power_control.timer_started = true;

	} else {
		/*
		 * There are phys, attached to the same sas address as this phy, are
		 * already in READY state, this phy don't need wait.
		 */
		u8 i;
		struct isci_phy *current_phy;

		for (i = 0; i < SCI_MAX_PHYS; i++) {
			u8 other;
			current_phy = &ihost->phys[i];

			other = memcmp(current_phy->frame_rcvd.iaf.sas_addr,
				       iphy->frame_rcvd.iaf.sas_addr,
				       sizeof(current_phy->frame_rcvd.iaf.sas_addr));

			if (current_phy->sm.current_state_id == SCI_PHY_READY &&
			    current_phy->protocol == SCIC_SDS_PHY_PROTOCOL_SAS &&
			    other == 0) {
				sci_phy_consume_power_handler(iphy);
				break;
			}
		}

		if (i == SCI_MAX_PHYS) {
			/* Add the phy in the waiting list */
			ihost->power_control.requesters[iphy->phy_index] = iphy;
			ihost->power_control.phys_waiting++;
		}
	}
}

/* Withdraw a pending power request for @iphy (e.g. on phy stop). */
void sci_controller_power_control_queue_remove(struct isci_host *ihost,
					       struct isci_phy *iphy)
{
	BUG_ON(iphy == NULL);

	if (ihost->power_control.requesters[iphy->phy_index])
		ihost->power_control.phys_waiting--;

	ihost->power_control.requesters[iphy->phy_index] = NULL;
}

/* Cable-selection byte: bit phy = "long", bit (phy+4) = "medium". */
static int is_long_cable(int phy, unsigned char selection_byte)
{
	return !!(selection_byte & (1 << phy));
}

static int is_medium_cable(int phy, unsigned char selection_byte)
{
	return !!(selection_byte & (1 << (phy + 4)));
}

/* Decode the two per-phy bits into an enum cable_selections value (0..3). */
static enum cable_selections decode_selection_byte(
	int phy,
	unsigned char selection_byte)
{
	return ((selection_byte & (1 << phy)) ? 1 : 0)
		+ (selection_byte & (1 << (phy + 4)) ? 2 : 0);
}

/* Selection byte source: module-level override if set, else OEM parameters. */
static unsigned char *to_cable_select(struct isci_host *ihost)
{
	if (is_cable_select_overridden())
		return ((unsigned char *)&cable_selection_override)
			+ ihost->id;
	else
		return &ihost->oem_parameters.controller.cable_selection_mask;
}

enum cable_selections decode_cable_selection(struct isci_host *ihost, int phy)
{
	return decode_selection_byte(phy, *to_cable_select(ihost));
}

/* Human-readable name for a cable selection; out-of-range maps to undefined. */
char *lookup_cable_names(enum cable_selections selection)
{
	static char *cable_names[] = {
		[short_cable]     = "short",
		[long_cable]      = "long",
		[medium_cable]    = "medium",
		[undefined_cable] = "<undefined, assumed long>" /* bit 0==1 */
	};
	return (selection <= undefined_cable) ? cable_names[selection]
					      : cable_names[undefined_cable];
}

#define AFE_REGISTER_WRITE_DELAY 10

static void sci_controller_afe_initialization(struct isci_host *ihost)
{
	struct scu_afe_registers __iomem *afe = &ihost->scu_registers->afe;
	const struct sci_oem_params *oem = &ihost->oem_parameters;
	struct pci_dev *pdev = ihost->pdev;
	u32 afe_status;
	u32 phy_id;
	unsigned char cable_selection_mask = *to_cable_select(ihost);

	/* Clear DFX Status registers */
	writel(0x0081000f, &afe->afe_dfx_master_control0);
	udelay(AFE_REGISTER_WRITE_DELAY);

	if (is_b0(pdev) || is_c0(pdev) || is_c1(pdev)) {
		/* PM Rx Equalization Save, PM SPhy Rx Acknowledgement
		 * Timer, PM Stagger Timer
		 */
		writel(0x0007FFFF, &afe->afe_pmsn_master_control2);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	/* Configure bias currents to normal */
	if (is_a2(pdev))
		writel(0x00005A00, &afe->afe_bias_control);
	else if (is_b0(pdev) || is_c0(pdev))
		writel(0x00005F00, &afe->afe_bias_control);
	else if (is_c1(pdev))
		writel(0x00005500, &afe->afe_bias_control);

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Enable PLL */
	if (is_a2(pdev))
		writel(0x80040908, &afe->afe_pll_control0);
	else if (is_b0(pdev) || is_c0(pdev))
		writel(0x80040A08, &afe->afe_pll_control0);
	else if (is_c1(pdev)) {
		writel(0x80000B08, &afe->afe_pll_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
		writel(0x00000B08, &afe->afe_pll_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
		writel(0x80000B08, &afe->afe_pll_control0);
	}

	udelay(AFE_REGISTER_WRITE_DELAY);

	/* Wait for the PLL to lock */
	do {
		afe_status = readl(&afe->afe_common_block_status);
		udelay(AFE_REGISTER_WRITE_DELAY);
	} while ((afe_status & 0x00001000) == 0);

	if (is_a2(pdev)) {
		/* Shorten SAS SNW lock time (RxLock timer value from 76
		 * us to 50 us)
		 */
		writel(0x7bcc96ad, &afe->afe_pmsn_master_control0);
		udelay(AFE_REGISTER_WRITE_DELAY);
	}

	for (phy_id = 0; phy_id < SCI_MAX_PHYS; phy_id++) {
		struct scu_afe_transceiver *xcvr = &afe->scu_afe_xcvr[phy_id];
		const struct sci_phy_oem_params *oem_phy = &oem->phys[phy_id];
		int
cable_length_long = is_long_cable(phy_id, cable_selection_mask); int cable_length_medium = is_medium_cable(phy_id, cable_selection_mask); if (is_a2(pdev)) { /* All defaults, except the Receive Word * Alignament/Comma Detect Enable....(0xe800) */ writel(0x00004512, &xcvr->afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x0050100F, &xcvr->afe_xcvr_control1); udelay(AFE_REGISTER_WRITE_DELAY); } else if (is_b0(pdev)) { /* Configure transmitter SSC parameters */ writel(0x00030000, &xcvr->afe_tx_ssc_control); udelay(AFE_REGISTER_WRITE_DELAY); } else if (is_c0(pdev)) { /* Configure transmitter SSC parameters */ writel(0x00010202, &xcvr->afe_tx_ssc_control); udelay(AFE_REGISTER_WRITE_DELAY); /* All defaults, except the Receive Word * Alignament/Comma Detect Enable....(0xe800) */ writel(0x00014500, &xcvr->afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); } else if (is_c1(pdev)) { /* Configure transmitter SSC parameters */ writel(0x00010202, &xcvr->afe_tx_ssc_control); udelay(AFE_REGISTER_WRITE_DELAY); /* All defaults, except the Receive Word * Alignament/Comma Detect Enable....(0xe800) */ writel(0x0001C500, &xcvr->afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); } /* Power up TX and RX out from power down (PWRDNTX and * PWRDNRX) & increase TX int & ext bias 20%....(0xe85c) */ if (is_a2(pdev)) writel(0x000003F0, &xcvr->afe_channel_control); else if (is_b0(pdev)) { writel(0x000003D7, &xcvr->afe_channel_control); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x000003D4, &xcvr->afe_channel_control); } else if (is_c0(pdev)) { writel(0x000001E7, &xcvr->afe_channel_control); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x000001E4, &xcvr->afe_channel_control); } else if (is_c1(pdev)) { writel(cable_length_long ? 0x000002F7 : 0x000001F7, &xcvr->afe_channel_control); udelay(AFE_REGISTER_WRITE_DELAY); writel(cable_length_long ? 
0x000002F4 : 0x000001F4, &xcvr->afe_channel_control); } udelay(AFE_REGISTER_WRITE_DELAY); if (is_a2(pdev)) { /* Enable TX equalization (0xe824) */ writel(0x00040000, &xcvr->afe_tx_control); udelay(AFE_REGISTER_WRITE_DELAY); } if (is_a2(pdev) || is_b0(pdev)) /* RDPI=0x0(RX Power On), RXOOBDETPDNC=0x0, * TPD=0x0(TX Power On), RDD=0x0(RX Detect * Enabled) ....(0xe800) */ writel(0x00004100, &xcvr->afe_xcvr_control0); else if (is_c0(pdev)) writel(0x00014100, &xcvr->afe_xcvr_control0); else if (is_c1(pdev)) writel(0x0001C100, &xcvr->afe_xcvr_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Leave DFE/FFE on */ if (is_a2(pdev)) writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); else if (is_b0(pdev)) { writel(0x3F11103F, &xcvr->afe_rx_ssc_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable TX equalization (0xe824) */ writel(0x00040000, &xcvr->afe_tx_control); } else if (is_c0(pdev)) { writel(0x01400C0F, &xcvr->afe_rx_ssc_control1); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x3F6F103F, &xcvr->afe_rx_ssc_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable TX equalization (0xe824) */ writel(0x00040000, &xcvr->afe_tx_control); } else if (is_c1(pdev)) { writel(cable_length_long ? 0x01500C0C : cable_length_medium ? 0x01400C0D : 0x02400C0D, &xcvr->afe_xcvr_control1); udelay(AFE_REGISTER_WRITE_DELAY); writel(0x000003E0, &xcvr->afe_dfx_rx_control1); udelay(AFE_REGISTER_WRITE_DELAY); writel(cable_length_long ? 0x33091C1F : cable_length_medium ? 
0x3315181F : 0x2B17161F, &xcvr->afe_rx_ssc_control0); udelay(AFE_REGISTER_WRITE_DELAY); /* Enable TX equalization (0xe824) */ writel(0x00040000, &xcvr->afe_tx_control); } udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control0, &xcvr->afe_tx_amp_control0); udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control1, &xcvr->afe_tx_amp_control1); udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control2, &xcvr->afe_tx_amp_control2); udelay(AFE_REGISTER_WRITE_DELAY); writel(oem_phy->afe_tx_amp_control3, &xcvr->afe_tx_amp_control3); udelay(AFE_REGISTER_WRITE_DELAY); } /* Transfer control to the PEs */ writel(0x00010f00, &afe->afe_dfx_master_control0); udelay(AFE_REGISTER_WRITE_DELAY); } static void sci_controller_initialize_power_control(struct isci_host *ihost) { sci_init_timer(&ihost->power_control.timer, power_control_timeout); memset(ihost->power_control.requesters, 0, sizeof(ihost->power_control.requesters)); ihost->power_control.phys_waiting = 0; ihost->power_control.phys_granted_power = 0; } static enum sci_status sci_controller_initialize(struct isci_host *ihost) { struct sci_base_state_machine *sm = &ihost->sm; enum sci_status result = SCI_FAILURE; unsigned long i, state, val; if (ihost->sm.current_state_id != SCIC_RESET) { dev_warn(&ihost->pdev->dev, "SCIC Controller initialize operation requested " "in invalid state\n"); return SCI_FAILURE_INVALID_STATE; } sci_change_state(sm, SCIC_INITIALIZING); sci_init_timer(&ihost->phy_timer, phy_startup_timeout); ihost->next_phy_to_start = 0; ihost->phy_startup_timer_pending = false; sci_controller_initialize_power_control(ihost); /* * There is nothing to do here for B0 since we do not have to * program the AFE registers. * / @todo The AFE settings are supposed to be correct for the B0 but * / presently they seem to be wrong. 
*/ sci_controller_afe_initialization(ihost); /* Take the hardware out of reset */ writel(0, &ihost->smu_registers->soft_reset_control); /* * / @todo Provide meaningfull error code for hardware failure * result = SCI_FAILURE_CONTROLLER_HARDWARE; */ for (i = 100; i >= 1; i--) { u32 status; /* Loop until the hardware reports success */ udelay(SCU_CONTEXT_RAM_INIT_STALL_TIME); status = readl(&ihost->smu_registers->control_status); if ((status & SCU_RAM_INIT_COMPLETED) == SCU_RAM_INIT_COMPLETED) break; } if (i == 0) goto out; /* * Determine what are the actaul device capacities that the * hardware will support */ val = readl(&ihost->smu_registers->device_context_capacity); /* Record the smaller of the two capacity values */ ihost->logical_port_entries = min(smu_max_ports(val), SCI_MAX_PORTS); ihost->task_context_entries = min(smu_max_task_contexts(val), SCI_MAX_IO_REQUESTS); ihost->remote_node_entries = min(smu_max_rncs(val), SCI_MAX_REMOTE_DEVICES); /* * Make all PEs that are unassigned match up with the * logical ports */ for (i = 0; i < ihost->logical_port_entries; i++) { struct scu_port_task_scheduler_group_registers __iomem *ptsg = &ihost->scu_registers->peg0.ptsg; writel(i, &ptsg->protocol_engine[i]); } /* Initialize hardware PCI Relaxed ordering in DMA engines */ val = readl(&ihost->scu_registers->sdma.pdma_configuration); val |= SCU_PDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); writel(val, &ihost->scu_registers->sdma.pdma_configuration); val = readl(&ihost->scu_registers->sdma.cdma_configuration); val |= SCU_CDMACR_GEN_BIT(PCI_RELAXED_ORDERING_ENABLE); writel(val, &ihost->scu_registers->sdma.cdma_configuration); /* * Initialize the PHYs before the PORTs because the PHY registers * are accessed during the port initialization. 
*/ for (i = 0; i < SCI_MAX_PHYS; i++) { result = sci_phy_initialize(&ihost->phys[i], &ihost->scu_registers->peg0.pe[i].tl, &ihost->scu_registers->peg0.pe[i].ll); if (result != SCI_SUCCESS) goto out; } for (i = 0; i < ihost->logical_port_entries; i++) { struct isci_port *iport = &ihost->ports[i]; iport->port_task_scheduler_registers = &ihost->scu_registers->peg0.ptsg.port[i]; iport->port_pe_configuration_register = &ihost->scu_registers->peg0.ptsg.protocol_engine[0]; iport->viit_registers = &ihost->scu_registers->peg0.viit[i]; } result = sci_port_configuration_agent_initialize(ihost, &ihost->port_agent); out: /* Advance the controller state machine */ if (result == SCI_SUCCESS) state = SCIC_INITIALIZED; else state = SCIC_FAILED; sci_change_state(sm, state); return result; } static enum sci_status sci_user_parameters_set(struct isci_host *ihost, struct sci_user_parameters *sci_parms) { u32 state = ihost->sm.current_state_id; if (state == SCIC_RESET || state == SCIC_INITIALIZING || state == SCIC_INITIALIZED) { u16 index; /* * Validate the user parameters. If they are not legal, then * return a failure. 
*/ for (index = 0; index < SCI_MAX_PHYS; index++) { struct sci_phy_user_params *user_phy; user_phy = &sci_parms->phys[index]; if (!((user_phy->max_speed_generation <= SCIC_SDS_PARM_MAX_SPEED) && (user_phy->max_speed_generation > SCIC_SDS_PARM_NO_SPEED))) return SCI_FAILURE_INVALID_PARAMETER_VALUE; if (user_phy->in_connection_align_insertion_frequency < 3) return SCI_FAILURE_INVALID_PARAMETER_VALUE; if ((user_phy->in_connection_align_insertion_frequency < 3) || (user_phy->align_insertion_frequency == 0) || (user_phy-> notify_enable_spin_up_insertion_frequency == 0)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; } if ((sci_parms->stp_inactivity_timeout == 0) || (sci_parms->ssp_inactivity_timeout == 0) || (sci_parms->stp_max_occupancy_timeout == 0) || (sci_parms->ssp_max_occupancy_timeout == 0) || (sci_parms->no_outbound_task_timeout == 0)) return SCI_FAILURE_INVALID_PARAMETER_VALUE; memcpy(&ihost->user_parameters, sci_parms, sizeof(*sci_parms)); return SCI_SUCCESS; } return SCI_FAILURE_INVALID_STATE; } static int sci_controller_mem_init(struct isci_host *ihost) { struct device *dev = &ihost->pdev->dev; dma_addr_t dma; size_t size; int err; size = SCU_MAX_COMPLETION_QUEUE_ENTRIES * sizeof(u32); ihost->completion_queue = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); if (!ihost->completion_queue) return -ENOMEM; writel(lower_32_bits(dma), &ihost->smu_registers->completion_queue_lower); writel(upper_32_bits(dma), &ihost->smu_registers->completion_queue_upper); size = ihost->remote_node_entries * sizeof(union scu_remote_node_context); ihost->remote_node_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); if (!ihost->remote_node_context_table) return -ENOMEM; writel(lower_32_bits(dma), &ihost->smu_registers->remote_node_context_lower); writel(upper_32_bits(dma), &ihost->smu_registers->remote_node_context_upper); size = ihost->task_context_entries * sizeof(struct scu_task_context), ihost->task_context_table = dmam_alloc_coherent(dev, size, &dma, GFP_KERNEL); 
if (!ihost->task_context_table) return -ENOMEM; ihost->task_context_dma = dma; writel(lower_32_bits(dma), &ihost->smu_registers->host_task_table_lower); writel(upper_32_bits(dma), &ihost->smu_registers->host_task_table_upper); err = sci_unsolicited_frame_control_construct(ihost); if (err) return err; /* * Inform the silicon as to the location of the UF headers and * address table. */ writel(lower_32_bits(ihost->uf_control.headers.physical_address), &ihost->scu_registers->sdma.uf_header_base_address_lower); writel(upper_32_bits(ihost->uf_control.headers.physical_address), &ihost->scu_registers->sdma.uf_header_base_address_upper); writel(lower_32_bits(ihost->uf_control.address_table.physical_address), &ihost->scu_registers->sdma.uf_address_table_lower); writel(upper_32_bits(ihost->uf_control.address_table.physical_address), &ihost->scu_registers->sdma.uf_address_table_upper); return 0; } int isci_host_init(struct isci_host *ihost) { int err = 0, i; enum sci_status status; struct sci_user_parameters sci_user_params; struct isci_pci_info *pci_info = to_pci_info(ihost->pdev); spin_lock_init(&ihost->state_lock); spin_lock_init(&ihost->scic_lock); init_waitqueue_head(&ihost->eventq); isci_host_change_state(ihost, isci_starting); status = sci_controller_construct(ihost, scu_base(ihost), smu_base(ihost)); if (status != SCI_SUCCESS) { dev_err(&ihost->pdev->dev, "%s: sci_controller_construct failed - status = %x\n", __func__, status); return -ENODEV; } ihost->sas_ha.dev = &ihost->pdev->dev; ihost->sas_ha.lldd_ha = ihost; /* * grab initial values stored in the controller object for OEM and USER * parameters */ isci_user_parameters_get(&sci_user_params); status = sci_user_parameters_set(ihost, &sci_user_params); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, "%s: sci_user_parameters_set failed\n", __func__); return -ENODEV; } /* grab any OEM parameters specified in orom */ if (pci_info->orom) { status = isci_parse_oem_parameters(&ihost->oem_parameters, pci_info->orom, 
ihost->id); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, "parsing firmware oem parameters failed\n"); return -EINVAL; } } status = sci_oem_parameters_set(ihost); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, "%s: sci_oem_parameters_set failed\n", __func__); return -ENODEV; } tasklet_init(&ihost->completion_tasklet, isci_host_completion_routine, (unsigned long)ihost); INIT_LIST_HEAD(&ihost->requests_to_complete); INIT_LIST_HEAD(&ihost->requests_to_errorback); spin_lock_irq(&ihost->scic_lock); status = sci_controller_initialize(ihost); spin_unlock_irq(&ihost->scic_lock); if (status != SCI_SUCCESS) { dev_warn(&ihost->pdev->dev, "%s: sci_controller_initialize failed -" " status = 0x%x\n", __func__, status); return -ENODEV; } err = sci_controller_mem_init(ihost); if (err) return err; for (i = 0; i < SCI_MAX_PORTS; i++) isci_port_init(&ihost->ports[i], ihost, i); for (i = 0; i < SCI_MAX_PHYS; i++) isci_phy_init(&ihost->phys[i], ihost, i); /* enable sgpio */ writel(1, &ihost->scu_registers->peg0.sgpio.interface_control); for (i = 0; i < isci_gpio_count(ihost); i++) writel(SGPIO_HW_CONTROL, &ihost->scu_registers->peg0.sgpio.output_data_select[i]); writel(0, &ihost->scu_registers->peg0.sgpio.vendor_specific_code); for (i = 0; i < SCI_MAX_REMOTE_DEVICES; i++) { struct isci_remote_device *idev = &ihost->devices[i]; INIT_LIST_HEAD(&idev->reqs_in_process); INIT_LIST_HEAD(&idev->node); } for (i = 0; i < SCI_MAX_IO_REQUESTS; i++) { struct isci_request *ireq; dma_addr_t dma; ireq = dmam_alloc_coherent(&ihost->pdev->dev, sizeof(struct isci_request), &dma, GFP_KERNEL); if (!ireq) return -ENOMEM; ireq->tc = &ihost->task_context_table[i]; ireq->owning_controller = ihost; spin_lock_init(&ireq->state_lock); ireq->request_daddr = dma; ireq->isci_host = ihost; ihost->reqs[i] = ireq; } return 0; } void sci_controller_link_up(struct isci_host *ihost, struct isci_port *iport, struct isci_phy *iphy) { switch (ihost->sm.current_state_id) { case SCIC_STARTING: 
/* NOTE(review): this span resumes mid-way through sci_controller_link_up(),
 * inside the SCIC_STARTING case of its switch on the controller state. */
		sci_del_timer(&ihost->phy_timer);
		ihost->phy_startup_timer_pending = false;
		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
						  iport, iphy);
		/* kick off bring-up of the next phy in the startup sequence */
		sci_controller_start_next_phy(ihost);
		break;
	case SCIC_READY:
		ihost->port_agent.link_up_handler(ihost, &ihost->port_agent,
						  iport, iphy);
		break;
	default:
		/* link-up in any other state is unexpected; log and drop */
		dev_dbg(&ihost->pdev->dev,
			"%s: SCIC Controller linkup event from phy %d in "
			"unexpected state %d\n", __func__, iphy->phy_index,
			ihost->sm.current_state_id);
	}
}

/*
 * Forward a phy link-down event to the port agent.  Only acted upon while
 * the controller is STARTING or READY; otherwise the event is logged and
 * discarded.
 */
void sci_controller_link_down(struct isci_host *ihost, struct isci_port *iport,
			      struct isci_phy *iphy)
{
	switch (ihost->sm.current_state_id) {
	case SCIC_STARTING:
	case SCIC_READY:
		ihost->port_agent.link_down_handler(ihost, &ihost->port_agent,
						    iport, iphy);
		break;
	default:
		dev_dbg(&ihost->pdev->dev,
			"%s: SCIC Controller linkdown event from phy %d in "
			"unexpected state %d\n",
			__func__,
			iphy->phy_index,
			ihost->sm.current_state_id);
	}
}

/*
 * Return true if any remote device in the device table is still in the
 * SCI_DEV_STOPPING state (used to decide when the controller may finish
 * its own stop transition).
 */
static bool sci_controller_has_remote_devices_stopping(struct isci_host *ihost)
{
	u32 index;

	for (index = 0; index < ihost->remote_node_entries; index++) {
		if ((ihost->device_table[index] != NULL) &&
		    (ihost->device_table[index]->sm.current_state_id == SCI_DEV_STOPPING))
			return true;
	}

	return false;
}

/*
 * Remote-device-stopped notification.  Only meaningful while the controller
 * itself is SCIC_STOPPING; once the last device has stopped, advance the
 * controller state machine to SCIC_STOPPED.
 */
void sci_controller_remote_device_stopped(struct isci_host *ihost,
					  struct isci_remote_device *idev)
{
	if (ihost->sm.current_state_id != SCIC_STOPPING) {
		dev_dbg(&ihost->pdev->dev,
			"SCIC Controller 0x%p remote device stopped event "
			"from device 0x%p in unexpected state %d\n",
			ihost, idev,
			ihost->sm.current_state_id);
		return;
	}

	if (!sci_controller_has_remote_devices_stopping(ihost))
		sci_change_state(&ihost->sm, SCIC_STOPPED);
}

/* Post a request code to the hardware's post context port (doorbell). */
void sci_controller_post_request(struct isci_host *ihost, u32 request)
{
	dev_dbg(&ihost->pdev->dev, "%s[%d]: %#x\n",
		__func__, ihost->id, request);

	writel(request, &ihost->smu_registers->post_context_port);
}

/*
 * Look up the active request for @io_tag.  The tag encodes a task-context
 * index plus a sequence number; both must match an active request for the
 * lookup to succeed.  Returns NULL when the tag is stale or out of range.
 */
struct isci_request *sci_request_by_tag(struct isci_host *ihost, u16 io_tag)
{
	u16 task_index;
	u16 task_sequence;

	task_index = ISCI_TAG_TCI(io_tag);

	if
/* NOTE(review): continuation of sci_request_by_tag() — the condition below
 * completes the "if" opened at the end of the previous span. */
(task_index < ihost->task_context_entries) {
		struct isci_request *ireq = ihost->reqs[task_index];

		if (test_bit(IREQ_ACTIVE, &ireq->flags)) {
			task_sequence = ISCI_TAG_SEQ(io_tag);

			/* sequence number must match to guard against stale tags */
			if (task_sequence == ihost->io_request_sequence[task_index])
				return ireq;
		}
	}

	return NULL;
}

/**
 * This method allocates a remote node index and reserves the remote node
 * context space for use. This method can fail if there are no more remote
 * node indexes available.
 * @ihost: This is the controller object which contains the set of
 *    free remote node ids
 * @idev: This is the device object which is requesting a remote node
 *    id
 * @node_id: This is the remote node id that is assigned to the device if one
 *    is available
 *
 * enum sci_status SCI_FAILURE_INSUFFICIENT_RESOURCES if there is no remote
 *    node index available.
 */
enum sci_status sci_controller_allocate_remote_node_context(struct isci_host *ihost,
							    struct isci_remote_device *idev,
							    u16 *node_id)
{
	u16 node_index;
	/* wide-port devices may need more than one contiguous node slot */
	u32 remote_node_count = sci_remote_device_node_count(idev);

	node_index = sci_remote_node_table_allocate_remote_node(
		&ihost->available_remote_nodes, remote_node_count
		);

	if (node_index != SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) {
		ihost->device_table[node_index] = idev;

		*node_id = node_index;

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

/*
 * Release the remote node index previously assigned to @idev.  A no-op if
 * @node_id is not currently mapped to @idev (defends against double-free).
 */
void sci_controller_free_remote_node_context(struct isci_host *ihost,
					     struct isci_remote_device *idev,
					     u16 node_id)
{
	u32 remote_node_count = sci_remote_device_node_count(idev);

	if (ihost->device_table[node_id] == idev) {
		ihost->device_table[node_id] = NULL;

		sci_remote_node_table_release_remote_node_index(
			&ihost->available_remote_nodes, remote_node_count, node_id
			);
	}
}

/*
 * Assemble a SATA D2H FIS response: first dword comes from the frame
 * header, the remainder from the frame buffer.
 */
void sci_controller_copy_sata_response(void *response_buffer,
				       void *frame_header,
				       void *frame_buffer)
{
	/* XXX type safety?
*/
	memcpy(response_buffer, frame_header, sizeof(u32));

	memcpy(response_buffer + sizeof(u32),
	       frame_buffer,
	       sizeof(struct dev_to_host_fis) - sizeof(u32));
}

/*
 * Return an unsolicited frame slot to the hardware; if the release advanced
 * the get pointer, publish the new value to the silicon.
 */
void sci_controller_release_frame(struct isci_host *ihost, u32 frame_index)
{
	if (sci_unsolicited_frame_control_release_frame(&ihost->uf_control, frame_index))
		writel(ihost->uf_control.get,
		       &ihost->scu_registers->sdma.unsolicited_frame_get_pointer);
}

/*
 * Push a task-context index back onto the circular tci free pool.
 * The pool size is SCI_MAX_IO_REQUESTS (a power of two, hence the mask).
 */
void isci_tci_free(struct isci_host *ihost, u16 tci)
{
	u16 tail = ihost->tci_tail & (SCI_MAX_IO_REQUESTS-1);

	ihost->tci_pool[tail] = tci;
	ihost->tci_tail = tail + 1;
}

/* Pop the next free task-context index off the circular pool. */
static u16 isci_tci_alloc(struct isci_host *ihost)
{
	u16 head = ihost->tci_head & (SCI_MAX_IO_REQUESTS-1);
	u16 tci = ihost->tci_pool[head];

	ihost->tci_head = head + 1;
	return tci;
}

/* Number of free entries remaining in the tci pool. */
static u16 isci_tci_space(struct isci_host *ihost)
{
	return CIRC_SPACE(ihost->tci_head, ihost->tci_tail, SCI_MAX_IO_REQUESTS);
}

/*
 * Allocate an I/O tag: a free tci combined with its current sequence
 * number.  Returns SCI_CONTROLLER_INVALID_IO_TAG when the pool is empty.
 */
u16 isci_alloc_tag(struct isci_host *ihost)
{
	if (isci_tci_space(ihost)) {
		u16 tci = isci_tci_alloc(ihost);
		u8 seq = ihost->io_request_sequence[tci];

		return ISCI_TAG(seq, tci);
	}

	return SCI_CONTROLLER_INVALID_IO_TAG;
}

/*
 * Free an I/O tag.  The sequence number is bumped so that stale copies of
 * the tag can no longer match; a mismatched sequence means the tag was
 * already freed and is rejected.
 */
enum sci_status isci_free_tag(struct isci_host *ihost, u16 io_tag)
{
	u16 tci = ISCI_TAG_TCI(io_tag);
	u16 seq = ISCI_TAG_SEQ(io_tag);

	/* prevent tail from passing head */
	if (isci_tci_active(ihost) == 0)
		return SCI_FAILURE_INVALID_IO_TAG;

	if (seq == ihost->io_request_sequence[tci]) {
		ihost->io_request_sequence[tci] = (seq+1) & (SCI_MAX_SEQ-1);

		isci_tci_free(ihost, tci);

		return SCI_SUCCESS;
	}
	return SCI_FAILURE_INVALID_IO_TAG;
}

/*
 * Start a core I/O request on @idev.  Valid only in the READY state; on
 * success the request is marked active and its task context is posted to
 * the hardware.
 */
enum sci_status sci_controller_start_io(struct isci_host *ihost,
					struct isci_remote_device *idev,
					struct isci_request *ireq)
{
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "invalid state to start I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_start_io(ihost, idev, ireq);
	if (status != SCI_SUCCESS)
		return status;

	set_bit(IREQ_ACTIVE, &ireq->flags);
	sci_controller_post_request(ihost,
/* (continuation of sci_controller_start_io: finish posting the task context) */
ireq->post_context);
	return SCI_SUCCESS;
}

/*
 * Terminate a started core I/O request.  Valid only in the READY state.
 */
enum sci_status sci_controller_terminate_request(struct isci_host *ihost,
						 struct isci_remote_device *idev,
						 struct isci_request *ireq)
{
	/* terminate an ongoing (i.e. started) core IO request. This does not
	 * abort the IO request at the target, but rather removes the IO
	 * request from the host controller.
	 */
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "invalid state to terminate request\n");
		return SCI_FAILURE_INVALID_STATE;
	}

	status = sci_io_request_terminate(ireq);
	if (status != SCI_SUCCESS)
		return status;

	/*
	 * Utilize the original post context command and or in the
	 * POST_TC_ABORT request sub-type.
	 */
	sci_controller_post_request(ihost,
				    ireq->post_context | SCU_CONTEXT_COMMAND_REQUEST_POST_TC_ABORT);
	return SCI_SUCCESS;
}

/**
 * sci_controller_complete_io() - This method will perform core specific
 *    completion operations for an IO request.  After this method is invoked,
 *    the user should consider the IO request as invalid until it is properly
 *    reused (i.e. re-constructed).
 * @ihost: The handle to the controller object for which to complete the
 *    IO request.
 * @idev: The handle to the remote device object for which to complete
 *    the IO request.
 * @ireq: the handle to the io request object to complete.
*/
enum sci_status sci_controller_complete_io(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   struct isci_request *ireq)
{
	enum sci_status status;
	u16 index;

	switch (ihost->sm.current_state_id) {
	case SCIC_STOPPING:
		/* XXX: Implement this function */
		return SCI_FAILURE;
	case SCIC_READY:
		status = sci_remote_device_complete_io(ihost, idev, ireq);
		if (status != SCI_SUCCESS)
			return status;

		index = ISCI_TAG_TCI(ireq->io_tag);
		clear_bit(IREQ_ACTIVE, &ireq->flags);
		return SCI_SUCCESS;
	default:
		dev_warn(&ihost->pdev->dev, "invalid state to complete I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

}

/*
 * Re-post an already-active request's task context to the hardware.
 * Valid only in the READY state.
 */
enum sci_status sci_controller_continue_io(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev, "invalid state to continue I/O");
		return SCI_FAILURE_INVALID_STATE;
	}

	set_bit(IREQ_ACTIVE, &ireq->flags);
	sci_controller_post_request(ihost, ireq->post_context);
	return SCI_SUCCESS;
}

/**
 * sci_controller_start_task() - This method is called by the SCIC user to
 *    send/start a framework task management request.
 * @controller: the handle to the controller object for which to start the task
 *    management request.
 * @remote_device: the handle to the remote device object for which to start
 *    the task management request.
 * @task_request: the handle to the task request object to start.
*/
enum sci_task_status sci_controller_start_task(struct isci_host *ihost,
					       struct isci_remote_device *idev,
					       struct isci_request *ireq)
{
	enum sci_status status;

	if (ihost->sm.current_state_id != SCIC_READY) {
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC Controller starting task from invalid "
			 "state\n",
			 __func__);
		return SCI_TASK_FAILURE_INVALID_STATE;
	}

	status = sci_remote_device_start_task(ihost, idev, ireq);
	switch (status) {
	case SCI_FAILURE_RESET_DEVICE_PARTIAL_SUCCESS:
		set_bit(IREQ_ACTIVE, &ireq->flags);

		/*
		 * We will let the framework know this task request started
		 * successfully, although the core is still working on starting
		 * the request (to post the tc when the RNC is resumed.)
		 */
		return SCI_SUCCESS;
	case SCI_SUCCESS:
		set_bit(IREQ_ACTIVE, &ireq->flags);
		sci_controller_post_request(ihost, ireq->post_context);
		break;
	default:
		break;
	}

	return status;
}

/*
 * Write SGPIO "general purpose" transmit registers from the libsas-supplied
 * bitstream in @write_data.  Register 0 (TX_GP_CFG) is not supported.
 * Returns > 0 when at least one register was written, 0/negative otherwise.
 */
static int sci_write_gpio_tx_gp(struct isci_host *ihost, u8 reg_index,
				u8 reg_count, u8 *write_data)
{
	int d;

	/* no support for TX_GP_CFG */
	if (reg_index == 0)
		return -EINVAL;

	for (d = 0; d < isci_gpio_count(ihost); d++) {
		u32 val = 0x444; /* all ODx.n clear */
		int i;

		for (i = 0; i < 3; i++) {
			/* Fix: the original initialized 'bit' to
			 * (i << 2) + 2 and immediately overwrote it below —
			 * a dead store with no effect; drop the initializer.
			 */
			int bit;

			bit = try_test_sas_gpio_gp_bit(to_sas_gpio_od(d, i),
						       write_data, reg_index,
						       reg_count);
			if (bit < 0)
				break;

			/* if od is set, clear the 'invert' bit */
			val &= ~(bit << ((i << 2) + 2));
		}

		if (i < 3)
			break;
		writel(val, &ihost->scu_registers->peg0.sgpio.output_data_select[d]);
	}

	/* unless reg_index is > 1, we should always be able to write at
	 * least one register
	 */
	return d > 0;
}

/*
 * libsas sgpio entry point: dispatch on @reg_type; only SAS_GPIO_REG_TX_GP
 * is implemented.  Returns the write count from the helper or -EINVAL.
 */
int isci_gpio_write(struct sas_ha_struct *sas_ha, u8 reg_type, u8 reg_index,
		    u8 reg_count, u8 *write_data)
{
	struct isci_host *ihost = sas_ha->lldd_ha;
	int written;

	switch (reg_type) {
	case SAS_GPIO_REG_TX_GP:
		written = sci_write_gpio_tx_gp(ihost, reg_index, reg_count, write_data);
		break;
	default:
		written = -EINVAL;
	}

	return written;
}
gpl-2.0
sleekmason/cyanogenmod12
drivers/regulator/wm8400-regulator.c
4809
10420
/*
 * Regulator support for WM8400
 *
 * Copyright 2008 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 */

#include <linux/bug.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regulator/driver.h>
#include <linux/mfd/wm8400-private.h>

/*
 * LDO helpers.  The per-LDO control registers are addressed as
 * WM8400_LDO1_CONTROL + rdev_get_id(dev), so the registers for the
 * individual LDOs are laid out contiguously and share bit definitions.
 */

/* Report whether this LDO's enable bit is set. */
static int wm8400_ldo_is_enabled(struct regulator_dev *dev)
{
	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
	u16 val;

	val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev));
	return (val & WM8400_LDO1_ENA) != 0;
}

/* Set the LDO enable bit. */
static int wm8400_ldo_enable(struct regulator_dev *dev)
{
	struct wm8400 *wm8400 = rdev_get_drvdata(dev);

	return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev),
			       WM8400_LDO1_ENA, WM8400_LDO1_ENA);
}

/* Clear the LDO enable bit. */
static int wm8400_ldo_disable(struct regulator_dev *dev)
{
	struct wm8400 *wm8400 = rdev_get_drvdata(dev);

	return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev),
			       WM8400_LDO1_ENA, 0);
}

/*
 * Map a voltage selector to microvolts: selectors 0-14 cover
 * 900mV-1600mV in 50mV steps, selectors 15 and up continue from
 * 1700mV in 100mV steps.
 */
static int wm8400_ldo_list_voltage(struct regulator_dev *dev,
				   unsigned selector)
{
	if (selector > WM8400_LDO1_VSEL_MASK)
		return -EINVAL;

	if (selector < 15)
		return 900000 + (selector * 50000);
	else
		return 1600000 + ((selector - 14) * 100000);
}

/* Read back the current voltage selector from the LDO control register. */
static int wm8400_ldo_get_voltage_sel(struct regulator_dev *dev)
{
	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
	u16 val;

	val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev));
	val &= WM8400_LDO1_VSEL_MASK;

	return val;
}

/*
 * Pick the lowest selector whose voltage is >= min_uV and <= max_uV, write
 * it to the control register and report it via *selector.
 *
 * NOTE(review): for min_uV in the 1600mV..1700mV gap this branch computes
 * selector 15, which wm8400_ldo_list_voltage() maps to 1700mV — the range
 * check below is done against the 50mV formula, so a request such as
 * [1650000, 1650000] is accepted yet yields 1700mV.  TODO confirm against
 * the WM8400 datasheet / upstream regulator framework expectations.
 */
static int wm8400_ldo_set_voltage(struct regulator_dev *dev,
				  int min_uV, int max_uV, unsigned *selector)
{
	struct wm8400 *wm8400 = rdev_get_drvdata(dev);
	u16 val;

	if (min_uV < 900000 || min_uV > 3300000)
		return -EINVAL;

	if (min_uV < 1700000) {
		/* Steps of 50mV from 900mV;  */
		val = DIV_ROUND_UP(min_uV -
900000, 50000); if ((val * 50000) + 900000 > max_uV) return -EINVAL; BUG_ON((val * 50000) + 900000 < min_uV); } else { /* Steps of 100mV from 1700mV */ val = DIV_ROUND_UP(min_uV - 1700000, 100000); if ((val * 100000) + 1700000 > max_uV) return -EINVAL; BUG_ON((val * 100000) + 1700000 < min_uV); val += 0xf; } *selector = val; return wm8400_set_bits(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev), WM8400_LDO1_VSEL_MASK, val); } static struct regulator_ops wm8400_ldo_ops = { .is_enabled = wm8400_ldo_is_enabled, .enable = wm8400_ldo_enable, .disable = wm8400_ldo_disable, .list_voltage = wm8400_ldo_list_voltage, .get_voltage_sel = wm8400_ldo_get_voltage_sel, .set_voltage = wm8400_ldo_set_voltage, }; static int wm8400_dcdc_is_enabled(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; u16 val; val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset); return (val & WM8400_DC1_ENA) != 0; } static int wm8400_dcdc_enable(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_ENA, WM8400_DC1_ENA); } static int wm8400_dcdc_disable(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_ENA, 0); } static int wm8400_dcdc_list_voltage(struct regulator_dev *dev, unsigned selector) { if (selector > WM8400_DC1_VSEL_MASK) return -EINVAL; return 850000 + (selector * 25000); } static int wm8400_dcdc_get_voltage_sel(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); u16 val; int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset); val &= WM8400_DC1_VSEL_MASK; return val; } static int wm8400_dcdc_set_voltage(struct regulator_dev *dev, 
int min_uV, int max_uV, unsigned *selector) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); u16 val; int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; if (min_uV < 850000) return -EINVAL; val = DIV_ROUND_UP(min_uV - 850000, 25000); if (850000 + (25000 * val) > max_uV) return -EINVAL; BUG_ON(850000 + (25000 * val) < min_uV); *selector = val; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_VSEL_MASK, val); } static unsigned int wm8400_dcdc_get_mode(struct regulator_dev *dev) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; u16 data[2]; int ret; ret = wm8400_block_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset, 2, data); if (ret != 0) return 0; /* Datasheet: hibernate */ if (data[0] & WM8400_DC1_SLEEP) return REGULATOR_MODE_STANDBY; /* Datasheet: standby */ if (!(data[0] & WM8400_DC1_ACTIVE)) return REGULATOR_MODE_IDLE; /* Datasheet: active with or without force PWM */ if (data[1] & WM8400_DC1_FRC_PWM) return REGULATOR_MODE_FAST; else return REGULATOR_MODE_NORMAL; } static int wm8400_dcdc_set_mode(struct regulator_dev *dev, unsigned int mode) { struct wm8400 *wm8400 = rdev_get_drvdata(dev); int offset = (rdev_get_id(dev) - WM8400_DCDC1) * 2; int ret; switch (mode) { case REGULATOR_MODE_FAST: /* Datasheet: active with force PWM */ ret = wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_2 + offset, WM8400_DC1_FRC_PWM, WM8400_DC1_FRC_PWM); if (ret != 0) return ret; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP, WM8400_DC1_ACTIVE); case REGULATOR_MODE_NORMAL: /* Datasheet: active */ ret = wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_2 + offset, WM8400_DC1_FRC_PWM, 0); if (ret != 0) return ret; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_ACTIVE | WM8400_DC1_SLEEP, WM8400_DC1_ACTIVE); case REGULATOR_MODE_IDLE: /* Datasheet: standby */ ret = wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_ACTIVE, 
0); if (ret != 0) return ret; return wm8400_set_bits(wm8400, WM8400_DCDC1_CONTROL_1 + offset, WM8400_DC1_SLEEP, 0); default: return -EINVAL; } } static unsigned int wm8400_dcdc_get_optimum_mode(struct regulator_dev *dev, int input_uV, int output_uV, int load_uA) { return REGULATOR_MODE_NORMAL; } static struct regulator_ops wm8400_dcdc_ops = { .is_enabled = wm8400_dcdc_is_enabled, .enable = wm8400_dcdc_enable, .disable = wm8400_dcdc_disable, .list_voltage = wm8400_dcdc_list_voltage, .get_voltage_sel = wm8400_dcdc_get_voltage_sel, .set_voltage = wm8400_dcdc_set_voltage, .get_mode = wm8400_dcdc_get_mode, .set_mode = wm8400_dcdc_set_mode, .get_optimum_mode = wm8400_dcdc_get_optimum_mode, }; static struct regulator_desc regulators[] = { { .name = "LDO1", .id = WM8400_LDO1, .ops = &wm8400_ldo_ops, .n_voltages = WM8400_LDO1_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO2", .id = WM8400_LDO2, .ops = &wm8400_ldo_ops, .n_voltages = WM8400_LDO2_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO3", .id = WM8400_LDO3, .ops = &wm8400_ldo_ops, .n_voltages = WM8400_LDO3_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "LDO4", .id = WM8400_LDO4, .ops = &wm8400_ldo_ops, .n_voltages = WM8400_LDO4_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "DCDC1", .id = WM8400_DCDC1, .ops = &wm8400_dcdc_ops, .n_voltages = WM8400_DC1_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, { .name = "DCDC2", .id = WM8400_DCDC2, .ops = &wm8400_dcdc_ops, .n_voltages = WM8400_DC2_VSEL_MASK + 1, .type = REGULATOR_VOLTAGE, .owner = THIS_MODULE, }, }; static int __devinit wm8400_regulator_probe(struct platform_device *pdev) { struct wm8400 *wm8400 = container_of(pdev, struct wm8400, regulators[pdev->id]); struct regulator_dev *rdev; rdev = regulator_register(&regulators[pdev->id], &pdev->dev, pdev->dev.platform_data, wm8400, NULL); if (IS_ERR(rdev)) return PTR_ERR(rdev); 
platform_set_drvdata(pdev, rdev); return 0; } static int __devexit wm8400_regulator_remove(struct platform_device *pdev) { struct regulator_dev *rdev = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); regulator_unregister(rdev); return 0; } static struct platform_driver wm8400_regulator_driver = { .driver = { .name = "wm8400-regulator", }, .probe = wm8400_regulator_probe, .remove = __devexit_p(wm8400_regulator_remove), }; /** * wm8400_register_regulator - enable software control of a WM8400 regulator * * This function enables software control of a WM8400 regulator via * the regulator API. It is intended to be called from the * platform_init() callback of the WM8400 MFD driver. * * @param dev The WM8400 device to operate on. * @param reg The regulator to control. * @param initdata Regulator initdata for the regulator. */ int wm8400_register_regulator(struct device *dev, int reg, struct regulator_init_data *initdata) { struct wm8400 *wm8400 = dev_get_drvdata(dev); if (wm8400->regulators[reg].name) return -EBUSY; initdata->driver_data = wm8400; wm8400->regulators[reg].name = "wm8400-regulator"; wm8400->regulators[reg].id = reg; wm8400->regulators[reg].dev.parent = dev; wm8400->regulators[reg].dev.platform_data = initdata; return platform_device_register(&wm8400->regulators[reg]); } EXPORT_SYMBOL_GPL(wm8400_register_regulator); static int __init wm8400_regulator_init(void) { return platform_driver_register(&wm8400_regulator_driver); } subsys_initcall(wm8400_regulator_init); static void __exit wm8400_regulator_exit(void) { platform_driver_unregister(&wm8400_regulator_driver); } module_exit(wm8400_regulator_exit); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_DESCRIPTION("WM8400 regulator driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm8400-regulator");
gpl-2.0
joyfish/android_kernel_huawei_msm8928
drivers/staging/omapdrm/omap_crtc.c
4809
6711
/*
 * drivers/staging/omapdrm/omap_crtc.c
 *
 * Copyright (C) 2011 Texas Instruments
 * Author: Rob Clark <rob@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "omap_drv.h"

#include "drm_mode.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"

#define to_omap_crtc(x) container_of(x, struct omap_crtc, base)

struct omap_crtc {
	struct drm_crtc base;
	struct drm_plane *plane;	/* primary plane backing this CRTC */
	const char *name;
	int id;

	/* if there is a pending flip, these will be non-null: */
	struct drm_pending_vblank_event *event;
	struct drm_framebuffer *old_fb;
};

static void omap_crtc_gamma_set(struct drm_crtc *crtc,
		u16 *red, u16 *green, u16 *blue, uint32_t start, uint32_t size)
{
	/* not supported.. at least not yet */
}

static void omap_crtc_destroy(struct drm_crtc *crtc)
{
	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);

	/* tear down the backing plane before freeing ourselves */
	omap_crtc->plane->funcs->destroy(omap_crtc->plane);
	drm_crtc_cleanup(crtc);

	kfree(omap_crtc);
}

static void omap_crtc_dpms(struct drm_crtc *crtc, int mode)
{
	struct omap_drm_private *priv = crtc->dev->dev_private;
	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
	int i;

	WARN_ON(omap_plane_dpms(omap_crtc->plane, mode));

	/* propagate the dpms state to every plane attached to us */
	for (i = 0; i < priv->num_planes; i++) {
		struct drm_plane *plane = priv->planes[i];
		if (plane->crtc == crtc)
			WARN_ON(omap_plane_dpms(plane, mode));
	}
}

static bool omap_crtc_mode_fixup(struct drm_crtc *crtc,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode)
{
	return true;
}

static int omap_crtc_mode_set(struct drm_crtc *crtc,
		struct drm_display_mode *mode,
		struct drm_display_mode *adjusted_mode,
		int x, int y,
		struct drm_framebuffer *old_fb)
{
	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
	struct drm_plane *plane = omap_crtc->plane;

	/* full-screen scanout of crtc->fb; src coords are 16.16 fixed point */
	return omap_plane_mode_set(plane, crtc, crtc->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
}

static void omap_crtc_prepare(struct drm_crtc *crtc)
{
	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
	DBG("%s", omap_crtc->name);
	omap_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}

static void omap_crtc_commit(struct drm_crtc *crtc)
{
	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
	DBG("%s", omap_crtc->name);
	omap_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
}

static int omap_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
		struct drm_framebuffer *old_fb)
{
	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
	struct drm_plane *plane = omap_crtc->plane;
	struct drm_display_mode *mode = &crtc->mode;

	return plane->funcs->update_plane(plane, crtc, crtc->fb,
			0, 0, mode->hdisplay, mode->vdisplay,
			x << 16, y << 16,
			mode->hdisplay << 16, mode->vdisplay << 16);
}

static void omap_crtc_load_lut(struct drm_crtc *crtc)
{
}

static void vblank_cb(void *arg)
{
	static uint32_t sequence = 0;
	struct drm_crtc *crtc = arg;
	struct drm_device *dev = crtc->dev;
	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
	struct drm_pending_vblank_event *event = omap_crtc->event;
	unsigned long flags;
	struct timeval now;

	WARN_ON(!event);

	omap_crtc->event = NULL;

	/* wakeup userspace */
	if (event) {
		do_gettimeofday(&now);

		spin_lock_irqsave(&dev->event_lock, flags);
		/* TODO: we can't yet use the vblank time accounting,
		 * because omapdss lower layer is the one that knows
		 * the irq # and registers the handler, which more or
		 * less defeats how drm_irq works.. for now just fake
		 * the sequence number and use gettimeofday..
		 *
		event->event.sequence = drm_vblank_count_and_time(
				dev, omap_crtc->id, &now);
		 */
		event->event.sequence = sequence++;
		event->event.tv_sec = now.tv_sec;
		event->event.tv_usec = now.tv_usec;
		list_add_tail(&event->base.link,
				&event->base.file_priv->event_list);
		wake_up_interruptible(&event->base.file_priv->event_wait);
		spin_unlock_irqrestore(&dev->event_lock, flags);
	}
}

static void page_flip_cb(void *arg)
{
	struct drm_crtc *crtc = arg;
	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
	struct drm_framebuffer *old_fb = omap_crtc->old_fb;

	omap_crtc->old_fb = NULL;

	omap_crtc_mode_set_base(crtc, crtc->x, crtc->y, old_fb);

	/* really we'd like to setup the callback atomically w/ setting the
	 * new scanout buffer to avoid getting stuck waiting an extra vblank
	 * cycle.. for now go for correctness and later figure out speed..
	 */
	omap_plane_on_endwin(omap_crtc->plane, vblank_cb, crtc);
}

static int omap_crtc_page_flip_locked(struct drm_crtc *crtc,
		struct drm_framebuffer *fb,
		struct drm_pending_vblank_event *event)
{
	struct drm_device *dev = crtc->dev;
	struct omap_crtc *omap_crtc = to_omap_crtc(crtc);

	DBG("%d -> %d", crtc->fb ? crtc->fb->base.id : -1, fb->base.id);

	/* only one flip may be outstanding at a time */
	if (omap_crtc->event) {
		dev_err(dev->dev, "already a pending flip\n");
		return -EINVAL;
	}

	omap_crtc->old_fb = crtc->fb;
	omap_crtc->event = event;
	crtc->fb = fb;

	/* defer the actual flip until the GEM bo is safe to scan out */
	omap_gem_op_async(omap_framebuffer_bo(fb, 0), OMAP_GEM_READ,
			page_flip_cb, crtc);

	return 0;
}

static const struct drm_crtc_funcs omap_crtc_funcs = {
	.gamma_set = omap_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = omap_crtc_destroy,
	.page_flip = omap_crtc_page_flip_locked,
};

static const struct drm_crtc_helper_funcs omap_crtc_helper_funcs = {
	.dpms = omap_crtc_dpms,
	.mode_fixup = omap_crtc_mode_fixup,
	.mode_set = omap_crtc_mode_set,
	.prepare = omap_crtc_prepare,
	.commit = omap_crtc_commit,
	.mode_set_base = omap_crtc_mode_set_base,
	.load_lut = omap_crtc_load_lut,
};

/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
		struct omap_overlay *ovl, int id)
{
	struct drm_crtc *crtc = NULL;
	struct omap_crtc *omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);

	DBG("%s", ovl->name);

	if (!omap_crtc) {
		dev_err(dev->dev, "could not allocate CRTC\n");
		goto fail;
	}

	crtc = &omap_crtc->base;

	omap_crtc->plane = omap_plane_init(dev, ovl, (1 << id), true);
	omap_crtc->plane->crtc = crtc;
	omap_crtc->name = ovl->name;
	omap_crtc->id = id;

	drm_crtc_init(dev, crtc, &omap_crtc_funcs);
	drm_crtc_helper_add(crtc, &omap_crtc_helper_funcs);

	return crtc;

fail:
	if (crtc)
		omap_crtc_destroy(crtc);

	return NULL;
}
gpl-2.0
SimpleAOSP-Kernel/kernel_flo
drivers/net/wireless/wl12xx/acx.c
4809
40200
/* * This file is part of wl1271 * * Copyright (C) 2008-2009 Nokia Corporation * * Contact: Luciano Coelho <luciano.coelho@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include "acx.h" #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include "wl12xx.h" #include "debug.h" #include "wl12xx_80211.h" #include "reg.h" #include "ps.h" int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 wake_up_event, u8 listen_interval) { struct acx_wake_up_condition *wake_up; int ret; wl1271_debug(DEBUG_ACX, "acx wake up conditions (wake_up_event %d listen_interval %d)", wake_up_event, listen_interval); wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL); if (!wake_up) { ret = -ENOMEM; goto out; } wake_up->role_id = wlvif->role_id; wake_up->wake_up_event = wake_up_event; wake_up->listen_interval = listen_interval; ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS, wake_up, sizeof(*wake_up)); if (ret < 0) { wl1271_warning("could not set wake up conditions: %d", ret); goto out; } out: kfree(wake_up); return ret; } int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth) { struct acx_sleep_auth *auth; int ret; wl1271_debug(DEBUG_ACX, "acx sleep auth"); auth = kzalloc(sizeof(*auth), GFP_KERNEL); if (!auth) { ret = -ENOMEM; goto out; } auth->sleep_auth = sleep_auth; ret = wl1271_cmd_configure(wl, 
ACX_SLEEP_AUTH, auth, sizeof(*auth)); out: kfree(auth); return ret; } int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif, int power) { struct acx_current_tx_power *acx; int ret; wl1271_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr %d", power); if (power < 0 || power > 25) return -EINVAL; acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->role_id = wlvif->role_id; acx->current_tx_power = power * 10; ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("configure of tx power failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct acx_feature_config *feature; int ret; wl1271_debug(DEBUG_ACX, "acx feature cfg"); feature = kzalloc(sizeof(*feature), GFP_KERNEL); if (!feature) { ret = -ENOMEM; goto out; } /* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */ feature->role_id = wlvif->role_id; feature->data_flow_options = 0; feature->options = 0; ret = wl1271_cmd_configure(wl, ACX_FEATURE_CFG, feature, sizeof(*feature)); if (ret < 0) { wl1271_error("Couldnt set HW encryption"); goto out; } out: kfree(feature); return ret; } int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map, size_t len) { int ret; wl1271_debug(DEBUG_ACX, "acx mem map"); ret = wl1271_cmd_interrogate(wl, ACX_MEM_MAP, mem_map, len); if (ret < 0) return ret; return 0; } int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl) { struct acx_rx_msdu_lifetime *acx; int ret; wl1271_debug(DEBUG_ACX, "acx rx msdu life time"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->lifetime = cpu_to_le32(wl->conf.rx.rx_msdu_life_time); ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("failed to set rx msdu life time: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif, 
enum acx_slot_type slot_time) { struct acx_slot *slot; int ret; wl1271_debug(DEBUG_ACX, "acx slot"); slot = kzalloc(sizeof(*slot), GFP_KERNEL); if (!slot) { ret = -ENOMEM; goto out; } slot->role_id = wlvif->role_id; slot->wone_index = STATION_WONE_INDEX; slot->slot_time = slot_time; ret = wl1271_cmd_configure(wl, ACX_SLOT, slot, sizeof(*slot)); if (ret < 0) { wl1271_warning("failed to set slot time: %d", ret); goto out; } out: kfree(slot); return ret; } int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool enable, void *mc_list, u32 mc_list_len) { struct acx_dot11_grp_addr_tbl *acx; int ret; wl1271_debug(DEBUG_ACX, "acx group address tbl"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } /* MAC filtering */ acx->role_id = wlvif->role_id; acx->enabled = enable; acx->num_groups = mc_list_len; memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN); ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("failed to set group addr table: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_service_period_timeout(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct acx_rx_timeout *rx_timeout; int ret; rx_timeout = kzalloc(sizeof(*rx_timeout), GFP_KERNEL); if (!rx_timeout) { ret = -ENOMEM; goto out; } wl1271_debug(DEBUG_ACX, "acx service period timeout"); rx_timeout->role_id = wlvif->role_id; rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout); rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout); ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT, rx_timeout, sizeof(*rx_timeout)); if (ret < 0) { wl1271_warning("failed to set service period timeout: %d", ret); goto out; } out: kfree(rx_timeout); return ret; } int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif, u32 rts_threshold) { struct acx_rts_threshold *rts; int ret; /* * If the RTS threshold is not configured or out of range, use 
the * default value. */ if (rts_threshold > IEEE80211_MAX_RTS_THRESHOLD) rts_threshold = wl->conf.rx.rts_threshold; wl1271_debug(DEBUG_ACX, "acx rts threshold: %d", rts_threshold); rts = kzalloc(sizeof(*rts), GFP_KERNEL); if (!rts) { ret = -ENOMEM; goto out; } rts->role_id = wlvif->role_id; rts->threshold = cpu_to_le16((u16)rts_threshold); ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); if (ret < 0) { wl1271_warning("failed to set rts threshold: %d", ret); goto out; } out: kfree(rts); return ret; } int wl1271_acx_dco_itrim_params(struct wl1271 *wl) { struct acx_dco_itrim_params *dco; struct conf_itrim_settings *c = &wl->conf.itrim; int ret; wl1271_debug(DEBUG_ACX, "acx dco itrim parameters"); dco = kzalloc(sizeof(*dco), GFP_KERNEL); if (!dco) { ret = -ENOMEM; goto out; } dco->enable = c->enable; dco->timeout = cpu_to_le32(c->timeout); ret = wl1271_cmd_configure(wl, ACX_SET_DCO_ITRIM_PARAMS, dco, sizeof(*dco)); if (ret < 0) { wl1271_warning("failed to set dco itrim parameters: %d", ret); goto out; } out: kfree(dco); return ret; } int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool enable_filter) { struct acx_beacon_filter_option *beacon_filter = NULL; int ret = 0; wl1271_debug(DEBUG_ACX, "acx beacon filter opt"); if (enable_filter && wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED) goto out; beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL); if (!beacon_filter) { ret = -ENOMEM; goto out; } beacon_filter->role_id = wlvif->role_id; beacon_filter->enable = enable_filter; /* * When set to zero, and the filter is enabled, beacons * without the unicast TIM bit set are dropped. 
*/ beacon_filter->max_num_beacons = 0; ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT, beacon_filter, sizeof(*beacon_filter)); if (ret < 0) { wl1271_warning("failed to set beacon filter opt: %d", ret); goto out; } out: kfree(beacon_filter); return ret; } int wl1271_acx_beacon_filter_table(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct acx_beacon_filter_ie_table *ie_table; int i, idx = 0; int ret; bool vendor_spec = false; wl1271_debug(DEBUG_ACX, "acx beacon filter table"); ie_table = kzalloc(sizeof(*ie_table), GFP_KERNEL); if (!ie_table) { ret = -ENOMEM; goto out; } /* configure default beacon pass-through rules */ ie_table->role_id = wlvif->role_id; ie_table->num_ie = 0; for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) { struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]); ie_table->table[idx++] = r->ie; ie_table->table[idx++] = r->rule; if (r->ie == WLAN_EID_VENDOR_SPECIFIC) { /* only one vendor specific ie allowed */ if (vendor_spec) continue; /* for vendor specific rules configure the additional fields */ memcpy(&(ie_table->table[idx]), r->oui, CONF_BCN_IE_OUI_LEN); idx += CONF_BCN_IE_OUI_LEN; ie_table->table[idx++] = r->type; memcpy(&(ie_table->table[idx]), r->version, CONF_BCN_IE_VER_LEN); idx += CONF_BCN_IE_VER_LEN; vendor_spec = true; } ie_table->num_ie++; } ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE, ie_table, sizeof(*ie_table)); if (ret < 0) { wl1271_warning("failed to set beacon filter table: %d", ret); goto out; } out: kfree(ie_table); return ret; } #define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool enable) { struct acx_conn_monit_params *acx; u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE; u32 timeout = ACX_CONN_MONIT_DISABLE_VALUE; int ret; wl1271_debug(DEBUG_ACX, "acx connection monitor parameters: %s", enable ? 
"enabled" : "disabled"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } if (enable) { threshold = wl->conf.conn.synch_fail_thold; timeout = wl->conf.conn.bss_lose_timeout; } acx->role_id = wlvif->role_id; acx->synch_fail_thold = cpu_to_le32(threshold); acx->bss_lose_timeout = cpu_to_le32(timeout); ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("failed to set connection monitor " "parameters: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable) { struct acx_bt_wlan_coex *pta; int ret; wl1271_debug(DEBUG_ACX, "acx sg enable"); pta = kzalloc(sizeof(*pta), GFP_KERNEL); if (!pta) { ret = -ENOMEM; goto out; } if (enable) pta->enable = wl->conf.sg.state; else pta->enable = CONF_SG_DISABLE; ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta)); if (ret < 0) { wl1271_warning("failed to set softgemini enable: %d", ret); goto out; } out: kfree(pta); return ret; } int wl12xx_acx_sg_cfg(struct wl1271 *wl) { struct acx_bt_wlan_coex_param *param; struct conf_sg_settings *c = &wl->conf.sg; int i, ret; wl1271_debug(DEBUG_ACX, "acx sg cfg"); param = kzalloc(sizeof(*param), GFP_KERNEL); if (!param) { ret = -ENOMEM; goto out; } /* BT-WLAN coext parameters */ for (i = 0; i < CONF_SG_PARAMS_MAX; i++) param->params[i] = cpu_to_le32(c->params[i]); param->param_idx = CONF_SG_PARAMS_ALL; ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); if (ret < 0) { wl1271_warning("failed to set sg config: %d", ret); goto out; } out: kfree(param); return ret; } int wl1271_acx_cca_threshold(struct wl1271 *wl) { struct acx_energy_detection *detection; int ret; wl1271_debug(DEBUG_ACX, "acx cca threshold"); detection = kzalloc(sizeof(*detection), GFP_KERNEL); if (!detection) { ret = -ENOMEM; goto out; } detection->rx_cca_threshold = cpu_to_le16(wl->conf.rx.rx_cca_threshold); detection->tx_energy_detection = 
wl->conf.tx.tx_energy_detection; ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, detection, sizeof(*detection)); if (ret < 0) wl1271_warning("failed to set cca threshold: %d", ret); out: kfree(detection); return ret; } int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct acx_beacon_broadcast *bb; int ret; wl1271_debug(DEBUG_ACX, "acx bcn dtim options"); bb = kzalloc(sizeof(*bb), GFP_KERNEL); if (!bb) { ret = -ENOMEM; goto out; } bb->role_id = wlvif->role_id; bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout); bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout); bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps; bb->ps_poll_threshold = wl->conf.conn.ps_poll_threshold; ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb)); if (ret < 0) { wl1271_warning("failed to set rx config: %d", ret); goto out; } out: kfree(bb); return ret; } int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid) { struct acx_aid *acx_aid; int ret; wl1271_debug(DEBUG_ACX, "acx aid"); acx_aid = kzalloc(sizeof(*acx_aid), GFP_KERNEL); if (!acx_aid) { ret = -ENOMEM; goto out; } acx_aid->role_id = wlvif->role_id; acx_aid->aid = cpu_to_le16(aid); ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); if (ret < 0) { wl1271_warning("failed to set aid: %d", ret); goto out; } out: kfree(acx_aid); return ret; } int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask) { struct acx_event_mask *mask; int ret; wl1271_debug(DEBUG_ACX, "acx event mbox mask"); mask = kzalloc(sizeof(*mask), GFP_KERNEL); if (!mask) { ret = -ENOMEM; goto out; } /* high event mask is unused */ mask->high_event_mask = cpu_to_le32(0xffffffff); mask->event_mask = cpu_to_le32(event_mask); ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK, mask, sizeof(*mask)); if (ret < 0) { wl1271_warning("failed to set acx_event_mbox_mask: %d", ret); goto out; } out: kfree(mask); return ret; } int 
wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif, enum acx_preamble_type preamble) { struct acx_preamble *acx; int ret; wl1271_debug(DEBUG_ACX, "acx_set_preamble"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->role_id = wlvif->role_id; acx->preamble = preamble; ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("Setting of preamble failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif, enum acx_ctsprotect_type ctsprotect) { struct acx_ctsprotect *acx; int ret; wl1271_debug(DEBUG_ACX, "acx_set_ctsprotect"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->role_id = wlvif->role_id; acx->ctsprotect = ctsprotect; ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("Setting of ctsprotect failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats) { int ret; wl1271_debug(DEBUG_ACX, "acx statistics"); ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats, sizeof(*stats)); if (ret < 0) { wl1271_warning("acx statistics failed: %d", ret); return -ENOMEM; } return 0; } int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct acx_rate_policy *acx; struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf; int ret = 0; wl1271_debug(DEBUG_ACX, "acx rate policies"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x", wlvif->basic_rate, wlvif->rate_set); /* configure one basic rate class */ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx); acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate); acx->rate_policy.short_retry_limit = c->short_retry_limit; acx->rate_policy.long_retry_limit = 
c->long_retry_limit; acx->rate_policy.aflags = c->aflags; ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("Setting of rate policies failed: %d", ret); goto out; } /* configure one AP supported rate class */ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx); acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->rate_set); acx->rate_policy.short_retry_limit = c->short_retry_limit; acx->rate_policy.long_retry_limit = c->long_retry_limit; acx->rate_policy.aflags = c->aflags; ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("Setting of rate policies failed: %d", ret); goto out; } /* * configure one rate class for basic p2p operations. * (p2p packets should always go out with OFDM rates, even * if we are currently connected to 11b AP) */ acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx); acx->rate_policy.enabled_rates = cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P); acx->rate_policy.short_retry_limit = c->short_retry_limit; acx->rate_policy.long_retry_limit = c->long_retry_limit; acx->rate_policy.aflags = c->aflags; ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("Setting of rate policies failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c, u8 idx) { struct acx_rate_policy *acx; int ret = 0; wl1271_debug(DEBUG_ACX, "acx ap rate policy %d rates 0x%x", idx, c->enabled_rates); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates); acx->rate_policy.short_retry_limit = c->short_retry_limit; acx->rate_policy.long_retry_limit = c->long_retry_limit; acx->rate_policy.aflags = c->aflags; acx->rate_policy_idx = cpu_to_le32(idx); ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("Setting of ap rate 
policy failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop) { struct acx_ac_cfg *acx; int ret = 0; wl1271_debug(DEBUG_ACX, "acx ac cfg %d cw_ming %d cw_max %d " "aifs %d txop %d", ac, cw_min, cw_max, aifsn, txop); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->role_id = wlvif->role_id; acx->ac = ac; acx->cw_min = cw_min; acx->cw_max = cpu_to_le16(cw_max); acx->aifsn = aifsn; acx->tx_op_limit = cpu_to_le16(txop); ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx ac cfg failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue_id, u8 channel_type, u8 tsid, u8 ps_scheme, u8 ack_policy, u32 apsd_conf0, u32 apsd_conf1) { struct acx_tid_config *acx; int ret = 0; wl1271_debug(DEBUG_ACX, "acx tid config"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->role_id = wlvif->role_id; acx->queue_id = queue_id; acx->channel_type = channel_type; acx->tsid = tsid; acx->ps_scheme = ps_scheme; acx->ack_policy = ack_policy; acx->apsd_conf[0] = cpu_to_le32(apsd_conf0); acx->apsd_conf[1] = cpu_to_le32(apsd_conf1); ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("Setting of tid config failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold) { struct acx_frag_threshold *acx; int ret = 0; /* * If the fragmentation is not configured or out of range, use the * default value. 
*/ if (frag_threshold > IEEE80211_MAX_FRAG_THRESHOLD) frag_threshold = wl->conf.tx.frag_threshold; wl1271_debug(DEBUG_ACX, "acx frag threshold: %d", frag_threshold); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->frag_threshold = cpu_to_le16((u16)frag_threshold); ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("Setting of frag threshold failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_tx_config_options(struct wl1271 *wl) { struct acx_tx_config_options *acx; int ret = 0; wl1271_debug(DEBUG_ACX, "acx tx config options"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->tx_compl_timeout = cpu_to_le16(wl->conf.tx.tx_compl_timeout); acx->tx_compl_threshold = cpu_to_le16(wl->conf.tx.tx_compl_threshold); ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("Setting of tx options failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl12xx_acx_mem_cfg(struct wl1271 *wl) { struct wl12xx_acx_config_memory *mem_conf; struct conf_memory_settings *mem; int ret; wl1271_debug(DEBUG_ACX, "wl1271 mem cfg"); mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL); if (!mem_conf) { ret = -ENOMEM; goto out; } if (wl->chip.id == CHIP_ID_1283_PG20) mem = &wl->conf.mem_wl128x; else mem = &wl->conf.mem_wl127x; /* memory config */ mem_conf->num_stations = mem->num_stations; mem_conf->rx_mem_block_num = mem->rx_block_num; mem_conf->tx_min_mem_block_num = mem->tx_min_block_num; mem_conf->num_ssid_profiles = mem->ssid_profiles; mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS); mem_conf->dyn_mem_enable = mem->dynamic_memory; mem_conf->tx_free_req = mem->min_req_tx_blocks; mem_conf->rx_free_req = mem->min_req_rx_blocks; mem_conf->tx_min = mem->tx_min; mem_conf->fwlog_blocks = wl->conf.fwlog.mem_blocks; ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, sizeof(*mem_conf)); if 
(ret < 0) { wl1271_warning("wl1271 mem config failed: %d", ret); goto out; } out: kfree(mem_conf); return ret; } int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap) { struct wl1271_acx_host_config_bitmap *bitmap_conf; int ret; bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL); if (!bitmap_conf) { ret = -ENOMEM; goto out; } bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap); ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP, bitmap_conf, sizeof(*bitmap_conf)); if (ret < 0) { wl1271_warning("wl1271 bitmap config opt failed: %d", ret); goto out; } out: kfree(bitmap_conf); return ret; } int wl1271_acx_init_mem_config(struct wl1271 *wl) { int ret; wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map), GFP_KERNEL); if (!wl->target_mem_map) { wl1271_error("couldn't allocate target memory map"); return -ENOMEM; } /* we now ask for the firmware built memory map */ ret = wl1271_acx_mem_map(wl, (void *)wl->target_mem_map, sizeof(struct wl1271_acx_mem_map)); if (ret < 0) { wl1271_error("couldn't retrieve firmware memory map"); kfree(wl->target_mem_map); wl->target_mem_map = NULL; return ret; } /* initialize TX block book keeping */ wl->tx_blocks_available = le32_to_cpu(wl->target_mem_map->num_tx_mem_blocks); wl1271_debug(DEBUG_TX, "available tx blocks: %d", wl->tx_blocks_available); return 0; } int wl1271_acx_init_rx_interrupt(struct wl1271 *wl) { struct wl1271_acx_rx_config_opt *rx_conf; int ret; wl1271_debug(DEBUG_ACX, "wl1271 rx interrupt config"); rx_conf = kzalloc(sizeof(*rx_conf), GFP_KERNEL); if (!rx_conf) { ret = -ENOMEM; goto out; } rx_conf->threshold = cpu_to_le16(wl->conf.rx.irq_pkt_threshold); rx_conf->timeout = cpu_to_le16(wl->conf.rx.irq_timeout); rx_conf->mblk_threshold = cpu_to_le16(wl->conf.rx.irq_blk_threshold); rx_conf->queue_type = wl->conf.rx.queue_type; ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf, sizeof(*rx_conf)); if (ret < 0) { wl1271_warning("wl1271 rx config opt failed: %d", ret); goto 
out; } out: kfree(rx_conf); return ret; } int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool enable) { struct wl1271_acx_bet_enable *acx = NULL; int ret = 0; wl1271_debug(DEBUG_ACX, "acx bet enable"); if (enable && wl->conf.conn.bet_enable == CONF_BET_MODE_DISABLE) goto out; acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->role_id = wlvif->role_id; acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE; acx->max_consecutive = wl->conf.conn.bet_max_consecutive; ret = wl1271_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx bet enable failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 enable, __be32 address) { struct wl1271_acx_arp_filter *acx; int ret; wl1271_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->role_id = wlvif->role_id; acx->version = ACX_IPV4_VERSION; acx->enable = enable; if (enable) memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE); ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("failed to set arp ip filter: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_pm_config(struct wl1271 *wl) { struct wl1271_acx_pm_config *acx = NULL; struct conf_pm_config_settings *c = &wl->conf.pm_config; int ret = 0; wl1271_debug(DEBUG_ACX, "acx pm config"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time); acx->host_fast_wakeup_support = c->host_fast_wakeup_support; ret = wl1271_cmd_configure(wl, ACX_PM_CONFIG, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx pm config failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif, 
bool enable) { struct wl1271_acx_keep_alive_mode *acx = NULL; int ret = 0; wl1271_debug(DEBUG_ACX, "acx keep alive mode: %d", enable); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->role_id = wlvif->role_id; acx->enabled = enable; ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx keep alive mode failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 index, u8 tpl_valid) { struct wl1271_acx_keep_alive_config *acx = NULL; int ret = 0; wl1271_debug(DEBUG_ACX, "acx keep alive config"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->role_id = wlvif->role_id; acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval); acx->index = index; acx->tpl_validation = tpl_valid; acx->trigger = ACX_KEEP_ALIVE_NO_TX; ret = wl1271_cmd_configure(wl, ACX_SET_KEEP_ALIVE_CONFIG, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx keep alive config failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool enable, s16 thold, u8 hyst) { struct wl1271_acx_rssi_snr_trigger *acx = NULL; int ret = 0; wl1271_debug(DEBUG_ACX, "acx rssi snr trigger"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } wlvif->last_rssi_event = -1; acx->role_id = wlvif->role_id; acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing); acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON; acx->type = WL1271_ACX_TRIG_TYPE_EDGE; if (enable) acx->enable = WL1271_ACX_TRIG_ENABLE; else acx->enable = WL1271_ACX_TRIG_DISABLE; acx->index = WL1271_ACX_TRIG_IDX_RSSI; acx->dir = WL1271_ACX_TRIG_DIR_BIDIR; acx->threshold = cpu_to_le16(thold); acx->hysteresis = hyst; ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_TRIGGER, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx rssi snr trigger 
setting failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl1271_acx_rssi_snr_avg_weights *acx = NULL; struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger; int ret = 0; wl1271_debug(DEBUG_ACX, "acx rssi snr avg weights"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->role_id = wlvif->role_id; acx->rssi_beacon = c->avg_weight_rssi_beacon; acx->rssi_data = c->avg_weight_rssi_data; acx->snr_beacon = c->avg_weight_snr_beacon; acx->snr_data = c->avg_weight_snr_data; ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_WEIGHTS, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx rssi snr trigger weights failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_set_ht_capabilities(struct wl1271 *wl, struct ieee80211_sta_ht_cap *ht_cap, bool allow_ht_operation, u8 hlid) { struct wl1271_acx_ht_capabilities *acx; int ret = 0; u32 ht_capabilites = 0; wl1271_debug(DEBUG_ACX, "acx ht capabilities setting " "sta supp: %d sta cap: %d", ht_cap->ht_supported, ht_cap->cap); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } if (allow_ht_operation && ht_cap->ht_supported) { /* no need to translate capabilities - use the spec values */ ht_capabilites = ht_cap->cap; /* * this bit is not employed by the spec but only by FW to * indicate peer HT support */ ht_capabilites |= WL12XX_HT_CAP_HT_OPERATION; /* get data from A-MPDU parameters field */ acx->ampdu_max_length = ht_cap->ampdu_factor; acx->ampdu_min_spacing = ht_cap->ampdu_density; } acx->hlid = hlid; acx->ht_capabilites = cpu_to_le32(ht_capabilites); ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx ht capabilities setting failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_set_ht_information(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 ht_operation_mode) { 
struct wl1271_acx_ht_information *acx; int ret = 0; wl1271_debug(DEBUG_ACX, "acx ht information setting"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->role_id = wlvif->role_id; acx->ht_protection = (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION); acx->rifs_mode = 0; acx->gf_protection = !!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); acx->ht_tx_burst_limit = 0; acx->dual_cts_protection = 0; ret = wl1271_cmd_configure(wl, ACX_HT_BSS_OPERATION, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx ht information setting failed: %d", ret); goto out; } out: kfree(acx); return ret; } /* Configure BA session initiator/receiver parameters setting in the FW. */ int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl1271_acx_ba_initiator_policy *acx; int ret; wl1271_debug(DEBUG_ACX, "acx ba initiator policy"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } /* set for the current role */ acx->role_id = wlvif->role_id; acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap; acx->win_size = wl->conf.ht.tx_ba_win_size; acx->inactivity_timeout = wl->conf.ht.inactivity_timeout; ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_INIT_POLICY, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx ba initiator policy failed: %d", ret); goto out; } out: kfree(acx); return ret; } /* setup BA session receiver setting in the FW. 
*/ int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index, u16 ssn, bool enable, u8 peer_hlid) { struct wl1271_acx_ba_receiver_setup *acx; int ret; wl1271_debug(DEBUG_ACX, "acx ba receiver session setting"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->hlid = peer_hlid; acx->tid = tid_index; acx->enable = enable; acx->win_size = wl->conf.ht.rx_ba_win_size; acx->ssn = ssn; ret = wl1271_cmd_configure(wl, ACX_BA_SESSION_RX_SETUP, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx ba receiver session failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif, u64 *mactime) { struct wl12xx_acx_fw_tsf_information *tsf_info; int ret; tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL); if (!tsf_info) { ret = -ENOMEM; goto out; } tsf_info->role_id = wlvif->role_id; ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO, tsf_info, sizeof(*tsf_info)); if (ret < 0) { wl1271_warning("acx tsf info interrogate failed"); goto out; } *mactime = le32_to_cpu(tsf_info->current_tsf_low) | ((u64) le32_to_cpu(tsf_info->current_tsf_high) << 32); out: kfree(tsf_info); return ret; } int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool enable) { struct wl1271_acx_ps_rx_streaming *rx_streaming; u32 conf_queues, enable_queues; int i, ret = 0; wl1271_debug(DEBUG_ACX, "acx ps rx streaming"); rx_streaming = kzalloc(sizeof(*rx_streaming), GFP_KERNEL); if (!rx_streaming) { ret = -ENOMEM; goto out; } conf_queues = wl->conf.rx_streaming.queues; if (enable) enable_queues = conf_queues; else enable_queues = 0; for (i = 0; i < 8; i++) { /* * Skip non-changed queues, to avoid redundant acxs. * this check assumes conf.rx_streaming.queues can't * be changed while rx_streaming is enabled. 
*/ if (!(conf_queues & BIT(i))) continue; rx_streaming->role_id = wlvif->role_id; rx_streaming->tid = i; rx_streaming->enable = enable_queues & BIT(i); rx_streaming->period = wl->conf.rx_streaming.interval; rx_streaming->timeout = wl->conf.rx_streaming.interval; ret = wl1271_cmd_configure(wl, ACX_PS_RX_STREAMING, rx_streaming, sizeof(*rx_streaming)); if (ret < 0) { wl1271_warning("acx ps rx streaming failed: %d", ret); goto out; } } out: kfree(rx_streaming); return ret; } int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl1271_acx_ap_max_tx_retry *acx = NULL; int ret; wl1271_debug(DEBUG_ACX, "acx ap max tx retry"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) return -ENOMEM; acx->role_id = wlvif->role_id; acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries); ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx ap max tx retry failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif) { struct wl1271_acx_config_ps *config_ps; int ret; wl1271_debug(DEBUG_ACX, "acx config ps"); config_ps = kzalloc(sizeof(*config_ps), GFP_KERNEL); if (!config_ps) { ret = -ENOMEM; goto out; } config_ps->exit_retries = wl->conf.conn.psm_exit_retries; config_ps->enter_retries = wl->conf.conn.psm_entry_retries; config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate); ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps, sizeof(*config_ps)); if (ret < 0) { wl1271_warning("acx config ps failed: %d", ret); goto out; } out: kfree(config_ps); return ret; } int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr) { struct wl1271_acx_inconnection_sta *acx = NULL; int ret; wl1271_debug(DEBUG_ACX, "acx set inconnaction sta %pM", addr); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) return -ENOMEM; memcpy(acx->addr, addr, ETH_ALEN); ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST, acx, 
sizeof(*acx)); if (ret < 0) { wl1271_warning("acx set inconnaction sta failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl1271_acx_fm_coex(struct wl1271 *wl) { struct wl1271_acx_fm_coex *acx; int ret; wl1271_debug(DEBUG_ACX, "acx fm coex setting"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->enable = wl->conf.fm_coex.enable; acx->swallow_period = wl->conf.fm_coex.swallow_period; acx->n_divider_fref_set_1 = wl->conf.fm_coex.n_divider_fref_set_1; acx->n_divider_fref_set_2 = wl->conf.fm_coex.n_divider_fref_set_2; acx->m_divider_fref_set_1 = cpu_to_le16(wl->conf.fm_coex.m_divider_fref_set_1); acx->m_divider_fref_set_2 = cpu_to_le16(wl->conf.fm_coex.m_divider_fref_set_2); acx->coex_pll_stabilization_time = cpu_to_le32(wl->conf.fm_coex.coex_pll_stabilization_time); acx->ldo_stabilization_time = cpu_to_le16(wl->conf.fm_coex.ldo_stabilization_time); acx->fm_disturbed_band_margin = wl->conf.fm_coex.fm_disturbed_band_margin; acx->swallow_clk_diff = wl->conf.fm_coex.swallow_clk_diff; ret = wl1271_cmd_configure(wl, ACX_FM_COEX_CFG, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx fm coex setting failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl) { struct wl12xx_acx_set_rate_mgmt_params *acx = NULL; struct conf_rate_policy_settings *conf = &wl->conf.rate; int ret; wl1271_debug(DEBUG_ACX, "acx set rate mgmt params"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) return -ENOMEM; acx->index = ACX_RATE_MGMT_ALL_PARAMS; acx->rate_retry_score = cpu_to_le16(conf->rate_retry_score); acx->per_add = cpu_to_le16(conf->per_add); acx->per_th1 = cpu_to_le16(conf->per_th1); acx->per_th2 = cpu_to_le16(conf->per_th2); acx->max_per = cpu_to_le16(conf->max_per); acx->inverse_curiosity_factor = conf->inverse_curiosity_factor; acx->tx_fail_low_th = conf->tx_fail_low_th; acx->tx_fail_high_th = conf->tx_fail_high_th; acx->per_alpha_shift = conf->per_alpha_shift; 
acx->per_add_shift = conf->per_add_shift; acx->per_beta1_shift = conf->per_beta1_shift; acx->per_beta2_shift = conf->per_beta2_shift; acx->rate_check_up = conf->rate_check_up; acx->rate_check_down = conf->rate_check_down; memcpy(acx->rate_retry_policy, conf->rate_retry_policy, sizeof(acx->rate_retry_policy)); ret = wl1271_cmd_configure(wl, ACX_SET_RATE_MGMT_PARAMS, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx set rate mgmt params failed: %d", ret); goto out; } out: kfree(acx); return ret; } int wl12xx_acx_config_hangover(struct wl1271 *wl) { struct wl12xx_acx_config_hangover *acx; struct conf_hangover_settings *conf = &wl->conf.hangover; int ret; wl1271_debug(DEBUG_ACX, "acx config hangover"); acx = kzalloc(sizeof(*acx), GFP_KERNEL); if (!acx) { ret = -ENOMEM; goto out; } acx->recover_time = cpu_to_le32(conf->recover_time); acx->hangover_period = conf->hangover_period; acx->dynamic_mode = conf->dynamic_mode; acx->early_termination_mode = conf->early_termination_mode; acx->max_period = conf->max_period; acx->min_period = conf->min_period; acx->increase_delta = conf->increase_delta; acx->decrease_delta = conf->decrease_delta; acx->quiet_time = conf->quiet_time; acx->increase_time = conf->increase_time; acx->window_size = acx->window_size; ret = wl1271_cmd_configure(wl, ACX_CONFIG_HANGOVER, acx, sizeof(*acx)); if (ret < 0) { wl1271_warning("acx config hangover failed: %d", ret); goto out; } out: kfree(acx); return ret; }
gpl-2.0
yoyoliyang/linux-sunxi
drivers/spi/spi-fsl-espi.c
4809
18156
/* * Freescale eSPI controller driver. * * Copyright 2010 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/spi/spi.h> #include <linux/platform_device.h> #include <linux/fsl_devices.h> #include <linux/mm.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_spi.h> #include <linux/interrupt.h> #include <linux/err.h> #include <sysdev/fsl_soc.h> #include "spi-fsl-lib.h" /* eSPI Controller registers */ struct fsl_espi_reg { __be32 mode; /* 0x000 - eSPI mode register */ __be32 event; /* 0x004 - eSPI event register */ __be32 mask; /* 0x008 - eSPI mask register */ __be32 command; /* 0x00c - eSPI command register */ __be32 transmit; /* 0x010 - eSPI transmit FIFO access register*/ __be32 receive; /* 0x014 - eSPI receive FIFO access register*/ u8 res[8]; /* 0x018 - 0x01c reserved */ __be32 csmode[4]; /* 0x020 - 0x02c eSPI cs mode register */ }; struct fsl_espi_transfer { const void *tx_buf; void *rx_buf; unsigned len; unsigned n_tx; unsigned n_rx; unsigned actual_length; int status; }; /* eSPI Controller mode register definitions */ #define SPMODE_ENABLE (1 << 31) #define SPMODE_LOOP (1 << 30) #define SPMODE_TXTHR(x) ((x) << 8) #define SPMODE_RXTHR(x) ((x) << 0) /* eSPI Controller CS mode register definitions */ #define CSMODE_CI_INACTIVEHIGH (1 << 31) #define CSMODE_CP_BEGIN_EDGECLK (1 << 30) #define CSMODE_REV (1 << 29) #define CSMODE_DIV16 (1 << 28) #define CSMODE_PM(x) ((x) << 24) #define CSMODE_POL_1 (1 << 20) #define CSMODE_LEN(x) ((x) << 16) #define CSMODE_BEF(x) ((x) << 12) #define CSMODE_AFT(x) ((x) << 8) #define CSMODE_CG(x) ((x) << 3) /* Default mode/csmode for eSPI controller */ #define SPMODE_INIT_VAL (SPMODE_TXTHR(4) | 
SPMODE_RXTHR(3)) #define CSMODE_INIT_VAL (CSMODE_POL_1 | CSMODE_BEF(0) \ | CSMODE_AFT(0) | CSMODE_CG(1)) /* SPIE register values */ #define SPIE_NE 0x00000200 /* Not empty */ #define SPIE_NF 0x00000100 /* Not full */ /* SPIM register values */ #define SPIM_NE 0x00000200 /* Not empty */ #define SPIM_NF 0x00000100 /* Not full */ #define SPIE_RXCNT(reg) ((reg >> 24) & 0x3F) #define SPIE_TXCNT(reg) ((reg >> 16) & 0x3F) /* SPCOM register values */ #define SPCOM_CS(x) ((x) << 30) #define SPCOM_TRANLEN(x) ((x) << 0) #define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */ static void fsl_espi_change_mode(struct spi_device *spi) { struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); struct spi_mpc8xxx_cs *cs = spi->controller_state; struct fsl_espi_reg *reg_base = mspi->reg_base; __be32 __iomem *mode = &reg_base->csmode[spi->chip_select]; __be32 __iomem *espi_mode = &reg_base->mode; u32 tmp; unsigned long flags; /* Turn off IRQs locally to minimize time that SPI is disabled. */ local_irq_save(flags); /* Turn off SPI unit prior changing mode */ tmp = mpc8xxx_spi_read_reg(espi_mode); mpc8xxx_spi_write_reg(espi_mode, tmp & ~SPMODE_ENABLE); mpc8xxx_spi_write_reg(mode, cs->hw_mode); mpc8xxx_spi_write_reg(espi_mode, tmp); local_irq_restore(flags); } static u32 fsl_espi_tx_buf_lsb(struct mpc8xxx_spi *mpc8xxx_spi) { u32 data; u16 data_h; u16 data_l; const u32 *tx = mpc8xxx_spi->tx; if (!tx) return 0; data = *tx++ << mpc8xxx_spi->tx_shift; data_l = data & 0xffff; data_h = (data >> 16) & 0xffff; swab16s(&data_l); swab16s(&data_h); data = data_h | data_l; mpc8xxx_spi->tx = tx; return data; } static int fsl_espi_setup_transfer(struct spi_device *spi, struct spi_transfer *t) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); int bits_per_word = 0; u8 pm; u32 hz = 0; struct spi_mpc8xxx_cs *cs = spi->controller_state; if (t) { bits_per_word = t->bits_per_word; hz = t->speed_hz; } /* spi_transfer level calls that work per-word */ if (!bits_per_word) 
bits_per_word = spi->bits_per_word; /* Make sure its a bit width we support [4..16] */ if ((bits_per_word < 4) || (bits_per_word > 16)) return -EINVAL; if (!hz) hz = spi->max_speed_hz; cs->rx_shift = 0; cs->tx_shift = 0; cs->get_rx = mpc8xxx_spi_rx_buf_u32; cs->get_tx = mpc8xxx_spi_tx_buf_u32; if (bits_per_word <= 8) { cs->rx_shift = 8 - bits_per_word; } else if (bits_per_word <= 16) { cs->rx_shift = 16 - bits_per_word; if (spi->mode & SPI_LSB_FIRST) cs->get_tx = fsl_espi_tx_buf_lsb; } else { return -EINVAL; } mpc8xxx_spi->rx_shift = cs->rx_shift; mpc8xxx_spi->tx_shift = cs->tx_shift; mpc8xxx_spi->get_rx = cs->get_rx; mpc8xxx_spi->get_tx = cs->get_tx; bits_per_word = bits_per_word - 1; /* mask out bits we are going to set */ cs->hw_mode &= ~(CSMODE_LEN(0xF) | CSMODE_DIV16 | CSMODE_PM(0xF)); cs->hw_mode |= CSMODE_LEN(bits_per_word); if ((mpc8xxx_spi->spibrg / hz) > 64) { cs->hw_mode |= CSMODE_DIV16; pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 16 * 4); WARN_ONCE(pm > 33, "%s: Requested speed is too low: %d Hz. 
" "Will use %d Hz instead.\n", dev_name(&spi->dev), hz, mpc8xxx_spi->spibrg / (4 * 16 * (32 + 1))); if (pm > 33) pm = 33; } else { pm = DIV_ROUND_UP(mpc8xxx_spi->spibrg, hz * 4); } if (pm) pm--; if (pm < 2) pm = 2; cs->hw_mode |= CSMODE_PM(pm); fsl_espi_change_mode(spi); return 0; } static int fsl_espi_cpu_bufs(struct mpc8xxx_spi *mspi, struct spi_transfer *t, unsigned int len) { u32 word; struct fsl_espi_reg *reg_base = mspi->reg_base; mspi->count = len; /* enable rx ints */ mpc8xxx_spi_write_reg(&reg_base->mask, SPIM_NE); /* transmit word */ word = mspi->get_tx(mspi); mpc8xxx_spi_write_reg(&reg_base->transmit, word); return 0; } static int fsl_espi_bufs(struct spi_device *spi, struct spi_transfer *t) { struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master); struct fsl_espi_reg *reg_base = mpc8xxx_spi->reg_base; unsigned int len = t->len; u8 bits_per_word; int ret; bits_per_word = spi->bits_per_word; if (t->bits_per_word) bits_per_word = t->bits_per_word; mpc8xxx_spi->len = t->len; len = roundup(len, 4) / 4; mpc8xxx_spi->tx = t->tx_buf; mpc8xxx_spi->rx = t->rx_buf; INIT_COMPLETION(mpc8xxx_spi->done); /* Set SPCOM[CS] and SPCOM[TRANLEN] field */ if ((t->len - 1) > SPCOM_TRANLEN_MAX) { dev_err(mpc8xxx_spi->dev, "Transaction length (%d)" " beyond the SPCOM[TRANLEN] field\n", t->len); return -EINVAL; } mpc8xxx_spi_write_reg(&reg_base->command, (SPCOM_CS(spi->chip_select) | SPCOM_TRANLEN(t->len - 1))); ret = fsl_espi_cpu_bufs(mpc8xxx_spi, t, len); if (ret) return ret; wait_for_completion(&mpc8xxx_spi->done); /* disable rx ints */ mpc8xxx_spi_write_reg(&reg_base->mask, 0); return mpc8xxx_spi->count; } static inline void fsl_espi_addr2cmd(unsigned int addr, u8 *cmd) { if (cmd) { cmd[1] = (u8)(addr >> 16); cmd[2] = (u8)(addr >> 8); cmd[3] = (u8)(addr >> 0); } } static inline unsigned int fsl_espi_cmd2addr(u8 *cmd) { if (cmd) return cmd[1] << 16 | cmd[2] << 8 | cmd[3] << 0; return 0; } static void fsl_espi_do_trans(struct spi_message *m, struct 
fsl_espi_transfer *tr) { struct spi_device *spi = m->spi; struct mpc8xxx_spi *mspi = spi_master_get_devdata(spi->master); struct fsl_espi_transfer *espi_trans = tr; struct spi_message message; struct spi_transfer *t, *first, trans; int status = 0; spi_message_init(&message); memset(&trans, 0, sizeof(trans)); first = list_first_entry(&m->transfers, struct spi_transfer, transfer_list); list_for_each_entry(t, &m->transfers, transfer_list) { if ((first->bits_per_word != t->bits_per_word) || (first->speed_hz != t->speed_hz)) { espi_trans->status = -EINVAL; dev_err(mspi->dev, "bits_per_word/speed_hz should be" " same for the same SPI transfer\n"); return; } trans.speed_hz = t->speed_hz; trans.bits_per_word = t->bits_per_word; trans.delay_usecs = max(first->delay_usecs, t->delay_usecs); } trans.len = espi_trans->len; trans.tx_buf = espi_trans->tx_buf; trans.rx_buf = espi_trans->rx_buf; spi_message_add_tail(&trans, &message); list_for_each_entry(t, &message.transfers, transfer_list) { if (t->bits_per_word || t->speed_hz) { status = -EINVAL; status = fsl_espi_setup_transfer(spi, t); if (status < 0) break; } if (t->len) status = fsl_espi_bufs(spi, t); if (status) { status = -EMSGSIZE; break; } if (t->delay_usecs) udelay(t->delay_usecs); } espi_trans->status = status; fsl_espi_setup_transfer(spi, NULL); } static void fsl_espi_cmd_trans(struct spi_message *m, struct fsl_espi_transfer *trans, u8 *rx_buff) { struct spi_transfer *t; u8 *local_buf; int i = 0; struct fsl_espi_transfer *espi_trans = trans; local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); if (!local_buf) { espi_trans->status = -ENOMEM; return; } list_for_each_entry(t, &m->transfers, transfer_list) { if (t->tx_buf) { memcpy(local_buf + i, t->tx_buf, t->len); i += t->len; } } espi_trans->tx_buf = local_buf; espi_trans->rx_buf = local_buf + espi_trans->n_tx; fsl_espi_do_trans(m, espi_trans); espi_trans->actual_length = espi_trans->len; kfree(local_buf); } static void fsl_espi_rw_trans(struct spi_message *m, struct 
fsl_espi_transfer *trans, u8 *rx_buff) { struct fsl_espi_transfer *espi_trans = trans; unsigned int n_tx = espi_trans->n_tx; unsigned int n_rx = espi_trans->n_rx; struct spi_transfer *t; u8 *local_buf; u8 *rx_buf = rx_buff; unsigned int trans_len; unsigned int addr; int i, pos, loop; local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL); if (!local_buf) { espi_trans->status = -ENOMEM; return; } for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) { trans_len = n_rx - pos; if (trans_len > SPCOM_TRANLEN_MAX - n_tx) trans_len = SPCOM_TRANLEN_MAX - n_tx; i = 0; list_for_each_entry(t, &m->transfers, transfer_list) { if (t->tx_buf) { memcpy(local_buf + i, t->tx_buf, t->len); i += t->len; } } if (pos > 0) { addr = fsl_espi_cmd2addr(local_buf); addr += pos; fsl_espi_addr2cmd(addr, local_buf); } espi_trans->n_tx = n_tx; espi_trans->n_rx = trans_len; espi_trans->len = trans_len + n_tx; espi_trans->tx_buf = local_buf; espi_trans->rx_buf = local_buf + n_tx; fsl_espi_do_trans(m, espi_trans); memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len); if (loop > 0) espi_trans->actual_length += espi_trans->len - n_tx; else espi_trans->actual_length += espi_trans->len; } kfree(local_buf); } static void fsl_espi_do_one_msg(struct spi_message *m) { struct spi_transfer *t; u8 *rx_buf = NULL; unsigned int n_tx = 0; unsigned int n_rx = 0; struct fsl_espi_transfer espi_trans; list_for_each_entry(t, &m->transfers, transfer_list) { if (t->tx_buf) n_tx += t->len; if (t->rx_buf) { n_rx += t->len; rx_buf = t->rx_buf; } } espi_trans.n_tx = n_tx; espi_trans.n_rx = n_rx; espi_trans.len = n_tx + n_rx; espi_trans.actual_length = 0; espi_trans.status = 0; if (!rx_buf) fsl_espi_cmd_trans(m, &espi_trans, NULL); else fsl_espi_rw_trans(m, &espi_trans, rx_buf); m->actual_length = espi_trans.actual_length; m->status = espi_trans.status; m->complete(m->context); } static int fsl_espi_setup(struct spi_device *spi) { struct mpc8xxx_spi *mpc8xxx_spi; struct fsl_espi_reg *reg_base; int retval; u32 
hw_mode; u32 loop_mode; struct spi_mpc8xxx_cs *cs = spi->controller_state; if (!spi->max_speed_hz) return -EINVAL; if (!cs) { cs = kzalloc(sizeof *cs, GFP_KERNEL); if (!cs) return -ENOMEM; spi->controller_state = cs; } mpc8xxx_spi = spi_master_get_devdata(spi->master); reg_base = mpc8xxx_spi->reg_base; hw_mode = cs->hw_mode; /* Save original settings */ cs->hw_mode = mpc8xxx_spi_read_reg( &reg_base->csmode[spi->chip_select]); /* mask out bits we are going to set */ cs->hw_mode &= ~(CSMODE_CP_BEGIN_EDGECLK | CSMODE_CI_INACTIVEHIGH | CSMODE_REV); if (spi->mode & SPI_CPHA) cs->hw_mode |= CSMODE_CP_BEGIN_EDGECLK; if (spi->mode & SPI_CPOL) cs->hw_mode |= CSMODE_CI_INACTIVEHIGH; if (!(spi->mode & SPI_LSB_FIRST)) cs->hw_mode |= CSMODE_REV; /* Handle the loop mode */ loop_mode = mpc8xxx_spi_read_reg(&reg_base->mode); loop_mode &= ~SPMODE_LOOP; if (spi->mode & SPI_LOOP) loop_mode |= SPMODE_LOOP; mpc8xxx_spi_write_reg(&reg_base->mode, loop_mode); retval = fsl_espi_setup_transfer(spi, NULL); if (retval < 0) { cs->hw_mode = hw_mode; /* Restore settings */ return retval; } return 0; } void fsl_espi_cpu_irq(struct mpc8xxx_spi *mspi, u32 events) { struct fsl_espi_reg *reg_base = mspi->reg_base; /* We need handle RX first */ if (events & SPIE_NE) { u32 rx_data, tmp; u8 rx_data_8; /* Spin until RX is done */ while (SPIE_RXCNT(events) < min(4, mspi->len)) { cpu_relax(); events = mpc8xxx_spi_read_reg(&reg_base->event); } if (mspi->len >= 4) { rx_data = mpc8xxx_spi_read_reg(&reg_base->receive); } else { tmp = mspi->len; rx_data = 0; while (tmp--) { rx_data_8 = in_8((u8 *)&reg_base->receive); rx_data |= (rx_data_8 << (tmp * 8)); } rx_data <<= (4 - mspi->len) * 8; } mspi->len -= 4; if (mspi->rx) mspi->get_rx(rx_data, mspi); } if (!(events & SPIE_NF)) { int ret; /* spin until TX is done */ ret = spin_event_timeout(((events = mpc8xxx_spi_read_reg( &reg_base->event)) & SPIE_NF) == 0, 1000, 0); if (!ret) { dev_err(mspi->dev, "tired waiting for SPIE_NF\n"); return; } } /* Clear the events */ 
mpc8xxx_spi_write_reg(&reg_base->event, events); mspi->count -= 1; if (mspi->count) { u32 word = mspi->get_tx(mspi); mpc8xxx_spi_write_reg(&reg_base->transmit, word); } else { complete(&mspi->done); } } static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) { struct mpc8xxx_spi *mspi = context_data; struct fsl_espi_reg *reg_base = mspi->reg_base; irqreturn_t ret = IRQ_NONE; u32 events; /* Get interrupt events(tx/rx) */ events = mpc8xxx_spi_read_reg(&reg_base->event); if (events) ret = IRQ_HANDLED; dev_vdbg(mspi->dev, "%s: events %x\n", __func__, events); fsl_espi_cpu_irq(mspi, events); return ret; } static void fsl_espi_remove(struct mpc8xxx_spi *mspi) { iounmap(mspi->reg_base); } static struct spi_master * __devinit fsl_espi_probe(struct device *dev, struct resource *mem, unsigned int irq) { struct fsl_spi_platform_data *pdata = dev->platform_data; struct spi_master *master; struct mpc8xxx_spi *mpc8xxx_spi; struct fsl_espi_reg *reg_base; u32 regval; int i, ret = 0; master = spi_alloc_master(dev, sizeof(struct mpc8xxx_spi)); if (!master) { ret = -ENOMEM; goto err; } dev_set_drvdata(dev, master); ret = mpc8xxx_spi_probe(dev, mem, irq); if (ret) goto err_probe; master->setup = fsl_espi_setup; mpc8xxx_spi = spi_master_get_devdata(master); mpc8xxx_spi->spi_do_one_msg = fsl_espi_do_one_msg; mpc8xxx_spi->spi_remove = fsl_espi_remove; mpc8xxx_spi->reg_base = ioremap(mem->start, resource_size(mem)); if (!mpc8xxx_spi->reg_base) { ret = -ENOMEM; goto err_probe; } reg_base = mpc8xxx_spi->reg_base; /* Register for SPI Interrupt */ ret = request_irq(mpc8xxx_spi->irq, fsl_espi_irq, 0, "fsl_espi", mpc8xxx_spi); if (ret) goto free_irq; if (mpc8xxx_spi->flags & SPI_QE_CPU_MODE) { mpc8xxx_spi->rx_shift = 16; mpc8xxx_spi->tx_shift = 24; } /* SPI controller initializations */ mpc8xxx_spi_write_reg(&reg_base->mode, 0); mpc8xxx_spi_write_reg(&reg_base->mask, 0); mpc8xxx_spi_write_reg(&reg_base->command, 0); mpc8xxx_spi_write_reg(&reg_base->event, 0xffffffff); /* Init eSPI CS mode 
register */
	/* Program every chip-select mode register with the default setup. */
	for (i = 0; i < pdata->max_chipselect; i++)
		mpc8xxx_spi_write_reg(&reg_base->csmode[i], CSMODE_INIT_VAL);

	/* Enable SPI interface */
	regval = pdata->initial_spmode | SPMODE_INIT_VAL | SPMODE_ENABLE;
	mpc8xxx_spi_write_reg(&reg_base->mode, regval);

	ret = spi_register_master(master);
	if (ret < 0)
		goto unreg_master;

	dev_info(dev, "at 0x%p (irq = %d)\n", reg_base, mpc8xxx_spi->irq);

	return master;

	/*
	 * Error unwind: each label releases what was acquired before the
	 * failing step; spi_master_put() also drops the allocation done by
	 * the (not visible here) earlier part of this probe helper.
	 * NOTE(review): the label is named "free_irq" like the kernel
	 * function it follows — legal C, but easy to misread.
	 */
unreg_master:
	free_irq(mpc8xxx_spi->irq, mpc8xxx_spi);
free_irq:
	iounmap(mpc8xxx_spi->reg_base);
err_probe:
	spi_master_put(master);
err:
	return ERR_PTR(ret);
}

/*
 * Read the "fsl,espi-num-chipselects" property from the device tree and
 * store it in the platform data.  Returns 0 on success, -EINVAL when the
 * property is absent or too short.  No cs_control callback is used by
 * this driver, so it is cleared explicitly.
 */
static int of_fsl_espi_get_chipselects(struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct fsl_spi_platform_data *pdata = dev->platform_data;
	const u32 *prop;
	int len;

	prop = of_get_property(np, "fsl,espi-num-chipselects", &len);
	if (!prop || len < sizeof(*prop)) {
		dev_err(dev, "No 'fsl,espi-num-chipselects' property\n");
		return -EINVAL;
	}

	pdata->max_chipselect = *prop;
	pdata->cs_control = NULL;

	return 0;
}

/*
 * OF probe: gather platform data, chip selects, MMIO and IRQ resources
 * from the device tree, then hand off to fsl_espi_probe().
 * Returns 0 on success or a negative errno.
 */
static int __devinit of_fsl_espi_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct spi_master *master;
	struct resource mem;
	struct resource irq;
	int ret = -ENOMEM;

	ret = of_mpc8xxx_spi_probe(ofdev);
	if (ret)
		return ret;

	ret = of_fsl_espi_get_chipselects(dev);
	if (ret)
		goto err;

	ret = of_address_to_resource(np, 0, &mem);
	if (ret)
		goto err;

	/* of_irq_to_resource() returns 0/NO_IRQ on failure, not a -errno. */
	ret = of_irq_to_resource(np, 0, &irq);
	if (!ret) {
		ret = -EINVAL;
		goto err;
	}

	master = fsl_espi_probe(dev, &mem, irq.start);
	if (IS_ERR(master)) {
		ret = PTR_ERR(master);
		goto err;
	}

	return 0;

err:
	return ret;
}

/* OF remove: delegate to the common mpc8xxx teardown. */
static int __devexit of_fsl_espi_remove(struct platform_device *dev)
{
	return mpc8xxx_spi_remove(&dev->dev);
}

static const struct of_device_id of_fsl_espi_match[] = {
	{ .compatible = "fsl,mpc8536-espi" },
	{}
};
MODULE_DEVICE_TABLE(of, of_fsl_espi_match);

static struct platform_driver fsl_espi_driver = {
	.driver = {
		.name = "fsl_espi",
		.owner = THIS_MODULE,
		.of_match_table = of_fsl_espi_match,
	},
	.probe		= of_fsl_espi_probe,
	.remove		= __devexit_p(of_fsl_espi_remove),
};
module_platform_driver(fsl_espi_driver);

MODULE_AUTHOR("Mingkai Hu");
MODULE_DESCRIPTION("Enhanced Freescale SPI Driver");
MODULE_LICENSE("GPL");
gpl-2.0
deepjyotisaran/android_kernel_samsung_exynos5410
drivers/regulator/max1586.c
4809
7487
/* * max1586.c -- Voltage and current regulation for the Maxim 1586 * * Copyright (C) 2008 Robert Jarzmik * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/slab.h> #include <linux/regulator/max1586.h> #define MAX1586_V3_MAX_VSEL 31 #define MAX1586_V6_MAX_VSEL 3 #define MAX1586_V3_MIN_UV 700000 #define MAX1586_V3_MAX_UV 1475000 #define MAX1586_V6_MIN_UV 0 #define MAX1586_V6_MAX_UV 3000000 #define I2C_V3_SELECT (0 << 5) #define I2C_V6_SELECT (1 << 5) struct max1586_data { struct i2c_client *client; /* min/max V3 voltage */ unsigned int min_uV; unsigned int max_uV; struct regulator_dev *rdev[0]; }; /* * V3 voltage * On I2C bus, sending a "x" byte to the max1586 means : * set V3 to 0.700V + (x & 0x1f) * 0.025V * This voltage can be increased by external resistors * R24 and R25=100kOhm as described in the data sheet. 
* The gain is approximately: 1 + R24/R25 + R24/185.5kOhm */ static int max1586_v3_calc_voltage(struct max1586_data *max1586, unsigned selector) { unsigned range_uV = max1586->max_uV - max1586->min_uV; return max1586->min_uV + (selector * range_uV / MAX1586_V3_MAX_VSEL); } static int max1586_v3_set(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector) { struct max1586_data *max1586 = rdev_get_drvdata(rdev); struct i2c_client *client = max1586->client; unsigned range_uV = max1586->max_uV - max1586->min_uV; u8 v3_prog; if (min_uV > max1586->max_uV || max_uV < max1586->min_uV) return -EINVAL; if (min_uV < max1586->min_uV) min_uV = max1586->min_uV; *selector = DIV_ROUND_UP((min_uV - max1586->min_uV) * MAX1586_V3_MAX_VSEL, range_uV); if (max1586_v3_calc_voltage(max1586, *selector) > max_uV) return -EINVAL; dev_dbg(&client->dev, "changing voltage v3 to %dmv\n", max1586_v3_calc_voltage(max1586, *selector) / 1000); v3_prog = I2C_V3_SELECT | (u8) *selector; return i2c_smbus_write_byte(client, v3_prog); } static int max1586_v3_list(struct regulator_dev *rdev, unsigned selector) { struct max1586_data *max1586 = rdev_get_drvdata(rdev); if (selector > MAX1586_V3_MAX_VSEL) return -EINVAL; return max1586_v3_calc_voltage(max1586, selector); } /* * V6 voltage * On I2C bus, sending a "x" byte to the max1586 means : * set V6 to either 0V, 1.8V, 2.5V, 3V depending on (x & 0x3) * As regulator framework doesn't accept voltages to be 0V, we use 1uV. 
*/ static int max1586_v6_calc_voltage(unsigned selector) { static int voltages_uv[] = { 1, 1800000, 2500000, 3000000 }; return voltages_uv[selector]; } static int max1586_v6_set(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned int *selector) { struct i2c_client *client = rdev_get_drvdata(rdev); u8 v6_prog; if (min_uV < MAX1586_V6_MIN_UV || min_uV > MAX1586_V6_MAX_UV) return -EINVAL; if (max_uV < MAX1586_V6_MIN_UV || max_uV > MAX1586_V6_MAX_UV) return -EINVAL; if (min_uV < 1800000) *selector = 0; else if (min_uV < 2500000) *selector = 1; else if (min_uV < 3000000) *selector = 2; else if (min_uV >= 3000000) *selector = 3; if (max1586_v6_calc_voltage(*selector) > max_uV) return -EINVAL; dev_dbg(&client->dev, "changing voltage v6 to %dmv\n", max1586_v6_calc_voltage(*selector) / 1000); v6_prog = I2C_V6_SELECT | (u8) *selector; return i2c_smbus_write_byte(client, v6_prog); } static int max1586_v6_list(struct regulator_dev *rdev, unsigned selector) { if (selector > MAX1586_V6_MAX_VSEL) return -EINVAL; return max1586_v6_calc_voltage(selector); } /* * The Maxim 1586 controls V3 and V6 voltages, but offers no way of reading back * the set up value. 
*/ static struct regulator_ops max1586_v3_ops = { .set_voltage = max1586_v3_set, .list_voltage = max1586_v3_list, }; static struct regulator_ops max1586_v6_ops = { .set_voltage = max1586_v6_set, .list_voltage = max1586_v6_list, }; static struct regulator_desc max1586_reg[] = { { .name = "Output_V3", .id = MAX1586_V3, .ops = &max1586_v3_ops, .type = REGULATOR_VOLTAGE, .n_voltages = MAX1586_V3_MAX_VSEL + 1, .owner = THIS_MODULE, }, { .name = "Output_V6", .id = MAX1586_V6, .ops = &max1586_v6_ops, .type = REGULATOR_VOLTAGE, .n_voltages = MAX1586_V6_MAX_VSEL + 1, .owner = THIS_MODULE, }, }; static int __devinit max1586_pmic_probe(struct i2c_client *client, const struct i2c_device_id *i2c_id) { struct regulator_dev **rdev; struct max1586_platform_data *pdata = client->dev.platform_data; struct max1586_data *max1586; int i, id, ret = -ENOMEM; max1586 = kzalloc(sizeof(struct max1586_data) + sizeof(struct regulator_dev *) * (MAX1586_V6 + 1), GFP_KERNEL); if (!max1586) goto out; max1586->client = client; if (!pdata->v3_gain) { ret = -EINVAL; goto out_unmap; } max1586->min_uV = MAX1586_V3_MIN_UV / 1000 * pdata->v3_gain / 1000; max1586->max_uV = MAX1586_V3_MAX_UV / 1000 * pdata->v3_gain / 1000; rdev = max1586->rdev; for (i = 0; i < pdata->num_subdevs && i <= MAX1586_V6; i++) { id = pdata->subdevs[i].id; if (!pdata->subdevs[i].platform_data) continue; if (id < MAX1586_V3 || id > MAX1586_V6) { dev_err(&client->dev, "invalid regulator id %d\n", id); goto err; } rdev[i] = regulator_register(&max1586_reg[id], &client->dev, pdata->subdevs[i].platform_data, max1586, NULL); if (IS_ERR(rdev[i])) { ret = PTR_ERR(rdev[i]); dev_err(&client->dev, "failed to register %s\n", max1586_reg[id].name); goto err; } } i2c_set_clientdata(client, max1586); dev_info(&client->dev, "Maxim 1586 regulator driver loaded\n"); return 0; err: while (--i >= 0) regulator_unregister(rdev[i]); out_unmap: kfree(max1586); out: return ret; } static int __devexit max1586_pmic_remove(struct i2c_client *client) { 
struct max1586_data *max1586 = i2c_get_clientdata(client); int i; for (i = 0; i <= MAX1586_V6; i++) if (max1586->rdev[i]) regulator_unregister(max1586->rdev[i]); kfree(max1586); return 0; } static const struct i2c_device_id max1586_id[] = { { "max1586", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, max1586_id); static struct i2c_driver max1586_pmic_driver = { .probe = max1586_pmic_probe, .remove = __devexit_p(max1586_pmic_remove), .driver = { .name = "max1586", .owner = THIS_MODULE, }, .id_table = max1586_id, }; static int __init max1586_pmic_init(void) { return i2c_add_driver(&max1586_pmic_driver); } subsys_initcall(max1586_pmic_init); static void __exit max1586_pmic_exit(void) { i2c_del_driver(&max1586_pmic_driver); } module_exit(max1586_pmic_exit); /* Module information */ MODULE_DESCRIPTION("MAXIM 1586 voltage regulator driver"); MODULE_AUTHOR("Robert Jarzmik"); MODULE_LICENSE("GPL");
gpl-2.0
flar2/ElementalX-m7-5.0
arch/blackfin/kernel/debug-mmrs.c
5065
41933
/* * debugfs interface to core/system MMRs * * Copyright 2007-2011 Analog Devices Inc. * * Licensed under the GPL-2 or later */ #include <linux/debugfs.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/module.h> #include <asm/blackfin.h> #include <asm/gpio.h> #include <asm/gptimers.h> #include <asm/bfin_can.h> #include <asm/bfin_dma.h> #include <asm/bfin_ppi.h> #include <asm/bfin_serial.h> #include <asm/bfin5xx_spi.h> #include <asm/bfin_twi.h> /* Common code defines PORT_MUX on us, so redirect the MMR back locally */ #ifdef BFIN_PORT_MUX #undef PORT_MUX #define PORT_MUX BFIN_PORT_MUX #endif #define _d(name, bits, addr, perms) debugfs_create_x##bits(name, perms, parent, (u##bits *)(addr)) #define d(name, bits, addr) _d(name, bits, addr, S_IRUSR|S_IWUSR) #define d_RO(name, bits, addr) _d(name, bits, addr, S_IRUSR) #define d_WO(name, bits, addr) _d(name, bits, addr, S_IWUSR) #define D_RO(name, bits) d_RO(#name, bits, name) #define D_WO(name, bits) d_WO(#name, bits, name) #define D32(name) d(#name, 32, name) #define D16(name) d(#name, 16, name) #define REGS_OFF(peri, mmr) offsetof(struct bfin_##peri##_regs, mmr) #define __REGS(peri, sname, rname) \ do { \ struct bfin_##peri##_regs r; \ void *addr = (void *)(base + REGS_OFF(peri, rname)); \ strcpy(_buf, sname); \ if (sizeof(r.rname) == 2) \ debugfs_create_x16(buf, S_IRUSR|S_IWUSR, parent, addr); \ else \ debugfs_create_x32(buf, S_IRUSR|S_IWUSR, parent, addr); \ } while (0) #define REGS_STR_PFX(buf, pfx, num) \ ({ \ buf + (num >= 0 ? \ sprintf(buf, #pfx "%i_", num) : \ sprintf(buf, #pfx "_")); \ }) #define REGS_STR_PFX_C(buf, pfx, num) \ ({ \ buf + (num >= 0 ? 
\ sprintf(buf, #pfx "%c_", 'A' + num) : \ sprintf(buf, #pfx "_")); \ }) /* * Core registers (not memory mapped) */ extern u32 last_seqstat; static int debug_cclk_get(void *data, u64 *val) { *val = get_cclk(); return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_debug_cclk, debug_cclk_get, NULL, "0x%08llx\n"); static int debug_sclk_get(void *data, u64 *val) { *val = get_sclk(); return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_debug_sclk, debug_sclk_get, NULL, "0x%08llx\n"); #define DEFINE_SYSREG(sr, pre, post) \ static int sysreg_##sr##_get(void *data, u64 *val) \ { \ unsigned long tmp; \ pre; \ __asm__ __volatile__("%0 = " #sr ";" : "=d"(tmp)); \ *val = tmp; \ return 0; \ } \ static int sysreg_##sr##_set(void *data, u64 val) \ { \ unsigned long tmp = val; \ __asm__ __volatile__(#sr " = %0;" : : "d"(tmp)); \ post; \ return 0; \ } \ DEFINE_SIMPLE_ATTRIBUTE(fops_sysreg_##sr, sysreg_##sr##_get, sysreg_##sr##_set, "0x%08llx\n") DEFINE_SYSREG(cycles, , ); DEFINE_SYSREG(cycles2, __asm__ __volatile__("%0 = cycles;" : "=d"(tmp)), ); DEFINE_SYSREG(emudat, , ); DEFINE_SYSREG(seqstat, , ); DEFINE_SYSREG(syscfg, , CSYNC()); #define D_SYSREG(sr) debugfs_create_file(#sr, S_IRUSR|S_IWUSR, parent, NULL, &fops_sysreg_##sr) /* * CAN */ #define CAN_OFF(mmr) REGS_OFF(can, mmr) #define __CAN(uname, lname) __REGS(can, #uname, lname) static void __init __maybe_unused bfin_debug_mmrs_can(struct dentry *parent, unsigned long base, int num) { static struct dentry *am, *mb; int i, j; char buf[32], *_buf = REGS_STR_PFX(buf, CAN, num); if (!am) { am = debugfs_create_dir("am", parent); mb = debugfs_create_dir("mb", parent); } __CAN(MC1, mc1); __CAN(MD1, md1); __CAN(TRS1, trs1); __CAN(TRR1, trr1); __CAN(TA1, ta1); __CAN(AA1, aa1); __CAN(RMP1, rmp1); __CAN(RML1, rml1); __CAN(MBTIF1, mbtif1); __CAN(MBRIF1, mbrif1); __CAN(MBIM1, mbim1); __CAN(RFH1, rfh1); __CAN(OPSS1, opss1); __CAN(MC2, mc2); __CAN(MD2, md2); __CAN(TRS2, trs2); __CAN(TRR2, trr2); __CAN(TA2, ta2); __CAN(AA2, aa2); __CAN(RMP2, rmp2); __CAN(RML2, rml2); 
__CAN(MBTIF2, mbtif2); __CAN(MBRIF2, mbrif2); __CAN(MBIM2, mbim2); __CAN(RFH2, rfh2); __CAN(OPSS2, opss2); __CAN(CLOCK, clock); __CAN(TIMING, timing); __CAN(DEBUG, debug); __CAN(STATUS, status); __CAN(CEC, cec); __CAN(GIS, gis); __CAN(GIM, gim); __CAN(GIF, gif); __CAN(CONTROL, control); __CAN(INTR, intr); __CAN(VERSION, version); __CAN(MBTD, mbtd); __CAN(EWR, ewr); __CAN(ESR, esr); /*__CAN(UCREG, ucreg); no longer exists */ __CAN(UCCNT, uccnt); __CAN(UCRC, ucrc); __CAN(UCCNF, uccnf); __CAN(VERSION2, version2); for (i = 0; i < 32; ++i) { sprintf(_buf, "AM%02iL", i); debugfs_create_x16(buf, S_IRUSR|S_IWUSR, am, (u16 *)(base + CAN_OFF(msk[i].aml))); sprintf(_buf, "AM%02iH", i); debugfs_create_x16(buf, S_IRUSR|S_IWUSR, am, (u16 *)(base + CAN_OFF(msk[i].amh))); for (j = 0; j < 3; ++j) { sprintf(_buf, "MB%02i_DATA%i", i, j); debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb, (u16 *)(base + CAN_OFF(chl[i].data[j*2]))); } sprintf(_buf, "MB%02i_LENGTH", i); debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb, (u16 *)(base + CAN_OFF(chl[i].dlc))); sprintf(_buf, "MB%02i_TIMESTAMP", i); debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb, (u16 *)(base + CAN_OFF(chl[i].tsv))); sprintf(_buf, "MB%02i_ID0", i); debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb, (u16 *)(base + CAN_OFF(chl[i].id0))); sprintf(_buf, "MB%02i_ID1", i); debugfs_create_x16(buf, S_IRUSR|S_IWUSR, mb, (u16 *)(base + CAN_OFF(chl[i].id1))); } } #define CAN(num) bfin_debug_mmrs_can(parent, CAN##num##_MC1, num) /* * DMA */ #define __DMA(uname, lname) __REGS(dma, #uname, lname) static void __init __maybe_unused bfin_debug_mmrs_dma(struct dentry *parent, unsigned long base, int num, char mdma, const char *pfx) { char buf[32], *_buf; if (mdma) _buf = buf + sprintf(buf, "%s_%c%i_", pfx, mdma, num); else _buf = buf + sprintf(buf, "%s%i_", pfx, num); __DMA(NEXT_DESC_PTR, next_desc_ptr); __DMA(START_ADDR, start_addr); __DMA(CONFIG, config); __DMA(X_COUNT, x_count); __DMA(X_MODIFY, x_modify); __DMA(Y_COUNT, y_count); __DMA(Y_MODIFY, y_modify); 
__DMA(CURR_DESC_PTR, curr_desc_ptr); __DMA(CURR_ADDR, curr_addr); __DMA(IRQ_STATUS, irq_status); if (strcmp(pfx, "IMDMA") != 0) __DMA(PERIPHERAL_MAP, peripheral_map); __DMA(CURR_X_COUNT, curr_x_count); __DMA(CURR_Y_COUNT, curr_y_count); } #define _DMA(num, base, mdma, pfx) bfin_debug_mmrs_dma(parent, base, num, mdma, pfx "DMA") #define DMA(num) _DMA(num, DMA##num##_NEXT_DESC_PTR, 0, "") #define _MDMA(num, x) \ do { \ _DMA(num, x##DMA_D##num##_NEXT_DESC_PTR, 'D', #x); \ _DMA(num, x##DMA_S##num##_NEXT_DESC_PTR, 'S', #x); \ } while (0) #define MDMA(num) _MDMA(num, M) #define IMDMA(num) _MDMA(num, IM) /* * EPPI */ #define __EPPI(uname, lname) __REGS(eppi, #uname, lname) static void __init __maybe_unused bfin_debug_mmrs_eppi(struct dentry *parent, unsigned long base, int num) { char buf[32], *_buf = REGS_STR_PFX(buf, EPPI, num); __EPPI(STATUS, status); __EPPI(HCOUNT, hcount); __EPPI(HDELAY, hdelay); __EPPI(VCOUNT, vcount); __EPPI(VDELAY, vdelay); __EPPI(FRAME, frame); __EPPI(LINE, line); __EPPI(CLKDIV, clkdiv); __EPPI(CONTROL, control); __EPPI(FS1W_HBL, fs1w_hbl); __EPPI(FS1P_AVPL, fs1p_avpl); __EPPI(FS2W_LVB, fs2w_lvb); __EPPI(FS2P_LAVF, fs2p_lavf); __EPPI(CLIP, clip); } #define EPPI(num) bfin_debug_mmrs_eppi(parent, EPPI##num##_STATUS, num) /* * General Purpose Timers */ #define __GPTIMER(uname, lname) __REGS(gptimer, #uname, lname) static void __init __maybe_unused bfin_debug_mmrs_gptimer(struct dentry *parent, unsigned long base, int num) { char buf[32], *_buf = REGS_STR_PFX(buf, TIMER, num); __GPTIMER(CONFIG, config); __GPTIMER(COUNTER, counter); __GPTIMER(PERIOD, period); __GPTIMER(WIDTH, width); } #define GPTIMER(num) bfin_debug_mmrs_gptimer(parent, TIMER##num##_CONFIG, num) #define GPTIMER_GROUP_OFF(mmr) REGS_OFF(gptimer_group, mmr) #define __GPTIMER_GROUP(uname, lname) __REGS(gptimer_group, #uname, lname) static void __init __maybe_unused bfin_debug_mmrs_gptimer_group(struct dentry *parent, unsigned long base, int num) { char buf[32], *_buf; if (num == -1) { 
_buf = buf + sprintf(buf, "TIMER_"); __GPTIMER_GROUP(ENABLE, enable); __GPTIMER_GROUP(DISABLE, disable); __GPTIMER_GROUP(STATUS, status); } else { /* These MMRs are a bit odd as the group # is a suffix */ _buf = buf + sprintf(buf, "TIMER_ENABLE%i", num); d(buf, 16, base + GPTIMER_GROUP_OFF(enable)); _buf = buf + sprintf(buf, "TIMER_DISABLE%i", num); d(buf, 16, base + GPTIMER_GROUP_OFF(disable)); _buf = buf + sprintf(buf, "TIMER_STATUS%i", num); d(buf, 32, base + GPTIMER_GROUP_OFF(status)); } } #define GPTIMER_GROUP(mmr, num) bfin_debug_mmrs_gptimer_group(parent, mmr, num) /* * Handshake MDMA */ #define __HMDMA(uname, lname) __REGS(hmdma, #uname, lname) static void __init __maybe_unused bfin_debug_mmrs_hmdma(struct dentry *parent, unsigned long base, int num) { char buf[32], *_buf = REGS_STR_PFX(buf, HMDMA, num); __HMDMA(CONTROL, control); __HMDMA(ECINIT, ecinit); __HMDMA(BCINIT, bcinit); __HMDMA(ECURGENT, ecurgent); __HMDMA(ECOVERFLOW, ecoverflow); __HMDMA(ECOUNT, ecount); __HMDMA(BCOUNT, bcount); } #define HMDMA(num) bfin_debug_mmrs_hmdma(parent, HMDMA##num##_CONTROL, num) /* * Peripheral Interrupts (PINT/GPIO) */ #ifdef PINT0_MASK_SET #define __PINT(uname, lname) __REGS(pint, #uname, lname) static void __init __maybe_unused bfin_debug_mmrs_pint(struct dentry *parent, unsigned long base, int num) { char buf[32], *_buf = REGS_STR_PFX(buf, PINT, num); __PINT(MASK_SET, mask_set); __PINT(MASK_CLEAR, mask_clear); __PINT(REQUEST, request); __PINT(ASSIGN, assign); __PINT(EDGE_SET, edge_set); __PINT(EDGE_CLEAR, edge_clear); __PINT(INVERT_SET, invert_set); __PINT(INVERT_CLEAR, invert_clear); __PINT(PINSTATE, pinstate); __PINT(LATCH, latch); } #define PINT(num) bfin_debug_mmrs_pint(parent, PINT##num##_MASK_SET, num) #endif /* * Port/GPIO */ #define bfin_gpio_regs gpio_port_t #define __PORT(uname, lname) __REGS(gpio, #uname, lname) static void __init __maybe_unused bfin_debug_mmrs_port(struct dentry *parent, unsigned long base, int num) { char buf[32], *_buf; #ifdef 
__ADSPBF54x__ _buf = REGS_STR_PFX_C(buf, PORT, num); __PORT(FER, port_fer); __PORT(SET, data_set); __PORT(CLEAR, data_clear); __PORT(DIR_SET, dir_set); __PORT(DIR_CLEAR, dir_clear); __PORT(INEN, inen); __PORT(MUX, port_mux); #else _buf = buf + sprintf(buf, "PORT%cIO_", num); __PORT(CLEAR, data_clear); __PORT(SET, data_set); __PORT(TOGGLE, toggle); __PORT(MASKA, maska); __PORT(MASKA_CLEAR, maska_clear); __PORT(MASKA_SET, maska_set); __PORT(MASKA_TOGGLE, maska_toggle); __PORT(MASKB, maskb); __PORT(MASKB_CLEAR, maskb_clear); __PORT(MASKB_SET, maskb_set); __PORT(MASKB_TOGGLE, maskb_toggle); __PORT(DIR, dir); __PORT(POLAR, polar); __PORT(EDGE, edge); __PORT(BOTH, both); __PORT(INEN, inen); #endif _buf[-1] = '\0'; d(buf, 16, base + REGS_OFF(gpio, data)); } #define PORT(base, num) bfin_debug_mmrs_port(parent, base, num) /* * PPI */ #define __PPI(uname, lname) __REGS(ppi, #uname, lname) static void __init __maybe_unused bfin_debug_mmrs_ppi(struct dentry *parent, unsigned long base, int num) { char buf[32], *_buf = REGS_STR_PFX(buf, PPI, num); __PPI(CONTROL, control); __PPI(STATUS, status); __PPI(COUNT, count); __PPI(DELAY, delay); __PPI(FRAME, frame); } #define PPI(num) bfin_debug_mmrs_ppi(parent, PPI##num##_CONTROL, num) /* * SPI */ #define __SPI(uname, lname) __REGS(spi, #uname, lname) static void __init __maybe_unused bfin_debug_mmrs_spi(struct dentry *parent, unsigned long base, int num) { char buf[32], *_buf = REGS_STR_PFX(buf, SPI, num); __SPI(CTL, ctl); __SPI(FLG, flg); __SPI(STAT, stat); __SPI(TDBR, tdbr); __SPI(RDBR, rdbr); __SPI(BAUD, baud); __SPI(SHADOW, shadow); } #define SPI(num) bfin_debug_mmrs_spi(parent, SPI##num##_REGBASE, num) /* * SPORT */ static inline int sport_width(void *mmr) { unsigned long lmmr = (unsigned long)mmr; if ((lmmr & 0xff) == 0x10) /* SPORT#_TX has 0x10 offset -> SPORT#_TCR2 has 0x04 offset */ lmmr -= 0xc; else /* SPORT#_RX has 0x18 offset -> SPORT#_RCR2 has 0x24 offset */ lmmr += 0xc; /* extract SLEN field from control register 2 and 
add 1 */ return (bfin_read16(lmmr) & 0x1f) + 1; } static int sport_set(void *mmr, u64 val) { unsigned long flags; local_irq_save(flags); if (sport_width(mmr) <= 16) bfin_write16(mmr, val); else bfin_write32(mmr, val); local_irq_restore(flags); return 0; } static int sport_get(void *mmr, u64 *val) { unsigned long flags; local_irq_save(flags); if (sport_width(mmr) <= 16) *val = bfin_read16(mmr); else *val = bfin_read32(mmr); local_irq_restore(flags); return 0; } DEFINE_SIMPLE_ATTRIBUTE(fops_sport, sport_get, sport_set, "0x%08llx\n"); /*DEFINE_SIMPLE_ATTRIBUTE(fops_sport_ro, sport_get, NULL, "0x%08llx\n");*/ DEFINE_SIMPLE_ATTRIBUTE(fops_sport_wo, NULL, sport_set, "0x%08llx\n"); #define SPORT_OFF(mmr) (SPORT0_##mmr - SPORT0_TCR1) #define _D_SPORT(name, perms, fops) \ do { \ strcpy(_buf, #name); \ debugfs_create_file(buf, perms, parent, (void *)(base + SPORT_OFF(name)), fops); \ } while (0) #define __SPORT_RW(name) _D_SPORT(name, S_IRUSR|S_IWUSR, &fops_sport) #define __SPORT_RO(name) _D_SPORT(name, S_IRUSR, &fops_sport_ro) #define __SPORT_WO(name) _D_SPORT(name, S_IWUSR, &fops_sport_wo) #define __SPORT(name, bits) \ do { \ strcpy(_buf, #name); \ debugfs_create_x##bits(buf, S_IRUSR|S_IWUSR, parent, (u##bits *)(base + SPORT_OFF(name))); \ } while (0) static void __init __maybe_unused bfin_debug_mmrs_sport(struct dentry *parent, unsigned long base, int num) { char buf[32], *_buf = REGS_STR_PFX(buf, SPORT, num); __SPORT(CHNL, 16); __SPORT(MCMC1, 16); __SPORT(MCMC2, 16); __SPORT(MRCS0, 32); __SPORT(MRCS1, 32); __SPORT(MRCS2, 32); __SPORT(MRCS3, 32); __SPORT(MTCS0, 32); __SPORT(MTCS1, 32); __SPORT(MTCS2, 32); __SPORT(MTCS3, 32); __SPORT(RCLKDIV, 16); __SPORT(RCR1, 16); __SPORT(RCR2, 16); __SPORT(RFSDIV, 16); __SPORT_RW(RX); __SPORT(STAT, 16); __SPORT(TCLKDIV, 16); __SPORT(TCR1, 16); __SPORT(TCR2, 16); __SPORT(TFSDIV, 16); __SPORT_WO(TX); } #define SPORT(num) bfin_debug_mmrs_sport(parent, SPORT##num##_TCR1, num) /* * TWI */ #define __TWI(uname, lname) __REGS(twi, #uname, 
lname) static void __init __maybe_unused bfin_debug_mmrs_twi(struct dentry *parent, unsigned long base, int num) { char buf[32], *_buf = REGS_STR_PFX(buf, TWI, num); __TWI(CLKDIV, clkdiv); __TWI(CONTROL, control); __TWI(SLAVE_CTL, slave_ctl); __TWI(SLAVE_STAT, slave_stat); __TWI(SLAVE_ADDR, slave_addr); __TWI(MASTER_CTL, master_ctl); __TWI(MASTER_STAT, master_stat); __TWI(MASTER_ADDR, master_addr); __TWI(INT_STAT, int_stat); __TWI(INT_MASK, int_mask); __TWI(FIFO_CTL, fifo_ctl); __TWI(FIFO_STAT, fifo_stat); __TWI(XMT_DATA8, xmt_data8); __TWI(XMT_DATA16, xmt_data16); __TWI(RCV_DATA8, rcv_data8); __TWI(RCV_DATA16, rcv_data16); } #define TWI(num) bfin_debug_mmrs_twi(parent, TWI##num##_CLKDIV, num) /* * UART */ #define __UART(uname, lname) __REGS(uart, #uname, lname) static void __init __maybe_unused bfin_debug_mmrs_uart(struct dentry *parent, unsigned long base, int num) { char buf[32], *_buf = REGS_STR_PFX(buf, UART, num); #ifdef BFIN_UART_BF54X_STYLE __UART(DLL, dll); __UART(DLH, dlh); __UART(GCTL, gctl); __UART(LCR, lcr); __UART(MCR, mcr); __UART(LSR, lsr); __UART(MSR, msr); __UART(SCR, scr); __UART(IER_SET, ier_set); __UART(IER_CLEAR, ier_clear); __UART(THR, thr); __UART(RBR, rbr); #else __UART(DLL, dll); __UART(THR, thr); __UART(RBR, rbr); __UART(DLH, dlh); __UART(IER, ier); __UART(IIR, iir); __UART(LCR, lcr); __UART(MCR, mcr); __UART(LSR, lsr); __UART(MSR, msr); __UART(SCR, scr); __UART(GCTL, gctl); #endif } #define UART(num) bfin_debug_mmrs_uart(parent, UART##num##_DLL, num) /* * The actual debugfs generation */ static struct dentry *debug_mmrs_dentry; static int __init bfin_debug_mmrs_init(void) { struct dentry *top, *parent; pr_info("debug-mmrs: setting up Blackfin MMR debugfs\n"); top = debugfs_create_dir("blackfin", NULL); if (top == NULL) return -1; parent = debugfs_create_dir("core_regs", top); debugfs_create_file("cclk", S_IRUSR, parent, NULL, &fops_debug_cclk); debugfs_create_file("sclk", S_IRUSR, parent, NULL, &fops_debug_sclk); 
debugfs_create_x32("last_seqstat", S_IRUSR, parent, &last_seqstat); D_SYSREG(cycles); D_SYSREG(cycles2); D_SYSREG(emudat); D_SYSREG(seqstat); D_SYSREG(syscfg); /* Core MMRs */ parent = debugfs_create_dir("ctimer", top); D32(TCNTL); D32(TCOUNT); D32(TPERIOD); D32(TSCALE); parent = debugfs_create_dir("cec", top); D32(EVT0); D32(EVT1); D32(EVT2); D32(EVT3); D32(EVT4); D32(EVT5); D32(EVT6); D32(EVT7); D32(EVT8); D32(EVT9); D32(EVT10); D32(EVT11); D32(EVT12); D32(EVT13); D32(EVT14); D32(EVT15); D32(EVT_OVERRIDE); D32(IMASK); D32(IPEND); D32(ILAT); D32(IPRIO); parent = debugfs_create_dir("debug", top); D32(DBGSTAT); D32(DSPID); parent = debugfs_create_dir("mmu", top); D32(SRAM_BASE_ADDRESS); D32(DCPLB_ADDR0); D32(DCPLB_ADDR10); D32(DCPLB_ADDR11); D32(DCPLB_ADDR12); D32(DCPLB_ADDR13); D32(DCPLB_ADDR14); D32(DCPLB_ADDR15); D32(DCPLB_ADDR1); D32(DCPLB_ADDR2); D32(DCPLB_ADDR3); D32(DCPLB_ADDR4); D32(DCPLB_ADDR5); D32(DCPLB_ADDR6); D32(DCPLB_ADDR7); D32(DCPLB_ADDR8); D32(DCPLB_ADDR9); D32(DCPLB_DATA0); D32(DCPLB_DATA10); D32(DCPLB_DATA11); D32(DCPLB_DATA12); D32(DCPLB_DATA13); D32(DCPLB_DATA14); D32(DCPLB_DATA15); D32(DCPLB_DATA1); D32(DCPLB_DATA2); D32(DCPLB_DATA3); D32(DCPLB_DATA4); D32(DCPLB_DATA5); D32(DCPLB_DATA6); D32(DCPLB_DATA7); D32(DCPLB_DATA8); D32(DCPLB_DATA9); D32(DCPLB_FAULT_ADDR); D32(DCPLB_STATUS); D32(DMEM_CONTROL); D32(DTEST_COMMAND); D32(DTEST_DATA0); D32(DTEST_DATA1); D32(ICPLB_ADDR0); D32(ICPLB_ADDR1); D32(ICPLB_ADDR2); D32(ICPLB_ADDR3); D32(ICPLB_ADDR4); D32(ICPLB_ADDR5); D32(ICPLB_ADDR6); D32(ICPLB_ADDR7); D32(ICPLB_ADDR8); D32(ICPLB_ADDR9); D32(ICPLB_ADDR10); D32(ICPLB_ADDR11); D32(ICPLB_ADDR12); D32(ICPLB_ADDR13); D32(ICPLB_ADDR14); D32(ICPLB_ADDR15); D32(ICPLB_DATA0); D32(ICPLB_DATA1); D32(ICPLB_DATA2); D32(ICPLB_DATA3); D32(ICPLB_DATA4); D32(ICPLB_DATA5); D32(ICPLB_DATA6); D32(ICPLB_DATA7); D32(ICPLB_DATA8); D32(ICPLB_DATA9); D32(ICPLB_DATA10); D32(ICPLB_DATA11); D32(ICPLB_DATA12); D32(ICPLB_DATA13); D32(ICPLB_DATA14); D32(ICPLB_DATA15); 
D32(ICPLB_FAULT_ADDR); D32(ICPLB_STATUS); D32(IMEM_CONTROL); if (!ANOMALY_05000481) { D32(ITEST_COMMAND); D32(ITEST_DATA0); D32(ITEST_DATA1); } parent = debugfs_create_dir("perf", top); D32(PFCNTR0); D32(PFCNTR1); D32(PFCTL); parent = debugfs_create_dir("trace", top); D32(TBUF); D32(TBUFCTL); D32(TBUFSTAT); parent = debugfs_create_dir("watchpoint", top); D32(WPIACTL); D32(WPIA0); D32(WPIA1); D32(WPIA2); D32(WPIA3); D32(WPIA4); D32(WPIA5); D32(WPIACNT0); D32(WPIACNT1); D32(WPIACNT2); D32(WPIACNT3); D32(WPIACNT4); D32(WPIACNT5); D32(WPDACTL); D32(WPDA0); D32(WPDA1); D32(WPDACNT0); D32(WPDACNT1); D32(WPSTAT); /* System MMRs */ #ifdef ATAPI_CONTROL parent = debugfs_create_dir("atapi", top); D16(ATAPI_CONTROL); D16(ATAPI_DEV_ADDR); D16(ATAPI_DEV_RXBUF); D16(ATAPI_DEV_TXBUF); D16(ATAPI_DMA_TFRCNT); D16(ATAPI_INT_MASK); D16(ATAPI_INT_STATUS); D16(ATAPI_LINE_STATUS); D16(ATAPI_MULTI_TIM_0); D16(ATAPI_MULTI_TIM_1); D16(ATAPI_MULTI_TIM_2); D16(ATAPI_PIO_TFRCNT); D16(ATAPI_PIO_TIM_0); D16(ATAPI_PIO_TIM_1); D16(ATAPI_REG_TIM_0); D16(ATAPI_SM_STATE); D16(ATAPI_STATUS); D16(ATAPI_TERMINATE); D16(ATAPI_UDMAOUT_TFRCNT); D16(ATAPI_ULTRA_TIM_0); D16(ATAPI_ULTRA_TIM_1); D16(ATAPI_ULTRA_TIM_2); D16(ATAPI_ULTRA_TIM_3); D16(ATAPI_UMAIN_TFRCNT); D16(ATAPI_XFER_LEN); #endif #if defined(CAN_MC1) || defined(CAN0_MC1) || defined(CAN1_MC1) parent = debugfs_create_dir("can", top); # ifdef CAN_MC1 bfin_debug_mmrs_can(parent, CAN_MC1, -1); # endif # ifdef CAN0_MC1 CAN(0); # endif # ifdef CAN1_MC1 CAN(1); # endif #endif #ifdef CNT_COMMAND parent = debugfs_create_dir("counter", top); D16(CNT_COMMAND); D16(CNT_CONFIG); D32(CNT_COUNTER); D16(CNT_DEBOUNCE); D16(CNT_IMASK); D32(CNT_MAX); D32(CNT_MIN); D16(CNT_STATUS); #endif parent = debugfs_create_dir("dmac", top); #ifdef DMAC_TC_CNT D16(DMAC_TC_CNT); D16(DMAC_TC_PER); #endif #ifdef DMAC0_TC_CNT D16(DMAC0_TC_CNT); D16(DMAC0_TC_PER); #endif #ifdef DMAC1_TC_CNT D16(DMAC1_TC_CNT); D16(DMAC1_TC_PER); #endif #ifdef DMAC1_PERIMUX D16(DMAC1_PERIMUX); #endif 
#ifdef __ADSPBF561__ /* XXX: should rewrite the MMR map */ # define DMA0_NEXT_DESC_PTR DMA2_0_NEXT_DESC_PTR # define DMA1_NEXT_DESC_PTR DMA2_1_NEXT_DESC_PTR # define DMA2_NEXT_DESC_PTR DMA2_2_NEXT_DESC_PTR # define DMA3_NEXT_DESC_PTR DMA2_3_NEXT_DESC_PTR # define DMA4_NEXT_DESC_PTR DMA2_4_NEXT_DESC_PTR # define DMA5_NEXT_DESC_PTR DMA2_5_NEXT_DESC_PTR # define DMA6_NEXT_DESC_PTR DMA2_6_NEXT_DESC_PTR # define DMA7_NEXT_DESC_PTR DMA2_7_NEXT_DESC_PTR # define DMA8_NEXT_DESC_PTR DMA2_8_NEXT_DESC_PTR # define DMA9_NEXT_DESC_PTR DMA2_9_NEXT_DESC_PTR # define DMA10_NEXT_DESC_PTR DMA2_10_NEXT_DESC_PTR # define DMA11_NEXT_DESC_PTR DMA2_11_NEXT_DESC_PTR # define DMA12_NEXT_DESC_PTR DMA1_0_NEXT_DESC_PTR # define DMA13_NEXT_DESC_PTR DMA1_1_NEXT_DESC_PTR # define DMA14_NEXT_DESC_PTR DMA1_2_NEXT_DESC_PTR # define DMA15_NEXT_DESC_PTR DMA1_3_NEXT_DESC_PTR # define DMA16_NEXT_DESC_PTR DMA1_4_NEXT_DESC_PTR # define DMA17_NEXT_DESC_PTR DMA1_5_NEXT_DESC_PTR # define DMA18_NEXT_DESC_PTR DMA1_6_NEXT_DESC_PTR # define DMA19_NEXT_DESC_PTR DMA1_7_NEXT_DESC_PTR # define DMA20_NEXT_DESC_PTR DMA1_8_NEXT_DESC_PTR # define DMA21_NEXT_DESC_PTR DMA1_9_NEXT_DESC_PTR # define DMA22_NEXT_DESC_PTR DMA1_10_NEXT_DESC_PTR # define DMA23_NEXT_DESC_PTR DMA1_11_NEXT_DESC_PTR #endif parent = debugfs_create_dir("dma", top); DMA(0); DMA(1); DMA(1); DMA(2); DMA(3); DMA(4); DMA(5); DMA(6); DMA(7); #ifdef DMA8_NEXT_DESC_PTR DMA(8); DMA(9); DMA(10); DMA(11); #endif #ifdef DMA12_NEXT_DESC_PTR DMA(12); DMA(13); DMA(14); DMA(15); DMA(16); DMA(17); DMA(18); DMA(19); #endif #ifdef DMA20_NEXT_DESC_PTR DMA(20); DMA(21); DMA(22); DMA(23); #endif parent = debugfs_create_dir("ebiu_amc", top); D32(EBIU_AMBCTL0); D32(EBIU_AMBCTL1); D16(EBIU_AMGCTL); #ifdef EBIU_MBSCTL D16(EBIU_MBSCTL); D32(EBIU_ARBSTAT); D32(EBIU_MODE); D16(EBIU_FCTL); #endif #ifdef EBIU_SDGCTL parent = debugfs_create_dir("ebiu_sdram", top); # ifdef __ADSPBF561__ D32(EBIU_SDBCTL); # else D16(EBIU_SDBCTL); # endif D32(EBIU_SDGCTL); D16(EBIU_SDRRC); 
D16(EBIU_SDSTAT); #endif #ifdef EBIU_DDRACCT parent = debugfs_create_dir("ebiu_ddr", top); D32(EBIU_DDRACCT); D32(EBIU_DDRARCT); D32(EBIU_DDRBRC0); D32(EBIU_DDRBRC1); D32(EBIU_DDRBRC2); D32(EBIU_DDRBRC3); D32(EBIU_DDRBRC4); D32(EBIU_DDRBRC5); D32(EBIU_DDRBRC6); D32(EBIU_DDRBRC7); D32(EBIU_DDRBWC0); D32(EBIU_DDRBWC1); D32(EBIU_DDRBWC2); D32(EBIU_DDRBWC3); D32(EBIU_DDRBWC4); D32(EBIU_DDRBWC5); D32(EBIU_DDRBWC6); D32(EBIU_DDRBWC7); D32(EBIU_DDRCTL0); D32(EBIU_DDRCTL1); D32(EBIU_DDRCTL2); D32(EBIU_DDRCTL3); D32(EBIU_DDRGC0); D32(EBIU_DDRGC1); D32(EBIU_DDRGC2); D32(EBIU_DDRGC3); D32(EBIU_DDRMCCL); D32(EBIU_DDRMCEN); D32(EBIU_DDRQUE); D32(EBIU_DDRTACT); D32(EBIU_ERRADD); D16(EBIU_ERRMST); D16(EBIU_RSTCTL); #endif #ifdef EMAC_ADDRHI parent = debugfs_create_dir("emac", top); D32(EMAC_ADDRHI); D32(EMAC_ADDRLO); D32(EMAC_FLC); D32(EMAC_HASHHI); D32(EMAC_HASHLO); D32(EMAC_MMC_CTL); D32(EMAC_MMC_RIRQE); D32(EMAC_MMC_RIRQS); D32(EMAC_MMC_TIRQE); D32(EMAC_MMC_TIRQS); D32(EMAC_OPMODE); D32(EMAC_RXC_ALIGN); D32(EMAC_RXC_ALLFRM); D32(EMAC_RXC_ALLOCT); D32(EMAC_RXC_BROAD); D32(EMAC_RXC_DMAOVF); D32(EMAC_RXC_EQ64); D32(EMAC_RXC_FCS); D32(EMAC_RXC_GE1024); D32(EMAC_RXC_LNERRI); D32(EMAC_RXC_LNERRO); D32(EMAC_RXC_LONG); D32(EMAC_RXC_LT1024); D32(EMAC_RXC_LT128); D32(EMAC_RXC_LT256); D32(EMAC_RXC_LT512); D32(EMAC_RXC_MACCTL); D32(EMAC_RXC_MULTI); D32(EMAC_RXC_OCTET); D32(EMAC_RXC_OK); D32(EMAC_RXC_OPCODE); D32(EMAC_RXC_PAUSE); D32(EMAC_RXC_SHORT); D32(EMAC_RXC_TYPED); D32(EMAC_RXC_UNICST); D32(EMAC_RX_IRQE); D32(EMAC_RX_STAT); D32(EMAC_RX_STKY); D32(EMAC_STAADD); D32(EMAC_STADAT); D32(EMAC_SYSCTL); D32(EMAC_SYSTAT); D32(EMAC_TXC_1COL); D32(EMAC_TXC_ABORT); D32(EMAC_TXC_ALLFRM); D32(EMAC_TXC_ALLOCT); D32(EMAC_TXC_BROAD); D32(EMAC_TXC_CRSERR); D32(EMAC_TXC_DEFER); D32(EMAC_TXC_DMAUND); D32(EMAC_TXC_EQ64); D32(EMAC_TXC_GE1024); D32(EMAC_TXC_GT1COL); D32(EMAC_TXC_LATECL); D32(EMAC_TXC_LT1024); D32(EMAC_TXC_LT128); D32(EMAC_TXC_LT256); D32(EMAC_TXC_LT512); D32(EMAC_TXC_MACCTL); 
D32(EMAC_TXC_MULTI); D32(EMAC_TXC_OCTET); D32(EMAC_TXC_OK); D32(EMAC_TXC_UNICST); D32(EMAC_TXC_XS_COL); D32(EMAC_TXC_XS_DFR); D32(EMAC_TX_IRQE); D32(EMAC_TX_STAT); D32(EMAC_TX_STKY); D32(EMAC_VLAN1); D32(EMAC_VLAN2); D32(EMAC_WKUP_CTL); D32(EMAC_WKUP_FFCMD); D32(EMAC_WKUP_FFCRC0); D32(EMAC_WKUP_FFCRC1); D32(EMAC_WKUP_FFMSK0); D32(EMAC_WKUP_FFMSK1); D32(EMAC_WKUP_FFMSK2); D32(EMAC_WKUP_FFMSK3); D32(EMAC_WKUP_FFOFF); # ifdef EMAC_PTP_ACCR D32(EMAC_PTP_ACCR); D32(EMAC_PTP_ADDEND); D32(EMAC_PTP_ALARMHI); D32(EMAC_PTP_ALARMLO); D16(EMAC_PTP_CTL); D32(EMAC_PTP_FOFF); D32(EMAC_PTP_FV1); D32(EMAC_PTP_FV2); D32(EMAC_PTP_FV3); D16(EMAC_PTP_ID_OFF); D32(EMAC_PTP_ID_SNAP); D16(EMAC_PTP_IE); D16(EMAC_PTP_ISTAT); D32(EMAC_PTP_OFFSET); D32(EMAC_PTP_PPS_PERIOD); D32(EMAC_PTP_PPS_STARTHI); D32(EMAC_PTP_PPS_STARTLO); D32(EMAC_PTP_RXSNAPHI); D32(EMAC_PTP_RXSNAPLO); D32(EMAC_PTP_TIMEHI); D32(EMAC_PTP_TIMELO); D32(EMAC_PTP_TXSNAPHI); D32(EMAC_PTP_TXSNAPLO); # endif #endif #if defined(EPPI0_STATUS) || defined(EPPI1_STATUS) || defined(EPPI2_STATUS) parent = debugfs_create_dir("eppi", top); # ifdef EPPI0_STATUS EPPI(0); # endif # ifdef EPPI1_STATUS EPPI(1); # endif # ifdef EPPI2_STATUS EPPI(2); # endif #endif parent = debugfs_create_dir("gptimer", top); #ifdef TIMER_ENABLE GPTIMER_GROUP(TIMER_ENABLE, -1); #endif #ifdef TIMER_ENABLE0 GPTIMER_GROUP(TIMER_ENABLE0, 0); #endif #ifdef TIMER_ENABLE1 GPTIMER_GROUP(TIMER_ENABLE1, 1); #endif /* XXX: Should convert BF561 MMR names */ #ifdef TMRS4_DISABLE GPTIMER_GROUP(TMRS4_ENABLE, 0); GPTIMER_GROUP(TMRS8_ENABLE, 1); #endif GPTIMER(0); GPTIMER(1); GPTIMER(2); #ifdef TIMER3_CONFIG GPTIMER(3); GPTIMER(4); GPTIMER(5); GPTIMER(6); GPTIMER(7); #endif #ifdef TIMER8_CONFIG GPTIMER(8); GPTIMER(9); GPTIMER(10); #endif #ifdef TIMER11_CONFIG GPTIMER(11); #endif #ifdef HMDMA0_CONTROL parent = debugfs_create_dir("hmdma", top); HMDMA(0); HMDMA(1); #endif #ifdef HOST_CONTROL parent = debugfs_create_dir("hostdp", top); D16(HOST_CONTROL); D16(HOST_STATUS); 
D16(HOST_TIMEOUT); #endif #ifdef IMDMA_S0_CONFIG parent = debugfs_create_dir("imdma", top); IMDMA(0); IMDMA(1); #endif #ifdef KPAD_CTL parent = debugfs_create_dir("keypad", top); D16(KPAD_CTL); D16(KPAD_PRESCALE); D16(KPAD_MSEL); D16(KPAD_ROWCOL); D16(KPAD_STAT); D16(KPAD_SOFTEVAL); #endif parent = debugfs_create_dir("mdma", top); MDMA(0); MDMA(1); #ifdef MDMA_D2_CONFIG MDMA(2); MDMA(3); #endif #ifdef MXVR_CONFIG parent = debugfs_create_dir("mxvr", top); D16(MXVR_CONFIG); # ifdef MXVR_PLL_CTL_0 D32(MXVR_PLL_CTL_0); # endif D32(MXVR_STATE_0); D32(MXVR_STATE_1); D32(MXVR_INT_STAT_0); D32(MXVR_INT_STAT_1); D32(MXVR_INT_EN_0); D32(MXVR_INT_EN_1); D16(MXVR_POSITION); D16(MXVR_MAX_POSITION); D16(MXVR_DELAY); D16(MXVR_MAX_DELAY); D32(MXVR_LADDR); D16(MXVR_GADDR); D32(MXVR_AADDR); D32(MXVR_ALLOC_0); D32(MXVR_ALLOC_1); D32(MXVR_ALLOC_2); D32(MXVR_ALLOC_3); D32(MXVR_ALLOC_4); D32(MXVR_ALLOC_5); D32(MXVR_ALLOC_6); D32(MXVR_ALLOC_7); D32(MXVR_ALLOC_8); D32(MXVR_ALLOC_9); D32(MXVR_ALLOC_10); D32(MXVR_ALLOC_11); D32(MXVR_ALLOC_12); D32(MXVR_ALLOC_13); D32(MXVR_ALLOC_14); D32(MXVR_SYNC_LCHAN_0); D32(MXVR_SYNC_LCHAN_1); D32(MXVR_SYNC_LCHAN_2); D32(MXVR_SYNC_LCHAN_3); D32(MXVR_SYNC_LCHAN_4); D32(MXVR_SYNC_LCHAN_5); D32(MXVR_SYNC_LCHAN_6); D32(MXVR_SYNC_LCHAN_7); D32(MXVR_DMA0_CONFIG); D32(MXVR_DMA0_START_ADDR); D16(MXVR_DMA0_COUNT); D32(MXVR_DMA0_CURR_ADDR); D16(MXVR_DMA0_CURR_COUNT); D32(MXVR_DMA1_CONFIG); D32(MXVR_DMA1_START_ADDR); D16(MXVR_DMA1_COUNT); D32(MXVR_DMA1_CURR_ADDR); D16(MXVR_DMA1_CURR_COUNT); D32(MXVR_DMA2_CONFIG); D32(MXVR_DMA2_START_ADDR); D16(MXVR_DMA2_COUNT); D32(MXVR_DMA2_CURR_ADDR); D16(MXVR_DMA2_CURR_COUNT); D32(MXVR_DMA3_CONFIG); D32(MXVR_DMA3_START_ADDR); D16(MXVR_DMA3_COUNT); D32(MXVR_DMA3_CURR_ADDR); D16(MXVR_DMA3_CURR_COUNT); D32(MXVR_DMA4_CONFIG); D32(MXVR_DMA4_START_ADDR); D16(MXVR_DMA4_COUNT); D32(MXVR_DMA4_CURR_ADDR); D16(MXVR_DMA4_CURR_COUNT); D32(MXVR_DMA5_CONFIG); D32(MXVR_DMA5_START_ADDR); D16(MXVR_DMA5_COUNT); D32(MXVR_DMA5_CURR_ADDR); 
D16(MXVR_DMA5_CURR_COUNT); D32(MXVR_DMA6_CONFIG); D32(MXVR_DMA6_START_ADDR); D16(MXVR_DMA6_COUNT); D32(MXVR_DMA6_CURR_ADDR); D16(MXVR_DMA6_CURR_COUNT); D32(MXVR_DMA7_CONFIG); D32(MXVR_DMA7_START_ADDR); D16(MXVR_DMA7_COUNT); D32(MXVR_DMA7_CURR_ADDR); D16(MXVR_DMA7_CURR_COUNT); D16(MXVR_AP_CTL); D32(MXVR_APRB_START_ADDR); D32(MXVR_APRB_CURR_ADDR); D32(MXVR_APTB_START_ADDR); D32(MXVR_APTB_CURR_ADDR); D32(MXVR_CM_CTL); D32(MXVR_CMRB_START_ADDR); D32(MXVR_CMRB_CURR_ADDR); D32(MXVR_CMTB_START_ADDR); D32(MXVR_CMTB_CURR_ADDR); D32(MXVR_RRDB_START_ADDR); D32(MXVR_RRDB_CURR_ADDR); D32(MXVR_PAT_DATA_0); D32(MXVR_PAT_EN_0); D32(MXVR_PAT_DATA_1); D32(MXVR_PAT_EN_1); D16(MXVR_FRAME_CNT_0); D16(MXVR_FRAME_CNT_1); D32(MXVR_ROUTING_0); D32(MXVR_ROUTING_1); D32(MXVR_ROUTING_2); D32(MXVR_ROUTING_3); D32(MXVR_ROUTING_4); D32(MXVR_ROUTING_5); D32(MXVR_ROUTING_6); D32(MXVR_ROUTING_7); D32(MXVR_ROUTING_8); D32(MXVR_ROUTING_9); D32(MXVR_ROUTING_10); D32(MXVR_ROUTING_11); D32(MXVR_ROUTING_12); D32(MXVR_ROUTING_13); D32(MXVR_ROUTING_14); # ifdef MXVR_PLL_CTL_1 D32(MXVR_PLL_CTL_1); # endif D16(MXVR_BLOCK_CNT); # ifdef MXVR_CLK_CTL D32(MXVR_CLK_CTL); # endif # ifdef MXVR_CDRPLL_CTL D32(MXVR_CDRPLL_CTL); # endif # ifdef MXVR_FMPLL_CTL D32(MXVR_FMPLL_CTL); # endif # ifdef MXVR_PIN_CTL D16(MXVR_PIN_CTL); # endif # ifdef MXVR_SCLK_CNT D16(MXVR_SCLK_CNT); # endif #endif #ifdef NFC_ADDR parent = debugfs_create_dir("nfc", top); D_WO(NFC_ADDR, 16); D_WO(NFC_CMD, 16); D_RO(NFC_COUNT, 16); D16(NFC_CTL); D_WO(NFC_DATA_RD, 16); D_WO(NFC_DATA_WR, 16); D_RO(NFC_ECC0, 16); D_RO(NFC_ECC1, 16); D_RO(NFC_ECC2, 16); D_RO(NFC_ECC3, 16); D16(NFC_IRQMASK); D16(NFC_IRQSTAT); D_WO(NFC_PGCTL, 16); D_RO(NFC_READ, 16); D16(NFC_RST); D_RO(NFC_STAT, 16); #endif #ifdef OTP_CONTROL parent = debugfs_create_dir("otp", top); D16(OTP_CONTROL); D16(OTP_BEN); D16(OTP_STATUS); D32(OTP_TIMING); D32(OTP_DATA0); D32(OTP_DATA1); D32(OTP_DATA2); D32(OTP_DATA3); #endif #ifdef PINT0_MASK_SET parent = debugfs_create_dir("pint", top); 
PINT(0); PINT(1); PINT(2); PINT(3); #endif #ifdef PIXC_CTL parent = debugfs_create_dir("pixc", top); D16(PIXC_CTL); D16(PIXC_PPL); D16(PIXC_LPF); D16(PIXC_AHSTART); D16(PIXC_AHEND); D16(PIXC_AVSTART); D16(PIXC_AVEND); D16(PIXC_ATRANSP); D16(PIXC_BHSTART); D16(PIXC_BHEND); D16(PIXC_BVSTART); D16(PIXC_BVEND); D16(PIXC_BTRANSP); D16(PIXC_INTRSTAT); D32(PIXC_RYCON); D32(PIXC_GUCON); D32(PIXC_BVCON); D32(PIXC_CCBIAS); D32(PIXC_TC); #endif parent = debugfs_create_dir("pll", top); D16(PLL_CTL); D16(PLL_DIV); D16(PLL_LOCKCNT); D16(PLL_STAT); D16(VR_CTL); D32(CHIPID); /* it's part of this hardware block */ #if defined(PPI_CONTROL) || defined(PPI0_CONTROL) || defined(PPI1_CONTROL) parent = debugfs_create_dir("ppi", top); # ifdef PPI_CONTROL bfin_debug_mmrs_ppi(parent, PPI_CONTROL, -1); # endif # ifdef PPI0_CONTROL PPI(0); # endif # ifdef PPI1_CONTROL PPI(1); # endif #endif #ifdef PWM_CTRL parent = debugfs_create_dir("pwm", top); D16(PWM_CTRL); D16(PWM_STAT); D16(PWM_TM); D16(PWM_DT); D16(PWM_GATE); D16(PWM_CHA); D16(PWM_CHB); D16(PWM_CHC); D16(PWM_SEG); D16(PWM_SYNCWT); D16(PWM_CHAL); D16(PWM_CHBL); D16(PWM_CHCL); D16(PWM_LSI); D16(PWM_STAT2); #endif #ifdef RSI_CONFIG parent = debugfs_create_dir("rsi", top); D32(RSI_ARGUMENT); D16(RSI_CEATA_CONTROL); D16(RSI_CLK_CONTROL); D16(RSI_COMMAND); D16(RSI_CONFIG); D16(RSI_DATA_CNT); D16(RSI_DATA_CONTROL); D16(RSI_DATA_LGTH); D32(RSI_DATA_TIMER); D16(RSI_EMASK); D16(RSI_ESTAT); D32(RSI_FIFO); D16(RSI_FIFO_CNT); D32(RSI_MASK0); D32(RSI_MASK1); D16(RSI_PID0); D16(RSI_PID1); D16(RSI_PID2); D16(RSI_PID3); D16(RSI_PID4); D16(RSI_PID5); D16(RSI_PID6); D16(RSI_PID7); D16(RSI_PWR_CONTROL); D16(RSI_RD_WAIT_EN); D32(RSI_RESPONSE0); D32(RSI_RESPONSE1); D32(RSI_RESPONSE2); D32(RSI_RESPONSE3); D16(RSI_RESP_CMD); D32(RSI_STATUS); D_WO(RSI_STATUSCL, 16); #endif #ifdef RTC_ALARM parent = debugfs_create_dir("rtc", top); D32(RTC_ALARM); D16(RTC_ICTL); D16(RTC_ISTAT); D16(RTC_PREN); D32(RTC_STAT); D16(RTC_SWCNT); #endif #ifdef SDH_CFG parent = 
debugfs_create_dir("sdh", top); D32(SDH_ARGUMENT); D16(SDH_CFG); D16(SDH_CLK_CTL); D16(SDH_COMMAND); D_RO(SDH_DATA_CNT, 16); D16(SDH_DATA_CTL); D16(SDH_DATA_LGTH); D32(SDH_DATA_TIMER); D16(SDH_E_MASK); D16(SDH_E_STATUS); D32(SDH_FIFO); D_RO(SDH_FIFO_CNT, 16); D32(SDH_MASK0); D32(SDH_MASK1); D_RO(SDH_PID0, 16); D_RO(SDH_PID1, 16); D_RO(SDH_PID2, 16); D_RO(SDH_PID3, 16); D_RO(SDH_PID4, 16); D_RO(SDH_PID5, 16); D_RO(SDH_PID6, 16); D_RO(SDH_PID7, 16); D16(SDH_PWR_CTL); D16(SDH_RD_WAIT_EN); D_RO(SDH_RESPONSE0, 32); D_RO(SDH_RESPONSE1, 32); D_RO(SDH_RESPONSE2, 32); D_RO(SDH_RESPONSE3, 32); D_RO(SDH_RESP_CMD, 16); D_RO(SDH_STATUS, 32); D_WO(SDH_STATUS_CLR, 16); #endif #ifdef SECURE_CONTROL parent = debugfs_create_dir("security", top); D16(SECURE_CONTROL); D16(SECURE_STATUS); D32(SECURE_SYSSWT); #endif parent = debugfs_create_dir("sic", top); D16(SWRST); D16(SYSCR); D16(SIC_RVECT); D32(SIC_IAR0); D32(SIC_IAR1); D32(SIC_IAR2); #ifdef SIC_IAR3 D32(SIC_IAR3); #endif #ifdef SIC_IAR4 D32(SIC_IAR4); D32(SIC_IAR5); D32(SIC_IAR6); #endif #ifdef SIC_IAR7 D32(SIC_IAR7); #endif #ifdef SIC_IAR8 D32(SIC_IAR8); D32(SIC_IAR9); D32(SIC_IAR10); D32(SIC_IAR11); #endif #ifdef SIC_IMASK D32(SIC_IMASK); D32(SIC_ISR); D32(SIC_IWR); #endif #ifdef SIC_IMASK0 D32(SIC_IMASK0); D32(SIC_IMASK1); D32(SIC_ISR0); D32(SIC_ISR1); D32(SIC_IWR0); D32(SIC_IWR1); #endif #ifdef SIC_IMASK2 D32(SIC_IMASK2); D32(SIC_ISR2); D32(SIC_IWR2); #endif #ifdef SICB_RVECT D16(SICB_SWRST); D16(SICB_SYSCR); D16(SICB_RVECT); D32(SICB_IAR0); D32(SICB_IAR1); D32(SICB_IAR2); D32(SICB_IAR3); D32(SICB_IAR4); D32(SICB_IAR5); D32(SICB_IAR6); D32(SICB_IAR7); D32(SICB_IMASK0); D32(SICB_IMASK1); D32(SICB_ISR0); D32(SICB_ISR1); D32(SICB_IWR0); D32(SICB_IWR1); #endif parent = debugfs_create_dir("spi", top); #ifdef SPI0_REGBASE SPI(0); #endif #ifdef SPI1_REGBASE SPI(1); #endif #ifdef SPI2_REGBASE SPI(2); #endif parent = debugfs_create_dir("sport", top); #ifdef SPORT0_STAT SPORT(0); #endif #ifdef SPORT1_STAT SPORT(1); #endif #ifdef 
SPORT2_STAT SPORT(2); #endif #ifdef SPORT3_STAT SPORT(3); #endif #if defined(TWI_CLKDIV) || defined(TWI0_CLKDIV) || defined(TWI1_CLKDIV) parent = debugfs_create_dir("twi", top); # ifdef TWI_CLKDIV bfin_debug_mmrs_twi(parent, TWI_CLKDIV, -1); # endif # ifdef TWI0_CLKDIV TWI(0); # endif # ifdef TWI1_CLKDIV TWI(1); # endif #endif parent = debugfs_create_dir("uart", top); #ifdef BFIN_UART_DLL bfin_debug_mmrs_uart(parent, BFIN_UART_DLL, -1); #endif #ifdef UART0_DLL UART(0); #endif #ifdef UART1_DLL UART(1); #endif #ifdef UART2_DLL UART(2); #endif #ifdef UART3_DLL UART(3); #endif #ifdef USB_FADDR parent = debugfs_create_dir("usb", top); D16(USB_FADDR); D16(USB_POWER); D16(USB_INTRTX); D16(USB_INTRRX); D16(USB_INTRTXE); D16(USB_INTRRXE); D16(USB_INTRUSB); D16(USB_INTRUSBE); D16(USB_FRAME); D16(USB_INDEX); D16(USB_TESTMODE); D16(USB_GLOBINTR); D16(USB_GLOBAL_CTL); D16(USB_TX_MAX_PACKET); D16(USB_CSR0); D16(USB_TXCSR); D16(USB_RX_MAX_PACKET); D16(USB_RXCSR); D16(USB_COUNT0); D16(USB_RXCOUNT); D16(USB_TXTYPE); D16(USB_NAKLIMIT0); D16(USB_TXINTERVAL); D16(USB_RXTYPE); D16(USB_RXINTERVAL); D16(USB_TXCOUNT); D16(USB_EP0_FIFO); D16(USB_EP1_FIFO); D16(USB_EP2_FIFO); D16(USB_EP3_FIFO); D16(USB_EP4_FIFO); D16(USB_EP5_FIFO); D16(USB_EP6_FIFO); D16(USB_EP7_FIFO); D16(USB_OTG_DEV_CTL); D16(USB_OTG_VBUS_IRQ); D16(USB_OTG_VBUS_MASK); D16(USB_LINKINFO); D16(USB_VPLEN); D16(USB_HS_EOF1); D16(USB_FS_EOF1); D16(USB_LS_EOF1); D16(USB_APHY_CNTRL); D16(USB_APHY_CALIB); D16(USB_APHY_CNTRL2); D16(USB_PHY_TEST); D16(USB_PLLOSC_CTRL); D16(USB_SRP_CLKDIV); D16(USB_EP_NI0_TXMAXP); D16(USB_EP_NI0_TXCSR); D16(USB_EP_NI0_RXMAXP); D16(USB_EP_NI0_RXCSR); D16(USB_EP_NI0_RXCOUNT); D16(USB_EP_NI0_TXTYPE); D16(USB_EP_NI0_TXINTERVAL); D16(USB_EP_NI0_RXTYPE); D16(USB_EP_NI0_RXINTERVAL); D16(USB_EP_NI0_TXCOUNT); D16(USB_EP_NI1_TXMAXP); D16(USB_EP_NI1_TXCSR); D16(USB_EP_NI1_RXMAXP); D16(USB_EP_NI1_RXCSR); D16(USB_EP_NI1_RXCOUNT); D16(USB_EP_NI1_TXTYPE); D16(USB_EP_NI1_TXINTERVAL); D16(USB_EP_NI1_RXTYPE); 
D16(USB_EP_NI1_RXINTERVAL); D16(USB_EP_NI1_TXCOUNT); D16(USB_EP_NI2_TXMAXP); D16(USB_EP_NI2_TXCSR); D16(USB_EP_NI2_RXMAXP); D16(USB_EP_NI2_RXCSR); D16(USB_EP_NI2_RXCOUNT); D16(USB_EP_NI2_TXTYPE); D16(USB_EP_NI2_TXINTERVAL); D16(USB_EP_NI2_RXTYPE); D16(USB_EP_NI2_RXINTERVAL); D16(USB_EP_NI2_TXCOUNT); D16(USB_EP_NI3_TXMAXP); D16(USB_EP_NI3_TXCSR); D16(USB_EP_NI3_RXMAXP); D16(USB_EP_NI3_RXCSR); D16(USB_EP_NI3_RXCOUNT); D16(USB_EP_NI3_TXTYPE); D16(USB_EP_NI3_TXINTERVAL); D16(USB_EP_NI3_RXTYPE); D16(USB_EP_NI3_RXINTERVAL); D16(USB_EP_NI3_TXCOUNT); D16(USB_EP_NI4_TXMAXP); D16(USB_EP_NI4_TXCSR); D16(USB_EP_NI4_RXMAXP); D16(USB_EP_NI4_RXCSR); D16(USB_EP_NI4_RXCOUNT); D16(USB_EP_NI4_TXTYPE); D16(USB_EP_NI4_TXINTERVAL); D16(USB_EP_NI4_RXTYPE); D16(USB_EP_NI4_RXINTERVAL); D16(USB_EP_NI4_TXCOUNT); D16(USB_EP_NI5_TXMAXP); D16(USB_EP_NI5_TXCSR); D16(USB_EP_NI5_RXMAXP); D16(USB_EP_NI5_RXCSR); D16(USB_EP_NI5_RXCOUNT); D16(USB_EP_NI5_TXTYPE); D16(USB_EP_NI5_TXINTERVAL); D16(USB_EP_NI5_RXTYPE); D16(USB_EP_NI5_RXINTERVAL); D16(USB_EP_NI5_TXCOUNT); D16(USB_EP_NI6_TXMAXP); D16(USB_EP_NI6_TXCSR); D16(USB_EP_NI6_RXMAXP); D16(USB_EP_NI6_RXCSR); D16(USB_EP_NI6_RXCOUNT); D16(USB_EP_NI6_TXTYPE); D16(USB_EP_NI6_TXINTERVAL); D16(USB_EP_NI6_RXTYPE); D16(USB_EP_NI6_RXINTERVAL); D16(USB_EP_NI6_TXCOUNT); D16(USB_EP_NI7_TXMAXP); D16(USB_EP_NI7_TXCSR); D16(USB_EP_NI7_RXMAXP); D16(USB_EP_NI7_RXCSR); D16(USB_EP_NI7_RXCOUNT); D16(USB_EP_NI7_TXTYPE); D16(USB_EP_NI7_TXINTERVAL); D16(USB_EP_NI7_RXTYPE); D16(USB_EP_NI7_RXINTERVAL); D16(USB_EP_NI7_TXCOUNT); D16(USB_DMA_INTERRUPT); D16(USB_DMA0CONTROL); D16(USB_DMA0ADDRLOW); D16(USB_DMA0ADDRHIGH); D16(USB_DMA0COUNTLOW); D16(USB_DMA0COUNTHIGH); D16(USB_DMA1CONTROL); D16(USB_DMA1ADDRLOW); D16(USB_DMA1ADDRHIGH); D16(USB_DMA1COUNTLOW); D16(USB_DMA1COUNTHIGH); D16(USB_DMA2CONTROL); D16(USB_DMA2ADDRLOW); D16(USB_DMA2ADDRHIGH); D16(USB_DMA2COUNTLOW); D16(USB_DMA2COUNTHIGH); D16(USB_DMA3CONTROL); D16(USB_DMA3ADDRLOW); D16(USB_DMA3ADDRHIGH); D16(USB_DMA3COUNTLOW); 
D16(USB_DMA3COUNTHIGH); D16(USB_DMA4CONTROL); D16(USB_DMA4ADDRLOW); D16(USB_DMA4ADDRHIGH); D16(USB_DMA4COUNTLOW); D16(USB_DMA4COUNTHIGH); D16(USB_DMA5CONTROL); D16(USB_DMA5ADDRLOW); D16(USB_DMA5ADDRHIGH); D16(USB_DMA5COUNTLOW); D16(USB_DMA5COUNTHIGH); D16(USB_DMA6CONTROL); D16(USB_DMA6ADDRLOW); D16(USB_DMA6ADDRHIGH); D16(USB_DMA6COUNTLOW); D16(USB_DMA6COUNTHIGH); D16(USB_DMA7CONTROL); D16(USB_DMA7ADDRLOW); D16(USB_DMA7ADDRHIGH); D16(USB_DMA7COUNTLOW); D16(USB_DMA7COUNTHIGH); #endif #ifdef WDOG_CNT parent = debugfs_create_dir("watchdog", top); D32(WDOG_CNT); D16(WDOG_CTL); D32(WDOG_STAT); #endif #ifdef WDOGA_CNT parent = debugfs_create_dir("watchdog", top); D32(WDOGA_CNT); D16(WDOGA_CTL); D32(WDOGA_STAT); D32(WDOGB_CNT); D16(WDOGB_CTL); D32(WDOGB_STAT); #endif /* BF533 glue */ #ifdef FIO_FLAG_D #define PORTFIO FIO_FLAG_D #endif /* BF561 glue */ #ifdef FIO0_FLAG_D #define PORTFIO FIO0_FLAG_D #endif #ifdef FIO1_FLAG_D #define PORTGIO FIO1_FLAG_D #endif #ifdef FIO2_FLAG_D #define PORTHIO FIO2_FLAG_D #endif parent = debugfs_create_dir("port", top); #ifdef PORTFIO PORT(PORTFIO, 'F'); #endif #ifdef PORTGIO PORT(PORTGIO, 'G'); #endif #ifdef PORTHIO PORT(PORTHIO, 'H'); #endif #ifdef __ADSPBF51x__ D16(PORTF_FER); D16(PORTF_DRIVE); D16(PORTF_HYSTERESIS); D16(PORTF_MUX); D16(PORTG_FER); D16(PORTG_DRIVE); D16(PORTG_HYSTERESIS); D16(PORTG_MUX); D16(PORTH_FER); D16(PORTH_DRIVE); D16(PORTH_HYSTERESIS); D16(PORTH_MUX); D16(MISCPORT_DRIVE); D16(MISCPORT_HYSTERESIS); #endif /* BF51x */ #ifdef __ADSPBF52x__ D16(PORTF_FER); D16(PORTF_DRIVE); D16(PORTF_HYSTERESIS); D16(PORTF_MUX); D16(PORTF_SLEW); D16(PORTG_FER); D16(PORTG_DRIVE); D16(PORTG_HYSTERESIS); D16(PORTG_MUX); D16(PORTG_SLEW); D16(PORTH_FER); D16(PORTH_DRIVE); D16(PORTH_HYSTERESIS); D16(PORTH_MUX); D16(PORTH_SLEW); D16(MISCPORT_DRIVE); D16(MISCPORT_HYSTERESIS); D16(MISCPORT_SLEW); #endif /* BF52x */ #ifdef BF537_FAMILY D16(PORTF_FER); D16(PORTG_FER); D16(PORTH_FER); D16(PORT_MUX); #endif /* BF534 BF536 BF537 */ #ifdef 
BF538_FAMILY D16(PORTCIO_FER); D16(PORTCIO); D16(PORTCIO_CLEAR); D16(PORTCIO_SET); D16(PORTCIO_TOGGLE); D16(PORTCIO_DIR); D16(PORTCIO_INEN); D16(PORTDIO); D16(PORTDIO_CLEAR); D16(PORTDIO_DIR); D16(PORTDIO_FER); D16(PORTDIO_INEN); D16(PORTDIO_SET); D16(PORTDIO_TOGGLE); D16(PORTEIO); D16(PORTEIO_CLEAR); D16(PORTEIO_DIR); D16(PORTEIO_FER); D16(PORTEIO_INEN); D16(PORTEIO_SET); D16(PORTEIO_TOGGLE); #endif /* BF538 BF539 */ #ifdef __ADSPBF54x__ { int num; unsigned long base; base = PORTA_FER; for (num = 0; num < 10; ++num) { PORT(base, num); base += sizeof(struct bfin_gpio_regs); } } #endif /* BF54x */ debug_mmrs_dentry = top; return 0; } module_init(bfin_debug_mmrs_init); static void __exit bfin_debug_mmrs_exit(void) { debugfs_remove_recursive(debug_mmrs_dentry); } module_exit(bfin_debug_mmrs_exit); MODULE_LICENSE("GPL");
gpl-2.0
shakalaca/ASUS_PadFone_PF500KL
kernel/sound/core/seq/seq_memory.c
7881
12745
/*
 *  ALSA sequencer Memory Manager
 *  Copyright (c) 1998 by Frank van de Pol <fvdpol@coil.demon.nl>
 *                        Jaroslav Kysela <perex@perex.cz>
 *                2000 by Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <sound/core.h>

#include <sound/seq_kernel.h>
#include "seq_memory.h"
#include "seq_queue.h"
#include "seq_info.h"
#include "seq_lock.h"

/* number of cells currently free in the pool */
static inline int snd_seq_pool_available(struct snd_seq_pool *pool)
{
	return pool->total_elements - atomic_read(&pool->counter);
}

/* true when enough free cells remain for output (pool->room headroom) */
static inline int snd_seq_output_ok(struct snd_seq_pool *pool)
{
	return snd_seq_pool_available(pool) >= pool->room;
}

/*
 * Variable length event:
 * The event like sysex uses variable length type.
 * The external data may be stored in three different formats.
 * 1) kernel space
 *    This is the normal case.
 *      ext.data.len = length
 *      ext.data.ptr = buffer pointer
 * 2) user space
 *    When an event is generated via read(), the external data is
 *    kept in user space until expanded.
 *      ext.data.len = length | SNDRV_SEQ_EXT_USRPTR
 *      ext.data.ptr = userspace pointer
 * 3) chained cells
 *    When the variable length event is enqueued (in prioq or fifo),
 *    the external data is decomposed to several cells.
 *      ext.data.len = length | SNDRV_SEQ_EXT_CHAINED
 *      ext.data.ptr = the additiona cell head
 *         -> cell.next -> cell.next -> ..
 */

/*
 * Return the payload length of a variable-length event, or -EINVAL if
 * the event is not of variable-length type.  The storage-format flag
 * bits (SNDRV_SEQ_EXT_*) are masked off.
 */
static int get_var_len(const struct snd_seq_event *event)
{
	if ((event->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) != SNDRV_SEQ_EVENT_LENGTH_VARIABLE)
		return -EINVAL;

	return event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
}

/*
 * exported:
 * call dump function to expand external data.
 * Walks the external data in whichever of the three formats it is stored
 * and feeds it chunk-wise to @func.  Returns 0 or a negative error code
 * (either from get_var_len(), copy_from_user() or from @func itself).
 */
int snd_seq_dump_var_event(const struct snd_seq_event *event,
			   snd_seq_dump_func_t func, void *private_data)
{
	int len, err;
	struct snd_seq_event_cell *cell;

	if ((len = get_var_len(event)) <= 0)
		return len;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		/* data still lives in user space: bounce through a small buffer */
		char buf[32];
		char __user *curptr = (char __force __user *)event->data.ext.ptr;
		while (len > 0) {
			int size = sizeof(buf);
			if (len < size)
				size = len;
			if (copy_from_user(buf, curptr, size))
				return -EFAULT;
			err = func(private_data, buf, size);
			if (err < 0)
				return err;
			curptr += size;
			len -= size;
		}
		return 0;
	}
	if (! (event->data.ext.len & SNDRV_SEQ_EXT_CHAINED)) {
		/* plain kernel buffer: hand it over in one call */
		return func(private_data, event->data.ext.ptr, len);
	}

	/* chained cells: one snd_seq_event-sized chunk per cell */
	cell = (struct snd_seq_event_cell *)event->data.ext.ptr;
	for (; len > 0 && cell; cell = cell->next) {
		int size = sizeof(struct snd_seq_event);
		if (len < size)
			size = len;
		err = func(private_data, &cell->event, size);
		if (err < 0)
			return err;
		len -= size;
	}
	return 0;
}

EXPORT_SYMBOL(snd_seq_dump_var_event);


/*
 * exported:
 * expand the variable length event to linear buffer space.
 */

/* dump callback writing into a kernel buffer; advances *bufptr */
static int seq_copy_in_kernel(char **bufptr, const void *src, int size)
{
	memcpy(*bufptr, src, size);
	*bufptr += size;
	return 0;
}

/* dump callback writing into a user buffer; advances *bufptr */
static int seq_copy_in_user(char __user **bufptr, const void *src, int size)
{
	if (copy_to_user(*bufptr, src, size))
		return -EFAULT;
	*bufptr += size;
	return 0;
}

/*
 * Expand the external data of @event into @buf (kernel or user space,
 * chosen by @in_kernel).  @count is the capacity of @buf; the result is
 * padded up to @size_aligned when positive.  Returns the (aligned)
 * length on success, -EAGAIN when @buf is too small, or a negative
 * error code.
 */
int snd_seq_expand_var_event(const struct snd_seq_event *event, int count, char *buf,
			     int in_kernel, int size_aligned)
{
	int len, newlen;
	int err;

	if ((len = get_var_len(event)) < 0)
		return len;
	newlen = len;
	if (size_aligned > 0)
		newlen = roundup(len, size_aligned);
	if (count < newlen)
		return -EAGAIN;

	if (event->data.ext.len & SNDRV_SEQ_EXT_USRPTR) {
		/* user-space source can only be copied into a kernel buffer */
		if (! in_kernel)
			return -EINVAL;
		if (copy_from_user(buf, (void __force __user *)event->data.ext.ptr, len))
			return -EFAULT;
		return newlen;
	}
	err = snd_seq_dump_var_event(event,
				     in_kernel ? (snd_seq_dump_func_t)seq_copy_in_kernel :
				     (snd_seq_dump_func_t)seq_copy_in_user,
				     &buf);
	return err < 0 ? err : newlen;
}

EXPORT_SYMBOL(snd_seq_expand_var_event);

/*
 * release this cell, free extended data if available
 * Caller must hold pool->lock.
 */
static inline void free_cell(struct snd_seq_pool *pool,
			     struct snd_seq_event_cell *cell)
{
	cell->next = pool->free;
	pool->free = cell;
	atomic_dec(&pool->counter);
}

/*
 * Return a cell (and any chained extra cells of a variable-length
 * event) to its pool and wake up writers waiting for free space.
 */
void snd_seq_cell_free(struct snd_seq_event_cell * cell)
{
	unsigned long flags;
	struct snd_seq_pool *pool;

	if (snd_BUG_ON(!cell))
		return;
	pool = cell->pool;
	if (snd_BUG_ON(!pool))
		return;

	spin_lock_irqsave(&pool->lock, flags);
	free_cell(pool, cell);
	if (snd_seq_ev_is_variable(&cell->event)) {
		if (cell->event.data.ext.len & SNDRV_SEQ_EXT_CHAINED) {
			struct snd_seq_event_cell *curp, *nextptr;
			curp = cell->event.data.ext.ptr;
			for (; curp; curp = nextptr) {
				nextptr = curp->next;
				curp->next = pool->free;
				free_cell(pool, curp);
			}
		}
	}
	if (waitqueue_active(&pool->output_sleep)) {
		/* has enough space now? */
		if (snd_seq_output_ok(pool))
			wake_up(&pool->output_sleep);
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}


/*
 * allocate an event cell.
 * With @nonblock zero this may sleep on pool->output_sleep until a cell
 * is freed (so it must not be called from atomic context in that case);
 * interrupted sleep returns -ERESTARTSYS, pool teardown returns -ENOMEM.
 */
static int snd_seq_cell_alloc(struct snd_seq_pool *pool,
			      struct snd_seq_event_cell **cellp,
			      int nonblock, struct file *file)
{
	struct snd_seq_event_cell *cell;
	unsigned long flags;
	int err = -EAGAIN;
	wait_queue_t wait;

	if (pool == NULL)
		return -EINVAL;

	*cellp = NULL;

	init_waitqueue_entry(&wait, current);
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr == NULL) {	/* not initialized */
		snd_printd("seq: pool is not initialized\n");
		err = -EINVAL;
		goto __error;
	}
	while (pool->free == NULL && ! nonblock && ! pool->closing) {

		set_current_state(TASK_INTERRUPTIBLE);
		add_wait_queue(&pool->output_sleep, &wait);
		spin_unlock_irq(&pool->lock);
		schedule();
		spin_lock_irq(&pool->lock);
		remove_wait_queue(&pool->output_sleep, &wait);
		/* interrupted? */
		if (signal_pending(current)) {
			err = -ERESTARTSYS;
			goto __error;
		}
	}
	if (pool->closing) { /* closing.. */
		err = -ENOMEM;
		goto __error;
	}

	cell = pool->free;
	if (cell) {
		int used;
		pool->free = cell->next;
		atomic_inc(&pool->counter);
		used = atomic_read(&pool->counter);
		if (pool->max_used < used)
			pool->max_used = used;
		pool->event_alloc_success++;
		/* clear cell pointers */
		cell->next = NULL;
		err = 0;
	} else
		pool->event_alloc_failures++;
	*cellp = cell;

      __error:
	spin_unlock_irqrestore(&pool->lock, flags);
	return err;
}


/*
 * duplicate the event to a cell.
 * if the event has external data, the data is decomposed to additional
 * cells.
 */
int snd_seq_event_dup(struct snd_seq_pool *pool, struct snd_seq_event *event,
		      struct snd_seq_event_cell **cellp, int nonblock,
		      struct file *file)
{
	int ncells, err;
	unsigned int extlen;
	struct snd_seq_event_cell *cell;

	*cellp = NULL;

	ncells = 0;
	extlen = 0;
	if (snd_seq_ev_is_variable(event)) {
		extlen = event->data.ext.len & ~SNDRV_SEQ_EXT_MASK;
		ncells = (extlen + sizeof(struct snd_seq_event) - 1) / sizeof(struct snd_seq_event);
	}
	/* an event needing the whole pool (or more) can never succeed */
	if (ncells >= pool->total_elements)
		return -ENOMEM;

	err = snd_seq_cell_alloc(pool, &cell, nonblock, file);
	if (err < 0)
		return err;

	/* copy the event */
	cell->event = *event;

	/* decompose */
	if (snd_seq_ev_is_variable(event)) {
		int len = extlen;
		int is_chained = event->data.ext.len & SNDRV_SEQ_EXT_CHAINED;
		int is_usrptr = event->data.ext.len & SNDRV_SEQ_EXT_USRPTR;
		struct snd_seq_event_cell *src, *tmp, *tail;
		char *buf;

		/* the duplicate always stores its data as chained cells */
		cell->event.data.ext.len = extlen | SNDRV_SEQ_EXT_CHAINED;
		cell->event.data.ext.ptr = NULL;

		src = (struct snd_seq_event_cell *)event->data.ext.ptr;
		buf = (char *)event->data.ext.ptr;
		tail = NULL;

		while (ncells-- > 0) {
			int size = sizeof(struct snd_seq_event);
			if (len < size)
				size = len;
			err = snd_seq_cell_alloc(pool, &tmp, nonblock, file);
			if (err < 0)
				goto __error;
			if (cell->event.data.ext.ptr == NULL)
				cell->event.data.ext.ptr = tmp;
			if (tail)
				tail->next = tmp;
			tail = tmp;
			/* copy chunk */
			if (is_chained && src) {
				tmp->event = src->event;
				src = src->next;
			} else if (is_usrptr) {
				if (copy_from_user(&tmp->event, (char __force __user *)buf, size)) {
					err = -EFAULT;
					goto __error;
				}
			} else {
				memcpy(&tmp->event, buf, size);
			}
			buf += size;
			len -= size;
		}
	}

	*cellp = cell;
	return 0;

      __error:
	snd_seq_cell_free(cell);
	return err;
}


/* poll wait: register on the output wait queue and report writability */
int snd_seq_pool_poll_wait(struct snd_seq_pool *pool, struct file *file,
			   poll_table *wait)
{
	poll_wait(file, &pool->output_sleep, wait);
	return snd_seq_output_ok(pool);
}


/* allocate room specified number of events */
int snd_seq_pool_init(struct snd_seq_pool *pool)
{
	int cell;
	struct snd_seq_event_cell *cellptr;
	unsigned long flags;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	/*
	 * Allocate outside the lock, then commit pool->ptr under the lock.
	 * The old code tested pool->ptr without locking ("should be
	 * atomic?"), which raced against a concurrent initialization.
	 */
	cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
	if (cellptr == NULL) {
		snd_printd("seq: malloc for sequencer events failed\n");
		return -ENOMEM;
	}

	/* add new cells to the free cell list */
	spin_lock_irqsave(&pool->lock, flags);
	if (pool->ptr) {
		/* somebody else won the race; keep their pool, drop ours */
		spin_unlock_irqrestore(&pool->lock, flags);
		vfree(cellptr);
		return 0;
	}

	pool->ptr = cellptr;
	pool->free = NULL;

	for (cell = 0; cell < pool->size; cell++) {
		cellptr = pool->ptr + cell;
		cellptr->pool = pool;
		cellptr->next = pool->free;
		pool->free = cellptr;
	}
	pool->room = (pool->size + 1) / 2;

	/* init statistics */
	pool->max_used = 0;
	pool->total_elements = pool->size;
	spin_unlock_irqrestore(&pool->lock, flags);
	return 0;
}


/* remove events */
int snd_seq_pool_done(struct snd_seq_pool *pool)
{
	unsigned long flags;
	struct snd_seq_event_cell *ptr;
	int max_count = 5 * HZ;

	if (snd_BUG_ON(!pool))
		return -EINVAL;

	/* wait for closing all threads */
	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 1;
	spin_unlock_irqrestore(&pool->lock, flags);

	if (waitqueue_active(&pool->output_sleep))
		wake_up(&pool->output_sleep);

	/* give in-flight users up to 5 seconds to return their cells */
	while (atomic_read(&pool->counter) > 0) {
		if (max_count == 0) {
			snd_printk(KERN_WARNING "snd_seq_pool_done timeout: %d cells remain\n", atomic_read(&pool->counter));
			break;
		}
		schedule_timeout_uninterruptible(1);
		max_count--;
	}

	/* release all resources */
	spin_lock_irqsave(&pool->lock, flags);
	ptr = pool->ptr;
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	vfree(ptr);

	spin_lock_irqsave(&pool->lock, flags);
	pool->closing = 0;
	spin_unlock_irqrestore(&pool->lock, flags);

	return 0;
}


/* init new memory pool */
struct snd_seq_pool *snd_seq_pool_new(int poolsize)
{
	struct snd_seq_pool *pool;

	/* create pool block */
	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (pool == NULL) {
		snd_printd("seq: malloc failed for pool\n");
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->ptr = NULL;
	pool->free = NULL;
	pool->total_elements = 0;
	atomic_set(&pool->counter, 0);
	pool->closing = 0;
	init_waitqueue_head(&pool->output_sleep);

	pool->size = poolsize;

	/* init statistics */
	pool->max_used = 0;
	return pool;
}

/* remove memory pool */
int snd_seq_pool_delete(struct snd_seq_pool **ppool)
{
	struct snd_seq_pool *pool = *ppool;

	*ppool = NULL;
	if (pool == NULL)
		return 0;
	snd_seq_pool_done(pool);
	kfree(pool);
	return 0;
}

/* initialize sequencer memory */
int __init snd_sequencer_memory_init(void)
{
	return 0;
}

/* release sequencer memory */
void __exit snd_sequencer_memory_done(void)
{
}


/* exported to seq_clientmgr.c */
void snd_seq_info_pool(struct snd_info_buffer *buffer,
		       struct snd_seq_pool *pool, char *space)
{
	if (pool == NULL)
		return;
	snd_iprintf(buffer, "%sPool size : %d\n", space, pool->total_elements);
	snd_iprintf(buffer, "%sCells in use : %d\n", space, atomic_read(&pool->counter));
	snd_iprintf(buffer, "%sPeak cells in use : %d\n", space, pool->max_used);
	snd_iprintf(buffer, "%sAlloc success : %d\n", space, pool->event_alloc_success);
	snd_iprintf(buffer, "%sAlloc failures : %d\n", space, pool->event_alloc_failures);
}
gpl-2.0
pakohan/syso-kernel
linux-3.4.68/arch/blackfin/kernel/kgdb_test.c
8905
2461
/* * arch/blackfin/kernel/kgdb_test.c - Blackfin kgdb tests * * Copyright 2005-2008 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <asm/current.h> #include <asm/uaccess.h> #include <asm/blackfin.h> /* Symbols are here for kgdb test to poke directly */ static char cmdline[256]; static size_t len; #ifndef CONFIG_SMP static int num1 __attribute__((l1_data)); void kgdb_l1_test(void) __attribute__((l1_text)); void kgdb_l1_test(void) { pr_alert("L1(before change) : data variable addr = 0x%p, data value is %d\n", &num1, num1); pr_alert("L1 : code function addr = 0x%p\n", kgdb_l1_test); num1 = num1 + 10; pr_alert("L1(after change) : data variable addr = 0x%p, data value is %d\n", &num1, num1); } #endif #if L2_LENGTH static int num2 __attribute__((l2)); void kgdb_l2_test(void) __attribute__((l2)); void kgdb_l2_test(void) { pr_alert("L2(before change) : data variable addr = 0x%p, data value is %d\n", &num2, num2); pr_alert("L2 : code function addr = 0x%p\n", kgdb_l2_test); num2 = num2 + 20; pr_alert("L2(after change) : data variable addr = 0x%p, data value is %d\n", &num2, num2); } #endif noinline int kgdb_test(char *name, int len, int count, int z) { pr_alert("kgdb name(%d): %s, %d, %d\n", len, name, count, z); count = z; return count; } static ssize_t kgdb_test_proc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { kgdb_test("hello world!", 12, 0x55, 0x10); #ifndef CONFIG_SMP kgdb_l1_test(); #endif #if L2_LENGTH kgdb_l2_test(); #endif return 0; } static ssize_t kgdb_test_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { len = min_t(size_t, 255, count); memcpy(cmdline, buffer, count); cmdline[len] = 0; return len; } static const struct file_operations kgdb_test_proc_fops = { .owner = THIS_MODULE, .read = kgdb_test_proc_read, .write = kgdb_test_proc_write, .llseek = noop_llseek, }; static int 
__init kgdbtest_init(void) { struct proc_dir_entry *entry; #if L2_LENGTH num2 = 0; #endif entry = proc_create("kgdbtest", 0, NULL, &kgdb_test_proc_fops); if (entry == NULL) return -ENOMEM; return 0; } static void __exit kgdbtest_exit(void) { remove_proc_entry("kgdbtest", NULL); } module_init(kgdbtest_init); module_exit(kgdbtest_exit); MODULE_LICENSE("GPL");
gpl-2.0
pietrushnic/linux
drivers/uwb/whc-rc.c
10441
13503
/* * Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3]) * Radio Control command/event transport to the UWB stack * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * Initialize and hook up the Radio Control interface. * * For each device probed, creates an 'struct whcrc' which contains * just the representation of the UWB Radio Controller, and the logic * for reading notifications and passing them to the UWB Core. * * So we initialize all of those, register the UWB Radio Controller * and setup the notification/event handle to pipe the notifications * to the UWB management Daemon. * * Once uwb_rc_add() is called, the UWB stack takes control, resets * the radio and readies the device to take commands the UWB * API/user-space. * * Note this driver is just a transport driver; the commands are * formed at the UWB stack and given to this driver who will deliver * them to the hw and transfer the replies/notifications back to the * UWB stack through the UWB daemon (UWBD). 
*/ #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/uwb.h> #include <linux/uwb/whci.h> #include <linux/uwb/umc.h> #include "uwb-internal.h" /** * Descriptor for an instance of the UWB Radio Control Driver that * attaches to the URC interface of the WHCI PCI card. * * Unless there is a lock specific to the 'data members', all access * is protected by uwb_rc->mutex. */ struct whcrc { struct umc_dev *umc_dev; struct uwb_rc *uwb_rc; /* UWB host controller */ unsigned long area; void __iomem *rc_base; size_t rc_len; spinlock_t irq_lock; void *evt_buf, *cmd_buf; dma_addr_t evt_dma_buf, cmd_dma_buf; wait_queue_head_t cmd_wq; struct work_struct event_work; }; /** * Execute an UWB RC command on WHCI/RC * * @rc: Instance of a Radio Controller that is a whcrc * @cmd: Buffer containing the RCCB and payload to execute * @cmd_size: Size of the command buffer. * * We copy the command into whcrc->cmd_buf (as it is pretty and * aligned`and physically contiguous) and then press the right keys in * the controller's URCCMD register to get it to read it. We might * have to wait for the cmd_sem to be open to us. * * NOTE: rc's mutex has to be locked */ static int whcrc_cmd(struct uwb_rc *uwb_rc, const struct uwb_rccb *cmd, size_t cmd_size) { int result = 0; struct whcrc *whcrc = uwb_rc->priv; struct device *dev = &whcrc->umc_dev->dev; u32 urccmd; if (cmd_size >= 4096) return -EINVAL; /* * If the URC is halted, then the hardware has reset itself. * Attempt to recover by restarting the device and then return * an error as it's likely that the current command isn't * valid for a newly started RC. 
*/ if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { dev_err(dev, "requesting reset of halted radio controller\n"); uwb_rc_reset_all(uwb_rc); return -EIO; } result = wait_event_timeout(whcrc->cmd_wq, !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); if (result == 0) { dev_err(dev, "device is not ready to execute commands\n"); return -ETIMEDOUT; } memmove(whcrc->cmd_buf, cmd, cmd_size); le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR); spin_lock(&whcrc->irq_lock); urccmd = le_readl(whcrc->rc_base + URCCMD); urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK); le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size, whcrc->rc_base + URCCMD); spin_unlock(&whcrc->irq_lock); return 0; } static int whcrc_reset(struct uwb_rc *rc) { struct whcrc *whcrc = rc->priv; return umc_controller_reset(whcrc->umc_dev); } /** * Reset event reception mechanism and tell hw we are ready to get more * * We have read all the events in the event buffer, so we are ready to * reset it to the beginning. * * This is only called during initialization or after an event buffer * has been retired. This means we can be sure that event processing * is disabled and it's safe to update the URCEVTADDR register. * * There's no need to wait for the event processing to start as the * URC will not clear URCCMD_ACTIVE until (internal) event buffer * space is available. 
*/ static void whcrc_enable_events(struct whcrc *whcrc) { u32 urccmd; le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); spin_lock(&whcrc->irq_lock); urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); spin_unlock(&whcrc->irq_lock); } static void whcrc_event_work(struct work_struct *work) { struct whcrc *whcrc = container_of(work, struct whcrc, event_work); size_t size; u64 urcevtaddr; urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); size = urcevtaddr & URCEVTADDR_OFFSET_MASK; uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); whcrc_enable_events(whcrc); } /** * Catch interrupts? * * We ack inmediately (and expect the hw to do the right thing and * raise another IRQ if things have changed :) */ static irqreturn_t whcrc_irq_cb(int irq, void *_whcrc) { struct whcrc *whcrc = _whcrc; struct device *dev = &whcrc->umc_dev->dev; u32 urcsts; urcsts = le_readl(whcrc->rc_base + URCSTS); if (!(urcsts & URCSTS_INT_MASK)) return IRQ_NONE; le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); if (urcsts & URCSTS_HSE) { dev_err(dev, "host system error -- hardware halted\n"); /* FIXME: do something sensible here */ goto out; } if (urcsts & URCSTS_ER) schedule_work(&whcrc->event_work); if (urcsts & URCSTS_RCI) wake_up_all(&whcrc->cmd_wq); out: return IRQ_HANDLED; } /** * Initialize a UMC RC interface: map regions, get (shared) IRQ */ static int whcrc_setup_rc_umc(struct whcrc *whcrc) { int result = 0; struct device *dev = &whcrc->umc_dev->dev; struct umc_dev *umc_dev = whcrc->umc_dev; whcrc->area = umc_dev->resource.start; whcrc->rc_len = resource_size(&umc_dev->resource); result = -EBUSY; if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) { dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", whcrc->rc_len, whcrc->area, result); goto error_request_region; } whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len); if (whcrc->rc_base == NULL) { 
dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n", whcrc->rc_len, whcrc->area, result); goto error_ioremap_nocache; } result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED, KBUILD_MODNAME, whcrc); if (result < 0) { dev_err(dev, "can't allocate IRQ %d: %d\n", umc_dev->irq, result); goto error_request_irq; } result = -ENOMEM; whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE, &whcrc->cmd_dma_buf, GFP_KERNEL); if (whcrc->cmd_buf == NULL) { dev_err(dev, "Can't allocate cmd transfer buffer\n"); goto error_cmd_buffer; } whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE, &whcrc->evt_dma_buf, GFP_KERNEL); if (whcrc->evt_buf == NULL) { dev_err(dev, "Can't allocate evt transfer buffer\n"); goto error_evt_buffer; } return 0; error_evt_buffer: dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf, whcrc->cmd_dma_buf); error_cmd_buffer: free_irq(umc_dev->irq, whcrc); error_request_irq: iounmap(whcrc->rc_base); error_ioremap_nocache: release_mem_region(whcrc->area, whcrc->rc_len); error_request_region: return result; } /** * Release RC's UMC resources */ static void whcrc_release_rc_umc(struct whcrc *whcrc) { struct umc_dev *umc_dev = whcrc->umc_dev; dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf, whcrc->evt_dma_buf); dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf, whcrc->cmd_dma_buf); free_irq(umc_dev->irq, whcrc); iounmap(whcrc->rc_base); release_mem_region(whcrc->area, whcrc->rc_len); } /** * whcrc_start_rc - start a WHCI radio controller * @whcrc: the radio controller to start * * Reset the UMC device, start the radio controller, enable events and * finally enable interrupts. 
*/ static int whcrc_start_rc(struct uwb_rc *rc) { struct whcrc *whcrc = rc->priv; struct device *dev = &whcrc->umc_dev->dev; /* Reset the thing */ le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, 5000, "hardware reset") < 0) return -EBUSY; /* Set the event buffer, start the controller (enable IRQs later) */ le_writel(0, whcrc->rc_base + URCINTR); le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, 5000, "radio controller start") < 0) return -ETIMEDOUT; whcrc_enable_events(whcrc); le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); return 0; } /** * whcrc_stop_rc - stop a WHCI radio controller * @whcrc: the radio controller to stop * * Disable interrupts and cancel any pending event processing work * before clearing the Run/Stop bit. */ static void whcrc_stop_rc(struct uwb_rc *rc) { struct whcrc *whcrc = rc->priv; struct umc_dev *umc_dev = whcrc->umc_dev; le_writel(0, whcrc->rc_base + URCINTR); cancel_work_sync(&whcrc->event_work); le_writel(0, whcrc->rc_base + URCCMD); whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop"); } static void whcrc_init(struct whcrc *whcrc) { spin_lock_init(&whcrc->irq_lock); init_waitqueue_head(&whcrc->cmd_wq); INIT_WORK(&whcrc->event_work, whcrc_event_work); } /** * Initialize the radio controller. * * NOTE: we setup whcrc->uwb_rc before calling uwb_rc_add(); in the * IRQ handler we use that to determine if the hw is ready to * handle events. Looks like a race condition, but it really is * not. 
*/ static int whcrc_probe(struct umc_dev *umc_dev) { int result; struct uwb_rc *uwb_rc; struct whcrc *whcrc; struct device *dev = &umc_dev->dev; result = -ENOMEM; uwb_rc = uwb_rc_alloc(); if (uwb_rc == NULL) { dev_err(dev, "unable to allocate RC instance\n"); goto error_rc_alloc; } whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL); if (whcrc == NULL) { dev_err(dev, "unable to allocate WHC-RC instance\n"); goto error_alloc; } whcrc_init(whcrc); whcrc->umc_dev = umc_dev; result = whcrc_setup_rc_umc(whcrc); if (result < 0) { dev_err(dev, "Can't setup RC UMC interface: %d\n", result); goto error_setup_rc_umc; } whcrc->uwb_rc = uwb_rc; uwb_rc->owner = THIS_MODULE; uwb_rc->cmd = whcrc_cmd; uwb_rc->reset = whcrc_reset; uwb_rc->start = whcrc_start_rc; uwb_rc->stop = whcrc_stop_rc; result = uwb_rc_add(uwb_rc, dev, whcrc); if (result < 0) goto error_rc_add; umc_set_drvdata(umc_dev, whcrc); return 0; error_rc_add: whcrc_release_rc_umc(whcrc); error_setup_rc_umc: kfree(whcrc); error_alloc: uwb_rc_put(uwb_rc); error_rc_alloc: return result; } /** * Clean up the radio control resources * * When we up the command semaphore, everybody possibly held trying to * execute a command should be granted entry and then they'll see the * host is quiescing and up it (so it will chain to the next waiter). * This should not happen (in any case), as we can only remove when * there are no handles open... 
*/ static void whcrc_remove(struct umc_dev *umc_dev) { struct whcrc *whcrc = umc_get_drvdata(umc_dev); struct uwb_rc *uwb_rc = whcrc->uwb_rc; umc_set_drvdata(umc_dev, NULL); uwb_rc_rm(uwb_rc); whcrc_release_rc_umc(whcrc); kfree(whcrc); uwb_rc_put(uwb_rc); } static int whcrc_pre_reset(struct umc_dev *umc) { struct whcrc *whcrc = umc_get_drvdata(umc); struct uwb_rc *uwb_rc = whcrc->uwb_rc; uwb_rc_pre_reset(uwb_rc); return 0; } static int whcrc_post_reset(struct umc_dev *umc) { struct whcrc *whcrc = umc_get_drvdata(umc); struct uwb_rc *uwb_rc = whcrc->uwb_rc; return uwb_rc_post_reset(uwb_rc); } /* PCI device ID's that we handle [so it gets loaded] */ static struct pci_device_id __used whcrc_id_table[] = { { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, { /* empty last entry */ } }; MODULE_DEVICE_TABLE(pci, whcrc_id_table); static struct umc_driver whcrc_driver = { .name = "whc-rc", .cap_id = UMC_CAP_ID_WHCI_RC, .probe = whcrc_probe, .remove = whcrc_remove, .pre_reset = whcrc_pre_reset, .post_reset = whcrc_post_reset, }; static int __init whcrc_driver_init(void) { return umc_driver_register(&whcrc_driver); } module_init(whcrc_driver_init); static void __exit whcrc_driver_exit(void) { umc_driver_unregister(&whcrc_driver); } module_exit(whcrc_driver_exit); MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver"); MODULE_LICENSE("GPL");
gpl-2.0
evnit/android_kernel_samsung_msm8660-common-10.2
drivers/parisc/power.c
13001
7462
/* * linux/drivers/parisc/power.c * HP PARISC soft power switch support driver * * Copyright (c) 2001-2007 Helge Deller <deller@gmx.de> * All rights reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * * * HINT: * Support of the soft power switch button may be enabled or disabled at * runtime through the "/proc/sys/kernel/power" procfs entry. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/pm.h> #include <asm/pdc.h> #include <asm/io.h> #include <asm/led.h> #define DRIVER_NAME "powersw" #define KTHREAD_NAME "kpowerswd" /* how often should the power button be polled ? 
*/ #define POWERSWITCH_POLL_PER_SEC 2 /* how long does the power button needs to be down until we react ? */ #define POWERSWITCH_DOWN_SEC 2 /* assembly code to access special registers */ /* taken from PCXL ERS page 82 */ #define DIAG_CODE(code) (0x14000000 + ((code)<<5)) #define MFCPU_X(rDiagReg, t_ch, t_th, code) \ (DIAG_CODE(code) + ((rDiagReg)<<21) + ((t_ch)<<16) + ((t_th)<<0) ) #define MTCPU(dr, gr) MFCPU_X(dr, gr, 0, 0x12) /* move value of gr to dr[dr] */ #define MFCPU_C(dr, gr) MFCPU_X(dr, gr, 0, 0x30) /* for dr0 and dr8 only ! */ #define MFCPU_T(dr, gr) MFCPU_X(dr, 0, gr, 0xa0) /* all dr except dr0 and dr8 */ #define __getDIAG(dr) ( { \ register unsigned long __res asm("r28");\ __asm__ __volatile__ ( \ ".word %1" : "=&r" (__res) : "i" (MFCPU_T(dr,28) ) \ ); \ __res; \ } ) /* local shutdown counter */ static int shutdown_timer __read_mostly; /* check, give feedback and start shutdown after one second */ static void process_shutdown(void) { if (shutdown_timer == 0) printk(KERN_ALERT KTHREAD_NAME ": Shutdown requested...\n"); shutdown_timer++; /* wait until the button was pressed for 1 second */ if (shutdown_timer == (POWERSWITCH_DOWN_SEC*POWERSWITCH_POLL_PER_SEC)) { static const char msg[] = "Shutting down..."; printk(KERN_INFO KTHREAD_NAME ": %s\n", msg); lcd_print(msg); /* send kill signal */ if (kill_cad_pid(SIGINT, 1)) { /* just in case killing init process failed */ if (pm_power_off) pm_power_off(); } } } /* main power switch task struct */ static struct task_struct *power_task; /* filename in /proc which can be used to enable/disable the power switch */ #define SYSCTL_FILENAME "sys/kernel/power" /* soft power switch enabled/disabled */ int pwrsw_enabled __read_mostly = 1; /* main kernel thread worker. It polls the button state */ static int kpowerswd(void *param) { __set_current_state(TASK_RUNNING); do { int button_not_pressed; unsigned long soft_power_reg = (unsigned long) param; schedule_timeout_interruptible(pwrsw_enabled ? 
HZ : HZ/POWERSWITCH_POLL_PER_SEC); __set_current_state(TASK_RUNNING); if (unlikely(!pwrsw_enabled)) continue; if (soft_power_reg) { /* * Non-Gecko-style machines: * Check the power switch status which is read from the * real I/O location at soft_power_reg. * Bit 31 ("the lowest bit) is the status of the power switch. * This bit is "1" if the button is NOT pressed. */ button_not_pressed = (gsc_readl(soft_power_reg) & 0x1); } else { /* * On gecko style machines (e.g. 712/xx and 715/xx) * the power switch status is stored in Bit 0 ("the highest bit") * of CPU diagnose register 25. * Warning: Some machines never reset the DIAG flag, even if * the button has been released again. */ button_not_pressed = (__getDIAG(25) & 0x80000000); } if (likely(button_not_pressed)) { if (unlikely(shutdown_timer && /* avoid writing if not necessary */ shutdown_timer < (POWERSWITCH_DOWN_SEC*POWERSWITCH_POLL_PER_SEC))) { shutdown_timer = 0; printk(KERN_INFO KTHREAD_NAME ": Shutdown request aborted.\n"); } } else process_shutdown(); } while (!kthread_should_stop()); return 0; } /* * powerfail interruption handler (irq IRQ_FROM_REGION(CPU_IRQ_REGION)+2) */ #if 0 static void powerfail_interrupt(int code, void *x) { printk(KERN_CRIT "POWERFAIL INTERRUPTION !\n"); poweroff(); } #endif /* parisc_panic_event() is called by the panic handler. * As soon as a panic occurs, our tasklets above will not be * executed any longer. 
This function then re-enables the * soft-power switch and allows the user to switch off the system */ static int parisc_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { /* re-enable the soft-power switch */ pdc_soft_power_button(0); return NOTIFY_DONE; } static struct notifier_block parisc_panic_block = { .notifier_call = parisc_panic_event, .priority = INT_MAX, }; static int __init power_init(void) { unsigned long ret; unsigned long soft_power_reg; #if 0 request_irq( IRQ_FROM_REGION(CPU_IRQ_REGION)+2, &powerfail_interrupt, 0, "powerfail", NULL); #endif /* enable the soft power switch if possible */ ret = pdc_soft_power_info(&soft_power_reg); if (ret == PDC_OK) ret = pdc_soft_power_button(1); if (ret != PDC_OK) soft_power_reg = -1UL; switch (soft_power_reg) { case 0: printk(KERN_INFO DRIVER_NAME ": Gecko-style soft power switch enabled.\n"); break; case -1UL: printk(KERN_INFO DRIVER_NAME ": Soft power switch support not available.\n"); return -ENODEV; default: printk(KERN_INFO DRIVER_NAME ": Soft power switch at 0x%08lx enabled.\n", soft_power_reg); } power_task = kthread_run(kpowerswd, (void*)soft_power_reg, KTHREAD_NAME); if (IS_ERR(power_task)) { printk(KERN_ERR DRIVER_NAME ": thread creation failed. Driver not loaded.\n"); pdc_soft_power_button(0); return -EIO; } /* Register a call for panic conditions. */ atomic_notifier_chain_register(&panic_notifier_list, &parisc_panic_block); return 0; } static void __exit power_exit(void) { kthread_stop(power_task); atomic_notifier_chain_unregister(&panic_notifier_list, &parisc_panic_block); pdc_soft_power_button(0); } arch_initcall(power_init); module_exit(power_exit); MODULE_AUTHOR("Helge Deller <deller@gmx.de>"); MODULE_DESCRIPTION("Soft power switch driver"); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
cmenard/GB_Bullet
drivers/parisc/power.c
13001
7462
/* * linux/drivers/parisc/power.c * HP PARISC soft power switch support driver * * Copyright (c) 2001-2007 Helge Deller <deller@gmx.de> * All rights reserved. * * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL"). * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * * * HINT: * Support of the soft power switch button may be enabled or disabled at * runtime through the "/proc/sys/kernel/power" procfs entry. */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/notifier.h> #include <linux/reboot.h> #include <linux/sched.h> #include <linux/kthread.h> #include <linux/pm.h> #include <asm/pdc.h> #include <asm/io.h> #include <asm/led.h> #define DRIVER_NAME "powersw" #define KTHREAD_NAME "kpowerswd" /* how often should the power button be polled ? 
*/ #define POWERSWITCH_POLL_PER_SEC 2 /* how long does the power button needs to be down until we react ? */ #define POWERSWITCH_DOWN_SEC 2 /* assembly code to access special registers */ /* taken from PCXL ERS page 82 */ #define DIAG_CODE(code) (0x14000000 + ((code)<<5)) #define MFCPU_X(rDiagReg, t_ch, t_th, code) \ (DIAG_CODE(code) + ((rDiagReg)<<21) + ((t_ch)<<16) + ((t_th)<<0) ) #define MTCPU(dr, gr) MFCPU_X(dr, gr, 0, 0x12) /* move value of gr to dr[dr] */ #define MFCPU_C(dr, gr) MFCPU_X(dr, gr, 0, 0x30) /* for dr0 and dr8 only ! */ #define MFCPU_T(dr, gr) MFCPU_X(dr, 0, gr, 0xa0) /* all dr except dr0 and dr8 */ #define __getDIAG(dr) ( { \ register unsigned long __res asm("r28");\ __asm__ __volatile__ ( \ ".word %1" : "=&r" (__res) : "i" (MFCPU_T(dr,28) ) \ ); \ __res; \ } ) /* local shutdown counter */ static int shutdown_timer __read_mostly; /* check, give feedback and start shutdown after one second */ static void process_shutdown(void) { if (shutdown_timer == 0) printk(KERN_ALERT KTHREAD_NAME ": Shutdown requested...\n"); shutdown_timer++; /* wait until the button was pressed for 1 second */ if (shutdown_timer == (POWERSWITCH_DOWN_SEC*POWERSWITCH_POLL_PER_SEC)) { static const char msg[] = "Shutting down..."; printk(KERN_INFO KTHREAD_NAME ": %s\n", msg); lcd_print(msg); /* send kill signal */ if (kill_cad_pid(SIGINT, 1)) { /* just in case killing init process failed */ if (pm_power_off) pm_power_off(); } } } /* main power switch task struct */ static struct task_struct *power_task; /* filename in /proc which can be used to enable/disable the power switch */ #define SYSCTL_FILENAME "sys/kernel/power" /* soft power switch enabled/disabled */ int pwrsw_enabled __read_mostly = 1; /* main kernel thread worker. It polls the button state */ static int kpowerswd(void *param) { __set_current_state(TASK_RUNNING); do { int button_not_pressed; unsigned long soft_power_reg = (unsigned long) param; schedule_timeout_interruptible(pwrsw_enabled ? 
HZ : HZ/POWERSWITCH_POLL_PER_SEC); __set_current_state(TASK_RUNNING); if (unlikely(!pwrsw_enabled)) continue; if (soft_power_reg) { /* * Non-Gecko-style machines: * Check the power switch status which is read from the * real I/O location at soft_power_reg. * Bit 31 ("the lowest bit) is the status of the power switch. * This bit is "1" if the button is NOT pressed. */ button_not_pressed = (gsc_readl(soft_power_reg) & 0x1); } else { /* * On gecko style machines (e.g. 712/xx and 715/xx) * the power switch status is stored in Bit 0 ("the highest bit") * of CPU diagnose register 25. * Warning: Some machines never reset the DIAG flag, even if * the button has been released again. */ button_not_pressed = (__getDIAG(25) & 0x80000000); } if (likely(button_not_pressed)) { if (unlikely(shutdown_timer && /* avoid writing if not necessary */ shutdown_timer < (POWERSWITCH_DOWN_SEC*POWERSWITCH_POLL_PER_SEC))) { shutdown_timer = 0; printk(KERN_INFO KTHREAD_NAME ": Shutdown request aborted.\n"); } } else process_shutdown(); } while (!kthread_should_stop()); return 0; } /* * powerfail interruption handler (irq IRQ_FROM_REGION(CPU_IRQ_REGION)+2) */ #if 0 static void powerfail_interrupt(int code, void *x) { printk(KERN_CRIT "POWERFAIL INTERRUPTION !\n"); poweroff(); } #endif /* parisc_panic_event() is called by the panic handler. * As soon as a panic occurs, our tasklets above will not be * executed any longer. 
This function then re-enables the * soft-power switch and allows the user to switch off the system */ static int parisc_panic_event(struct notifier_block *this, unsigned long event, void *ptr) { /* re-enable the soft-power switch */ pdc_soft_power_button(0); return NOTIFY_DONE; } static struct notifier_block parisc_panic_block = { .notifier_call = parisc_panic_event, .priority = INT_MAX, }; static int __init power_init(void) { unsigned long ret; unsigned long soft_power_reg; #if 0 request_irq( IRQ_FROM_REGION(CPU_IRQ_REGION)+2, &powerfail_interrupt, 0, "powerfail", NULL); #endif /* enable the soft power switch if possible */ ret = pdc_soft_power_info(&soft_power_reg); if (ret == PDC_OK) ret = pdc_soft_power_button(1); if (ret != PDC_OK) soft_power_reg = -1UL; switch (soft_power_reg) { case 0: printk(KERN_INFO DRIVER_NAME ": Gecko-style soft power switch enabled.\n"); break; case -1UL: printk(KERN_INFO DRIVER_NAME ": Soft power switch support not available.\n"); return -ENODEV; default: printk(KERN_INFO DRIVER_NAME ": Soft power switch at 0x%08lx enabled.\n", soft_power_reg); } power_task = kthread_run(kpowerswd, (void*)soft_power_reg, KTHREAD_NAME); if (IS_ERR(power_task)) { printk(KERN_ERR DRIVER_NAME ": thread creation failed. Driver not loaded.\n"); pdc_soft_power_button(0); return -EIO; } /* Register a call for panic conditions. */ atomic_notifier_chain_register(&panic_notifier_list, &parisc_panic_block); return 0; } static void __exit power_exit(void) { kthread_stop(power_task); atomic_notifier_chain_unregister(&panic_notifier_list, &parisc_panic_block); pdc_soft_power_button(0); } arch_initcall(power_init); module_exit(power_exit); MODULE_AUTHOR("Helge Deller <deller@gmx.de>"); MODULE_DESCRIPTION("Soft power switch driver"); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
gilelad/linux
drivers/pinctrl/uniphier/pinctrl-ph1-ld6b.c
202
58611
/*
 * Pin controller data for the Socionext UniPhier PH1-LD6B SoC
 * (pin descriptors plus pin-group tables consumed by the common
 * UniPhier pinctrl core in pinctrl-uniphier.h).
 *
 * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pinctrl/pinctrl.h>
#include <linux/platform_device.h>

#include "pinctrl-uniphier.h"

/* Name under which the platform driver registers (matched by the core). */
#define DRIVER_NAME "ph1-ld6b-pinctrl"

/*
 * Per-pin capability table.
 *
 * Argument order follows UNIPHIER_PINCTRL_PIN() in pinctrl-uniphier.h:
 * (pin number, pin name, input-enable control, drive control index,
 *  drive-strength class, pull control index, default pull direction).
 * NOTE(review): a control index of -1 appears to mean "not controllable
 * for this pin" — confirm against the macro definition in the header.
 * The values below are straight from the hardware register map; do not
 * edit without the SoC datasheet.
 */
static const struct pinctrl_pin_desc ph1_ld6b_pins[] = {
	/* external bus interface (ED/XERWE/ERXW/ES/XECS, by name) */
	UNIPHIER_PINCTRL_PIN(0, "ED0", UNIPHIER_PIN_IECTRL_NONE, 0, UNIPHIER_PIN_DRV_4_8, 0, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(1, "ED1", UNIPHIER_PIN_IECTRL_NONE, 1, UNIPHIER_PIN_DRV_4_8, 1, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(2, "ED2", UNIPHIER_PIN_IECTRL_NONE, 2, UNIPHIER_PIN_DRV_4_8, 2, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(3, "ED3", UNIPHIER_PIN_IECTRL_NONE, 3, UNIPHIER_PIN_DRV_4_8, 3, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(4, "ED4", UNIPHIER_PIN_IECTRL_NONE, 4, UNIPHIER_PIN_DRV_4_8, 4, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(5, "ED5", UNIPHIER_PIN_IECTRL_NONE, 5, UNIPHIER_PIN_DRV_4_8, 5, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(6, "ED6", UNIPHIER_PIN_IECTRL_NONE, 6, UNIPHIER_PIN_DRV_4_8, 6, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(7, "ED7", UNIPHIER_PIN_IECTRL_NONE, 7, UNIPHIER_PIN_DRV_4_8, 7, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(8, "XERWE0", UNIPHIER_PIN_IECTRL_NONE, 8, UNIPHIER_PIN_DRV_4_8, 8, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(9, "XERWE1", UNIPHIER_PIN_IECTRL_NONE, 9, UNIPHIER_PIN_DRV_4_8, 9, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(10, "ERXW", UNIPHIER_PIN_IECTRL_NONE, 10, UNIPHIER_PIN_DRV_4_8, 10, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(11, "ES0", UNIPHIER_PIN_IECTRL_NONE, 11, UNIPHIER_PIN_DRV_4_8, 11, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(12, "ES1", UNIPHIER_PIN_IECTRL_NONE, 12, UNIPHIER_PIN_DRV_4_8, 12, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(13, "ES2", UNIPHIER_PIN_IECTRL_NONE, 13, UNIPHIER_PIN_DRV_4_8, 13, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(14, "XECS1", UNIPHIER_PIN_IECTRL_NONE, 14, UNIPHIER_PIN_DRV_4_8, 14, UNIPHIER_PIN_PULL_DOWN),
	/* PCA00-PCA14: fixed 8mA drive, no per-pin drive control (-1) */
	UNIPHIER_PINCTRL_PIN(15, "PCA00", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 15, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(16, "PCA01", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 16, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(17, "PCA02", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 17, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(18, "PCA03", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 18, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(19, "PCA04", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 19, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(20, "PCA05", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 20, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(21, "PCA06", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 21, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(22, "PCA07", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 22, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(23, "PCA08", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 23, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(24, "PCA09", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 24, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(25, "PCA10", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 25, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(26, "PCA11", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 26, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(27, "PCA12", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 27, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(28, "PCA13", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 28, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(29, "PCA14", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 29, UNIPHIER_PIN_PULL_DOWN),
	/* NAND flash interface (NF*, by name); active-low strobes default pull-up */
	UNIPHIER_PINCTRL_PIN(30, "XNFRE", UNIPHIER_PIN_IECTRL_NONE, 30, UNIPHIER_PIN_DRV_4_8, 30, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(31, "XNFWE", UNIPHIER_PIN_IECTRL_NONE, 31, UNIPHIER_PIN_DRV_4_8, 31, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(32, "NFALE", UNIPHIER_PIN_IECTRL_NONE, 32, UNIPHIER_PIN_DRV_4_8, 32, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(33, "NFCLE", UNIPHIER_PIN_IECTRL_NONE, 33, UNIPHIER_PIN_DRV_4_8, 33, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(34, "XNFWP", UNIPHIER_PIN_IECTRL_NONE, 34, UNIPHIER_PIN_DRV_4_8, 34, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(35, "XNFCE0", UNIPHIER_PIN_IECTRL_NONE, 35, UNIPHIER_PIN_DRV_4_8, 35, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(36, "NFRYBY0", UNIPHIER_PIN_IECTRL_NONE, 36, UNIPHIER_PIN_DRV_4_8, 36, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(37, "XNFCE1", UNIPHIER_PIN_IECTRL_NONE, 37, UNIPHIER_PIN_DRV_4_8, 37, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(38, "NFRYBY1", UNIPHIER_PIN_IECTRL_NONE, 38, UNIPHIER_PIN_DRV_4_8, 38, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(39, "NFD0", UNIPHIER_PIN_IECTRL_NONE, 39, UNIPHIER_PIN_DRV_4_8, 39, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(40, "NFD1", UNIPHIER_PIN_IECTRL_NONE, 40, UNIPHIER_PIN_DRV_4_8, 40, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(41, "NFD2", UNIPHIER_PIN_IECTRL_NONE, 41, UNIPHIER_PIN_DRV_4_8, 41, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(42, "NFD3", UNIPHIER_PIN_IECTRL_NONE, 42, UNIPHIER_PIN_DRV_4_8, 42, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(43, "NFD4", UNIPHIER_PIN_IECTRL_NONE, 43, UNIPHIER_PIN_DRV_4_8, 43, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(44, "NFD5", UNIPHIER_PIN_IECTRL_NONE, 44, UNIPHIER_PIN_DRV_4_8, 44, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(45, "NFD6", UNIPHIER_PIN_IECTRL_NONE, 45, UNIPHIER_PIN_DRV_4_8, 45, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(46, "NFD7", UNIPHIER_PIN_IECTRL_NONE, 46, UNIPHIER_PIN_DRV_4_8, 46, UNIPHIER_PIN_PULL_DOWN),
	/*
	 * SD card bus: wider drive-strength range (8/12/16/20mA); drive
	 * control index here is a register bit offset (0, 4, 8, ...), and
	 * the pull is fixed up (no pull control index, hence -1).
	 */
	UNIPHIER_PINCTRL_PIN(47, "SDCLK", UNIPHIER_PIN_IECTRL_NONE, 0, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_UP_FIXED),
	UNIPHIER_PINCTRL_PIN(48, "SDCMD", UNIPHIER_PIN_IECTRL_NONE, 4, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_UP_FIXED),
	UNIPHIER_PINCTRL_PIN(49, "SDDAT0", UNIPHIER_PIN_IECTRL_NONE, 8, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_UP_FIXED),
	UNIPHIER_PINCTRL_PIN(50, "SDDAT1", UNIPHIER_PIN_IECTRL_NONE, 12, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_UP_FIXED),
	UNIPHIER_PINCTRL_PIN(51, "SDDAT2", UNIPHIER_PIN_IECTRL_NONE, 16, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_UP_FIXED),
	UNIPHIER_PINCTRL_PIN(52, "SDDAT3", UNIPHIER_PIN_IECTRL_NONE, 20, UNIPHIER_PIN_DRV_8_12_16_20, -1, UNIPHIER_PIN_PULL_UP_FIXED),
	UNIPHIER_PINCTRL_PIN(53, "SDCD", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 53, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(54, "SDWP", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 54, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(55, "SDVOLC", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 55, UNIPHIER_PIN_PULL_DOWN),
	/* USB port VBUS / overcurrent-detect pins */
	UNIPHIER_PINCTRL_PIN(56, "USB0VBUS", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 56, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(57, "USB0OD", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 57, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(58, "USB1VBUS", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 58, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(59, "USB1OD", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 59, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(60, "USB2VBUS", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 60, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(61, "USB2OD", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 61, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(62, "USB3VBUS", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 62, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(63, "USB3OD", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 63, UNIPHIER_PIN_PULL_DOWN),
	/* HS0 output / HS1, HS2 input stream interfaces (by name) */
	UNIPHIER_PINCTRL_PIN(64, "HS0BCLKOUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 64, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(65, "HS0SYNCOUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 65, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(66, "HS0VALOUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 66, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(67, "HS0DOUT0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 67, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(68, "HS0DOUT1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 68, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(69, "HS0DOUT2", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 69, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(70, "HS0DOUT3", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 70, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(71, "HS0DOUT4", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 71, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(72, "HS0DOUT5", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 72, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(73, "HS0DOUT6", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 73, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(74, "HS0DOUT7", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 74, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(75, "HS1BCLKIN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 75, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(76, "HS1SYNCIN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 76, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(77, "HS1VALIN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 77, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(78, "HS1DIN0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 78, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(79, "HS1DIN1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 79, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(80, "HS1DIN2", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 80, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(81, "HS1DIN3", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 81, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(82, "HS1DIN4", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 82, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(83, "HS1DIN5", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 83, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(84, "HS1DIN6", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 84, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(85, "HS1DIN7", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 85, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(86, "HS2BCLKIN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 86, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(87, "HS2SYNCIN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 87, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(88, "HS2VALIN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 88, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(89, "HS2DIN0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 89, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(90, "HS2DIN1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 90, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(91, "HS2DIN2", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 91, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(92, "HS2DIN3", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 92, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(93, "HS2DIN4", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 93, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(94, "HS2DIN5", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 94, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(95, "HS2DIN6", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 95, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(96, "HS2DIN7", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 96, UNIPHIER_PIN_PULL_DOWN),
	/* audio output interfaces AO1/AO2 (by name) */
	UNIPHIER_PINCTRL_PIN(97, "AO1IEC", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 97, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(98, "AO1DACCK", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 98, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(99, "AO1BCK", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 99, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(100, "AO1LRCK", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 100, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(101, "AO1D0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 101, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(102, "AO1D1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 102, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(103, "AO1D2", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 103, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(104, "AO1D3", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 104, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(105, "AO2DACCK", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 105, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(106, "AO2BCK", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 106, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(107, "AO2LRCK", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 107, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(108, "AO2D0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 108, UNIPHIER_PIN_PULL_DOWN),
	/* I2C buses 0/1 */
	UNIPHIER_PINCTRL_PIN(109, "SDA0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 109, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(110, "SCL0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 110, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(111, "SDA1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 111, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(112, "SCL1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 112, UNIPHIER_PIN_PULL_DOWN),
	/* serial / UART pins: note IECTRL field is 0 here, not IECTRL_NONE */
	UNIPHIER_PINCTRL_PIN(113, "SBO0", 0, 113, UNIPHIER_PIN_DRV_4_8, 113, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(114, "SBI0", 0, 114, UNIPHIER_PIN_DRV_4_8, 114, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(115, "TXD1", 0, 115, UNIPHIER_PIN_DRV_4_8, 115, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(116, "RXD1", 0, 116, UNIPHIER_PIN_DRV_4_8, 116, UNIPHIER_PIN_PULL_UP),
	UNIPHIER_PINCTRL_PIN(117, "PWSRA", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 117, UNIPHIER_PIN_PULL_DOWN),
	/* external interrupt request lines */
	UNIPHIER_PINCTRL_PIN(118, "XIRQ0", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 118, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(119, "XIRQ1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 119, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(120, "XIRQ2", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 120, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(121, "XIRQ3", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 121, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(122, "XIRQ4", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 122, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(123, "XIRQ5", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 123, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(124, "XIRQ6", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 124, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(125, "XIRQ7", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 125, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(126, "XIRQ8", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 126, UNIPHIER_PIN_PULL_DOWN),
	/* general-purpose ports PORT0x / PORT1x */
	UNIPHIER_PINCTRL_PIN(127, "PORT00", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 127, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(128, "PORT01", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 128, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(129, "PORT02", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 129, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(130, "PORT03", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 130, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(131, "PORT04", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 131, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(132, "PORT05", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 132, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(133, "PORT06", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 133, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(134, "PORT07", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 134, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(135, "PORT10", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 135, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(136, "PORT11", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 136, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(137, "PORT12", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 137, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(138, "PORT13", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 138, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(139, "PORT14", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 139, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(140, "PORT15", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 140, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(141, "PORT16", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 141, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(142, "LPST", UNIPHIER_PIN_IECTRL_NONE, 142, UNIPHIER_PIN_DRV_4_8, 142, UNIPHIER_PIN_PULL_DOWN),
	/* Ethernet MDIO management bus and RGMII data path */
	UNIPHIER_PINCTRL_PIN(143, "MDC", 0, 143, UNIPHIER_PIN_DRV_4_8, 143, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(144, "MDIO", 0, 144, UNIPHIER_PIN_DRV_4_8, 144, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(145, "MDIO_INTL", 0, 145, UNIPHIER_PIN_DRV_4_8, 145, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(146, "PHYRSTL", 0, 146, UNIPHIER_PIN_DRV_4_8, 146, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(147, "RGMII_RXCLK", 0, 147, UNIPHIER_PIN_DRV_4_8, 147, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(148, "RGMII_RXD0", 0, 148, UNIPHIER_PIN_DRV_4_8, 148, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(149, "RGMII_RXD1", 0, 149, UNIPHIER_PIN_DRV_4_8, 149, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(150, "RGMII_RXD2", 0, 150, UNIPHIER_PIN_DRV_4_8, 150, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(151, "RGMII_RXD3", 0, 151, UNIPHIER_PIN_DRV_4_8, 151, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(152, "RGMII_RXCTL", 0, 152, UNIPHIER_PIN_DRV_4_8, 152, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(153, "RGMII_TXCLK", 0, 153, UNIPHIER_PIN_DRV_4_8, 153, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(154, "RGMII_TXD0", 0, 154, UNIPHIER_PIN_DRV_4_8, 154, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(155, "RGMII_TXD1", 0, 155, UNIPHIER_PIN_DRV_4_8, 155, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(156, "RGMII_TXD2", 0, 156, UNIPHIER_PIN_DRV_4_8, 156, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(157, "RGMII_TXD3", 0, 157, UNIPHIER_PIN_DRV_4_8, 157, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(158, "RGMII_TXCTL", 0, 158, UNIPHIER_PIN_DRV_4_8, 158, UNIPHIER_PIN_PULL_DOWN),
	/* A_D_* pins 159-234: shared between chips (see adinter group below) */
	UNIPHIER_PINCTRL_PIN(159, "A_D_PCD00OUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 159, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(160, "A_D_PCD01OUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 160, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(161, "A_D_PCD02OUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 161, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(162, "A_D_PCD03OUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 162, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(163, "A_D_PCD04OUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 163, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(164, "A_D_PCD05OUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 164, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(165, "A_D_PCD06OUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 165, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(166, "A_D_PCD07OUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 166, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(167, "A_D_PCD00IN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 167, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(168, "A_D_PCD01IN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 168, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(169, "A_D_PCD02IN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 169, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(170, "A_D_PCD03IN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 170, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(171, "A_D_PCD04IN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 171, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(172, "A_D_PCD05IN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 172, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(173, "A_D_PCD06IN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 173, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(174, "A_D_PCD07IN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 174, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(175, "A_D_PCDNOE", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 175, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(176, "A_D_PC0READY", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 176, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(177, "A_D_PC0CD1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 177, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(178, "A_D_PC0CD2", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 178, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(179, "A_D_PC0WAIT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 179, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(180, "A_D_PC0RESET", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 180, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(181, "A_D_PC0CE1", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 181, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(182, "A_D_PC0WE", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 182, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(183, "A_D_PC0OE", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 183, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(184, "A_D_PC0IOWR", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 184, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(185, "A_D_PC0IORD", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 185, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(186, "A_D_PC0NOE", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 186, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(187, "A_D_HS0BCLKIN", 0, 187, UNIPHIER_PIN_DRV_4_8, 187, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(188, "A_D_HS0SYNCIN", 0, 188, UNIPHIER_PIN_DRV_4_8, 188, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(189, "A_D_HS0VALIN", 0, 189, UNIPHIER_PIN_DRV_4_8, 189, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(190, "A_D_HS0DIN0", 0, 190, UNIPHIER_PIN_DRV_4_8, 190, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(191, "A_D_HS0DIN1", 0, 191, UNIPHIER_PIN_DRV_4_8, 191, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(192, "A_D_HS0DIN2", 0, 192, UNIPHIER_PIN_DRV_4_8, 192, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(193, "A_D_HS0DIN3", 0, 193, UNIPHIER_PIN_DRV_4_8, 193, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(194, "A_D_HS0DIN4", 0, 194, UNIPHIER_PIN_DRV_4_8, 194, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(195, "A_D_HS0DIN5", 0, 195, UNIPHIER_PIN_DRV_4_8, 195, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(196, "A_D_HS0DIN6", 0, 196, UNIPHIER_PIN_DRV_4_8, 196, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(197, "A_D_HS0DIN7", 0, 197, UNIPHIER_PIN_DRV_4_8, 197, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(198, "A_D_AO1ARC", 0, 198, UNIPHIER_PIN_DRV_4_8, 198, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(199, "A_D_SPIXRST", UNIPHIER_PIN_IECTRL_NONE, 199, UNIPHIER_PIN_DRV_4_8, 199, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(200, "A_D_SPISCLK0", UNIPHIER_PIN_IECTRL_NONE, 200, UNIPHIER_PIN_DRV_4_8, 200, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(201, "A_D_SPITXD0", UNIPHIER_PIN_IECTRL_NONE, 201, UNIPHIER_PIN_DRV_4_8, 201, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(202, "A_D_SPIRXD0", UNIPHIER_PIN_IECTRL_NONE, 202, UNIPHIER_PIN_DRV_4_8, 202, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(203, "A_D_DMDCLK", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 203, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(204, "A_D_DMDPSYNC", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 204, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(205, "A_D_DMDVAL", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 205, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(206, "A_D_DMDDATA", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_8, 206, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(207, "A_D_HDMIRXXIRQ", 0, 207, UNIPHIER_PIN_DRV_4_8, 207, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(208, "A_D_VBIXIRQ", 0, 208, UNIPHIER_PIN_DRV_4_8, 208, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(209, "A_D_HDMITXXIRQ", 0, 209, UNIPHIER_PIN_DRV_4_8, 209, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(210, "A_D_DMDIRQ", UNIPHIER_PIN_IECTRL_NONE, 210, UNIPHIER_PIN_DRV_4_8, 210, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(211, "A_D_SPICIRQ", UNIPHIER_PIN_IECTRL_NONE, 211, UNIPHIER_PIN_DRV_4_8, 211, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(212, "A_D_SPIBIRQ", UNIPHIER_PIN_IECTRL_NONE, 212, UNIPHIER_PIN_DRV_4_8, 212, UNIPHIER_PIN_PULL_DOWN),
	/* the two BESDA pins are the only fixed-4mA pins in this table */
	UNIPHIER_PINCTRL_PIN(213, "A_D_BESDAOUT", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_4, 213, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(214, "A_D_BESDAIN", UNIPHIER_PIN_IECTRL_NONE, -1, UNIPHIER_PIN_DRV_FIXED_4, 214, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(215, "A_D_BESCLOUT", UNIPHIER_PIN_IECTRL_NONE, 215, UNIPHIER_PIN_DRV_4_8, 215, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(216, "A_D_VDACCLKOUT", 0, 216, UNIPHIER_PIN_DRV_4_8, 216, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(217, "A_D_VDACDOUT5", 0, 217, UNIPHIER_PIN_DRV_4_8, 217, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(218, "A_D_VDACDOUT6", 0, 218, UNIPHIER_PIN_DRV_4_8, 218, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(219, "A_D_VDACDOUT7", 0, 219, UNIPHIER_PIN_DRV_4_8, 219, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(220, "A_D_VDACDOUT8", 0, 220, UNIPHIER_PIN_DRV_4_8, 220, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(221, "A_D_VDACDOUT9", 0, 221, UNIPHIER_PIN_DRV_4_8, 221, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(222, "A_D_SIFBCKIN", 0, 222, UNIPHIER_PIN_DRV_4_8, 222, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(223, "A_D_SIFLRCKIN", 0, 223, UNIPHIER_PIN_DRV_4_8, 223, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(224, "A_D_SIFDIN", 0, 224, UNIPHIER_PIN_DRV_4_8, 224, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(225, "A_D_LIBCKOUT", 0, 225, UNIPHIER_PIN_DRV_4_8, 225, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(226, "A_D_LILRCKOUT", 0, 226, UNIPHIER_PIN_DRV_4_8, 226, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(227, "A_D_LIDIN", 0, 227, UNIPHIER_PIN_DRV_4_8, 227, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(228, "A_D_LODOUT", 0, 228, UNIPHIER_PIN_DRV_4_8, 228, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(229, "A_D_HPDOUT", 0, 229, UNIPHIER_PIN_DRV_4_8, 229, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(230, "A_D_MCLK", 0, 230, UNIPHIER_PIN_DRV_4_8, 230, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(231, "A_D_A2PLLREFOUT", 0, 231, UNIPHIER_PIN_DRV_4_8, 231, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(232, "A_D_HDMI3DSDAOUT", 0, 232, UNIPHIER_PIN_DRV_4_8, 232, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(233, "A_D_HDMI3DSDAIN", 0, 233, UNIPHIER_PIN_DRV_4_8, 233, UNIPHIER_PIN_PULL_DOWN),
	UNIPHIER_PINCTRL_PIN(234, "A_D_HDMI3DSCLIN", 0, 234, UNIPHIER_PIN_DRV_4_8, 234, UNIPHIER_PIN_PULL_DOWN),
};

/*
 * Pin groups: each <name>_pins[] lists pin numbers and the parallel
 * <name>_muxvals[] gives the mux register value selecting that function
 * on the corresponding pin. The arrays must stay the same length.
 */

/* adinter: the whole contiguous A_D_* range, pins 159-234, muxval 0 */
static const unsigned adinter_pins[] = {
	159, 160, 161, 162, 163, 164, 165, 166,
	167, 168, 169, 170, 171, 172, 173, 174,
	175, 176, 177, 178, 179, 180, 181, 182,
	183, 184, 185, 186, 187, 188, 189, 190,
	191, 192, 193, 194, 195, 196, 197, 198,
	199, 200, 201, 202, 203, 204, 205, 206,
	207, 208, 209, 210, 211, 212, 213, 214,
	215, 216, 217, 218, 219, 220, 221, 222,
	223, 224, 225, 226, 227, 228, 229, 230,
	231, 232, 233, 234,
};
static const unsigned adinter_muxvals[] = {
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0, 0, 0, 0, 0,
	0, 0, 0, 0,
};
/* eMMC reuses the NAND pins (mux value 1) */
static const unsigned emmc_pins[] = {36, 37, 38, 39, 40, 41, 42};
static const unsigned emmc_muxvals[] = {1, 1, 1, 1, 1, 1, 1};
static const unsigned emmc_dat8_pins[] = {43, 44, 45, 46};
static const unsigned emmc_dat8_muxvals[] = {1, 1, 1, 1};
static const unsigned i2c0_pins[] = {109, 110};		/* SDA0/SCL0 */
static const unsigned i2c0_muxvals[] = {0, 0};
static const unsigned i2c1_pins[] = {111, 112};		/* SDA1/SCL1 */
static const unsigned i2c1_muxvals[] = {0, 0};
static const unsigned i2c2_pins[] = {115, 116};		/* shared with uart1 */
static const unsigned i2c2_muxvals[] = {1, 1};
static const unsigned i2c3_pins[] = {118, 119};		/* shared with XIRQ0/1 */
static const unsigned i2c3_muxvals[] = {1, 1};
static const unsigned nand_pins[] = {30, 31, 32, 33, 34, 35, 36, 39, 40, 41,
				     42, 43, 44, 45, 46};
static const unsigned nand_muxvals[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
					0, 0, 0, 0, 0};
static const unsigned nand_cs1_pins[] = {37, 38};
static const unsigned nand_cs1_muxvals[] = {0, 0};
static const unsigned uart0_pins[] = {135, 136};	/* PORT10/PORT11 */
static const unsigned uart0_muxvals[] = {3, 3};
/*
 * Per-function pin lists with their mux register values: for each group,
 * <name>_muxvals[i] is the value programmed into the mux field of pin
 * <name>_pins[i] to select that function.
 */
static const unsigned uart0b_pins[] = {11, 12};
static const unsigned uart0b_muxvals[] = {2, 2};
static const unsigned uart1_pins[] = {115, 116};
static const unsigned uart1_muxvals[] = {0, 0};
static const unsigned uart1b_pins[] = {113, 114};
static const unsigned uart1b_muxvals[] = {1, 1};
static const unsigned uart2_pins[] = {113, 114};
static const unsigned uart2_muxvals[] = {2, 2};
static const unsigned uart2b_pins[] = {86, 87};
static const unsigned uart2b_muxvals[] = {1, 1};
static const unsigned usb0_pins[] = {56, 57};
static const unsigned usb0_muxvals[] = {0, 0};
static const unsigned usb1_pins[] = {58, 59};
static const unsigned usb1_muxvals[] = {0, 0};
static const unsigned usb2_pins[] = {60, 61};
static const unsigned usb2_muxvals[] = {0, 0};
static const unsigned usb3_pins[] = {62, 63};
static const unsigned usb3_muxvals[] = {0, 0};
/*
 * GPIO-capable port pins, eight per bank (PORT0x .. PORT10x).  Note the
 * gaps in pin numbering (e.g. pin 20 is absent from PORT4x); those pins
 * appear in port_range1 instead.
 */
static const unsigned port_range0_pins[] = {
	127, 128, 129, 130, 131, 132, 133, 134,		/* PORT0x */
	135, 136, 137, 138, 139, 140, 141, 142,		/* PORT1x */
	0, 1, 2, 3, 4, 5, 6, 7,				/* PORT2x */
	8, 9, 10, 11, 12, 13, 14, 15,			/* PORT3x */
	16, 17, 18, 19, 21, 22, 23, 24,			/* PORT4x */
	25, 30, 31, 32, 33, 34, 35, 36,			/* PORT5x */
	37, 38, 39, 40, 41, 42, 43, 44,			/* PORT6x */
	45, 46, 47, 48, 49, 50, 51, 52,			/* PORT7x */
	53, 54, 55, 56, 57, 58, 59, 60,			/* PORT8x */
	61, 62, 63, 64, 65, 66, 67, 68,			/* PORT9x */
	69, 70, 71, 76, 77, 78, 79, 80,			/* PORT10x */
};
/* Mux value 15 on every port pin — presumably the GPIO/port function. */
static const unsigned port_range0_muxvals[] = {
	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT0x */
	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT1x */
	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT2x */
	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT3x */
	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT4x */
	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT5x */
	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT6x */
	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT7x */
	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT8x */
	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT9x */
	15, 15, 15, 15, 15, 15, 15, 15,			/* PORT10x */
};
static const unsigned port_range1_pins[] =
{ 81, 82, 83, 84, 85, 86, 87, 88, /* PORT12x */ 89, 90, 95, 96, 97, 98, 99, 100, /* PORT13x */ 101, 102, 103, 104, 105, 106, 107, 108, /* PORT14x */ 118, 119, 120, 121, 122, 123, 124, 125, /* PORT15x */ 126, 72, 73, 92, 177, 93, 94, 176, /* PORT16x */ 74, 91, 27, 28, 29, 75, 20, 26, /* PORT17x */ 109, 110, 111, 112, 113, 114, 115, 116, /* PORT18x */ 117, 143, 144, 145, 146, 147, 148, 149, /* PORT19x */ 150, 151, 152, 153, 154, 155, 156, 157, /* PORT20x */ 158, 159, 160, 161, 162, 163, 164, 165, /* PORT21x */ 166, 178, 179, 180, 181, 182, 183, 184, /* PORT22x */ 185, 187, 188, 189, 190, 191, 192, 193, /* PORT23x */ 194, 195, 196, 197, 198, 199, 200, 201, /* PORT24x */ 202, 203, 204, 205, 206, 207, 208, 209, /* PORT25x */ 210, 211, 212, 213, 214, 215, 216, 217, /* PORT26x */ 218, 219, 220, 221, 223, 224, 225, 226, /* PORT27x */ 227, 228, 229, 230, 231, 232, 233, 234, /* PORT28x */ }; static const unsigned port_range1_muxvals[] = { 15, 15, 15, 15, 15, 15, 15, 15, /* PORT12x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT13x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT14x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT15x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT16x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT17x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT18x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT19x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT20x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT21x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT22x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT23x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT24x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT25x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT26x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT27x */ 15, 15, 15, 15, 15, 15, 15, 15, /* PORT28x */ }; static const unsigned xirq_pins[] = { 118, 119, 120, 121, 122, 123, 124, 125, /* XIRQ0-7 */ 126, 72, 73, 92, 177, 93, 94, 176, /* XIRQ8-15 */ 74, 91, 27, 28, 29, 75, 20, 26, /* XIRQ16-23 */ }; static const unsigned xirq_muxvals[] = { 14, 14, 14, 14, 14, 14, 14, 14, /* 
XIRQ0-7 */ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ8-15 */ 14, 14, 14, 14, 14, 14, 14, 14, /* XIRQ16-23 */ }; static const struct uniphier_pinctrl_group ph1_ld6b_groups[] = { UNIPHIER_PINCTRL_GROUP(adinter), UNIPHIER_PINCTRL_GROUP(emmc), UNIPHIER_PINCTRL_GROUP(emmc_dat8), UNIPHIER_PINCTRL_GROUP(i2c0), UNIPHIER_PINCTRL_GROUP(i2c1), UNIPHIER_PINCTRL_GROUP(i2c2), UNIPHIER_PINCTRL_GROUP(i2c3), UNIPHIER_PINCTRL_GROUP(nand), UNIPHIER_PINCTRL_GROUP(nand_cs1), UNIPHIER_PINCTRL_GROUP(uart0), UNIPHIER_PINCTRL_GROUP(uart0b), UNIPHIER_PINCTRL_GROUP(uart1), UNIPHIER_PINCTRL_GROUP(uart1b), UNIPHIER_PINCTRL_GROUP(uart2), UNIPHIER_PINCTRL_GROUP(uart2b), UNIPHIER_PINCTRL_GROUP(usb0), UNIPHIER_PINCTRL_GROUP(usb1), UNIPHIER_PINCTRL_GROUP(usb2), UNIPHIER_PINCTRL_GROUP(usb3), UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range0), UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_PORT(port_range1), UNIPHIER_PINCTRL_GROUP_GPIO_RANGE_IRQ(xirq), UNIPHIER_PINCTRL_GROUP_SINGLE(port00, port_range0, 0), UNIPHIER_PINCTRL_GROUP_SINGLE(port01, port_range0, 1), UNIPHIER_PINCTRL_GROUP_SINGLE(port02, port_range0, 2), UNIPHIER_PINCTRL_GROUP_SINGLE(port03, port_range0, 3), UNIPHIER_PINCTRL_GROUP_SINGLE(port04, port_range0, 4), UNIPHIER_PINCTRL_GROUP_SINGLE(port05, port_range0, 5), UNIPHIER_PINCTRL_GROUP_SINGLE(port06, port_range0, 6), UNIPHIER_PINCTRL_GROUP_SINGLE(port07, port_range0, 7), UNIPHIER_PINCTRL_GROUP_SINGLE(port10, port_range0, 8), UNIPHIER_PINCTRL_GROUP_SINGLE(port11, port_range0, 9), UNIPHIER_PINCTRL_GROUP_SINGLE(port12, port_range0, 10), UNIPHIER_PINCTRL_GROUP_SINGLE(port13, port_range0, 11), UNIPHIER_PINCTRL_GROUP_SINGLE(port14, port_range0, 12), UNIPHIER_PINCTRL_GROUP_SINGLE(port15, port_range0, 13), UNIPHIER_PINCTRL_GROUP_SINGLE(port16, port_range0, 14), UNIPHIER_PINCTRL_GROUP_SINGLE(port17, port_range0, 15), UNIPHIER_PINCTRL_GROUP_SINGLE(port20, port_range0, 16), UNIPHIER_PINCTRL_GROUP_SINGLE(port21, port_range0, 17), UNIPHIER_PINCTRL_GROUP_SINGLE(port22, port_range0, 18), 
UNIPHIER_PINCTRL_GROUP_SINGLE(port23, port_range0, 19), UNIPHIER_PINCTRL_GROUP_SINGLE(port24, port_range0, 20), UNIPHIER_PINCTRL_GROUP_SINGLE(port25, port_range0, 21), UNIPHIER_PINCTRL_GROUP_SINGLE(port26, port_range0, 22), UNIPHIER_PINCTRL_GROUP_SINGLE(port27, port_range0, 23), UNIPHIER_PINCTRL_GROUP_SINGLE(port30, port_range0, 24), UNIPHIER_PINCTRL_GROUP_SINGLE(port31, port_range0, 25), UNIPHIER_PINCTRL_GROUP_SINGLE(port32, port_range0, 26), UNIPHIER_PINCTRL_GROUP_SINGLE(port33, port_range0, 27), UNIPHIER_PINCTRL_GROUP_SINGLE(port34, port_range0, 28), UNIPHIER_PINCTRL_GROUP_SINGLE(port35, port_range0, 29), UNIPHIER_PINCTRL_GROUP_SINGLE(port36, port_range0, 30), UNIPHIER_PINCTRL_GROUP_SINGLE(port37, port_range0, 31), UNIPHIER_PINCTRL_GROUP_SINGLE(port40, port_range0, 32), UNIPHIER_PINCTRL_GROUP_SINGLE(port41, port_range0, 33), UNIPHIER_PINCTRL_GROUP_SINGLE(port42, port_range0, 34), UNIPHIER_PINCTRL_GROUP_SINGLE(port43, port_range0, 35), UNIPHIER_PINCTRL_GROUP_SINGLE(port44, port_range0, 36), UNIPHIER_PINCTRL_GROUP_SINGLE(port45, port_range0, 37), UNIPHIER_PINCTRL_GROUP_SINGLE(port46, port_range0, 38), UNIPHIER_PINCTRL_GROUP_SINGLE(port47, port_range0, 39), UNIPHIER_PINCTRL_GROUP_SINGLE(port50, port_range0, 40), UNIPHIER_PINCTRL_GROUP_SINGLE(port51, port_range0, 41), UNIPHIER_PINCTRL_GROUP_SINGLE(port52, port_range0, 42), UNIPHIER_PINCTRL_GROUP_SINGLE(port53, port_range0, 43), UNIPHIER_PINCTRL_GROUP_SINGLE(port54, port_range0, 44), UNIPHIER_PINCTRL_GROUP_SINGLE(port55, port_range0, 45), UNIPHIER_PINCTRL_GROUP_SINGLE(port56, port_range0, 46), UNIPHIER_PINCTRL_GROUP_SINGLE(port57, port_range0, 47), UNIPHIER_PINCTRL_GROUP_SINGLE(port60, port_range0, 48), UNIPHIER_PINCTRL_GROUP_SINGLE(port61, port_range0, 49), UNIPHIER_PINCTRL_GROUP_SINGLE(port62, port_range0, 50), UNIPHIER_PINCTRL_GROUP_SINGLE(port63, port_range0, 51), UNIPHIER_PINCTRL_GROUP_SINGLE(port64, port_range0, 52), UNIPHIER_PINCTRL_GROUP_SINGLE(port65, port_range0, 53), UNIPHIER_PINCTRL_GROUP_SINGLE(port66, 
port_range0, 54), UNIPHIER_PINCTRL_GROUP_SINGLE(port67, port_range0, 55), UNIPHIER_PINCTRL_GROUP_SINGLE(port70, port_range0, 56), UNIPHIER_PINCTRL_GROUP_SINGLE(port71, port_range0, 57), UNIPHIER_PINCTRL_GROUP_SINGLE(port72, port_range0, 58), UNIPHIER_PINCTRL_GROUP_SINGLE(port73, port_range0, 59), UNIPHIER_PINCTRL_GROUP_SINGLE(port74, port_range0, 60), UNIPHIER_PINCTRL_GROUP_SINGLE(port75, port_range0, 61), UNIPHIER_PINCTRL_GROUP_SINGLE(port76, port_range0, 62), UNIPHIER_PINCTRL_GROUP_SINGLE(port77, port_range0, 63), UNIPHIER_PINCTRL_GROUP_SINGLE(port80, port_range0, 64), UNIPHIER_PINCTRL_GROUP_SINGLE(port81, port_range0, 65), UNIPHIER_PINCTRL_GROUP_SINGLE(port82, port_range0, 66), UNIPHIER_PINCTRL_GROUP_SINGLE(port83, port_range0, 67), UNIPHIER_PINCTRL_GROUP_SINGLE(port84, port_range0, 68), UNIPHIER_PINCTRL_GROUP_SINGLE(port85, port_range0, 69), UNIPHIER_PINCTRL_GROUP_SINGLE(port86, port_range0, 70), UNIPHIER_PINCTRL_GROUP_SINGLE(port87, port_range0, 71), UNIPHIER_PINCTRL_GROUP_SINGLE(port90, port_range0, 72), UNIPHIER_PINCTRL_GROUP_SINGLE(port91, port_range0, 73), UNIPHIER_PINCTRL_GROUP_SINGLE(port92, port_range0, 74), UNIPHIER_PINCTRL_GROUP_SINGLE(port93, port_range0, 75), UNIPHIER_PINCTRL_GROUP_SINGLE(port94, port_range0, 76), UNIPHIER_PINCTRL_GROUP_SINGLE(port95, port_range0, 77), UNIPHIER_PINCTRL_GROUP_SINGLE(port96, port_range0, 78), UNIPHIER_PINCTRL_GROUP_SINGLE(port97, port_range0, 79), UNIPHIER_PINCTRL_GROUP_SINGLE(port100, port_range0, 80), UNIPHIER_PINCTRL_GROUP_SINGLE(port101, port_range0, 81), UNIPHIER_PINCTRL_GROUP_SINGLE(port102, port_range0, 82), UNIPHIER_PINCTRL_GROUP_SINGLE(port103, port_range0, 83), UNIPHIER_PINCTRL_GROUP_SINGLE(port104, port_range0, 84), UNIPHIER_PINCTRL_GROUP_SINGLE(port105, port_range0, 85), UNIPHIER_PINCTRL_GROUP_SINGLE(port106, port_range0, 86), UNIPHIER_PINCTRL_GROUP_SINGLE(port107, port_range0, 87), UNIPHIER_PINCTRL_GROUP_SINGLE(port120, port_range1, 0), UNIPHIER_PINCTRL_GROUP_SINGLE(port121, port_range1, 1), 
UNIPHIER_PINCTRL_GROUP_SINGLE(port122, port_range1, 2), UNIPHIER_PINCTRL_GROUP_SINGLE(port123, port_range1, 3), UNIPHIER_PINCTRL_GROUP_SINGLE(port124, port_range1, 4), UNIPHIER_PINCTRL_GROUP_SINGLE(port125, port_range1, 5), UNIPHIER_PINCTRL_GROUP_SINGLE(port126, port_range1, 6), UNIPHIER_PINCTRL_GROUP_SINGLE(port127, port_range1, 7), UNIPHIER_PINCTRL_GROUP_SINGLE(port130, port_range1, 8), UNIPHIER_PINCTRL_GROUP_SINGLE(port131, port_range1, 9), UNIPHIER_PINCTRL_GROUP_SINGLE(port132, port_range1, 10), UNIPHIER_PINCTRL_GROUP_SINGLE(port133, port_range1, 11), UNIPHIER_PINCTRL_GROUP_SINGLE(port134, port_range1, 12), UNIPHIER_PINCTRL_GROUP_SINGLE(port135, port_range1, 13), UNIPHIER_PINCTRL_GROUP_SINGLE(port136, port_range1, 14), UNIPHIER_PINCTRL_GROUP_SINGLE(port137, port_range1, 15), UNIPHIER_PINCTRL_GROUP_SINGLE(port140, port_range1, 16), UNIPHIER_PINCTRL_GROUP_SINGLE(port141, port_range1, 17), UNIPHIER_PINCTRL_GROUP_SINGLE(port142, port_range1, 18), UNIPHIER_PINCTRL_GROUP_SINGLE(port143, port_range1, 19), UNIPHIER_PINCTRL_GROUP_SINGLE(port144, port_range1, 20), UNIPHIER_PINCTRL_GROUP_SINGLE(port145, port_range1, 21), UNIPHIER_PINCTRL_GROUP_SINGLE(port146, port_range1, 22), UNIPHIER_PINCTRL_GROUP_SINGLE(port147, port_range1, 23), UNIPHIER_PINCTRL_GROUP_SINGLE(port150, port_range1, 24), UNIPHIER_PINCTRL_GROUP_SINGLE(port151, port_range1, 25), UNIPHIER_PINCTRL_GROUP_SINGLE(port152, port_range1, 26), UNIPHIER_PINCTRL_GROUP_SINGLE(port153, port_range1, 27), UNIPHIER_PINCTRL_GROUP_SINGLE(port154, port_range1, 28), UNIPHIER_PINCTRL_GROUP_SINGLE(port155, port_range1, 29), UNIPHIER_PINCTRL_GROUP_SINGLE(port156, port_range1, 30), UNIPHIER_PINCTRL_GROUP_SINGLE(port157, port_range1, 31), UNIPHIER_PINCTRL_GROUP_SINGLE(port160, port_range1, 32), UNIPHIER_PINCTRL_GROUP_SINGLE(port161, port_range1, 33), UNIPHIER_PINCTRL_GROUP_SINGLE(port162, port_range1, 34), UNIPHIER_PINCTRL_GROUP_SINGLE(port163, port_range1, 35), UNIPHIER_PINCTRL_GROUP_SINGLE(port164, port_range1, 36), 
UNIPHIER_PINCTRL_GROUP_SINGLE(port165, port_range1, 37), UNIPHIER_PINCTRL_GROUP_SINGLE(port166, port_range1, 38), UNIPHIER_PINCTRL_GROUP_SINGLE(port167, port_range1, 39), UNIPHIER_PINCTRL_GROUP_SINGLE(port170, port_range1, 40), UNIPHIER_PINCTRL_GROUP_SINGLE(port171, port_range1, 41), UNIPHIER_PINCTRL_GROUP_SINGLE(port172, port_range1, 42), UNIPHIER_PINCTRL_GROUP_SINGLE(port173, port_range1, 43), UNIPHIER_PINCTRL_GROUP_SINGLE(port174, port_range1, 44), UNIPHIER_PINCTRL_GROUP_SINGLE(port175, port_range1, 45), UNIPHIER_PINCTRL_GROUP_SINGLE(port176, port_range1, 46), UNIPHIER_PINCTRL_GROUP_SINGLE(port177, port_range1, 47), UNIPHIER_PINCTRL_GROUP_SINGLE(port180, port_range1, 48), UNIPHIER_PINCTRL_GROUP_SINGLE(port181, port_range1, 49), UNIPHIER_PINCTRL_GROUP_SINGLE(port182, port_range1, 50), UNIPHIER_PINCTRL_GROUP_SINGLE(port183, port_range1, 51), UNIPHIER_PINCTRL_GROUP_SINGLE(port184, port_range1, 52), UNIPHIER_PINCTRL_GROUP_SINGLE(port185, port_range1, 53), UNIPHIER_PINCTRL_GROUP_SINGLE(port186, port_range1, 54), UNIPHIER_PINCTRL_GROUP_SINGLE(port187, port_range1, 55), UNIPHIER_PINCTRL_GROUP_SINGLE(port190, port_range1, 56), UNIPHIER_PINCTRL_GROUP_SINGLE(port191, port_range1, 57), UNIPHIER_PINCTRL_GROUP_SINGLE(port192, port_range1, 58), UNIPHIER_PINCTRL_GROUP_SINGLE(port193, port_range1, 59), UNIPHIER_PINCTRL_GROUP_SINGLE(port194, port_range1, 60), UNIPHIER_PINCTRL_GROUP_SINGLE(port195, port_range1, 61), UNIPHIER_PINCTRL_GROUP_SINGLE(port196, port_range1, 62), UNIPHIER_PINCTRL_GROUP_SINGLE(port197, port_range1, 63), UNIPHIER_PINCTRL_GROUP_SINGLE(port200, port_range1, 64), UNIPHIER_PINCTRL_GROUP_SINGLE(port201, port_range1, 65), UNIPHIER_PINCTRL_GROUP_SINGLE(port202, port_range1, 66), UNIPHIER_PINCTRL_GROUP_SINGLE(port203, port_range1, 67), UNIPHIER_PINCTRL_GROUP_SINGLE(port204, port_range1, 68), UNIPHIER_PINCTRL_GROUP_SINGLE(port205, port_range1, 69), UNIPHIER_PINCTRL_GROUP_SINGLE(port206, port_range1, 70), UNIPHIER_PINCTRL_GROUP_SINGLE(port207, port_range1, 71), 
UNIPHIER_PINCTRL_GROUP_SINGLE(port210, port_range1, 72), UNIPHIER_PINCTRL_GROUP_SINGLE(port211, port_range1, 73), UNIPHIER_PINCTRL_GROUP_SINGLE(port212, port_range1, 74), UNIPHIER_PINCTRL_GROUP_SINGLE(port213, port_range1, 75), UNIPHIER_PINCTRL_GROUP_SINGLE(port214, port_range1, 76), UNIPHIER_PINCTRL_GROUP_SINGLE(port215, port_range1, 77), UNIPHIER_PINCTRL_GROUP_SINGLE(port216, port_range1, 78), UNIPHIER_PINCTRL_GROUP_SINGLE(port217, port_range1, 79), UNIPHIER_PINCTRL_GROUP_SINGLE(port220, port_range1, 80), UNIPHIER_PINCTRL_GROUP_SINGLE(port221, port_range1, 81), UNIPHIER_PINCTRL_GROUP_SINGLE(port222, port_range1, 82), UNIPHIER_PINCTRL_GROUP_SINGLE(port223, port_range1, 83), UNIPHIER_PINCTRL_GROUP_SINGLE(port224, port_range1, 84), UNIPHIER_PINCTRL_GROUP_SINGLE(port225, port_range1, 85), UNIPHIER_PINCTRL_GROUP_SINGLE(port226, port_range1, 86), UNIPHIER_PINCTRL_GROUP_SINGLE(port227, port_range1, 87), UNIPHIER_PINCTRL_GROUP_SINGLE(port230, port_range1, 88), UNIPHIER_PINCTRL_GROUP_SINGLE(port231, port_range1, 89), UNIPHIER_PINCTRL_GROUP_SINGLE(port232, port_range1, 90), UNIPHIER_PINCTRL_GROUP_SINGLE(port233, port_range1, 91), UNIPHIER_PINCTRL_GROUP_SINGLE(port234, port_range1, 92), UNIPHIER_PINCTRL_GROUP_SINGLE(port235, port_range1, 93), UNIPHIER_PINCTRL_GROUP_SINGLE(port236, port_range1, 94), UNIPHIER_PINCTRL_GROUP_SINGLE(port237, port_range1, 95), UNIPHIER_PINCTRL_GROUP_SINGLE(port240, port_range1, 96), UNIPHIER_PINCTRL_GROUP_SINGLE(port241, port_range1, 97), UNIPHIER_PINCTRL_GROUP_SINGLE(port242, port_range1, 98), UNIPHIER_PINCTRL_GROUP_SINGLE(port243, port_range1, 99), UNIPHIER_PINCTRL_GROUP_SINGLE(port244, port_range1, 100), UNIPHIER_PINCTRL_GROUP_SINGLE(port245, port_range1, 101), UNIPHIER_PINCTRL_GROUP_SINGLE(port246, port_range1, 102), UNIPHIER_PINCTRL_GROUP_SINGLE(port247, port_range1, 103), UNIPHIER_PINCTRL_GROUP_SINGLE(port250, port_range1, 104), UNIPHIER_PINCTRL_GROUP_SINGLE(port251, port_range1, 105), UNIPHIER_PINCTRL_GROUP_SINGLE(port252, port_range1, 
106), UNIPHIER_PINCTRL_GROUP_SINGLE(port253, port_range1, 107), UNIPHIER_PINCTRL_GROUP_SINGLE(port254, port_range1, 108), UNIPHIER_PINCTRL_GROUP_SINGLE(port255, port_range1, 109), UNIPHIER_PINCTRL_GROUP_SINGLE(port256, port_range1, 110), UNIPHIER_PINCTRL_GROUP_SINGLE(port257, port_range1, 111), UNIPHIER_PINCTRL_GROUP_SINGLE(port260, port_range1, 112), UNIPHIER_PINCTRL_GROUP_SINGLE(port261, port_range1, 113), UNIPHIER_PINCTRL_GROUP_SINGLE(port262, port_range1, 114), UNIPHIER_PINCTRL_GROUP_SINGLE(port263, port_range1, 115), UNIPHIER_PINCTRL_GROUP_SINGLE(port264, port_range1, 116), UNIPHIER_PINCTRL_GROUP_SINGLE(port265, port_range1, 117), UNIPHIER_PINCTRL_GROUP_SINGLE(port266, port_range1, 118), UNIPHIER_PINCTRL_GROUP_SINGLE(port267, port_range1, 119), UNIPHIER_PINCTRL_GROUP_SINGLE(port270, port_range1, 120), UNIPHIER_PINCTRL_GROUP_SINGLE(port271, port_range1, 121), UNIPHIER_PINCTRL_GROUP_SINGLE(port272, port_range1, 122), UNIPHIER_PINCTRL_GROUP_SINGLE(port273, port_range1, 123), UNIPHIER_PINCTRL_GROUP_SINGLE(port274, port_range1, 124), UNIPHIER_PINCTRL_GROUP_SINGLE(port275, port_range1, 125), UNIPHIER_PINCTRL_GROUP_SINGLE(port276, port_range1, 126), UNIPHIER_PINCTRL_GROUP_SINGLE(port277, port_range1, 127), UNIPHIER_PINCTRL_GROUP_SINGLE(port280, port_range1, 128), UNIPHIER_PINCTRL_GROUP_SINGLE(port281, port_range1, 129), UNIPHIER_PINCTRL_GROUP_SINGLE(port282, port_range1, 130), UNIPHIER_PINCTRL_GROUP_SINGLE(port283, port_range1, 131), UNIPHIER_PINCTRL_GROUP_SINGLE(port284, port_range1, 132), UNIPHIER_PINCTRL_GROUP_SINGLE(port285, port_range1, 133), UNIPHIER_PINCTRL_GROUP_SINGLE(port286, port_range1, 134), UNIPHIER_PINCTRL_GROUP_SINGLE(port287, port_range1, 135), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq0, xirq, 0), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq1, xirq, 1), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq2, xirq, 2), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq3, xirq, 3), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq4, xirq, 4), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq5, xirq, 5), 
UNIPHIER_PINCTRL_GROUP_SINGLE(xirq6, xirq, 6), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq7, xirq, 7), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq8, xirq, 8), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq9, xirq, 9), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq10, xirq, 10), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq11, xirq, 11), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq12, xirq, 12), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq13, xirq, 13), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq14, xirq, 14), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq15, xirq, 15), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq16, xirq, 16), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq17, xirq, 17), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq18, xirq, 18), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq19, xirq, 19), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq20, xirq, 20), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq21, xirq, 21), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq22, xirq, 22), UNIPHIER_PINCTRL_GROUP_SINGLE(xirq23, xirq, 23), }; static const char * const adinter_groups[] = {"adinter"}; static const char * const emmc_groups[] = {"emmc", "emmc_dat8"}; static const char * const i2c0_groups[] = {"i2c0"}; static const char * const i2c1_groups[] = {"i2c1"}; static const char * const i2c2_groups[] = {"i2c2"}; static const char * const i2c3_groups[] = {"i2c3"}; static const char * const nand_groups[] = {"nand", "nand_cs1"}; static const char * const uart0_groups[] = {"uart0", "uart0b"}; static const char * const uart1_groups[] = {"uart1", "uart1b"}; static const char * const uart2_groups[] = {"uart2", "uart2b"}; static const char * const usb0_groups[] = {"usb0"}; static const char * const usb1_groups[] = {"usb1"}; static const char * const usb2_groups[] = {"usb2"}; static const char * const usb3_groups[] = {"usb3"}; static const char * const port_groups[] = { "port00", "port01", "port02", "port03", "port04", "port05", "port06", "port07", "port10", "port11", "port12", "port13", "port14", "port15", "port16", "port17", "port20", "port21", "port22", "port23", "port24", "port25", "port26", "port27", "port30", "port31", "port32", "port33", 
"port34", "port35", "port36", "port37", "port40", "port41", "port42", "port43", "port44", "port45", "port46", "port47", "port50", "port51", "port52", "port53", "port54", "port55", "port56", "port57", "port60", "port61", "port62", "port63", "port64", "port65", "port66", "port67", "port70", "port71", "port72", "port73", "port74", "port75", "port76", "port77", "port80", "port81", "port82", "port83", "port84", "port85", "port86", "port87", "port90", "port91", "port92", "port93", "port94", "port95", "port96", "port97", "port100", "port101", "port102", "port103", "port104", "port105", "port106", "port107", /* port110-117 missing */ "port120", "port121", "port122", "port123", "port124", "port125", "port126", "port127", "port130", "port131", "port132", "port133", "port134", "port135", "port136", "port137", "port140", "port141", "port142", "port143", "port144", "port145", "port146", "port147", "port150", "port151", "port152", "port153", "port154", "port155", "port156", "port157", "port160", "port161", "port162", "port163", "port164", "port165", "port166", "port167", "port170", "port171", "port172", "port173", "port174", "port175", "port176", "port177", "port180", "port181", "port182", "port183", "port184", "port185", "port186", "port187", "port190", "port191", "port192", "port193", "port194", "port195", "port196", "port197", "port200", "port201", "port202", "port203", "port204", "port205", "port206", "port207", "port210", "port211", "port212", "port213", "port214", "port215", "port216", "port217", "port220", "port221", "port222", "port223", "port224", "port225", "port226", "port227", "port230", "port231", "port232", "port233", "port234", "port235", "port236", "port237", "port240", "port241", "port242", "port243", "port244", "port245", "port246", "port247", "port250", "port251", "port252", "port253", "port254", "port255", "port256", "port257", "port260", "port261", "port262", "port263", "port264", "port265", "port266", "port267", "port270", "port271", "port272", "port273", 
"port274", "port275", "port276", "port277",
	"port280", "port281", "port282", "port283",
	"port284", "port285", "port286", "port287",
};
/* Per-pin groups for the external interrupt lines XIRQ0-XIRQ23. */
static const char * const xirq_groups[] = {
	"xirq0", "xirq1", "xirq2", "xirq3",
	"xirq4", "xirq5", "xirq6", "xirq7",
	"xirq8", "xirq9", "xirq10", "xirq11",
	"xirq12", "xirq13", "xirq14", "xirq15",
	"xirq16", "xirq17", "xirq18", "xirq19",
	"xirq20", "xirq21", "xirq22", "xirq23",
};

/*
 * Selectable pinmux functions; each references the <name>_groups[] table
 * listing the pin groups that can provide it.
 */
static const struct uniphier_pinmux_function ph1_ld6b_functions[] = {
	UNIPHIER_PINMUX_FUNCTION(adinter), /* Achip-Dchip interconnect */
	UNIPHIER_PINMUX_FUNCTION(emmc),
	UNIPHIER_PINMUX_FUNCTION(i2c0),
	UNIPHIER_PINMUX_FUNCTION(i2c1),
	UNIPHIER_PINMUX_FUNCTION(i2c2),
	UNIPHIER_PINMUX_FUNCTION(i2c3),
	UNIPHIER_PINMUX_FUNCTION(nand),
	UNIPHIER_PINMUX_FUNCTION(uart0),
	UNIPHIER_PINMUX_FUNCTION(uart1),
	UNIPHIER_PINMUX_FUNCTION(uart2),
	UNIPHIER_PINMUX_FUNCTION(usb0),
	UNIPHIER_PINMUX_FUNCTION(usb1),
	UNIPHIER_PINMUX_FUNCTION(usb2),
	UNIPHIER_PINMUX_FUNCTION(usb3),
	UNIPHIER_PINMUX_FUNCTION(port),
	UNIPHIER_PINMUX_FUNCTION(xirq),
};

/* SoC-specific tables handed to the common UniPhier pinctrl core. */
static struct uniphier_pinctrl_socdata ph1_ld6b_pindata = {
	.groups = ph1_ld6b_groups,
	.groups_count = ARRAY_SIZE(ph1_ld6b_groups),
	.functions = ph1_ld6b_functions,
	.functions_count = ARRAY_SIZE(ph1_ld6b_functions),
	.mux_bits = 8,
	.reg_stride = 4,
	.load_pinctrl = false,
};

static struct pinctrl_desc ph1_ld6b_pinctrl_desc = {
	.name = DRIVER_NAME,
	.pins = ph1_ld6b_pins,
	.npins = ARRAY_SIZE(ph1_ld6b_pins),
	.owner = THIS_MODULE,
};

/* Probe: all real work is delegated to the shared UniPhier pinctrl core. */
static int ph1_ld6b_pinctrl_probe(struct platform_device *pdev)
{
	return uniphier_pinctrl_probe(pdev, &ph1_ld6b_pinctrl_desc,
				      &ph1_ld6b_pindata);
}

static const struct of_device_id ph1_ld6b_pinctrl_match[] = {
	{ .compatible = "socionext,ph1-ld6b-pinctrl" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ph1_ld6b_pinctrl_match);

static struct platform_driver ph1_ld6b_pinctrl_driver = {
	.probe = ph1_ld6b_pinctrl_probe,
	.remove = uniphier_pinctrl_remove,
	.driver = {
		.name = DRIVER_NAME,
		.of_match_table = ph1_ld6b_pinctrl_match,
	},
};
module_platform_driver(ph1_ld6b_pinctrl_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier PH1-LD6b pinctrl driver");
MODULE_LICENSE("GPL");
gpl-2.0
Khaon/android_kernel_samsung_a3xelte
net/decnet/af_decnet.c
458
54707
/* * DECnet An implementation of the DECnet protocol suite for the LINUX * operating system. DECnet is implemented using the BSD Socket * interface as the means of communication with the user level. * * DECnet Socket Layer Interface * * Authors: Eduardo Marcelo Serrat <emserrat@geocities.com> * Patrick Caulfield <patrick@pandh.demon.co.uk> * * Changes: * Steve Whitehouse: Copied from Eduardo Serrat and Patrick Caulfield's * version of the code. Original copyright preserved * below. * Steve Whitehouse: Some bug fixes, cleaning up some code to make it * compatible with my routing layer. * Steve Whitehouse: Merging changes from Eduardo Serrat and Patrick * Caulfield. * Steve Whitehouse: Further bug fixes, checking module code still works * with new routing layer. * Steve Whitehouse: Additional set/get_sockopt() calls. * Steve Whitehouse: Fixed TIOCINQ ioctl to be same as Eduardo's new * code. * Steve Whitehouse: recvmsg() changed to try and behave in a POSIX like * way. Didn't manage it entirely, but its better. * Steve Whitehouse: ditto for sendmsg(). * Steve Whitehouse: A selection of bug fixes to various things. * Steve Whitehouse: Added TIOCOUTQ ioctl. * Steve Whitehouse: Fixes to username2sockaddr & sockaddr2username. * Steve Whitehouse: Fixes to connect() error returns. * Patrick Caulfield: Fixes to delayed acceptance logic. * David S. Miller: New socket locking * Steve Whitehouse: Socket list hashing/locking * Arnaldo C. Melo: use capable, not suser * Steve Whitehouse: Removed unused code. Fix to use sk->allocation * when required. * Patrick Caulfield: /proc/net/decnet now has object name/number * Steve Whitehouse: Fixed local port allocation, hashed sk list * Matthew Wilcox: Fixes for dn_ioctl() * Steve Whitehouse: New connect/accept logic to allow timeouts and * prepare for sendpage etc. */ /****************************************************************************** (c) 1995-1998 E.M. 
Serrat emserrat@geocities.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. HISTORY: Version Kernel Date Author/Comments ------- ------ ---- --------------- Version 0.0.1 2.0.30 01-dic-97 Eduardo Marcelo Serrat (emserrat@geocities.com) First Development of DECnet Socket La- yer for Linux. Only supports outgoing connections. Version 0.0.2 2.1.105 20-jun-98 Patrick J. Caulfield (patrick@pandh.demon.co.uk) Port to new kernel development version. Version 0.0.3 2.1.106 25-jun-98 Eduardo Marcelo Serrat (emserrat@geocities.com) _ Added support for incoming connections so we can start developing server apps on Linux. - Module Support Version 0.0.4 2.1.109 21-jul-98 Eduardo Marcelo Serrat (emserrat@geocities.com) _ Added support for X11R6.4. Now we can use DECnet transport for X on Linux!!! - Version 0.0.5 2.1.110 01-aug-98 Eduardo Marcelo Serrat (emserrat@geocities.com) Removed bugs on flow control Removed bugs on incoming accessdata order - Version 0.0.6 2.1.110 07-aug-98 Eduardo Marcelo Serrat dn_recvmsg fixes Patrick J. 
Caulfield		dn_bind fixes
*******************************************************************************/

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/netfilter.h>
#include <linux/seq_file.h>
#include <net/sock.h>
#include <net/tcp_states.h>
#include <net/flow.h>
#include <asm/ioctls.h>
#include <linux/capability.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <net/net_namespace.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_nsp.h>
#include <net/dn_dev.h>
#include <net/dn_route.h>
#include <net/dn_fib.h>
#include <net/dn_neigh.h>

/*
 * A DECnet socket: the generic struct sock immediately followed by the
 * DECnet session control private data (reached through DN_SK(sk)).
 */
struct dn_sock {
	struct sock sk;
	struct dn_scp scp;
};

static void dn_keepalive(struct sock *sk);

/* Bound-socket hash table: 256 chains, indexed by low byte of the port. */
#define DN_SK_HASH_SHIFT 8
#define DN_SK_HASH_SIZE (1 << DN_SK_HASH_SHIFT)
#define DN_SK_HASH_MASK (DN_SK_HASH_SIZE - 1)


static const struct proto_ops dn_proto_ops;
/* Protects dn_sk_hash[] and dn_wild_sk. */
static DEFINE_RWLOCK(dn_hash_lock);
static struct hlist_head dn_sk_hash[DN_SK_HASH_SIZE];
static struct hlist_head dn_wild_sk;	/* holds at most one SDF_WILD socket */
static atomic_long_t decnet_memory_allocated;

static int __dn_setsockopt(struct socket *sock, int level, int optname,
		char __user *optval, unsigned int optlen, int flags);
static int __dn_getsockopt(struct socket *sock, int level, int optname,
		char __user *optval, int __user *optlen, int flags);

/*
 * Pick the hash chain @sk should live on.  A wildcard socket gets the
 * dedicated dn_wild_sk slot, but only while that slot is empty (there can
 * only be one); otherwise NULL is returned and the caller fails with
 * EADDRINUSE.  Called with dn_hash_lock held (see dn_hash_sock()).
 */
static struct hlist_head *dn_find_list(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	if (scp->addr.sdn_flags & SDF_WILD)
		return hlist_empty(&dn_wild_sk) ? &dn_wild_sk : NULL;

	return &dn_sk_hash[le16_to_cpu(scp->addrloc) & DN_SK_HASH_MASK];
}

/*
 * Valid ports are those greater than zero and not already in use.
 * Returns 0 if @port may be used, -1 otherwise.
 */
static int check_port(__le16 port)
{
	struct sock *sk;

	if (port == 0)
		return -1;

	sk_for_each(sk, &dn_sk_hash[le16_to_cpu(port) & DN_SK_HASH_MASK]) {
		struct dn_scp *scp = DN_SK(sk);
		if (scp->addrloc == port)
			return -1;
	}
	return 0;
}

/*
 * Allocate the next free local port for @sk, searching upwards from the
 * last port handed out (kept in the static 'port'; unsigned short
 * arithmetic wraps it around, and check_port() rejects 0).  Returns 1 on
 * success with scp->addrloc set, or 0 if the search wrapped all the way
 * back to its starting point.  Caller holds dn_hash_lock.
 */
static unsigned short port_alloc(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	static unsigned short port = 0x2000;
	unsigned short i_port = port;

	while (check_port(cpu_to_le16(++port)) != 0) {
		if (port == i_port)
			return 0;
	}

	scp->addrloc = cpu_to_le16(port);

	return 1;
}

/*
 * Since this is only ever called from user
 * level, we don't need a write_lock() version
 * of this.
 *
 * Returns 0 on success, -EUSERS if no local port could be allocated, or
 * -EADDRINUSE if the wildcard slot is already taken.
 */
static int dn_hash_sock(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);
	struct hlist_head *list;
	int rv = -EUSERS;

	BUG_ON(sk_hashed(sk));

	write_lock_bh(&dn_hash_lock);

	if (!scp->addrloc && !port_alloc(sk))
		goto out;

	rv = -EADDRINUSE;
	if ((list = dn_find_list(sk)) == NULL)
		goto out;

	sk_add_node(sk, list);
	rv = 0;
out:
	write_unlock_bh(&dn_hash_lock);
	return rv;
}

static void dn_unhash_sock(struct sock *sk)
{
	write_lock(&dn_hash_lock);
	sk_del_node_init(sk);
	write_unlock(&dn_hash_lock);
}

/* As dn_unhash_sock(), but also disables bottom halves while locked. */
static void dn_unhash_sock_bh(struct sock *sk)
{
	write_lock_bh(&dn_hash_lock);
	sk_del_node_init(sk);
	write_unlock_bh(&dn_hash_lock);
}

/*
 * Hash chain for a listening address: the object number when non-zero,
 * otherwise a simple xor/shift hash over the object name bytes.
 */
static struct hlist_head *listen_hash(struct sockaddr_dn *addr)
{
	int i;
	unsigned int hash = addr->sdn_objnum;

	if (hash == 0) {
		hash = addr->sdn_objnamel;
		for (i = 0; i < le16_to_cpu(addr->sdn_objnamel); i++) {
			hash ^= addr->sdn_objname[i];
			hash ^= (hash << 3);
		}
	}

	return &dn_sk_hash[hash & DN_SK_HASH_MASK];
}

/*
 * Called to transform a socket from bound (i.e. with a local address)
 * into a listening socket (doesn't need a local port number) and rehashes
 * based upon the object name/number.
 */
static void dn_rehash_sock(struct sock *sk)
{
	struct hlist_head *list;
	struct dn_scp *scp = DN_SK(sk);

	/* A wildcard socket already lives on the dedicated slot. */
	if (scp->addr.sdn_flags & SDF_WILD)
		return;

	write_lock_bh(&dn_hash_lock);
	sk_del_node_init(sk);
	DN_SK(sk)->addrloc = 0;	/* listening sockets have no local port */
	list = listen_hash(&DN_SK(sk)->addr);
	sk_add_node(sk, list);
	write_unlock_bh(&dn_hash_lock);
}

/*
 * Encode @sdn as an on-the-wire "username" at @buf, using format @type:
 * type 0 carries just the object number, types 1 and 2 carry the object
 * name with differing headers.  Returns the number of bytes written.
 * The caller must provide a buffer large enough for the chosen format.
 */
int dn_sockaddr2username(struct sockaddr_dn *sdn, unsigned char *buf, unsigned char type)
{
	int len = 2;

	*buf++ = type;

	switch (type) {
	case 0:
		*buf++ = sdn->sdn_objnum;
		break;
	case 1:
		*buf++ = 0;
		*buf++ = le16_to_cpu(sdn->sdn_objnamel);
		memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
		len = 3 + le16_to_cpu(sdn->sdn_objnamel);
		break;
	case 2:
		memset(buf, 0, 5);
		buf += 5;
		*buf++ = le16_to_cpu(sdn->sdn_objnamel);
		memcpy(buf, sdn->sdn_objname, le16_to_cpu(sdn->sdn_objnamel));
		len = 7 + le16_to_cpu(sdn->sdn_objnamel);
		break;
	}

	return len;
}

/*
 * On reception of usernames, we handle types 1 and 0 for destination
 * addresses only. Types 2 and 4 are used for source addresses, but the
 * UIC, GIC are ignored and they are both treated the same way. Type 3
 * is never used as I've no idea what its purpose might be or what its
 * format is.
 */
int dn_username2sockaddr(unsigned char *data, int len, struct sockaddr_dn *sdn, unsigned char *fmt)
{
	unsigned char type;
	int size = len;
	int namel = 12;	/* max object name length; format 1 allows 16 */

	sdn->sdn_objnum = 0;
	sdn->sdn_objnamel = cpu_to_le16(0);
	memset(sdn->sdn_objname, 0, DN_MAXOBJL);

	if (len < 2)
		return -1;

	len -= 2;
	*fmt = *data++;
	type = *data++;

	switch (*fmt) {
	case 0:
		/* Bare object number, no name follows. */
		sdn->sdn_objnum = type;
		return 2;
	case 1:
		namel = 16;
		break;
	case 2:
		/* Skip the 4-byte UIC/GIC header. */
		len -= 4;
		data += 4;
		break;
	case 4:
		/* Skip the 8-byte UIC/GIC header. */
		len -= 8;
		data += 8;
		break;
	default:
		return -1;
	}

	len -= 1;

	if (len < 0)
		return -1;

	sdn->sdn_objnamel = cpu_to_le16(*data++);
	len -= le16_to_cpu(sdn->sdn_objnamel);

	/* Reject truncated input or over-long names. */
	if ((len < 0) || (le16_to_cpu(sdn->sdn_objnamel) > namel))
		return -1;

	memcpy(sdn->sdn_objname, data, le16_to_cpu(sdn->sdn_objnamel));

	return size - len;	/* number of bytes consumed */
}

/*
 * Find a listening socket for @addr: match by object number when either
 * side has one set, otherwise by exact object name.  Falls back to the
 * single wildcard socket if no specific listener matches.  Returns the
 * socket with a reference held, or NULL.
 */
struct sock *dn_sklist_find_listener(struct sockaddr_dn *addr)
{
	struct hlist_head *list = listen_hash(addr);
	struct sock *sk;

	read_lock(&dn_hash_lock);
	sk_for_each(sk, list) {
		struct dn_scp *scp = DN_SK(sk);
		if (sk->sk_state != TCP_LISTEN)
			continue;
		if (scp->addr.sdn_objnum) {
			if (scp->addr.sdn_objnum != addr->sdn_objnum)
				continue;
		} else {
			if (addr->sdn_objnum)
				continue;
			if (scp->addr.sdn_objnamel != addr->sdn_objnamel)
				continue;
			if (memcmp(scp->addr.sdn_objname, addr->sdn_objname,
				   le16_to_cpu(addr->sdn_objnamel)) != 0)
				continue;
		}
		sock_hold(sk);
		read_unlock(&dn_hash_lock);
		return sk;
	}

	sk = sk_head(&dn_wild_sk);
	if (sk) {
		if (sk->sk_state == TCP_LISTEN)
			sock_hold(sk);
		else
			sk = NULL;
	}

	read_unlock(&dn_hash_lock);
	return sk;
}

/*
 * Locate the established socket an incoming skb belongs to, matching on
 * source address, destination port and (once known) source port from the
 * skb control block.  Returns the socket with a reference held, or NULL.
 */
struct sock *dn_find_by_skb(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct sock *sk;
	struct dn_scp *scp;

	read_lock(&dn_hash_lock);
	sk_for_each(sk, &dn_sk_hash[le16_to_cpu(cb->dst_port) & DN_SK_HASH_MASK]) {
		scp = DN_SK(sk);
		if (cb->src != dn_saddr2dn(&scp->peer))
			continue;
		if (cb->dst_port != scp->addrloc)
			continue;
		/* addrrem == 0 means the remote port is not yet known. */
		if (scp->addrrem && (cb->src_port != scp->addrrem))
			continue;
		sock_hold(sk);
		goto found;
	}
	sk = NULL;
found:
	read_unlock(&dn_hash_lock);
	return sk;
}
/*
 * Socket destructor: drop all queued skbs and release the cached route.
 */
static void dn_destruct(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	skb_queue_purge(&scp->data_xmit_queue);
	skb_queue_purge(&scp->other_xmit_queue);
	skb_queue_purge(&scp->other_receive_queue);

	dst_release(rcu_dereference_check(sk->sk_dst_cache, 1));
}

static int dn_memory_pressure;

/* Set the memory pressure flag; note it is never cleared here */
static void dn_enter_memory_pressure(struct sock *sk)
{
	if (!dn_memory_pressure) {
		dn_memory_pressure = 1;
	}
}

static struct proto dn_proto = {
	.name			= "NSP",
	.owner			= THIS_MODULE,
	.enter_memory_pressure	= dn_enter_memory_pressure,
	.memory_pressure	= &dn_memory_pressure,
	.memory_allocated	= &decnet_memory_allocated,
	.sysctl_mem		= sysctl_decnet_mem,
	.sysctl_wmem		= sysctl_decnet_wmem,
	.sysctl_rmem		= sysctl_decnet_rmem,
	.max_header		= DN_MAX_NSP_DATA_HEADER + 64,
	.obj_size		= sizeof(struct dn_sock),
};

/*
 * Allocate and initialise a DECnet socket.  The session control port
 * (struct dn_scp) is set up with protocol defaults; NSP sequence
 * numbers, flow control state and timers are initialised.
 * Returns the new sock, or NULL on allocation failure.
 */
static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
{
	struct dn_scp *scp;
	struct sock *sk = sk_alloc(net, PF_DECnet, gfp, &dn_proto);

	if (!sk)
		goto out;

	if (sock)
		sock->ops = &dn_proto_ops;
	sock_init_data(sock, sk);

	sk->sk_backlog_rcv = dn_nsp_backlog_rcv;
	sk->sk_destruct    = dn_destruct;
	sk->sk_no_check    = 1;
	sk->sk_family      = PF_DECnet;
	sk->sk_protocol    = 0;
	sk->sk_allocation  = gfp;
	sk->sk_sndbuf	   = sysctl_decnet_wmem[1];
	sk->sk_rcvbuf	   = sysctl_decnet_rmem[1];

	/* Initialization of DECnet Session Control Port		*/
	scp = DN_SK(sk);
	scp->state	= DN_O;		/* Open			*/
	scp->numdat	= 1;		/* Next data seg to tx	*/
	scp->numoth	= 1;		/* Next oth data to tx  */
	scp->ackxmt_dat = 0;		/* Last data seg ack'ed	*/
	scp->ackxmt_oth = 0;		/* Last oth data ack'ed	*/
	scp->ackrcv_dat = 0;		/* Highest data ack recv*/
	scp->ackrcv_oth = 0;		/* Last oth data ack rec*/
	scp->flowrem_sw = DN_SEND;
	scp->flowloc_sw = DN_SEND;
	scp->flowrem_dat = 0;
	scp->flowrem_oth = 1;
	scp->flowloc_dat = 0;
	scp->flowloc_oth = 1;
	scp->services_rem = 0;
	scp->services_loc = 1 | NSP_FC_NONE;
	scp->info_rem = 0;
	scp->info_loc = 0x03; /* NSP version 4.1 */
	scp->segsize_rem = 230 - DN_MAX_NSP_DATA_HEADER; /* Default: Updated by
							    remote segsize */
	scp->nonagle = 0;
	scp->multi_ireq = 1;
	scp->accept_mode = ACC_IMMED;
	scp->addr.sdn_family    = AF_DECnet;
	scp->peer.sdn_family    = AF_DECnet;
	scp->accessdata.acc_accl = 5;
	memcpy(scp->accessdata.acc_acc, "LINUX", 5);

	scp->max_window   = NSP_MAX_WINDOW;
	scp->snd_window   = NSP_MIN_WINDOW;
	scp->nsp_srtt     = NSP_INITIAL_SRTT;
	scp->nsp_rttvar   = NSP_INITIAL_RTTVAR;
	scp->nsp_rxtshift = 0;

	skb_queue_head_init(&scp->data_xmit_queue);
	skb_queue_head_init(&scp->other_xmit_queue);
	skb_queue_head_init(&scp->other_receive_queue);

	scp->persist = 0;
	scp->persist_fxn = NULL;
	scp->keepalive = 10 * HZ;
	scp->keepalive_fxn = dn_keepalive;

	init_timer(&scp->delack_timer);
	scp->delack_pending = 0;
	scp->delack_fxn = dn_nsp_delayed_ack;

	dn_start_slow_timer(sk);
out:
	return sk;
}

/*
 * Keepalive timer.
 * FIXME: Should respond to SO_KEEPALIVE etc.
 */
static void dn_keepalive(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	/*
	 * By checking the other_data transmit queue is empty
	 * we are double checking that we are not sending too
	 * many of these keepalive frames.
	 */
	if (skb_queue_empty(&scp->other_xmit_queue))
		dn_nsp_send_link(sk, DN_NOCHANGE, 0);
}

/*
 * Timer for shutdown/destroyed sockets.
 * When socket is dead & no packets have been sent for a
 * certain amount of time, they are removed by this
 * routine. Also takes care of sending out DI & DC
 * frames at correct times.
 */
int dn_destroy_timer(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	scp->persist = dn_nsp_persist(sk);

	switch (scp->state) {
	case DN_DI:
		/* retransmit disconnect-initiate until the retry count runs out */
		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
		if (scp->nsp_rxtshift >= decnet_di_count)
			scp->state = DN_CN;
		return 0;

	case DN_DR:
		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, GFP_ATOMIC);
		if (scp->nsp_rxtshift >= decnet_dr_count)
			scp->state = DN_DRC;
		return 0;

	case DN_DN:
		if (scp->nsp_rxtshift < decnet_dn_count) {
			/* printk(KERN_DEBUG "dn_destroy_timer: DN\n"); */
			dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
					 GFP_ATOMIC);
			return 0;
		}
	}

	scp->persist = (HZ * decnet_time_wait);

	if (sk->sk_socket)
		return 0;

	/* orphaned socket idle past the time-wait period: tear it down */
	if ((jiffies - scp->stamp) >= (HZ * decnet_time_wait)) {
		dn_unhash_sock(sk);
		sock_put(sk);
		return 1;
	}

	return 0;
}

/*
 * Begin teardown of a socket: send the appropriate disconnect frame
 * for the current NSP state and arm dn_destroy_timer to finish the job.
 * Several case labels fall through deliberately.
 */
static void dn_destroy_sock(struct sock *sk)
{
	struct dn_scp *scp = DN_SK(sk);

	scp->nsp_rxtshift = 0; /* reset back off */

	if (sk->sk_socket) {
		if (sk->sk_socket->state != SS_UNCONNECTED)
			sk->sk_socket->state = SS_DISCONNECTING;
	}

	sk->sk_state = TCP_CLOSE;

	switch (scp->state) {
	case DN_DN:
		dn_nsp_send_disc(sk, NSP_DISCCONF, NSP_REASON_DC,
				 sk->sk_allocation);
		scp->persist_fxn = dn_destroy_timer;
		scp->persist = dn_nsp_persist(sk);
		break;
	case DN_CR:
		scp->state = DN_DR;
		goto disc_reject;
	case DN_RUN:
		scp->state = DN_DI;
		/* fall through */
	case DN_DI:
	case DN_DR:
disc_reject:
		dn_nsp_send_disc(sk, NSP_DISCINIT, 0, sk->sk_allocation);
		/* fall through */
	case DN_NC:
	case DN_NR:
	case DN_RJ:
	case DN_DIC:
	case DN_CN:
	case DN_DRC:
	case DN_CI:
	case DN_CD:
		scp->persist_fxn = dn_destroy_timer;
		scp->persist = dn_nsp_persist(sk);
		break;
	default:
		printk(KERN_DEBUG "DECnet: dn_destroy_sock passed socket in invalid state\n");
		/* fall through */
	case DN_O:
		/* never connected: release immediately */
		dn_stop_slow_timer(sk);
		dn_unhash_sock_bh(sk);
		sock_put(sk);
		break;
	}
}

/*
 * Format a 16-bit DECnet node address as "area.node" into buf.
 * Returns buf.
 */
char *dn_addr2asc(__u16 addr, char *buf)
{
	unsigned short node, area;

	node = addr & 0x03ff;
	area = addr >> 10;
	sprintf(buf, "%hd.%hd", area, node);

	return buf;
}

/*
 * PF_DECnet socket(2) handler.  Only SOCK_SEQPACKET (DNPROTO_NSP)
 * and SOCK_STREAM are supported, and only in the initial namespace.
 */
static int dn_create(struct net *net, struct socket *sock, int protocol, int kern)
{
	struct sock *sk;

	if (protocol < 0 || protocol > SK_PROTOCOL_MAX)
		return -EINVAL;

	if (!net_eq(net, &init_net))
		return -EAFNOSUPPORT;

	switch (sock->type) {
	case SOCK_SEQPACKET:
		if (protocol != DNPROTO_NSP)
			return -EPROTONOSUPPORT;
		break;
	case SOCK_STREAM:
		break;
	default:
		return -ESOCKTNOSUPPORT;
	}

	if ((sk = dn_alloc_sock(net, sock, GFP_KERNEL)) == NULL)
		return -ENOBUFS;

	sk->sk_protocol = protocol;

	return 0;
}

/*
 * close(2) handler: orphan the socket and run the destroy state machine.
 */
static int dn_release(struct socket *sock)
{
	struct sock *sk = sock->sk;

	if (sk) {
		sock_orphan(sk);
		sock_hold(sk);
		lock_sock(sk);
		dn_destroy_sock(sk);
		release_sock(sk);
		sock_put(sk);
	}

	return 0;
}

/*
 * bind(2) handler: validate the sockaddr_dn, check that any node
 * address given is local, then copy it in and hash the socket.
 * Binding an object number or a wild address needs CAP_NET_BIND_SERVICE.
 */
static int dn_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct sockaddr_dn *saddr = (struct sockaddr_dn *)uaddr;
	struct net_device *dev, *ldev;
	int rv;

	if (addr_len != sizeof(struct sockaddr_dn))
		return -EINVAL;

	if (saddr->sdn_family != AF_DECnet)
		return -EINVAL;

	if (le16_to_cpu(saddr->sdn_nodeaddrl) && (le16_to_cpu(saddr->sdn_nodeaddrl) != 2))
		return -EINVAL;

	if (le16_to_cpu(saddr->sdn_objnamel) > DN_MAXOBJL)
		return -EINVAL;

	if (saddr->sdn_flags & ~SDF_WILD)
		return -EINVAL;

	if (!capable(CAP_NET_BIND_SERVICE) && (saddr->sdn_objnum ||
	    (saddr->sdn_flags & SDF_WILD)))
		return -EACCES;

	if (!(saddr->sdn_flags & SDF_WILD)) {
		if (le16_to_cpu(saddr->sdn_nodeaddrl)) {
			/* verify the requested node address belongs to a local device */
			rcu_read_lock();
			ldev = NULL;
			for_each_netdev_rcu(&init_net, dev) {
				if (!dev->dn_ptr)
					continue;
				if (dn_dev_islocal(dev, dn_saddr2dn(saddr))) {
					ldev = dev;
					break;
				}
			}
			rcu_read_unlock();
			if (ldev == NULL)
				return -EADDRNOTAVAIL;
		}
	}

	rv = -EINVAL;
	lock_sock(sk);
	if (sock_flag(sk, SOCK_ZAPPED)) {
		memcpy(&scp->addr, saddr, addr_len);
		sock_reset_flag(sk, SOCK_ZAPPED);

		rv = dn_hash_sock(sk);
		if (rv)
			sock_set_flag(sk, SOCK_ZAPPED);
	}
	release_sock(sk);

	return rv;
}

/*
 * Bind an unbound socket to a default local address before connecting.
 */
static int dn_auto_bind(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	int rv;

	sock_reset_flag(sk, SOCK_ZAPPED);

	scp->addr.sdn_flags  = 0;
	scp->addr.sdn_objnum =
0;

	/*
	 * This stuff is to keep compatibility with Eduardo's
	 * patch. I hope I can dispense with it shortly...
	 */
	if ((scp->accessdata.acc_accl != 0) &&
	    (scp->accessdata.acc_accl <= 12)) {
		scp->addr.sdn_objnamel = cpu_to_le16(scp->accessdata.acc_accl);
		memcpy(scp->addr.sdn_objname, scp->accessdata.acc_acc, le16_to_cpu(scp->addr.sdn_objnamel));

		scp->accessdata.acc_accl = 0;
		memset(scp->accessdata.acc_acc, 0, 40);
	}
	/* End of compatibility stuff */

	scp->addr.sdn_add.a_len = cpu_to_le16(2);
	rv = dn_dev_bind_default((__le16 *)scp->addr.sdn_add.a_addr);
	if (rv == 0) {
		rv = dn_hash_sock(sk);
		if (rv)
			sock_set_flag(sk, SOCK_ZAPPED);
	}

	return rv;
}

/*
 * Confirm an incoming connection (state DN_CR): send the connect
 * confirm and wait (interruptibly, bounded by *timeo) for the link
 * to reach DN_RUN.  Returns 0 on success or a negative errno.
 */
static int dn_confirm_accept(struct sock *sk, long *timeo, gfp_t allocation)
{
	struct dn_scp *scp = DN_SK(sk);
	DEFINE_WAIT(wait);
	int err;

	if (scp->state != DN_CR)
		return -EINVAL;

	scp->state = DN_CC;
	scp->segsize_loc = dst_metric_advmss(__sk_dst_get(sk));
	dn_send_conn_conf(sk, allocation);

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	for(;;) {
		/* drop the socket lock while sleeping */
		release_sock(sk);
		if (scp->state == DN_CC)
			*timeo = schedule_timeout(*timeo);
		lock_sock(sk);
		err = 0;
		if (scp->state == DN_RUN)
			break;
		err = sock_error(sk);
		if (err)
			break;
		err = sock_intr_errno(*timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!*timeo)
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(sk), &wait);
	if (err == 0) {
		sk->sk_socket->state = SS_CONNECTED;
	} else if (scp->state != DN_CC) {
		sk->sk_socket->state = SS_UNCONNECTED;
	}
	return err;
}

/*
 * Wait for an in-progress connect (DN_CI/DN_CC) to reach DN_RUN.
 * Returns 0 on success, -EALREADY if no timeout remains, or another
 * negative errno.
 */
static int dn_wait_run(struct sock *sk, long *timeo)
{
	struct dn_scp *scp = DN_SK(sk);
	DEFINE_WAIT(wait);
	int err = 0;

	if (scp->state == DN_RUN)
		goto out;

	if (!*timeo)
		return -EALREADY;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	for(;;) {
		release_sock(sk);
		if (scp->state == DN_CI || scp->state == DN_CC)
			*timeo = schedule_timeout(*timeo);
		lock_sock(sk);
		err = 0;
		if (scp->state == DN_RUN)
			break;
		err = sock_error(sk);
		if (err)
			break;
		err = sock_intr_errno(*timeo);
		if (signal_pending(current))
			break;
		err = -ETIMEDOUT;
		if (!*timeo)
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(sk), &wait);
out:
	if (err == 0) {
		sk->sk_socket->state = SS_CONNECTED;
	} else if (scp->state != DN_CI && scp->state != DN_CC) {
		sk->sk_socket->state = SS_UNCONNECTED;
	}
	return err;
}

/*
 * Core connect logic (called with the socket locked): validate the
 * destination, auto-bind if needed, find a route, send the connect
 * initiate and optionally wait for the connection to complete.
 */
static int __dn_connect(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
{
	struct socket *sock = sk->sk_socket;
	struct dn_scp *scp = DN_SK(sk);
	int err = -EISCONN;
	struct flowidn fld;
	struct dst_entry *dst;

	if (sock->state == SS_CONNECTED)
		goto out;

	if (sock->state == SS_CONNECTING) {
		err = 0;
		if (scp->state == DN_RUN) {
			sock->state = SS_CONNECTED;
			goto out;
		}
		err = -ECONNREFUSED;
		if (scp->state != DN_CI && scp->state != DN_CC) {
			sock->state = SS_UNCONNECTED;
			goto out;
		}
		return dn_wait_run(sk, timeo);
	}

	err = -EINVAL;
	if (scp->state != DN_O)
		goto out;

	if (addr == NULL || addrlen != sizeof(struct sockaddr_dn))
		goto out;

	if (addr->sdn_family != AF_DECnet)
		goto out;

	if (addr->sdn_flags & SDF_WILD)
		goto out;

	if (sock_flag(sk, SOCK_ZAPPED)) {
		err = dn_auto_bind(sk->sk_socket);
		if (err)
			goto out;
	}

	memcpy(&scp->peer, addr, sizeof(struct sockaddr_dn));

	err = -EHOSTUNREACH;
	memset(&fld, 0, sizeof(fld));
	fld.flowidn_oif = sk->sk_bound_dev_if;
	fld.daddr = dn_saddr2dn(&scp->peer);
	fld.saddr = dn_saddr2dn(&scp->addr);
	dn_sk_ports_copy(&fld, scp);
	fld.flowidn_proto = DNPROTO_NSP;
	if (dn_route_output_sock(&sk->sk_dst_cache, &fld, sk, flags) < 0)
		goto out;
	dst = __sk_dst_get(sk);
	sk->sk_route_caps = dst->dev->features;
	sock->state = SS_CONNECTING;
	scp->state = DN_CI;
	scp->segsize_loc = dst_metric_advmss(dst);

	dn_nsp_send_conninit(sk, NSP_CI);
	err = -EINPROGRESS;
	if (*timeo) {
		err = dn_wait_run(sk, timeo);
	}
out:
	return err;
}

/*
 * connect(2) handler: thin locked wrapper around __dn_connect().
 */
static int dn_connect(struct socket *sock, struct sockaddr *uaddr, int addrlen, int flags)
{
	struct sockaddr_dn *addr = (struct sockaddr_dn *)uaddr;
	struct sock *sk = sock->sk;
	int err;
	long timeo =
sock_sndtimeo(sk, flags & O_NONBLOCK);

	lock_sock(sk);
	err = __dn_connect(sk, addr, addrlen, &timeo, 0);
	release_sock(sk);

	return err;
}

/*
 * Dispatch on current NSP state to complete any pending connect or
 * accept before I/O proceeds.  Returns 0 when the link is running.
 */
static inline int dn_check_state(struct sock *sk, struct sockaddr_dn *addr, int addrlen, long *timeo, int flags)
{
	struct dn_scp *scp = DN_SK(sk);

	switch (scp->state) {
	case DN_RUN:
		return 0;
	case DN_CR:
		return dn_confirm_accept(sk, timeo, sk->sk_allocation);
	case DN_CI:
	case DN_CC:
		return dn_wait_run(sk, timeo);
	case DN_O:
		return __dn_connect(sk, addr, addrlen, timeo, flags);
	}

	return -EINVAL;
}

/*
 * Copy the access-control (user/password/account) fields out of a
 * connect-initiate skb and advance past them.
 */
static void dn_access_copy(struct sk_buff *skb, struct accessdata_dn *acc)
{
	unsigned char *ptr = skb->data;

	acc->acc_userl = *ptr++;
	memcpy(&acc->acc_user, ptr, acc->acc_userl);
	ptr += acc->acc_userl;

	acc->acc_passl = *ptr++;
	memcpy(&acc->acc_pass, ptr, acc->acc_passl);
	ptr += acc->acc_passl;

	acc->acc_accl = *ptr++;
	memcpy(&acc->acc_acc, ptr, acc->acc_accl);

	skb_pull(skb, acc->acc_accl + acc->acc_passl + acc->acc_userl + 3);
}

/*
 * Copy the optional user data field out of a connect-initiate skb.
 */
static void dn_user_copy(struct sk_buff *skb, struct optdata_dn *opt)
{
	unsigned char *ptr = skb->data;
	u16 len = *ptr++; /* yes, it's 8bit on the wire */

	BUG_ON(len > 16); /* we've checked the contents earlier */
	opt->opt_optl   = cpu_to_le16(len);
	opt->opt_status = 0;
	memcpy(opt->opt_data, ptr, len);
	skb_pull(skb, len + 1);
}

/*
 * Sleep on a listening socket until a connect-initiate skb arrives,
 * the timeout expires or a signal is delivered.  Returns the skb or
 * an ERR_PTR.
 */
static struct sk_buff *dn_wait_for_connect(struct sock *sk, long *timeo)
{
	DEFINE_WAIT(wait);
	struct sk_buff *skb = NULL;
	int err = 0;

	prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	for(;;) {
		release_sock(sk);
		skb = skb_dequeue(&sk->sk_receive_queue);
		if (skb == NULL) {
			*timeo = schedule_timeout(*timeo);
			skb = skb_dequeue(&sk->sk_receive_queue);
		}
		lock_sock(sk);
		if (skb != NULL)
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(*timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!*timeo)
			break;
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
	}
	finish_wait(sk_sleep(sk), &wait);

	return skb == NULL ?
 */
	DN_SK(newsk)->addr.sdn_flags &= ~SDF_WILD;

	skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->addr), &type));
	skb_pull(skb, dn_username2sockaddr(skb->data, skb->len, &(DN_SK(newsk)->peer), &type));
	*(__le16 *)(DN_SK(newsk)->peer.sdn_add.a_addr) = cb->src;
	*(__le16 *)(DN_SK(newsk)->addr.sdn_add.a_addr) = cb->dst;

	menuver = *skb->data;
	skb_pull(skb, 1);

	/* menu-version bits select which optional fields follow */
	if (menuver & DN_MENUVER_ACC)
		dn_access_copy(skb, &(DN_SK(newsk)->accessdata));

	if (menuver & DN_MENUVER_USR)
		dn_user_copy(skb, &(DN_SK(newsk)->conndata_in));

	if (menuver & DN_MENUVER_PRX)
		DN_SK(newsk)->peer.sdn_flags |= SDF_PROXY;

	if (menuver & DN_MENUVER_UIC)
		DN_SK(newsk)->peer.sdn_flags |= SDF_UICPROXY;

	kfree_skb(skb);

	memcpy(&(DN_SK(newsk)->conndata_out), &(DN_SK(sk)->conndata_out),
	       sizeof(struct optdata_dn));
	memcpy(&(DN_SK(newsk)->discdata_out), &(DN_SK(sk)->discdata_out),
	       sizeof(struct optdata_dn));

	lock_sock(newsk);
	err = dn_hash_sock(newsk);
	if (err == 0) {
		sock_reset_flag(newsk, SOCK_ZAPPED);
		dn_send_conn_ack(newsk);

		/*
		 * Here we use sk->sk_allocation since although the conn conf is
		 * for the newsk, the context is the old socket.
		 */
		if (DN_SK(newsk)->accept_mode == ACC_IMMED)
			err = dn_confirm_accept(newsk, &timeo,
						sk->sk_allocation);
	}
	release_sock(newsk);
	return err;
}

/*
 * getsockname(2)/getpeername(2) handler; peer selects which address.
 */
static int dn_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer)
{
	struct sockaddr_dn *sa = (struct sockaddr_dn *)uaddr;
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);

	*uaddr_len = sizeof(struct sockaddr_dn);

	lock_sock(sk);

	if (peer) {
		if ((sock->state != SS_CONNECTED &&
		     sock->state != SS_CONNECTING) &&
		    scp->accept_mode == ACC_IMMED) {
			release_sock(sk);
			return -ENOTCONN;
		}

		memcpy(sa, &scp->peer, sizeof(struct sockaddr_dn));
	} else {
		memcpy(sa, &scp->addr, sizeof(struct sockaddr_dn));
	}

	release_sock(sk);

	return 0;
}

/*
 * poll(2) handler: datagram_poll plus POLLRDBAND when out-of-band
 * ("other") data is queued.
 */
static unsigned int dn_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	int mask = datagram_poll(file, sock, wait);

	if (!skb_queue_empty(&scp->other_receive_queue))
		mask |= POLLRDBAND;

	return mask;
}

/*
 * ioctl(2) handler: device address ioctls, SIOCATMARK, and the
 * standard TIOCOUTQ/TIOCINQ queue-size queries.
 */
static int dn_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	int err = -EOPNOTSUPP;
	long amount = 0;
	struct sk_buff *skb;
	int val;

	switch(cmd) {
	case SIOCGIFADDR:
	case SIOCSIFADDR:
		return dn_dev_ioctl(cmd, (void __user *)arg);

	case SIOCATMARK:
		lock_sock(sk);
		val = !skb_queue_empty(&scp->other_receive_queue);
		if (scp->state != DN_RUN)
			val = -ENOTCONN;
		release_sock(sk);
		return val;

	case TIOCOUTQ:
		amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk);
		if (amount < 0)
			amount = 0;
		err = put_user(amount, (int __user *)arg);
		break;

	case TIOCINQ:
		lock_sock(sk);
		skb = skb_peek(&scp->other_receive_queue);
		if (skb) {
			/* out-of-band data takes precedence */
			amount = skb->len;
		} else {
			skb_queue_walk(&sk->sk_receive_queue, skb)
				amount += skb->len;
		}
		release_sock(sk);
		err = put_user(amount, (int __user *)arg);
		break;

	default:
		err = -ENOIOCTLCMD;
		break;
	}

	return err;
}

/*
 * listen(2) handler: only valid on a bound, open (DN_O) socket.
 */
static int dn_listen(struct socket *sock, int backlog)
{
	struct sock *sk = sock->sk;
	int err = -EINVAL;

	lock_sock(sk);

	if
 (sock_flag(sk, SOCK_ZAPPED))
		goto out;

	if ((DN_SK(sk)->state != DN_O) || (sk->sk_state == TCP_LISTEN))
		goto out;

	sk->sk_max_ack_backlog = backlog;
	sk->sk_ack_backlog     = 0;
	sk->sk_state           = TCP_LISTEN;
	err = 0;
	dn_rehash_sock(sk);

out:
	release_sock(sk);

	return err;
}

/*
 * shutdown(2) handler: only SHUT_RDWR is accepted; tears the whole
 * connection down via dn_destroy_sock().
 */
static int dn_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	int err = -ENOTCONN;

	lock_sock(sk);

	if (sock->state == SS_UNCONNECTED)
		goto out;

	err = 0;
	if (sock->state == SS_DISCONNECTING)
		goto out;

	err = -EINVAL;
	if (scp->state == DN_O)
		goto out;

	if (how != SHUT_RDWR)
		goto out;

	sk->sk_shutdown = SHUTDOWN_MASK;
	dn_destroy_sock(sk);
	err = 0;

out:
	release_sock(sk);

	return err;
}

/* setsockopt(2) handler: locked wrapper around __dn_setsockopt() */
static int dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);
	err = __dn_setsockopt(sock, level, optname, optval, optlen, 0);
	release_sock(sk);

	return err;
}

/*
 * Handle the DECnet-specific socket options; unknown options are
 * passed to netfilter when CONFIG_NETFILTER is set.  Called with the
 * socket locked.
 */
static int __dn_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen, int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	long timeo;
	union {
		struct optdata_dn opt;
		struct accessdata_dn acc;
		int mode;
		unsigned long win;
		int val;
		unsigned char services;
		unsigned char info;
	} u;
	int err;

	if (optlen && !optval)
		return -EINVAL;

	if (optlen > sizeof(u))
		return -EINVAL;

	if (copy_from_user(&u, optval, optlen))
		return -EFAULT;

	switch (optname) {
	case DSO_CONDATA:
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if ((scp->state != DN_O) && (scp->state != DN_CR))
			return -EINVAL;

		if (optlen != sizeof(struct optdata_dn))
			return -EINVAL;

		if (le16_to_cpu(u.opt.opt_optl) > 16)
			return -EINVAL;

		memcpy(&scp->conndata_out, &u.opt, optlen);
		break;

	case DSO_DISDATA:
		if (sock->state != SS_CONNECTED &&
		    scp->accept_mode == ACC_IMMED)
			return -ENOTCONN;

		if (optlen != sizeof(struct optdata_dn))
			return -EINVAL;

		if (le16_to_cpu(u.opt.opt_optl) > 16)
			return -EINVAL;

		memcpy(&scp->discdata_out, &u.opt, optlen);
		break;

	case DSO_CONACCESS:
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if (scp->state != DN_O)
			return -EINVAL;

		if (optlen != sizeof(struct accessdata_dn))
			return -EINVAL;

		if ((u.acc.acc_accl > DN_MAXACCL) ||
		    (u.acc.acc_passl > DN_MAXACCL) ||
		    (u.acc.acc_userl > DN_MAXACCL))
			return -EINVAL;

		memcpy(&scp->accessdata, &u.acc, optlen);
		break;

	case DSO_ACCEPTMODE:
		if (sock->state == SS_CONNECTED)
			return -EISCONN;
		if (scp->state != DN_O)
			return -EINVAL;

		if (optlen != sizeof(int))
			return -EINVAL;

		if ((u.mode != ACC_IMMED) && (u.mode != ACC_DEFER))
			return -EINVAL;

		scp->accept_mode = (unsigned char)u.mode;
		break;

	case DSO_CONACCEPT:
		if (scp->state != DN_CR)
			return -EINVAL;
		timeo = sock_rcvtimeo(sk, 0);
		err = dn_confirm_accept(sk, &timeo, sk->sk_allocation);
		return err;

	case DSO_CONREJECT:
		if (scp->state != DN_CR)
			return -EINVAL;

		scp->state = DN_DR;
		sk->sk_shutdown = SHUTDOWN_MASK;
		dn_nsp_send_disc(sk, 0x38, 0, sk->sk_allocation);
		break;

	default:
#ifdef CONFIG_NETFILTER
		return nf_setsockopt(sk, PF_DECnet, optname, optval, optlen);
#endif
	case DSO_LINKINFO:
	case DSO_STREAM:
	case DSO_SEQPACKET:
		return -ENOPROTOOPT;

	case DSO_MAXWINDOW:
		if (optlen != sizeof(unsigned long))
			return -EINVAL;
		if (u.win > NSP_MAX_WINDOW)
			u.win = NSP_MAX_WINDOW;
		if (u.win == 0)
			return -EINVAL;
		scp->max_window = u.win;
		if (scp->snd_window > u.win)
			scp->snd_window = u.win;
		break;

	case DSO_NODELAY:
		/* nonagle: 1 = nodelay, 2 = cork; the two are exclusive */
		if (optlen != sizeof(int))
			return -EINVAL;
		if (scp->nonagle == 2)
			return -EINVAL;
		scp->nonagle = (u.val == 0) ? 0 : 1;
		/* if (scp->nonagle == 1) { Push pending frames } */
		break;

	case DSO_CORK:
		if (optlen != sizeof(int))
			return -EINVAL;
		if (scp->nonagle == 1)
			return -EINVAL;
		scp->nonagle = (u.val == 0) ? 0 : 2;
		/* if (scp->nonagle == 0) { Push pending frames } */
		break;

	case DSO_SERVICES:
		if (optlen != sizeof(unsigned char))
			return -EINVAL;
		if ((u.services & ~NSP_FC_MASK) != 0x01)
			return -EINVAL;
		if ((u.services & NSP_FC_MASK) == NSP_FC_MASK)
			return -EINVAL;
		scp->services_loc = u.services;
		break;

	case DSO_INFO:
		if (optlen != sizeof(unsigned char))
			return -EINVAL;
		if (u.info & 0xfc)
			return -EINVAL;
		scp->info_loc = u.info;
		break;
	}

	return 0;
}

/* getsockopt(2) handler: locked wrapper around __dn_getsockopt() */
static int dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
{
	struct sock *sk = sock->sk;
	int err;

	lock_sock(sk);
	err = __dn_getsockopt(sock, level, optname, optval, optlen, 0);
	release_sock(sk);

	return err;
}

/*
 * Read back the DECnet-specific socket options.  r_data/r_len select
 * what gets copied to userspace after the switch.  Called with the
 * socket locked.
 */
static int __dn_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen, int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct linkinfo_dn link;
	unsigned int r_len;
	void *r_data = NULL;
	unsigned int val;

	if (get_user(r_len , optlen))
		return -EFAULT;

	switch (optname) {
	case DSO_CONDATA:
		if (r_len > sizeof(struct optdata_dn))
			r_len = sizeof(struct optdata_dn);
		r_data = &scp->conndata_in;
		break;

	case DSO_DISDATA:
		if (r_len > sizeof(struct optdata_dn))
			r_len = sizeof(struct optdata_dn);
		r_data = &scp->discdata_in;
		break;

	case DSO_CONACCESS:
		if (r_len > sizeof(struct accessdata_dn))
			r_len = sizeof(struct accessdata_dn);
		r_data = &scp->accessdata;
		break;

	case DSO_ACCEPTMODE:
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->accept_mode;
		break;

	case DSO_LINKINFO:
		if (r_len > sizeof(struct linkinfo_dn))
			r_len = sizeof(struct linkinfo_dn);

		memset(&link, 0, sizeof(link));

		switch (sock->state) {
		case SS_CONNECTING:
			link.idn_linkstate = LL_CONNECTING;
			break;
		case SS_DISCONNECTING:
			link.idn_linkstate = LL_DISCONNECTING;
			break;
		case SS_CONNECTED:
			link.idn_linkstate = LL_RUNNING;
			break;
		default:
			link.idn_linkstate = LL_INACTIVE;
		}

		link.idn_segsize = scp->segsize_rem;
		r_data = &link;
		break;
	default:
#ifdef CONFIG_NETFILTER
	{
		int ret, len;

		if (get_user(len, optlen))
			return -EFAULT;

		ret = nf_getsockopt(sk, PF_DECnet, optname, optval, &len);
		if (ret >= 0)
			ret = put_user(len, optlen);
		return ret;
	}
#endif
	case DSO_STREAM:
	case DSO_SEQPACKET:
	case DSO_CONACCEPT:
	case DSO_CONREJECT:
		return -ENOPROTOOPT;

	case DSO_MAXWINDOW:
		if (r_len > sizeof(unsigned long))
			r_len = sizeof(unsigned long);
		r_data = &scp->max_window;
		break;

	case DSO_NODELAY:
		if (r_len > sizeof(int))
			r_len = sizeof(int);
		val = (scp->nonagle == 1);
		r_data = &val;
		break;

	case DSO_CORK:
		if (r_len > sizeof(int))
			r_len = sizeof(int);
		val = (scp->nonagle == 2);
		r_data = &val;
		break;

	case DSO_SERVICES:
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->services_rem;
		break;

	case DSO_INFO:
		if (r_len > sizeof(unsigned char))
			r_len = sizeof(unsigned char);
		r_data = &scp->info_rem;
		break;
	}

	if (r_data) {
		if (copy_to_user(optval, r_data, r_len))
			return -EFAULT;
		if (put_user(r_len, optlen))
			return -EFAULT;
	}

	return 0;
}

/*
 * Decide whether a receive can complete now: returns 1 if the queue
 * holds an end-of-message record, enough bytes to satisfy target, or
 * (for MSG_OOB) any data at all.
 */
static int dn_data_ready(struct sock *sk, struct sk_buff_head *q, int flags, int target)
{
	struct sk_buff *skb;
	int len = 0;

	if (flags & MSG_OOB)
		return !skb_queue_empty(q) ? 1 : 0;

	skb_queue_walk(q, skb) {
		struct dn_skb_cb *cb = DN_SKB_CB(skb);
		len += skb->len;

		if (cb->nsp_flags & 0x40) {
			/* SOCK_SEQPACKET reads to EOM */
			if (sk->sk_type == SOCK_SEQPACKET)
				return 1;
			/* so does SOCK_STREAM unless WAITALL is specified */
			if (!(flags & MSG_WAITALL))
				return 1;
		}

		/* minimum data length for read exceeded */
		if (len >= target)
			return 1;
	}

	return 0;
}

/*
 * recvmsg(2) handler: wait for data (or OOB data), then copy queued
 * skbs to the user iovec, releasing fully-consumed skbs and re-opening
 * remote flow control when the receive queue drains.
 */
static int dn_recvmsg(struct kiocb *iocb, struct socket *sock,
		      struct msghdr *msg, size_t size, int flags)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	struct sk_buff_head *queue = &sk->sk_receive_queue;
	size_t target = size > 1 ? 1 : 0;
	size_t copied = 0;
	int rv = 0;
	struct sk_buff *skb, *n;
	struct dn_skb_cb *cb = NULL;
	unsigned char eor = 0;
	long timeo = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);

	lock_sock(sk);

	if (sock_flag(sk, SOCK_ZAPPED)) {
		rv = -EADDRNOTAVAIL;
		goto out;
	}

	if (sk->sk_shutdown & RCV_SHUTDOWN) {
		rv = 0;
		goto out;
	}

	rv = dn_check_state(sk, NULL, 0, &timeo, flags);
	if (rv)
		goto out;

	if (flags & ~(MSG_CMSG_COMPAT|MSG_PEEK|MSG_OOB|MSG_WAITALL|MSG_DONTWAIT|MSG_NOSIGNAL)) {
		rv = -EOPNOTSUPP;
		goto out;
	}

	if (flags & MSG_OOB)
		queue = &scp->other_receive_queue;

	if (flags & MSG_WAITALL)
		target = size;

	/*
	 * See if there is data ready to read, sleep if there isn't
	 */
	for(;;) {
		DEFINE_WAIT(wait);

		if (sk->sk_err)
			goto out;

		if (!skb_queue_empty(&scp->other_receive_queue)) {
			if (!(flags & MSG_OOB)) {
				/* report pending OOB data once per arrival */
				msg->msg_flags |= MSG_OOB;
				if (!scp->other_report) {
					scp->other_report = 1;
					goto out;
				}
			}
		}

		if (scp->state != DN_RUN)
			goto out;

		if (signal_pending(current)) {
			rv = sock_intr_errno(timeo);
			goto out;
		}

		if (dn_data_ready(sk, queue, flags, target))
			break;

		if (flags & MSG_DONTWAIT) {
			rv = -EWOULDBLOCK;
			goto out;
		}

		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target));
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		finish_wait(sk_sleep(sk), &wait);
	}

	skb_queue_walk_safe(queue, skb, n) {
		unsigned int chunk = skb->len;
		cb = DN_SKB_CB(skb);

		if ((chunk + copied) > size)
			chunk = size - copied;

		if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
			rv = -EFAULT;
			break;
		}
		copied += chunk;

		if (!(flags & MSG_PEEK))
			skb_pull(skb, chunk);

		eor = cb->nsp_flags & 0x40;

		if (skb->len == 0) {
			skb_unlink(skb, queue);
			kfree_skb(skb);
			/*
			 * N.B. Don't refer to skb or cb after this point
			 * in loop.
			 */
			if ((scp->flowloc_sw == DN_DONTSEND) && !dn_congested(sk)) {
				/* queue drained: tell the peer it may send again */
				scp->flowloc_sw = DN_SEND;
				dn_nsp_send_link(sk, DN_SEND, 0);
			}
		}

		if (eor) {
			if (sk->sk_type == SOCK_SEQPACKET)
				break;
			if (!(flags & MSG_WAITALL))
				break;
		}

		if (flags & MSG_OOB)
			break;

		if (copied >= target)
			break;
	}

	rv = copied;

	if (eor && (sk->sk_type == SOCK_SEQPACKET))
		msg->msg_flags |= MSG_EOR;

out:
	if (rv == 0)
		rv = (flags & MSG_PEEK) ? -sk->sk_err : sock_error(sk);

	if ((rv >= 0) && msg->msg_name) {
		memcpy(msg->msg_name, &scp->peer, sizeof(struct sockaddr_dn));
		msg->msg_namelen = sizeof(struct sockaddr_dn);
	}

	release_sock(sk);

	return rv;
}

/*
 * Return 1 when the transmit queue is full: either the send window
 * is exhausted or (with flow control active) the remote count for the
 * relevant data class has reached zero.
 */
static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)
{
	unsigned char fctype = scp->services_rem & NSP_FC_MASK;
	if (skb_queue_len(queue) >= scp->snd_window)
		return 1;
	if (fctype != NSP_FC_NONE) {
		if (flags & MSG_OOB) {
			if (scp->flowrem_oth == 0)
				return 1;
		} else {
			if (scp->flowrem_dat == 0)
				return 1;
		}
	}
	return 0;
}

/*
 * The DECnet spec requires that the "routing layer" accepts packets which
 * are at least 230 bytes in size. This excludes any headers which the NSP
 * layer might add, so we always assume that we'll be using the maximal
 * length header on data packets. The variation in length is due to the
 * inclusion (or not) of the two 16 bit acknowledgement fields so it doesn't
 * make much practical difference.
 */
unsigned int dn_mss_from_pmtu(struct net_device *dev, int mtu)
{
	/* floor: the 230-byte minimum the routing layer must accept */
	unsigned int mss = 230 - DN_MAX_NSP_DATA_HEADER;
	if (dev) {
		struct dn_dev *dn_db = rcu_dereference_raw(dev->dn_ptr);
		mtu -= LL_RESERVED_SPACE(dev);
		if (dn_db->use_long)
			mtu -= 21;
		else
			mtu -= 6;
		mtu -= DN_MAX_NSP_DATA_HEADER;
	} else {
		/*
		 * 21 = long header, 16 = guess at MAC header length
		 */
		mtu -= (21 + DN_MAX_NSP_DATA_HEADER + 16);
	}
	if (mtu > mss)
		mss = mtu;
	return mss;
}

/*
 * Current maximum segment size for this socket; OOB ("other") data
 * is capped at 16 bytes per packet.
 */
static inline unsigned int dn_current_mss(struct sock *sk, int flags)
{
	struct dst_entry *dst = __sk_dst_get(sk);
	struct dn_scp *scp = DN_SK(sk);
	int mss_now = min_t(int, scp->segsize_loc, scp->segsize_rem);

	/* Other data messages are limited to 16 bytes per packet */
	if (flags & MSG_OOB)
		return 16;

	/* This works out the maximum size of segment we can send out */
	if (dst) {
		u32 mtu = dst_mtu(dst);
		mss_now = min_t(int, dn_mss_from_pmtu(dst->dev, mtu), mss_now);
	}

	return mss_now;
}

/*
 * N.B. We get the timeout wrong here, but then we always did get it
 * wrong before and this is another step along the road to correcting
 * it. It ought to get updated each time we pass through the routine,
 * but in practise it probably doesn't matter too much for now.
 */
static inline struct sk_buff *dn_alloc_send_pskb(struct sock *sk,
						 unsigned long datalen,
						 int noblock, int *errcode)
{
	struct sk_buff *skb = sock_alloc_send_skb(sk, datalen,
						  noblock, errcode);
	if (skb) {
		skb->protocol = htons(ETH_P_DNA_RT);
		skb->pkt_type = PACKET_OUTGOING;
	}
	return skb;
}

/*
 * sendmsg(2) handler: segment the user data to the current MSS,
 * honouring the send window and NSP flow control, setting the NSP
 * begin/end-of-message flags on each segment as appropriate.
 */
static int dn_sendmsg(struct kiocb *iocb, struct socket *sock,
		      struct msghdr *msg, size_t size)
{
	struct sock *sk = sock->sk;
	struct dn_scp *scp = DN_SK(sk);
	size_t mss;
	struct sk_buff_head *queue = &scp->data_xmit_queue;
	int flags = msg->msg_flags;
	int err = 0;
	size_t sent = 0;
	int addr_len = msg->msg_namelen;
	struct sockaddr_dn *addr = (struct sockaddr_dn *)msg->msg_name;
	struct sk_buff *skb = NULL;
	struct dn_skb_cb *cb;
	size_t len;
	unsigned char fctype;
	long timeo;

	if (flags & ~(MSG_TRYHARD|MSG_OOB|MSG_DONTWAIT|MSG_EOR|MSG_NOSIGNAL|MSG_MORE|MSG_CMSG_COMPAT))
		return -EOPNOTSUPP;

	if (addr_len && (addr_len != sizeof(struct sockaddr_dn)))
		return -EINVAL;

	lock_sock(sk);
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
	/*
	 * The only difference between stream sockets and sequenced packet
	 * sockets is that the stream sockets always behave as if MSG_EOR
	 * has been set.
	 */
	if (sock->type == SOCK_STREAM) {
		if (flags & MSG_EOR) {
			err = -EINVAL;
			goto out;
		}
		flags |= MSG_EOR;
	}

	err = dn_check_state(sk, addr, addr_len, &timeo, flags);
	if (err)
		goto out_err;

	if (sk->sk_shutdown & SEND_SHUTDOWN) {
		err = -EPIPE;
		if (!(flags & MSG_NOSIGNAL))
			send_sig(SIGPIPE, current, 0);
		goto out_err;
	}

	if ((flags & MSG_TRYHARD) && sk->sk_dst_cache)
		dst_negative_advice(sk);

	mss = scp->segsize_rem;
	fctype = scp->services_rem & NSP_FC_MASK;

	mss = dn_current_mss(sk, flags);

	if (flags & MSG_OOB) {
		queue = &scp->other_xmit_queue;
		if (size > mss) {
			err = -EMSGSIZE;
			goto out;
		}
	}

	scp->persist_fxn = dn_nsp_xmit_timeout;

	while(sent < size) {
		err = sock_error(sk);
		if (err)
			goto out;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			goto out;
		}

		/*
		 * Calculate size that we wish to send.
		 */
		len = size - sent;

		if (len > mss)
			len = mss;

		/*
		 * Wait for queue size to go down below the window
		 * size.
		 */
		if (dn_queue_too_long(scp, queue, flags)) {
			DEFINE_WAIT(wait);

			if (flags & MSG_DONTWAIT) {
				err = -EWOULDBLOCK;
				goto out;
			}

			prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
			set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
			sk_wait_event(sk, &timeo,
				      !dn_queue_too_long(scp, queue, flags));
			clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
			finish_wait(sk_sleep(sk), &wait);
			continue;
		}

		/*
		 * Get a suitably sized skb.
		 * 64 is a bit of a hack really, but its larger than any
		 * link-layer headers and has served us well as a good
		 * guess as to their real length.
		 */
		skb = dn_alloc_send_pskb(sk, len + 64 + DN_MAX_NSP_DATA_HEADER,
					 flags & MSG_DONTWAIT, &err);

		if (err)
			break;

		if (!skb)
			continue;

		cb = DN_SKB_CB(skb);

		skb_reserve(skb, 64 + DN_MAX_NSP_DATA_HEADER);

		if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) {
			err = -EFAULT;
			goto out;
		}

		if (flags & MSG_OOB) {
			cb->nsp_flags = 0x30;	/* other-data segment */
			if (fctype != NSP_FC_NONE)
				scp->flowrem_oth--;
		} else {
			cb->nsp_flags = 0x00;
			if (scp->seg_total == 0)
				cb->nsp_flags |= 0x20;	/* beginning of message */

			scp->seg_total += len;

			if (((sent + len) == size) && (flags & MSG_EOR)) {
				cb->nsp_flags |= 0x40;	/* end of message */
				scp->seg_total = 0;
				if (fctype == NSP_FC_SCMC)
					scp->flowrem_dat--;
			}
			if (fctype == NSP_FC_SRC)
				scp->flowrem_dat--;
		}

		sent += len;
		dn_nsp_queue_xmit(sk, skb, sk->sk_allocation, flags & MSG_OOB);
		skb = NULL;

		scp->persist = dn_nsp_persist(sk);
	}
out:

	kfree_skb(skb);

	release_sock(sk);

	return sent ?
sent : err; out_err: err = sk_stream_error(sk, flags, err); release_sock(sk); return err; } static int dn_device_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *dev = (struct net_device *)ptr; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; switch (event) { case NETDEV_UP: dn_dev_up(dev); break; case NETDEV_DOWN: dn_dev_down(dev); break; default: break; } return NOTIFY_DONE; } static struct notifier_block dn_dev_notifier = { .notifier_call = dn_device_event, }; extern int dn_route_rcv(struct sk_buff *, struct net_device *, struct packet_type *, struct net_device *); static struct packet_type dn_dix_packet_type __read_mostly = { .type = cpu_to_be16(ETH_P_DNA_RT), .func = dn_route_rcv, }; #ifdef CONFIG_PROC_FS struct dn_iter_state { int bucket; }; static struct sock *dn_socket_get_first(struct seq_file *seq) { struct dn_iter_state *state = seq->private; struct sock *n = NULL; for(state->bucket = 0; state->bucket < DN_SK_HASH_SIZE; ++state->bucket) { n = sk_head(&dn_sk_hash[state->bucket]); if (n) break; } return n; } static struct sock *dn_socket_get_next(struct seq_file *seq, struct sock *n) { struct dn_iter_state *state = seq->private; n = sk_next(n); try_again: if (n) goto out; if (++state->bucket >= DN_SK_HASH_SIZE) goto out; n = sk_head(&dn_sk_hash[state->bucket]); goto try_again; out: return n; } static struct sock *socket_get_idx(struct seq_file *seq, loff_t *pos) { struct sock *sk = dn_socket_get_first(seq); if (sk) { while(*pos && (sk = dn_socket_get_next(seq, sk))) --*pos; } return *pos ? NULL : sk; } static void *dn_socket_get_idx(struct seq_file *seq, loff_t pos) { void *rc; read_lock_bh(&dn_hash_lock); rc = socket_get_idx(seq, &pos); if (!rc) { read_unlock_bh(&dn_hash_lock); } return rc; } static void *dn_socket_seq_start(struct seq_file *seq, loff_t *pos) { return *pos ? 
dn_socket_get_idx(seq, *pos - 1) : SEQ_START_TOKEN; } static void *dn_socket_seq_next(struct seq_file *seq, void *v, loff_t *pos) { void *rc; if (v == SEQ_START_TOKEN) { rc = dn_socket_get_idx(seq, 0); goto out; } rc = dn_socket_get_next(seq, v); if (rc) goto out; read_unlock_bh(&dn_hash_lock); out: ++*pos; return rc; } static void dn_socket_seq_stop(struct seq_file *seq, void *v) { if (v && v != SEQ_START_TOKEN) read_unlock_bh(&dn_hash_lock); } #define IS_NOT_PRINTABLE(x) ((x) < 32 || (x) > 126) static void dn_printable_object(struct sockaddr_dn *dn, unsigned char *buf) { int i; switch (le16_to_cpu(dn->sdn_objnamel)) { case 0: sprintf(buf, "%d", dn->sdn_objnum); break; default: for (i = 0; i < le16_to_cpu(dn->sdn_objnamel); i++) { buf[i] = dn->sdn_objname[i]; if (IS_NOT_PRINTABLE(buf[i])) buf[i] = '.'; } buf[i] = 0; } } static char *dn_state2asc(unsigned char state) { switch (state) { case DN_O: return "OPEN"; case DN_CR: return " CR"; case DN_DR: return " DR"; case DN_DRC: return " DRC"; case DN_CC: return " CC"; case DN_CI: return " CI"; case DN_NR: return " NR"; case DN_NC: return " NC"; case DN_CD: return " CD"; case DN_RJ: return " RJ"; case DN_RUN: return " RUN"; case DN_DI: return " DI"; case DN_DIC: return " DIC"; case DN_DN: return " DN"; case DN_CL: return " CL"; case DN_CN: return " CN"; } return "????"; } static inline void dn_socket_format_entry(struct seq_file *seq, struct sock *sk) { struct dn_scp *scp = DN_SK(sk); char buf1[DN_ASCBUF_LEN]; char buf2[DN_ASCBUF_LEN]; char local_object[DN_MAXOBJL+3]; char remote_object[DN_MAXOBJL+3]; dn_printable_object(&scp->addr, local_object); dn_printable_object(&scp->peer, remote_object); seq_printf(seq, "%6s/%04X %04d:%04d %04d:%04d %01d %-16s " "%6s/%04X %04d:%04d %04d:%04d %01d %-16s %4s %s\n", dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->addr)), buf1), scp->addrloc, scp->numdat, scp->numoth, scp->ackxmt_dat, scp->ackxmt_oth, scp->flowloc_sw, local_object, dn_addr2asc(le16_to_cpu(dn_saddr2dn(&scp->peer)), buf2), 
scp->addrrem, scp->numdat_rcv, scp->numoth_rcv, scp->ackrcv_dat, scp->ackrcv_oth, scp->flowrem_sw, remote_object, dn_state2asc(scp->state), ((scp->accept_mode == ACC_IMMED) ? "IMMED" : "DEFER")); } static int dn_socket_seq_show(struct seq_file *seq, void *v) { if (v == SEQ_START_TOKEN) { seq_puts(seq, "Local Remote\n"); } else { dn_socket_format_entry(seq, v); } return 0; } static const struct seq_operations dn_socket_seq_ops = { .start = dn_socket_seq_start, .next = dn_socket_seq_next, .stop = dn_socket_seq_stop, .show = dn_socket_seq_show, }; static int dn_socket_seq_open(struct inode *inode, struct file *file) { return seq_open_private(file, &dn_socket_seq_ops, sizeof(struct dn_iter_state)); } static const struct file_operations dn_socket_seq_fops = { .owner = THIS_MODULE, .open = dn_socket_seq_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release_private, }; #endif static const struct net_proto_family dn_family_ops = { .family = AF_DECnet, .create = dn_create, .owner = THIS_MODULE, }; static const struct proto_ops dn_proto_ops = { .family = AF_DECnet, .owner = THIS_MODULE, .release = dn_release, .bind = dn_bind, .connect = dn_connect, .socketpair = sock_no_socketpair, .accept = dn_accept, .getname = dn_getname, .poll = dn_poll, .ioctl = dn_ioctl, .listen = dn_listen, .shutdown = dn_shutdown, .setsockopt = dn_setsockopt, .getsockopt = dn_getsockopt, .sendmsg = dn_sendmsg, .recvmsg = dn_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; void dn_register_sysctl(void); void dn_unregister_sysctl(void); MODULE_DESCRIPTION("The Linux DECnet Network Protocol"); MODULE_AUTHOR("Linux DECnet Project Team"); MODULE_LICENSE("GPL"); MODULE_ALIAS_NETPROTO(PF_DECnet); static char banner[] __initdata = KERN_INFO "NET4: DECnet for Linux: V.2.5.68s (C) 1995-2003 Linux DECnet Project Team\n"; static int __init decnet_init(void) { int rc; printk(banner); rc = proto_register(&dn_proto, 1); if (rc != 0) goto out; dn_neigh_init(); dn_dev_init(); 
dn_route_init(); dn_fib_init(); sock_register(&dn_family_ops); dev_add_pack(&dn_dix_packet_type); register_netdevice_notifier(&dn_dev_notifier); proc_create("decnet", S_IRUGO, init_net.proc_net, &dn_socket_seq_fops); dn_register_sysctl(); out: return rc; } module_init(decnet_init); /* * Prevent DECnet module unloading until its fixed properly. * Requires an audit of the code to check for memory leaks and * initialisation problems etc. */ #if 0 static void __exit decnet_exit(void) { sock_unregister(AF_DECnet); rtnl_unregister_all(PF_DECnet); dev_remove_pack(&dn_dix_packet_type); dn_unregister_sysctl(); unregister_netdevice_notifier(&dn_dev_notifier); dn_route_cleanup(); dn_dev_cleanup(); dn_neigh_cleanup(); dn_fib_cleanup(); remove_proc_entry("decnet", init_net.proc_net); proto_unregister(&dn_proto); rcu_barrier_bh(); /* Wait for completion of call_rcu_bh()'s */ } module_exit(decnet_exit); #endif
gpl-2.0
sultanxda/sultan-kernel-pyramid-CAF-3.4
drivers/char/diag/diagfwd_bridge.c
714
11242
/* Copyright (c) 2012-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/slab.h> #include <linux/delay.h> #include <linux/diagchar.h> #include <linux/kmemleak.h> #include <linux/err.h> #include <linux/workqueue.h> #include <linux/ratelimit.h> #include <linux/platform_device.h> #include <linux/smux.h> #ifdef CONFIG_DIAG_OVER_USB #include <mach/usbdiag.h> #endif #include "diagchar.h" #include "diagmem.h" #include "diagfwd_cntl.h" #include "diagfwd_smux.h" #include "diagfwd_hsic.h" #include "diag_masks.h" #include "diagfwd_bridge.h" struct diag_bridge_dev *diag_bridge; /* diagfwd_connect_bridge is called when the USB mdm channel is connected */ int diagfwd_connect_bridge(int process_cable) { int i; pr_debug("diag: in %s\n", __func__); for (i = 0; i < MAX_BRIDGES; i++) if (diag_bridge[i].enabled) connect_bridge(process_cable, i); return 0; } void connect_bridge(int process_cable, int index) { int err; mutex_lock(&diag_bridge[index].bridge_mutex); /* If the usb cable is being connected */ if (process_cable) { err = usb_diag_alloc_req(diag_bridge[index].ch, N_MDM_WRITE, N_MDM_READ); if (err) pr_err("diag: unable to alloc USB req for ch %d err:%d\n", index, err); diag_bridge[index].usb_connected = 1; } if (index == SMUX && driver->diag_smux_enabled) { driver->in_busy_smux = 0; diagfwd_connect_smux(); } else { if (diag_hsic[index].hsic_device_enabled && (driver->logging_mode != MEMORY_DEVICE_MODE || diag_hsic[index].hsic_data_requested)) { diag_hsic[index].in_busy_hsic_read_on_device = 0; 
diag_hsic[index].in_busy_hsic_write = 0; /* If the HSIC (diag_bridge) platform * device is not open */ if (!diag_hsic[index].hsic_device_opened) { hsic_diag_bridge_ops[index].ctxt = (void *)(index); err = diag_bridge_open(index, &hsic_diag_bridge_ops[index]); if (err) { pr_err("diag: HSIC channel open error: %d\n", err); } else { pr_debug("diag: opened HSIC channel\n"); diag_hsic[index].hsic_device_opened = 1; } } else { pr_debug("diag: HSIC channel already open\n"); } /* * Turn on communication over usb mdm and HSIC, * if the HSIC device driver is enabled * and opened */ if (diag_hsic[index].hsic_device_opened) { diag_hsic[index].hsic_ch = 1; /* Poll USB mdm channel to check for data */ if (driver->logging_mode == USB_MODE) queue_work(diag_bridge[index].wq, &diag_bridge[index]. diag_read_work); /* Poll HSIC channel to check for data */ queue_work(diag_bridge[index].wq, &diag_hsic[index]. diag_read_hsic_work); } } } mutex_unlock(&diag_bridge[index].bridge_mutex); } /* * diagfwd_disconnect_bridge is called when the USB mdm channel * is disconnected. So disconnect should happen for all bridges */ int diagfwd_disconnect_bridge(int process_cable) { int i; pr_debug("diag: In %s, process_cable: %d\n", __func__, process_cable); for (i = 0; i < MAX_BRIDGES; i++) { if (diag_bridge[i].enabled) { mutex_lock(&diag_bridge[i].bridge_mutex); /* If the usb cable is being disconnected */ if (process_cable) { diag_bridge[i].usb_connected = 0; usb_diag_free_req(diag_bridge[i].ch); } if (i == SMUX) { if (driver->diag_smux_enabled && driver->logging_mode == USB_MODE) { driver->in_busy_smux = 1; driver->lcid = LCID_INVALID; driver->smux_connected = 0; /* * Turn off communication over usb * and smux */ msm_smux_close(LCID_VALID); } } else { if (diag_hsic[i].hsic_device_enabled && (driver->logging_mode != MEMORY_DEVICE_MODE || !diag_hsic[i].hsic_data_requested)) { diag_hsic[i]. 
in_busy_hsic_read_on_device = 1; diag_hsic[i].in_busy_hsic_write = 1; /* Turn off communication over usb * and HSIC */ diag_hsic_close(i); } } mutex_unlock(&diag_bridge[i].bridge_mutex); } } return 0; } /* Called after the asychronous usb_diag_read() on mdm channel is complete */ int diagfwd_read_complete_bridge(struct diag_request *diag_read_ptr) { int index = (int)(diag_read_ptr->context); /* The read of the usb on the mdm (not HSIC/SMUX) has completed */ diag_bridge[index].read_len = diag_read_ptr->actual; if (index == SMUX) { if (driver->diag_smux_enabled) { diagfwd_read_complete_smux(); return 0; } else { pr_warning("diag: incorrect callback for smux\n"); } } /* If SMUX not enabled, check for HSIC */ diag_hsic[index].in_busy_hsic_read_on_device = 0; if (!diag_hsic[index].hsic_ch) { pr_err("DIAG in %s: hsic_ch == 0, ch %d\n", __func__, index); return 0; } /* * The read of the usb driver on the mdm channel has completed. * If there is no write on the HSIC in progress, check if the * read has data to pass on to the HSIC. If so, pass the usb * mdm data on to the HSIC. */ if (!diag_hsic[index].in_busy_hsic_write && diag_bridge[index].usb_buf_out && (diag_bridge[index].read_len > 0)) { /* * Initiate the HSIC write. The HSIC write is * asynchronous. When complete the write * complete callback function will be called */ int err; diag_hsic[index].in_busy_hsic_write = 1; err = diag_bridge_write(index, diag_bridge[index].usb_buf_out, diag_bridge[index].read_len); if (err) { pr_err_ratelimited("diag: mdm data on HSIC write err: %d\n", err); /* * If the error is recoverable, then clear * the write flag, so we will resubmit a * write on the next frame. Otherwise, don't * resubmit a write on the next frame. 
*/ if ((-ENODEV) != err) diag_hsic[index].in_busy_hsic_write = 0; } } /* * If there is no write of the usb mdm data on the * HSIC channel */ if (!diag_hsic[index].in_busy_hsic_write) queue_work(diag_bridge[index].wq, &diag_bridge[index].diag_read_work); return 0; } static void diagfwd_bridge_notifier(void *priv, unsigned event, struct diag_request *d_req) { int index; switch (event) { case USB_DIAG_CONNECT: queue_work(driver->diag_wq, &driver->diag_connect_work); break; case USB_DIAG_DISCONNECT: queue_work(driver->diag_wq, &driver->diag_disconnect_work); break; case USB_DIAG_READ_DONE: index = (int)(d_req->context); queue_work(diag_bridge[index].wq, &diag_bridge[index].usb_read_complete_work); break; case USB_DIAG_WRITE_DONE: index = (int)(d_req->context); if (index == SMUX && driver->diag_smux_enabled) diagfwd_write_complete_smux(); else if (diag_hsic[index].hsic_device_enabled) diagfwd_write_complete_hsic(d_req, index); break; default: pr_err("diag: in %s: Unknown event from USB diag:%u\n", __func__, event); break; } } void diagfwd_bridge_init(int index) { int ret; unsigned char name[20]; if (index == HSIC) { strlcpy(name, "hsic", sizeof(name)); } else if (index == HSIC_2) { strlcpy(name, "hsic_2", sizeof(name)); } else if (index == SMUX) { strlcpy(name, "smux", sizeof(name)); } else { pr_err("diag: incorrect bridge init, instance: %d\n", index); return; } strlcpy(diag_bridge[index].name, name, sizeof(diag_bridge[index].name)); strlcat(name, "_diag_wq", sizeof(diag_bridge[index].name)); diag_bridge[index].id = index; diag_bridge[index].wq = create_singlethread_workqueue(name); diag_bridge[index].read_len = 0; diag_bridge[index].write_len = 0; if (diag_bridge[index].usb_buf_out == NULL) diag_bridge[index].usb_buf_out = kzalloc(USB_MAX_OUT_BUF, GFP_KERNEL); if (diag_bridge[index].usb_buf_out == NULL) goto err; if (diag_bridge[index].usb_read_ptr == NULL) diag_bridge[index].usb_read_ptr = kzalloc(sizeof(struct diag_request), GFP_KERNEL); if 
(diag_bridge[index].usb_read_ptr == NULL) goto err; if (diag_bridge[index].usb_read_ptr->context == NULL) diag_bridge[index].usb_read_ptr->context = kzalloc(sizeof(int), GFP_KERNEL); if (diag_bridge[index].usb_read_ptr->context == NULL) goto err; mutex_init(&diag_bridge[index].bridge_mutex); if (index == HSIC || index == HSIC_2) { INIT_WORK(&(diag_bridge[index].usb_read_complete_work), diag_usb_read_complete_hsic_fn); #ifdef CONFIG_DIAG_OVER_USB INIT_WORK(&(diag_bridge[index].diag_read_work), diag_read_usb_hsic_work_fn); if (index == HSIC) diag_bridge[index].ch = usb_diag_open(DIAG_MDM, (void *)index, diagfwd_bridge_notifier); else if (index == HSIC_2) diag_bridge[index].ch = usb_diag_open(DIAG_MDM2, (void *)index, diagfwd_bridge_notifier); if (IS_ERR(diag_bridge[index].ch)) { pr_err("diag: Unable to open USB MDM ch = %d\n", index); goto err; } else diag_bridge[index].enabled = 1; #endif } else if (index == SMUX) { INIT_WORK(&(diag_bridge[index].usb_read_complete_work), diag_usb_read_complete_smux_fn); #ifdef CONFIG_DIAG_OVER_USB INIT_WORK(&(diag_bridge[index].diag_read_work), diag_read_usb_smux_work_fn); diag_bridge[index].ch = usb_diag_open(DIAG_QSC, (void *)index, diagfwd_bridge_notifier); if (IS_ERR(diag_bridge[index].ch)) { pr_err("diag: Unable to open USB diag QSC channel\n"); goto err; } else diag_bridge[index].enabled = 1; #endif ret = platform_driver_register(&msm_diagfwd_smux_driver); if (ret) pr_err("diag: could not register SMUX device, ret: %d\n", ret); } return; err: pr_err("diag: Could not initialize for bridge forwarding\n"); kfree(diag_bridge[index].usb_buf_out); kfree(diag_hsic[index].hsic_buf_tbl); kfree(driver->write_ptr_mdm); kfree(diag_bridge[index].usb_read_ptr); if (diag_bridge[index].wq) destroy_workqueue(diag_bridge[index].wq); return; } void diagfwd_bridge_exit(void) { int i; pr_debug("diag: in %s\n", __func__); for (i = 0; i < MAX_HSIC_CH; i++) { if (diag_hsic[i].hsic_device_enabled) { diag_hsic_close(i); diag_hsic[i].hsic_device_enabled 
= 0; diag_bridge[i].enabled = 0; } diag_hsic[i].hsic_inited = 0; kfree(diag_hsic[i].hsic_buf_tbl); } diagmem_exit(driver, POOL_TYPE_ALL); if (driver->diag_smux_enabled) { driver->lcid = LCID_INVALID; kfree(driver->buf_in_smux); driver->diag_smux_enabled = 0; diag_bridge[SMUX].enabled = 0; } platform_driver_unregister(&msm_hsic_ch_driver); platform_driver_unregister(&msm_diagfwd_smux_driver); /* destroy USB MDM specific variables */ for (i = 0; i < MAX_BRIDGES; i++) { if (diag_bridge[i].enabled) { #ifdef CONFIG_DIAG_OVER_USB if (diag_bridge[i].usb_connected) usb_diag_free_req(diag_bridge[i].ch); usb_diag_close(diag_bridge[i].ch); #endif kfree(diag_bridge[i].usb_buf_out); kfree(diag_bridge[i].usb_read_ptr); destroy_workqueue(diag_bridge[i].wq); diag_bridge[i].enabled = 0; } } kfree(driver->write_ptr_mdm); }
gpl-2.0
manuelnaranjo/goldenleaf
drivers/sbus/char/envctrl.c
714
31111
/* envctrl.c: Temperature and Fan monitoring on Machines providing it. * * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) * Copyright (C) 2000 Vinh Truong (vinh.truong@eng.sun.com) * VT - The implementation is to support Sun Microelectronics (SME) platform * environment monitoring. SME platforms use pcf8584 as the i2c bus * controller to access pcf8591 (8-bit A/D and D/A converter) and * pcf8571 (256 x 8-bit static low-voltage RAM with I2C-bus interface). * At board level, it follows SME Firmware I2C Specification. Reference: * http://www-eu2.semiconductors.com/pip/PCF8584P * http://www-eu2.semiconductors.com/pip/PCF8574AP * http://www-eu2.semiconductors.com/pip/PCF8591P * * EB - Added support for CP1500 Global Address and PS/Voltage monitoring. * Eric Brower <ebrower@usa.net> * * DB - Audit every copy_to_user in envctrl_read. * Daniele Bellucci <bellucda@tiscali.it> */ #include <linux/module.h> #include <linux/init.h> #include <linux/kthread.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/miscdevice.h> #include <linux/kmod.h> #include <linux/reboot.h> #include <linux/smp_lock.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/uaccess.h> #include <asm/envctrl.h> #include <asm/io.h> #define DRIVER_NAME "envctrl" #define PFX DRIVER_NAME ": " #define ENVCTRL_MINOR 162 #define PCF8584_ADDRESS 0x55 #define CONTROL_PIN 0x80 #define CONTROL_ES0 0x40 #define CONTROL_ES1 0x20 #define CONTROL_ES2 0x10 #define CONTROL_ENI 0x08 #define CONTROL_STA 0x04 #define CONTROL_STO 0x02 #define CONTROL_ACK 0x01 #define STATUS_PIN 0x80 #define STATUS_STS 0x20 #define STATUS_BER 0x10 #define STATUS_LRB 0x08 #define STATUS_AD0 0x08 #define STATUS_AAB 0x04 #define STATUS_LAB 0x02 #define STATUS_BB 0x01 /* * CLK Mode Register. 
*/ #define BUS_CLK_90 0x00 #define BUS_CLK_45 0x01 #define BUS_CLK_11 0x02 #define BUS_CLK_1_5 0x03 #define CLK_3 0x00 #define CLK_4_43 0x10 #define CLK_6 0x14 #define CLK_8 0x18 #define CLK_12 0x1c #define OBD_SEND_START 0xc5 /* value to generate I2c_bus START condition */ #define OBD_SEND_STOP 0xc3 /* value to generate I2c_bus STOP condition */ /* Monitor type of i2c child device. * Firmware definitions. */ #define PCF8584_MAX_CHANNELS 8 #define PCF8584_GLOBALADDR_TYPE 6 /* global address monitor */ #define PCF8584_FANSTAT_TYPE 3 /* fan status monitor */ #define PCF8584_VOLTAGE_TYPE 2 /* voltage monitor */ #define PCF8584_TEMP_TYPE 1 /* temperature monitor*/ /* Monitor type of i2c child device. * Driver definitions. */ #define ENVCTRL_NOMON 0 #define ENVCTRL_CPUTEMP_MON 1 /* cpu temperature monitor */ #define ENVCTRL_CPUVOLTAGE_MON 2 /* voltage monitor */ #define ENVCTRL_FANSTAT_MON 3 /* fan status monitor */ #define ENVCTRL_ETHERTEMP_MON 4 /* ethernet temperarture */ /* monitor */ #define ENVCTRL_VOLTAGESTAT_MON 5 /* voltage status monitor */ #define ENVCTRL_MTHRBDTEMP_MON 6 /* motherboard temperature */ #define ENVCTRL_SCSITEMP_MON 7 /* scsi temperarture */ #define ENVCTRL_GLOBALADDR_MON 8 /* global address */ /* Child device type. * Driver definitions. */ #define I2C_ADC 0 /* pcf8591 */ #define I2C_GPIO 1 /* pcf8571 */ /* Data read from child device may need to decode * through a data table and a scale. * Translation type as defined by firmware. */ #define ENVCTRL_TRANSLATE_NO 0 #define ENVCTRL_TRANSLATE_PARTIAL 1 #define ENVCTRL_TRANSLATE_COMBINED 2 #define ENVCTRL_TRANSLATE_FULL 3 /* table[data] */ #define ENVCTRL_TRANSLATE_SCALE 4 /* table[data]/scale */ /* Driver miscellaneous definitions. 
*/ #define ENVCTRL_MAX_CPU 4 #define CHANNEL_DESC_SZ 256 /* Mask values for combined GlobalAddress/PowerStatus node */ #define ENVCTRL_GLOBALADDR_ADDR_MASK 0x1F #define ENVCTRL_GLOBALADDR_PSTAT_MASK 0x60 /* Node 0x70 ignored on CompactPCI CP1400/1500 platforms * (see envctrl_init_i2c_child) */ #define ENVCTRL_CPCI_IGNORED_NODE 0x70 #define PCF8584_DATA 0x00 #define PCF8584_CSR 0x01 /* Each child device can be monitored by up to PCF8584_MAX_CHANNELS. * Property of a port or channel as defined by the firmware. */ struct pcf8584_channel { unsigned char chnl_no; unsigned char io_direction; unsigned char type; unsigned char last; }; /* Each child device may have one or more tables of bytes to help decode * data. Table property as defined by the firmware. */ struct pcf8584_tblprop { unsigned int type; unsigned int scale; unsigned int offset; /* offset from the beginning of the table */ unsigned int size; }; /* i2c child */ struct i2c_child_t { /* Either ADC or GPIO. */ unsigned char i2ctype; unsigned long addr; struct pcf8584_channel chnl_array[PCF8584_MAX_CHANNELS]; /* Channel info. */ unsigned int total_chnls; /* Number of monitor channels. */ unsigned char fan_mask; /* Byte mask for fan status channels. */ unsigned char voltage_mask; /* Byte mask for voltage status channels. */ struct pcf8584_tblprop tblprop_array[PCF8584_MAX_CHANNELS]; /* Properties of all monitor channels. */ unsigned int total_tbls; /* Number of monitor tables. */ char *tables; /* Pointer to table(s). */ char chnls_desc[CHANNEL_DESC_SZ]; /* Channel description. */ char mon_type[PCF8584_MAX_CHANNELS]; }; static void __iomem *i2c; static struct i2c_child_t i2c_childlist[ENVCTRL_MAX_CPU*2]; static unsigned char chnls_mask[] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 }; static unsigned int warning_temperature = 0; static unsigned int shutdown_temperature = 0; static char read_cpu; /* Forward declarations. 
*/ static struct i2c_child_t *envctrl_get_i2c_child(unsigned char); /* Function Description: Test the PIN bit (Pending Interrupt Not) * to test when serial transmission is completed . * Return : None. */ static void envtrl_i2c_test_pin(void) { int limit = 1000000; while (--limit > 0) { if (!(readb(i2c + PCF8584_CSR) & STATUS_PIN)) break; udelay(1); } if (limit <= 0) printk(KERN_INFO PFX "Pin status will not clear.\n"); } /* Function Description: Test busy bit. * Return : None. */ static void envctrl_i2c_test_bb(void) { int limit = 1000000; while (--limit > 0) { /* Busy bit 0 means busy. */ if (readb(i2c + PCF8584_CSR) & STATUS_BB) break; udelay(1); } if (limit <= 0) printk(KERN_INFO PFX "Busy bit will not clear.\n"); } /* Function Description: Send the address for a read access. * Return : 0 if not acknowledged, otherwise acknowledged. */ static int envctrl_i2c_read_addr(unsigned char addr) { envctrl_i2c_test_bb(); /* Load address. */ writeb(addr + 1, i2c + PCF8584_DATA); envctrl_i2c_test_bb(); writeb(OBD_SEND_START, i2c + PCF8584_CSR); /* Wait for PIN. */ envtrl_i2c_test_pin(); /* CSR 0 means acknowledged. */ if (!(readb(i2c + PCF8584_CSR) & STATUS_LRB)) { return readb(i2c + PCF8584_DATA); } else { writeb(OBD_SEND_STOP, i2c + PCF8584_CSR); return 0; } } /* Function Description: Send the address for write mode. * Return : None. */ static void envctrl_i2c_write_addr(unsigned char addr) { envctrl_i2c_test_bb(); writeb(addr, i2c + PCF8584_DATA); /* Generate Start condition. */ writeb(OBD_SEND_START, i2c + PCF8584_CSR); } /* Function Description: Read 1 byte of data from addr * set by envctrl_i2c_read_addr() * Return : Data from address set by envctrl_i2c_read_addr(). */ static unsigned char envctrl_i2c_read_data(void) { envtrl_i2c_test_pin(); writeb(CONTROL_ES0, i2c + PCF8584_CSR); /* Send neg ack. */ return readb(i2c + PCF8584_DATA); } /* Function Description: Instruct the device which port to read data from. * Return : None. 
*/ static void envctrl_i2c_write_data(unsigned char port) { envtrl_i2c_test_pin(); writeb(port, i2c + PCF8584_DATA); } /* Function Description: Generate Stop condition after last byte is sent. * Return : None. */ static void envctrl_i2c_stop(void) { envtrl_i2c_test_pin(); writeb(OBD_SEND_STOP, i2c + PCF8584_CSR); } /* Function Description: Read adc device. * Return : Data at address and port. */ static unsigned char envctrl_i2c_read_8591(unsigned char addr, unsigned char port) { /* Send address. */ envctrl_i2c_write_addr(addr); /* Setup port to read. */ envctrl_i2c_write_data(port); envctrl_i2c_stop(); /* Read port. */ envctrl_i2c_read_addr(addr); /* Do a single byte read and send stop. */ envctrl_i2c_read_data(); envctrl_i2c_stop(); return readb(i2c + PCF8584_DATA); } /* Function Description: Read gpio device. * Return : Data at address. */ static unsigned char envctrl_i2c_read_8574(unsigned char addr) { unsigned char rd; envctrl_i2c_read_addr(addr); /* Do a single byte read and send stop. */ rd = envctrl_i2c_read_data(); envctrl_i2c_stop(); return rd; } /* Function Description: Decode data read from an adc device using firmware * table. * Return: Number of read bytes. Data is stored in bufdata in ascii format. */ static int envctrl_i2c_data_translate(unsigned char data, int translate_type, int scale, char *tbl, char *bufdata) { int len = 0; switch (translate_type) { case ENVCTRL_TRANSLATE_NO: /* No decode necessary. */ len = 1; bufdata[0] = data; break; case ENVCTRL_TRANSLATE_FULL: /* Decode this way: data = table[data]. */ len = 1; bufdata[0] = tbl[data]; break; case ENVCTRL_TRANSLATE_SCALE: /* Decode this way: data = table[data]/scale */ sprintf(bufdata,"%d ", (tbl[data] * 10) / (scale)); len = strlen(bufdata); bufdata[len - 1] = bufdata[len - 2]; bufdata[len - 2] = '.'; break; default: break; }; return len; } /* Function Description: Read cpu-related data such as cpu temperature, voltage. * Return: Number of read bytes. 
Data is stored in bufdata in ascii format. */ static int envctrl_read_cpu_info(int cpu, struct i2c_child_t *pchild, char mon_type, unsigned char *bufdata) { unsigned char data; int i; char *tbl, j = -1; /* Find the right monitor type and channel. */ for (i = 0; i < PCF8584_MAX_CHANNELS; i++) { if (pchild->mon_type[i] == mon_type) { if (++j == cpu) { break; } } } if (j != cpu) return 0; /* Read data from address and port. */ data = envctrl_i2c_read_8591((unsigned char)pchild->addr, (unsigned char)pchild->chnl_array[i].chnl_no); /* Find decoding table. */ tbl = pchild->tables + pchild->tblprop_array[i].offset; return envctrl_i2c_data_translate(data, pchild->tblprop_array[i].type, pchild->tblprop_array[i].scale, tbl, bufdata); } /* Function Description: Read noncpu-related data such as motherboard * temperature. * Return: Number of read bytes. Data is stored in bufdata in ascii format. */ static int envctrl_read_noncpu_info(struct i2c_child_t *pchild, char mon_type, unsigned char *bufdata) { unsigned char data; int i; char *tbl = NULL; for (i = 0; i < PCF8584_MAX_CHANNELS; i++) { if (pchild->mon_type[i] == mon_type) break; } if (i >= PCF8584_MAX_CHANNELS) return 0; /* Read data from address and port. */ data = envctrl_i2c_read_8591((unsigned char)pchild->addr, (unsigned char)pchild->chnl_array[i].chnl_no); /* Find decoding table. */ tbl = pchild->tables + pchild->tblprop_array[i].offset; return envctrl_i2c_data_translate(data, pchild->tblprop_array[i].type, pchild->tblprop_array[i].scale, tbl, bufdata); } /* Function Description: Read fan status. * Return : Always 1 byte. Status stored in bufdata. */ static int envctrl_i2c_fan_status(struct i2c_child_t *pchild, unsigned char data, char *bufdata) { unsigned char tmp, ret = 0; int i, j = 0; tmp = data & pchild->fan_mask; if (tmp == pchild->fan_mask) { /* All bits are on. All fans are functioning. */ ret = ENVCTRL_ALL_FANS_GOOD; } else if (tmp == 0) { /* No bits are on. No fans are functioning. 
*/ ret = ENVCTRL_ALL_FANS_BAD; } else { /* Go through all channels, mark 'on' the matched bits. * Notice that fan_mask may have discontiguous bits but * return mask are always contiguous. For example if we * monitor 4 fans at channels 0,1,2,4, the return mask * should be 00010000 if only fan at channel 4 is working. */ for (i = 0; i < PCF8584_MAX_CHANNELS;i++) { if (pchild->fan_mask & chnls_mask[i]) { if (!(chnls_mask[i] & tmp)) ret |= chnls_mask[j]; j++; } } } bufdata[0] = ret; return 1; } /* Function Description: Read global addressing line. * Return : Always 1 byte. Status stored in bufdata. */ static int envctrl_i2c_globaladdr(struct i2c_child_t *pchild, unsigned char data, char *bufdata) { /* Translatation table is not necessary, as global * addr is the integer value of the GA# bits. * * NOTE: MSB is documented as zero, but I see it as '1' always.... * * ----------------------------------------------- * | 0 | FAL | DEG | GA4 | GA3 | GA2 | GA1 | GA0 | * ----------------------------------------------- * GA0 - GA4 integer value of Global Address (backplane slot#) * DEG 0 = cPCI Power supply output is starting to degrade * 1 = cPCI Power supply output is OK * FAL 0 = cPCI Power supply has failed * 1 = cPCI Power supply output is OK */ bufdata[0] = (data & ENVCTRL_GLOBALADDR_ADDR_MASK); return 1; } /* Function Description: Read standard voltage and power supply status. * Return : Always 1 byte. Status stored in bufdata. */ static unsigned char envctrl_i2c_voltage_status(struct i2c_child_t *pchild, unsigned char data, char *bufdata) { unsigned char tmp, ret = 0; int i, j = 0; tmp = data & pchild->voltage_mask; /* Two channels are used to monitor voltage and power supply. */ if (tmp == pchild->voltage_mask) { /* All bits are on. Voltage and power supply are okay. */ ret = ENVCTRL_VOLTAGE_POWERSUPPLY_GOOD; } else if (tmp == 0) { /* All bits are off. 
Voltage and power supply are bad */ ret = ENVCTRL_VOLTAGE_POWERSUPPLY_BAD; } else { /* Either voltage or power supply has problem. */ for (i = 0; i < PCF8584_MAX_CHANNELS; i++) { if (pchild->voltage_mask & chnls_mask[i]) { j++; /* Break out when there is a mismatch. */ if (!(chnls_mask[i] & tmp)) break; } } /* Make a wish that hardware will always use the * first channel for voltage and the second for * power supply. */ if (j == 1) ret = ENVCTRL_VOLTAGE_BAD; else ret = ENVCTRL_POWERSUPPLY_BAD; } bufdata[0] = ret; return 1; } /* Function Description: Read a byte from /dev/envctrl. Mapped to user read(). * Return: Number of read bytes. 0 for error. */ static ssize_t envctrl_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct i2c_child_t *pchild; unsigned char data[10]; int ret = 0; /* Get the type of read as decided in ioctl() call. * Find the appropriate i2c child. * Get the data and put back to the user buffer. */ switch ((int)(long)file->private_data) { case ENVCTRL_RD_WARNING_TEMPERATURE: if (warning_temperature == 0) return 0; data[0] = (unsigned char)(warning_temperature); ret = 1; if (copy_to_user(buf, data, ret)) ret = -EFAULT; break; case ENVCTRL_RD_SHUTDOWN_TEMPERATURE: if (shutdown_temperature == 0) return 0; data[0] = (unsigned char)(shutdown_temperature); ret = 1; if (copy_to_user(buf, data, ret)) ret = -EFAULT; break; case ENVCTRL_RD_MTHRBD_TEMPERATURE: if (!(pchild = envctrl_get_i2c_child(ENVCTRL_MTHRBDTEMP_MON))) return 0; ret = envctrl_read_noncpu_info(pchild, ENVCTRL_MTHRBDTEMP_MON, data); if (copy_to_user(buf, data, ret)) ret = -EFAULT; break; case ENVCTRL_RD_CPU_TEMPERATURE: if (!(pchild = envctrl_get_i2c_child(ENVCTRL_CPUTEMP_MON))) return 0; ret = envctrl_read_cpu_info(read_cpu, pchild, ENVCTRL_CPUTEMP_MON, data); /* Reset cpu to the default cpu0. 
*/ if (copy_to_user(buf, data, ret)) ret = -EFAULT; break; case ENVCTRL_RD_CPU_VOLTAGE: if (!(pchild = envctrl_get_i2c_child(ENVCTRL_CPUVOLTAGE_MON))) return 0; ret = envctrl_read_cpu_info(read_cpu, pchild, ENVCTRL_CPUVOLTAGE_MON, data); /* Reset cpu to the default cpu0. */ if (copy_to_user(buf, data, ret)) ret = -EFAULT; break; case ENVCTRL_RD_SCSI_TEMPERATURE: if (!(pchild = envctrl_get_i2c_child(ENVCTRL_SCSITEMP_MON))) return 0; ret = envctrl_read_noncpu_info(pchild, ENVCTRL_SCSITEMP_MON, data); if (copy_to_user(buf, data, ret)) ret = -EFAULT; break; case ENVCTRL_RD_ETHERNET_TEMPERATURE: if (!(pchild = envctrl_get_i2c_child(ENVCTRL_ETHERTEMP_MON))) return 0; ret = envctrl_read_noncpu_info(pchild, ENVCTRL_ETHERTEMP_MON, data); if (copy_to_user(buf, data, ret)) ret = -EFAULT; break; case ENVCTRL_RD_FAN_STATUS: if (!(pchild = envctrl_get_i2c_child(ENVCTRL_FANSTAT_MON))) return 0; data[0] = envctrl_i2c_read_8574(pchild->addr); ret = envctrl_i2c_fan_status(pchild,data[0], data); if (copy_to_user(buf, data, ret)) ret = -EFAULT; break; case ENVCTRL_RD_GLOBALADDRESS: if (!(pchild = envctrl_get_i2c_child(ENVCTRL_GLOBALADDR_MON))) return 0; data[0] = envctrl_i2c_read_8574(pchild->addr); ret = envctrl_i2c_globaladdr(pchild, data[0], data); if (copy_to_user(buf, data, ret)) ret = -EFAULT; break; case ENVCTRL_RD_VOLTAGE_STATUS: if (!(pchild = envctrl_get_i2c_child(ENVCTRL_VOLTAGESTAT_MON))) /* If voltage monitor not present, check for CPCI equivalent */ if (!(pchild = envctrl_get_i2c_child(ENVCTRL_GLOBALADDR_MON))) return 0; data[0] = envctrl_i2c_read_8574(pchild->addr); ret = envctrl_i2c_voltage_status(pchild, data[0], data); if (copy_to_user(buf, data, ret)) ret = -EFAULT; break; default: break; }; return ret; } /* Function Description: Command what to read. Mapped to user ioctl(). * Return: Gives 0 for implemented commands, -EINVAL otherwise. 
*/ static long envctrl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { char __user *infobuf; switch (cmd) { case ENVCTRL_RD_WARNING_TEMPERATURE: case ENVCTRL_RD_SHUTDOWN_TEMPERATURE: case ENVCTRL_RD_MTHRBD_TEMPERATURE: case ENVCTRL_RD_FAN_STATUS: case ENVCTRL_RD_VOLTAGE_STATUS: case ENVCTRL_RD_ETHERNET_TEMPERATURE: case ENVCTRL_RD_SCSI_TEMPERATURE: case ENVCTRL_RD_GLOBALADDRESS: file->private_data = (void *)(long)cmd; break; case ENVCTRL_RD_CPU_TEMPERATURE: case ENVCTRL_RD_CPU_VOLTAGE: /* Check to see if application passes in any cpu number, * the default is cpu0. */ infobuf = (char __user *) arg; if (infobuf == NULL) { read_cpu = 0; }else { get_user(read_cpu, infobuf); } /* Save the command for use when reading. */ file->private_data = (void *)(long)cmd; break; default: return -EINVAL; }; return 0; } /* Function Description: open device. Mapped to user open(). * Return: Always 0. */ static int envctrl_open(struct inode *inode, struct file *file) { cycle_kernel_lock(); file->private_data = NULL; return 0; } /* Function Description: Open device. Mapped to user close(). * Return: Always 0. */ static int envctrl_release(struct inode *inode, struct file *file) { return 0; } static const struct file_operations envctrl_fops = { .owner = THIS_MODULE, .read = envctrl_read, .unlocked_ioctl = envctrl_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = envctrl_ioctl, #endif .open = envctrl_open, .release = envctrl_release, }; static struct miscdevice envctrl_dev = { ENVCTRL_MINOR, "envctrl", &envctrl_fops }; /* Function Description: Set monitor type based on firmware description. * Return: None. */ static void envctrl_set_mon(struct i2c_child_t *pchild, const char *chnl_desc, int chnl_no) { /* Firmware only has temperature type. It does not distinguish * different kinds of temperatures. We use channel description * to disinguish them. 
*/ if (!(strcmp(chnl_desc,"temp,cpu")) || !(strcmp(chnl_desc,"temp,cpu0")) || !(strcmp(chnl_desc,"temp,cpu1")) || !(strcmp(chnl_desc,"temp,cpu2")) || !(strcmp(chnl_desc,"temp,cpu3"))) pchild->mon_type[chnl_no] = ENVCTRL_CPUTEMP_MON; if (!(strcmp(chnl_desc,"vddcore,cpu0")) || !(strcmp(chnl_desc,"vddcore,cpu1")) || !(strcmp(chnl_desc,"vddcore,cpu2")) || !(strcmp(chnl_desc,"vddcore,cpu3"))) pchild->mon_type[chnl_no] = ENVCTRL_CPUVOLTAGE_MON; if (!(strcmp(chnl_desc,"temp,motherboard"))) pchild->mon_type[chnl_no] = ENVCTRL_MTHRBDTEMP_MON; if (!(strcmp(chnl_desc,"temp,scsi"))) pchild->mon_type[chnl_no] = ENVCTRL_SCSITEMP_MON; if (!(strcmp(chnl_desc,"temp,ethernet"))) pchild->mon_type[chnl_no] = ENVCTRL_ETHERTEMP_MON; } /* Function Description: Initialize monitor channel with channel desc, * decoding tables, monitor type, optional properties. * Return: None. */ static void envctrl_init_adc(struct i2c_child_t *pchild, struct device_node *dp) { int i = 0, len; const char *pos; const unsigned int *pval; /* Firmware describe channels into a stream separated by a '\0'. */ pos = of_get_property(dp, "channels-description", &len); while (len > 0) { int l = strlen(pos) + 1; envctrl_set_mon(pchild, pos, i++); len -= l; pos += l; } /* Get optional properties. */ pval = of_get_property(dp, "warning-temp", NULL); if (pval) warning_temperature = *pval; pval = of_get_property(dp, "shutdown-temp", NULL); if (pval) shutdown_temperature = *pval; } /* Function Description: Initialize child device monitoring fan status. * Return: None. */ static void envctrl_init_fanstat(struct i2c_child_t *pchild) { int i; /* Go through all channels and set up the mask. */ for (i = 0; i < pchild->total_chnls; i++) pchild->fan_mask |= chnls_mask[(pchild->chnl_array[i]).chnl_no]; /* We only need to know if this child has fan status monitored. * We don't care which channels since we have the mask already. 
*/ pchild->mon_type[0] = ENVCTRL_FANSTAT_MON; } /* Function Description: Initialize child device for global addressing line. * Return: None. */ static void envctrl_init_globaladdr(struct i2c_child_t *pchild) { int i; /* Voltage/PowerSupply monitoring is piggybacked * with Global Address on CompactPCI. See comments * within envctrl_i2c_globaladdr for bit assignments. * * The mask is created here by assigning mask bits to each * bit position that represents PCF8584_VOLTAGE_TYPE data. * Channel numbers are not consecutive within the globaladdr * node (why?), so we use the actual counter value as chnls_mask * index instead of the chnl_array[x].chnl_no value. * * NOTE: This loop could be replaced with a constant representing * a mask of bits 5&6 (ENVCTRL_GLOBALADDR_PSTAT_MASK). */ for (i = 0; i < pchild->total_chnls; i++) { if (PCF8584_VOLTAGE_TYPE == pchild->chnl_array[i].type) { pchild->voltage_mask |= chnls_mask[i]; } } /* We only need to know if this child has global addressing * line monitored. We don't care which channels since we know * the mask already (ENVCTRL_GLOBALADDR_ADDR_MASK). */ pchild->mon_type[0] = ENVCTRL_GLOBALADDR_MON; } /* Initialize child device monitoring voltage status. */ static void envctrl_init_voltage_status(struct i2c_child_t *pchild) { int i; /* Go through all channels and set up the mask. */ for (i = 0; i < pchild->total_chnls; i++) pchild->voltage_mask |= chnls_mask[(pchild->chnl_array[i]).chnl_no]; /* We only need to know if this child has voltage status monitored. * We don't care which channels since we have the mask already. */ pchild->mon_type[0] = ENVCTRL_VOLTAGESTAT_MON; } /* Function Description: Initialize i2c child device. * Return: None. */ static void envctrl_init_i2c_child(struct device_node *dp, struct i2c_child_t *pchild) { int len, i, tbls_size = 0; const void *pval; /* Get device address. */ pval = of_get_property(dp, "reg", &len); memcpy(&pchild->addr, pval, len); /* Get tables property. Read firmware temperature tables. 
*/ pval = of_get_property(dp, "translation", &len); if (pval && len > 0) { memcpy(pchild->tblprop_array, pval, len); pchild->total_tbls = len / sizeof(struct pcf8584_tblprop); for (i = 0; i < pchild->total_tbls; i++) { if ((pchild->tblprop_array[i].size + pchild->tblprop_array[i].offset) > tbls_size) { tbls_size = pchild->tblprop_array[i].size + pchild->tblprop_array[i].offset; } } pchild->tables = kmalloc(tbls_size, GFP_KERNEL); if (pchild->tables == NULL){ printk(KERN_ERR PFX "Failed to allocate table.\n"); return; } pval = of_get_property(dp, "tables", &len); if (!pval || len <= 0) { printk(KERN_ERR PFX "Failed to get table.\n"); return; } memcpy(pchild->tables, pval, len); } /* SPARCengine ASM Reference Manual (ref. SMI doc 805-7581-04) * sections 2.5, 3.5, 4.5 state node 0x70 for CP1400/1500 is * "For Factory Use Only." * * We ignore the node on these platforms by assigning the * 'NULL' monitor type. */ if (ENVCTRL_CPCI_IGNORED_NODE == pchild->addr) { struct device_node *root_node; int len; root_node = of_find_node_by_path("/"); if (!strcmp(root_node->name, "SUNW,UltraSPARC-IIi-cEngine")) { for (len = 0; len < PCF8584_MAX_CHANNELS; ++len) { pchild->mon_type[len] = ENVCTRL_NOMON; } return; } } /* Get the monitor channels. */ pval = of_get_property(dp, "channels-in-use", &len); memcpy(pchild->chnl_array, pval, len); pchild->total_chnls = len / sizeof(struct pcf8584_channel); for (i = 0; i < pchild->total_chnls; i++) { switch (pchild->chnl_array[i].type) { case PCF8584_TEMP_TYPE: envctrl_init_adc(pchild, dp); break; case PCF8584_GLOBALADDR_TYPE: envctrl_init_globaladdr(pchild); i = pchild->total_chnls; break; case PCF8584_FANSTAT_TYPE: envctrl_init_fanstat(pchild); i = pchild->total_chnls; break; case PCF8584_VOLTAGE_TYPE: if (pchild->i2ctype == I2C_ADC) { envctrl_init_adc(pchild,dp); } else { envctrl_init_voltage_status(pchild); } i = pchild->total_chnls; break; default: break; }; } } /* Function Description: Search the child device list for a device. 
* Return : The i2c child if found. NULL otherwise. */ static struct i2c_child_t *envctrl_get_i2c_child(unsigned char mon_type) { int i, j; for (i = 0; i < ENVCTRL_MAX_CPU*2; i++) { for (j = 0; j < PCF8584_MAX_CHANNELS; j++) { if (i2c_childlist[i].mon_type[j] == mon_type) { return (struct i2c_child_t *)(&(i2c_childlist[i])); } } } return NULL; } static void envctrl_do_shutdown(void) { static int inprog = 0; int ret; if (inprog != 0) return; inprog = 1; printk(KERN_CRIT "kenvctrld: WARNING: Shutting down the system now.\n"); ret = orderly_poweroff(true); if (ret < 0) { printk(KERN_CRIT "kenvctrld: WARNING: system shutdown failed!\n"); inprog = 0; /* unlikely to succeed, but we could try again */ } } static struct task_struct *kenvctrld_task; static int kenvctrld(void *__unused) { int poll_interval; int whichcpu; char tempbuf[10]; struct i2c_child_t *cputemp; if (NULL == (cputemp = envctrl_get_i2c_child(ENVCTRL_CPUTEMP_MON))) { printk(KERN_ERR PFX "kenvctrld unable to monitor CPU temp-- exiting\n"); return -ENODEV; } poll_interval = 5000; /* TODO env_mon_interval */ printk(KERN_INFO PFX "%s starting...\n", current->comm); for (;;) { msleep_interruptible(poll_interval); if (kthread_should_stop()) break; for (whichcpu = 0; whichcpu < ENVCTRL_MAX_CPU; ++whichcpu) { if (0 < envctrl_read_cpu_info(whichcpu, cputemp, ENVCTRL_CPUTEMP_MON, tempbuf)) { if (tempbuf[0] >= shutdown_temperature) { printk(KERN_CRIT "%s: WARNING: CPU%i temperature %i C meets or exceeds "\ "shutdown threshold %i C\n", current->comm, whichcpu, tempbuf[0], shutdown_temperature); envctrl_do_shutdown(); } } } } printk(KERN_INFO PFX "%s exiting...\n", current->comm); return 0; } static int __devinit envctrl_probe(struct of_device *op, const struct of_device_id *match) { struct device_node *dp; int index, err; if (i2c) return -EINVAL; i2c = of_ioremap(&op->resource[0], 0, 0x2, DRIVER_NAME); if (!i2c) return -ENOMEM; index = 0; dp = op->node->child; while (dp) { if (!strcmp(dp->name, "gpio")) { 
i2c_childlist[index].i2ctype = I2C_GPIO; envctrl_init_i2c_child(dp, &(i2c_childlist[index++])); } else if (!strcmp(dp->name, "adc")) { i2c_childlist[index].i2ctype = I2C_ADC; envctrl_init_i2c_child(dp, &(i2c_childlist[index++])); } dp = dp->sibling; } /* Set device address. */ writeb(CONTROL_PIN, i2c + PCF8584_CSR); writeb(PCF8584_ADDRESS, i2c + PCF8584_DATA); /* Set system clock and SCL frequencies. */ writeb(CONTROL_PIN | CONTROL_ES1, i2c + PCF8584_CSR); writeb(CLK_4_43 | BUS_CLK_90, i2c + PCF8584_DATA); /* Enable serial interface. */ writeb(CONTROL_PIN | CONTROL_ES0 | CONTROL_ACK, i2c + PCF8584_CSR); udelay(200); /* Register the device as a minor miscellaneous device. */ err = misc_register(&envctrl_dev); if (err) { printk(KERN_ERR PFX "Unable to get misc minor %d\n", envctrl_dev.minor); goto out_iounmap; } /* Note above traversal routine post-incremented 'i' to accommodate * a next child device, so we decrement before reverse-traversal of * child devices. */ printk(KERN_INFO PFX "Initialized "); for (--index; index >= 0; --index) { printk("[%s 0x%lx]%s", (I2C_ADC == i2c_childlist[index].i2ctype) ? "adc" : ((I2C_GPIO == i2c_childlist[index].i2ctype) ? "gpio" : "unknown"), i2c_childlist[index].addr, (0 == index) ? 
"\n" : " "); } kenvctrld_task = kthread_run(kenvctrld, NULL, "kenvctrld"); if (IS_ERR(kenvctrld_task)) { err = PTR_ERR(kenvctrld_task); goto out_deregister; } return 0; out_deregister: misc_deregister(&envctrl_dev); out_iounmap: of_iounmap(&op->resource[0], i2c, 0x2); for (index = 0; index < ENVCTRL_MAX_CPU * 2; index++) kfree(i2c_childlist[index].tables); return err; } static int __devexit envctrl_remove(struct of_device *op) { int index; kthread_stop(kenvctrld_task); of_iounmap(&op->resource[0], i2c, 0x2); misc_deregister(&envctrl_dev); for (index = 0; index < ENVCTRL_MAX_CPU * 2; index++) kfree(i2c_childlist[index].tables); return 0; } static const struct of_device_id envctrl_match[] = { { .name = "i2c", .compatible = "i2cpcf,8584", }, {}, }; MODULE_DEVICE_TABLE(of, envctrl_match); static struct of_platform_driver envctrl_driver = { .name = DRIVER_NAME, .match_table = envctrl_match, .probe = envctrl_probe, .remove = __devexit_p(envctrl_remove), }; static int __init envctrl_init(void) { return of_register_driver(&envctrl_driver, &of_bus_type); } static void __exit envctrl_exit(void) { of_unregister_driver(&envctrl_driver); } module_init(envctrl_init); module_exit(envctrl_exit); MODULE_LICENSE("GPL");
gpl-2.0
friedrich420/AEL_NOTE4_N910FXXU1ANK4
drivers/gpu/drm/i915/intel_opregion.c
1994
15471
/* * Copyright 2008 Intel Corporation <hong.liu@intel.com> * Copyright 2008 Red Hat <mjg@redhat.com> * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NON-INFRINGEMENT. IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
* */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/acpi.h> #include <linux/acpi_io.h> #include <acpi/video.h> #include <drm/drmP.h> #include <drm/i915_drm.h> #include "i915_drv.h" #include "intel_drv.h" #define PCI_ASLE 0xe4 #define PCI_ASLS 0xfc #define OPREGION_HEADER_OFFSET 0 #define OPREGION_ACPI_OFFSET 0x100 #define ACPI_CLID 0x01ac /* current lid state indicator */ #define ACPI_CDCK 0x01b0 /* current docking state indicator */ #define OPREGION_SWSCI_OFFSET 0x200 #define OPREGION_ASLE_OFFSET 0x300 #define OPREGION_VBT_OFFSET 0x400 #define OPREGION_SIGNATURE "IntelGraphicsMem" #define MBOX_ACPI (1<<0) #define MBOX_SWSCI (1<<1) #define MBOX_ASLE (1<<2) struct opregion_header { u8 signature[16]; u32 size; u32 opregion_ver; u8 bios_ver[32]; u8 vbios_ver[16]; u8 driver_ver[16]; u32 mboxes; u8 reserved[164]; } __attribute__((packed)); /* OpRegion mailbox #1: public ACPI methods */ struct opregion_acpi { u32 drdy; /* driver readiness */ u32 csts; /* notification status */ u32 cevt; /* current event */ u8 rsvd1[20]; u32 didl[8]; /* supported display devices ID list */ u32 cpdl[8]; /* currently presented display list */ u32 cadl[8]; /* currently active display list */ u32 nadl[8]; /* next active devices list */ u32 aslp; /* ASL sleep time-out */ u32 tidx; /* toggle table index */ u32 chpd; /* current hotplug enable indicator */ u32 clid; /* current lid state*/ u32 cdck; /* current docking state */ u32 sxsw; /* Sx state resume */ u32 evts; /* ASL supported events */ u32 cnot; /* current OS notification */ u32 nrdy; /* driver status */ u8 rsvd2[60]; } __attribute__((packed)); /* OpRegion mailbox #2: SWSCI */ struct opregion_swsci { u32 scic; /* SWSCI command|status|data */ u32 parm; /* command parameters */ u32 dslp; /* driver sleep time-out */ u8 rsvd[244]; } __attribute__((packed)); /* OpRegion mailbox #3: ASLE */ struct opregion_asle { u32 ardy; /* driver readiness */ u32 aslc; /* ASLE interrupt command */ u32 tche; /* technology enabled indicator */ u32 
alsi; /* current ALS illuminance reading */ u32 bclp; /* backlight brightness to set */ u32 pfit; /* panel fitting state */ u32 cblv; /* current brightness level */ u16 bclm[20]; /* backlight level duty cycle mapping table */ u32 cpfm; /* current panel fitting mode */ u32 epfm; /* enabled panel fitting modes */ u8 plut[74]; /* panel LUT and identifier */ u32 pfmb; /* PWM freq and min brightness */ u8 rsvd[102]; } __attribute__((packed)); /* ASLE irq request bits */ #define ASLE_SET_ALS_ILLUM (1 << 0) #define ASLE_SET_BACKLIGHT (1 << 1) #define ASLE_SET_PFIT (1 << 2) #define ASLE_SET_PWM_FREQ (1 << 3) #define ASLE_REQ_MSK 0xf /* response bits of ASLE irq request */ #define ASLE_ALS_ILLUM_FAILED (1<<10) #define ASLE_BACKLIGHT_FAILED (1<<12) #define ASLE_PFIT_FAILED (1<<14) #define ASLE_PWM_FREQ_FAILED (1<<16) /* ASLE backlight brightness to set */ #define ASLE_BCLP_VALID (1<<31) #define ASLE_BCLP_MSK (~(1<<31)) /* ASLE panel fitting request */ #define ASLE_PFIT_VALID (1<<31) #define ASLE_PFIT_CENTER (1<<0) #define ASLE_PFIT_STRETCH_TEXT (1<<1) #define ASLE_PFIT_STRETCH_GFX (1<<2) /* PWM frequency and minimum brightness */ #define ASLE_PFMB_BRIGHTNESS_MASK (0xff) #define ASLE_PFMB_BRIGHTNESS_VALID (1<<8) #define ASLE_PFMB_PWM_MASK (0x7ffffe00) #define ASLE_PFMB_PWM_VALID (1<<31) #define ASLE_CBLV_VALID (1<<31) #define ACPI_OTHER_OUTPUT (0<<8) #define ACPI_VGA_OUTPUT (1<<8) #define ACPI_TV_OUTPUT (2<<8) #define ACPI_DIGITAL_OUTPUT (3<<8) #define ACPI_LVDS_OUTPUT (4<<8) #ifdef CONFIG_ACPI static u32 asle_set_backlight(struct drm_device *dev, u32 bclp) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle __iomem *asle = dev_priv->opregion.asle; u32 max; DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); if (!(bclp & ASLE_BCLP_VALID)) return ASLE_BACKLIGHT_FAILED; bclp &= ASLE_BCLP_MSK; if (bclp > 255) return ASLE_BACKLIGHT_FAILED; max = intel_panel_get_max_backlight(dev); intel_panel_set_backlight(dev, bclp * max / 255); iowrite32((bclp*0x64)/0xff | 
ASLE_CBLV_VALID, &asle->cblv); return 0; } static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi) { /* alsi is the current ALS reading in lux. 0 indicates below sensor range, 0xffff indicates above sensor range. 1-0xfffe are valid */ return 0; } static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb) { struct drm_i915_private *dev_priv = dev->dev_private; if (pfmb & ASLE_PFMB_PWM_VALID) { u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL); u32 pwm = pfmb & ASLE_PFMB_PWM_MASK; blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK; pwm = pwm >> 9; /* FIXME - what do we do with the PWM? */ } return 0; } static u32 asle_set_pfit(struct drm_device *dev, u32 pfit) { /* Panel fitting is currently controlled by the X code, so this is a noop until modesetting support works fully */ if (!(pfit & ASLE_PFIT_VALID)) return ASLE_PFIT_FAILED; return 0; } void intel_opregion_asle_intr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle __iomem *asle = dev_priv->opregion.asle; u32 asle_stat = 0; u32 asle_req; if (!asle) return; asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK; if (!asle_req) { DRM_DEBUG_DRIVER("non asle set request??\n"); return; } if (asle_req & ASLE_SET_ALS_ILLUM) asle_stat |= asle_set_als_illum(dev, ioread32(&asle->alsi)); if (asle_req & ASLE_SET_BACKLIGHT) asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); if (asle_req & ASLE_SET_PFIT) asle_stat |= asle_set_pfit(dev, ioread32(&asle->pfit)); if (asle_req & ASLE_SET_PWM_FREQ) asle_stat |= asle_set_pwm_freq(dev, ioread32(&asle->pfmb)); iowrite32(asle_stat, &asle->aslc); } void intel_opregion_gse_intr(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle __iomem *asle = dev_priv->opregion.asle; u32 asle_stat = 0; u32 asle_req; if (!asle) return; asle_req = ioread32(&asle->aslc) & ASLE_REQ_MSK; if (!asle_req) { DRM_DEBUG_DRIVER("non asle set request??\n"); return; } if (asle_req & ASLE_SET_ALS_ILLUM) { 
DRM_DEBUG_DRIVER("Illum is not supported\n"); asle_stat |= ASLE_ALS_ILLUM_FAILED; } if (asle_req & ASLE_SET_BACKLIGHT) asle_stat |= asle_set_backlight(dev, ioread32(&asle->bclp)); if (asle_req & ASLE_SET_PFIT) { DRM_DEBUG_DRIVER("Pfit is not supported\n"); asle_stat |= ASLE_PFIT_FAILED; } if (asle_req & ASLE_SET_PWM_FREQ) { DRM_DEBUG_DRIVER("PWM freq is not supported\n"); asle_stat |= ASLE_PWM_FREQ_FAILED; } iowrite32(asle_stat, &asle->aslc); } #define ASLE_ALS_EN (1<<0) #define ASLE_BLC_EN (1<<1) #define ASLE_PFIT_EN (1<<2) #define ASLE_PFMB_EN (1<<3) void intel_opregion_enable_asle(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct opregion_asle __iomem *asle = dev_priv->opregion.asle; if (asle) { if (IS_MOBILE(dev)) intel_enable_asle(dev); iowrite32(ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN | ASLE_PFMB_EN, &asle->tche); iowrite32(1, &asle->ardy); } } #define ACPI_EV_DISPLAY_SWITCH (1<<0) #define ACPI_EV_LID (1<<1) #define ACPI_EV_DOCK (1<<2) static struct intel_opregion *system_opregion; static int intel_opregion_video_event(struct notifier_block *nb, unsigned long val, void *data) { /* The only video events relevant to opregion are 0x80. These indicate either a docking event, lid switch or display switch request. In Linux, these are handled by the dock, button and video drivers. */ struct opregion_acpi __iomem *acpi; struct acpi_bus_event *event = data; int ret = NOTIFY_OK; if (strcmp(event->device_class, ACPI_VIDEO_CLASS) != 0) return NOTIFY_DONE; if (!system_opregion) return NOTIFY_DONE; acpi = system_opregion->acpi; if (event->type == 0x80 && (ioread32(&acpi->cevt) & 1) == 0) ret = NOTIFY_BAD; iowrite32(0, &acpi->csts); return ret; } static struct notifier_block intel_opregion_notifier = { .notifier_call = intel_opregion_video_event, }; /* * Initialise the DIDL field in opregion. This passes a list of devices to * the firmware. 
Values are defined by section B.4.2 of the ACPI specification * (version 3) */ static void intel_didl_outputs(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; struct drm_connector *connector; acpi_handle handle; struct acpi_device *acpi_dev, *acpi_cdev, *acpi_video_bus = NULL; unsigned long long device_id; acpi_status status; u32 temp; int i = 0; handle = DEVICE_ACPI_HANDLE(&dev->pdev->dev); if (!handle || acpi_bus_get_device(handle, &acpi_dev)) return; if (acpi_is_video_device(handle)) acpi_video_bus = acpi_dev; else { list_for_each_entry(acpi_cdev, &acpi_dev->children, node) { if (acpi_is_video_device(acpi_cdev->handle)) { acpi_video_bus = acpi_cdev; break; } } } if (!acpi_video_bus) { pr_warn("No ACPI video bus found\n"); return; } list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) { if (i >= 8) { dev_printk(KERN_ERR, &dev->pdev->dev, "More than 8 outputs detected\n"); return; } status = acpi_evaluate_integer(acpi_cdev->handle, "_ADR", NULL, &device_id); if (ACPI_SUCCESS(status)) { if (!device_id) goto blind_set; iowrite32((u32)(device_id & 0x0f0f), &opregion->acpi->didl[i]); i++; } } end: /* If fewer than 8 outputs, the list must be null terminated */ if (i < 8) iowrite32(0, &opregion->acpi->didl[i]); return; blind_set: i = 0; list_for_each_entry(connector, &dev->mode_config.connector_list, head) { int output_type = ACPI_OTHER_OUTPUT; if (i >= 8) { dev_printk(KERN_ERR, &dev->pdev->dev, "More than 8 outputs detected\n"); return; } switch (connector->connector_type) { case DRM_MODE_CONNECTOR_VGA: case DRM_MODE_CONNECTOR_DVIA: output_type = ACPI_VGA_OUTPUT; break; case DRM_MODE_CONNECTOR_Composite: case DRM_MODE_CONNECTOR_SVIDEO: case DRM_MODE_CONNECTOR_Component: case DRM_MODE_CONNECTOR_9PinDIN: output_type = ACPI_TV_OUTPUT; break; case DRM_MODE_CONNECTOR_DVII: case DRM_MODE_CONNECTOR_DVID: case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_HDMIA: case 
DRM_MODE_CONNECTOR_HDMIB: output_type = ACPI_DIGITAL_OUTPUT; break; case DRM_MODE_CONNECTOR_LVDS: output_type = ACPI_LVDS_OUTPUT; break; } temp = ioread32(&opregion->acpi->didl[i]); iowrite32(temp | (1<<31) | output_type | i, &opregion->acpi->didl[i]); i++; } goto end; } static void intel_setup_cadls(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; int i = 0; u32 disp_id; /* Initialize the CADL field by duplicating the DIDL values. * Technically, this is not always correct as display outputs may exist, * but not active. This initialization is necessary for some Clevo * laptops that check this field before processing the brightness and * display switching hotkeys. Just like DIDL, CADL is NULL-terminated if * there are less than eight devices. */ do { disp_id = ioread32(&opregion->acpi->didl[i]); iowrite32(disp_id, &opregion->acpi->cadl[i]); } while (++i < 8 && disp_id != 0); } void intel_opregion_init(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (!opregion->header) return; if (opregion->acpi) { if (drm_core_check_feature(dev, DRIVER_MODESET)) { intel_didl_outputs(dev); intel_setup_cadls(dev); } /* Notify BIOS we are ready to handle ACPI video ext notifs. * Right now, all the events are handled by the ACPI video module. * We don't actually need to do anything with them. 
*/ iowrite32(0, &opregion->acpi->csts); iowrite32(1, &opregion->acpi->drdy); system_opregion = opregion; register_acpi_notifier(&intel_opregion_notifier); } if (opregion->asle) intel_opregion_enable_asle(dev); } void intel_opregion_fini(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; if (!opregion->header) return; if (opregion->acpi) { iowrite32(0, &opregion->acpi->drdy); system_opregion = NULL; unregister_acpi_notifier(&intel_opregion_notifier); } /* just clear all opregion memory pointers now */ iounmap(opregion->header); opregion->header = NULL; opregion->acpi = NULL; opregion->swsci = NULL; opregion->asle = NULL; opregion->vbt = NULL; } #endif int intel_opregion_setup(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct intel_opregion *opregion = &dev_priv->opregion; void __iomem *base; u32 asls, mboxes; char buf[sizeof(OPREGION_SIGNATURE)]; int err = 0; pci_read_config_dword(dev->pdev, PCI_ASLS, &asls); DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls); if (asls == 0) { DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n"); return -ENOTSUPP; } base = acpi_os_ioremap(asls, OPREGION_SIZE); if (!base) return -ENOMEM; memcpy_fromio(buf, base, sizeof(buf)); if (memcmp(buf, OPREGION_SIGNATURE, 16)) { DRM_DEBUG_DRIVER("opregion signature mismatch\n"); err = -EINVAL; goto err_out; } opregion->header = base; opregion->vbt = base + OPREGION_VBT_OFFSET; opregion->lid_state = base + ACPI_CLID; mboxes = ioread32(&opregion->header->mboxes); if (mboxes & MBOX_ACPI) { DRM_DEBUG_DRIVER("Public ACPI methods supported\n"); opregion->acpi = base + OPREGION_ACPI_OFFSET; } if (mboxes & MBOX_SWSCI) { DRM_DEBUG_DRIVER("SWSCI supported\n"); opregion->swsci = base + OPREGION_SWSCI_OFFSET; } if (mboxes & MBOX_ASLE) { DRM_DEBUG_DRIVER("ASLE supported\n"); opregion->asle = base + OPREGION_ASLE_OFFSET; } return 0; err_out: iounmap(base); return err; }
gpl-2.0
kirananto/RAZOR
drivers/net/wan/wanxl.c
1994
21206
/* * wanXL serial card driver for Linux * host part * * Copyright (C) 2003 Krzysztof Halasa <khc@pm.waw.pl> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * Status: * - Only DTE (external clock) support with NRZ and NRZI encodings * - wanXL100 will require minor driver modifications, no access to hw */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/netdevice.h> #include <linux/hdlc.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <asm/io.h> #include "wanxl.h" static const char* version = "wanXL serial card driver version: 0.48"; #define PLX_CTL_RESET 0x40000000 /* adapter reset */ #undef DEBUG_PKT #undef DEBUG_PCI /* MAILBOX #1 - PUTS COMMANDS */ #define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */ #ifdef __LITTLE_ENDIAN #define MBX1_CMD_BSWAP 0x8C000001 /* little-endian Byte Swap Mode */ #else #define MBX1_CMD_BSWAP 0x8C000000 /* big-endian Byte Swap Mode */ #endif /* MAILBOX #2 - DRAM SIZE */ #define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */ typedef struct { struct net_device *dev; struct card_t *card; spinlock_t lock; /* for wanxl_xmit */ int node; /* physical port #0 - 3 */ unsigned int clock_type; int tx_in, tx_out; struct sk_buff *tx_skbs[TX_BUFFERS]; }port_t; typedef struct { desc_t rx_descs[RX_QUEUE_LENGTH]; port_status_t port_status[4]; }card_status_t; typedef struct card_t { int n_ports; /* 1, 2 or 4 ports */ u8 irq; u8 __iomem *plx; /* PLX PCI9060 virtual base address */ struct pci_dev *pdev; /* for pci_name(pdev) */ int rx_in; struct sk_buff 
*rx_skbs[RX_QUEUE_LENGTH]; card_status_t *status; /* shared between host and card */ dma_addr_t status_address; port_t ports[0]; /* 1 - 4 port_t structures follow */ }card_t; static inline port_t* dev_to_port(struct net_device *dev) { return (port_t *)dev_to_hdlc(dev)->priv; } static inline port_status_t* get_status(port_t *port) { return &port->card->status->port_status[port->node]; } #ifdef DEBUG_PCI static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr, size_t size, int direction) { dma_addr_t addr = pci_map_single(pdev, ptr, size, direction); if (addr + size > 0x100000000LL) pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n", pci_name(pdev), (unsigned long long)addr); return addr; } #undef pci_map_single #define pci_map_single pci_map_single_debug #endif /* Cable and/or personality module change interrupt service */ static inline void wanxl_cable_intr(port_t *port) { u32 value = get_status(port)->cable; int valid = 1; const char *cable, *pm, *dte = "", *dsr = "", *dcd = ""; switch(value & 0x7) { case STATUS_CABLE_V35: cable = "V.35"; break; case STATUS_CABLE_X21: cable = "X.21"; break; case STATUS_CABLE_V24: cable = "V.24"; break; case STATUS_CABLE_EIA530: cable = "EIA530"; break; case STATUS_CABLE_NONE: cable = "no"; break; default: cable = "invalid"; } switch((value >> STATUS_CABLE_PM_SHIFT) & 0x7) { case STATUS_CABLE_V35: pm = "V.35"; break; case STATUS_CABLE_X21: pm = "X.21"; break; case STATUS_CABLE_V24: pm = "V.24"; break; case STATUS_CABLE_EIA530: pm = "EIA530"; break; case STATUS_CABLE_NONE: pm = "no personality"; valid = 0; break; default: pm = "invalid personality"; valid = 0; } if (valid) { if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) { dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" : ", DSR off"; dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" : ", carrier off"; } dte = (value & STATUS_CABLE_DCE) ? 
" DCE" : " DTE"; } netdev_info(port->dev, "%s%s module, %s cable%s%s\n", pm, dte, cable, dsr, dcd); if (value & STATUS_CABLE_DCD) netif_carrier_on(port->dev); else netif_carrier_off(port->dev); } /* Transmit complete interrupt service */ static inline void wanxl_tx_intr(port_t *port) { struct net_device *dev = port->dev; while (1) { desc_t *desc = &get_status(port)->tx_descs[port->tx_in]; struct sk_buff *skb = port->tx_skbs[port->tx_in]; switch (desc->stat) { case PACKET_FULL: case PACKET_EMPTY: netif_wake_queue(dev); return; case PACKET_UNDERRUN: dev->stats.tx_errors++; dev->stats.tx_fifo_errors++; break; default: dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; } desc->stat = PACKET_EMPTY; /* Free descriptor */ pci_unmap_single(port->card->pdev, desc->address, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb_irq(skb); port->tx_in = (port->tx_in + 1) % TX_BUFFERS; } } /* Receive complete interrupt service */ static inline void wanxl_rx_intr(card_t *card) { desc_t *desc; while (desc = &card->status->rx_descs[card->rx_in], desc->stat != PACKET_EMPTY) { if ((desc->stat & PACKET_PORT_MASK) > card->n_ports) pr_crit("%s: received packet for nonexistent port\n", pci_name(card->pdev)); else { struct sk_buff *skb = card->rx_skbs[card->rx_in]; port_t *port = &card->ports[desc->stat & PACKET_PORT_MASK]; struct net_device *dev = port->dev; if (!skb) dev->stats.rx_dropped++; else { pci_unmap_single(card->pdev, desc->address, BUFFER_LENGTH, PCI_DMA_FROMDEVICE); skb_put(skb, desc->length); #ifdef DEBUG_PKT printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len); debug_frame(skb); #endif dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; skb->protocol = hdlc_type_trans(skb, dev); netif_rx(skb); skb = NULL; } if (!skb) { skb = dev_alloc_skb(BUFFER_LENGTH); desc->address = skb ? 
pci_map_single(card->pdev, skb->data, BUFFER_LENGTH, PCI_DMA_FROMDEVICE) : 0; card->rx_skbs[card->rx_in] = skb; } } desc->stat = PACKET_EMPTY; /* Free descriptor */ card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH; } } static irqreturn_t wanxl_intr(int irq, void* dev_id) { card_t *card = dev_id; int i; u32 stat; int handled = 0; while((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) { handled = 1; writel(stat, card->plx + PLX_DOORBELL_FROM_CARD); for (i = 0; i < card->n_ports; i++) { if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i))) wanxl_tx_intr(&card->ports[i]); if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i))) wanxl_cable_intr(&card->ports[i]); } if (stat & (1 << DOORBELL_FROM_CARD_RX)) wanxl_rx_intr(card); } return IRQ_RETVAL(handled); } static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev) { port_t *port = dev_to_port(dev); desc_t *desc; spin_lock(&port->lock); desc = &get_status(port)->tx_descs[port->tx_out]; if (desc->stat != PACKET_EMPTY) { /* should never happen - previous xmit should stop queue */ #ifdef DEBUG_PKT printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name); #endif netif_stop_queue(dev); spin_unlock(&port->lock); return NETDEV_TX_BUSY; /* request packet to be queued */ } #ifdef DEBUG_PKT printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len); debug_frame(skb); #endif port->tx_skbs[port->tx_out] = skb; desc->address = pci_map_single(port->card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE); desc->length = skb->len; desc->stat = PACKET_FULL; writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node), port->card->plx + PLX_DOORBELL_TO_CARD); port->tx_out = (port->tx_out + 1) % TX_BUFFERS; if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) { netif_stop_queue(dev); #ifdef DEBUG_PKT printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name); #endif } spin_unlock(&port->lock); return NETDEV_TX_OK; } static int wanxl_attach(struct net_device *dev, unsigned short encoding, unsigned short parity) { port_t *port 
= dev_to_port(dev); if (encoding != ENCODING_NRZ && encoding != ENCODING_NRZI) return -EINVAL; if (parity != PARITY_NONE && parity != PARITY_CRC32_PR1_CCITT && parity != PARITY_CRC16_PR1_CCITT && parity != PARITY_CRC32_PR0_CCITT && parity != PARITY_CRC16_PR0_CCITT) return -EINVAL; get_status(port)->encoding = encoding; get_status(port)->parity = parity; return 0; } static int wanxl_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { const size_t size = sizeof(sync_serial_settings); sync_serial_settings line; port_t *port = dev_to_port(dev); if (cmd != SIOCWANDEV) return hdlc_ioctl(dev, ifr, cmd); switch (ifr->ifr_settings.type) { case IF_GET_IFACE: ifr->ifr_settings.type = IF_IFACE_SYNC_SERIAL; if (ifr->ifr_settings.size < size) { ifr->ifr_settings.size = size; /* data size wanted */ return -ENOBUFS; } memset(&line, 0, sizeof(line)); line.clock_type = get_status(port)->clocking; line.clock_rate = 0; line.loopback = 0; if (copy_to_user(ifr->ifr_settings.ifs_ifsu.sync, &line, size)) return -EFAULT; return 0; case IF_IFACE_SYNC_SERIAL: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (dev->flags & IFF_UP) return -EBUSY; if (copy_from_user(&line, ifr->ifr_settings.ifs_ifsu.sync, size)) return -EFAULT; if (line.clock_type != CLOCK_EXT && line.clock_type != CLOCK_TXFROMRX) return -EINVAL; /* No such clock setting */ if (line.loopback != 0) return -EINVAL; get_status(port)->clocking = line.clock_type; return 0; default: return hdlc_ioctl(dev, ifr, cmd); } } static int wanxl_open(struct net_device *dev) { port_t *port = dev_to_port(dev); u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD; unsigned long timeout; int i; if (get_status(port)->open) { netdev_err(dev, "port already open\n"); return -EIO; } if ((i = hdlc_open(dev)) != 0) return i; port->tx_in = port->tx_out = 0; for (i = 0; i < TX_BUFFERS; i++) get_status(port)->tx_descs[i].stat = PACKET_EMPTY; /* signal the card */ writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr); timeout = jiffies + HZ; do 
{ if (get_status(port)->open) { netif_start_queue(dev); return 0; } } while (time_after(timeout, jiffies)); netdev_err(dev, "unable to open port\n"); /* ask the card to close the port, should it be still alive */ writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr); return -EFAULT; } static int wanxl_close(struct net_device *dev) { port_t *port = dev_to_port(dev); unsigned long timeout; int i; hdlc_close(dev); /* signal the card */ writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), port->card->plx + PLX_DOORBELL_TO_CARD); timeout = jiffies + HZ; do { if (!get_status(port)->open) break; } while (time_after(timeout, jiffies)); if (get_status(port)->open) netdev_err(dev, "unable to close port\n"); netif_stop_queue(dev); for (i = 0; i < TX_BUFFERS; i++) { desc_t *desc = &get_status(port)->tx_descs[i]; if (desc->stat != PACKET_EMPTY) { desc->stat = PACKET_EMPTY; pci_unmap_single(port->card->pdev, desc->address, port->tx_skbs[i]->len, PCI_DMA_TODEVICE); dev_kfree_skb(port->tx_skbs[i]); } } return 0; } static struct net_device_stats *wanxl_get_stats(struct net_device *dev) { port_t *port = dev_to_port(dev); dev->stats.rx_over_errors = get_status(port)->rx_overruns; dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors; dev->stats.rx_errors = dev->stats.rx_over_errors + dev->stats.rx_frame_errors; return &dev->stats; } static int wanxl_puts_command(card_t *card, u32 cmd) { unsigned long timeout = jiffies + 5 * HZ; writel(cmd, card->plx + PLX_MAILBOX_1); do { if (readl(card->plx + PLX_MAILBOX_1) == 0) return 0; schedule(); }while (time_after(timeout, jiffies)); return -1; } static void wanxl_reset(card_t *card) { u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET; writel(0x80, card->plx + PLX_MAILBOX_0); writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL); readl(card->plx + PLX_CONTROL); /* wait for posted write */ udelay(1); writel(old_value, card->plx + PLX_CONTROL); readl(card->plx + PLX_CONTROL); /* wait for posted write */ } static 
void wanxl_pci_remove_one(struct pci_dev *pdev) { card_t *card = pci_get_drvdata(pdev); int i; for (i = 0; i < card->n_ports; i++) { unregister_hdlc_device(card->ports[i].dev); free_netdev(card->ports[i].dev); } /* unregister and free all host resources */ if (card->irq) free_irq(card->irq, card); wanxl_reset(card); for (i = 0; i < RX_QUEUE_LENGTH; i++) if (card->rx_skbs[i]) { pci_unmap_single(card->pdev, card->status->rx_descs[i].address, BUFFER_LENGTH, PCI_DMA_FROMDEVICE); dev_kfree_skb(card->rx_skbs[i]); } if (card->plx) iounmap(card->plx); if (card->status) pci_free_consistent(pdev, sizeof(card_status_t), card->status, card->status_address); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); kfree(card); } #include "wanxlfw.inc" static const struct net_device_ops wanxl_ops = { .ndo_open = wanxl_open, .ndo_stop = wanxl_close, .ndo_change_mtu = hdlc_change_mtu, .ndo_start_xmit = hdlc_start_xmit, .ndo_do_ioctl = wanxl_ioctl, .ndo_get_stats = wanxl_get_stats, }; static int wanxl_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { card_t *card; u32 ramsize, stat; unsigned long timeout; u32 plx_phy; /* PLX PCI base address */ u32 mem_phy; /* memory PCI base addr */ u8 __iomem *mem; /* memory virtual base addr */ int i, ports, alloc_size; #ifndef MODULE pr_info_once("%s\n", version); #endif i = pci_enable_device(pdev); if (i) return i; /* QUICC can only access first 256 MB of host RAM directly, but PLX9060 DMA does 32-bits for actual packet data transfers */ /* FIXME when PCI/DMA subsystems are fixed. We set both dma_mask and consistent_dma_mask to 28 bits and pray pci_alloc_consistent() will use this info. 
It should work on most platforms */ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(28)) || pci_set_dma_mask(pdev, DMA_BIT_MASK(28))) { pr_err("No usable DMA configuration\n"); return -EIO; } i = pci_request_regions(pdev, "wanXL"); if (i) { pci_disable_device(pdev); return i; } switch (pdev->device) { case PCI_DEVICE_ID_SBE_WANXL100: ports = 1; break; case PCI_DEVICE_ID_SBE_WANXL200: ports = 2; break; default: ports = 4; } alloc_size = sizeof(card_t) + ports * sizeof(port_t); card = kzalloc(alloc_size, GFP_KERNEL); if (card == NULL) { pci_release_regions(pdev); pci_disable_device(pdev); return -ENOBUFS; } pci_set_drvdata(pdev, card); card->pdev = pdev; card->status = pci_alloc_consistent(pdev, sizeof(card_status_t), &card->status_address); if (card->status == NULL) { wanxl_pci_remove_one(pdev); return -ENOBUFS; } #ifdef DEBUG_PCI printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory" " at 0x%LX\n", pci_name(pdev), (unsigned long long)card->status_address); #endif /* FIXME when PCI/DMA subsystems are fixed. We set both dma_mask and consistent_dma_mask back to 32 bits to indicate the card can do 32-bit DMA addressing */ if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) || pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) { pr_err("No usable DMA configuration\n"); wanxl_pci_remove_one(pdev); return -EIO; } /* set up PLX mapping */ plx_phy = pci_resource_start(pdev, 0); card->plx = ioremap_nocache(plx_phy, 0x70); if (!card->plx) { pr_err("ioremap() failed\n"); wanxl_pci_remove_one(pdev); return -EFAULT; } #if RESET_WHILE_LOADING wanxl_reset(card); #endif timeout = jiffies + 20 * HZ; while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) { if (time_before(timeout, jiffies)) { pr_warn("%s: timeout waiting for PUTS to complete\n", pci_name(pdev)); wanxl_pci_remove_one(pdev); return -ENODEV; } switch(stat & 0xC0) { case 0x00: /* hmm - PUTS completed with non-zero code? 
*/ case 0x80: /* PUTS still testing the hardware */ break; default: pr_warn("%s: PUTS test 0x%X failed\n", pci_name(pdev), stat & 0x30); wanxl_pci_remove_one(pdev); return -ENODEV; } schedule(); } /* get on-board memory size (PUTS detects no more than 4 MB) */ ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK; /* set up on-board RAM mapping */ mem_phy = pci_resource_start(pdev, 2); /* sanity check the board's reported memory size */ if (ramsize < BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) { pr_warn("%s: no enough on-board RAM (%u bytes detected, %u bytes required)\n", pci_name(pdev), ramsize, BUFFERS_ADDR + (TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports); wanxl_pci_remove_one(pdev); return -ENODEV; } if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) { pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev)); wanxl_pci_remove_one(pdev); return -ENODEV; } for (i = 0; i < RX_QUEUE_LENGTH; i++) { struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH); card->rx_skbs[i] = skb; if (skb) card->status->rx_descs[i].address = pci_map_single(card->pdev, skb->data, BUFFER_LENGTH, PCI_DMA_FROMDEVICE); } mem = ioremap_nocache(mem_phy, PDM_OFFSET + sizeof(firmware)); if (!mem) { pr_err("ioremap() failed\n"); wanxl_pci_remove_one(pdev); return -EFAULT; } for (i = 0; i < sizeof(firmware); i += 4) writel(ntohl(*(__be32*)(firmware + i)), mem + PDM_OFFSET + i); for (i = 0; i < ports; i++) writel(card->status_address + (void *)&card->status->port_status[i] - (void *)card->status, mem + PDM_OFFSET + 4 + i * 4); writel(card->status_address, mem + PDM_OFFSET + 20); writel(PDM_OFFSET, mem); iounmap(mem); writel(0, card->plx + PLX_MAILBOX_5); if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) { pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev)); wanxl_pci_remove_one(pdev); return -ENODEV; } stat = 0; timeout = jiffies + 5 * HZ; do { if ((stat = readl(card->plx + PLX_MAILBOX_5)) != 0) break; schedule(); }while (time_after(timeout, jiffies)); if (!stat) { 
pr_warn("%s: timeout while initializing card firmware\n", pci_name(pdev)); wanxl_pci_remove_one(pdev); return -ENODEV; } #if DETECT_RAM ramsize = stat; #endif pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n", pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq); /* Allocate IRQ */ if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) { pr_warn("%s: could not allocate IRQ%i\n", pci_name(pdev), pdev->irq); wanxl_pci_remove_one(pdev); return -EBUSY; } card->irq = pdev->irq; for (i = 0; i < ports; i++) { hdlc_device *hdlc; port_t *port = &card->ports[i]; struct net_device *dev = alloc_hdlcdev(port); if (!dev) { pr_err("%s: unable to allocate memory\n", pci_name(pdev)); wanxl_pci_remove_one(pdev); return -ENOMEM; } port->dev = dev; hdlc = dev_to_hdlc(dev); spin_lock_init(&port->lock); dev->tx_queue_len = 50; dev->netdev_ops = &wanxl_ops; hdlc->attach = wanxl_attach; hdlc->xmit = wanxl_xmit; port->card = card; port->node = i; get_status(port)->clocking = CLOCK_EXT; if (register_hdlc_device(dev)) { pr_err("%s: unable to register hdlc device\n", pci_name(pdev)); free_netdev(dev); wanxl_pci_remove_one(pdev); return -ENOBUFS; } card->n_ports++; } pr_info("%s: port", pci_name(pdev)); for (i = 0; i < ports; i++) pr_cont("%s #%i: %s", i ? 
"," : "", i, card->ports[i].dev->name); pr_cont("\n"); for (i = 0; i < ports; i++) wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/ return 0; } static DEFINE_PCI_DEVICE_TABLE(wanxl_pci_tbl) = { { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0, } }; static struct pci_driver wanxl_pci_driver = { .name = "wanXL", .id_table = wanxl_pci_tbl, .probe = wanxl_pci_init_one, .remove = wanxl_pci_remove_one, }; static int __init wanxl_init_module(void) { #ifdef MODULE pr_info("%s\n", version); #endif return pci_register_driver(&wanxl_pci_driver); } static void __exit wanxl_cleanup_module(void) { pci_unregister_driver(&wanxl_pci_driver); } MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>"); MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver"); MODULE_LICENSE("GPL v2"); MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl); module_init(wanxl_init_module); module_exit(wanxl_cleanup_module);
gpl-2.0
i-maravic/MPLS-Linux
drivers/gpu/drm/nouveau/core/subdev/bar/base.c
2506
3822
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Ben Skeggs */ #include <core/object.h> #include <subdev/bar.h> struct nouveau_barobj { struct nouveau_object base; struct nouveau_vma vma; void __iomem *iomem; }; static int nouveau_barobj_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, void *mem, u32 size, struct nouveau_object **pobject) { struct nouveau_bar *bar = (void *)engine; struct nouveau_barobj *barobj; int ret; ret = nouveau_object_create(parent, engine, oclass, 0, &barobj); *pobject = nv_object(barobj); if (ret) return ret; ret = bar->kmap(bar, mem, NV_MEM_ACCESS_RW, &barobj->vma); if (ret) return ret; barobj->iomem = bar->iomem + (u32)barobj->vma.offset; return 0; } static void nouveau_barobj_dtor(struct nouveau_object *object) { struct nouveau_bar *bar = (void *)object->engine; struct nouveau_barobj *barobj = (void *)object; if (barobj->vma.node) bar->unmap(bar, &barobj->vma); nouveau_object_destroy(&barobj->base); } static u32 nouveau_barobj_rd32(struct nouveau_object *object, u64 addr) { struct nouveau_barobj *barobj = (void *)object; return ioread32_native(barobj->iomem + addr); } static void nouveau_barobj_wr32(struct nouveau_object *object, u64 addr, u32 data) { struct nouveau_barobj *barobj = (void *)object; iowrite32_native(data, barobj->iomem + addr); } static struct nouveau_oclass nouveau_barobj_oclass = { .ofuncs = &(struct nouveau_ofuncs) { .ctor = nouveau_barobj_ctor, .dtor = nouveau_barobj_dtor, .init = nouveau_object_init, .fini = nouveau_object_fini, .rd32 = nouveau_barobj_rd32, .wr32 = nouveau_barobj_wr32, }, }; int nouveau_bar_alloc(struct nouveau_bar *bar, struct nouveau_object *parent, struct nouveau_mem *mem, struct nouveau_object **pobject) { struct nouveau_object *engine = nv_object(bar); return nouveau_object_ctor(parent, engine, &nouveau_barobj_oclass, mem, 0, pobject); } int nouveau_bar_create_(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, int length, void **pobject) { 
struct nouveau_device *device = nv_device(parent); struct nouveau_bar *bar; int ret; ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL", "bar", length, pobject); bar = *pobject; if (ret) return ret; bar->iomem = ioremap(pci_resource_start(device->pdev, 3), pci_resource_len(device->pdev, 3)); return 0; } void nouveau_bar_destroy(struct nouveau_bar *bar) { if (bar->iomem) iounmap(bar->iomem); nouveau_subdev_destroy(&bar->base); } void _nouveau_bar_dtor(struct nouveau_object *object) { struct nouveau_bar *bar = (void *)object; nouveau_bar_destroy(bar); }
gpl-2.0
Jamesjue/linux_kernel_db
arch/sh/kernel/cpu/sh4a/setup-sh7366.c
2506
11839
/* * SH7366 Setup * * Copyright (C) 2008 Renesas Solutions * * Based on linux/arch/sh/kernel/cpu/sh4a/setup-sh7722.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/serial_sci.h> #include <linux/uio_driver.h> #include <linux/sh_timer.h> #include <linux/sh_intc.h> #include <linux/usb/r8a66597.h> #include <asm/clock.h> static struct plat_sci_port scif0_platform_data = { .mapbase = 0xffe00000, .port_reg = 0xa405013e, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = SCIx_IRQ_MUXED(evt2irq(0xc00)), }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; static struct resource iic_resources[] = { [0] = { .name = "IIC", .start = 0x04470000, .end = 0x04470017, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xe00), .end = evt2irq(0xe60), .flags = IORESOURCE_IRQ, }, }; static struct platform_device iic_device = { .name = "i2c-sh_mobile", .id = 0, /* "i2c0" clock */ .num_resources = ARRAY_SIZE(iic_resources), .resource = iic_resources, }; static struct r8a66597_platdata r8a66597_data = { .on_chip = 1, }; static struct resource usb_host_resources[] = { [0] = { .start = 0xa4d80000, .end = 0xa4d800ff, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xa20), .end = evt2irq(0xa20), .flags = IORESOURCE_IRQ | IRQF_TRIGGER_LOW, }, }; static struct platform_device usb_host_device = { .name = "r8a66597_hcd", .id = -1, .dev = { .dma_mask = NULL, .coherent_dma_mask = 0xffffffff, .platform_data = &r8a66597_data, }, .num_resources = ARRAY_SIZE(usb_host_resources), .resource = usb_host_resources, }; static struct uio_info vpu_platform_data = { .name = "VPU5", .version = "0", .irq = evt2irq(0x980), }; 
static struct resource vpu_resources[] = { [0] = { .name = "VPU", .start = 0xfe900000, .end = 0xfe902807, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device vpu_device = { .name = "uio_pdrv_genirq", .id = 0, .dev = { .platform_data = &vpu_platform_data, }, .resource = vpu_resources, .num_resources = ARRAY_SIZE(vpu_resources), }; static struct uio_info veu0_platform_data = { .name = "VEU", .version = "0", .irq = evt2irq(0x8c0), }; static struct resource veu0_resources[] = { [0] = { .name = "VEU(1)", .start = 0xfe920000, .end = 0xfe9200b7, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device veu0_device = { .name = "uio_pdrv_genirq", .id = 1, .dev = { .platform_data = &veu0_platform_data, }, .resource = veu0_resources, .num_resources = ARRAY_SIZE(veu0_resources), }; static struct uio_info veu1_platform_data = { .name = "VEU", .version = "0", .irq = evt2irq(0x560), }; static struct resource veu1_resources[] = { [0] = { .name = "VEU(2)", .start = 0xfe924000, .end = 0xfe9240b7, .flags = IORESOURCE_MEM, }, [1] = { /* place holder for contiguous memory */ }, }; static struct platform_device veu1_device = { .name = "uio_pdrv_genirq", .id = 2, .dev = { .platform_data = &veu1_platform_data, }, .resource = veu1_resources, .num_resources = ARRAY_SIZE(veu1_resources), }; static struct sh_timer_config cmt_platform_data = { .channel_offset = 0x60, .timer_bit = 5, .clockevent_rating = 125, .clocksource_rating = 200, }; static struct resource cmt_resources[] = { [0] = { .start = 0x044a0060, .end = 0x044a006b, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0xf00), .flags = IORESOURCE_IRQ, }, }; static struct platform_device cmt_device = { .name = "sh_cmt", .id = 0, .dev = { .platform_data = &cmt_platform_data, }, .resource = cmt_resources, .num_resources = ARRAY_SIZE(cmt_resources), }; static struct sh_timer_config tmu0_platform_data = { .channel_offset = 
0x04, .timer_bit = 0, .clockevent_rating = 200, }; static struct resource tmu0_resources[] = { [0] = { .start = 0xffd80008, .end = 0xffd80013, .flags = IORESOURCE_MEM, }, [1] = { .start = 16, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu0_device = { .name = "sh_tmu", .id = 0, .dev = { .platform_data = &tmu0_platform_data, }, .resource = tmu0_resources, .num_resources = ARRAY_SIZE(tmu0_resources), }; static struct sh_timer_config tmu1_platform_data = { .channel_offset = 0x10, .timer_bit = 1, .clocksource_rating = 200, }; static struct resource tmu1_resources[] = { [0] = { .start = 0xffd80014, .end = 0xffd8001f, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x420), .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu1_device = { .name = "sh_tmu", .id = 1, .dev = { .platform_data = &tmu1_platform_data, }, .resource = tmu1_resources, .num_resources = ARRAY_SIZE(tmu1_resources), }; static struct sh_timer_config tmu2_platform_data = { .channel_offset = 0x1c, .timer_bit = 2, }; static struct resource tmu2_resources[] = { [0] = { .start = 0xffd80020, .end = 0xffd8002b, .flags = IORESOURCE_MEM, }, [1] = { .start = evt2irq(0x440), .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu2_device = { .name = "sh_tmu", .id = 2, .dev = { .platform_data = &tmu2_platform_data, }, .resource = tmu2_resources, .num_resources = ARRAY_SIZE(tmu2_resources), }; static struct platform_device *sh7366_devices[] __initdata = { &scif0_device, &cmt_device, &tmu0_device, &tmu1_device, &tmu2_device, &iic_device, &usb_host_device, &vpu_device, &veu0_device, &veu1_device, }; static int __init sh7366_devices_setup(void) { platform_resource_setup_memory(&vpu_device, "vpu", 2 << 20); platform_resource_setup_memory(&veu0_device, "veu0", 2 << 20); platform_resource_setup_memory(&veu1_device, "veu1", 2 << 20); return platform_add_devices(sh7366_devices, ARRAY_SIZE(sh7366_devices)); } arch_initcall(sh7366_devices_setup); static struct platform_device 
*sh7366_early_devices[] __initdata = { &scif0_device, &cmt_device, &tmu0_device, &tmu1_device, &tmu2_device, }; void __init plat_early_device_setup(void) { early_platform_add_devices(sh7366_early_devices, ARRAY_SIZE(sh7366_early_devices)); } enum { UNUSED=0, ENABLED, DISABLED, /* interrupt sources */ IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7, ICB, DMAC0, DMAC1, DMAC2, DMAC3, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU, MFI, VPU, USB, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I, DMAC4, DMAC5, DMAC_DADERR, SCIF, SCIFA1, SCIFA2, DENC, MSIOF, FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI, SDHI, CMT, TSIF, SIU, TMU0, TMU1, TMU2, VEU2, LCDC, /* interrupt groups */ DMAC0123, VIOVOU, MMC, DMAC45, FLCTL, I2C, }; static struct intc_vect vectors[] __initdata = { INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620), INTC_VECT(IRQ2, 0x640), INTC_VECT(IRQ3, 0x660), INTC_VECT(IRQ4, 0x680), INTC_VECT(IRQ5, 0x6a0), INTC_VECT(IRQ6, 0x6c0), INTC_VECT(IRQ7, 0x6e0), INTC_VECT(ICB, 0x700), INTC_VECT(DMAC0, 0x800), INTC_VECT(DMAC1, 0x820), INTC_VECT(DMAC2, 0x840), INTC_VECT(DMAC3, 0x860), INTC_VECT(VIO_CEUI, 0x880), INTC_VECT(VIO_BEUI, 0x8a0), INTC_VECT(VIO_VEUI, 0x8c0), INTC_VECT(VOU, 0x8e0), INTC_VECT(MFI, 0x900), INTC_VECT(VPU, 0x980), INTC_VECT(USB, 0xa20), INTC_VECT(MMC_MMC1I, 0xb00), INTC_VECT(MMC_MMC2I, 0xb20), INTC_VECT(MMC_MMC3I, 0xb40), INTC_VECT(DMAC4, 0xb80), INTC_VECT(DMAC5, 0xba0), INTC_VECT(DMAC_DADERR, 0xbc0), INTC_VECT(SCIF, 0xc00), INTC_VECT(SCIFA1, 0xc20), INTC_VECT(SCIFA2, 0xc40), INTC_VECT(DENC, 0xc60), INTC_VECT(MSIOF, 0xc80), INTC_VECT(FLCTL_FLSTEI, 0xd80), INTC_VECT(FLCTL_FLENDI, 0xda0), INTC_VECT(FLCTL_FLTREQ0I, 0xdc0), INTC_VECT(FLCTL_FLTREQ1I, 0xde0), INTC_VECT(I2C_ALI, 0xe00), INTC_VECT(I2C_TACKI, 0xe20), INTC_VECT(I2C_WAITI, 0xe40), INTC_VECT(I2C_DTEI, 0xe60), INTC_VECT(SDHI, 0xe80), INTC_VECT(SDHI, 0xea0), INTC_VECT(SDHI, 0xec0), INTC_VECT(SDHI, 0xee0), INTC_VECT(CMT, 0xf00), INTC_VECT(TSIF, 0xf20), INTC_VECT(SIU, 0xf80), 
INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), INTC_VECT(TMU2, 0x440), INTC_VECT(VEU2, 0x560), INTC_VECT(LCDC, 0x580), }; static struct intc_group groups[] __initdata = { INTC_GROUP(DMAC0123, DMAC0, DMAC1, DMAC2, DMAC3), INTC_GROUP(VIOVOU, VIO_CEUI, VIO_BEUI, VIO_VEUI, VOU), INTC_GROUP(MMC, MMC_MMC1I, MMC_MMC2I, MMC_MMC3I), INTC_GROUP(DMAC45, DMAC4, DMAC5, DMAC_DADERR), INTC_GROUP(FLCTL, FLCTL_FLSTEI, FLCTL_FLENDI, FLCTL_FLTREQ0I, FLCTL_FLTREQ1I), INTC_GROUP(I2C, I2C_ALI, I2C_TACKI, I2C_WAITI, I2C_DTEI), }; static struct intc_mask_reg mask_registers[] __initdata = { { 0xa4080080, 0xa40800c0, 8, /* IMR0 / IMCR0 */ { } }, { 0xa4080084, 0xa40800c4, 8, /* IMR1 / IMCR1 */ { VOU, VIO_VEUI, VIO_BEUI, VIO_CEUI, DMAC3, DMAC2, DMAC1, DMAC0 } }, { 0xa4080088, 0xa40800c8, 8, /* IMR2 / IMCR2 */ { 0, 0, 0, VPU, 0, 0, 0, MFI } }, { 0xa408008c, 0xa40800cc, 8, /* IMR3 / IMCR3 */ { 0, 0, 0, ICB } }, { 0xa4080090, 0xa40800d0, 8, /* IMR4 / IMCR4 */ { 0, TMU2, TMU1, TMU0, VEU2, 0, 0, LCDC } }, { 0xa4080094, 0xa40800d4, 8, /* IMR5 / IMCR5 */ { 0, DMAC_DADERR, DMAC5, DMAC4, DENC, SCIFA2, SCIFA1, SCIF } }, { 0xa4080098, 0xa40800d8, 8, /* IMR6 / IMCR6 */ { 0, 0, 0, 0, 0, 0, 0, MSIOF } }, { 0xa408009c, 0xa40800dc, 8, /* IMR7 / IMCR7 */ { I2C_DTEI, I2C_WAITI, I2C_TACKI, I2C_ALI, FLCTL_FLTREQ1I, FLCTL_FLTREQ0I, FLCTL_FLENDI, FLCTL_FLSTEI } }, { 0xa40800a0, 0xa40800e0, 8, /* IMR8 / IMCR8 */ { DISABLED, ENABLED, ENABLED, ENABLED, 0, 0, 0, SIU } }, { 0xa40800a4, 0xa40800e4, 8, /* IMR9 / IMCR9 */ { 0, 0, 0, CMT, 0, USB, } }, { 0xa40800a8, 0xa40800e8, 8, /* IMR10 / IMCR10 */ { 0, MMC_MMC3I, MMC_MMC2I, MMC_MMC1I } }, { 0xa40800ac, 0xa40800ec, 8, /* IMR11 / IMCR11 */ { 0, 0, 0, 0, 0, 0, 0, TSIF } }, { 0xa4140044, 0xa4140064, 8, /* INTMSK00 / INTMSKCLR00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xa4080000, 0, 16, 4, /* IPRA */ { TMU0, TMU1, TMU2 } }, { 0xa4080004, 0, 16, 4, /* IPRB */ { VEU2, LCDC, ICB } }, { 
0xa4080008, 0, 16, 4, /* IPRC */ { } }, { 0xa408000c, 0, 16, 4, /* IPRD */ { } }, { 0xa4080010, 0, 16, 4, /* IPRE */ { DMAC0123, VIOVOU, MFI, VPU } }, { 0xa4080014, 0, 16, 4, /* IPRF */ { 0, DMAC45, USB, CMT } }, { 0xa4080018, 0, 16, 4, /* IPRG */ { SCIF, SCIFA1, SCIFA2, DENC } }, { 0xa408001c, 0, 16, 4, /* IPRH */ { MSIOF, 0, FLCTL, I2C } }, { 0xa4080020, 0, 16, 4, /* IPRI */ { 0, 0, TSIF, } }, { 0xa4080024, 0, 16, 4, /* IPRJ */ { 0, 0, SIU } }, { 0xa4080028, 0, 16, 4, /* IPRK */ { 0, MMC, 0, SDHI } }, { 0xa408002c, 0, 16, 4, /* IPRL */ { } }, { 0xa4140010, 0, 32, 4, /* INTPRI00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_sense_reg sense_registers[] __initdata = { { 0xa414001c, 16, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_mask_reg ack_registers[] __initdata = { { 0xa4140024, 0, 8, /* INTREQ00 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, IRQ6, IRQ7 } }, }; static struct intc_desc intc_desc __initdata = { .name = "sh7366", .force_enable = ENABLED, .force_disable = DISABLED, .hw = INTC_HW_DESC(vectors, groups, mask_registers, prio_registers, sense_registers, ack_registers), }; void __init plat_irq_setup(void) { register_intc_controller(&intc_desc); } void __init plat_mem_setup(void) { /* TODO: Register Node 1 */ }
gpl-2.0
philozheng/kernel-msm
drivers/net/wireless/mwifiex/cfg80211.c
2762
40922
/* * Marvell Wireless LAN device driver: CFG80211 * * Copyright (C) 2011, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include "cfg80211.h" #include "main.h" /* * This function maps the nl802.11 channel type into driver channel type. * * The mapping is as follows - * NL80211_CHAN_NO_HT -> IEEE80211_HT_PARAM_CHA_SEC_NONE * NL80211_CHAN_HT20 -> IEEE80211_HT_PARAM_CHA_SEC_NONE * NL80211_CHAN_HT40PLUS -> IEEE80211_HT_PARAM_CHA_SEC_ABOVE * NL80211_CHAN_HT40MINUS -> IEEE80211_HT_PARAM_CHA_SEC_BELOW * Others -> IEEE80211_HT_PARAM_CHA_SEC_NONE */ static u8 mwifiex_cfg80211_channel_type_to_sec_chan_offset(enum nl80211_channel_type channel_type) { switch (channel_type) { case NL80211_CHAN_NO_HT: case NL80211_CHAN_HT20: return IEEE80211_HT_PARAM_CHA_SEC_NONE; case NL80211_CHAN_HT40PLUS: return IEEE80211_HT_PARAM_CHA_SEC_ABOVE; case NL80211_CHAN_HT40MINUS: return IEEE80211_HT_PARAM_CHA_SEC_BELOW; default: return IEEE80211_HT_PARAM_CHA_SEC_NONE; } } /* * This function checks whether WEP is set. */ static int mwifiex_is_alg_wep(u32 cipher) { switch (cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: return 1; default: break; } return 0; } /* * This function retrieves the private structure from kernel wiphy structure. 
*/ static void *mwifiex_cfg80211_get_priv(struct wiphy *wiphy) { return (void *) (*(unsigned long *) wiphy_priv(wiphy)); } /* * CFG802.11 operation handler to delete a network key. */ static int mwifiex_cfg80211_del_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev); if (mwifiex_set_encode(priv, NULL, 0, key_index, 1)) { wiphy_err(wiphy, "deleting the crypto keys\n"); return -EFAULT; } wiphy_dbg(wiphy, "info: crypto keys deleted\n"); return 0; } /* * CFG802.11 operation handler to set Tx power. */ static int mwifiex_cfg80211_set_tx_power(struct wiphy *wiphy, enum nl80211_tx_power_setting type, int mbm) { struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); struct mwifiex_power_cfg power_cfg; int dbm = MBM_TO_DBM(mbm); if (type == NL80211_TX_POWER_FIXED) { power_cfg.is_power_auto = 0; power_cfg.power_level = dbm; } else { power_cfg.is_power_auto = 1; } return mwifiex_set_tx_power(priv, &power_cfg); } /* * CFG802.11 operation handler to set Power Save option. * * The timeout value, if provided, is currently ignored. */ static int mwifiex_cfg80211_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, int timeout) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); u32 ps_mode; if (timeout) wiphy_dbg(wiphy, "info: ignore timeout value for IEEE Power Save\n"); ps_mode = enabled; return mwifiex_drv_set_power(priv, &ps_mode); } /* * CFG802.11 operation handler to set the default network key. 
*/ static int mwifiex_cfg80211_set_default_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool unicast, bool multicast) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev); /* Return if WEP key not configured */ if (!priv->sec_info.wep_enabled) return 0; if (mwifiex_set_encode(priv, NULL, 0, key_index, 0)) { wiphy_err(wiphy, "set default Tx key index\n"); return -EFAULT; } return 0; } /* * CFG802.11 operation handler to add a network key. */ static int mwifiex_cfg80211_add_key(struct wiphy *wiphy, struct net_device *netdev, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(netdev); if (mwifiex_set_encode(priv, params->key, params->key_len, key_index, 0)) { wiphy_err(wiphy, "crypto keys added\n"); return -EFAULT; } return 0; } /* * This function sends domain information to the firmware. * * The following information are passed to the firmware - * - Country codes * - Sub bands (first channel, number of channels, maximum Tx power) */ static int mwifiex_send_domain_info_cmd_fw(struct wiphy *wiphy) { u8 no_of_triplet = 0; struct ieee80211_country_ie_triplet *t; u8 no_of_parsed_chan = 0; u8 first_chan = 0, next_chan = 0, max_pwr = 0; u8 i, flag = 0; enum ieee80211_band band; struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_802_11d_domain_reg *domain_info = &adapter->domain_reg; /* Set country code */ domain_info->country_code[0] = priv->country_code[0]; domain_info->country_code[1] = priv->country_code[1]; domain_info->country_code[2] = ' '; band = mwifiex_band_to_radio_type(adapter->config_bands); if (!wiphy->bands[band]) { wiphy_err(wiphy, "11D: setting domain info in FW\n"); return -1; } sband = wiphy->bands[band]; for (i = 0; i < sband->n_channels ; i++) { ch = &sband->channels[i]; if (ch->flags & 
IEEE80211_CHAN_DISABLED) continue; if (!flag) { flag = 1; first_chan = (u32) ch->hw_value; next_chan = first_chan; max_pwr = ch->max_power; no_of_parsed_chan = 1; continue; } if (ch->hw_value == next_chan + 1 && ch->max_power == max_pwr) { next_chan++; no_of_parsed_chan++; } else { t = &domain_info->triplet[no_of_triplet]; t->chans.first_channel = first_chan; t->chans.num_channels = no_of_parsed_chan; t->chans.max_power = max_pwr; no_of_triplet++; first_chan = (u32) ch->hw_value; next_chan = first_chan; max_pwr = ch->max_power; no_of_parsed_chan = 1; } } if (flag) { t = &domain_info->triplet[no_of_triplet]; t->chans.first_channel = first_chan; t->chans.num_channels = no_of_parsed_chan; t->chans.max_power = max_pwr; no_of_triplet++; } domain_info->no_of_triplet = no_of_triplet; if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO, HostCmd_ACT_GEN_SET, 0, NULL)) { wiphy_err(wiphy, "11D: setting domain info in FW\n"); return -1; } return 0; } /* * CFG802.11 regulatory domain callback function. * * This function is called when the regulatory domain is changed due to the * following reasons - * - Set by driver * - Set by system core * - Set by user * - Set bt Country IE */ static int mwifiex_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request) { struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); wiphy_dbg(wiphy, "info: cfg80211 regulatory domain callback for domain" " %c%c\n", request->alpha2[0], request->alpha2[1]); memcpy(priv->country_code, request->alpha2, sizeof(request->alpha2)); switch (request->initiator) { case NL80211_REGDOM_SET_BY_DRIVER: case NL80211_REGDOM_SET_BY_CORE: case NL80211_REGDOM_SET_BY_USER: break; /* Todo: apply driver specific changes in channel flags based on the request initiator if necessary. */ case NL80211_REGDOM_SET_BY_COUNTRY_IE: break; } mwifiex_send_domain_info_cmd_fw(wiphy); return 0; } /* * This function sets the RF channel. 
* * This function creates multiple IOCTL requests, populates them accordingly * and issues them to set the band/channel and frequency. */ static int mwifiex_set_rf_channel(struct mwifiex_private *priv, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type) { struct mwifiex_chan_freq_power cfp; u32 config_bands = 0; struct wiphy *wiphy = priv->wdev->wiphy; struct mwifiex_adapter *adapter = priv->adapter; if (chan) { /* Set appropriate bands */ if (chan->band == IEEE80211_BAND_2GHZ) { if (channel_type == NL80211_CHAN_NO_HT) if (priv->adapter->config_bands == BAND_B || priv->adapter->config_bands == BAND_G) config_bands = priv->adapter->config_bands; else config_bands = BAND_B | BAND_G; else config_bands = BAND_B | BAND_G | BAND_GN; } else { if (channel_type == NL80211_CHAN_NO_HT) config_bands = BAND_A; else config_bands = BAND_AN | BAND_A; } if (!((config_bands | adapter->fw_bands) & ~adapter->fw_bands)) { adapter->config_bands = config_bands; if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { adapter->adhoc_start_band = config_bands; if ((config_bands & BAND_GN) || (config_bands & BAND_AN)) adapter->adhoc_11n_enabled = true; else adapter->adhoc_11n_enabled = false; } } adapter->sec_chan_offset = mwifiex_cfg80211_channel_type_to_sec_chan_offset (channel_type); adapter->channel_type = channel_type; mwifiex_send_domain_info_cmd_fw(wiphy); } wiphy_dbg(wiphy, "info: setting band %d, chan offset %d, mode %d\n", config_bands, adapter->sec_chan_offset, priv->bss_mode); if (!chan) return 0; memset(&cfp, 0, sizeof(cfp)); cfp.freq = chan->center_freq; cfp.channel = ieee80211_frequency_to_channel(chan->center_freq); if (mwifiex_bss_set_channel(priv, &cfp)) return -EFAULT; return mwifiex_drv_change_adhoc_chan(priv, cfp.channel); } /* * CFG802.11 operation handler to set channel. * * This function can only be used when station is not connected. 
*/ static int mwifiex_cfg80211_set_channel(struct wiphy *wiphy, struct net_device *dev, struct ieee80211_channel *chan, enum nl80211_channel_type channel_type) { struct mwifiex_private *priv; if (dev) priv = mwifiex_netdev_get_priv(dev); else priv = mwifiex_cfg80211_get_priv(wiphy); if (priv->media_connected) { wiphy_err(wiphy, "This setting is valid only when station " "is not connected\n"); return -EINVAL; } return mwifiex_set_rf_channel(priv, chan, channel_type); } /* * This function sets the fragmentation threshold. * * The fragmentation threshold value must lie between MWIFIEX_FRAG_MIN_VALUE * and MWIFIEX_FRAG_MAX_VALUE. */ static int mwifiex_set_frag(struct mwifiex_private *priv, u32 frag_thr) { int ret; if (frag_thr < MWIFIEX_FRAG_MIN_VALUE || frag_thr > MWIFIEX_FRAG_MAX_VALUE) return -EINVAL; /* Send request to firmware */ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, HostCmd_ACT_GEN_SET, FRAG_THRESH_I, &frag_thr); return ret; } /* * This function sets the RTS threshold. * The rts value must lie between MWIFIEX_RTS_MIN_VALUE * and MWIFIEX_RTS_MAX_VALUE. */ static int mwifiex_set_rts(struct mwifiex_private *priv, u32 rts_thr) { if (rts_thr < MWIFIEX_RTS_MIN_VALUE || rts_thr > MWIFIEX_RTS_MAX_VALUE) rts_thr = MWIFIEX_RTS_MAX_VALUE; return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, HostCmd_ACT_GEN_SET, RTS_THRESH_I, &rts_thr); } /* * CFG802.11 operation handler to set wiphy parameters. * * This function can be used to set the RTS threshold and the * Fragmentation threshold of the driver. */ static int mwifiex_cfg80211_set_wiphy_params(struct wiphy *wiphy, u32 changed) { struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); int ret = 0; if (changed & WIPHY_PARAM_RTS_THRESHOLD) { ret = mwifiex_set_rts(priv, wiphy->rts_threshold); if (ret) return ret; } if (changed & WIPHY_PARAM_FRAG_THRESHOLD) ret = mwifiex_set_frag(priv, wiphy->frag_threshold); return ret; } /* * CFG802.11 operation handler to change interface type. 
*/ static int mwifiex_cfg80211_change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { int ret; struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (priv->bss_mode == type) { wiphy_warn(wiphy, "already set to required type\n"); return 0; } priv->bss_mode = type; switch (type) { case NL80211_IFTYPE_ADHOC: dev->ieee80211_ptr->iftype = NL80211_IFTYPE_ADHOC; wiphy_dbg(wiphy, "info: setting interface type to adhoc\n"); break; case NL80211_IFTYPE_STATION: dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; wiphy_dbg(wiphy, "info: setting interface type to managed\n"); break; case NL80211_IFTYPE_UNSPECIFIED: dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; wiphy_dbg(wiphy, "info: setting interface type to auto\n"); return 0; default: wiphy_err(wiphy, "unknown interface type: %d\n", type); return -EINVAL; } mwifiex_deauthenticate(priv, NULL); priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM; ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE, HostCmd_ACT_GEN_SET, 0, NULL); return ret; } /* * This function dumps the station information on a buffer. 
* * The following information are shown - * - Total bytes transmitted * - Total bytes received * - Total packets transmitted * - Total packets received * - Signal quality level * - Transmission rate */ static int mwifiex_dump_station_info(struct mwifiex_private *priv, struct station_info *sinfo) { struct mwifiex_ds_get_signal signal; struct mwifiex_rate_cfg rate; int ret = 0; sinfo->filled = STATION_INFO_RX_BYTES | STATION_INFO_TX_BYTES | STATION_INFO_RX_PACKETS | STATION_INFO_TX_PACKETS | STATION_INFO_SIGNAL | STATION_INFO_TX_BITRATE; /* Get signal information from the firmware */ memset(&signal, 0, sizeof(struct mwifiex_ds_get_signal)); if (mwifiex_get_signal_info(priv, &signal)) { dev_err(priv->adapter->dev, "getting signal information\n"); ret = -EFAULT; } if (mwifiex_drv_get_data_rate(priv, &rate)) { dev_err(priv->adapter->dev, "getting data rate\n"); ret = -EFAULT; } /* Get DTIM period information from firmware */ mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_SNMP_MIB, HostCmd_ACT_GEN_GET, DTIM_PERIOD_I, &priv->dtim_period); /* * Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid * MCS index values for us are 0 to 7. */ if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 8)) { sinfo->txrate.mcs = priv->tx_rate; sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS; /* 40MHz rate */ if (priv->tx_htinfo & BIT(1)) sinfo->txrate.flags |= RATE_INFO_FLAGS_40_MHZ_WIDTH; /* SGI enabled */ if (priv->tx_htinfo & BIT(2)) sinfo->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI; } sinfo->rx_bytes = priv->stats.rx_bytes; sinfo->tx_bytes = priv->stats.tx_bytes; sinfo->rx_packets = priv->stats.rx_packets; sinfo->tx_packets = priv->stats.tx_packets; sinfo->signal = priv->qual_level; /* bit rate is in 500 kb/s units. 
Convert it to 100kb/s units */ sinfo->txrate.legacy = rate.rate * 5; if (priv->bss_mode == NL80211_IFTYPE_STATION) { sinfo->filled |= STATION_INFO_BSS_PARAM; sinfo->bss_param.flags = 0; if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap & WLAN_CAPABILITY_SHORT_PREAMBLE) sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_PREAMBLE; if (priv->curr_bss_params.bss_descriptor.cap_info_bitmap & WLAN_CAPABILITY_SHORT_SLOT_TIME) sinfo->bss_param.flags |= BSS_PARAM_FLAGS_SHORT_SLOT_TIME; sinfo->bss_param.dtim_period = priv->dtim_period; sinfo->bss_param.beacon_interval = priv->curr_bss_params.bss_descriptor.beacon_period; } return ret; } /* * CFG802.11 operation handler to get station information. * * This function only works in connected mode, and dumps the * requested station information, if available. */ static int mwifiex_cfg80211_get_station(struct wiphy *wiphy, struct net_device *dev, u8 *mac, struct station_info *sinfo) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (!priv->media_connected) return -ENOENT; if (memcmp(mac, priv->cfg_bssid, ETH_ALEN)) return -ENOENT; return mwifiex_dump_station_info(priv, sinfo); } /* Supported rates to be advertised to the cfg80211 */ static struct ieee80211_rate mwifiex_rates[] = { {.bitrate = 10, .hw_value = 2, }, {.bitrate = 20, .hw_value = 4, }, {.bitrate = 55, .hw_value = 11, }, {.bitrate = 110, .hw_value = 22, }, {.bitrate = 60, .hw_value = 12, }, {.bitrate = 90, .hw_value = 18, }, {.bitrate = 120, .hw_value = 24, }, {.bitrate = 180, .hw_value = 36, }, {.bitrate = 240, .hw_value = 48, }, {.bitrate = 360, .hw_value = 72, }, {.bitrate = 480, .hw_value = 96, }, {.bitrate = 540, .hw_value = 108, }, }; /* Channel definitions to be advertised to cfg80211 */ static struct ieee80211_channel mwifiex_channels_2ghz[] = { {.center_freq = 2412, .hw_value = 1, }, {.center_freq = 2417, .hw_value = 2, }, {.center_freq = 2422, .hw_value = 3, }, {.center_freq = 2427, .hw_value = 4, }, {.center_freq = 2432, .hw_value = 5, }, 
{.center_freq = 2437, .hw_value = 6, }, {.center_freq = 2442, .hw_value = 7, }, {.center_freq = 2447, .hw_value = 8, }, {.center_freq = 2452, .hw_value = 9, }, {.center_freq = 2457, .hw_value = 10, }, {.center_freq = 2462, .hw_value = 11, }, {.center_freq = 2467, .hw_value = 12, }, {.center_freq = 2472, .hw_value = 13, }, {.center_freq = 2484, .hw_value = 14, }, }; static struct ieee80211_supported_band mwifiex_band_2ghz = { .channels = mwifiex_channels_2ghz, .n_channels = ARRAY_SIZE(mwifiex_channels_2ghz), .bitrates = mwifiex_rates, .n_bitrates = ARRAY_SIZE(mwifiex_rates), }; static struct ieee80211_channel mwifiex_channels_5ghz[] = { {.center_freq = 5040, .hw_value = 8, }, {.center_freq = 5060, .hw_value = 12, }, {.center_freq = 5080, .hw_value = 16, }, {.center_freq = 5170, .hw_value = 34, }, {.center_freq = 5190, .hw_value = 38, }, {.center_freq = 5210, .hw_value = 42, }, {.center_freq = 5230, .hw_value = 46, }, {.center_freq = 5180, .hw_value = 36, }, {.center_freq = 5200, .hw_value = 40, }, {.center_freq = 5220, .hw_value = 44, }, {.center_freq = 5240, .hw_value = 48, }, {.center_freq = 5260, .hw_value = 52, }, {.center_freq = 5280, .hw_value = 56, }, {.center_freq = 5300, .hw_value = 60, }, {.center_freq = 5320, .hw_value = 64, }, {.center_freq = 5500, .hw_value = 100, }, {.center_freq = 5520, .hw_value = 104, }, {.center_freq = 5540, .hw_value = 108, }, {.center_freq = 5560, .hw_value = 112, }, {.center_freq = 5580, .hw_value = 116, }, {.center_freq = 5600, .hw_value = 120, }, {.center_freq = 5620, .hw_value = 124, }, {.center_freq = 5640, .hw_value = 128, }, {.center_freq = 5660, .hw_value = 132, }, {.center_freq = 5680, .hw_value = 136, }, {.center_freq = 5700, .hw_value = 140, }, {.center_freq = 5745, .hw_value = 149, }, {.center_freq = 5765, .hw_value = 153, }, {.center_freq = 5785, .hw_value = 157, }, {.center_freq = 5805, .hw_value = 161, }, {.center_freq = 5825, .hw_value = 165, }, }; static struct ieee80211_supported_band mwifiex_band_5ghz = { 
.channels = mwifiex_channels_5ghz, .n_channels = ARRAY_SIZE(mwifiex_channels_5ghz), .bitrates = mwifiex_rates + 4, .n_bitrates = ARRAY_SIZE(mwifiex_rates) - 4, }; /* Supported crypto cipher suits to be advertised to cfg80211 */ static const u32 mwifiex_cipher_suites[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, }; /* * CFG802.11 operation handler for setting bit rates. * * Function selects legacy bang B/G/BG from corresponding bitrates selection. * Currently only 2.4GHz band is supported. */ static int mwifiex_cfg80211_set_bitrate_mask(struct wiphy *wiphy, struct net_device *dev, const u8 *peer, const struct cfg80211_bitrate_mask *mask) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); int index = 0, mode = 0, i; struct mwifiex_adapter *adapter = priv->adapter; /* Currently only 2.4GHz is supported */ for (i = 0; i < mwifiex_band_2ghz.n_bitrates; i++) { /* * Rates below 6 Mbps in the table are CCK rates; 802.11b * and from 6 they are OFDM; 802.11G */ if (mwifiex_rates[i].bitrate == 60) { index = 1 << i; break; } } if (mask->control[IEEE80211_BAND_2GHZ].legacy < index) { mode = BAND_B; } else { mode = BAND_G; if (mask->control[IEEE80211_BAND_2GHZ].legacy % index) mode |= BAND_B; } if (!((mode | adapter->fw_bands) & ~adapter->fw_bands)) { adapter->config_bands = mode; if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { adapter->adhoc_start_band = mode; adapter->adhoc_11n_enabled = false; } } adapter->sec_chan_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; adapter->channel_type = NL80211_CHAN_NO_HT; wiphy_debug(wiphy, "info: device configured in 802.11%s%s mode\n", (mode & BAND_B) ? "b" : "", (mode & BAND_G) ? "g" : ""); return 0; } /* * CFG802.11 operation handler for disconnection request. * * This function does not work when there is already a disconnection * procedure going on. 
*/ static int mwifiex_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; wiphy_dbg(wiphy, "info: successfully disconnected from %pM:" " reason code %d\n", priv->cfg_bssid, reason_code); memset(priv->cfg_bssid, 0, ETH_ALEN); return 0; } /* * This function informs the CFG802.11 subsystem of a new IBSS. * * The following information are sent to the CFG802.11 subsystem * to register the new IBSS. If we do not register the new IBSS, * a kernel panic will result. * - SSID * - SSID length * - BSSID * - Channel */ static int mwifiex_cfg80211_inform_ibss_bss(struct mwifiex_private *priv) { struct ieee80211_channel *chan; struct mwifiex_bss_info bss_info; struct cfg80211_bss *bss; int ie_len; u8 ie_buf[IEEE80211_MAX_SSID_LEN + sizeof(struct ieee_types_header)]; enum ieee80211_band band; if (mwifiex_get_bss_info(priv, &bss_info)) return -1; ie_buf[0] = WLAN_EID_SSID; ie_buf[1] = bss_info.ssid.ssid_len; memcpy(&ie_buf[sizeof(struct ieee_types_header)], &bss_info.ssid.ssid, bss_info.ssid.ssid_len); ie_len = ie_buf[1] + sizeof(struct ieee_types_header); band = mwifiex_band_to_radio_type(priv->curr_bss_params.band); chan = __ieee80211_get_channel(priv->wdev->wiphy, ieee80211_channel_to_frequency(bss_info.bss_chan, band)); bss = cfg80211_inform_bss(priv->wdev->wiphy, chan, bss_info.bssid, 0, WLAN_CAPABILITY_IBSS, 0, ie_buf, ie_len, 0, GFP_KERNEL); cfg80211_put_bss(bss); memcpy(priv->cfg_bssid, bss_info.bssid, ETH_ALEN); return 0; } /* * This function connects with a BSS. * * This function handles both Infra and Ad-Hoc modes. It also performs * validity checking on the provided parameters, disconnects from the * current BSS (if any), sets up the association/scan parameters, * including security settings, and performs specific SSID scan before * trying to connect. 
* * For Infra mode, the function returns failure if the specified SSID * is not found in scan table. However, for Ad-Hoc mode, it can create * the IBSS if it does not exist. On successful completion in either case, * the function notifies the CFG802.11 subsystem of the new BSS connection. */ static int mwifiex_cfg80211_assoc(struct mwifiex_private *priv, size_t ssid_len, u8 *ssid, u8 *bssid, int mode, struct ieee80211_channel *channel, struct cfg80211_connect_params *sme, bool privacy) { struct cfg80211_ssid req_ssid; int ret, auth_type = 0; struct cfg80211_bss *bss = NULL; u8 is_scanning_required = 0; memset(&req_ssid, 0, sizeof(struct cfg80211_ssid)); req_ssid.ssid_len = ssid_len; if (ssid_len > IEEE80211_MAX_SSID_LEN) { dev_err(priv->adapter->dev, "invalid SSID - aborting\n"); return -EINVAL; } memcpy(req_ssid.ssid, ssid, ssid_len); if (!req_ssid.ssid_len || req_ssid.ssid[0] < 0x20) { dev_err(priv->adapter->dev, "invalid SSID - aborting\n"); return -EINVAL; } /* disconnect before try to associate */ mwifiex_deauthenticate(priv, NULL); if (channel) ret = mwifiex_set_rf_channel(priv, channel, priv->adapter->channel_type); /* As this is new association, clear locally stored * keys and security related flags */ priv->sec_info.wpa_enabled = false; priv->sec_info.wpa2_enabled = false; priv->wep_key_curr_index = 0; priv->sec_info.encryption_mode = 0; priv->sec_info.is_authtype_auto = 0; ret = mwifiex_set_encode(priv, NULL, 0, 0, 1); if (mode == NL80211_IFTYPE_ADHOC) { /* "privacy" is set only for ad-hoc mode */ if (privacy) { /* * Keep WLAN_CIPHER_SUITE_WEP104 for now so that * the firmware can find a matching network from the * scan. The cfg80211 does not give us the encryption * mode at this stage so just setting it to WEP here. */ priv->sec_info.encryption_mode = WLAN_CIPHER_SUITE_WEP104; priv->sec_info.authentication_mode = NL80211_AUTHTYPE_OPEN_SYSTEM; } goto done; } /* Now handle infra mode. 
"sme" is valid for infra mode only */ if (sme->auth_type == NL80211_AUTHTYPE_AUTOMATIC) { auth_type = NL80211_AUTHTYPE_OPEN_SYSTEM; priv->sec_info.is_authtype_auto = 1; } else { auth_type = sme->auth_type; } if (sme->crypto.n_ciphers_pairwise) { priv->sec_info.encryption_mode = sme->crypto.ciphers_pairwise[0]; priv->sec_info.authentication_mode = auth_type; } if (sme->crypto.cipher_group) { priv->sec_info.encryption_mode = sme->crypto.cipher_group; priv->sec_info.authentication_mode = auth_type; } if (sme->ie) ret = mwifiex_set_gen_ie(priv, sme->ie, sme->ie_len); if (sme->key) { if (mwifiex_is_alg_wep(priv->sec_info.encryption_mode)) { dev_dbg(priv->adapter->dev, "info: setting wep encryption" " with key len %d\n", sme->key_len); priv->wep_key_curr_index = sme->key_idx; ret = mwifiex_set_encode(priv, sme->key, sme->key_len, sme->key_idx, 0); } } done: /* * Scan entries are valid for some time (15 sec). So we can save one * active scan time if we just try cfg80211_get_bss first. If it fails * then request scan and cfg80211_get_bss() again for final output. 
*/ while (1) { if (is_scanning_required) { /* Do specific SSID scanning */ if (mwifiex_request_scan(priv, &req_ssid)) { dev_err(priv->adapter->dev, "scan error\n"); return -EFAULT; } } /* Find the BSS we want using available scan results */ if (mode == NL80211_IFTYPE_ADHOC) bss = cfg80211_get_bss(priv->wdev->wiphy, channel, bssid, ssid, ssid_len, WLAN_CAPABILITY_IBSS, WLAN_CAPABILITY_IBSS); else bss = cfg80211_get_bss(priv->wdev->wiphy, channel, bssid, ssid, ssid_len, WLAN_CAPABILITY_ESS, WLAN_CAPABILITY_ESS); if (!bss) { if (is_scanning_required) { dev_warn(priv->adapter->dev, "assoc: requested bss not found in scan results\n"); break; } is_scanning_required = 1; } else { dev_dbg(priv->adapter->dev, "info: trying to associate to '%s' bssid %pM\n", (char *) req_ssid.ssid, bss->bssid); memcpy(&priv->cfg_bssid, bss->bssid, ETH_ALEN); break; } } if (mwifiex_bss_start(priv, bss, &req_ssid)) return -EFAULT; if (mode == NL80211_IFTYPE_ADHOC) { /* Inform the BSS information to kernel, otherwise * kernel will give a panic after successful assoc */ if (mwifiex_cfg80211_inform_ibss_bss(priv)) return -EFAULT; } return ret; } /* * CFG802.11 operation handler for association request. * * This function does not work when the current mode is set to Ad-Hoc, or * when there is already an association procedure going on. The given BSS * information is used to associate. 
*/ static int mwifiex_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); int ret = 0; if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { wiphy_err(wiphy, "received infra assoc request " "when station is in ibss mode\n"); goto done; } wiphy_dbg(wiphy, "info: Trying to associate to %s and bssid %pM\n", (char *) sme->ssid, sme->bssid); ret = mwifiex_cfg80211_assoc(priv, sme->ssid_len, sme->ssid, sme->bssid, priv->bss_mode, sme->channel, sme, 0); done: if (!ret) { cfg80211_connect_result(priv->netdev, priv->cfg_bssid, NULL, 0, NULL, 0, WLAN_STATUS_SUCCESS, GFP_KERNEL); dev_dbg(priv->adapter->dev, "info: associated to bssid %pM successfully\n", priv->cfg_bssid); } else { dev_dbg(priv->adapter->dev, "info: association to bssid %pM failed\n", priv->cfg_bssid); memset(priv->cfg_bssid, 0, ETH_ALEN); } return ret; } /* * CFG802.11 operation handler to join an IBSS. * * This function does not work in any mode other than Ad-Hoc, or if * a join operation is already in progress. 
*/ static int mwifiex_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ibss_params *params) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); int ret = 0; if (priv->bss_mode != NL80211_IFTYPE_ADHOC) { wiphy_err(wiphy, "request to join ibss received " "when station is not in ibss mode\n"); goto done; } wiphy_dbg(wiphy, "info: trying to join to %s and bssid %pM\n", (char *) params->ssid, params->bssid); ret = mwifiex_cfg80211_assoc(priv, params->ssid_len, params->ssid, params->bssid, priv->bss_mode, params->channel, NULL, params->privacy); done: if (!ret) { cfg80211_ibss_joined(priv->netdev, priv->cfg_bssid, GFP_KERNEL); dev_dbg(priv->adapter->dev, "info: joined/created adhoc network with bssid" " %pM successfully\n", priv->cfg_bssid); } else { dev_dbg(priv->adapter->dev, "info: failed creating/joining adhoc network\n"); } return ret; } /* * CFG802.11 operation handler to leave an IBSS. * * This function does not work if a leave operation is * already in progress. */ static int mwifiex_cfg80211_leave_ibss(struct wiphy *wiphy, struct net_device *dev) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); wiphy_dbg(wiphy, "info: disconnecting from essid %pM\n", priv->cfg_bssid); if (mwifiex_deauthenticate(priv, NULL)) return -EFAULT; memset(priv->cfg_bssid, 0, ETH_ALEN); return 0; } /* * CFG802.11 operation handler for scan request. * * This function issues a scan request to the firmware based upon * the user specified scan configuration. On successfull completion, * it also informs the results. 
*/ static int mwifiex_cfg80211_scan(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_scan_request *request) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); int i; struct ieee80211_channel *chan; wiphy_dbg(wiphy, "info: received scan request on %s\n", dev->name); priv->scan_request = request; priv->user_scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL); if (!priv->user_scan_cfg) { dev_err(priv->adapter->dev, "failed to alloc scan_req\n"); return -ENOMEM; } priv->user_scan_cfg->num_ssids = request->n_ssids; priv->user_scan_cfg->ssid_list = request->ssids; for (i = 0; i < request->n_channels; i++) { chan = request->channels[i]; priv->user_scan_cfg->chan_list[i].chan_number = chan->hw_value; priv->user_scan_cfg->chan_list[i].radio_type = chan->band; if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) priv->user_scan_cfg->chan_list[i].scan_type = MWIFIEX_SCAN_TYPE_PASSIVE; else priv->user_scan_cfg->chan_list[i].scan_type = MWIFIEX_SCAN_TYPE_ACTIVE; priv->user_scan_cfg->chan_list[i].scan_time = 0; } if (mwifiex_set_user_scan_ioctl(priv, priv->user_scan_cfg)) return -EFAULT; return 0; } /* * This function sets up the CFG802.11 specific HT capability fields * with default values. 
* * The following default values are set - * - HT Supported = True * - Maximum AMPDU length factor = IEEE80211_HT_MAX_AMPDU_64K * - Minimum AMPDU spacing = IEEE80211_HT_MPDU_DENSITY_NONE * - HT Capabilities supported by firmware * - MCS information, Rx mask = 0xff * - MCD information, Tx parameters = IEEE80211_HT_MCS_TX_DEFINED (0x01) */ static void mwifiex_setup_ht_caps(struct ieee80211_sta_ht_cap *ht_info, struct mwifiex_private *priv) { int rx_mcs_supp; struct ieee80211_mcs_info mcs_set; u8 *mcs = (u8 *)&mcs_set; struct mwifiex_adapter *adapter = priv->adapter; ht_info->ht_supported = true; ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); /* Fill HT capability information */ if (ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap)) ht_info->cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; else ht_info->cap &= ~IEEE80211_HT_CAP_SUP_WIDTH_20_40; if (ISSUPP_SHORTGI20(adapter->hw_dot_11n_dev_cap)) ht_info->cap |= IEEE80211_HT_CAP_SGI_20; else ht_info->cap &= ~IEEE80211_HT_CAP_SGI_20; if (ISSUPP_SHORTGI40(adapter->hw_dot_11n_dev_cap)) ht_info->cap |= IEEE80211_HT_CAP_SGI_40; else ht_info->cap &= ~IEEE80211_HT_CAP_SGI_40; if (ISSUPP_RXSTBC(adapter->hw_dot_11n_dev_cap)) ht_info->cap |= 1 << IEEE80211_HT_CAP_RX_STBC_SHIFT; else ht_info->cap &= ~(3 << IEEE80211_HT_CAP_RX_STBC_SHIFT); if (ISSUPP_TXSTBC(adapter->hw_dot_11n_dev_cap)) ht_info->cap |= IEEE80211_HT_CAP_TX_STBC; else ht_info->cap &= ~IEEE80211_HT_CAP_TX_STBC; ht_info->cap &= ~IEEE80211_HT_CAP_MAX_AMSDU; ht_info->cap |= IEEE80211_HT_CAP_SM_PS; rx_mcs_supp = GET_RXMCSSUPP(adapter->hw_dev_mcs_support); /* Set MCS for 1x1 */ memset(mcs, 0xff, rx_mcs_supp); /* Clear all the other values */ memset(&mcs[rx_mcs_supp], 0, sizeof(struct ieee80211_mcs_info) - rx_mcs_supp); if (priv->bss_mode == NL80211_IFTYPE_STATION || ISSUPP_CHANWIDTH40(adapter->hw_dot_11n_dev_cap)) /* Set MCS32 for infra mode or ad-hoc mode with 40MHz support 
*/ SETHT_MCS32(mcs_set.rx_mask); memcpy((u8 *) &ht_info->mcs, mcs, sizeof(struct ieee80211_mcs_info)); ht_info->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; } /* * create a new virtual interface with the given name */ struct net_device *mwifiex_add_virtual_intf(struct wiphy *wiphy, char *name, enum nl80211_iftype type, u32 *flags, struct vif_params *params) { struct mwifiex_private *priv = mwifiex_cfg80211_get_priv(wiphy); struct mwifiex_adapter *adapter; struct net_device *dev; void *mdev_priv; if (!priv) return NULL; adapter = priv->adapter; if (!adapter) return NULL; switch (type) { case NL80211_IFTYPE_UNSPECIFIED: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: if (priv->bss_mode) { wiphy_err(wiphy, "cannot create multiple" " station/adhoc interfaces\n"); return NULL; } if (type == NL80211_IFTYPE_UNSPECIFIED) priv->bss_mode = NL80211_IFTYPE_STATION; else priv->bss_mode = type; priv->bss_type = MWIFIEX_BSS_TYPE_STA; priv->frame_type = MWIFIEX_DATA_FRAME_TYPE_ETH_II; priv->bss_priority = 0; priv->bss_role = MWIFIEX_BSS_ROLE_STA; priv->bss_num = 0; break; default: wiphy_err(wiphy, "type not supported\n"); return NULL; } dev = alloc_netdev_mq(sizeof(struct mwifiex_private *), name, ether_setup, 1); if (!dev) { wiphy_err(wiphy, "no memory available for netdevice\n"); goto error; } dev_net_set(dev, wiphy_net(wiphy)); dev->ieee80211_ptr = priv->wdev; dev->ieee80211_ptr->iftype = priv->bss_mode; memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN); memcpy(dev->perm_addr, wiphy->perm_addr, ETH_ALEN); SET_NETDEV_DEV(dev, wiphy_dev(wiphy)); dev->flags |= IFF_BROADCAST | IFF_MULTICAST; dev->watchdog_timeo = MWIFIEX_DEFAULT_WATCHDOG_TIMEOUT; dev->hard_header_len += MWIFIEX_MIN_DATA_HEADER_LEN; mdev_priv = netdev_priv(dev); *((unsigned long *) mdev_priv) = (unsigned long) priv; priv->netdev = dev; mwifiex_init_priv_params(priv, dev); SET_NETDEV_DEV(dev, adapter->dev); /* Register network device */ if (register_netdevice(dev)) { wiphy_err(wiphy, "cannot register virtual 
network device\n"); goto error; } sema_init(&priv->async_sem, 1); priv->scan_pending_on_block = false; dev_dbg(adapter->dev, "info: %s: Marvell 802.11 Adapter\n", dev->name); #ifdef CONFIG_DEBUG_FS mwifiex_dev_debugfs_init(priv); #endif return dev; error: if (dev && (dev->reg_state == NETREG_UNREGISTERED)) free_netdev(dev); priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; return NULL; } EXPORT_SYMBOL_GPL(mwifiex_add_virtual_intf); /* * del_virtual_intf: remove the virtual interface determined by dev */ int mwifiex_del_virtual_intf(struct wiphy *wiphy, struct net_device *dev) { struct mwifiex_private *priv = mwifiex_netdev_get_priv(dev); #ifdef CONFIG_DEBUG_FS mwifiex_dev_debugfs_remove(priv); #endif if (!netif_queue_stopped(priv->netdev)) netif_stop_queue(priv->netdev); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); if (dev->reg_state == NETREG_REGISTERED) unregister_netdevice(dev); if (dev->reg_state == NETREG_UNREGISTERED) free_netdev(dev); /* Clear the priv in adapter */ priv->netdev = NULL; priv->media_connected = false; priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED; return 0; } EXPORT_SYMBOL_GPL(mwifiex_del_virtual_intf); /* station cfg80211 operations */ static struct cfg80211_ops mwifiex_cfg80211_ops = { .add_virtual_intf = mwifiex_add_virtual_intf, .del_virtual_intf = mwifiex_del_virtual_intf, .change_virtual_intf = mwifiex_cfg80211_change_virtual_intf, .scan = mwifiex_cfg80211_scan, .connect = mwifiex_cfg80211_connect, .disconnect = mwifiex_cfg80211_disconnect, .get_station = mwifiex_cfg80211_get_station, .set_wiphy_params = mwifiex_cfg80211_set_wiphy_params, .set_channel = mwifiex_cfg80211_set_channel, .join_ibss = mwifiex_cfg80211_join_ibss, .leave_ibss = mwifiex_cfg80211_leave_ibss, .add_key = mwifiex_cfg80211_add_key, .del_key = mwifiex_cfg80211_del_key, .set_default_key = mwifiex_cfg80211_set_default_key, .set_power_mgmt = mwifiex_cfg80211_set_power_mgmt, .set_tx_power = mwifiex_cfg80211_set_tx_power, .set_bitrate_mask = 
mwifiex_cfg80211_set_bitrate_mask, }; /* * This function registers the device with CFG802.11 subsystem. * * The function creates the wireless device/wiphy, populates it with * default parameters and handler function pointers, and finally * registers the device. */ int mwifiex_register_cfg80211(struct mwifiex_private *priv) { int ret; void *wdev_priv; struct wireless_dev *wdev; struct ieee80211_sta_ht_cap *ht_info; wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); if (!wdev) { dev_err(priv->adapter->dev, "%s: allocating wireless device\n", __func__); return -ENOMEM; } wdev->wiphy = wiphy_new(&mwifiex_cfg80211_ops, sizeof(struct mwifiex_private *)); if (!wdev->wiphy) { kfree(wdev); return -ENOMEM; } wdev->iftype = NL80211_IFTYPE_STATION; wdev->wiphy->max_scan_ssids = 10; wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &mwifiex_band_2ghz; ht_info = &wdev->wiphy->bands[IEEE80211_BAND_2GHZ]->ht_cap; mwifiex_setup_ht_caps(ht_info, priv); if (priv->adapter->config_bands & BAND_A) { wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = &mwifiex_band_5ghz; ht_info = &wdev->wiphy->bands[IEEE80211_BAND_5GHZ]->ht_cap; mwifiex_setup_ht_caps(ht_info, priv); } else { wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = NULL; } /* Initialize cipher suits */ wdev->wiphy->cipher_suites = mwifiex_cipher_suites; wdev->wiphy->n_cipher_suites = ARRAY_SIZE(mwifiex_cipher_suites); memcpy(wdev->wiphy->perm_addr, priv->curr_addr, ETH_ALEN); wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; /* Reserve space for bss band information */ wdev->wiphy->bss_priv_size = sizeof(u8); wdev->wiphy->reg_notifier = mwifiex_reg_notifier; /* Set struct mwifiex_private pointer in wiphy_priv */ wdev_priv = wiphy_priv(wdev->wiphy); *(unsigned long *) wdev_priv = (unsigned long) priv; set_wiphy_dev(wdev->wiphy, (struct device *) priv->adapter->dev); ret = wiphy_register(wdev->wiphy); if (ret < 0) { dev_err(priv->adapter->dev, "%s: registering 
cfg80211 device\n", __func__); wiphy_free(wdev->wiphy); kfree(wdev); return ret; } else { dev_dbg(priv->adapter->dev, "info: successfully registered wiphy device\n"); } priv->wdev = wdev; return ret; }
gpl-2.0
ivanalgo/linux-arm-kmemcheck
net/caif/cfmuxl.c
2762
6436
/* * Copyright (C) ST-Ericsson AB 2010 * Author: Sjur Brendeland * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(fmt) KBUILD_MODNAME ":%s(): " fmt, __func__ #include <linux/stddef.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/rculist.h> #include <net/caif/cfpkt.h> #include <net/caif/cfmuxl.h> #include <net/caif/cfsrvl.h> #include <net/caif/cffrml.h> #define container_obj(layr) container_of(layr, struct cfmuxl, layer) #define CAIF_CTRL_CHANNEL 0 #define UP_CACHE_SIZE 8 #define DN_CACHE_SIZE 8 struct cfmuxl { struct cflayer layer; struct list_head srvl_list; struct list_head frml_list; struct cflayer *up_cache[UP_CACHE_SIZE]; struct cflayer *dn_cache[DN_CACHE_SIZE]; /* * Set when inserting or removing downwards layers. */ spinlock_t transmit_lock; /* * Set when inserting or removing upwards layers. */ spinlock_t receive_lock; }; static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt); static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt); static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid); static struct cflayer *get_up(struct cfmuxl *muxl, u16 id); struct cflayer *cfmuxl_create(void) { struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC); if (!this) return NULL; memset(this, 0, sizeof(*this)); this->layer.receive = cfmuxl_receive; this->layer.transmit = cfmuxl_transmit; this->layer.ctrlcmd = cfmuxl_ctrlcmd; INIT_LIST_HEAD(&this->srvl_list); INIT_LIST_HEAD(&this->frml_list); spin_lock_init(&this->transmit_lock); spin_lock_init(&this->receive_lock); snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux"); return &this->layer; } int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid) { struct cfmuxl *muxl = (struct cfmuxl *) layr; spin_lock_bh(&muxl->transmit_lock); list_add_rcu(&dn->node, &muxl->frml_list); spin_unlock_bh(&muxl->transmit_lock); return 0; } static struct cflayer *get_from_id(struct list_head *list, u16 id) { 
struct cflayer *lyr; list_for_each_entry_rcu(lyr, list, node) { if (lyr->id == id) return lyr; } return NULL; } int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid) { struct cfmuxl *muxl = container_obj(layr); struct cflayer *old; spin_lock_bh(&muxl->receive_lock); /* Two entries with same id is wrong, so remove old layer from mux */ old = get_from_id(&muxl->srvl_list, linkid); if (old != NULL) list_del_rcu(&old->node); list_add_rcu(&up->node, &muxl->srvl_list); spin_unlock_bh(&muxl->receive_lock); return 0; } struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid) { struct cfmuxl *muxl = container_obj(layr); struct cflayer *dn; int idx = phyid % DN_CACHE_SIZE; spin_lock_bh(&muxl->transmit_lock); RCU_INIT_POINTER(muxl->dn_cache[idx], NULL); dn = get_from_id(&muxl->frml_list, phyid); if (dn == NULL) goto out; list_del_rcu(&dn->node); caif_assert(dn != NULL); out: spin_unlock_bh(&muxl->transmit_lock); return dn; } static struct cflayer *get_up(struct cfmuxl *muxl, u16 id) { struct cflayer *up; int idx = id % UP_CACHE_SIZE; up = rcu_dereference(muxl->up_cache[idx]); if (up == NULL || up->id != id) { spin_lock_bh(&muxl->receive_lock); up = get_from_id(&muxl->srvl_list, id); rcu_assign_pointer(muxl->up_cache[idx], up); spin_unlock_bh(&muxl->receive_lock); } return up; } static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info) { struct cflayer *dn; int idx = dev_info->id % DN_CACHE_SIZE; dn = rcu_dereference(muxl->dn_cache[idx]); if (dn == NULL || dn->id != dev_info->id) { spin_lock_bh(&muxl->transmit_lock); dn = get_from_id(&muxl->frml_list, dev_info->id); rcu_assign_pointer(muxl->dn_cache[idx], dn); spin_unlock_bh(&muxl->transmit_lock); } return dn; } struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id) { struct cflayer *up; struct cfmuxl *muxl = container_obj(layr); int idx = id % UP_CACHE_SIZE; if (id == 0) { pr_warn("Trying to remove control layer\n"); return NULL; } 
spin_lock_bh(&muxl->receive_lock); up = get_from_id(&muxl->srvl_list, id); if (up == NULL) goto out; RCU_INIT_POINTER(muxl->up_cache[idx], NULL); list_del_rcu(&up->node); out: spin_unlock_bh(&muxl->receive_lock); return up; } static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt) { int ret; struct cfmuxl *muxl = container_obj(layr); u8 id; struct cflayer *up; if (cfpkt_extr_head(pkt, &id, 1) < 0) { pr_err("erroneous Caif Packet\n"); cfpkt_destroy(pkt); return -EPROTO; } rcu_read_lock(); up = get_up(muxl, id); if (up == NULL) { pr_debug("Received data on unknown link ID = %d (0x%x)" " up == NULL", id, id); cfpkt_destroy(pkt); /* * Don't return ERROR, since modem misbehaves and sends out * flow on before linksetup response. */ rcu_read_unlock(); return /* CFGLU_EPROT; */ 0; } /* We can't hold rcu_lock during receive, so take a ref count instead */ cfsrvl_get(up); rcu_read_unlock(); ret = up->receive(up, pkt); cfsrvl_put(up); return ret; } static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt) { struct cfmuxl *muxl = container_obj(layr); int err; u8 linkid; struct cflayer *dn; struct caif_payload_info *info = cfpkt_info(pkt); BUG_ON(!info); rcu_read_lock(); dn = get_dn(muxl, info->dev_info); if (dn == NULL) { pr_debug("Send data on unknown phy ID = %d (0x%x)\n", info->dev_info->id, info->dev_info->id); rcu_read_unlock(); cfpkt_destroy(pkt); return -ENOTCONN; } info->hdr_len += 1; linkid = info->channel_id; cfpkt_add_head(pkt, &linkid, 1); /* We can't hold rcu_lock during receive, so take a ref count instead */ cffrml_hold(dn); rcu_read_unlock(); err = dn->transmit(dn, pkt); cffrml_put(dn); return err; } static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl, int phyid) { struct cfmuxl *muxl = container_obj(layr); struct cflayer *layer; rcu_read_lock(); list_for_each_entry_rcu(layer, &muxl->srvl_list, node) { if (cfsrvl_phyid_match(layer, phyid) && layer->ctrlcmd) { if ((ctrl == _CAIF_CTRLCMD_PHYIF_DOWN_IND || ctrl == 
CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND) && layer->id != 0) cfmuxl_remove_uplayer(layr, layer->id); /* NOTE: ctrlcmd is not allowed to block */ layer->ctrlcmd(layer, ctrl, phyid); } } rcu_read_unlock(); }
gpl-2.0
curbthepain/revkernel_titan
arch/arm/mach-tegra/cpu-tegra.c
4554
5803
/* * arch/arm/mach-tegra/cpu-tegra.c * * Copyright (C) 2010 Google, Inc. * * Author: * Colin Cross <ccross@google.com> * Based on arch/arm/plat-omap/cpu-omap.c, (C) 2005 Nokia Corporation * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/cpufreq.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/suspend.h> #include <mach/clk.h> /* Frequency table index must be sequential starting at 0 */ static struct cpufreq_frequency_table freq_table[] = { { 0, 216000 }, { 1, 312000 }, { 2, 456000 }, { 3, 608000 }, { 4, 760000 }, { 5, 816000 }, { 6, 912000 }, { 7, 1000000 }, { 8, CPUFREQ_TABLE_END }, }; #define NUM_CPUS 2 static struct clk *cpu_clk; static struct clk *emc_clk; static unsigned long target_cpu_speed[NUM_CPUS]; static DEFINE_MUTEX(tegra_cpu_lock); static bool is_suspended; static int tegra_verify_speed(struct cpufreq_policy *policy) { return cpufreq_frequency_table_verify(policy, freq_table); } static unsigned int tegra_getspeed(unsigned int cpu) { unsigned long rate; if (cpu >= NUM_CPUS) return 0; rate = clk_get_rate(cpu_clk) / 1000; return rate; } static int tegra_update_cpu_speed(unsigned long rate) { int ret = 0; struct cpufreq_freqs freqs; freqs.old = tegra_getspeed(0); freqs.new = rate; if (freqs.old == freqs.new) return ret; /* * Vote on memory bus frequency based on cpu frequency * This sets the minimum frequency, display or avp may request higher */ if (rate >= 
816000) clk_set_rate(emc_clk, 600000000); /* cpu 816 MHz, emc max */ else if (rate >= 456000) clk_set_rate(emc_clk, 300000000); /* cpu 456 MHz, emc 150Mhz */ else clk_set_rate(emc_clk, 100000000); /* emc 50Mhz */ for_each_online_cpu(freqs.cpu) cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); #ifdef CONFIG_CPU_FREQ_DEBUG printk(KERN_DEBUG "cpufreq-tegra: transition: %u --> %u\n", freqs.old, freqs.new); #endif ret = clk_set_rate(cpu_clk, freqs.new * 1000); if (ret) { pr_err("cpu-tegra: Failed to set cpu frequency to %d kHz\n", freqs.new); return ret; } for_each_online_cpu(freqs.cpu) cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); return 0; } static unsigned long tegra_cpu_highest_speed(void) { unsigned long rate = 0; int i; for_each_online_cpu(i) rate = max(rate, target_cpu_speed[i]); return rate; } static int tegra_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { unsigned int idx; unsigned int freq; int ret = 0; mutex_lock(&tegra_cpu_lock); if (is_suspended) { ret = -EBUSY; goto out; } cpufreq_frequency_table_target(policy, freq_table, target_freq, relation, &idx); freq = freq_table[idx].frequency; target_cpu_speed[policy->cpu] = freq; ret = tegra_update_cpu_speed(tegra_cpu_highest_speed()); out: mutex_unlock(&tegra_cpu_lock); return ret; } static int tegra_pm_notify(struct notifier_block *nb, unsigned long event, void *dummy) { mutex_lock(&tegra_cpu_lock); if (event == PM_SUSPEND_PREPARE) { is_suspended = true; pr_info("Tegra cpufreq suspend: setting frequency to %d kHz\n", freq_table[0].frequency); tegra_update_cpu_speed(freq_table[0].frequency); } else if (event == PM_POST_SUSPEND) { is_suspended = false; } mutex_unlock(&tegra_cpu_lock); return NOTIFY_OK; } static struct notifier_block tegra_cpu_pm_notifier = { .notifier_call = tegra_pm_notify, }; static int tegra_cpu_init(struct cpufreq_policy *policy) { if (policy->cpu >= NUM_CPUS) return -EINVAL; cpu_clk = clk_get_sys(NULL, "cpu"); if (IS_ERR(cpu_clk)) return 
PTR_ERR(cpu_clk); emc_clk = clk_get_sys("cpu", "emc"); if (IS_ERR(emc_clk)) { clk_put(cpu_clk); return PTR_ERR(emc_clk); } clk_enable(emc_clk); clk_enable(cpu_clk); cpufreq_frequency_table_cpuinfo(policy, freq_table); cpufreq_frequency_table_get_attr(freq_table, policy->cpu); policy->cur = tegra_getspeed(policy->cpu); target_cpu_speed[policy->cpu] = policy->cur; /* FIXME: what's the actual transition time? */ policy->cpuinfo.transition_latency = 300 * 1000; policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; cpumask_copy(policy->related_cpus, cpu_possible_mask); if (policy->cpu == 0) register_pm_notifier(&tegra_cpu_pm_notifier); return 0; } static int tegra_cpu_exit(struct cpufreq_policy *policy) { cpufreq_frequency_table_cpuinfo(policy, freq_table); clk_disable(emc_clk); clk_put(emc_clk); clk_put(cpu_clk); return 0; } static struct freq_attr *tegra_cpufreq_attr[] = { &cpufreq_freq_attr_scaling_available_freqs, NULL, }; static struct cpufreq_driver tegra_cpufreq_driver = { .verify = tegra_verify_speed, .target = tegra_target, .get = tegra_getspeed, .init = tegra_cpu_init, .exit = tegra_cpu_exit, .name = "tegra", .attr = tegra_cpufreq_attr, }; static int __init tegra_cpufreq_init(void) { return cpufreq_register_driver(&tegra_cpufreq_driver); } static void __exit tegra_cpufreq_exit(void) { cpufreq_unregister_driver(&tegra_cpufreq_driver); } MODULE_AUTHOR("Colin Cross <ccross@android.com>"); MODULE_DESCRIPTION("cpufreq driver for Nvidia Tegra2"); MODULE_LICENSE("GPL"); module_init(tegra_cpufreq_init); module_exit(tegra_cpufreq_exit);
gpl-2.0
antaril/AGK
drivers/edac/e7xxx_edac.c
4810
15699
/* * Intel e7xxx Memory Controller kernel module * (C) 2003 Linux Networx (http://lnxi.com) * This file may be distributed under the terms of the * GNU General Public License. * * See "enum e7xxx_chips" below for supported chipsets * * Written by Thayne Harbaugh * Based on work by Dan Hollis <goemon at anime dot net> and others. * http://www.anime.net/~goemon/linux-ecc/ * * Contributors: * Eric Biederman (Linux Networx) * Tom Zimmerman (Linux Networx) * Jim Garlick (Lawrence Livermore National Labs) * Dave Peterson (Lawrence Livermore National Labs) * That One Guy (Some other place) * Wang Zhenyu (intel.com) * * $Id: edac_e7xxx.c,v 1.5.2.9 2005/10/05 00:43:44 dsp_llnl Exp $ * */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/edac.h> #include "edac_core.h" #define E7XXX_REVISION " Ver: 2.0.2" #define EDAC_MOD_STR "e7xxx_edac" #define e7xxx_printk(level, fmt, arg...) \ edac_printk(level, "e7xxx", fmt, ##arg) #define e7xxx_mc_printk(mci, level, fmt, arg...) 
\ edac_mc_chipset_printk(mci, level, "e7xxx", fmt, ##arg) #ifndef PCI_DEVICE_ID_INTEL_7205_0 #define PCI_DEVICE_ID_INTEL_7205_0 0x255d #endif /* PCI_DEVICE_ID_INTEL_7205_0 */ #ifndef PCI_DEVICE_ID_INTEL_7205_1_ERR #define PCI_DEVICE_ID_INTEL_7205_1_ERR 0x2551 #endif /* PCI_DEVICE_ID_INTEL_7205_1_ERR */ #ifndef PCI_DEVICE_ID_INTEL_7500_0 #define PCI_DEVICE_ID_INTEL_7500_0 0x2540 #endif /* PCI_DEVICE_ID_INTEL_7500_0 */ #ifndef PCI_DEVICE_ID_INTEL_7500_1_ERR #define PCI_DEVICE_ID_INTEL_7500_1_ERR 0x2541 #endif /* PCI_DEVICE_ID_INTEL_7500_1_ERR */ #ifndef PCI_DEVICE_ID_INTEL_7501_0 #define PCI_DEVICE_ID_INTEL_7501_0 0x254c #endif /* PCI_DEVICE_ID_INTEL_7501_0 */ #ifndef PCI_DEVICE_ID_INTEL_7501_1_ERR #define PCI_DEVICE_ID_INTEL_7501_1_ERR 0x2541 #endif /* PCI_DEVICE_ID_INTEL_7501_1_ERR */ #ifndef PCI_DEVICE_ID_INTEL_7505_0 #define PCI_DEVICE_ID_INTEL_7505_0 0x2550 #endif /* PCI_DEVICE_ID_INTEL_7505_0 */ #ifndef PCI_DEVICE_ID_INTEL_7505_1_ERR #define PCI_DEVICE_ID_INTEL_7505_1_ERR 0x2551 #endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ #define E7XXX_NR_CSROWS 8 /* number of csrows */ #define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? 
*/ /* E7XXX register addresses - device 0 function 0 */ #define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ #define E7XXX_DRA 0x70 /* DRAM row attribute register (8b) */ /* * 31 Device width row 7 0=x8 1=x4 * 27 Device width row 6 * 23 Device width row 5 * 19 Device width row 4 * 15 Device width row 3 * 11 Device width row 2 * 7 Device width row 1 * 3 Device width row 0 */ #define E7XXX_DRC 0x7C /* DRAM controller mode reg (32b) */ /* * 22 Number channels 0=1,1=2 * 19:18 DRB Granularity 32/64MB */ #define E7XXX_TOLM 0xC4 /* DRAM top of low memory reg (16b) */ #define E7XXX_REMAPBASE 0xC6 /* DRAM remap base address reg (16b) */ #define E7XXX_REMAPLIMIT 0xC8 /* DRAM remap limit address reg (16b) */ /* E7XXX register addresses - device 0 function 1 */ #define E7XXX_DRAM_FERR 0x80 /* DRAM first error register (8b) */ #define E7XXX_DRAM_NERR 0x82 /* DRAM next error register (8b) */ #define E7XXX_DRAM_CELOG_ADD 0xA0 /* DRAM first correctable memory */ /* error address register (32b) */ /* * 31:28 Reserved * 27:6 CE address (4k block 33:12) * 5:0 Reserved */ #define E7XXX_DRAM_UELOG_ADD 0xB0 /* DRAM first uncorrectable memory */ /* error address register (32b) */ /* * 31:28 Reserved * 27:6 CE address (4k block 33:12) * 5:0 Reserved */ #define E7XXX_DRAM_CELOG_SYNDROME 0xD0 /* DRAM first correctable memory */ /* error syndrome register (16b) */ enum e7xxx_chips { E7500 = 0, E7501, E7505, E7205, }; struct e7xxx_pvt { struct pci_dev *bridge_ck; u32 tolm; u32 remapbase; u32 remaplimit; const struct e7xxx_dev_info *dev_info; }; struct e7xxx_dev_info { u16 err_dev; const char *ctl_name; }; struct e7xxx_error_info { u8 dram_ferr; u8 dram_nerr; u32 dram_celog_add; u16 dram_celog_syndrome; u32 dram_uelog_add; }; static struct edac_pci_ctl_info *e7xxx_pci; static const struct e7xxx_dev_info e7xxx_devs[] = { [E7500] = { .err_dev = PCI_DEVICE_ID_INTEL_7500_1_ERR, .ctl_name = "E7500"}, [E7501] = { .err_dev = PCI_DEVICE_ID_INTEL_7501_1_ERR, .ctl_name = "E7501"}, [E7505] = { 
.err_dev = PCI_DEVICE_ID_INTEL_7505_1_ERR, .ctl_name = "E7505"}, [E7205] = { .err_dev = PCI_DEVICE_ID_INTEL_7205_1_ERR, .ctl_name = "E7205"}, }; /* FIXME - is this valid for both SECDED and S4ECD4ED? */ static inline int e7xxx_find_channel(u16 syndrome) { debugf3("%s()\n", __func__); if ((syndrome & 0xff00) == 0) return 0; if ((syndrome & 0x00ff) == 0) return 1; if ((syndrome & 0xf000) == 0 || (syndrome & 0x0f00) == 0) return 0; return 1; } static unsigned long ctl_page_to_phys(struct mem_ctl_info *mci, unsigned long page) { u32 remap; struct e7xxx_pvt *pvt = (struct e7xxx_pvt *)mci->pvt_info; debugf3("%s()\n", __func__); if ((page < pvt->tolm) || ((page >= 0x100000) && (page < pvt->remapbase))) return page; remap = (page - pvt->tolm) + pvt->remapbase; if (remap < pvt->remaplimit) return remap; e7xxx_printk(KERN_ERR, "Invalid page %lx - out of range\n", page); return pvt->tolm - 1; } static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info) { u32 error_1b, page; u16 syndrome; int row; int channel; debugf3("%s()\n", __func__); /* read the error address */ error_1b = info->dram_celog_add; /* FIXME - should use PAGE_SHIFT */ page = error_1b >> 6; /* convert the address to 4k page */ /* read the syndrome */ syndrome = info->dram_celog_syndrome; /* FIXME - check for -1 */ row = edac_mc_find_csrow_by_page(mci, page); /* convert syndrome to channel */ channel = e7xxx_find_channel(syndrome); edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE"); } static void process_ce_no_info(struct mem_ctl_info *mci) { debugf3("%s()\n", __func__); edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); } static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) { u32 error_2b, block_page; int row; debugf3("%s()\n", __func__); /* read the error address */ error_2b = info->dram_uelog_add; /* FIXME - should use PAGE_SHIFT */ block_page = error_2b >> 6; /* convert to 4k address */ row = edac_mc_find_csrow_by_page(mci, 
block_page); edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); } static void process_ue_no_info(struct mem_ctl_info *mci) { debugf3("%s()\n", __func__); edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); } static void e7xxx_get_error_info(struct mem_ctl_info *mci, struct e7xxx_error_info *info) { struct e7xxx_pvt *pvt; pvt = (struct e7xxx_pvt *)mci->pvt_info; pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_FERR, &info->dram_ferr); pci_read_config_byte(pvt->bridge_ck, E7XXX_DRAM_NERR, &info->dram_nerr); if ((info->dram_ferr & 1) || (info->dram_nerr & 1)) { pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_CELOG_ADD, &info->dram_celog_add); pci_read_config_word(pvt->bridge_ck, E7XXX_DRAM_CELOG_SYNDROME, &info->dram_celog_syndrome); } if ((info->dram_ferr & 2) || (info->dram_nerr & 2)) pci_read_config_dword(pvt->bridge_ck, E7XXX_DRAM_UELOG_ADD, &info->dram_uelog_add); if (info->dram_ferr & 3) pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_FERR, 0x03, 0x03); if (info->dram_nerr & 3) pci_write_bits8(pvt->bridge_ck, E7XXX_DRAM_NERR, 0x03, 0x03); } static int e7xxx_process_error_info(struct mem_ctl_info *mci, struct e7xxx_error_info *info, int handle_errors) { int error_found; error_found = 0; /* decode and report errors */ if (info->dram_ferr & 1) { /* check first error correctable */ error_found = 1; if (handle_errors) process_ce(mci, info); } if (info->dram_ferr & 2) { /* check first error uncorrectable */ error_found = 1; if (handle_errors) process_ue(mci, info); } if (info->dram_nerr & 1) { /* check next error correctable */ error_found = 1; if (handle_errors) { if (info->dram_ferr & 1) process_ce_no_info(mci); else process_ce(mci, info); } } if (info->dram_nerr & 2) { /* check next error uncorrectable */ error_found = 1; if (handle_errors) { if (info->dram_ferr & 2) process_ue_no_info(mci); else process_ue(mci, info); } } return error_found; } static void e7xxx_check(struct mem_ctl_info *mci) { struct e7xxx_error_info info; debugf3("%s()\n", 
__func__); e7xxx_get_error_info(mci, &info); e7xxx_process_error_info(mci, &info, 1); } /* Return 1 if dual channel mode is active. Else return 0. */ static inline int dual_channel_active(u32 drc, int dev_idx) { return (dev_idx == E7501) ? ((drc >> 22) & 0x1) : 1; } /* Return DRB granularity (0=32mb, 1=64mb). */ static inline int drb_granularity(u32 drc, int dev_idx) { /* only e7501 can be single channel */ return (dev_idx == E7501) ? ((drc >> 18) & 0x3) : 1; } static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev, int dev_idx, u32 drc) { unsigned long last_cumul_size; int index; u8 value; u32 dra, cumul_size; int drc_chan, drc_drbg, drc_ddim, mem_dev; struct csrow_info *csrow; pci_read_config_dword(pdev, E7XXX_DRA, &dra); drc_chan = dual_channel_active(drc, dev_idx); drc_drbg = drb_granularity(drc, dev_idx); drc_ddim = (drc >> 20) & 0x3; last_cumul_size = 0; /* The dram row boundary (DRB) reg values are boundary address * for each DRAM row with a granularity of 32 or 64MB (single/dual * channel operation). DRB regs are cumulative; therefore DRB7 will * contain the total memory contained in all eight rows. */ for (index = 0; index < mci->nr_csrows; index++) { /* mem_dev 0=x8, 1=x4 */ mem_dev = (dra >> (index * 4 + 3)) & 0x1; csrow = &mci->csrows[index]; pci_read_config_byte(pdev, E7XXX_DRB + index, &value); /* convert a 64 or 32 MiB DRB to a page size. */ cumul_size = value << (25 + drc_drbg - PAGE_SHIFT); debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, cumul_size); if (cumul_size == last_cumul_size) continue; /* not populated */ csrow->first_page = last_cumul_size; csrow->last_page = cumul_size - 1; csrow->nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ csrow->mtype = MEM_RDDR; /* only one type supported */ csrow->dtype = mem_dev ? 
DEV_X4 : DEV_X8; /* * if single channel or x8 devices then SECDED * if dual channel and x4 then S4ECD4ED */ if (drc_ddim) { if (drc_chan && mem_dev) { csrow->edac_mode = EDAC_S4ECD4ED; mci->edac_cap |= EDAC_FLAG_S4ECD4ED; } else { csrow->edac_mode = EDAC_SECDED; mci->edac_cap |= EDAC_FLAG_SECDED; } } else csrow->edac_mode = EDAC_NONE; } } static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx) { u16 pci_data; struct mem_ctl_info *mci = NULL; struct e7xxx_pvt *pvt = NULL; u32 drc; int drc_chan; struct e7xxx_error_info discard; debugf0("%s(): mci\n", __func__); pci_read_config_dword(pdev, E7XXX_DRC, &drc); drc_chan = dual_channel_active(drc, dev_idx); mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0); if (mci == NULL) return -ENOMEM; debugf3("%s(): init mci\n", __func__); mci->mtype_cap = MEM_FLAG_RDDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED | EDAC_FLAG_S4ECD4ED; /* FIXME - what if different memory types are in different csrows? */ mci->mod_name = EDAC_MOD_STR; mci->mod_ver = E7XXX_REVISION; mci->dev = &pdev->dev; debugf3("%s(): init pvt\n", __func__); pvt = (struct e7xxx_pvt *)mci->pvt_info; pvt->dev_info = &e7xxx_devs[dev_idx]; pvt->bridge_ck = pci_get_device(PCI_VENDOR_ID_INTEL, pvt->dev_info->err_dev, pvt->bridge_ck); if (!pvt->bridge_ck) { e7xxx_printk(KERN_ERR, "error reporting device not found:" "vendor %x device 0x%x (broken BIOS?)\n", PCI_VENDOR_ID_INTEL, e7xxx_devs[dev_idx].err_dev); goto fail0; } debugf3("%s(): more mci init\n", __func__); mci->ctl_name = pvt->dev_info->ctl_name; mci->dev_name = pci_name(pdev); mci->edac_check = e7xxx_check; mci->ctl_page_to_phys = ctl_page_to_phys; e7xxx_init_csrows(mci, pdev, dev_idx, drc); mci->edac_cap |= EDAC_FLAG_NONE; debugf3("%s(): tolm, remapbase, remaplimit\n", __func__); /* load the top of low memory, remap base, and remap limit vars */ pci_read_config_word(pdev, E7XXX_TOLM, &pci_data); pvt->tolm = ((u32) pci_data) << 4; pci_read_config_word(pdev, E7XXX_REMAPBASE, &pci_data); 
pvt->remapbase = ((u32) pci_data) << 14; pci_read_config_word(pdev, E7XXX_REMAPLIMIT, &pci_data); pvt->remaplimit = ((u32) pci_data) << 14; e7xxx_printk(KERN_INFO, "tolm = %x, remapbase = %x, remaplimit = %x\n", pvt->tolm, pvt->remapbase, pvt->remaplimit); /* clear any pending errors, or initial state bits */ e7xxx_get_error_info(mci, &discard); /* Here we assume that we will never see multiple instances of this * type of memory controller. The ID is therefore hardcoded to 0. */ if (edac_mc_add_mc(mci)) { debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail1; } /* allocating generic PCI control info */ e7xxx_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); if (!e7xxx_pci) { printk(KERN_WARNING "%s(): Unable to create PCI control\n", __func__); printk(KERN_WARNING "%s(): PCI error report via EDAC not setup\n", __func__); } /* get this far and it's successful */ debugf3("%s(): success\n", __func__); return 0; fail1: pci_dev_put(pvt->bridge_ck); fail0: edac_mc_free(mci); return -ENODEV; } /* returns count (>= 0), or negative on error */ static int __devinit e7xxx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { debugf0("%s()\n", __func__); /* wake up and enable device */ return pci_enable_device(pdev) ? 
-EIO : e7xxx_probe1(pdev, ent->driver_data); } static void __devexit e7xxx_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct e7xxx_pvt *pvt; debugf0("%s()\n", __func__); if (e7xxx_pci) edac_pci_release_generic_ctl(e7xxx_pci); if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) return; pvt = (struct e7xxx_pvt *)mci->pvt_info; pci_dev_put(pvt->bridge_ck); edac_mc_free(mci); } static DEFINE_PCI_DEVICE_TABLE(e7xxx_pci_tbl) = { { PCI_VEND_DEV(INTEL, 7205_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, E7205}, { PCI_VEND_DEV(INTEL, 7500_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, E7500}, { PCI_VEND_DEV(INTEL, 7501_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, E7501}, { PCI_VEND_DEV(INTEL, 7505_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, E7505}, { 0, } /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, e7xxx_pci_tbl); static struct pci_driver e7xxx_driver = { .name = EDAC_MOD_STR, .probe = e7xxx_init_one, .remove = __devexit_p(e7xxx_remove_one), .id_table = e7xxx_pci_tbl, }; static int __init e7xxx_init(void) { /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); return pci_register_driver(&e7xxx_driver); } static void __exit e7xxx_exit(void) { pci_unregister_driver(&e7xxx_driver); } module_init(e7xxx_init); module_exit(e7xxx_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Linux Networx (http://lnxi.com) Thayne Harbaugh et al\n" "Based on.work by Dan Hollis et al"); MODULE_DESCRIPTION("MC support for Intel e7xxx memory controllers"); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
kogone/android_kernel_oneplus_msm8974
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
4810
50173
/* bnx2x_stats.c: Broadcom Everest network driver. * * Copyright (c) 2007-2012 Broadcom Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation. * * Maintained by: Eilon Greenstein <eilong@broadcom.com> * Written by: Eliezer Tamir * Based on code from Michael Chan's bnx2 driver * UDP CSUM errata workaround by Arik Gendelman * Slowpath and fastpath rework by Vladislav Zolotarov * Statistics and Link management by Yitchak Gertner * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include "bnx2x_stats.h" #include "bnx2x_cmn.h" /* Statistics */ /* * General service functions */ static inline long bnx2x_hilo(u32 *hiref) { u32 lo = *(hiref + 1); #if (BITS_PER_LONG == 64) u32 hi = *hiref; return HILO_U64(hi, lo); #else return lo; #endif } static u16 bnx2x_get_port_stats_dma_len(struct bnx2x *bp) { u16 res = sizeof(struct host_port_stats) >> 2; /* if PFC stats are not supported by the MFW, don't DMA them */ if (!(bp->flags & BC_SUPPORTS_PFC_STATS)) res -= (sizeof(u32)*4) >> 2; return res; } /* * Init service functions */ /* Post the next statistics ramrod. Protect it with the spin in * order to ensure the strict order between statistics ramrods * (each ramrod has a sequence number passed in a * bp->fw_stats_req->hdr.drv_stats_counter and ramrods must be * sent in order). 
*/ static void bnx2x_storm_stats_post(struct bnx2x *bp) { if (!bp->stats_pending) { int rc; spin_lock_bh(&bp->stats_lock); if (bp->stats_pending) { spin_unlock_bh(&bp->stats_lock); return; } bp->fw_stats_req->hdr.drv_stats_counter = cpu_to_le16(bp->stats_counter++); DP(BNX2X_MSG_STATS, "Sending statistics ramrod %d\n", bp->fw_stats_req->hdr.drv_stats_counter); /* send FW stats ramrod */ rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, U64_HI(bp->fw_stats_req_mapping), U64_LO(bp->fw_stats_req_mapping), NONE_CONNECTION_TYPE); if (rc == 0) bp->stats_pending = 1; spin_unlock_bh(&bp->stats_lock); } } static void bnx2x_hw_stats_post(struct bnx2x *bp) { struct dmae_command *dmae = &bp->stats_dmae; u32 *stats_comp = bnx2x_sp(bp, stats_comp); *stats_comp = DMAE_COMP_VAL; if (CHIP_REV_IS_SLOW(bp)) return; /* loader */ if (bp->executer_idx) { int loader_idx = PMF_DMAE_C(bp); u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, true, DMAE_COMP_GRC); opcode = bnx2x_dmae_opcode_clr_src_reset(opcode); memset(dmae, 0, sizeof(struct dmae_command)); dmae->opcode = opcode; dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0])); dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0])); dmae->dst_addr_lo = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * (loader_idx + 1)) >> 2; dmae->dst_addr_hi = 0; dmae->len = sizeof(struct dmae_command) >> 2; if (CHIP_IS_E1(bp)) dmae->len--; dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; *stats_comp = 0; bnx2x_post_dmae(bp, dmae, loader_idx); } else if (bp->func_stx) { *stats_comp = 0; memcpy(bnx2x_sp(bp, func_stats), &bp->func_stats, sizeof(bp->func_stats)); bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp)); } } static int bnx2x_stats_comp(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); int cnt = 10; might_sleep(); while (*stats_comp != DMAE_COMP_VAL) { if (!cnt) { BNX2X_ERR("timeout waiting for stats finished\n"); break; } cnt--; usleep_range(1000, 1000); } 
	/* end of bnx2x_stats_comp() (started above): completion status is
	 * not propagated -- the function always reports 1 to its caller.
	 */
	return 1;
}

/*
 * Statistics service functions
 */

/* Pull the current port statistics from the management block (GRC address
 * bp->port.port_stx) into host memory when this function becomes the PMF.
 * The copy is split into two chained DMAE transactions: the first
 * DMAE_LEN32_RD_MAX dwords complete to GRC (chaining), the remainder
 * completes to PCI so *stats_comp signals overall completion; the function
 * then posts the DMAEs and busy-waits for completion.
 */
static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, false, 0);

	/* first chunk: fixed-size read, GRC completion (chains to next DMAE) */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	/* second chunk: remainder of the stats region, PCI completion
	 * writes DMAE_COMP_VAL into *stats_comp when done
	 */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = bnx2x_get_port_stats_dma_len(bp) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

/* Program the DMAE sequence that publishes MCP/function stats to the
 * management block and collects MAC and NIG counters into host memory.
 * (Function body continues on the following chunk.)
 */
static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
				   true, DMAE_COMP_GRC);

	if (bp->port.port_stx) {
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode =
opcode; dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); dmae->dst_addr_lo = bp->port.port_stx >> 2; dmae->dst_addr_hi = 0; dmae->len = bnx2x_get_port_stats_dma_len(bp); dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; } if (bp->func_stx) { dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); dmae->dst_addr_lo = bp->func_stx >> 2; dmae->dst_addr_hi = 0; dmae->len = sizeof(struct host_func_stats) >> 2; dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; } /* MAC */ opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, true, DMAE_COMP_GRC); /* EMAC is special */ if (bp->link_vars.mac_type == MAC_TYPE_EMAC) { mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0); /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC) >> 2; dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT; dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; /* EMAC_REG_EMAC_RX_STAT_AC_28 */ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_RX_STAT_AC_28) >> 2; dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + offsetof(struct emac_stats, rx_stat_falsecarriererrors)); dmae->len = 1; dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; 
dmae->comp_val = 1; /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = (mac_addr + EMAC_REG_EMAC_TX_STAT_AC) >> 2; dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + offsetof(struct emac_stats, tx_stat_ifhcoutoctets)); dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT; dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; } else { u32 tx_src_addr_lo, rx_src_addr_lo; u16 rx_len, tx_len; /* configure the params according to MAC type */ switch (bp->link_vars.mac_type) { case MAC_TYPE_BMAC: mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM : NIG_REG_INGRESS_BMAC0_MEM); /* BIGMAC_REGISTER_TX_STAT_GTPKT .. BIGMAC_REGISTER_TX_STAT_GTBYT */ if (CHIP_IS_E1x(bp)) { tx_src_addr_lo = (mac_addr + BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; tx_len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT - BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2; rx_src_addr_lo = (mac_addr + BIGMAC_REGISTER_RX_STAT_GR64) >> 2; rx_len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ - BIGMAC_REGISTER_RX_STAT_GR64) >> 2; } else { tx_src_addr_lo = (mac_addr + BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; tx_len = (8 + BIGMAC2_REGISTER_TX_STAT_GTBYT - BIGMAC2_REGISTER_TX_STAT_GTPOK) >> 2; rx_src_addr_lo = (mac_addr + BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; rx_len = (8 + BIGMAC2_REGISTER_RX_STAT_GRIPJ - BIGMAC2_REGISTER_RX_STAT_GR64) >> 2; } break; case MAC_TYPE_UMAC: /* handled by MSTAT */ case MAC_TYPE_XMAC: /* handled by MSTAT */ default: mac_addr = port ? 
GRCBASE_MSTAT1 : GRCBASE_MSTAT0; tx_src_addr_lo = (mac_addr + MSTAT_REG_TX_STAT_GTXPOK_LO) >> 2; rx_src_addr_lo = (mac_addr + MSTAT_REG_RX_STAT_GR64_LO) >> 2; tx_len = sizeof(bp->slowpath-> mac_stats.mstat_stats.stats_tx) >> 2; rx_len = sizeof(bp->slowpath-> mac_stats.mstat_stats.stats_rx) >> 2; break; } /* TX stats */ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = tx_src_addr_lo; dmae->src_addr_hi = 0; dmae->len = tx_len; dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats)); dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats)); dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; /* RX stats */ dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_hi = 0; dmae->src_addr_lo = rx_src_addr_lo; dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) + (tx_len << 2)); dmae->len = rx_len; dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; } /* NIG */ if (!CHIP_IS_E3(bp)) { dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 : NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2; dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + offsetof(struct nig_stats, egress_mac_pkt0_lo)); dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + offsetof(struct nig_stats, egress_mac_pkt0_lo)); dmae->len = (2*sizeof(u32)) >> 2; dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); dmae->opcode = opcode; dmae->src_addr_lo = (port ? 
NIG_REG_STAT1_EGRESS_MAC_PKT1 : NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2; dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) + offsetof(struct nig_stats, egress_mac_pkt1_lo)); dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) + offsetof(struct nig_stats, egress_mac_pkt1_lo)); dmae->len = (2*sizeof(u32)) >> 2; dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; } dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_GRC, DMAE_DST_PCI, true, DMAE_COMP_PCI); dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD : NIG_REG_STAT0_BRB_DISCARD) >> 2; dmae->src_addr_hi = 0; dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats)); dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats)); dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2; dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; *stats_comp = 0; } static void bnx2x_func_stats_init(struct bnx2x *bp) { struct dmae_command *dmae = &bp->stats_dmae; u32 *stats_comp = bnx2x_sp(bp, stats_comp); /* sanity */ if (!bp->func_stx) { BNX2X_ERR("BUG!\n"); return; } bp->executer_idx = 0; memset(dmae, 0, sizeof(struct dmae_command)); dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, true, DMAE_COMP_PCI); dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); dmae->dst_addr_lo = bp->func_stx >> 2; dmae->dst_addr_hi = 0; dmae->len = sizeof(struct host_func_stats) >> 2; dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; *stats_comp = 0; } static void bnx2x_stats_start(struct bnx2x *bp) { if (bp->port.pmf) bnx2x_port_stats_init(bp); else if (bp->func_stx) bnx2x_func_stats_init(bp); bnx2x_hw_stats_post(bp); 
bnx2x_storm_stats_post(bp); } static void bnx2x_stats_pmf_start(struct bnx2x *bp) { bnx2x_stats_comp(bp); bnx2x_stats_pmf_update(bp); bnx2x_stats_start(bp); } static void bnx2x_stats_restart(struct bnx2x *bp) { bnx2x_stats_comp(bp); bnx2x_stats_start(bp); } static void bnx2x_bmac_stats_update(struct bnx2x *bp) { struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); struct bnx2x_eth_stats *estats = &bp->eth_stats; struct { u32 lo; u32 hi; } diff; if (CHIP_IS_E1x(bp)) { struct bmac1_stats *new = bnx2x_sp(bp, mac_stats.bmac1_stats); /* the macros below will use "bmac1_stats" type */ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); UPDATE_STAT64(tx_stat_gt127, tx_stat_etherstatspkts65octetsto127octets); UPDATE_STAT64(tx_stat_gt255, tx_stat_etherstatspkts128octetsto255octets); UPDATE_STAT64(tx_stat_gt511, tx_stat_etherstatspkts256octetsto511octets); UPDATE_STAT64(tx_stat_gt1023, tx_stat_etherstatspkts512octetsto1023octets); UPDATE_STAT64(tx_stat_gt1518, tx_stat_etherstatspkts1024octetsto1522octets); UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); UPDATE_STAT64(tx_stat_gterr, tx_stat_dot3statsinternalmactransmiterrors); UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); } else { struct 
bmac2_stats *new = bnx2x_sp(bp, mac_stats.bmac2_stats); /* the macros below will use "bmac2_stats" type */ UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets); UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors); UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts); UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong); UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments); UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers); UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived); UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered); UPDATE_STAT64(rx_stat_grxpf, rx_stat_mac_xpf); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent); UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone); UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets); UPDATE_STAT64(tx_stat_gt127, tx_stat_etherstatspkts65octetsto127octets); UPDATE_STAT64(tx_stat_gt255, tx_stat_etherstatspkts128octetsto255octets); UPDATE_STAT64(tx_stat_gt511, tx_stat_etherstatspkts256octetsto511octets); UPDATE_STAT64(tx_stat_gt1023, tx_stat_etherstatspkts512octetsto1023octets); UPDATE_STAT64(tx_stat_gt1518, tx_stat_etherstatspkts1024octetsto1522octets); UPDATE_STAT64(tx_stat_gt2047, tx_stat_mac_2047); UPDATE_STAT64(tx_stat_gt4095, tx_stat_mac_4095); UPDATE_STAT64(tx_stat_gt9216, tx_stat_mac_9216); UPDATE_STAT64(tx_stat_gt16383, tx_stat_mac_16383); UPDATE_STAT64(tx_stat_gterr, tx_stat_dot3statsinternalmactransmiterrors); UPDATE_STAT64(tx_stat_gtufl, tx_stat_mac_ufl); /* collect PFC stats */ pstats->pfc_frames_tx_hi = new->tx_stat_gtpp_hi; pstats->pfc_frames_tx_lo = new->tx_stat_gtpp_lo; pstats->pfc_frames_rx_hi = new->rx_stat_grpp_hi; pstats->pfc_frames_rx_lo = new->rx_stat_grpp_lo; } estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi; estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo; estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi; estats->pause_frames_sent_lo = 
pstats->mac_stx[1].tx_stat_outxoffsent_lo; estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi; estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo; estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi; estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo; } static void bnx2x_mstat_stats_update(struct bnx2x *bp) { struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); struct bnx2x_eth_stats *estats = &bp->eth_stats; struct mstat_stats *new = bnx2x_sp(bp, mac_stats.mstat_stats); ADD_STAT64(stats_rx.rx_grerb, rx_stat_ifhcinbadoctets); ADD_STAT64(stats_rx.rx_grfcs, rx_stat_dot3statsfcserrors); ADD_STAT64(stats_rx.rx_grund, rx_stat_etherstatsundersizepkts); ADD_STAT64(stats_rx.rx_grovr, rx_stat_dot3statsframestoolong); ADD_STAT64(stats_rx.rx_grfrg, rx_stat_etherstatsfragments); ADD_STAT64(stats_rx.rx_grxcf, rx_stat_maccontrolframesreceived); ADD_STAT64(stats_rx.rx_grxpf, rx_stat_xoffstateentered); ADD_STAT64(stats_rx.rx_grxpf, rx_stat_mac_xpf); ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_outxoffsent); ADD_STAT64(stats_tx.tx_gtxpf, tx_stat_flowcontroldone); /* collect pfc stats */ ADD_64(pstats->pfc_frames_tx_hi, new->stats_tx.tx_gtxpp_hi, pstats->pfc_frames_tx_lo, new->stats_tx.tx_gtxpp_lo); ADD_64(pstats->pfc_frames_rx_hi, new->stats_rx.rx_grxpp_hi, pstats->pfc_frames_rx_lo, new->stats_rx.rx_grxpp_lo); ADD_STAT64(stats_tx.tx_gt64, tx_stat_etherstatspkts64octets); ADD_STAT64(stats_tx.tx_gt127, tx_stat_etherstatspkts65octetsto127octets); ADD_STAT64(stats_tx.tx_gt255, tx_stat_etherstatspkts128octetsto255octets); ADD_STAT64(stats_tx.tx_gt511, tx_stat_etherstatspkts256octetsto511octets); ADD_STAT64(stats_tx.tx_gt1023, tx_stat_etherstatspkts512octetsto1023octets); ADD_STAT64(stats_tx.tx_gt1518, tx_stat_etherstatspkts1024octetsto1522octets); ADD_STAT64(stats_tx.tx_gt2047, tx_stat_mac_2047); ADD_STAT64(stats_tx.tx_gt4095, tx_stat_mac_4095); ADD_STAT64(stats_tx.tx_gt9216, tx_stat_mac_9216); ADD_STAT64(stats_tx.tx_gt16383, tx_stat_mac_16383); 
ADD_STAT64(stats_tx.tx_gterr, tx_stat_dot3statsinternalmactransmiterrors); ADD_STAT64(stats_tx.tx_gtufl, tx_stat_mac_ufl); estats->etherstatspkts1024octetsto1522octets_hi = pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_hi; estats->etherstatspkts1024octetsto1522octets_lo = pstats->mac_stx[1].tx_stat_etherstatspkts1024octetsto1522octets_lo; estats->etherstatspktsover1522octets_hi = pstats->mac_stx[1].tx_stat_mac_2047_hi; estats->etherstatspktsover1522octets_lo = pstats->mac_stx[1].tx_stat_mac_2047_lo; ADD_64(estats->etherstatspktsover1522octets_hi, pstats->mac_stx[1].tx_stat_mac_4095_hi, estats->etherstatspktsover1522octets_lo, pstats->mac_stx[1].tx_stat_mac_4095_lo); ADD_64(estats->etherstatspktsover1522octets_hi, pstats->mac_stx[1].tx_stat_mac_9216_hi, estats->etherstatspktsover1522octets_lo, pstats->mac_stx[1].tx_stat_mac_9216_lo); ADD_64(estats->etherstatspktsover1522octets_hi, pstats->mac_stx[1].tx_stat_mac_16383_hi, estats->etherstatspktsover1522octets_lo, pstats->mac_stx[1].tx_stat_mac_16383_lo); estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_mac_xpf_hi; estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_mac_xpf_lo; estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxoffsent_hi; estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxoffsent_lo; estats->pfc_frames_received_hi = pstats->pfc_frames_rx_hi; estats->pfc_frames_received_lo = pstats->pfc_frames_rx_lo; estats->pfc_frames_sent_hi = pstats->pfc_frames_tx_hi; estats->pfc_frames_sent_lo = pstats->pfc_frames_tx_lo; } static void bnx2x_emac_stats_update(struct bnx2x *bp) { struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats); struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); struct bnx2x_eth_stats *estats = &bp->eth_stats; UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets); UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets); UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors); UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors); 
UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors); UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors); UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts); UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong); UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments); UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers); UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived); UPDATE_EXTEND_STAT(rx_stat_xoffstateentered); UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived); UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived); UPDATE_EXTEND_STAT(tx_stat_outxonsent); UPDATE_EXTEND_STAT(tx_stat_outxoffsent); UPDATE_EXTEND_STAT(tx_stat_flowcontroldone); UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions); UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes); UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes); UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions); UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions); UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets); UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets); UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors); estats->pause_frames_received_hi = pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi; estats->pause_frames_received_lo = pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo; ADD_64(estats->pause_frames_received_hi, pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi, estats->pause_frames_received_lo, pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo); estats->pause_frames_sent_hi = pstats->mac_stx[1].tx_stat_outxonsent_hi; estats->pause_frames_sent_lo = pstats->mac_stx[1].tx_stat_outxonsent_lo; 
ADD_64(estats->pause_frames_sent_hi, pstats->mac_stx[1].tx_stat_outxoffsent_hi, estats->pause_frames_sent_lo, pstats->mac_stx[1].tx_stat_outxoffsent_lo); } static int bnx2x_hw_stats_update(struct bnx2x *bp) { struct nig_stats *new = bnx2x_sp(bp, nig_stats); struct nig_stats *old = &(bp->port.old_nig_stats); struct host_port_stats *pstats = bnx2x_sp(bp, port_stats); struct bnx2x_eth_stats *estats = &bp->eth_stats; struct { u32 lo; u32 hi; } diff; switch (bp->link_vars.mac_type) { case MAC_TYPE_BMAC: bnx2x_bmac_stats_update(bp); break; case MAC_TYPE_EMAC: bnx2x_emac_stats_update(bp); break; case MAC_TYPE_UMAC: case MAC_TYPE_XMAC: bnx2x_mstat_stats_update(bp); break; case MAC_TYPE_NONE: /* unreached */ DP(BNX2X_MSG_STATS, "stats updated by DMAE but no MAC active\n"); return -1; default: /* unreached */ BNX2X_ERR("Unknown MAC type\n"); } ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo, new->brb_discard - old->brb_discard); ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo, new->brb_truncate - old->brb_truncate); if (!CHIP_IS_E3(bp)) { UPDATE_STAT64_NIG(egress_mac_pkt0, etherstatspkts1024octetsto1522octets); UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets); } memcpy(old, new, sizeof(struct nig_stats)); memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]), sizeof(struct mac_stx)); estats->brb_drop_hi = pstats->brb_drop_hi; estats->brb_drop_lo = pstats->brb_drop_lo; pstats->host_port_stats_counter++; if (!BP_NOMCP(bp)) { u32 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer); if (nig_timer_max != estats->nig_timer_max) { estats->nig_timer_max = nig_timer_max; BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max); } } return 0; } static int bnx2x_storm_stats_update(struct bnx2x *bp) { struct tstorm_per_port_stats *tport = &bp->fw_stats_data->port.tstorm_port_statistics; struct tstorm_per_pf_stats *tfunc = &bp->fw_stats_data->pf.tstorm_pf_statistics; struct host_func_stats *fstats = &bp->func_stats; struct 
bnx2x_eth_stats *estats = &bp->eth_stats; struct bnx2x_eth_stats_old *estats_old = &bp->eth_stats_old; struct stats_counter *counters = &bp->fw_stats_data->storm_counters; int i; u16 cur_stats_counter; /* Make sure we use the value of the counter * used for sending the last stats ramrod. */ spin_lock_bh(&bp->stats_lock); cur_stats_counter = bp->stats_counter - 1; spin_unlock_bh(&bp->stats_lock); /* are storm stats valid? */ if (le16_to_cpu(counters->xstats_counter) != cur_stats_counter) { DP(BNX2X_MSG_STATS, "stats not updated by xstorm xstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->xstats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->ustats_counter) != cur_stats_counter) { DP(BNX2X_MSG_STATS, "stats not updated by ustorm ustorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->ustats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->cstats_counter) != cur_stats_counter) { DP(BNX2X_MSG_STATS, "stats not updated by cstorm cstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->cstats_counter), bp->stats_counter); return -EAGAIN; } if (le16_to_cpu(counters->tstats_counter) != cur_stats_counter) { DP(BNX2X_MSG_STATS, "stats not updated by tstorm tstorm counter (0x%x) != stats_counter (0x%x)\n", le16_to_cpu(counters->tstats_counter), bp->stats_counter); return -EAGAIN; } estats->error_bytes_received_hi = 0; estats->error_bytes_received_lo = 0; for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; struct tstorm_per_queue_stats *tclient = &bp->fw_stats_data->queue_stats[i]. tstorm_queue_statistics; struct tstorm_per_queue_stats *old_tclient = &fp->old_tclient; struct ustorm_per_queue_stats *uclient = &bp->fw_stats_data->queue_stats[i]. ustorm_queue_statistics; struct ustorm_per_queue_stats *old_uclient = &fp->old_uclient; struct xstorm_per_queue_stats *xclient = &bp->fw_stats_data->queue_stats[i]. 
xstorm_queue_statistics; struct xstorm_per_queue_stats *old_xclient = &fp->old_xclient; struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; u32 diff; DP(BNX2X_MSG_STATS, "queue[%d]: ucast_sent 0x%x, bcast_sent 0x%x mcast_sent 0x%x\n", i, xclient->ucast_pkts_sent, xclient->bcast_pkts_sent, xclient->mcast_pkts_sent); DP(BNX2X_MSG_STATS, "---------------\n"); UPDATE_QSTAT(tclient->rcv_bcast_bytes, total_broadcast_bytes_received); UPDATE_QSTAT(tclient->rcv_mcast_bytes, total_multicast_bytes_received); UPDATE_QSTAT(tclient->rcv_ucast_bytes, total_unicast_bytes_received); /* * sum to total_bytes_received all * unicast/multicast/broadcast */ qstats->total_bytes_received_hi = qstats->total_broadcast_bytes_received_hi; qstats->total_bytes_received_lo = qstats->total_broadcast_bytes_received_lo; ADD_64(qstats->total_bytes_received_hi, qstats->total_multicast_bytes_received_hi, qstats->total_bytes_received_lo, qstats->total_multicast_bytes_received_lo); ADD_64(qstats->total_bytes_received_hi, qstats->total_unicast_bytes_received_hi, qstats->total_bytes_received_lo, qstats->total_unicast_bytes_received_lo); qstats->valid_bytes_received_hi = qstats->total_bytes_received_hi; qstats->valid_bytes_received_lo = qstats->total_bytes_received_lo; UPDATE_EXTEND_TSTAT(rcv_ucast_pkts, total_unicast_packets_received); UPDATE_EXTEND_TSTAT(rcv_mcast_pkts, total_multicast_packets_received); UPDATE_EXTEND_TSTAT(rcv_bcast_pkts, total_broadcast_packets_received); UPDATE_EXTEND_E_TSTAT(pkts_too_big_discard, etherstatsoverrsizepkts); UPDATE_EXTEND_E_TSTAT(no_buff_discard, no_buff_discard); SUB_EXTEND_USTAT(ucast_no_buff_pkts, total_unicast_packets_received); SUB_EXTEND_USTAT(mcast_no_buff_pkts, total_multicast_packets_received); SUB_EXTEND_USTAT(bcast_no_buff_pkts, total_broadcast_packets_received); UPDATE_EXTEND_E_USTAT(ucast_no_buff_pkts, no_buff_discard); UPDATE_EXTEND_E_USTAT(mcast_no_buff_pkts, no_buff_discard); 
UPDATE_EXTEND_E_USTAT(bcast_no_buff_pkts, no_buff_discard); UPDATE_QSTAT(xclient->bcast_bytes_sent, total_broadcast_bytes_transmitted); UPDATE_QSTAT(xclient->mcast_bytes_sent, total_multicast_bytes_transmitted); UPDATE_QSTAT(xclient->ucast_bytes_sent, total_unicast_bytes_transmitted); /* * sum to total_bytes_transmitted all * unicast/multicast/broadcast */ qstats->total_bytes_transmitted_hi = qstats->total_unicast_bytes_transmitted_hi; qstats->total_bytes_transmitted_lo = qstats->total_unicast_bytes_transmitted_lo; ADD_64(qstats->total_bytes_transmitted_hi, qstats->total_broadcast_bytes_transmitted_hi, qstats->total_bytes_transmitted_lo, qstats->total_broadcast_bytes_transmitted_lo); ADD_64(qstats->total_bytes_transmitted_hi, qstats->total_multicast_bytes_transmitted_hi, qstats->total_bytes_transmitted_lo, qstats->total_multicast_bytes_transmitted_lo); UPDATE_EXTEND_XSTAT(ucast_pkts_sent, total_unicast_packets_transmitted); UPDATE_EXTEND_XSTAT(mcast_pkts_sent, total_multicast_packets_transmitted); UPDATE_EXTEND_XSTAT(bcast_pkts_sent, total_broadcast_packets_transmitted); UPDATE_EXTEND_TSTAT(checksum_discard, total_packets_received_checksum_discarded); UPDATE_EXTEND_TSTAT(ttl0_discard, total_packets_received_ttl0_discarded); UPDATE_EXTEND_XSTAT(error_drop_pkts, total_transmitted_dropped_packets_error); /* TPA aggregations completed */ UPDATE_EXTEND_E_USTAT(coalesced_events, total_tpa_aggregations); /* Number of network frames aggregated by TPA */ UPDATE_EXTEND_E_USTAT(coalesced_pkts, total_tpa_aggregated_frames); /* Total number of bytes in completed TPA aggregations */ UPDATE_QSTAT(uclient->coalesced_bytes, total_tpa_bytes); UPDATE_ESTAT_QSTAT_64(total_tpa_bytes); UPDATE_FSTAT_QSTAT(total_bytes_received); UPDATE_FSTAT_QSTAT(total_bytes_transmitted); UPDATE_FSTAT_QSTAT(total_unicast_packets_received); UPDATE_FSTAT_QSTAT(total_multicast_packets_received); UPDATE_FSTAT_QSTAT(total_broadcast_packets_received); UPDATE_FSTAT_QSTAT(total_unicast_packets_transmitted); 
UPDATE_FSTAT_QSTAT(total_multicast_packets_transmitted); UPDATE_FSTAT_QSTAT(total_broadcast_packets_transmitted); UPDATE_FSTAT_QSTAT(valid_bytes_received); } ADD_64(estats->total_bytes_received_hi, estats->rx_stat_ifhcinbadoctets_hi, estats->total_bytes_received_lo, estats->rx_stat_ifhcinbadoctets_lo); ADD_64(estats->total_bytes_received_hi, le32_to_cpu(tfunc->rcv_error_bytes.hi), estats->total_bytes_received_lo, le32_to_cpu(tfunc->rcv_error_bytes.lo)); ADD_64(estats->error_bytes_received_hi, le32_to_cpu(tfunc->rcv_error_bytes.hi), estats->error_bytes_received_lo, le32_to_cpu(tfunc->rcv_error_bytes.lo)); UPDATE_ESTAT(etherstatsoverrsizepkts, rx_stat_dot3statsframestoolong); ADD_64(estats->error_bytes_received_hi, estats->rx_stat_ifhcinbadoctets_hi, estats->error_bytes_received_lo, estats->rx_stat_ifhcinbadoctets_lo); if (bp->port.pmf) { struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; UPDATE_FW_STAT(mac_filter_discard); UPDATE_FW_STAT(mf_tag_discard); UPDATE_FW_STAT(brb_truncate_discard); UPDATE_FW_STAT(mac_discard); } fstats->host_func_stats_start = ++fstats->host_func_stats_end; bp->stats_pending = 0; return 0; } static void bnx2x_net_stats_update(struct bnx2x *bp) { struct bnx2x_eth_stats *estats = &bp->eth_stats; struct net_device_stats *nstats = &bp->dev->stats; unsigned long tmp; int i; nstats->rx_packets = bnx2x_hilo(&estats->total_unicast_packets_received_hi) + bnx2x_hilo(&estats->total_multicast_packets_received_hi) + bnx2x_hilo(&estats->total_broadcast_packets_received_hi); nstats->tx_packets = bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) + bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) + bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi); nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi); nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); tmp = estats->mac_discard; for_each_rx_queue(bp, i) tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); nstats->rx_dropped = tmp + 
		/* continuation of bnx2x_net_stats_update(): finish filling
		 * struct net_device_stats from the accumulated eth_stats
		 */
		bp->net_stats_old.rx_dropped;
	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors =
		bnx2x_hilo(&estats->brb_drop_hi) +
		bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = 0;

	/* rx_errors is the sum of all the rx error categories above */
	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

/* Fold the per-queue driver-maintained counters (xoff, rx discards,
 * skb-alloc failures, csum errors) into the global eth_stats.
 */
static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
		struct bnx2x_eth_q_stats_old *qstats_old =
			&bp->fp[i].eth_q_stats_old;

		UPDATE_ESTAT_QSTAT(driver_xoff);
		UPDATE_ESTAT_QSTAT(rx_err_discard_pkt);
		UPDATE_ESTAT_QSTAT(rx_skb_alloc_failed);
		UPDATE_ESTAT_QSTAT(hw_csum_err);
	}
}

/* Return true when the debug interface in shared memory (SHMEM2) has
 * requested that statistics collection be disabled.
 */
static bool bnx2x_edebug_stats_stopped(struct bnx2x *bp)
{
	u32 val;

	if (SHMEM2_HAS(bp, edebug_driver_if[1])) {
		val = SHMEM2_RD(bp, edebug_driver_if[1]);

		if (val == EDEBUG_DRIVER_IF_OP_CODE_DISABLE_STAT)
			return true;
	}

	return false;
}

/* bnx2x_stats_update() follows on the next chunk */
static
void bnx2x_stats_update(struct bnx2x *bp) { u32 *stats_comp = bnx2x_sp(bp, stats_comp); if (bnx2x_edebug_stats_stopped(bp)) return; if (*stats_comp != DMAE_COMP_VAL) return; if (bp->port.pmf) bnx2x_hw_stats_update(bp); if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) { BNX2X_ERR("storm stats were not updated for 3 times\n"); bnx2x_panic(); return; } bnx2x_net_stats_update(bp); bnx2x_drv_stats_update(bp); if (netif_msg_timer(bp)) { struct bnx2x_eth_stats *estats = &bp->eth_stats; netdev_dbg(bp->dev, "brb drops %u brb truncate %u\n", estats->brb_drop_lo, estats->brb_truncate_lo); } bnx2x_hw_stats_post(bp); bnx2x_storm_stats_post(bp); } static void bnx2x_port_stats_stop(struct bnx2x *bp) { struct dmae_command *dmae; u32 opcode; int loader_idx = PMF_DMAE_C(bp); u32 *stats_comp = bnx2x_sp(bp, stats_comp); bp->executer_idx = 0; opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, false, 0); if (bp->port.port_stx) { dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); if (bp->func_stx) dmae->opcode = bnx2x_dmae_opcode_add_comp( opcode, DMAE_COMP_GRC); else dmae->opcode = bnx2x_dmae_opcode_add_comp( opcode, DMAE_COMP_PCI); dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); dmae->dst_addr_lo = bp->port.port_stx >> 2; dmae->dst_addr_hi = 0; dmae->len = bnx2x_get_port_stats_dma_len(bp); if (bp->func_stx) { dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2; dmae->comp_addr_hi = 0; dmae->comp_val = 1; } else { dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; *stats_comp = 0; } } if (bp->func_stx) { dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); dmae->opcode = bnx2x_dmae_opcode_add_comp(opcode, DMAE_COMP_PCI); dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats)); dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats)); dmae->dst_addr_lo = bp->func_stx >> 2; 
dmae->dst_addr_hi = 0; dmae->len = sizeof(struct host_func_stats) >> 2; dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; *stats_comp = 0; } } static void bnx2x_stats_stop(struct bnx2x *bp) { int update = 0; bnx2x_stats_comp(bp); if (bp->port.pmf) update = (bnx2x_hw_stats_update(bp) == 0); update |= (bnx2x_storm_stats_update(bp) == 0); if (update) { bnx2x_net_stats_update(bp); if (bp->port.pmf) bnx2x_port_stats_stop(bp); bnx2x_hw_stats_post(bp); bnx2x_stats_comp(bp); } } static void bnx2x_stats_do_nothing(struct bnx2x *bp) { } static const struct { void (*action)(struct bnx2x *bp); enum bnx2x_stats_state next_state; } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = { /* state event */ { /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED}, /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED}, /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}, /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED} }, { /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED}, /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED}, /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED}, /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED} } }; void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event) { enum bnx2x_stats_state state; if (unlikely(bp->panic)) return; spin_lock_bh(&bp->stats_lock); state = bp->stats_state; bp->stats_state = bnx2x_stats_stm[state][event].next_state; spin_unlock_bh(&bp->stats_lock); bnx2x_stats_stm[state][event].action(bp); if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp)) DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n", state, event, bp->stats_state); } static void bnx2x_port_stats_base_init(struct bnx2x *bp) { struct dmae_command *dmae; u32 *stats_comp = bnx2x_sp(bp, stats_comp); /* sanity */ if (!bp->port.pmf || !bp->port.port_stx) { BNX2X_ERR("BUG!\n"); return; } 
bp->executer_idx = 0; dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]); dmae->opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC, true, DMAE_COMP_PCI); dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats)); dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats)); dmae->dst_addr_lo = bp->port.port_stx >> 2; dmae->dst_addr_hi = 0; dmae->len = bnx2x_get_port_stats_dma_len(bp); dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp)); dmae->comp_val = DMAE_COMP_VAL; *stats_comp = 0; bnx2x_hw_stats_post(bp); bnx2x_stats_comp(bp); } /** * This function will prepare the statistics ramrod data the way * we will only have to increment the statistics counter and * send the ramrod each time we have to. * * @param bp */ static inline void bnx2x_prep_fw_stats_req(struct bnx2x *bp) { int i; int first_queue_query_index; struct stats_query_header *stats_hdr = &bp->fw_stats_req->hdr; dma_addr_t cur_data_offset; struct stats_query_entry *cur_query_entry; stats_hdr->cmd_num = bp->fw_stats_num; stats_hdr->drv_stats_counter = 0; /* storm_counters struct contains the counters of completed * statistics requests per storm which are incremented by FW * each time it completes hadning a statistics ramrod. We will * check these counters in the timer handler and discard a * (statistics) ramrod completion. */ cur_data_offset = bp->fw_stats_data_mapping + offsetof(struct bnx2x_fw_stats_data, storm_counters); stats_hdr->stats_counters_addrs.hi = cpu_to_le32(U64_HI(cur_data_offset)); stats_hdr->stats_counters_addrs.lo = cpu_to_le32(U64_LO(cur_data_offset)); /* prepare to the first stats ramrod (will be completed with * the counters equal to zero) - init counters to somethig different. 
*/ memset(&bp->fw_stats_data->storm_counters, 0xff, sizeof(struct stats_counter)); /**** Port FW statistics data ****/ cur_data_offset = bp->fw_stats_data_mapping + offsetof(struct bnx2x_fw_stats_data, port); cur_query_entry = &bp->fw_stats_req->query[BNX2X_PORT_QUERY_IDX]; cur_query_entry->kind = STATS_TYPE_PORT; /* For port query index is a DONT CARE */ cur_query_entry->index = BP_PORT(bp); /* For port query funcID is a DONT CARE */ cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); /**** PF FW statistics data ****/ cur_data_offset = bp->fw_stats_data_mapping + offsetof(struct bnx2x_fw_stats_data, pf); cur_query_entry = &bp->fw_stats_req->query[BNX2X_PF_QUERY_IDX]; cur_query_entry->kind = STATS_TYPE_PF; /* For PF query index is a DONT CARE */ cur_query_entry->index = BP_PORT(bp); cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); /**** FCoE FW statistics data ****/ if (!NO_FCOE(bp)) { cur_data_offset = bp->fw_stats_data_mapping + offsetof(struct bnx2x_fw_stats_data, fcoe); cur_query_entry = &bp->fw_stats_req->query[BNX2X_FCOE_QUERY_IDX]; cur_query_entry->kind = STATS_TYPE_FCOE; /* For FCoE query index is a DONT CARE */ cur_query_entry->index = BP_PORT(bp); cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); } /**** Clients' queries ****/ cur_data_offset = bp->fw_stats_data_mapping + offsetof(struct bnx2x_fw_stats_data, queue_stats); /* first queue query index depends whether FCoE offloaded request will * be included in the ramrod */ if (!NO_FCOE(bp)) first_queue_query_index = BNX2X_FIRST_QUEUE_QUERY_IDX; else first_queue_query_index = 
BNX2X_FIRST_QUEUE_QUERY_IDX - 1; for_each_eth_queue(bp, i) { cur_query_entry = &bp->fw_stats_req-> query[first_queue_query_index + i]; cur_query_entry->kind = STATS_TYPE_QUEUE; cur_query_entry->index = bnx2x_stats_id(&bp->fp[i]); cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); cur_data_offset += sizeof(struct per_queue_stats); } /* add FCoE queue query if needed */ if (!NO_FCOE(bp)) { cur_query_entry = &bp->fw_stats_req-> query[first_queue_query_index + i]; cur_query_entry->kind = STATS_TYPE_QUEUE; cur_query_entry->index = bnx2x_stats_id(&bp->fp[FCOE_IDX]); cur_query_entry->funcID = cpu_to_le16(BP_FUNC(bp)); cur_query_entry->address.hi = cpu_to_le32(U64_HI(cur_data_offset)); cur_query_entry->address.lo = cpu_to_le32(U64_LO(cur_data_offset)); } } void bnx2x_stats_init(struct bnx2x *bp) { int /*abs*/port = BP_PORT(bp); int mb_idx = BP_FW_MB_IDX(bp); int i; bp->stats_pending = 0; bp->executer_idx = 0; bp->stats_counter = 0; /* port and func stats for management */ if (!BP_NOMCP(bp)) { bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx); bp->func_stx = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_param); } else { bp->port.port_stx = 0; bp->func_stx = 0; } DP(BNX2X_MSG_STATS, "port_stx 0x%x func_stx 0x%x\n", bp->port.port_stx, bp->func_stx); /* pmf should retrieve port statistics from SP on a non-init*/ if (!bp->stats_init && bp->port.pmf && bp->port.port_stx) bnx2x_stats_handle(bp, STATS_EVENT_PMF); port = BP_PORT(bp); /* port stats */ memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats)); bp->port.old_nig_stats.brb_discard = REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38); bp->port.old_nig_stats.brb_truncate = REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38); if (!CHIP_IS_E3(bp)) { REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50, &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2); REG_RD_DMAE(bp, 
NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50, &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2); } /* function stats */ for_each_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; memset(&fp->old_tclient, 0, sizeof(fp->old_tclient)); memset(&fp->old_uclient, 0, sizeof(fp->old_uclient)); memset(&fp->old_xclient, 0, sizeof(fp->old_xclient)); if (bp->stats_init) { memset(&fp->eth_q_stats, 0, sizeof(fp->eth_q_stats)); memset(&fp->eth_q_stats_old, 0, sizeof(fp->eth_q_stats_old)); } } /* Prepare statistics ramrod data */ bnx2x_prep_fw_stats_req(bp); memset(&bp->dev->stats, 0, sizeof(bp->dev->stats)); if (bp->stats_init) { memset(&bp->net_stats_old, 0, sizeof(bp->net_stats_old)); memset(&bp->fw_stats_old, 0, sizeof(bp->fw_stats_old)); memset(&bp->eth_stats_old, 0, sizeof(bp->eth_stats_old)); memset(&bp->eth_stats, 0, sizeof(bp->eth_stats)); memset(&bp->func_stats, 0, sizeof(bp->func_stats)); /* Clean SP from previous statistics */ if (bp->func_stx) { memset(bnx2x_sp(bp, func_stats), 0, sizeof(struct host_func_stats)); bnx2x_func_stats_init(bp); bnx2x_hw_stats_post(bp); bnx2x_stats_comp(bp); } } bp->stats_state = STATS_STATE_DISABLED; if (bp->port.pmf && bp->port.port_stx) bnx2x_port_stats_base_init(bp); /* mark the end of statistics initializiation */ bp->stats_init = false; } void bnx2x_save_statistics(struct bnx2x *bp) { int i; struct net_device_stats *nstats = &bp->dev->stats; /* save queue statistics */ for_each_eth_queue(bp, i) { struct bnx2x_fastpath *fp = &bp->fp[i]; struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; struct bnx2x_eth_q_stats_old *qstats_old = &fp->eth_q_stats_old; UPDATE_QSTAT_OLD(total_unicast_bytes_received_hi); UPDATE_QSTAT_OLD(total_unicast_bytes_received_lo); UPDATE_QSTAT_OLD(total_broadcast_bytes_received_hi); UPDATE_QSTAT_OLD(total_broadcast_bytes_received_lo); UPDATE_QSTAT_OLD(total_multicast_bytes_received_hi); UPDATE_QSTAT_OLD(total_multicast_bytes_received_lo); UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_hi); 
UPDATE_QSTAT_OLD(total_unicast_bytes_transmitted_lo); UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_hi); UPDATE_QSTAT_OLD(total_broadcast_bytes_transmitted_lo); UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_hi); UPDATE_QSTAT_OLD(total_multicast_bytes_transmitted_lo); UPDATE_QSTAT_OLD(total_tpa_bytes_hi); UPDATE_QSTAT_OLD(total_tpa_bytes_lo); } /* save net_device_stats statistics */ bp->net_stats_old.rx_dropped = nstats->rx_dropped; /* store port firmware statistics */ if (bp->port.pmf && IS_MF(bp)) { struct bnx2x_eth_stats *estats = &bp->eth_stats; struct bnx2x_fw_port_stats_old *fwstats = &bp->fw_stats_old; UPDATE_FW_STAT_OLD(mac_filter_discard); UPDATE_FW_STAT_OLD(mf_tag_discard); UPDATE_FW_STAT_OLD(brb_truncate_discard); UPDATE_FW_STAT_OLD(mac_discard); } }
gpl-2.0
CyanogenMod/android_kernel_sony_msm8930
drivers/edac/i3200_edac.c
4810
12459
/* * Intel 3200/3210 Memory Controller kernel module * Copyright (C) 2008-2009 Akamai Technologies, Inc. * Portions by Hitoshi Mitake <h.mitake@gmail.com>. * * This file may be distributed under the terms of the * GNU General Public License. */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/edac.h> #include <linux/io.h> #include "edac_core.h" #include <asm-generic/io-64-nonatomic-lo-hi.h> #define I3200_REVISION "1.1" #define EDAC_MOD_STR "i3200_edac" #define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0 #define I3200_RANKS 8 #define I3200_RANKS_PER_CHANNEL 4 #define I3200_CHANNELS 2 /* Intel 3200 register addresses - device 0 function 0 - DRAM Controller */ #define I3200_MCHBAR_LOW 0x48 /* MCH Memory Mapped Register BAR */ #define I3200_MCHBAR_HIGH 0x4c #define I3200_MCHBAR_MASK 0xfffffc000ULL /* bits 35:14 */ #define I3200_MMR_WINDOW_SIZE 16384 #define I3200_TOM 0xa0 /* Top of Memory (16b) * * 15:10 reserved * 9:0 total populated physical memory */ #define I3200_TOM_MASK 0x3ff /* bits 9:0 */ #define I3200_TOM_SHIFT 26 /* 64MiB grain */ #define I3200_ERRSTS 0xc8 /* Error Status Register (16b) * * 15 reserved * 14 Isochronous TBWRR Run Behind FIFO Full * (ITCV) * 13 Isochronous TBWRR Run Behind FIFO Put * (ITSTV) * 12 reserved * 11 MCH Thermal Sensor Event * for SMI/SCI/SERR (GTSE) * 10 reserved * 9 LOCK to non-DRAM Memory Flag (LCKF) * 8 reserved * 7 DRAM Throttle Flag (DTF) * 6:2 reserved * 1 Multi-bit DRAM ECC Error Flag (DMERR) * 0 Single-bit DRAM ECC Error Flag (DSERR) */ #define I3200_ERRSTS_UE 0x0002 #define I3200_ERRSTS_CE 0x0001 #define I3200_ERRSTS_BITS (I3200_ERRSTS_UE | I3200_ERRSTS_CE) /* Intel MMIO register space - device 0 function 0 - MMR space */ #define I3200_C0DRB 0x200 /* Channel 0 DRAM Rank Boundary (16b x 4) * * 15:10 reserved * 9:0 Channel 0 DRAM Rank Boundary Address */ #define I3200_C1DRB 0x600 /* Channel 1 DRAM Rank Boundary (16b x 4) */ #define I3200_DRB_MASK 0x3ff /* bits 9:0 */ 
#define I3200_DRB_SHIFT 26 /* 64MiB grain */ #define I3200_C0ECCERRLOG 0x280 /* Channel 0 ECC Error Log (64b) * * 63:48 Error Column Address (ERRCOL) * 47:32 Error Row Address (ERRROW) * 31:29 Error Bank Address (ERRBANK) * 28:27 Error Rank Address (ERRRANK) * 26:24 reserved * 23:16 Error Syndrome (ERRSYND) * 15: 2 reserved * 1 Multiple Bit Error Status (MERRSTS) * 0 Correctable Error Status (CERRSTS) */ #define I3200_C1ECCERRLOG 0x680 /* Chan 1 ECC Error Log (64b) */ #define I3200_ECCERRLOG_CE 0x1 #define I3200_ECCERRLOG_UE 0x2 #define I3200_ECCERRLOG_RANK_BITS 0x18000000 #define I3200_ECCERRLOG_RANK_SHIFT 27 #define I3200_ECCERRLOG_SYNDROME_BITS 0xff0000 #define I3200_ECCERRLOG_SYNDROME_SHIFT 16 #define I3200_CAPID0 0xe0 /* P.95 of spec for details */ struct i3200_priv { void __iomem *window; }; static int nr_channels; static int how_many_channels(struct pci_dev *pdev) { unsigned char capid0_8b; /* 8th byte of CAPID0 */ pci_read_config_byte(pdev, I3200_CAPID0 + 8, &capid0_8b); if (capid0_8b & 0x20) { /* check DCD: Dual Channel Disable */ debugf0("In single channel mode.\n"); return 1; } else { debugf0("In dual channel mode.\n"); return 2; } } static unsigned long eccerrlog_syndrome(u64 log) { return (log & I3200_ECCERRLOG_SYNDROME_BITS) >> I3200_ECCERRLOG_SYNDROME_SHIFT; } static int eccerrlog_row(int channel, u64 log) { u64 rank = ((log & I3200_ECCERRLOG_RANK_BITS) >> I3200_ECCERRLOG_RANK_SHIFT); return rank | (channel * I3200_RANKS_PER_CHANNEL); } enum i3200_chips { I3200 = 0, }; struct i3200_dev_info { const char *ctl_name; }; struct i3200_error_info { u16 errsts; u16 errsts2; u64 eccerrlog[I3200_CHANNELS]; }; static const struct i3200_dev_info i3200_devs[] = { [I3200] = { .ctl_name = "i3200" }, }; static struct pci_dev *mci_pdev; static int i3200_registered = 1; static void i3200_clear_error_info(struct mem_ctl_info *mci) { struct pci_dev *pdev; pdev = to_pci_dev(mci->dev); /* * Clear any error bits. * (Yes, we really clear bits by writing 1 to them.) 
*/ pci_write_bits16(pdev, I3200_ERRSTS, I3200_ERRSTS_BITS, I3200_ERRSTS_BITS); } static void i3200_get_and_clear_error_info(struct mem_ctl_info *mci, struct i3200_error_info *info) { struct pci_dev *pdev; struct i3200_priv *priv = mci->pvt_info; void __iomem *window = priv->window; pdev = to_pci_dev(mci->dev); /* * This is a mess because there is no atomic way to read all the * registers at once and the registers can transition from CE being * overwritten by UE. */ pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts); if (!(info->errsts & I3200_ERRSTS_BITS)) return; info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG); if (nr_channels == 2) info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG); pci_read_config_word(pdev, I3200_ERRSTS, &info->errsts2); /* * If the error is the same for both reads then the first set * of reads is valid. If there is a change then there is a CE * with no info and the second set of reads is valid and * should be UE info. */ if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { info->eccerrlog[0] = readq(window + I3200_C0ECCERRLOG); if (nr_channels == 2) info->eccerrlog[1] = readq(window + I3200_C1ECCERRLOG); } i3200_clear_error_info(mci); } static void i3200_process_error_info(struct mem_ctl_info *mci, struct i3200_error_info *info) { int channel; u64 log; if (!(info->errsts & I3200_ERRSTS_BITS)) return; if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); info->errsts = info->errsts2; } for (channel = 0; channel < nr_channels; channel++) { log = info->eccerrlog[channel]; if (log & I3200_ECCERRLOG_UE) { edac_mc_handle_ue(mci, 0, 0, eccerrlog_row(channel, log), "i3200 UE"); } else if (log & I3200_ECCERRLOG_CE) { edac_mc_handle_ce(mci, 0, 0, eccerrlog_syndrome(log), eccerrlog_row(channel, log), 0, "i3200 CE"); } } } static void i3200_check(struct mem_ctl_info *mci) { struct i3200_error_info info; debugf1("MC%d: %s()\n", mci->mc_idx, __func__); 
i3200_get_and_clear_error_info(mci, &info); i3200_process_error_info(mci, &info); } void __iomem *i3200_map_mchbar(struct pci_dev *pdev) { union { u64 mchbar; struct { u32 mchbar_low; u32 mchbar_high; }; } u; void __iomem *window; pci_read_config_dword(pdev, I3200_MCHBAR_LOW, &u.mchbar_low); pci_read_config_dword(pdev, I3200_MCHBAR_HIGH, &u.mchbar_high); u.mchbar &= I3200_MCHBAR_MASK; if (u.mchbar != (resource_size_t)u.mchbar) { printk(KERN_ERR "i3200: mmio space beyond accessible range (0x%llx)\n", (unsigned long long)u.mchbar); return NULL; } window = ioremap_nocache(u.mchbar, I3200_MMR_WINDOW_SIZE); if (!window) printk(KERN_ERR "i3200: cannot map mmio space at 0x%llx\n", (unsigned long long)u.mchbar); return window; } static void i3200_get_drbs(void __iomem *window, u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]) { int i; for (i = 0; i < I3200_RANKS_PER_CHANNEL; i++) { drbs[0][i] = readw(window + I3200_C0DRB + 2*i) & I3200_DRB_MASK; drbs[1][i] = readw(window + I3200_C1DRB + 2*i) & I3200_DRB_MASK; } } static bool i3200_is_stacked(struct pci_dev *pdev, u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]) { u16 tom; pci_read_config_word(pdev, I3200_TOM, &tom); tom &= I3200_TOM_MASK; return drbs[I3200_CHANNELS - 1][I3200_RANKS_PER_CHANNEL - 1] == tom; } static unsigned long drb_to_nr_pages( u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL], bool stacked, int channel, int rank) { int n; n = drbs[channel][rank]; if (rank > 0) n -= drbs[channel][rank - 1]; if (stacked && (channel == 1) && drbs[channel][rank] == drbs[channel][I3200_RANKS_PER_CHANNEL - 1]) n -= drbs[0][I3200_RANKS_PER_CHANNEL - 1]; n <<= (I3200_DRB_SHIFT - PAGE_SHIFT); return n; } static int i3200_probe1(struct pci_dev *pdev, int dev_idx) { int rc; int i; struct mem_ctl_info *mci = NULL; unsigned long last_page; u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]; bool stacked; void __iomem *window; struct i3200_priv *priv; debugf0("MC: %s()\n", __func__); window = i3200_map_mchbar(pdev); if (!window) 
return -ENODEV; i3200_get_drbs(window, drbs); nr_channels = how_many_channels(pdev); mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS, nr_channels, 0); if (!mci) return -ENOMEM; debugf3("MC: %s(): init mci\n", __func__); mci->dev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR2; mci->edac_ctl_cap = EDAC_FLAG_SECDED; mci->edac_cap = EDAC_FLAG_SECDED; mci->mod_name = EDAC_MOD_STR; mci->mod_ver = I3200_REVISION; mci->ctl_name = i3200_devs[dev_idx].ctl_name; mci->dev_name = pci_name(pdev); mci->edac_check = i3200_check; mci->ctl_page_to_phys = NULL; priv = mci->pvt_info; priv->window = window; stacked = i3200_is_stacked(pdev, drbs); /* * The dram rank boundary (DRB) reg values are boundary addresses * for each DRAM rank with a granularity of 64MB. DRB regs are * cumulative; the last one will contain the total memory * contained in all ranks. */ last_page = -1UL; for (i = 0; i < mci->nr_csrows; i++) { unsigned long nr_pages; struct csrow_info *csrow = &mci->csrows[i]; nr_pages = drb_to_nr_pages(drbs, stacked, i / I3200_RANKS_PER_CHANNEL, i % I3200_RANKS_PER_CHANNEL); if (nr_pages == 0) { csrow->mtype = MEM_EMPTY; continue; } csrow->first_page = last_page + 1; last_page += nr_pages; csrow->last_page = last_page; csrow->nr_pages = nr_pages; csrow->grain = nr_pages << PAGE_SHIFT; csrow->mtype = MEM_DDR2; csrow->dtype = DEV_UNKNOWN; csrow->edac_mode = EDAC_UNKNOWN; } i3200_clear_error_info(mci); rc = -ENODEV; if (edac_mc_add_mc(mci)) { debugf3("MC: %s(): failed edac_mc_add_mc()\n", __func__); goto fail; } /* get this far and it's successful */ debugf3("MC: %s(): success\n", __func__); return 0; fail: iounmap(window); if (mci) edac_mc_free(mci); return rc; } static int __devinit i3200_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc; debugf0("MC: %s()\n", __func__); if (pci_enable_device(pdev) < 0) return -EIO; rc = i3200_probe1(pdev, ent->driver_data); if (!mci_pdev) mci_pdev = pci_dev_get(pdev); return rc; } static void __devexit 
i3200_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; struct i3200_priv *priv; debugf0("%s()\n", __func__); mci = edac_mc_del_mc(&pdev->dev); if (!mci) return; priv = mci->pvt_info; iounmap(priv->window); edac_mc_free(mci); } static DEFINE_PCI_DEVICE_TABLE(i3200_pci_tbl) = { { PCI_VEND_DEV(INTEL, 3200_HB), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I3200}, { 0, } /* 0 terminated list. */ }; MODULE_DEVICE_TABLE(pci, i3200_pci_tbl); static struct pci_driver i3200_driver = { .name = EDAC_MOD_STR, .probe = i3200_init_one, .remove = __devexit_p(i3200_remove_one), .id_table = i3200_pci_tbl, }; static int __init i3200_init(void) { int pci_rc; debugf3("MC: %s()\n", __func__); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); pci_rc = pci_register_driver(&i3200_driver); if (pci_rc < 0) goto fail0; if (!mci_pdev) { i3200_registered = 0; mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_3200_HB, NULL); if (!mci_pdev) { debugf0("i3200 pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } pci_rc = i3200_init_one(mci_pdev, i3200_pci_tbl); if (pci_rc < 0) { debugf0("i3200 init fail\n"); pci_rc = -ENODEV; goto fail1; } } return 0; fail1: pci_unregister_driver(&i3200_driver); fail0: if (mci_pdev) pci_dev_put(mci_pdev); return pci_rc; } static void __exit i3200_exit(void) { debugf3("MC: %s()\n", __func__); pci_unregister_driver(&i3200_driver); if (!i3200_registered) { i3200_remove_one(mci_pdev); pci_dev_put(mci_pdev); } } module_init(i3200_init); module_exit(i3200_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Akamai Technologies, Inc."); MODULE_DESCRIPTION("MC support for Intel 3200 memory hub controllers"); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
Team-SennyC2/senny_kernel-3.4
arch/x86/mm/init_32.c
4810
25288
/* * * Copyright (C) 1995 Linus Torvalds * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #include <linux/module.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/pci.h> #include <linux/pfn.h> #include <linux/poison.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/proc_fs.h> #include <linux/memory_hotplug.h> #include <linux/initrd.h> #include <linux/cpumask.h> #include <linux/gfp.h> #include <asm/asm.h> #include <asm/bios_ebda.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/dma.h> #include <asm/fixmap.h> #include <asm/e820.h> #include <asm/apic.h> #include <asm/bugs.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/olpc_ofw.h> #include <asm/pgalloc.h> #include <asm/sections.h> #include <asm/paravirt.h> #include <asm/setup.h> #include <asm/cacheflush.h> #include <asm/page_types.h> #include <asm/init.h> unsigned long highstart_pfn, highend_pfn; static noinline int do_test_wp_bit(void); bool __read_mostly __vmalloc_start_set = false; static __init void *alloc_low_page(void) { unsigned long pfn = pgt_buf_end++; void *adr; if (pfn >= pgt_buf_top) panic("alloc_low_page: ran out of memory"); adr = __va(pfn * PAGE_SIZE); clear_page(adr); return adr; } /* * Creates a middle page table and puts a pointer to it in the * given global directory entry. This only returns the gd entry * in non-PAE compilation mode, since the middle layer is folded. 
*/ static pmd_t * __init one_md_table_init(pgd_t *pgd) { pud_t *pud; pmd_t *pmd_table; #ifdef CONFIG_X86_PAE if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { if (after_bootmem) pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE); else pmd_table = (pmd_t *)alloc_low_page(); paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); pud = pud_offset(pgd, 0); BUG_ON(pmd_table != pmd_offset(pud, 0)); return pmd_table; } #endif pud = pud_offset(pgd, 0); pmd_table = pmd_offset(pud, 0); return pmd_table; } /* * Create a page table and place a pointer to it in a middle page * directory entry: */ static pte_t * __init one_page_table_init(pmd_t *pmd) { if (!(pmd_val(*pmd) & _PAGE_PRESENT)) { pte_t *page_table = NULL; if (after_bootmem) { #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); #endif if (!page_table) page_table = (pte_t *)alloc_bootmem_pages(PAGE_SIZE); } else page_table = (pte_t *)alloc_low_page(); paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); BUG_ON(page_table != pte_offset_kernel(pmd, 0)); } return pte_offset_kernel(pmd, 0); } pmd_t * __init populate_extra_pmd(unsigned long vaddr) { int pgd_idx = pgd_index(vaddr); int pmd_idx = pmd_index(vaddr); return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx; } pte_t * __init populate_extra_pte(unsigned long vaddr) { int pte_idx = pte_index(vaddr); pmd_t *pmd; pmd = populate_extra_pmd(vaddr); return one_page_table_init(pmd) + pte_idx; } static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, unsigned long vaddr, pte_t *lastpte) { #ifdef CONFIG_HIGHMEM /* * Something (early fixmap) may already have put a pte * page here, which causes the page table allocation * to become nonlinear. Attempt to fix it, and if it * is still nonlinear then we have to bug. 
*/ int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT; int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT; if (pmd_idx_kmap_begin != pmd_idx_kmap_end && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) { pte_t *newpte; int i; BUG_ON(after_bootmem); newpte = alloc_low_page(); for (i = 0; i < PTRS_PER_PTE; i++) set_pte(newpte + i, pte[i]); paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE)); BUG_ON(newpte != pte_offset_kernel(pmd, 0)); __flush_tlb_all(); paravirt_release_pte(__pa(pte) >> PAGE_SHIFT); pte = newpte; } BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1) && vaddr > fix_to_virt(FIX_KMAP_END) && lastpte && lastpte + PTRS_PER_PTE != pte); #endif return pte; } /* * This function initializes a certain range of kernel virtual memory * with new bootmem page tables, everywhere page tables are missing in * the given range. * * NOTE: The pagetables are allocated contiguous on the physical space * so we can cache the place of the first one and move around without * checking the pgd every time. 
*/ static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) { int pgd_idx, pmd_idx; unsigned long vaddr; pgd_t *pgd; pmd_t *pmd; pte_t *pte = NULL; vaddr = start; pgd_idx = pgd_index(vaddr); pmd_idx = pmd_index(vaddr); pgd = pgd_base + pgd_idx; for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { pmd = one_md_table_init(pgd); pmd = pmd + pmd_index(vaddr); for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) { pte = page_table_kmap_check(one_page_table_init(pmd), pmd, vaddr, pte); vaddr += PMD_SIZE; } pmd_idx = 0; } } static inline int is_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) return 1; return 0; } /* * This maps the physical memory to kernel virtual address space, a total * of max_low_pfn pages, by creating page tables starting from address * PAGE_OFFSET: */ unsigned long __init kernel_physical_mapping_init(unsigned long start, unsigned long end, unsigned long page_size_mask) { int use_pse = page_size_mask == (1<<PG_LEVEL_2M); unsigned long last_map_addr = end; unsigned long start_pfn, end_pfn; pgd_t *pgd_base = swapper_pg_dir; int pgd_idx, pmd_idx, pte_ofs; unsigned long pfn; pgd_t *pgd; pmd_t *pmd; pte_t *pte; unsigned pages_2m, pages_4k; int mapping_iter; start_pfn = start >> PAGE_SHIFT; end_pfn = end >> PAGE_SHIFT; /* * First iteration will setup identity mapping using large/small pages * based on use_pse, with other attributes same as set by * the early code in head_32.S * * Second iteration will setup the appropriate attributes (NX, GLOBAL..) * as desired for the kernel identity mapping. * * This two pass mechanism conforms to the TLB app note which says: * * "Software should not write to a paging-structure entry in a way * that would change, for any linear address, both the page size * and either the page frame or attributes." 
*/ mapping_iter = 1; if (!cpu_has_pse) use_pse = 0; repeat: pages_2m = pages_4k = 0; pfn = start_pfn; pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pgd = pgd_base + pgd_idx; for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { pmd = one_md_table_init(pgd); if (pfn >= end_pfn) continue; #ifdef CONFIG_X86_PAE pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pmd += pmd_idx; #else pmd_idx = 0; #endif for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; pmd++, pmd_idx++) { unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; /* * Map with big pages if possible, otherwise * create normal page tables: */ if (use_pse) { unsigned int addr2; pgprot_t prot = PAGE_KERNEL_LARGE; /* * first pass will use the same initial * identity mapping attribute + _PAGE_PSE. */ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR | _PAGE_PSE); addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1; if (is_kernel_text(addr) || is_kernel_text(addr2)) prot = PAGE_KERNEL_LARGE_EXEC; pages_2m++; if (mapping_iter == 1) set_pmd(pmd, pfn_pmd(pfn, init_prot)); else set_pmd(pmd, pfn_pmd(pfn, prot)); pfn += PTRS_PER_PTE; continue; } pte = one_page_table_init(pmd); pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pte += pte_ofs; for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn; pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { pgprot_t prot = PAGE_KERNEL; /* * first pass will use the same initial * identity mapping attribute. */ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR); if (is_kernel_text(addr)) prot = PAGE_KERNEL_EXEC; pages_4k++; if (mapping_iter == 1) { set_pte(pte, pfn_pte(pfn, init_prot)); last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE; } else set_pte(pte, pfn_pte(pfn, prot)); } } } if (mapping_iter == 1) { /* * update direct mapping page count only in the first * iteration. */ update_page_count(PG_LEVEL_2M, pages_2m); update_page_count(PG_LEVEL_4K, pages_4k); /* * local global flush tlb, which will flush the previous * mappings present in both small and large page TLB's. 
*/ __flush_tlb_all(); /* * Second iteration will set the actual desired PTE attributes. */ mapping_iter = 2; goto repeat; } return last_map_addr; } pte_t *kmap_pte; pgprot_t kmap_prot; static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr) { return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), vaddr), vaddr); } static void __init kmap_init(void) { unsigned long kmap_vstart; /* * Cache the first kmap pte: */ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); kmap_pte = kmap_get_fixmap_pte(kmap_vstart); kmap_prot = PAGE_KERNEL; } #ifdef CONFIG_HIGHMEM static void __init permanent_kmaps_init(pgd_t *pgd_base) { unsigned long vaddr; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; vaddr = PKMAP_BASE; page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); pgd = swapper_pg_dir + pgd_index(vaddr); pud = pud_offset(pgd, vaddr); pmd = pmd_offset(pud, vaddr); pte = pte_offset_kernel(pmd, vaddr); pkmap_page_table = pte; } static void __init add_one_highpage_init(struct page *page) { ClearPageReserved(page); init_page_count(page); __free_page(page); totalhigh_pages++; } void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn) { phys_addr_t start, end; u64 i; for_each_free_mem_range(i, nid, &start, &end, NULL) { unsigned long pfn = clamp_t(unsigned long, PFN_UP(start), start_pfn, end_pfn); unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end), start_pfn, end_pfn); for ( ; pfn < e_pfn; pfn++) if (pfn_valid(pfn)) add_one_highpage_init(pfn_to_page(pfn)); } } #else static inline void permanent_kmaps_init(pgd_t *pgd_base) { } #endif /* CONFIG_HIGHMEM */ void __init native_pagetable_setup_start(pgd_t *base) { unsigned long pfn, va; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; /* * Remove any mappings which extend past the end of physical * memory from the boot time page table: */ for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) { va = PAGE_OFFSET + (pfn<<PAGE_SHIFT); pgd = base + 
pgd_index(va); if (!pgd_present(*pgd)) break; pud = pud_offset(pgd, va); pmd = pmd_offset(pud, va); if (!pmd_present(*pmd)) break; pte = pte_offset_kernel(pmd, va); if (!pte_present(*pte)) break; pte_clear(NULL, va, pte); } paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT); } void __init native_pagetable_setup_done(pgd_t *base) { } /* * Build a proper pagetable for the kernel mappings. Up until this * point, we've been running on some set of pagetables constructed by * the boot process. * * If we're booting on native hardware, this will be a pagetable * constructed in arch/x86/kernel/head_32.S. The root of the * pagetable will be swapper_pg_dir. * * If we're booting paravirtualized under a hypervisor, then there are * more options: we may already be running PAE, and the pagetable may * or may not be based in swapper_pg_dir. In any case, * paravirt_pagetable_setup_start() will set up swapper_pg_dir * appropriately for the rest of the initialization to work. * * In general, pagetable_init() assumes that the pagetable may already * be partially populated, and so it avoids stomping on any existing * mappings. */ void __init early_ioremap_page_table_range_init(void) { pgd_t *pgd_base = swapper_pg_dir; unsigned long vaddr, end; /* * Fixed mappings, only the page table structure has to be * created - mappings will be set by set_fixmap(): */ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; page_table_range_init(vaddr, end, pgd_base); early_ioremap_reset(); } static void __init pagetable_init(void) { pgd_t *pgd_base = swapper_pg_dir; permanent_kmaps_init(pgd_base); } pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); EXPORT_SYMBOL_GPL(__supported_pte_mask); /* user-defined highmem size */ static unsigned int highmem_pages = -1; /* * highmem=size forces highmem to be exactly 'size' bytes. * This works even on boxes that have no highmem otherwise. 
* This also works to reduce highmem size on bigger boxes. */ static int __init parse_highmem(char *arg) { if (!arg) return -EINVAL; highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT; return 0; } early_param("highmem", parse_highmem); #define MSG_HIGHMEM_TOO_BIG \ "highmem size (%luMB) is bigger than pages available (%luMB)!\n" #define MSG_LOWMEM_TOO_SMALL \ "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n" /* * All of RAM fits into lowmem - but if user wants highmem * artificially via the highmem=x boot parameter then create * it: */ void __init lowmem_pfn_init(void) { /* max_low_pfn is 0, we already have early_res support */ max_low_pfn = max_pfn; if (highmem_pages == -1) highmem_pages = 0; #ifdef CONFIG_HIGHMEM if (highmem_pages >= max_pfn) { printk(KERN_ERR MSG_HIGHMEM_TOO_BIG, pages_to_mb(highmem_pages), pages_to_mb(max_pfn)); highmem_pages = 0; } if (highmem_pages) { if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) { printk(KERN_ERR MSG_LOWMEM_TOO_SMALL, pages_to_mb(highmem_pages)); highmem_pages = 0; } max_low_pfn -= highmem_pages; } #else if (highmem_pages) printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n"); #endif } #define MSG_HIGHMEM_TOO_SMALL \ "only %luMB highmem pages available, ignoring highmem size of %luMB!\n" #define MSG_HIGHMEM_TRIMMED \ "Warning: only 4GB will be used. 
Use a HIGHMEM64G enabled kernel!\n" /* * We have more RAM than fits into lowmem - we try to put it into * highmem, also taking the highmem=x boot parameter into account: */ void __init highmem_pfn_init(void) { max_low_pfn = MAXMEM_PFN; if (highmem_pages == -1) highmem_pages = max_pfn - MAXMEM_PFN; if (highmem_pages + MAXMEM_PFN < max_pfn) max_pfn = MAXMEM_PFN + highmem_pages; if (highmem_pages + MAXMEM_PFN > max_pfn) { printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL, pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages)); highmem_pages = 0; } #ifndef CONFIG_HIGHMEM /* Maximum memory usable is what is directly addressable */ printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20); if (max_pfn > MAX_NONPAE_PFN) printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n"); else printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); max_pfn = MAXMEM_PFN; #else /* !CONFIG_HIGHMEM */ #ifndef CONFIG_HIGHMEM64G if (max_pfn > MAX_NONPAE_PFN) { max_pfn = MAX_NONPAE_PFN; printk(KERN_WARNING MSG_HIGHMEM_TRIMMED); } #endif /* !CONFIG_HIGHMEM64G */ #endif /* !CONFIG_HIGHMEM */ } /* * Determine low and high memory ranges: */ void __init find_low_pfn_range(void) { /* it could update max_pfn */ if (max_pfn <= MAXMEM_PFN) lowmem_pfn_init(); else highmem_pfn_init(); } #ifndef CONFIG_NEED_MULTIPLE_NODES void __init initmem_init(void) { #ifdef CONFIG_HIGHMEM highstart_pfn = highend_pfn = max_pfn; if (max_pfn > max_low_pfn) highstart_pfn = max_low_pfn; printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); num_physpages = highend_pfn; high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; #else num_physpages = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); sparse_memory_present_with_active_regions(0); #ifdef CONFIG_FLATMEM max_mapnr = num_physpages; #endif __vmalloc_start_set = true; printk(KERN_NOTICE "%ldMB LOWMEM available.\n", 
pages_to_mb(max_low_pfn)); setup_bootmem_allocator(); } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ void __init setup_bootmem_allocator(void) { printk(KERN_INFO " mapped low ram: 0 - %08lx\n", max_pfn_mapped<<PAGE_SHIFT); printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT); after_bootmem = 1; } /* * paging_init() sets up the page tables - note that the first 8MB are * already mapped by head.S. * * This routines also unmaps the page at virtual kernel address 0, so * that we can trap those pesky NULL-reference errors in the kernel. */ void __init paging_init(void) { pagetable_init(); __flush_tlb_all(); kmap_init(); /* * NOTE: at this point the bootmem allocator is fully available. */ olpc_dt_build_devicetree(); sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); zone_sizes_init(); } /* * Test if the WP bit works in supervisor mode. It isn't supported on 386's * and also on some strange 486's. All 586+'s are OK. This used to involve * black magic jumps to work around some nasty CPU bugs, but fortunately the * switch to using exceptions got rid of all that. */ static void __init test_wp_bit(void) { printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode..."); /* Any page-aligned address will do, the test is non-destructive */ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY); boot_cpu_data.wp_works_ok = do_test_wp_bit(); clear_fixmap(FIX_WP_TEST); if (!boot_cpu_data.wp_works_ok) { printk(KERN_CONT "No.\n"); #ifdef CONFIG_X86_WP_WORKS_OK panic( "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!"); #endif } else { printk(KERN_CONT "Ok.\n"); } } void __init mem_init(void) { int codesize, reservedpages, datasize, initsize; int tmp; pci_iommu_alloc(); #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); #endif /* * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to * be done before free_all_bootmem(). 
Memblock use free low memory for * temporary data (see find_range_array()) and for this purpose can use * pages that was already passed to the buddy allocator, hence marked as * not accessible in the page tables when compiled with * CONFIG_DEBUG_PAGEALLOC. Otherwise order of initialization is not * important here. */ set_highmem_pages_init(); /* this will put all low memory onto the freelists */ totalram_pages += free_all_bootmem(); reservedpages = 0; for (tmp = 0; tmp < max_low_pfn; tmp++) /* * Only count reserved RAM pages: */ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp))) reservedpages++; codesize = (unsigned long) &_etext - (unsigned long) &_text; datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " "%dk reserved, %dk data, %dk init, %ldk highmem)\n", nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10, totalhigh_pages << (PAGE_SHIFT-10)); printk(KERN_INFO "virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, #ifdef CONFIG_HIGHMEM PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, (LAST_PKMAP*PAGE_SIZE) >> 10, #endif VMALLOC_START, VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20, (unsigned long)__va(0), (unsigned long)high_memory, ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20, (unsigned long)&__init_begin, (unsigned long)&__init_end, ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10, (unsigned long)&_etext, (unsigned 
long)&_edata, ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, (unsigned long)&_text, (unsigned long)&_etext, ((unsigned long)&_etext - (unsigned long)&_text) >> 10); /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. */ #define __FIXADDR_TOP (-PAGE_SIZE) #ifdef CONFIG_HIGHMEM BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE); #endif #define high_memory (-128UL << 20) BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); #undef high_memory #undef __FIXADDR_TOP #ifdef CONFIG_HIGHMEM BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); BUG_ON(VMALLOC_END > PKMAP_BASE); #endif BUG_ON(VMALLOC_START >= VMALLOC_END); BUG_ON((unsigned long)high_memory > VMALLOC_START); if (boot_cpu_data.wp_works_ok < 0) test_wp_bit(); } #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size) { struct pglist_data *pgdata = NODE_DATA(nid); struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM; unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; return __add_pages(nid, zone, start_pfn, nr_pages); } #endif /* * This function cannot be __init, since exceptions don't work in that * section. Put this after the callers, so that it cannot be inlined. 
 */
/*
 * Attempt a write to the read-only FIX_WP_TEST fixmap page from kernel
 * (supervisor) mode.  The input constraint "2" (1) preloads 'flag' with 1;
 * if the faulting write at label 1 is skipped via the exception table
 * entry _ASM_EXTABLE(1b,2b), the xorl never runs and 'flag' stays 1
 * (WP honoured).  If the write silently succeeds, the xorl zeroes 'flag'.
 * noinline so the exception-table fixup address stays stable.
 */
static noinline int do_test_wp_bit(void)
{
	char tmp_reg;
	int flag;

	__asm__ __volatile__(
		" movb %0, %1 \n"
		"1: movb %1, %0 \n"
		" xorl %2, %2 \n"
		"2: \n"
		_ASM_EXTABLE(1b,2b)
		:"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
		 "=q" (tmp_reg),
		 "=r" (flag)
		:"2" (1)
		:"memory");

	return flag;
}

#ifdef CONFIG_DEBUG_RODATA
/* Known pattern placed in rodata so rodata_test() can verify protection. */
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

/* Set once mark_rodata_ro() has write-protected the kernel text. */
int kernel_set_to_readonly __read_mostly;

/*
 * Temporarily make the kernel text [_text, _etext) writable again
 * (e.g. for code patching).  No-op until mark_rodata_ro() has run.
 */
void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, start+size);

	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

/*
 * Re-apply read-only protection to the kernel text [_text, _etext).
 * Counterpart of set_kernel_text_rw(); no-op before mark_rodata_ro().
 */
void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, start+size);

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

/* Mark everything after the kernel text as no-execute (when NX exists). */
static void mark_nxdata_nx(void)
{
	/*
	 * When this called, init has already been executed and released,
	 * so everything past _etext should be NX.
	 */
	unsigned long start = PFN_ALIGN(_etext);
	/*
	 * This comes from is_kernel_text upper limit.  Also HPAGE where used:
	 * round the end up to a huge-page boundary past __init_end.
	 */
	unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

	if (__supported_pte_mask & _PAGE_NX)
		printk(KERN_INFO "NX-protecting the kernel data: %luk\n",
			size >> 10);
	set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}

/*
 * Write-protect the kernel text and the read-only data section, then
 * NX-protect everything else.  Under CONFIG_CPA_DEBUG each protection is
 * additionally reverted and re-applied to exercise the CPA code paths.
 */
void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long size = PFN_ALIGN(_etext) - start;

	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel text: %luk\n",
		size >> 10);

	kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
		start, start+size);
	set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

	/* Now protect [_etext, __end_rodata) as well. */
	start += size;
	size = (unsigned long)__end_rodata - start;
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
		size >> 10);
	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
	set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: write protecting again\n");
	set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
	mark_nxdata_nx();
}
#endif
gpl-2.0
Kaik541/kernel_lge_gee
drivers/edac/i82860_edac.c
4810
8841
/* * Intel 82860 Memory Controller kernel module * (C) 2005 Red Hat (http://www.redhat.com) * This file may be distributed under the terms of the * GNU General Public License. * * Written by Ben Woodard <woodard@redhat.com> * shamelessly copied from and based upon the edac_i82875 driver * by Thayne Harbaugh of Linux Networx. (http://lnxi.com) */ #include <linux/module.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/edac.h> #include "edac_core.h" #define I82860_REVISION " Ver: 2.0.2" #define EDAC_MOD_STR "i82860_edac" #define i82860_printk(level, fmt, arg...) \ edac_printk(level, "i82860", fmt, ##arg) #define i82860_mc_printk(mci, level, fmt, arg...) \ edac_mc_chipset_printk(mci, level, "i82860", fmt, ##arg) #ifndef PCI_DEVICE_ID_INTEL_82860_0 #define PCI_DEVICE_ID_INTEL_82860_0 0x2531 #endif /* PCI_DEVICE_ID_INTEL_82860_0 */ #define I82860_MCHCFG 0x50 #define I82860_GBA 0x60 #define I82860_GBA_MASK 0x7FF #define I82860_GBA_SHIFT 24 #define I82860_ERRSTS 0xC8 #define I82860_EAP 0xE4 #define I82860_DERRCTL_STS 0xE2 enum i82860_chips { I82860 = 0, }; struct i82860_dev_info { const char *ctl_name; }; struct i82860_error_info { u16 errsts; u32 eap; u16 derrsyn; u16 errsts2; }; static const struct i82860_dev_info i82860_devs[] = { [I82860] = { .ctl_name = "i82860"}, }; static struct pci_dev *mci_pdev; /* init dev: in case that AGP code * has already registered driver */ static struct edac_pci_ctl_info *i82860_pci; static void i82860_get_error_info(struct mem_ctl_info *mci, struct i82860_error_info *info) { struct pci_dev *pdev; pdev = to_pci_dev(mci->dev); /* * This is a mess because there is no atomic way to read all the * registers at once and the registers can transition from CE being * overwritten by UE. 
*/ pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts); pci_read_config_dword(pdev, I82860_EAP, &info->eap); pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn); pci_read_config_word(pdev, I82860_ERRSTS, &info->errsts2); pci_write_bits16(pdev, I82860_ERRSTS, 0x0003, 0x0003); /* * If the error is the same for both reads then the first set of reads * is valid. If there is a change then there is a CE no info and the * second set of reads is valid and should be UE info. */ if (!(info->errsts2 & 0x0003)) return; if ((info->errsts ^ info->errsts2) & 0x0003) { pci_read_config_dword(pdev, I82860_EAP, &info->eap); pci_read_config_word(pdev, I82860_DERRCTL_STS, &info->derrsyn); } } static int i82860_process_error_info(struct mem_ctl_info *mci, struct i82860_error_info *info, int handle_errors) { int row; if (!(info->errsts2 & 0x0003)) return 0; if (!handle_errors) return 1; if ((info->errsts ^ info->errsts2) & 0x0003) { edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); info->errsts = info->errsts2; } info->eap >>= PAGE_SHIFT; row = edac_mc_find_csrow_by_page(mci, info->eap); if (info->errsts & 0x0002) edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE"); else edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0, "i82860 UE"); return 1; } static void i82860_check(struct mem_ctl_info *mci) { struct i82860_error_info info; debugf1("MC%d: %s()\n", mci->mc_idx, __func__); i82860_get_error_info(mci, &info); i82860_process_error_info(mci, &info, 1); } static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev) { unsigned long last_cumul_size; u16 mchcfg_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ u16 value; u32 cumul_size; struct csrow_info *csrow; int index; pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); mchcfg_ddim = mchcfg_ddim & 0x180; last_cumul_size = 0; /* The group row boundary (GRA) reg values are boundary address * for each DRAM row with a granularity of 16MB. 
GRA regs are * cumulative; therefore GRA15 will contain the total memory contained * in all eight rows. */ for (index = 0; index < mci->nr_csrows; index++) { csrow = &mci->csrows[index]; pci_read_config_word(pdev, I82860_GBA + index * 2, &value); cumul_size = (value & I82860_GBA_MASK) << (I82860_GBA_SHIFT - PAGE_SHIFT); debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, cumul_size); if (cumul_size == last_cumul_size) continue; /* not populated */ csrow->first_page = last_cumul_size; csrow->last_page = cumul_size - 1; csrow->nr_pages = cumul_size - last_cumul_size; last_cumul_size = cumul_size; csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */ csrow->mtype = MEM_RMBS; csrow->dtype = DEV_UNKNOWN; csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; } } static int i82860_probe1(struct pci_dev *pdev, int dev_idx) { struct mem_ctl_info *mci; struct i82860_error_info discard; /* RDRAM has channels but these don't map onto the abstractions that edac uses. The device groups from the GRA registers seem to map reasonably well onto the notion of a chip select row. There are 16 GRA registers and since the name is associated with the channel and the GRA registers map to physical devices so we are going to make 1 channel for group. */ mci = edac_mc_alloc(0, 16, 1, 0); if (!mci) return -ENOMEM; debugf3("%s(): init mci\n", __func__); mci->dev = &pdev->dev; mci->mtype_cap = MEM_FLAG_DDR; mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED; /* I"m not sure about this but I think that all RDRAM is SECDED */ mci->edac_cap = EDAC_FLAG_SECDED; mci->mod_name = EDAC_MOD_STR; mci->mod_ver = I82860_REVISION; mci->ctl_name = i82860_devs[dev_idx].ctl_name; mci->dev_name = pci_name(pdev); mci->edac_check = i82860_check; mci->ctl_page_to_phys = NULL; i82860_init_csrows(mci, pdev); i82860_get_error_info(mci, &discard); /* clear counters */ /* Here we assume that we will never see multiple instances of this * type of memory controller. The ID is therefore hardcoded to 0. 
*/ if (edac_mc_add_mc(mci)) { debugf3("%s(): failed edac_mc_add_mc()\n", __func__); goto fail; } /* allocating generic PCI control info */ i82860_pci = edac_pci_create_generic_ctl(&pdev->dev, EDAC_MOD_STR); if (!i82860_pci) { printk(KERN_WARNING "%s(): Unable to create PCI control\n", __func__); printk(KERN_WARNING "%s(): PCI error report via EDAC not setup\n", __func__); } /* get this far and it's successful */ debugf3("%s(): success\n", __func__); return 0; fail: edac_mc_free(mci); return -ENODEV; } /* returns count (>= 0), or negative on error */ static int __devinit i82860_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int rc; debugf0("%s()\n", __func__); i82860_printk(KERN_INFO, "i82860 init one\n"); if (pci_enable_device(pdev) < 0) return -EIO; rc = i82860_probe1(pdev, ent->driver_data); if (rc == 0) mci_pdev = pci_dev_get(pdev); return rc; } static void __devexit i82860_remove_one(struct pci_dev *pdev) { struct mem_ctl_info *mci; debugf0("%s()\n", __func__); if (i82860_pci) edac_pci_release_generic_ctl(i82860_pci); if ((mci = edac_mc_del_mc(&pdev->dev)) == NULL) return; edac_mc_free(mci); } static DEFINE_PCI_DEVICE_TABLE(i82860_pci_tbl) = { { PCI_VEND_DEV(INTEL, 82860_0), PCI_ANY_ID, PCI_ANY_ID, 0, 0, I82860}, { 0, } /* 0 terminated list. 
*/ }; MODULE_DEVICE_TABLE(pci, i82860_pci_tbl); static struct pci_driver i82860_driver = { .name = EDAC_MOD_STR, .probe = i82860_init_one, .remove = __devexit_p(i82860_remove_one), .id_table = i82860_pci_tbl, }; static int __init i82860_init(void) { int pci_rc; debugf3("%s()\n", __func__); /* Ensure that the OPSTATE is set correctly for POLL or NMI */ opstate_init(); if ((pci_rc = pci_register_driver(&i82860_driver)) < 0) goto fail0; if (!mci_pdev) { mci_pdev = pci_get_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82860_0, NULL); if (mci_pdev == NULL) { debugf0("860 pci_get_device fail\n"); pci_rc = -ENODEV; goto fail1; } pci_rc = i82860_init_one(mci_pdev, i82860_pci_tbl); if (pci_rc < 0) { debugf0("860 init fail\n"); pci_rc = -ENODEV; goto fail1; } } return 0; fail1: pci_unregister_driver(&i82860_driver); fail0: if (mci_pdev != NULL) pci_dev_put(mci_pdev); return pci_rc; } static void __exit i82860_exit(void) { debugf3("%s()\n", __func__); pci_unregister_driver(&i82860_driver); if (mci_pdev != NULL) pci_dev_put(mci_pdev); } module_init(i82860_init); module_exit(i82860_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com) " "Ben Woodard <woodard@redhat.com>"); MODULE_DESCRIPTION("ECC support for Intel 82860 memory hub controllers"); module_param(edac_op_state, int, 0444); MODULE_PARM_DESC(edac_op_state, "EDAC Error Reporting state: 0=Poll,1=NMI");
gpl-2.0
ghbhaha/furnace-bacon
drivers/clocksource/sh_tmu.c
4810
10943
/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */

#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/clk.h>
#include <linux/irq.h>
#include <linux/err.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_domain.h>

/*
 * Per-channel driver state; one instance per TMU platform device.  The
 * same channel can be registered as a clocksource or a clockevent
 * device depending on the platform data ratings.
 */
struct sh_tmu_priv {
	void __iomem *mapbase;		/* channel register base */
	struct clk *clk;		/* "tmu_fck" functional clock */
	struct irqaction irqaction;	/* prepared for setup_irq() */
	struct platform_device *pdev;
	unsigned long rate;		/* counter rate = clk rate / 4 */
	unsigned long periodic;		/* reload value for HZ ticks */
	struct clock_event_device ced;
	struct clocksource cs;
};

/* protects the TSTR start/stop register shared by all channels */
static DEFINE_SPINLOCK(sh_tmu_lock);

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

/*
 * Read a TMU register.  TSTR lives below the channel base (offset taken
 * from platform data) and is 8 bit; TCR is 16 bit; TCOR/TCNT are 32 bit.
 */
static inline unsigned long sh_tmu_read(struct sh_tmu_priv *p, int reg_nr)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	void __iomem *base = p->mapbase;
	unsigned long offs;

	if (reg_nr == TSTR)
		return ioread8(base - cfg->channel_offset);

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(base + offs);
	else
		return ioread32(base + offs);
}

/* Write a TMU register; same width/offset rules as sh_tmu_read(). */
static inline void sh_tmu_write(struct sh_tmu_priv *p, int reg_nr,
				unsigned long value)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	void __iomem *base = p->mapbase;
	unsigned long offs;

	if (reg_nr == TSTR) {
		iowrite8(value, base - cfg->channel_offset);
		return;
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, base + offs);
	else
		iowrite32(value, base + offs);
}

/* Set or clear this channel's bit in the shared TSTR start/stop register. */
static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
{
	struct sh_timer_config *cfg = p->pdev->dev.platform_data;
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	spin_lock_irqsave(&sh_tmu_lock, flags);
	value = sh_tmu_read(p, TSTR);

	if (start)
		value |= 1 << cfg->timer_bit;
	else
		value &= ~(1 << cfg->timer_bit);

	sh_tmu_write(p, TSTR, value);
	spin_unlock_irqrestore(&sh_tmu_lock, flags);
}

/*
 * Power up and start the channel free-running at clk/4 with interrupts
 * off.  Returns 0 or the clk_enable() error.
 */
static int sh_tmu_enable(struct sh_tmu_priv *p)
{
	int ret;

	/* enable clock */
	ret = clk_enable(p->clk);
	if (ret) {
		dev_err(&p->pdev->dev, "cannot enable clock\n");
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(p, 0);

	/* maximum timeout */
	sh_tmu_write(p, TCOR, 0xffffffff);
	sh_tmu_write(p, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	p->rate = clk_get_rate(p->clk) / 4;
	sh_tmu_write(p, TCR, 0x0000);

	/* enable channel */
	sh_tmu_start_stop_ch(p, 1);

	return 0;
}

/* Stop the channel, mask its interrupt and release the clock. */
static void sh_tmu_disable(struct sh_tmu_priv *p)
{
	/* disable channel */
	sh_tmu_start_stop_ch(p, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(p, TCR, 0x0000);

	/* stop clock */
	clk_disable(p->clk);
}

/*
 * Program the next expiry 'delta' counter ticks away, with the interrupt
 * enabled (TCR value 0x0020).  For periodic mode TCOR reloads 'delta'
 * on each underflow; for oneshot TCOR is parked at the maximum.
 */
static void sh_tmu_set_next(struct sh_tmu_priv *p, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(p, 0);

	/* acknowledge interrupt */
	sh_tmu_read(p, TCR);

	/* enable interrupt */
	sh_tmu_write(p, TCR, 0x0020);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(p, TCOR, delta);
	else
		sh_tmu_write(p, TCOR, 0xffffffff);

	sh_tmu_write(p, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(p, 1);
}

/*
 * Underflow interrupt handler: in oneshot mode the interrupt is fully
 * disabled, otherwise only acknowledged, then the clockevent layer is
 * notified.
 */
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_priv *p = dev_id;

	/* disable or acknowledge interrupt */
	if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(p, TCR, 0x0000);
	else
		sh_tmu_write(p, TCR, 0x0020);

	/* notify clockevent layer */
	p->ced.event_handler(&p->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_priv *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_priv, cs);
}

/* TMU counts down, so invert TCNT to present a count-up clocksource. */
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);

	return sh_tmu_read(p, TCNT) ^ 0xffffffff;
}

/* Start the channel and publish its real rate to the clocksource core. */
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_priv *p = cs_to_sh_tmu(cs);
	int ret;

	ret = sh_tmu_enable(p);
	if (!ret)
		__clocksource_updatefreq_hz(cs, p->rate);
	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	sh_tmu_disable(cs_to_sh_tmu(cs));
}

/* Fill in and register this channel as a 32-bit clocksource. */
static int sh_tmu_register_clocksource(struct sh_tmu_priv *p,
				       char *name, unsigned long rating)
{
	struct clocksource *cs = &p->cs;

	cs->name = name;
	cs->rating = rating;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&p->pdev->dev, "used as clock source\n");

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_priv *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_priv, ced);
}

/*
 * Start the channel for clockevent use and (re)compute the conversion
 * factors, which depend on the rate determined by sh_tmu_enable().
 */
static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
{
	struct clock_event_device *ced = &p->ced;

	sh_tmu_enable(p);

	/* TODO: calculate good shift from rate and counter bit width */

	ced->shift = 32;
	ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
	ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
	ced->min_delta_ns = 5000;

	if (periodic) {
		p->periodic = (p->rate + HZ/2) / HZ;	/* ticks per jiffy, rounded */
		sh_tmu_set_next(p, p->periodic, 1);
	}
}

/* Clockevent mode switch: tear down the old mode, then set up the new. */
static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_priv *p = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(p);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&p->pdev->dev, "used for periodic clock events\n");
		sh_tmu_clock_event_start(p, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&p->pdev->dev, "used for oneshot clock events\n");
		sh_tmu_clock_event_start(p, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(p);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

/* Oneshot reprogramming entry point for the clockevent core. */
static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_priv *p = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(p, delta, 0);
	return 0;
}

/*
 * Register this channel as a clockevent device and hook up its
 * interrupt.  NOTE(review): the irq is requested after
 * clockevents_register_device(), and an irq failure only logs an error
 * while leaving the device registered — confirm against the upstream
 * history whether this ordering is intentional here.
 */
static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
				       char *name, unsigned long rating)
{
	struct clock_event_device *ced = &p->ced;
	int ret;

	memset(ced, 0, sizeof(*ced));

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = rating;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;

	dev_info(&p->pdev->dev, "used for clock events\n");
	clockevents_register_device(ced);

	ret = setup_irq(p->irqaction.irq, &p->irqaction);
	if (ret) {
		dev_err(&p->pdev->dev, "failed to request irq %d\n",
			p->irqaction.irq);
		return;
	}
}

/* Register as clockevent if rated for it, else as clocksource if rated. */
static int sh_tmu_register(struct sh_tmu_priv *p, char *name,
		    unsigned long clockevent_rating,
		    unsigned long clocksource_rating)
{
	if (clockevent_rating)
		sh_tmu_register_clockevent(p, name, clockevent_rating);
	else if (clocksource_rating)
		sh_tmu_register_clocksource(p, name, clocksource_rating);

	return 0;
}

/*
 * One-time channel setup: validate platform data, map registers,
 * prepare the irqaction, grab the clock and register with the timing
 * frameworks.  Returns 0 on success or a negative errno.
 */
static int sh_tmu_setup(struct sh_tmu_priv *p, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	struct resource *res;
	int irq, ret;
	ret = -ENXIO;

	memset(p, 0, sizeof(*p));
	p->pdev = pdev;

	if (!cfg) {
		dev_err(&p->pdev->dev, "missing platform data\n");
		goto err0;
	}

	platform_set_drvdata(pdev, p);

	res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&p->pdev->dev, "failed to get I/O memory\n");
		goto err0;
	}

	irq = platform_get_irq(p->pdev, 0);
	if (irq < 0) {
		dev_err(&p->pdev->dev, "failed to get irq\n");
		goto err0;
	}

	/* map memory, let mapbase point to our channel */
	p->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (p->mapbase == NULL) {
		dev_err(&p->pdev->dev, "failed to remap I/O memory\n");
		goto err0;
	}

	/* setup data for setup_irq() (too early for request_irq()) */
	p->irqaction.name = dev_name(&p->pdev->dev);
	p->irqaction.handler = sh_tmu_interrupt;
	p->irqaction.dev_id = p;
	p->irqaction.irq = irq;
	p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER |
			     IRQF_IRQPOLL | IRQF_NOBALANCING;

	/* get hold of clock */
	p->clk = clk_get(&p->pdev->dev, "tmu_fck");
	if (IS_ERR(p->clk)) {
		dev_err(&p->pdev->dev, "cannot get clock\n");
		ret = PTR_ERR(p->clk);
		goto err1;
	}

	return sh_tmu_register(p, (char *)dev_name(&p->pdev->dev),
			       cfg->clockevent_rating,
			       cfg->clocksource_rating);
 err1:
	iounmap(p->mapbase);
 err0:
	return ret;
}

/*
 * Platform probe.  A device already carrying drvdata was set up as an
 * early timer and is kept as-is; otherwise allocate state and set up.
 */
static int __devinit sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_priv *p = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev))
		pm_genpd_dev_always_on(&pdev->dev, true);

	if (p) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		return 0;
	}

	p = kmalloc(sizeof(*p), GFP_KERNEL);
	if (p == NULL) {
		dev_err(&pdev->dev, "failed to allocate driver data\n");
		return -ENOMEM;
	}

	ret = sh_tmu_setup(p, pdev);
	if (ret) {
		kfree(p);
		platform_set_drvdata(pdev, NULL);
	}
	return ret;
}

static int __devexit sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= __devexit_p(sh_tmu_remove),
	.driver		= {
		.name	= "sh_tmu",
	}
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
module_init(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
coolbho3k/kernel-roth
drivers/isdn/hardware/eicon/divasproc.c
5066
10556
/* $Id: divasproc.c,v 1.19.4.3 2005/01/31 12:22:20 armin Exp $
 *
 * Low level driver for Eicon DIVA Server ISDN cards.
 * /proc functions
 *
 * Copyright 2000-2003 by Armin Schindler (mac@melware.de)
 * Copyright 2000-2003 Cytronics & Melware (info@melware.de)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include <asm/uaccess.h>
#include "platform.h"
#include "debuglib.h"
#undef ID_MASK
#undef N_DATA
#include "pc.h"
#include "di_defs.h"
#include "divasync.h"
#include "di.h"
#include "io.h"
#include "xdi_msg.h"
#include "xdi_adapter.h"
#include "diva.h"
#include "diva_pci.h"

/* Adapter table and version/serial helpers live in the core driver. */
extern PISDN_ADAPTER IoAdapters[MAX_ADAPTER];
extern void divas_get_version(char *);
extern void diva_get_vserial_number(PISDN_ADAPTER IoAdapter, char *buf);

/*********************************************************
 ** Functions for /proc interface / File operations
 *********************************************************/

/* Names of the /proc entries created below. */
static char *divas_proc_name = "divas";
static char *adapter_dir_name = "adapter";
static char *info_proc_name = "info";
static char *grp_opt_proc_name = "group_optimization";
static char *d_l1_down_proc_name = "dynamic_l1_down";

/*
** "divas" entry
*/

extern struct proc_dir_entry *proc_net_eicon;
static struct proc_dir_entry *divas_proc_entry = NULL;

/*
 * Read handler for /proc/net/eicon/divas: emits the driver version line
 * followed by one summary line per registered adapter.  Only a single
 * read is supported (any non-zero offset returns EOF).
 */
static ssize_t
divas_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
	int len = 0;
	int cadapter;
	char tmpbuf[80];
	char tmpser[16];

	if (*off)
		return 0;
	divas_get_version(tmpbuf);
	/* NOTE(review): this first copy is not bounded by 'count' —
	 * presumably callers always pass a buffer larger than the
	 * version string; confirm before relying on short reads. */
	if (copy_to_user(buf + len, &tmpbuf, strlen(tmpbuf)))
		return -EFAULT;
	len += strlen(tmpbuf);

	for (cadapter = 0; cadapter < MAX_ADAPTER; cadapter++) {
		if (IoAdapters[cadapter]) {
			diva_get_vserial_number(IoAdapters[cadapter], tmpser);
			sprintf(tmpbuf,
				"%2d: %-30s Serial:%-10s IRQ:%2d\n",
				cadapter + 1,
				IoAdapters[cadapter]->Properties.Name,
				tmpser,
				IoAdapters[cadapter]->irq_info.irq_nr);
			/* stop once the next line would overflow the
			 * user buffer */
			if ((strlen(tmpbuf) + len) > count)
				break;
			if (copy_to_user
			    (buf + len, &tmpbuf, strlen(tmpbuf)))
				return -EFAULT;
			len += strlen(tmpbuf);
		}
	}
	*off += len;
	return (len);
}

/* The divas entry is read-only; writes are rejected. */
static ssize_t
divas_write(struct file *file, const char __user *buf, size_t count,
	    loff_t *off)
{
	return (-ENODEV);
}

/* No poll support on this entry. */
static unsigned int divas_poll(struct file *file, poll_table *wait)
{
	return (POLLERR);
}

static int divas_open(struct inode *inode, struct file *file)
{
	return nonseekable_open(inode, file);
}

static int divas_close(struct inode *inode, struct file *file)
{
	return (0);
}

static const struct file_operations divas_fops = {
	.owner = THIS_MODULE,
	.llseek = no_llseek,
	.read = divas_read,
	.write = divas_write,
	.poll = divas_poll,
	.open = divas_open,
	.release = divas_close
};

/* Create /proc/net/eicon/divas.  Returns 1 on success, 0 on failure. */
int create_divas_proc(void)
{
	divas_proc_entry = proc_create(divas_proc_name, S_IFREG | S_IRUGO,
				       proc_net_eicon, &divas_fops);
	if (!divas_proc_entry)
		return (0);

	return (1);
}

void remove_divas_proc(void)
{
	if (divas_proc_entry) {
		remove_proc_entry(divas_proc_name, proc_net_eicon);
		divas_proc_entry = NULL;
	}
}

/*
 * Write handler for the per-adapter "group_optimization" entry.
 * Accepts a single '0'/'1' character (optionally followed by a
 * newline, hence count of 1 or 2) toggling the group-optimization
 * configuration bit of the adapter.
 */
static ssize_t grp_opt_proc_write(struct file *file,
				  const char __user *buffer,
				  size_t count, loff_t *pos)
{
	diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];

	if ((count == 1) || (count == 2)) {
		char c;
		if (get_user(c, buffer))
			return -EFAULT;
		switch (c) {
		case '0':
			IoAdapter->capi_cfg.cfg_1 &=
				~DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON;
			break;
		case '1':
			IoAdapter->capi_cfg.cfg_1 |=
				DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON;
			break;
		default:
			return (-EINVAL);
		}
		return (count);
	}
	return (-EINVAL);
}

/*
 * Write handler for the per-adapter "dynamic_l1_down" entry; same
 * '0'/'1' protocol as grp_opt_proc_write, toggling the dynamic-L1 bit.
 */
static ssize_t d_l1_down_proc_write(struct file *file,
				    const char __user *buffer,
				    size_t count, loff_t *pos)
{
	diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];

	if ((count == 1) || (count == 2)) {
		char c;
		if (get_user(c, buffer))
			return -EFAULT;
		switch (c) {
		case '0':
			IoAdapter->capi_cfg.cfg_1 &=
				~DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON;
			break;
		case '1':
			IoAdapter->capi_cfg.cfg_1 |=
				DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON;
			break;
		default:
			return (-EINVAL);
		}
		return (count);
	}
	return (-EINVAL);
}

/* Show "1" or "0" depending on the dynamic-L1 configuration bit. */
static int d_l1_down_proc_show(struct seq_file *m, void *v)
{
	diva_os_xdi_adapter_t *a = m->private;
	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];

	seq_printf(m, "%s\n",
		   (IoAdapter->capi_cfg.
		    cfg_1 & DIVA_XDI_CAPI_CFG_1_DYNAMIC_L1_ON) ? "1" :
		   "0");
	return 0;
}

static int d_l1_down_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, d_l1_down_proc_show, PDE(inode)->data);
}

static const struct file_operations d_l1_down_proc_fops = {
	.owner = THIS_MODULE,
	.open = d_l1_down_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = d_l1_down_proc_write,
};

/* Show "1" or "0" depending on the group-optimization bit. */
static int grp_opt_proc_show(struct seq_file *m, void *v)
{
	diva_os_xdi_adapter_t *a = m->private;
	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];

	seq_printf(m, "%s\n",
		   (IoAdapter->capi_cfg.
		    cfg_1 & DIVA_XDI_CAPI_CFG_1_GROUP_POPTIMIZATION_ON)
		   ? "1" : "0");
	return 0;
}

static int grp_opt_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, grp_opt_proc_show, PDE(inode)->data);
}

static const struct file_operations grp_opt_proc_fops = {
	.owner = THIS_MODULE,
	.open = grp_opt_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = grp_opt_proc_write,
};

/*
 * Write handler for the per-adapter "info" entry.  Only the literal
 * command "trap" is understood (test hook that invokes the adapter's
 * trap-notify callback); anything else is rejected.
 */
static ssize_t info_proc_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *pos)
{
	diva_os_xdi_adapter_t *a = PDE(file->f_path.dentry->d_inode)->data;
	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];
	char c[4];

	if (count <= 4)
		return -EINVAL;

	if (copy_from_user(c, buffer, 4))
		return -EFAULT;

	/* this is for test purposes only */
	if (!memcmp(c, "trap", 4)) {
		(*(IoAdapter->os_trap_nfy_Fnc)) (IoAdapter, IoAdapter->ANum);
		return (count);
	}
	return (-EINVAL);
}

/*
 * Show the full adapter status block: identity, resources, PCI layout
 * and a derived run state (slave / out of service / trapped / active /
 * ready).
 */
static int info_proc_show(struct seq_file *m, void *v)
{
	int i = 0;
	char *p;
	char tmpser[16];
	diva_os_xdi_adapter_t *a = m->private;
	PISDN_ADAPTER IoAdapter = IoAdapters[a->controller - 1];

	seq_printf(m, "Name : %s\n", IoAdapter->Properties.Name);
	seq_printf(m, "DSP state : %08x\n", a->dsp_mask);
	seq_printf(m, "Channels : %02d\n", IoAdapter->Properties.Channels);
	seq_printf(m, "E. max/used : %03d/%03d\n",
		   IoAdapter->e_max, IoAdapter->e_count);
	diva_get_vserial_number(IoAdapter, tmpser);
	seq_printf(m, "Serial : %s\n", tmpser);
	seq_printf(m, "IRQ : %d\n", IoAdapter->irq_info.irq_nr);
	seq_printf(m, "CardIndex : %d\n", a->CardIndex);
	seq_printf(m, "CardOrdinal : %d\n", a->CardOrdinal);
	seq_printf(m, "Controller : %d\n", a->controller);
	seq_printf(m, "Bus-Type : %s\n",
		   (a->Bus == DIVAS_XDI_ADAPTER_BUS_ISA) ? "ISA" : "PCI");
	seq_printf(m, "Port-Name : %s\n", a->port_name);
	if (a->Bus == DIVAS_XDI_ADAPTER_BUS_PCI) {
		seq_printf(m, "PCI-bus : %d\n", a->resources.pci.bus);
		seq_printf(m, "PCI-func : %d\n", a->resources.pci.func);
		for (i = 0; i < 8; i++) {
			if (a->resources.pci.bar[i]) {
				seq_printf(m,
					   "Mem / I/O %d : 0x%x / mapped : 0x%lx",
					   i, a->resources.pci.bar[i],
					   (unsigned long) a->resources.
					   pci.addr[i]);
				if (a->resources.pci.length[i]) {
					seq_printf(m,
						   " / length : %d",
						   a->resources.pci.
						   length[i]);
				}
				seq_putc(m, '\n');
			}
		}
	}
	/* derive a human-readable state from the mapped resources and
	 * the adapter flags */
	if ((!a->xdi_adapter.port) &&
	    ((!a->xdi_adapter.ram) ||
	     (!a->xdi_adapter.reset)
	     || (!a->xdi_adapter.cfg))) {
		if (!IoAdapter->irq_info.irq_nr) {
			p = "slave";
		} else {
			p = "out of service";
		}
	} else if (a->xdi_adapter.trapped) {
		p = "trapped";
	} else if (a->xdi_adapter.Initialized) {
		p = "active";
	} else {
		p = "ready";
	}
	seq_printf(m, "State : %s\n", p);

	return 0;
}

static int info_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, info_proc_show, PDE(inode)->data);
}

static const struct file_operations info_proc_fops = {
	.owner = THIS_MODULE,
	.open = info_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = info_proc_write,
};

/*
** adapter proc init/de-init
*/

/* --------------------------------------------------------------------------
   Create adapter directory and files in proc file system
   -------------------------------------------------------------------------- */
int create_adapter_proc(diva_os_xdi_adapter_t *a)
{
	struct proc_dir_entry *de, *pe;
	char tmp[16];

	sprintf(tmp, "%s%d", adapter_dir_name, a->controller);
	if (!(de = proc_mkdir(tmp, proc_net_eicon)))
		return (0);
	a->proc_adapter_dir = (void *) de;

	/* "info" is mandatory; the two toggles are best-effort */
	pe = proc_create_data(info_proc_name, S_IRUGO | S_IWUSR, de,
			      &info_proc_fops, a);
	if (!pe)
		return (0);
	a->proc_info = (void *) pe;

	pe = proc_create_data(grp_opt_proc_name, S_IRUGO | S_IWUSR, de,
			      &grp_opt_proc_fops, a);
	if (pe)
		a->proc_grp_opt = (void *) pe;
	pe = proc_create_data(d_l1_down_proc_name, S_IRUGO | S_IWUSR, de,
			      &d_l1_down_proc_fops, a);
	if (pe)
		a->proc_d_l1_down = (void *) pe;

	DBG_TRC(("proc entry %s created", tmp));

	return (1);
}

/* --------------------------------------------------------------------------
   Remove adapter directory and files in proc file system
   -------------------------------------------------------------------------- */
void remove_adapter_proc(diva_os_xdi_adapter_t *a)
{
	char tmp[16];

	if (a->proc_adapter_dir) {
		if (a->proc_d_l1_down) {
			remove_proc_entry(d_l1_down_proc_name,
					  (struct proc_dir_entry *) a->proc_adapter_dir);
		}
		if (a->proc_grp_opt) {
			remove_proc_entry(grp_opt_proc_name,
					  (struct proc_dir_entry *) a->proc_adapter_dir);
		}
		if (a->proc_info) {
			remove_proc_entry(info_proc_name,
					  (struct proc_dir_entry *) a->proc_adapter_dir);
		}
		sprintf(tmp, "%s%d", adapter_dir_name, a->controller);
		remove_proc_entry(tmp, proc_net_eicon);
		DBG_TRC(("proc entry %s%d removed", adapter_dir_name,
			 a->controller));
	}
}
gpl-2.0
SM-G920P-MM/G920P-MM
arch/powerpc/platforms/44x/ppc44x_simple.c
7370
2411
/*
 * Generic PowerPC 44x platform support
 *
 * Copyright 2008 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 *
 * This implements simple platform support for PowerPC 44x chips.  This is
 * mostly used for eval boards or other simple and "generic" 44x boards.  If
 * your board has custom functions or hardware, then you will likely want to
 * implement your own board.c file to accommodate it.
 */
#include <asm/machdep.h>
#include <asm/pci-bridge.h>
#include <asm/ppc4xx.h>
#include <asm/prom.h>
#include <asm/time.h>
#include <asm/udbg.h>
#include <asm/uic.h>

#include <linux/init.h>
#include <linux/of_platform.h>

/* Bus types whose children are probed as platform devices. */
static __initdata struct of_device_id ppc44x_of_bus[] = {
	{ .compatible = "ibm,plb4", },
	{ .compatible = "ibm,opb", },
	{ .compatible = "ibm,ebc", },
	{ .compatible = "simple-bus", },
	{},
};

/* Walk the device tree and register devices found on the buses above. */
static int __init ppc44x_device_probe(void)
{
	of_platform_bus_probe(NULL, ppc44x_of_bus, NULL);

	return 0;
}
machine_device_initcall(ppc44x_simple, ppc44x_device_probe);

/* This is the list of boards that can be supported by this simple
 * platform code. This does _not_ mean the boards are compatible,
 * as they most certainly are not from a device tree perspective.
 * However, their differences are handled by the device tree and the
 * drivers and therefore they don't need custom board support files.
 *
 * Again, if your board needs to do things differently then create a
 * board.c file for it rather than adding it to this list.
 */
static char *board[] __initdata = {
	"amcc,arches",
	"amcc,bamboo",
	"apm,bluestone",
	"amcc,glacier",
	"ibm,ebony",
	"amcc,eiger",
	"amcc,katmai",
	"amcc,rainier",
	"amcc,redwood",
	"amcc,sequoia",
	"amcc,taishan",
	"amcc,yosemite",
	"mosaixtech,icon"
};

/*
 * Claim the machine if the flattened device tree root matches any
 * board in the list above; PCI resources are fully reassigned on
 * these platforms.
 */
static int __init ppc44x_probe(void)
{
	unsigned long root = of_get_flat_dt_root();
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(board); idx++) {
		if (of_flat_dt_is_compatible(root, board[idx])) {
			pci_set_flags(PCI_REASSIGN_ALL_RSRC);
			return 1;
		}
	}

	return 0;
}

define_machine(ppc44x_simple) {
	.name = "PowerPC 44x Platform",
	.probe = ppc44x_probe,
	.progress = udbg_progress,
	.init_IRQ = uic_init_tree,
	.get_irq = uic_get_irq,
	.restart = ppc4xx_reset_system,
	.calibrate_decr = generic_calibrate_decr,
};
gpl-2.0
vl197602/android_kernel_cyanogen_msm8916
sound/core/oss/route.c
13002
3140
/* * Route Plug-In * Copyright (c) 2000 by Abramo Bagnara <abramo@alsa-project.org> * * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Library General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU Library General Public License for more details. * * You should have received a copy of the GNU Library General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <sound/core.h> #include <sound/pcm.h> #include "pcm_plugin.h" static void zero_areas(struct snd_pcm_plugin_channel *dvp, int ndsts, snd_pcm_uframes_t frames, snd_pcm_format_t format) { int dst = 0; for (; dst < ndsts; ++dst) { if (dvp->wanted) snd_pcm_area_silence(&dvp->area, 0, frames, format); dvp->enabled = 0; dvp++; } } static inline void copy_area(const struct snd_pcm_plugin_channel *src_channel, struct snd_pcm_plugin_channel *dst_channel, snd_pcm_uframes_t frames, snd_pcm_format_t format) { dst_channel->enabled = 1; snd_pcm_area_copy(&src_channel->area, 0, &dst_channel->area, 0, frames, format); } static snd_pcm_sframes_t route_transfer(struct snd_pcm_plugin *plugin, const struct snd_pcm_plugin_channel *src_channels, struct snd_pcm_plugin_channel *dst_channels, snd_pcm_uframes_t frames) { int nsrcs, ndsts, dst; struct snd_pcm_plugin_channel *dvp; snd_pcm_format_t format; if (snd_BUG_ON(!plugin || !src_channels || !dst_channels)) return -ENXIO; if (frames == 0) return 0; nsrcs = plugin->src_format.channels; ndsts = plugin->dst_format.channels; format = plugin->dst_format.format; dvp = dst_channels; if (nsrcs <= 1) { /* expand to 
all channels */ for (dst = 0; dst < ndsts; ++dst) { copy_area(src_channels, dvp, frames, format); dvp++; } return frames; } for (dst = 0; dst < ndsts && dst < nsrcs; ++dst) { copy_area(src_channels, dvp, frames, format); dvp++; src_channels++; } if (dst < ndsts) zero_areas(dvp, ndsts - dst, frames, format); return frames; } int snd_pcm_plugin_build_route(struct snd_pcm_substream *plug, struct snd_pcm_plugin_format *src_format, struct snd_pcm_plugin_format *dst_format, struct snd_pcm_plugin **r_plugin) { struct snd_pcm_plugin *plugin; int err; if (snd_BUG_ON(!r_plugin)) return -ENXIO; *r_plugin = NULL; if (snd_BUG_ON(src_format->rate != dst_format->rate)) return -ENXIO; if (snd_BUG_ON(src_format->format != dst_format->format)) return -ENXIO; err = snd_pcm_plugin_build(plug, "route conversion", src_format, dst_format, 0, &plugin); if (err < 0) return err; plugin->transfer = route_transfer; *r_plugin = plugin; return 0; }
gpl-2.0
vfalico/popcorn
drivers/media/video/zoran/zr36050.c
13002
27029
/* * Zoran ZR36050 basic configuration functions * * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at> * * $Id: zr36050.c,v 1.1.2.11 2003/08/03 14:54:53 rbultje Exp $ * * ------------------------------------------------------------------------ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ------------------------------------------------------------------------ */ #define ZR050_VERSION "v0.7.1" #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/wait.h> /* I/O commands, error codes */ #include <asm/io.h> /* headerfile of this module */ #include "zr36050.h" /* codec io API */ #include "videocodec.h" /* it doesn't make sense to have more than 20 or so, just to prevent some unwanted loops */ #define MAX_CODECS 20 /* amount of chips attached via this driver */ static int zr36050_codecs; /* debugging is available via module parameter */ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-4)"); #define dprintk(num, format, args...) 
\ do { \ if (debug >= num) \ printk(format, ##args); \ } while (0) /* ========================================================================= Local hardware I/O functions: read/write via codec layer (registers are located in the master device) ========================================================================= */ /* read and write functions */ static u8 zr36050_read (struct zr36050 *ptr, u16 reg) { u8 value = 0; // just in case something is wrong... if (ptr->codec->master_data->readreg) value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xFF; else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing read!\n", ptr->name); dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value); return value; } static void zr36050_write (struct zr36050 *ptr, u16 reg, u8 value) { dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg); // just in case something is wrong... if (ptr->codec->master_data->writereg) ptr->codec->master_data->writereg(ptr->codec, reg, value); else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing written!\n", ptr->name); } /* ========================================================================= Local helper function: status read ========================================================================= */ /* status is kept in datastructure */ static u8 zr36050_read_status1 (struct zr36050 *ptr) { ptr->status1 = zr36050_read(ptr, ZR050_STATUS_1); zr36050_read(ptr, 0); return ptr->status1; } /* ========================================================================= Local helper function: scale factor read ========================================================================= */ /* scale factor is kept in datastructure */ static u16 zr36050_read_scalefactor (struct zr36050 *ptr) { ptr->scalefact = (zr36050_read(ptr, ZR050_SF_HI) << 8) | (zr36050_read(ptr, ZR050_SF_LO) & 0xFF); /* leave 0 selected for an eventually GO from master */ zr36050_read(ptr, 0); return ptr->scalefact; } /* 
========================================================================= Local helper function: wait if codec is ready to proceed (end of processing) or time is over ========================================================================= */ static void zr36050_wait_end (struct zr36050 *ptr) { int i = 0; while (!(zr36050_read_status1(ptr) & 0x4)) { udelay(1); if (i++ > 200000) { // 200ms, there is for sure something wrong!!! dprintk(1, "%s: timeout at wait_end (last status: 0x%02x)\n", ptr->name, ptr->status1); break; } } } /* ========================================================================= Local helper function: basic test of "connectivity", writes/reads to/from memory the SOF marker ========================================================================= */ static int zr36050_basic_test (struct zr36050 *ptr) { zr36050_write(ptr, ZR050_SOF_IDX, 0x00); zr36050_write(ptr, ZR050_SOF_IDX + 1, 0x00); if ((zr36050_read(ptr, ZR050_SOF_IDX) | zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0x0000) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to jpeg processor!\n", ptr->name); return -ENXIO; } zr36050_write(ptr, ZR050_SOF_IDX, 0xff); zr36050_write(ptr, ZR050_SOF_IDX + 1, 0xc0); if (((zr36050_read(ptr, ZR050_SOF_IDX) << 8) | zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0xffc0) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to jpeg processor!\n", ptr->name); return -ENXIO; } zr36050_wait_end(ptr); if ((ptr->status1 & 0x4) == 0) { dprintk(1, KERN_ERR "%s: attach failed, jpeg processor failed (end flag)!\n", ptr->name); return -EBUSY; } return 0; /* looks good! 
*/ } /* ========================================================================= Local helper function: simple loop for pushing the init datasets ========================================================================= */ static int zr36050_pushit (struct zr36050 *ptr, u16 startreg, u16 len, const char *data) { int i = 0; dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, startreg, len); while (i < len) { zr36050_write(ptr, startreg++, data[i++]); } return i; } /* ========================================================================= Basic datasets: jpeg baseline setup data (you find it on lots places in internet, or just extract it from any regular .jpg image...) Could be variable, but until it's not needed it they are just fixed to save memory. Otherwise expand zr36050 structure with arrays, push the values to it and initialize from there, as e.g. the linux zr36057/60 driver does it. ========================================================================= */ static const char zr36050_dqt[0x86] = { 0xff, 0xdb, //Marker: DQT 0x00, 0x84, //Length: 2*65+2 0x00, //Pq,Tq first table 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25, 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33, 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44, 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63, 0x01, //Pq,Tq second table 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63 }; static const char zr36050_dht[0x1a4] = { 0xff, 0xc4, //Marker: DHT 0x01, 0xa2, 
//Length: 2*AC, 2*DC 0x00, //DC first table 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x01, //DC second table 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x10, //AC first table 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0x11, //AC second table 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 
0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA }; /* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */ #define NO_OF_COMPONENTS 0x3 //Y,U,V #define BASELINE_PRECISION 0x8 //MCU size (?) static const char zr36050_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT static const char zr36050_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC static const char zr36050_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC /* horizontal 422 decimation setup (maybe we support 411 or so later, too) */ static const char zr36050_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 }; static const char zr36050_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 }; /* ========================================================================= Local helper functions: calculation and setup of parameter-dependent JPEG baseline segments (needed for compression only) ========================================================================= */ /* ------------------------------------------------------------------------- */ /* SOF (start of frame) segment depends on width, height and sampling ratio of each color component */ static int zr36050_set_sof (struct zr36050 *ptr) { char sof_data[34]; // max. 
size of register set int i; dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name, ptr->width, ptr->height, NO_OF_COMPONENTS); sof_data[0] = 0xff; sof_data[1] = 0xc0; sof_data[2] = 0x00; sof_data[3] = (3 * NO_OF_COMPONENTS) + 8; sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36050 sof_data[5] = (ptr->height) >> 8; sof_data[6] = (ptr->height) & 0xff; sof_data[7] = (ptr->width) >> 8; sof_data[8] = (ptr->width) & 0xff; sof_data[9] = NO_OF_COMPONENTS; for (i = 0; i < NO_OF_COMPONENTS; i++) { sof_data[10 + (i * 3)] = i; // index identifier sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | (ptr->v_samp_ratio[i]); // sampling ratios sof_data[12 + (i * 3)] = zr36050_tq[i]; // Q table selection } return zr36050_pushit(ptr, ZR050_SOF_IDX, (3 * NO_OF_COMPONENTS) + 10, sof_data); } /* ------------------------------------------------------------------------- */ /* SOS (start of scan) segment depends on the used scan components of each color component */ static int zr36050_set_sos (struct zr36050 *ptr) { char sos_data[16]; // max. size of register set int i; dprintk(3, "%s: write SOS\n", ptr->name); sos_data[0] = 0xff; sos_data[1] = 0xda; sos_data[2] = 0x00; sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3; sos_data[4] = NO_OF_COMPONENTS; for (i = 0; i < NO_OF_COMPONENTS; i++) { sos_data[5 + (i * 2)] = i; // index sos_data[6 + (i * 2)] = (zr36050_td[i] << 4) | zr36050_ta[i]; // AC/DC tbl.sel. } sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3F; sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00; return zr36050_pushit(ptr, ZR050_SOS1_IDX, 4 + 1 + (2 * NO_OF_COMPONENTS) + 3, sos_data); } /* ------------------------------------------------------------------------- */ /* DRI (define restart interval) */ static int zr36050_set_dri (struct zr36050 *ptr) { char dri_data[6]; // max. 
size of register set dprintk(3, "%s: write DRI\n", ptr->name); dri_data[0] = 0xff; dri_data[1] = 0xdd; dri_data[2] = 0x00; dri_data[3] = 0x04; dri_data[4] = ptr->dri >> 8; dri_data[5] = ptr->dri & 0xff; return zr36050_pushit(ptr, ZR050_DRI_IDX, 6, dri_data); } /* ========================================================================= Setup function: Setup compression/decompression of Zoran's JPEG processor ( see also zoran 36050 manual ) ... sorry for the spaghetti code ... ========================================================================= */ static void zr36050_init (struct zr36050 *ptr) { int sum = 0; long bitcnt, tmp; if (ptr->mode == CODEC_DO_COMPRESSION) { dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name); /* 050 communicates with 057 in master mode */ zr36050_write(ptr, ZR050_HARDWARE, ZR050_HW_MSTR); /* encoding table preload for compression */ zr36050_write(ptr, ZR050_MODE, ZR050_MO_COMP | ZR050_MO_TLM); zr36050_write(ptr, ZR050_OPTIONS, 0); /* disable all IRQs */ zr36050_write(ptr, ZR050_INT_REQ_0, 0); zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1 /* volume control settings */ /*zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol);*/ zr36050_write(ptr, ZR050_SF_HI, ptr->scalefact >> 8); zr36050_write(ptr, ZR050_SF_LO, ptr->scalefact & 0xff); zr36050_write(ptr, ZR050_AF_HI, 0xff); zr36050_write(ptr, ZR050_AF_M, 0xff); zr36050_write(ptr, ZR050_AF_LO, 0xff); /* setup the variable jpeg tables */ sum += zr36050_set_sof(ptr); sum += zr36050_set_sos(ptr); sum += zr36050_set_dri(ptr); /* setup the fixed jpeg tables - maybe variable, though - * (see table init section above) */ dprintk(3, "%s: write DQT, DHT, APP\n", ptr->name); sum += zr36050_pushit(ptr, ZR050_DQT_IDX, sizeof(zr36050_dqt), zr36050_dqt); sum += zr36050_pushit(ptr, ZR050_DHT_IDX, sizeof(zr36050_dht), zr36050_dht); zr36050_write(ptr, ZR050_APP_IDX, 0xff); zr36050_write(ptr, ZR050_APP_IDX + 1, 0xe0 + ptr->app.appn); zr36050_write(ptr, ZR050_APP_IDX + 2, 0x00); zr36050_write(ptr, 
ZR050_APP_IDX + 3, ptr->app.len + 2); sum += zr36050_pushit(ptr, ZR050_APP_IDX + 4, 60, ptr->app.data) + 4; zr36050_write(ptr, ZR050_COM_IDX, 0xff); zr36050_write(ptr, ZR050_COM_IDX + 1, 0xfe); zr36050_write(ptr, ZR050_COM_IDX + 2, 0x00); zr36050_write(ptr, ZR050_COM_IDX + 3, ptr->com.len + 2); sum += zr36050_pushit(ptr, ZR050_COM_IDX + 4, 60, ptr->com.data) + 4; /* do the internal huffman table preload */ zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI); zr36050_write(ptr, ZR050_GO, 1); // launch codec zr36050_wait_end(ptr); dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, ptr->status1); if ((ptr->status1 & 0x4) == 0) { dprintk(1, KERN_ERR "%s: init aborted!\n", ptr->name); return; // something is wrong, its timed out!!!! } /* setup misc. data for compression (target code sizes) */ /* size of compressed code to reach without header data */ sum = ptr->real_code_vol - sum; bitcnt = sum << 3; /* need the size in bits */ tmp = bitcnt >> 16; dprintk(3, "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); zr36050_write(ptr, ZR050_TCV_NET_HI, tmp >> 8); zr36050_write(ptr, ZR050_TCV_NET_MH, tmp & 0xff); tmp = bitcnt & 0xffff; zr36050_write(ptr, ZR050_TCV_NET_ML, tmp >> 8); zr36050_write(ptr, ZR050_TCV_NET_LO, tmp & 0xff); bitcnt -= bitcnt >> 7; // bits without stuffing bitcnt -= ((bitcnt * 5) >> 6); // bits without eob tmp = bitcnt >> 16; dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n", ptr->name, bitcnt, tmp); zr36050_write(ptr, ZR050_TCV_DATA_HI, tmp >> 8); zr36050_write(ptr, ZR050_TCV_DATA_MH, tmp & 0xff); tmp = bitcnt & 0xffff; zr36050_write(ptr, ZR050_TCV_DATA_ML, tmp >> 8); zr36050_write(ptr, ZR050_TCV_DATA_LO, tmp & 0xff); /* compression setup with or without bitrate control */ zr36050_write(ptr, ZR050_MODE, ZR050_MO_COMP | ZR050_MO_PASS2 | (ptr->bitrate_ctrl ? 
ZR050_MO_BRC : 0)); /* this headers seem to deliver "valid AVI" jpeg frames */ zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DQT | ZR050_ME_DHT | ((ptr->app.len > 0) ? ZR050_ME_APP : 0) | ((ptr->com.len > 0) ? ZR050_ME_COM : 0)); } else { dprintk(2, "%s: EXPANSION SETUP\n", ptr->name); /* 050 communicates with 055 in master mode */ zr36050_write(ptr, ZR050_HARDWARE, ZR050_HW_MSTR | ZR050_HW_CFIS_2_CLK); /* encoding table preload */ zr36050_write(ptr, ZR050_MODE, ZR050_MO_TLM); /* disable all IRQs */ zr36050_write(ptr, ZR050_INT_REQ_0, 0); zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1 dprintk(3, "%s: write DHT\n", ptr->name); zr36050_pushit(ptr, ZR050_DHT_IDX, sizeof(zr36050_dht), zr36050_dht); /* do the internal huffman table preload */ zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI); zr36050_write(ptr, ZR050_GO, 1); // launch codec zr36050_wait_end(ptr); dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, ptr->status1); if ((ptr->status1 & 0x4) == 0) { dprintk(1, KERN_ERR "%s: init aborted!\n", ptr->name); return; // something is wrong, its timed out!!!! } /* setup misc. 
data for expansion */ zr36050_write(ptr, ZR050_MODE, 0); zr36050_write(ptr, ZR050_MARKERS_EN, 0); } /* adr on selected, to allow GO from master */ zr36050_read(ptr, 0); } /* ========================================================================= CODEC API FUNCTIONS this functions are accessed by the master via the API structure ========================================================================= */ /* set compression/expansion mode and launches codec - this should be the last call from the master before starting processing */ static int zr36050_set_mode (struct videocodec *codec, int mode) { struct zr36050 *ptr = (struct zr36050 *) codec->data; dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) return -EINVAL; ptr->mode = mode; zr36050_init(ptr); return 0; } /* set picture size (norm is ignored as the codec doesn't know about it) */ static int zr36050_set_video (struct videocodec *codec, struct tvnorm *norm, struct vfe_settings *cap, struct vfe_polarity *pol) { struct zr36050 *ptr = (struct zr36050 *) codec->data; int size; dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n", ptr->name, norm->HStart, norm->VStart, cap->x, cap->y, cap->width, cap->height, cap->decimation, cap->quality); /* if () return -EINVAL; * trust the master driver that it knows what it does - so * we allow invalid startx/y and norm for now ... 
*/ ptr->width = cap->width / (cap->decimation & 0xff); ptr->height = cap->height / ((cap->decimation >> 8) & 0xff); /* (KM) JPEG quality */ size = ptr->width * ptr->height; size *= 16; /* size in bits */ /* apply quality setting */ size = size * cap->quality / 200; /* Minimum: 1kb */ if (size < 8192) size = 8192; /* Maximum: 7/8 of code buffer */ if (size > ptr->total_code_vol * 7) size = ptr->total_code_vol * 7; ptr->real_code_vol = size >> 3; /* in bytes */ /* Set max_block_vol here (previously in zr36050_init, moved * here for consistency with zr36060 code */ zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol); return 0; } /* additional control functions */ static int zr36050_control (struct videocodec *codec, int type, int size, void *data) { struct zr36050 *ptr = (struct zr36050 *) codec->data; int *ival = (int *) data; dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, size); switch (type) { case CODEC_G_STATUS: /* get last status */ if (size != sizeof(int)) return -EFAULT; zr36050_read_status1(ptr); *ival = ptr->status1; break; case CODEC_G_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; *ival = CODEC_MODE_BJPG; break; case CODEC_S_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; if (*ival != CODEC_MODE_BJPG) return -EINVAL; /* not needed, do nothing */ return 0; case CODEC_G_VFE: case CODEC_S_VFE: /* not needed, do nothing */ return 0; case CODEC_S_MMAP: /* not available, give an error */ return -ENXIO; case CODEC_G_JPEG_TDS_BYTE: /* get target volume in byte */ if (size != sizeof(int)) return -EFAULT; *ival = ptr->total_code_vol; break; case CODEC_S_JPEG_TDS_BYTE: /* get target volume in byte */ if (size != sizeof(int)) return -EFAULT; ptr->total_code_vol = *ival; /* (Kieran Morrissey) * code copied from zr36060.c to ensure proper bitrate */ ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; break; case CODEC_G_JPEG_SCALE: /* get scaling factor */ if (size != sizeof(int)) return -EFAULT; *ival = zr36050_read_scalefactor(ptr); 
break; case CODEC_S_JPEG_SCALE: /* set scaling factor */ if (size != sizeof(int)) return -EFAULT; ptr->scalefact = *ival; break; case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */ struct jpeg_app_marker *app = data; if (size != sizeof(struct jpeg_app_marker)) return -EFAULT; *app = ptr->app; break; } case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */ struct jpeg_app_marker *app = data; if (size != sizeof(struct jpeg_app_marker)) return -EFAULT; ptr->app = *app; break; } case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */ struct jpeg_com_marker *com = data; if (size != sizeof(struct jpeg_com_marker)) return -EFAULT; *com = ptr->com; break; } case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */ struct jpeg_com_marker *com = data; if (size != sizeof(struct jpeg_com_marker)) return -EFAULT; ptr->com = *com; break; } default: return -EINVAL; } return size; } /* ========================================================================= Exit and unregister function: Deinitializes Zoran's JPEG processor ========================================================================= */ static int zr36050_unset (struct videocodec *codec) { struct zr36050 *ptr = codec->data; if (ptr) { /* do wee need some codec deinit here, too ???? */ dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num); kfree(ptr); codec->data = NULL; zr36050_codecs--; return 0; } return -EFAULT; } /* ========================================================================= Setup and registry function: Initializes Zoran's JPEG processor Also sets pixel size, average code size, mode (compr./decompr.) 
(the given size is determined by the processor with the video interface) ========================================================================= */ static int zr36050_setup (struct videocodec *codec) { struct zr36050 *ptr; int res; dprintk(2, "zr36050: initializing MJPEG subsystem #%d.\n", zr36050_codecs); if (zr36050_codecs == MAX_CODECS) { dprintk(1, KERN_ERR "zr36050: Can't attach more codecs!\n"); return -ENOSPC; } //mem structure init codec->data = ptr = kzalloc(sizeof(struct zr36050), GFP_KERNEL); if (NULL == ptr) { dprintk(1, KERN_ERR "zr36050: Can't get enough memory!\n"); return -ENOMEM; } snprintf(ptr->name, sizeof(ptr->name), "zr36050[%d]", zr36050_codecs); ptr->num = zr36050_codecs++; ptr->codec = codec; //testing res = zr36050_basic_test(ptr); if (res < 0) { zr36050_unset(codec); return res; } //final setup memcpy(ptr->h_samp_ratio, zr36050_decimation_h, 8); memcpy(ptr->v_samp_ratio, zr36050_decimation_v, 8); ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag * (what is the difference?) 
*/ ptr->mode = CODEC_DO_COMPRESSION; ptr->width = 384; ptr->height = 288; ptr->total_code_vol = 16000; ptr->max_block_vol = 240; ptr->scalefact = 0x100; ptr->dri = 1; /* no app/com marker by default */ ptr->app.appn = 0; ptr->app.len = 0; ptr->com.len = 0; zr36050_init(ptr); dprintk(1, KERN_INFO "%s: codec attached and running\n", ptr->name); return 0; } static const struct videocodec zr36050_codec = { .owner = THIS_MODULE, .name = "zr36050", .magic = 0L, // magic not used .flags = CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER, .type = CODEC_TYPE_ZR36050, .setup = zr36050_setup, // functionality .unset = zr36050_unset, .set_mode = zr36050_set_mode, .set_video = zr36050_set_video, .control = zr36050_control, // others are not used }; /* ========================================================================= HOOK IN DRIVER AS KERNEL MODULE ========================================================================= */ static int __init zr36050_init_module (void) { //dprintk(1, "ZR36050 driver %s\n",ZR050_VERSION); zr36050_codecs = 0; return videocodec_register(&zr36050_codec); } static void __exit zr36050_cleanup_module (void) { if (zr36050_codecs) { dprintk(1, "zr36050: something's wrong - %d codecs left somehow.\n", zr36050_codecs); } videocodec_unregister(&zr36050_codec); } module_init(zr36050_init_module); module_exit(zr36050_cleanup_module); MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>"); MODULE_DESCRIPTION("Driver module for ZR36050 jpeg processors " ZR050_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
laruence/linux
arch/arm/plat-samsung/pm.c
203
8664
/* linux/arch/arm/plat-s3c/pm.c * * Copyright 2008 Openmoko, Inc. * Copyright 2004-2008 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * http://armlinux.simtec.co.uk/ * * S3C common power management (suspend to ram) support. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/suspend.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/serial_core.h> #include <linux/io.h> #include <asm/cacheflush.h> #include <asm/suspend.h> #include <mach/hardware.h> #include <mach/map.h> #include <plat/regs-serial.h> #include <mach/regs-clock.h> #include <mach/regs-irq.h> #include <asm/irq.h> #include <plat/pm.h> #include <mach/pm-core.h> /* for external use */ unsigned long s3c_pm_flags; /* Debug code: * * This code supports debug output to the low level UARTs for use on * resume before the console layer is available. */ #ifdef CONFIG_SAMSUNG_PM_DEBUG extern void printascii(const char *); void s3c_pm_dbg(const char *fmt, ...) { va_list va; char buff[256]; va_start(va, fmt); vsprintf(buff, fmt, va); va_end(va); printascii(buff); } static inline void s3c_pm_debug_init(void) { /* restart uart clocks so we can use them to output */ s3c_pm_debug_init_uart(); } #else #define s3c_pm_debug_init() do { } while(0) #endif /* CONFIG_SAMSUNG_PM_DEBUG */ /* Save the UART configurations if we are configured for debug. 
*/ unsigned char pm_uart_udivslot; #ifdef CONFIG_SAMSUNG_PM_DEBUG static struct pm_uart_save uart_save[CONFIG_SERIAL_SAMSUNG_UARTS]; static void s3c_pm_save_uart(unsigned int uart, struct pm_uart_save *save) { void __iomem *regs = S3C_VA_UARTx(uart); save->ulcon = __raw_readl(regs + S3C2410_ULCON); save->ucon = __raw_readl(regs + S3C2410_UCON); save->ufcon = __raw_readl(regs + S3C2410_UFCON); save->umcon = __raw_readl(regs + S3C2410_UMCON); save->ubrdiv = __raw_readl(regs + S3C2410_UBRDIV); if (pm_uart_udivslot) save->udivslot = __raw_readl(regs + S3C2443_DIVSLOT); S3C_PMDBG("UART[%d]: ULCON=%04x, UCON=%04x, UFCON=%04x, UBRDIV=%04x\n", uart, save->ulcon, save->ucon, save->ufcon, save->ubrdiv); } static void s3c_pm_save_uarts(void) { struct pm_uart_save *save = uart_save; unsigned int uart; for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++) s3c_pm_save_uart(uart, save); } static void s3c_pm_restore_uart(unsigned int uart, struct pm_uart_save *save) { void __iomem *regs = S3C_VA_UARTx(uart); s3c_pm_arch_update_uart(regs, save); __raw_writel(save->ulcon, regs + S3C2410_ULCON); __raw_writel(save->ucon, regs + S3C2410_UCON); __raw_writel(save->ufcon, regs + S3C2410_UFCON); __raw_writel(save->umcon, regs + S3C2410_UMCON); __raw_writel(save->ubrdiv, regs + S3C2410_UBRDIV); if (pm_uart_udivslot) __raw_writel(save->udivslot, regs + S3C2443_DIVSLOT); } static void s3c_pm_restore_uarts(void) { struct pm_uart_save *save = uart_save; unsigned int uart; for (uart = 0; uart < CONFIG_SERIAL_SAMSUNG_UARTS; uart++, save++) s3c_pm_restore_uart(uart, save); } #else static void s3c_pm_save_uarts(void) { } static void s3c_pm_restore_uarts(void) { } #endif /* The IRQ ext-int code goes here, it is too small to currently bother * with its own file. 
*/ unsigned long s3c_irqwake_intmask = 0xffffffffL; unsigned long s3c_irqwake_eintmask = 0xffffffffL; int s3c_irqext_wake(struct irq_data *data, unsigned int state) { unsigned long bit = 1L << IRQ_EINT_BIT(data->irq); if (!(s3c_irqwake_eintallow & bit)) return -ENOENT; printk(KERN_INFO "wake %s for irq %d\n", state ? "enabled" : "disabled", data->irq); if (!state) s3c_irqwake_eintmask |= bit; else s3c_irqwake_eintmask &= ~bit; return 0; } /* helper functions to save and restore register state */ /** * s3c_pm_do_save() - save a set of registers for restoration on resume. * @ptr: Pointer to an array of registers. * @count: Size of the ptr array. * * Run through the list of registers given, saving their contents in the * array for later restoration when we wakeup. */ void s3c_pm_do_save(struct sleep_save *ptr, int count) { for (; count > 0; count--, ptr++) { ptr->val = __raw_readl(ptr->reg); S3C_PMDBG("saved %p value %08lx\n", ptr->reg, ptr->val); } } /** * s3c_pm_do_restore() - restore register values from the save list. * @ptr: Pointer to an array of registers. * @count: Size of the ptr array. * * Restore the register values saved from s3c_pm_do_save(). * * Note, we do not use S3C_PMDBG() in here, as the system may not have * restore the UARTs state yet */ void s3c_pm_do_restore(struct sleep_save *ptr, int count) { for (; count > 0; count--, ptr++) { printk(KERN_DEBUG "restore %p (restore %08lx, was %08x)\n", ptr->reg, ptr->val, __raw_readl(ptr->reg)); __raw_writel(ptr->val, ptr->reg); } } /** * s3c_pm_do_restore_core() - early restore register values from save list. * * This is similar to s3c_pm_do_restore() except we try and minimise the * side effects of the function in case registers that hardware might need * to work has been restored. * * WARNING: Do not put any debug in here that may effect memory or use * peripherals, as things may be changing! 
*/ void s3c_pm_do_restore_core(struct sleep_save *ptr, int count) { for (; count > 0; count--, ptr++) __raw_writel(ptr->val, ptr->reg); } /* s3c2410_pm_show_resume_irqs * * print any IRQs asserted at resume time (ie, we woke from) */ static void __maybe_unused s3c_pm_show_resume_irqs(int start, unsigned long which, unsigned long mask) { int i; which &= ~mask; for (i = 0; i <= 31; i++) { if (which & (1L<<i)) { S3C_PMDBG("IRQ %d asserted at resume\n", start+i); } } } void (*pm_cpu_prep)(void); int (*pm_cpu_sleep)(unsigned long); #define any_allowed(mask, allow) (((mask) & (allow)) != (allow)) /* s3c_pm_enter * * central control for sleep/resume process */ static int s3c_pm_enter(suspend_state_t state) { /* ensure the debug is initialised (if enabled) */ s3c_pm_debug_init(); S3C_PMDBG("%s(%d)\n", __func__, state); if (pm_cpu_prep == NULL || pm_cpu_sleep == NULL) { printk(KERN_ERR "%s: error: no cpu sleep function\n", __func__); return -EINVAL; } /* check if we have anything to wake-up with... bad things seem * to happen if you suspend with no wakeup (system will often * require a full power-cycle) */ if (!any_allowed(s3c_irqwake_intmask, s3c_irqwake_intallow) && !any_allowed(s3c_irqwake_eintmask, s3c_irqwake_eintallow)) { printk(KERN_ERR "%s: No wake-up sources!\n", __func__); printk(KERN_ERR "%s: Aborting sleep\n", __func__); return -EINVAL; } /* save all necessary core registers not covered by the drivers */ samsung_pm_save_gpios(); samsung_pm_saved_gpios(); s3c_pm_save_uarts(); s3c_pm_save_core(); /* set the irq configuration for wake */ s3c_pm_configure_extint(); S3C_PMDBG("sleep: irq wakeup masks: %08lx,%08lx\n", s3c_irqwake_intmask, s3c_irqwake_eintmask); s3c_pm_arch_prepare_irqs(); /* call cpu specific preparation */ pm_cpu_prep(); /* flush cache back to ram */ flush_cache_all(); s3c_pm_check_store(); /* send the cpu to sleep... 
*/ s3c_pm_arch_stop_clocks(); /* this will also act as our return point from when * we resume as it saves its own register state and restores it * during the resume. */ cpu_suspend(0, pm_cpu_sleep); /* restore the system state */ s3c_pm_restore_core(); s3c_pm_restore_uarts(); samsung_pm_restore_gpios(); s3c_pm_restored_gpios(); s3c_pm_debug_init(); /* check what irq (if any) restored the system */ s3c_pm_arch_show_resume_irqs(); S3C_PMDBG("%s: post sleep, preparing to return\n", __func__); /* LEDs should now be 1110 */ s3c_pm_debug_smdkled(1 << 1, 0); s3c_pm_check_restore(); /* ok, let's return from sleep */ S3C_PMDBG("S3C PM Resume (post-restore)\n"); return 0; } static int s3c_pm_prepare(void) { /* prepare check area if configured */ s3c_pm_check_prepare(); return 0; } static void s3c_pm_finish(void) { s3c_pm_check_cleanup(); } static const struct platform_suspend_ops s3c_pm_ops = { .enter = s3c_pm_enter, .prepare = s3c_pm_prepare, .finish = s3c_pm_finish, .valid = suspend_valid_only_mem, }; /* s3c_pm_init * * Attach the power management functions. This should be called * from the board specific initialisation if the board supports * it. */ int __init s3c_pm_init(void) { printk("S3C Power Management, Copyright 2004 Simtec Electronics\n"); suspend_set_ops(&s3c_pm_ops); return 0; }
gpl-2.0
warped-rudi/linux-linaro-stable-mx6
drivers/hwmon/max197.c
459
8945
/* * Maxim MAX197 A/D Converter driver * * Copyright (c) 2012 Savoir-faire Linux Inc. * Vivien Didelot <vivien.didelot@savoirfairelinux.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * For further information, see the Documentation/hwmon/max197 file. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/sysfs.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/platform_device.h> #include <linux/platform_data/max197.h> #define MAX199_LIMIT 4000 /* 4V */ #define MAX197_LIMIT 10000 /* 10V */ #define MAX197_NUM_CH 8 /* 8 Analog Input Channels */ /* Control byte format */ #define MAX197_BIP (1 << 3) /* Bipolarity */ #define MAX197_RNG (1 << 4) /* Full range */ #define MAX197_SCALE 12207 /* Scale coefficient for raw data */ /* List of supported chips */ enum max197_chips { max197, max199 }; /** * struct max197_data - device instance specific data * @pdata: Platform data. * @hwmon_dev: The hwmon device. * @lock: Read/Write mutex. * @limit: Max range value (10V for MAX197, 4V for MAX199). * @scale: Need to scale. * @ctrl_bytes: Channels control byte. 
*/ struct max197_data { struct max197_platform_data *pdata; struct device *hwmon_dev; struct mutex lock; int limit; bool scale; u8 ctrl_bytes[MAX197_NUM_CH]; }; static inline void max197_set_unipolarity(struct max197_data *data, int channel) { data->ctrl_bytes[channel] &= ~MAX197_BIP; } static inline void max197_set_bipolarity(struct max197_data *data, int channel) { data->ctrl_bytes[channel] |= MAX197_BIP; } static inline void max197_set_half_range(struct max197_data *data, int channel) { data->ctrl_bytes[channel] &= ~MAX197_RNG; } static inline void max197_set_full_range(struct max197_data *data, int channel) { data->ctrl_bytes[channel] |= MAX197_RNG; } static inline bool max197_is_bipolar(struct max197_data *data, int channel) { return data->ctrl_bytes[channel] & MAX197_BIP; } static inline bool max197_is_full_range(struct max197_data *data, int channel) { return data->ctrl_bytes[channel] & MAX197_RNG; } /* Function called on read access on in{0,1,2,3,4,5,6,7}_{min,max} */ static ssize_t max197_show_range(struct device *dev, struct device_attribute *devattr, char *buf) { struct max197_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr); int channel = attr->index; bool is_min = attr->nr; int range; if (mutex_lock_interruptible(&data->lock)) return -ERESTARTSYS; range = max197_is_full_range(data, channel) ? 
data->limit : data->limit / 2; if (is_min) { if (max197_is_bipolar(data, channel)) range = -range; else range = 0; } mutex_unlock(&data->lock); return sprintf(buf, "%d\n", range); } /* Function called on write access on in{0,1,2,3,4,5,6,7}_{min,max} */ static ssize_t max197_store_range(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct max197_data *data = dev_get_drvdata(dev); struct sensor_device_attribute_2 *attr = to_sensor_dev_attr_2(devattr); int channel = attr->index; bool is_min = attr->nr; long value; int half = data->limit / 2; int full = data->limit; if (kstrtol(buf, 10, &value)) return -EINVAL; if (is_min) { if (value <= -full) value = -full; else if (value < 0) value = -half; else value = 0; } else { if (value >= full) value = full; else value = half; } if (mutex_lock_interruptible(&data->lock)) return -ERESTARTSYS; if (value == 0) { /* We can deduce only the polarity */ max197_set_unipolarity(data, channel); } else if (value == -half) { max197_set_bipolarity(data, channel); max197_set_half_range(data, channel); } else if (value == -full) { max197_set_bipolarity(data, channel); max197_set_full_range(data, channel); } else if (value == half) { /* We can deduce only the range */ max197_set_half_range(data, channel); } else if (value == full) { /* We can deduce only the range */ max197_set_full_range(data, channel); } mutex_unlock(&data->lock); return count; } /* Function called on read access on in{0,1,2,3,4,5,6,7}_input */ static ssize_t max197_show_input(struct device *dev, struct device_attribute *devattr, char *buf) { struct max197_data *data = dev_get_drvdata(dev); struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); int channel = attr->index; s32 value; int ret; if (mutex_lock_interruptible(&data->lock)) return -ERESTARTSYS; ret = data->pdata->convert(data->ctrl_bytes[channel]); if (ret < 0) { dev_err(dev, "conversion failed\n"); goto unlock; } value = ret; /* * Coefficient to apply on raw 
value. * See Table 1. Full Scale and Zero Scale in the MAX197 datasheet. */ if (data->scale) { value *= MAX197_SCALE; if (max197_is_full_range(data, channel)) value *= 2; value /= 10000; } ret = sprintf(buf, "%d\n", value); unlock: mutex_unlock(&data->lock); return ret; } static ssize_t max197_show_name(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *pdev = to_platform_device(dev); return sprintf(buf, "%s\n", pdev->name); } #define MAX197_SENSOR_DEVICE_ATTR_CH(chan) \ static SENSOR_DEVICE_ATTR(in##chan##_input, S_IRUGO, \ max197_show_input, NULL, chan); \ static SENSOR_DEVICE_ATTR_2(in##chan##_min, S_IRUGO | S_IWUSR, \ max197_show_range, \ max197_store_range, \ true, chan); \ static SENSOR_DEVICE_ATTR_2(in##chan##_max, S_IRUGO | S_IWUSR, \ max197_show_range, \ max197_store_range, \ false, chan) #define MAX197_SENSOR_DEV_ATTR_IN(chan) \ &sensor_dev_attr_in##chan##_input.dev_attr.attr, \ &sensor_dev_attr_in##chan##_max.dev_attr.attr, \ &sensor_dev_attr_in##chan##_min.dev_attr.attr static DEVICE_ATTR(name, S_IRUGO, max197_show_name, NULL); MAX197_SENSOR_DEVICE_ATTR_CH(0); MAX197_SENSOR_DEVICE_ATTR_CH(1); MAX197_SENSOR_DEVICE_ATTR_CH(2); MAX197_SENSOR_DEVICE_ATTR_CH(3); MAX197_SENSOR_DEVICE_ATTR_CH(4); MAX197_SENSOR_DEVICE_ATTR_CH(5); MAX197_SENSOR_DEVICE_ATTR_CH(6); MAX197_SENSOR_DEVICE_ATTR_CH(7); static const struct attribute_group max197_sysfs_group = { .attrs = (struct attribute *[]) { &dev_attr_name.attr, MAX197_SENSOR_DEV_ATTR_IN(0), MAX197_SENSOR_DEV_ATTR_IN(1), MAX197_SENSOR_DEV_ATTR_IN(2), MAX197_SENSOR_DEV_ATTR_IN(3), MAX197_SENSOR_DEV_ATTR_IN(4), MAX197_SENSOR_DEV_ATTR_IN(5), MAX197_SENSOR_DEV_ATTR_IN(6), MAX197_SENSOR_DEV_ATTR_IN(7), NULL }, }; static int max197_probe(struct platform_device *pdev) { int ch, ret; struct max197_data *data; struct max197_platform_data *pdata = dev_get_platdata(&pdev->dev); enum max197_chips chip = platform_get_device_id(pdev)->driver_data; if (pdata == NULL) { dev_err(&pdev->dev, "no 
platform data supplied\n"); return -EINVAL; } if (pdata->convert == NULL) { dev_err(&pdev->dev, "no convert function supplied\n"); return -EINVAL; } data = devm_kzalloc(&pdev->dev, sizeof(struct max197_data), GFP_KERNEL); if (!data) { dev_err(&pdev->dev, "devm_kzalloc failed\n"); return -ENOMEM; } data->pdata = pdata; mutex_init(&data->lock); if (chip == max197) { data->limit = MAX197_LIMIT; data->scale = true; } else { data->limit = MAX199_LIMIT; data->scale = false; } for (ch = 0; ch < MAX197_NUM_CH; ch++) data->ctrl_bytes[ch] = (u8) ch; platform_set_drvdata(pdev, data); ret = sysfs_create_group(&pdev->dev.kobj, &max197_sysfs_group); if (ret) { dev_err(&pdev->dev, "sysfs create group failed\n"); return ret; } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (IS_ERR(data->hwmon_dev)) { ret = PTR_ERR(data->hwmon_dev); dev_err(&pdev->dev, "hwmon device register failed\n"); goto error; } return 0; error: sysfs_remove_group(&pdev->dev.kobj, &max197_sysfs_group); return ret; } static int max197_remove(struct platform_device *pdev) { struct max197_data *data = platform_get_drvdata(pdev); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&pdev->dev.kobj, &max197_sysfs_group); return 0; } static struct platform_device_id max197_device_ids[] = { { "max197", max197 }, { "max199", max199 }, { } }; MODULE_DEVICE_TABLE(platform, max197_device_ids); static struct platform_driver max197_driver = { .driver = { .name = "max197", .owner = THIS_MODULE, }, .probe = max197_probe, .remove = max197_remove, .id_table = max197_device_ids, }; module_platform_driver(max197_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Savoir-faire Linux Inc. <kernel@savoirfairelinux.com>"); MODULE_DESCRIPTION("Maxim MAX197 A/D Converter driver");
gpl-2.0
ultrasystem/system
drivers/target/target_core_fabric_configfs.c
459
36169
/******************************************************************************* * Filename: target_core_fabric_configfs.c * * This file contains generic fabric module configfs infrastructure for * TCM v4.x code * * (c) Copyright 2010-2013 Datera, Inc. * * Nicholas A. Bellinger <nab@linux-iscsi.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. ****************************************************************************/ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/utsname.h> #include <linux/init.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/unistd.h> #include <linux/string.h> #include <linux/syscalls.h> #include <linux/configfs.h> #include <target/target_core_base.h> #include <target/target_core_fabric.h> #include <target/target_core_fabric_configfs.h> #include <target/target_core_configfs.h> #include <target/configfs_macros.h> #include "target_core_internal.h" #include "target_core_alua.h" #include "target_core_pr.h" #define TF_CIT_SETUP(_name, _item_ops, _group_ops, _attrs) \ static void target_fabric_setup_##_name##_cit(struct target_fabric_configfs *tf) \ { \ struct target_fabric_configfs_template *tfc = &tf->tf_cit_tmpl; \ struct config_item_type *cit = &tfc->tfc_##_name##_cit; \ \ cit->ct_item_ops = _item_ops; \ cit->ct_group_ops = _group_ops; \ cit->ct_attrs = _attrs; \ cit->ct_owner = tf->tf_module; \ pr_debug("Setup generic %s\n", __stringify(_name)); \ } /* Start of tfc_tpg_mappedlun_cit */ 
static int target_fabric_mappedlun_link( struct config_item *lun_acl_ci, struct config_item *lun_ci) { struct se_dev_entry *deve; struct se_lun *lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), struct se_lun_acl, se_lun_group); struct se_portal_group *se_tpg; struct config_item *nacl_ci, *tpg_ci, *tpg_ci_s, *wwn_ci, *wwn_ci_s; int ret = 0, lun_access; if (lun->lun_link_magic != SE_LUN_LINK_MAGIC) { pr_err("Bad lun->lun_link_magic, not a valid lun_ci pointer:" " %p to struct lun: %p\n", lun_ci, lun); return -EFAULT; } /* * Ensure that the source port exists */ if (!lun->lun_sep || !lun->lun_sep->sep_tpg) { pr_err("Source se_lun->lun_sep or lun->lun_sep->sep" "_tpg does not exist\n"); return -EINVAL; } se_tpg = lun->lun_sep->sep_tpg; nacl_ci = &lun_acl_ci->ci_parent->ci_group->cg_item; tpg_ci = &nacl_ci->ci_group->cg_item; wwn_ci = &tpg_ci->ci_group->cg_item; tpg_ci_s = &lun_ci->ci_parent->ci_group->cg_item; wwn_ci_s = &tpg_ci_s->ci_group->cg_item; /* * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT */ if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) { pr_err("Illegal Initiator ACL SymLink outside of %s\n", config_item_name(wwn_ci)); return -EINVAL; } if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) { pr_err("Illegal Initiator ACL Symlink outside of %s" " TPGT: %s\n", config_item_name(wwn_ci), config_item_name(tpg_ci)); return -EINVAL; } /* * If this struct se_node_acl was dynamically generated with * tpg_1/attrib/generate_node_acls=1, use the existing deve->lun_flags, * which be will write protected (READ-ONLY) when * tpg_1/attrib/demo_mode_write_protect=1 */ spin_lock_irq(&lacl->se_lun_nacl->device_list_lock); deve = lacl->se_lun_nacl->device_list[lacl->mapped_lun]; if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) lun_access = deve->lun_flags; else lun_access = 
(se_tpg->se_tpg_tfo->tpg_check_prod_mode_write_protect( se_tpg)) ? TRANSPORT_LUNFLAGS_READ_ONLY : TRANSPORT_LUNFLAGS_READ_WRITE; spin_unlock_irq(&lacl->se_lun_nacl->device_list_lock); /* * Determine the actual mapped LUN value user wants.. * * This value is what the SCSI Initiator actually sees the * iscsi/$IQN/$TPGT/lun/lun_* as on their SCSI Initiator Ports. */ ret = core_dev_add_initiator_node_lun_acl(se_tpg, lacl, lun->unpacked_lun, lun_access); return (ret < 0) ? -EINVAL : 0; } static int target_fabric_mappedlun_unlink( struct config_item *lun_acl_ci, struct config_item *lun_ci) { struct se_lun *lun; struct se_lun_acl *lacl = container_of(to_config_group(lun_acl_ci), struct se_lun_acl, se_lun_group); struct se_node_acl *nacl = lacl->se_lun_nacl; struct se_dev_entry *deve = nacl->device_list[lacl->mapped_lun]; struct se_portal_group *se_tpg; /* * Determine if the underlying MappedLUN has already been released.. */ if (!deve->se_lun) return 0; lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); se_tpg = lun->lun_sep->sep_tpg; core_dev_del_initiator_node_lun_acl(se_tpg, lun, lacl); return 0; } CONFIGFS_EATTR_STRUCT(target_fabric_mappedlun, se_lun_acl); #define TCM_MAPPEDLUN_ATTR(_name, _mode) \ static struct target_fabric_mappedlun_attribute target_fabric_mappedlun_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_fabric_mappedlun_show_##_name, \ target_fabric_mappedlun_store_##_name); static ssize_t target_fabric_mappedlun_show_write_protect( struct se_lun_acl *lacl, char *page) { struct se_node_acl *se_nacl = lacl->se_lun_nacl; struct se_dev_entry *deve; ssize_t len; spin_lock_irq(&se_nacl->device_list_lock); deve = se_nacl->device_list[lacl->mapped_lun]; len = sprintf(page, "%d\n", (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY) ? 
1 : 0); spin_unlock_irq(&se_nacl->device_list_lock); return len; } static ssize_t target_fabric_mappedlun_store_write_protect( struct se_lun_acl *lacl, const char *page, size_t count) { struct se_node_acl *se_nacl = lacl->se_lun_nacl; struct se_portal_group *se_tpg = se_nacl->se_tpg; unsigned long op; int ret; ret = kstrtoul(page, 0, &op); if (ret) return ret; if ((op != 1) && (op != 0)) return -EINVAL; core_update_device_list_access(lacl->mapped_lun, (op) ? TRANSPORT_LUNFLAGS_READ_ONLY : TRANSPORT_LUNFLAGS_READ_WRITE, lacl->se_lun_nacl); pr_debug("%s_ConfigFS: Changed Initiator ACL: %s" " Mapped LUN: %u Write Protect bit to %s\n", se_tpg->se_tpg_tfo->get_fabric_name(), lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF"); return count; } TCM_MAPPEDLUN_ATTR(write_protect, S_IRUGO | S_IWUSR); CONFIGFS_EATTR_OPS(target_fabric_mappedlun, se_lun_acl, se_lun_group); static void target_fabric_mappedlun_release(struct config_item *item) { struct se_lun_acl *lacl = container_of(to_config_group(item), struct se_lun_acl, se_lun_group); struct se_portal_group *se_tpg = lacl->se_lun_nacl->se_tpg; core_dev_free_initiator_node_lun_acl(se_tpg, lacl); } static struct configfs_attribute *target_fabric_mappedlun_attrs[] = { &target_fabric_mappedlun_write_protect.attr, NULL, }; static struct configfs_item_operations target_fabric_mappedlun_item_ops = { .release = target_fabric_mappedlun_release, .show_attribute = target_fabric_mappedlun_attr_show, .store_attribute = target_fabric_mappedlun_attr_store, .allow_link = target_fabric_mappedlun_link, .drop_link = target_fabric_mappedlun_unlink, }; TF_CIT_SETUP(tpg_mappedlun, &target_fabric_mappedlun_item_ops, NULL, target_fabric_mappedlun_attrs); /* End of tfc_tpg_mappedlun_cit */ /* Start of tfc_tpg_mappedlun_port_cit */ static struct config_group *target_core_mappedlun_stat_mkdir( struct config_group *group, const char *name) { return ERR_PTR(-ENOSYS); } static void target_core_mappedlun_stat_rmdir( struct config_group *group, 
struct config_item *item) { return; } static struct configfs_group_operations target_fabric_mappedlun_stat_group_ops = { .make_group = target_core_mappedlun_stat_mkdir, .drop_item = target_core_mappedlun_stat_rmdir, }; TF_CIT_SETUP(tpg_mappedlun_stat, NULL, &target_fabric_mappedlun_stat_group_ops, NULL); /* End of tfc_tpg_mappedlun_port_cit */ /* Start of tfc_tpg_nacl_attrib_cit */ CONFIGFS_EATTR_OPS(target_fabric_nacl_attrib, se_node_acl, acl_attrib_group); static struct configfs_item_operations target_fabric_nacl_attrib_item_ops = { .show_attribute = target_fabric_nacl_attrib_attr_show, .store_attribute = target_fabric_nacl_attrib_attr_store, }; TF_CIT_SETUP(tpg_nacl_attrib, &target_fabric_nacl_attrib_item_ops, NULL, NULL); /* End of tfc_tpg_nacl_attrib_cit */ /* Start of tfc_tpg_nacl_auth_cit */ CONFIGFS_EATTR_OPS(target_fabric_nacl_auth, se_node_acl, acl_auth_group); static struct configfs_item_operations target_fabric_nacl_auth_item_ops = { .show_attribute = target_fabric_nacl_auth_attr_show, .store_attribute = target_fabric_nacl_auth_attr_store, }; TF_CIT_SETUP(tpg_nacl_auth, &target_fabric_nacl_auth_item_ops, NULL, NULL); /* End of tfc_tpg_nacl_auth_cit */ /* Start of tfc_tpg_nacl_param_cit */ CONFIGFS_EATTR_OPS(target_fabric_nacl_param, se_node_acl, acl_param_group); static struct configfs_item_operations target_fabric_nacl_param_item_ops = { .show_attribute = target_fabric_nacl_param_attr_show, .store_attribute = target_fabric_nacl_param_attr_store, }; TF_CIT_SETUP(tpg_nacl_param, &target_fabric_nacl_param_item_ops, NULL, NULL); /* End of tfc_tpg_nacl_param_cit */ /* Start of tfc_tpg_nacl_base_cit */ CONFIGFS_EATTR_OPS(target_fabric_nacl_base, se_node_acl, acl_group); static struct config_group *target_fabric_make_mappedlun( struct config_group *group, const char *name) { struct se_node_acl *se_nacl = container_of(group, struct se_node_acl, acl_group); struct se_portal_group *se_tpg = se_nacl->se_tpg; struct target_fabric_configfs *tf = 
se_tpg->se_tpg_wwn->wwn_tf; struct se_lun_acl *lacl; struct config_item *acl_ci; struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; char *buf; unsigned long mapped_lun; int ret = 0; acl_ci = &group->cg_item; if (!acl_ci) { pr_err("Unable to locatel acl_ci\n"); return NULL; } buf = kzalloc(strlen(name) + 1, GFP_KERNEL); if (!buf) { pr_err("Unable to allocate memory for name buf\n"); return ERR_PTR(-ENOMEM); } snprintf(buf, strlen(name) + 1, "%s", name); /* * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID. */ if (strstr(buf, "lun_") != buf) { pr_err("Unable to locate \"lun_\" from buf: %s" " name: %s\n", buf, name); ret = -EINVAL; goto out; } /* * Determine the Mapped LUN value. This is what the SCSI Initiator * Port will actually see. */ ret = kstrtoul(buf + 4, 0, &mapped_lun); if (ret) goto out; if (mapped_lun > UINT_MAX) { ret = -EINVAL; goto out; } if (mapped_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) { pr_err("Mapped LUN: %lu exceeds TRANSPORT_MAX_LUNS_PER_TPG" "-1: %u for Target Portal Group: %u\n", mapped_lun, TRANSPORT_MAX_LUNS_PER_TPG-1, se_tpg->se_tpg_tfo->tpg_get_tag(se_tpg)); ret = -EINVAL; goto out; } lacl = core_dev_init_initiator_node_lun_acl(se_tpg, se_nacl, mapped_lun, &ret); if (!lacl) { ret = -EINVAL; goto out; } lacl_cg = &lacl->se_lun_group; lacl_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, GFP_KERNEL); if (!lacl_cg->default_groups) { pr_err("Unable to allocate lacl_cg->default_groups\n"); ret = -ENOMEM; goto out; } config_group_init_type_name(&lacl->se_lun_group, name, &tf->tf_cit_tmpl.tfc_tpg_mappedlun_cit); config_group_init_type_name(&lacl->ml_stat_grps.stat_group, "statistics", &tf->tf_cit_tmpl.tfc_tpg_mappedlun_stat_cit); lacl_cg->default_groups[0] = &lacl->ml_stat_grps.stat_group; lacl_cg->default_groups[1] = NULL; ml_stat_grp = &lacl->ml_stat_grps.stat_group; ml_stat_grp->default_groups = kmalloc(sizeof(struct config_group *) * 3, GFP_KERNEL); if (!ml_stat_grp->default_groups) { pr_err("Unable to 
allocate ml_stat_grp->default_groups\n"); ret = -ENOMEM; goto out; } target_stat_setup_mappedlun_default_groups(lacl); kfree(buf); return &lacl->se_lun_group; out: if (lacl_cg) kfree(lacl_cg->default_groups); kfree(buf); return ERR_PTR(ret); } static void target_fabric_drop_mappedlun( struct config_group *group, struct config_item *item) { struct se_lun_acl *lacl = container_of(to_config_group(item), struct se_lun_acl, se_lun_group); struct config_item *df_item; struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; int i; ml_stat_grp = &lacl->ml_stat_grps.stat_group; for (i = 0; ml_stat_grp->default_groups[i]; i++) { df_item = &ml_stat_grp->default_groups[i]->cg_item; ml_stat_grp->default_groups[i] = NULL; config_item_put(df_item); } kfree(ml_stat_grp->default_groups); lacl_cg = &lacl->se_lun_group; for (i = 0; lacl_cg->default_groups[i]; i++) { df_item = &lacl_cg->default_groups[i]->cg_item; lacl_cg->default_groups[i] = NULL; config_item_put(df_item); } kfree(lacl_cg->default_groups); config_item_put(item); } static void target_fabric_nacl_base_release(struct config_item *item) { struct se_node_acl *se_nacl = container_of(to_config_group(item), struct se_node_acl, acl_group); struct se_portal_group *se_tpg = se_nacl->se_tpg; struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; tf->tf_ops.fabric_drop_nodeacl(se_nacl); } static struct configfs_item_operations target_fabric_nacl_base_item_ops = { .release = target_fabric_nacl_base_release, .show_attribute = target_fabric_nacl_base_attr_show, .store_attribute = target_fabric_nacl_base_attr_store, }; static struct configfs_group_operations target_fabric_nacl_base_group_ops = { .make_group = target_fabric_make_mappedlun, .drop_item = target_fabric_drop_mappedlun, }; TF_CIT_SETUP(tpg_nacl_base, &target_fabric_nacl_base_item_ops, &target_fabric_nacl_base_group_ops, NULL); /* End of tfc_tpg_nacl_base_cit */ /* Start of tfc_node_fabric_stats_cit */ /* * This is used as a placeholder for struct 
se_node_acl->acl_fabric_stat_group * to allow fabrics access to ->acl_fabric_stat_group->default_groups[] */ TF_CIT_SETUP(tpg_nacl_stat, NULL, NULL, NULL); /* End of tfc_wwn_fabric_stats_cit */ /* Start of tfc_tpg_nacl_cit */ static struct config_group *target_fabric_make_nodeacl( struct config_group *group, const char *name) { struct se_portal_group *se_tpg = container_of(group, struct se_portal_group, tpg_acl_group); struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; struct se_node_acl *se_nacl; struct config_group *nacl_cg; if (!tf->tf_ops.fabric_make_nodeacl) { pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n"); return ERR_PTR(-ENOSYS); } se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name); if (IS_ERR(se_nacl)) return ERR_CAST(se_nacl); nacl_cg = &se_nacl->acl_group; nacl_cg->default_groups = se_nacl->acl_default_groups; nacl_cg->default_groups[0] = &se_nacl->acl_attrib_group; nacl_cg->default_groups[1] = &se_nacl->acl_auth_group; nacl_cg->default_groups[2] = &se_nacl->acl_param_group; nacl_cg->default_groups[3] = &se_nacl->acl_fabric_stat_group; nacl_cg->default_groups[4] = NULL; config_group_init_type_name(&se_nacl->acl_group, name, &tf->tf_cit_tmpl.tfc_tpg_nacl_base_cit); config_group_init_type_name(&se_nacl->acl_attrib_group, "attrib", &tf->tf_cit_tmpl.tfc_tpg_nacl_attrib_cit); config_group_init_type_name(&se_nacl->acl_auth_group, "auth", &tf->tf_cit_tmpl.tfc_tpg_nacl_auth_cit); config_group_init_type_name(&se_nacl->acl_param_group, "param", &tf->tf_cit_tmpl.tfc_tpg_nacl_param_cit); config_group_init_type_name(&se_nacl->acl_fabric_stat_group, "fabric_statistics", &tf->tf_cit_tmpl.tfc_tpg_nacl_stat_cit); return &se_nacl->acl_group; } static void target_fabric_drop_nodeacl( struct config_group *group, struct config_item *item) { struct se_node_acl *se_nacl = container_of(to_config_group(item), struct se_node_acl, acl_group); struct config_item *df_item; struct config_group *nacl_cg; int i; nacl_cg = &se_nacl->acl_group; for (i = 0; 
nacl_cg->default_groups[i]; i++) { df_item = &nacl_cg->default_groups[i]->cg_item; nacl_cg->default_groups[i] = NULL; config_item_put(df_item); } /* * struct se_node_acl free is done in target_fabric_nacl_base_release() */ config_item_put(item); } static struct configfs_group_operations target_fabric_nacl_group_ops = { .make_group = target_fabric_make_nodeacl, .drop_item = target_fabric_drop_nodeacl, }; TF_CIT_SETUP(tpg_nacl, NULL, &target_fabric_nacl_group_ops, NULL); /* End of tfc_tpg_nacl_cit */ /* Start of tfc_tpg_np_base_cit */ CONFIGFS_EATTR_OPS(target_fabric_np_base, se_tpg_np, tpg_np_group); static void target_fabric_np_base_release(struct config_item *item) { struct se_tpg_np *se_tpg_np = container_of(to_config_group(item), struct se_tpg_np, tpg_np_group); struct se_portal_group *se_tpg = se_tpg_np->tpg_np_parent; struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; tf->tf_ops.fabric_drop_np(se_tpg_np); } static struct configfs_item_operations target_fabric_np_base_item_ops = { .release = target_fabric_np_base_release, .show_attribute = target_fabric_np_base_attr_show, .store_attribute = target_fabric_np_base_attr_store, }; TF_CIT_SETUP(tpg_np_base, &target_fabric_np_base_item_ops, NULL, NULL); /* End of tfc_tpg_np_base_cit */ /* Start of tfc_tpg_np_cit */ static struct config_group *target_fabric_make_np( struct config_group *group, const char *name) { struct se_portal_group *se_tpg = container_of(group, struct se_portal_group, tpg_np_group); struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; struct se_tpg_np *se_tpg_np; if (!tf->tf_ops.fabric_make_np) { pr_err("tf->tf_ops.fabric_make_np is NULL\n"); return ERR_PTR(-ENOSYS); } se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name); if (!se_tpg_np || IS_ERR(se_tpg_np)) return ERR_PTR(-EINVAL); se_tpg_np->tpg_np_parent = se_tpg; config_group_init_type_name(&se_tpg_np->tpg_np_group, name, &tf->tf_cit_tmpl.tfc_tpg_np_base_cit); return &se_tpg_np->tpg_np_group; } static void 
target_fabric_drop_np( struct config_group *group, struct config_item *item) { /* * struct se_tpg_np is released via target_fabric_np_base_release() */ config_item_put(item); } static struct configfs_group_operations target_fabric_np_group_ops = { .make_group = &target_fabric_make_np, .drop_item = &target_fabric_drop_np, }; TF_CIT_SETUP(tpg_np, NULL, &target_fabric_np_group_ops, NULL); /* End of tfc_tpg_np_cit */ /* Start of tfc_tpg_port_cit */ CONFIGFS_EATTR_STRUCT(target_fabric_port, se_lun); #define TCM_PORT_ATTR(_name, _mode) \ static struct target_fabric_port_attribute target_fabric_port_##_name = \ __CONFIGFS_EATTR(_name, _mode, \ target_fabric_port_show_attr_##_name, \ target_fabric_port_store_attr_##_name); #define TCM_PORT_ATTOR_RO(_name) \ __CONFIGFS_EATTR_RO(_name, \ target_fabric_port_show_attr_##_name); /* * alua_tg_pt_gp */ static ssize_t target_fabric_port_show_attr_alua_tg_pt_gp( struct se_lun *lun, char *page) { if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_show_tg_pt_gp_info(lun->lun_sep, page); } static ssize_t target_fabric_port_store_attr_alua_tg_pt_gp( struct se_lun *lun, const char *page, size_t count) { if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count); } TCM_PORT_ATTR(alua_tg_pt_gp, S_IRUGO | S_IWUSR); /* * alua_tg_pt_offline */ static ssize_t target_fabric_port_show_attr_alua_tg_pt_offline( struct se_lun *lun, char *page) { if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_show_offline_bit(lun, page); } static ssize_t target_fabric_port_store_attr_alua_tg_pt_offline( struct se_lun *lun, const char *page, size_t count) { if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_offline_bit(lun, page, count); } TCM_PORT_ATTR(alua_tg_pt_offline, S_IRUGO | S_IWUSR); /* * alua_tg_pt_status */ static ssize_t target_fabric_port_show_attr_alua_tg_pt_status( struct se_lun *lun, char *page) { if (!lun || !lun->lun_sep) return -ENODEV; return 
core_alua_show_secondary_status(lun, page); } static ssize_t target_fabric_port_store_attr_alua_tg_pt_status( struct se_lun *lun, const char *page, size_t count) { if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_secondary_status(lun, page, count); } TCM_PORT_ATTR(alua_tg_pt_status, S_IRUGO | S_IWUSR); /* * alua_tg_pt_write_md */ static ssize_t target_fabric_port_show_attr_alua_tg_pt_write_md( struct se_lun *lun, char *page) { if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_show_secondary_write_metadata(lun, page); } static ssize_t target_fabric_port_store_attr_alua_tg_pt_write_md( struct se_lun *lun, const char *page, size_t count) { if (!lun || !lun->lun_sep) return -ENODEV; return core_alua_store_secondary_write_metadata(lun, page, count); } TCM_PORT_ATTR(alua_tg_pt_write_md, S_IRUGO | S_IWUSR); static struct configfs_attribute *target_fabric_port_attrs[] = { &target_fabric_port_alua_tg_pt_gp.attr, &target_fabric_port_alua_tg_pt_offline.attr, &target_fabric_port_alua_tg_pt_status.attr, &target_fabric_port_alua_tg_pt_write_md.attr, NULL, }; CONFIGFS_EATTR_OPS(target_fabric_port, se_lun, lun_group); static int target_fabric_port_link( struct config_item *lun_ci, struct config_item *se_dev_ci) { struct config_item *tpg_ci; struct se_lun *lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); struct se_lun *lun_p; struct se_portal_group *se_tpg; struct se_device *dev = container_of(to_config_group(se_dev_ci), struct se_device, dev_group); struct target_fabric_configfs *tf; int ret; if (dev->dev_link_magic != SE_DEV_LINK_MAGIC) { pr_err("Bad dev->dev_link_magic, not a valid se_dev_ci pointer:" " %p to struct se_device: %p\n", se_dev_ci, dev); return -EFAULT; } if (!(dev->dev_flags & DF_CONFIGURED)) { pr_err("se_device not configured yet, cannot port link\n"); return -ENODEV; } tpg_ci = &lun_ci->ci_parent->ci_group->cg_item; se_tpg = container_of(to_config_group(tpg_ci), struct se_portal_group, tpg_group); tf = 
se_tpg->se_tpg_wwn->wwn_tf; if (lun->lun_se_dev != NULL) { pr_err("Port Symlink already exists\n"); return -EEXIST; } lun_p = core_dev_add_lun(se_tpg, dev, lun->unpacked_lun); if (IS_ERR(lun_p)) { pr_err("core_dev_add_lun() failed\n"); ret = PTR_ERR(lun_p); goto out; } if (tf->tf_ops.fabric_post_link) { /* * Call the optional fabric_post_link() to allow a * fabric module to setup any additional state once * core_dev_add_lun() has been called.. */ tf->tf_ops.fabric_post_link(se_tpg, lun); } return 0; out: return ret; } static int target_fabric_port_unlink( struct config_item *lun_ci, struct config_item *se_dev_ci) { struct se_lun *lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group); struct se_portal_group *se_tpg = lun->lun_sep->sep_tpg; struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; if (tf->tf_ops.fabric_pre_unlink) { /* * Call the optional fabric_pre_unlink() to allow a * fabric module to release any additional stat before * core_dev_del_lun() is called. 
*/ tf->tf_ops.fabric_pre_unlink(se_tpg, lun); } core_dev_del_lun(se_tpg, lun->unpacked_lun); return 0; } static struct configfs_item_operations target_fabric_port_item_ops = { .show_attribute = target_fabric_port_attr_show, .store_attribute = target_fabric_port_attr_store, .allow_link = target_fabric_port_link, .drop_link = target_fabric_port_unlink, }; TF_CIT_SETUP(tpg_port, &target_fabric_port_item_ops, NULL, target_fabric_port_attrs); /* End of tfc_tpg_port_cit */ /* Start of tfc_tpg_port_stat_cit */ static struct config_group *target_core_port_stat_mkdir( struct config_group *group, const char *name) { return ERR_PTR(-ENOSYS); } static void target_core_port_stat_rmdir( struct config_group *group, struct config_item *item) { return; } static struct configfs_group_operations target_fabric_port_stat_group_ops = { .make_group = target_core_port_stat_mkdir, .drop_item = target_core_port_stat_rmdir, }; TF_CIT_SETUP(tpg_port_stat, NULL, &target_fabric_port_stat_group_ops, NULL); /* End of tfc_tpg_port_stat_cit */ /* Start of tfc_tpg_lun_cit */ static struct config_group *target_fabric_make_lun( struct config_group *group, const char *name) { struct se_lun *lun; struct se_portal_group *se_tpg = container_of(group, struct se_portal_group, tpg_lun_group); struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; struct config_group *lun_cg = NULL, *port_stat_grp = NULL; unsigned long unpacked_lun; int errno; if (strstr(name, "lun_") != name) { pr_err("Unable to locate \'_\" in" " \"lun_$LUN_NUMBER\"\n"); return ERR_PTR(-EINVAL); } errno = kstrtoul(name + 4, 0, &unpacked_lun); if (errno) return ERR_PTR(errno); if (unpacked_lun > UINT_MAX) return ERR_PTR(-EINVAL); lun = core_get_lun_from_tpg(se_tpg, unpacked_lun); if (!lun) return ERR_PTR(-EINVAL); lun_cg = &lun->lun_group; lun_cg->default_groups = kmalloc(sizeof(struct config_group *) * 2, GFP_KERNEL); if (!lun_cg->default_groups) { pr_err("Unable to allocate lun_cg->default_groups\n"); return ERR_PTR(-ENOMEM); } 
config_group_init_type_name(&lun->lun_group, name, &tf->tf_cit_tmpl.tfc_tpg_port_cit); config_group_init_type_name(&lun->port_stat_grps.stat_group, "statistics", &tf->tf_cit_tmpl.tfc_tpg_port_stat_cit); lun_cg->default_groups[0] = &lun->port_stat_grps.stat_group; lun_cg->default_groups[1] = NULL; port_stat_grp = &lun->port_stat_grps.stat_group; port_stat_grp->default_groups = kzalloc(sizeof(struct config_group *) * 4, GFP_KERNEL); if (!port_stat_grp->default_groups) { pr_err("Unable to allocate port_stat_grp->default_groups\n"); errno = -ENOMEM; goto out; } target_stat_setup_port_default_groups(lun); return &lun->lun_group; out: if (lun_cg) kfree(lun_cg->default_groups); return ERR_PTR(errno); } static void target_fabric_drop_lun( struct config_group *group, struct config_item *item) { struct se_lun *lun = container_of(to_config_group(item), struct se_lun, lun_group); struct config_item *df_item; struct config_group *lun_cg, *port_stat_grp; int i; port_stat_grp = &lun->port_stat_grps.stat_group; for (i = 0; port_stat_grp->default_groups[i]; i++) { df_item = &port_stat_grp->default_groups[i]->cg_item; port_stat_grp->default_groups[i] = NULL; config_item_put(df_item); } kfree(port_stat_grp->default_groups); lun_cg = &lun->lun_group; for (i = 0; lun_cg->default_groups[i]; i++) { df_item = &lun_cg->default_groups[i]->cg_item; lun_cg->default_groups[i] = NULL; config_item_put(df_item); } kfree(lun_cg->default_groups); config_item_put(item); } static struct configfs_group_operations target_fabric_lun_group_ops = { .make_group = &target_fabric_make_lun, .drop_item = &target_fabric_drop_lun, }; TF_CIT_SETUP(tpg_lun, NULL, &target_fabric_lun_group_ops, NULL); /* End of tfc_tpg_lun_cit */ /* Start of tfc_tpg_attrib_cit */ CONFIGFS_EATTR_OPS(target_fabric_tpg_attrib, se_portal_group, tpg_attrib_group); static struct configfs_item_operations target_fabric_tpg_attrib_item_ops = { .show_attribute = target_fabric_tpg_attrib_attr_show, .store_attribute = 
target_fabric_tpg_attrib_attr_store, }; TF_CIT_SETUP(tpg_attrib, &target_fabric_tpg_attrib_item_ops, NULL, NULL); /* End of tfc_tpg_attrib_cit */ /* Start of tfc_tpg_auth_cit */ CONFIGFS_EATTR_OPS(target_fabric_tpg_auth, se_portal_group, tpg_auth_group); static struct configfs_item_operations target_fabric_tpg_auth_item_ops = { .show_attribute = target_fabric_tpg_auth_attr_show, .store_attribute = target_fabric_tpg_auth_attr_store, }; TF_CIT_SETUP(tpg_auth, &target_fabric_tpg_auth_item_ops, NULL, NULL); /* End of tfc_tpg_attrib_cit */ /* Start of tfc_tpg_param_cit */ CONFIGFS_EATTR_OPS(target_fabric_tpg_param, se_portal_group, tpg_param_group); static struct configfs_item_operations target_fabric_tpg_param_item_ops = { .show_attribute = target_fabric_tpg_param_attr_show, .store_attribute = target_fabric_tpg_param_attr_store, }; TF_CIT_SETUP(tpg_param, &target_fabric_tpg_param_item_ops, NULL, NULL); /* End of tfc_tpg_param_cit */ /* Start of tfc_tpg_base_cit */ /* * For use with TF_TPG_ATTR() and TF_TPG_ATTR_RO() */ CONFIGFS_EATTR_OPS(target_fabric_tpg, se_portal_group, tpg_group); static void target_fabric_tpg_release(struct config_item *item) { struct se_portal_group *se_tpg = container_of(to_config_group(item), struct se_portal_group, tpg_group); struct se_wwn *wwn = se_tpg->se_tpg_wwn; struct target_fabric_configfs *tf = wwn->wwn_tf; tf->tf_ops.fabric_drop_tpg(se_tpg); } static struct configfs_item_operations target_fabric_tpg_base_item_ops = { .release = target_fabric_tpg_release, .show_attribute = target_fabric_tpg_attr_show, .store_attribute = target_fabric_tpg_attr_store, }; TF_CIT_SETUP(tpg_base, &target_fabric_tpg_base_item_ops, NULL, NULL); /* End of tfc_tpg_base_cit */ /* Start of tfc_tpg_cit */ static struct config_group *target_fabric_make_tpg( struct config_group *group, const char *name) { struct se_wwn *wwn = container_of(group, struct se_wwn, wwn_group); struct target_fabric_configfs *tf = wwn->wwn_tf; struct se_portal_group *se_tpg; if 
(!tf->tf_ops.fabric_make_tpg) { pr_err("tf->tf_ops.fabric_make_tpg is NULL\n"); return ERR_PTR(-ENOSYS); } se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name); if (!se_tpg || IS_ERR(se_tpg)) return ERR_PTR(-EINVAL); /* * Setup default groups from pre-allocated se_tpg->tpg_default_groups */ se_tpg->tpg_group.default_groups = se_tpg->tpg_default_groups; se_tpg->tpg_group.default_groups[0] = &se_tpg->tpg_lun_group; se_tpg->tpg_group.default_groups[1] = &se_tpg->tpg_np_group; se_tpg->tpg_group.default_groups[2] = &se_tpg->tpg_acl_group; se_tpg->tpg_group.default_groups[3] = &se_tpg->tpg_attrib_group; se_tpg->tpg_group.default_groups[4] = &se_tpg->tpg_auth_group; se_tpg->tpg_group.default_groups[5] = &se_tpg->tpg_param_group; se_tpg->tpg_group.default_groups[6] = NULL; config_group_init_type_name(&se_tpg->tpg_group, name, &tf->tf_cit_tmpl.tfc_tpg_base_cit); config_group_init_type_name(&se_tpg->tpg_lun_group, "lun", &tf->tf_cit_tmpl.tfc_tpg_lun_cit); config_group_init_type_name(&se_tpg->tpg_np_group, "np", &tf->tf_cit_tmpl.tfc_tpg_np_cit); config_group_init_type_name(&se_tpg->tpg_acl_group, "acls", &tf->tf_cit_tmpl.tfc_tpg_nacl_cit); config_group_init_type_name(&se_tpg->tpg_attrib_group, "attrib", &tf->tf_cit_tmpl.tfc_tpg_attrib_cit); config_group_init_type_name(&se_tpg->tpg_auth_group, "auth", &tf->tf_cit_tmpl.tfc_tpg_auth_cit); config_group_init_type_name(&se_tpg->tpg_param_group, "param", &tf->tf_cit_tmpl.tfc_tpg_param_cit); return &se_tpg->tpg_group; } static void target_fabric_drop_tpg( struct config_group *group, struct config_item *item) { struct se_portal_group *se_tpg = container_of(to_config_group(item), struct se_portal_group, tpg_group); struct config_group *tpg_cg = &se_tpg->tpg_group; struct config_item *df_item; int i; /* * Release default groups, but do not release tpg_cg->default_groups * memory as it is statically allocated at se_tpg->tpg_default_groups. 
*/ for (i = 0; tpg_cg->default_groups[i]; i++) { df_item = &tpg_cg->default_groups[i]->cg_item; tpg_cg->default_groups[i] = NULL; config_item_put(df_item); } config_item_put(item); } static void target_fabric_release_wwn(struct config_item *item) { struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn, wwn_group); struct target_fabric_configfs *tf = wwn->wwn_tf; tf->tf_ops.fabric_drop_wwn(wwn); } static struct configfs_item_operations target_fabric_tpg_item_ops = { .release = target_fabric_release_wwn, }; static struct configfs_group_operations target_fabric_tpg_group_ops = { .make_group = target_fabric_make_tpg, .drop_item = target_fabric_drop_tpg, }; TF_CIT_SETUP(tpg, &target_fabric_tpg_item_ops, &target_fabric_tpg_group_ops, NULL); /* End of tfc_tpg_cit */ /* Start of tfc_wwn_fabric_stats_cit */ /* * This is used as a placeholder for struct se_wwn->fabric_stat_group * to allow fabrics access to ->fabric_stat_group->default_groups[] */ TF_CIT_SETUP(wwn_fabric_stats, NULL, NULL, NULL); /* End of tfc_wwn_fabric_stats_cit */ /* Start of tfc_wwn_cit */ static struct config_group *target_fabric_make_wwn( struct config_group *group, const char *name) { struct target_fabric_configfs *tf = container_of(group, struct target_fabric_configfs, tf_group); struct se_wwn *wwn; if (!tf->tf_ops.fabric_make_wwn) { pr_err("tf->tf_ops.fabric_make_wwn is NULL\n"); return ERR_PTR(-ENOSYS); } wwn = tf->tf_ops.fabric_make_wwn(tf, group, name); if (!wwn || IS_ERR(wwn)) return ERR_PTR(-EINVAL); wwn->wwn_tf = tf; /* * Setup default groups from pre-allocated wwn->wwn_default_groups */ wwn->wwn_group.default_groups = wwn->wwn_default_groups; wwn->wwn_group.default_groups[0] = &wwn->fabric_stat_group; wwn->wwn_group.default_groups[1] = NULL; config_group_init_type_name(&wwn->wwn_group, name, &tf->tf_cit_tmpl.tfc_tpg_cit); config_group_init_type_name(&wwn->fabric_stat_group, "fabric_statistics", &tf->tf_cit_tmpl.tfc_wwn_fabric_stats_cit); return &wwn->wwn_group; } static void 
target_fabric_drop_wwn( struct config_group *group, struct config_item *item) { struct se_wwn *wwn = container_of(to_config_group(item), struct se_wwn, wwn_group); struct config_item *df_item; struct config_group *cg = &wwn->wwn_group; int i; for (i = 0; cg->default_groups[i]; i++) { df_item = &cg->default_groups[i]->cg_item; cg->default_groups[i] = NULL; config_item_put(df_item); } config_item_put(item); } static struct configfs_group_operations target_fabric_wwn_group_ops = { .make_group = target_fabric_make_wwn, .drop_item = target_fabric_drop_wwn, }; /* * For use with TF_WWN_ATTR() and TF_WWN_ATTR_RO() */ CONFIGFS_EATTR_OPS(target_fabric_wwn, target_fabric_configfs, tf_group); static struct configfs_item_operations target_fabric_wwn_item_ops = { .show_attribute = target_fabric_wwn_attr_show, .store_attribute = target_fabric_wwn_attr_store, }; TF_CIT_SETUP(wwn, &target_fabric_wwn_item_ops, &target_fabric_wwn_group_ops, NULL); /* End of tfc_wwn_cit */ /* Start of tfc_discovery_cit */ CONFIGFS_EATTR_OPS(target_fabric_discovery, target_fabric_configfs, tf_disc_group); static struct configfs_item_operations target_fabric_discovery_item_ops = { .show_attribute = target_fabric_discovery_attr_show, .store_attribute = target_fabric_discovery_attr_store, }; TF_CIT_SETUP(discovery, &target_fabric_discovery_item_ops, NULL, NULL); /* End of tfc_discovery_cit */ int target_fabric_setup_cits(struct target_fabric_configfs *tf) { target_fabric_setup_discovery_cit(tf); target_fabric_setup_wwn_cit(tf); target_fabric_setup_wwn_fabric_stats_cit(tf); target_fabric_setup_tpg_cit(tf); target_fabric_setup_tpg_base_cit(tf); target_fabric_setup_tpg_port_cit(tf); target_fabric_setup_tpg_port_stat_cit(tf); target_fabric_setup_tpg_lun_cit(tf); target_fabric_setup_tpg_np_cit(tf); target_fabric_setup_tpg_np_base_cit(tf); target_fabric_setup_tpg_attrib_cit(tf); target_fabric_setup_tpg_auth_cit(tf); target_fabric_setup_tpg_param_cit(tf); target_fabric_setup_tpg_nacl_cit(tf); 
target_fabric_setup_tpg_nacl_base_cit(tf); target_fabric_setup_tpg_nacl_attrib_cit(tf); target_fabric_setup_tpg_nacl_auth_cit(tf); target_fabric_setup_tpg_nacl_param_cit(tf); target_fabric_setup_tpg_nacl_stat_cit(tf); target_fabric_setup_tpg_mappedlun_cit(tf); target_fabric_setup_tpg_mappedlun_stat_cit(tf); return 0; }
gpl-2.0
SM-G920P-MM/G920P-MM
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
1739
46851
/* * Freescale GPMI NAND Flash Driver * * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. * Copyright (C) 2008 Embedded Alley Solutions, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/clk.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/mtd/partitions.h> #include <linux/pinctrl/consumer.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_mtd.h> #include "gpmi-nand.h" /* Resource names for the GPMI NAND driver. */ #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand" #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch" #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch" /* add our owner bbt descriptor */ static uint8_t scan_ff_pattern[] = { 0xff }; static struct nand_bbt_descr gpmi_bbt_descr = { .options = 0, .offs = 0, .len = 1, .pattern = scan_ff_pattern }; /* We will use all the (page + OOB). */ static struct nand_ecclayout gpmi_hw_ecclayout = { .eccbytes = 0, .eccpos = { 0, }, .oobfree = { {.offset = 0, .length = 0} } }; static irqreturn_t bch_irq(int irq, void *cookie) { struct gpmi_nand_data *this = cookie; gpmi_clear_bch(this); complete(&this->bch_done); return IRQ_HANDLED; } /* * Calculate the ECC strength by hand: * E : The ECC strength. * G : the length of Galois Field. 
* N : The chunk count of per page. * O : the oobsize of the NAND chip. * M : the metasize of per page. * * The formula is : * E * G * N * ------------ <= (O - M) * 8 * * So, we get E by: * (O - M) * 8 * E <= ------------- * G * N */ static inline int get_ecc_strength(struct gpmi_nand_data *this) { struct bch_geometry *geo = &this->bch_geometry; struct mtd_info *mtd = &this->mtd; int ecc_strength; ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8) / (geo->gf_len * geo->ecc_chunk_count); /* We need the minor even number. */ return round_down(ecc_strength, 2); } static inline bool gpmi_check_ecc(struct gpmi_nand_data *this) { struct bch_geometry *geo = &this->bch_geometry; /* Do the sanity check. */ if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) { /* The mx23/mx28 only support the GF13. */ if (geo->gf_len == 14) return false; if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX) return false; } else if (GPMI_IS_MX6Q(this)) { if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX) return false; } return true; } int common_nfc_set_geometry(struct gpmi_nand_data *this) { struct bch_geometry *geo = &this->bch_geometry; struct mtd_info *mtd = &this->mtd; unsigned int metadata_size; unsigned int status_size; unsigned int block_mark_bit_offset; /* * The size of the metadata can be changed, though we set it to 10 * bytes now. But it can't be too large, because we have to save * enough space for BCH. */ geo->metadata_size = 10; /* The default for the length of Galois Field. */ geo->gf_len = 13; /* The default for chunk size. */ geo->ecc_chunk_size = 512; while (geo->ecc_chunk_size < mtd->oobsize) { geo->ecc_chunk_size *= 2; /* keep C >= O */ geo->gf_len = 14; } geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size; /* We use the same ECC strength for all chunks. */ geo->ecc_strength = get_ecc_strength(this); if (!gpmi_check_ecc(this)) { dev_err(this->dev, "We can not support this nand chip." 
" Its required ecc strength(%d) is beyond our" " capability(%d).\n", geo->ecc_strength, (GPMI_IS_MX6Q(this) ? MX6_ECC_STRENGTH_MAX : MXS_ECC_STRENGTH_MAX)); return -EINVAL; } geo->page_size = mtd->writesize + mtd->oobsize; geo->payload_size = mtd->writesize; /* * The auxiliary buffer contains the metadata and the ECC status. The * metadata is padded to the nearest 32-bit boundary. The ECC status * contains one byte for every ECC chunk, and is also padded to the * nearest 32-bit boundary. */ metadata_size = ALIGN(geo->metadata_size, 4); status_size = ALIGN(geo->ecc_chunk_count, 4); geo->auxiliary_size = metadata_size + status_size; geo->auxiliary_status_offset = metadata_size; if (!this->swap_block_mark) return 0; /* * We need to compute the byte and bit offsets of * the physical block mark within the ECC-based view of the page. * * NAND chip with 2K page shows below: * (Block Mark) * | | * | D | * |<---->| * V V * +---+----------+-+----------+-+----------+-+----------+-+ * | M | data |E| data |E| data |E| data |E| * +---+----------+-+----------+-+----------+-+----------+-+ * * The position of block mark moves forward in the ECC-based view * of page, and the delta is: * * E * G * (N - 1) * D = (---------------- + M) * 8 * * With the formula to compute the ECC strength, and the condition * : C >= O (C is the ecc chunk size) * * It's easy to deduce to the following result: * * E * G (O - M) C - M C - M * ----------- <= ------- <= -------- < --------- * 8 N N (N - 1) * * So, we get: * * E * G * (N - 1) * D = (---------------- + M) < C * 8 * * The above inequality means the position of block mark * within the ECC-based view of the page is still in the data chunk, * and it's NOT in the ECC bits of the chunk. 
* * Use the following to compute the bit position of the * physical block mark within the ECC-based view of the page: * (page_size - D) * 8 * * --Huang Shijie */ block_mark_bit_offset = mtd->writesize * 8 - (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1) + geo->metadata_size * 8); geo->block_mark_byte_offset = block_mark_bit_offset / 8; geo->block_mark_bit_offset = block_mark_bit_offset % 8; return 0; } struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) { int chipnr = this->current_chip; return this->dma_chans[chipnr]; } /* Can we use the upper's buffer directly for DMA? */ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr) { struct scatterlist *sgl = &this->data_sgl; int ret; this->direct_dma_map_ok = true; /* first try to map the upper buffer directly */ sg_init_one(sgl, this->upper_buf, this->upper_len); ret = dma_map_sg(this->dev, sgl, 1, dr); if (ret == 0) { /* We have to use our own DMA buffer. */ sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE); if (dr == DMA_TO_DEVICE) memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len); ret = dma_map_sg(this->dev, sgl, 1, dr); if (ret == 0) pr_err("DMA mapping failed.\n"); this->direct_dma_map_ok = false; } } /* This will be called after the DMA operation is finished. */ static void dma_irq_callback(void *param) { struct gpmi_nand_data *this = param; struct completion *dma_c = &this->dma_done; switch (this->dma_type) { case DMA_FOR_COMMAND: dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE); break; case DMA_FOR_READ_DATA: dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE); if (this->direct_dma_map_ok == false) memcpy(this->upper_buf, this->data_buffer_dma, this->upper_len); break; case DMA_FOR_WRITE_DATA: dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE); break; case DMA_FOR_READ_ECC_PAGE: case DMA_FOR_WRITE_ECC_PAGE: /* We have to wait the BCH interrupt to finish. 
*/ break; default: pr_err("in wrong DMA operation.\n"); } complete(dma_c); } int start_dma_without_bch_irq(struct gpmi_nand_data *this, struct dma_async_tx_descriptor *desc) { struct completion *dma_c = &this->dma_done; int err; init_completion(dma_c); desc->callback = dma_irq_callback; desc->callback_param = this; dmaengine_submit(desc); dma_async_issue_pending(get_dma_chan(this)); /* Wait for the interrupt from the DMA block. */ err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); if (!err) { pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type); gpmi_dump_info(this); return -ETIMEDOUT; } return 0; } /* * This function is used in BCH reading or BCH writing pages. * It will wait for the BCH interrupt as long as ONE second. * Actually, we must wait for two interrupts : * [1] firstly the DMA interrupt and * [2] secondly the BCH interrupt. */ int start_dma_with_bch_irq(struct gpmi_nand_data *this, struct dma_async_tx_descriptor *desc) { struct completion *bch_c = &this->bch_done; int err; /* Prepare to receive an interrupt from the BCH block. */ init_completion(bch_c); /* start the DMA */ start_dma_without_bch_irq(this, desc); /* Wait for the interrupt from the BCH block. 
*/ err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000)); if (!err) { pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type); gpmi_dump_info(this); return -ETIMEDOUT; } return 0; } static int acquire_register_block(struct gpmi_nand_data *this, const char *res_name) { struct platform_device *pdev = this->pdev; struct resources *res = &this->resources; struct resource *r; void __iomem *p; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); if (!r) { pr_err("Can't get resource for %s\n", res_name); return -ENXIO; } p = ioremap(r->start, resource_size(r)); if (!p) { pr_err("Can't remap %s\n", res_name); return -ENOMEM; } if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME)) res->gpmi_regs = p; else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME)) res->bch_regs = p; else pr_err("unknown resource name : %s\n", res_name); return 0; } static void release_register_block(struct gpmi_nand_data *this) { struct resources *res = &this->resources; if (res->gpmi_regs) iounmap(res->gpmi_regs); if (res->bch_regs) iounmap(res->bch_regs); res->gpmi_regs = NULL; res->bch_regs = NULL; } static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h) { struct platform_device *pdev = this->pdev; struct resources *res = &this->resources; const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME; struct resource *r; int err; r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name); if (!r) { pr_err("Can't get resource for %s\n", res_name); return -ENXIO; } err = request_irq(r->start, irq_h, 0, res_name, this); if (err) { pr_err("Can't own %s\n", res_name); return err; } res->bch_low_interrupt = r->start; res->bch_high_interrupt = r->end; return 0; } static void release_bch_irq(struct gpmi_nand_data *this) { struct resources *res = &this->resources; int i = res->bch_low_interrupt; for (; i <= res->bch_high_interrupt; i++) free_irq(i, this); } static void release_dma_channels(struct gpmi_nand_data *this) { unsigned int i; for (i = 0; 
i < DMA_CHANS; i++) if (this->dma_chans[i]) { dma_release_channel(this->dma_chans[i]); this->dma_chans[i] = NULL; } } static int acquire_dma_channels(struct gpmi_nand_data *this) { struct platform_device *pdev = this->pdev; struct dma_chan *dma_chan; /* request dma channel */ dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx"); if (!dma_chan) { pr_err("Failed to request DMA channel.\n"); goto acquire_err; } this->dma_chans[0] = dma_chan; return 0; acquire_err: release_dma_channels(this); return -EINVAL; } static void gpmi_put_clks(struct gpmi_nand_data *this) { struct resources *r = &this->resources; struct clk *clk; int i; for (i = 0; i < GPMI_CLK_MAX; i++) { clk = r->clock[i]; if (clk) { clk_put(clk); r->clock[i] = NULL; } } } static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = { "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch", }; static int gpmi_get_clks(struct gpmi_nand_data *this) { struct resources *r = &this->resources; char **extra_clks = NULL; struct clk *clk; int i; /* The main clock is stored in the first. */ r->clock[0] = clk_get(this->dev, "gpmi_io"); if (IS_ERR(r->clock[0])) goto err_clock; /* Get extra clocks */ if (GPMI_IS_MX6Q(this)) extra_clks = extra_clks_for_mx6q; if (!extra_clks) return 0; for (i = 1; i < GPMI_CLK_MAX; i++) { if (extra_clks[i - 1] == NULL) break; clk = clk_get(this->dev, extra_clks[i - 1]); if (IS_ERR(clk)) goto err_clock; r->clock[i] = clk; } if (GPMI_IS_MX6Q(this)) /* * Set the default value for the gpmi clock in mx6q: * * If you want to use the ONFI nand which is in the * Synchronous Mode, you should change the clock as you need. 
*/ clk_set_rate(r->clock[0], 22000000); return 0; err_clock: dev_dbg(this->dev, "failed in finding the clocks.\n"); gpmi_put_clks(this); return -ENOMEM; } static int acquire_resources(struct gpmi_nand_data *this) { struct pinctrl *pinctrl; int ret; ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME); if (ret) goto exit_regs; ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME); if (ret) goto exit_regs; ret = acquire_bch_irq(this, bch_irq); if (ret) goto exit_regs; ret = acquire_dma_channels(this); if (ret) goto exit_dma_channels; pinctrl = devm_pinctrl_get_select_default(&this->pdev->dev); if (IS_ERR(pinctrl)) { ret = PTR_ERR(pinctrl); goto exit_pin; } ret = gpmi_get_clks(this); if (ret) goto exit_clock; return 0; exit_clock: exit_pin: release_dma_channels(this); exit_dma_channels: release_bch_irq(this); exit_regs: release_register_block(this); return ret; } static void release_resources(struct gpmi_nand_data *this) { gpmi_put_clks(this); release_register_block(this); release_bch_irq(this); release_dma_channels(this); } static int init_hardware(struct gpmi_nand_data *this) { int ret; /* * This structure contains the "safe" GPMI timing that should succeed * with any NAND Flash device * (although, with less-than-optimal performance). */ struct nand_timing safe_timing = { .data_setup_in_ns = 80, .data_hold_in_ns = 60, .address_setup_in_ns = 25, .gpmi_sample_delay_in_ns = 6, .tREA_in_ns = -1, .tRLOH_in_ns = -1, .tRHOH_in_ns = -1, }; /* Initialize the hardwares. 
*/ ret = gpmi_init(this); if (ret) return ret; this->timing = safe_timing; return 0; } static int read_page_prepare(struct gpmi_nand_data *this, void *destination, unsigned length, void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, void **use_virt, dma_addr_t *use_phys) { struct device *dev = this->dev; if (virt_addr_valid(destination)) { dma_addr_t dest_phys; dest_phys = dma_map_single(dev, destination, length, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dest_phys)) { if (alt_size < length) { pr_err("%s, Alternate buffer is too small\n", __func__); return -ENOMEM; } goto map_failed; } *use_virt = destination; *use_phys = dest_phys; this->direct_dma_map_ok = true; return 0; } map_failed: *use_virt = alt_virt; *use_phys = alt_phys; this->direct_dma_map_ok = false; return 0; } static inline void read_page_end(struct gpmi_nand_data *this, void *destination, unsigned length, void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, void *used_virt, dma_addr_t used_phys) { if (this->direct_dma_map_ok) dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE); } static inline void read_page_swap_end(struct gpmi_nand_data *this, void *destination, unsigned length, void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, void *used_virt, dma_addr_t used_phys) { if (!this->direct_dma_map_ok) memcpy(destination, alt_virt, length); } static int send_page_prepare(struct gpmi_nand_data *this, const void *source, unsigned length, void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, const void **use_virt, dma_addr_t *use_phys) { struct device *dev = this->dev; if (virt_addr_valid(source)) { dma_addr_t source_phys; source_phys = dma_map_single(dev, (void *)source, length, DMA_TO_DEVICE); if (dma_mapping_error(dev, source_phys)) { if (alt_size < length) { pr_err("%s, Alternate buffer is too small\n", __func__); return -ENOMEM; } goto map_failed; } *use_virt = source; *use_phys = source_phys; return 0; } map_failed: /* * Copy the content of the source buffer into the 
alternate * buffer and set up the return values accordingly. */ memcpy(alt_virt, source, length); *use_virt = alt_virt; *use_phys = alt_phys; return 0; } static void send_page_end(struct gpmi_nand_data *this, const void *source, unsigned length, void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, const void *used_virt, dma_addr_t used_phys) { struct device *dev = this->dev; if (used_virt == source) dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE); } static void gpmi_free_dma_buffer(struct gpmi_nand_data *this) { struct device *dev = this->dev; if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt)) dma_free_coherent(dev, this->page_buffer_size, this->page_buffer_virt, this->page_buffer_phys); kfree(this->cmd_buffer); kfree(this->data_buffer_dma); this->cmd_buffer = NULL; this->data_buffer_dma = NULL; this->page_buffer_virt = NULL; this->page_buffer_size = 0; } /* Allocate the DMA buffers */ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this) { struct bch_geometry *geo = &this->bch_geometry; struct device *dev = this->dev; /* [1] Allocate a command buffer. PAGE_SIZE is enough. */ this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL); if (this->cmd_buffer == NULL) goto error_alloc; /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */ this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL); if (this->data_buffer_dma == NULL) goto error_alloc; /* * [3] Allocate the page buffer. * * Both the payload buffer and the auxiliary buffer must appear on * 32-bit boundaries. We presume the size of the payload buffer is a * power of two and is much larger than four, which guarantees the * auxiliary buffer will appear on a 32-bit boundary. */ this->page_buffer_size = geo->payload_size + geo->auxiliary_size; this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size, &this->page_buffer_phys, GFP_DMA); if (!this->page_buffer_virt) goto error_alloc; /* Slice up the page buffer. 
*/ this->payload_virt = this->page_buffer_virt; this->payload_phys = this->page_buffer_phys; this->auxiliary_virt = this->payload_virt + geo->payload_size; this->auxiliary_phys = this->payload_phys + geo->payload_size; return 0; error_alloc: gpmi_free_dma_buffer(this); pr_err("Error allocating DMA buffers!\n"); return -ENOMEM; } static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; int ret; /* * Every operation begins with a command byte and a series of zero or * more address bytes. These are distinguished by either the Address * Latch Enable (ALE) or Command Latch Enable (CLE) signals being * asserted. When MTD is ready to execute the command, it will deassert * both latch enables. * * Rather than run a separate DMA operation for every single byte, we * queue them up and run a single DMA operation for the entire series * of command and data bytes. NAND_CMD_NONE means the END of the queue. 
*/ if ((ctrl & (NAND_ALE | NAND_CLE))) { if (data != NAND_CMD_NONE) this->cmd_buffer[this->command_length++] = data; return; } if (!this->command_length) return; ret = gpmi_send_command(this); if (ret) pr_err("Chip: %u, Error %d\n", this->current_chip, ret); this->command_length = 0; } static int gpmi_dev_ready(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; return gpmi_is_ready(this, this->current_chip); } static void gpmi_select_chip(struct mtd_info *mtd, int chipnr) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; if ((this->current_chip < 0) && (chipnr >= 0)) gpmi_begin(this); else if ((this->current_chip >= 0) && (chipnr < 0)) gpmi_end(this); this->current_chip = chipnr; } static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; pr_debug("len is %d\n", len); this->upper_buf = buf; this->upper_len = len; gpmi_read_data(this); } static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; pr_debug("len is %d\n", len); this->upper_buf = (uint8_t *)buf; this->upper_len = len; gpmi_send_data(this); } static uint8_t gpmi_read_byte(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; uint8_t *buf = this->data_buffer_dma; gpmi_read_buf(mtd, buf, 1); return buf[0]; } /* * Handles block mark swapping. * It can be called in swapping the block mark, or swapping it back, * because the the operations are the same. 
*/ static void block_mark_swapping(struct gpmi_nand_data *this, void *payload, void *auxiliary) { struct bch_geometry *nfc_geo = &this->bch_geometry; unsigned char *p; unsigned char *a; unsigned int bit; unsigned char mask; unsigned char from_data; unsigned char from_oob; if (!this->swap_block_mark) return; /* * If control arrives here, we're swapping. Make some convenience * variables. */ bit = nfc_geo->block_mark_bit_offset; p = payload + nfc_geo->block_mark_byte_offset; a = auxiliary; /* * Get the byte from the data area that overlays the block mark. Since * the ECC engine applies its own view to the bits in the page, the * physical block mark won't (in general) appear on a byte boundary in * the data. */ from_data = (p[0] >> bit) | (p[1] << (8 - bit)); /* Get the byte from the OOB. */ from_oob = a[0]; /* Swap them. */ a[0] = from_data; mask = (0x1 << bit) - 1; p[0] = (p[0] & mask) | (from_oob << bit); mask = ~0 << bit; p[1] = (p[1] & mask) | (from_oob >> (8 - bit)); } static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct gpmi_nand_data *this = chip->priv; struct bch_geometry *nfc_geo = &this->bch_geometry; void *payload_virt; dma_addr_t payload_phys; void *auxiliary_virt; dma_addr_t auxiliary_phys; unsigned int i; unsigned char *status; unsigned int max_bitflips = 0; int ret; pr_debug("page number is : %d\n", page); ret = read_page_prepare(this, buf, mtd->writesize, this->payload_virt, this->payload_phys, nfc_geo->payload_size, &payload_virt, &payload_phys); if (ret) { pr_err("Inadequate DMA buffer\n"); ret = -ENOMEM; return ret; } auxiliary_virt = this->auxiliary_virt; auxiliary_phys = this->auxiliary_phys; /* go! 
*/ ret = gpmi_read_page(this, payload_phys, auxiliary_phys); read_page_end(this, buf, mtd->writesize, this->payload_virt, this->payload_phys, nfc_geo->payload_size, payload_virt, payload_phys); if (ret) { pr_err("Error in ECC-based read: %d\n", ret); return ret; } /* handle the block mark swapping */ block_mark_swapping(this, payload_virt, auxiliary_virt); /* Loop over status bytes, accumulating ECC status. */ status = auxiliary_virt + nfc_geo->auxiliary_status_offset; for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) { if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED)) continue; if (*status == STATUS_UNCORRECTABLE) { mtd->ecc_stats.failed++; continue; } mtd->ecc_stats.corrected += *status; max_bitflips = max_t(unsigned int, max_bitflips, *status); } if (oob_required) { /* * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() * for details about our policy for delivering the OOB. * * We fill the caller's buffer with set bits, and then copy the * block mark to th caller's buffer. Note that, if block mark * swapping was necessary, it has already been done, so we can * rely on the first byte of the auxiliary buffer to contain * the block mark. */ memset(chip->oob_poi, ~0, mtd->oobsize); chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0]; } read_page_swap_end(this, buf, mtd->writesize, this->payload_virt, this->payload_phys, nfc_geo->payload_size, payload_virt, payload_phys); return max_bitflips; } static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required) { struct gpmi_nand_data *this = chip->priv; struct bch_geometry *nfc_geo = &this->bch_geometry; const void *payload_virt; dma_addr_t payload_phys; const void *auxiliary_virt; dma_addr_t auxiliary_phys; int ret; pr_debug("ecc write page.\n"); if (this->swap_block_mark) { /* * If control arrives here, we're doing block mark swapping. * Since we can't modify the caller's buffers, we must copy them * into our own. 
*/ memcpy(this->payload_virt, buf, mtd->writesize); payload_virt = this->payload_virt; payload_phys = this->payload_phys; memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size); auxiliary_virt = this->auxiliary_virt; auxiliary_phys = this->auxiliary_phys; /* Handle block mark swapping. */ block_mark_swapping(this, (void *) payload_virt, (void *) auxiliary_virt); } else { /* * If control arrives here, we're not doing block mark swapping, * so we can to try and use the caller's buffers. */ ret = send_page_prepare(this, buf, mtd->writesize, this->payload_virt, this->payload_phys, nfc_geo->payload_size, &payload_virt, &payload_phys); if (ret) { pr_err("Inadequate payload DMA buffer\n"); return 0; } ret = send_page_prepare(this, chip->oob_poi, mtd->oobsize, this->auxiliary_virt, this->auxiliary_phys, nfc_geo->auxiliary_size, &auxiliary_virt, &auxiliary_phys); if (ret) { pr_err("Inadequate auxiliary DMA buffer\n"); goto exit_auxiliary; } } /* Ask the NFC. */ ret = gpmi_send_page(this, payload_phys, auxiliary_phys); if (ret) pr_err("Error in ECC-based write: %d\n", ret); if (!this->swap_block_mark) { send_page_end(this, chip->oob_poi, mtd->oobsize, this->auxiliary_virt, this->auxiliary_phys, nfc_geo->auxiliary_size, auxiliary_virt, auxiliary_phys); exit_auxiliary: send_page_end(this, buf, mtd->writesize, this->payload_virt, this->payload_phys, nfc_geo->payload_size, payload_virt, payload_phys); } return 0; } /* * There are several places in this driver where we have to handle the OOB and * block marks. This is the function where things are the most complicated, so * this is where we try to explain it all. All the other places refer back to * here. * * These are the rules, in order of decreasing importance: * * 1) Nothing the caller does can be allowed to imperil the block mark. * * 2) In read operations, the first byte of the OOB we return must reflect the * true state of the block mark, no matter where that block mark appears in * the physical page. 
* * 3) ECC-based read operations return an OOB full of set bits (since we never * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads * return). * * 4) "Raw" read operations return a direct view of the physical bytes in the * page, using the conventional definition of which bytes are data and which * are OOB. This gives the caller a way to see the actual, physical bytes * in the page, without the distortions applied by our ECC engine. * * * What we do for this specific read operation depends on two questions: * * 1) Are we doing a "raw" read, or an ECC-based read? * * 2) Are we using block mark swapping or transcription? * * There are four cases, illustrated by the following Karnaugh map: * * | Raw | ECC-based | * -------------+-------------------------+-------------------------+ * | Read the conventional | | * | OOB at the end of the | | * Swapping | page and return it. It | | * | contains exactly what | | * | we want. | Read the block mark and | * -------------+-------------------------+ return it in a buffer | * | Read the conventional | full of set bits. | * | OOB at the end of the | | * | page and also the block | | * Transcribing | mark in the metadata. | | * | Copy the block mark | | * | into the first byte of | | * | the OOB. | | * -------------+-------------------------+-------------------------+ * * Note that we break rule #4 in the Transcribing/Raw case because we're not * giving an accurate view of the actual, physical bytes in the page (we're * overwriting the block mark). That's OK because it's more important to follow * rule #2. * * It turns out that knowing whether we want an "ECC-based" or "raw" read is not * easy. When reading a page, for example, the NAND Flash MTD code calls our * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an * ECC-based or raw view of the page is implicit in which function it calls * (there is a similar pair of ECC-based/raw functions for writing). 
* * FIXME: The following paragraph is incorrect, now that there exist * ecc.read_oob_raw and ecc.write_oob_raw functions. * * Since MTD assumes the OOB is not covered by ECC, there is no pair of * ECC-based/raw functions for reading or or writing the OOB. The fact that the * caller wants an ECC-based or raw view of the page is not propagated down to * this driver. */ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) { struct gpmi_nand_data *this = chip->priv; pr_debug("page number is %d\n", page); /* clear the OOB buffer */ memset(chip->oob_poi, ~0, mtd->oobsize); /* Read out the conventional OOB. */ chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); /* * Now, we want to make sure the block mark is correct. In the * Swapping/Raw case, we already have it. Otherwise, we need to * explicitly read it. */ if (!this->swap_block_mark) { /* Read the block mark into the first byte of the OOB buffer. */ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); chip->oob_poi[0] = chip->read_byte(mtd); } return 0; } static int gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) { /* * The BCH will use all the (page + oob). * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob. * But it can not stop some ioctls such MEMWRITEOOB which uses * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit * these ioctls too. */ return -EPERM; } static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; int block, ret = 0; uint8_t *block_mark; int column, page, status, chipnr; /* Get block number */ block = (int)(ofs >> chip->bbt_erase_shift); if (chip->bbt) chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); /* Do we have a flash based bad block table ? 
*/ if (chip->bbt_options & NAND_BBT_USE_FLASH) ret = nand_update_bbt(mtd, ofs); else { chipnr = (int)(ofs >> chip->chip_shift); chip->select_chip(mtd, chipnr); column = this->swap_block_mark ? mtd->writesize : 0; /* Write the block mark. */ block_mark = this->data_buffer_dma; block_mark[0] = 0; /* bad block marker */ /* Shift to get page */ page = (int)(ofs >> chip->page_shift); chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page); chip->write_buf(mtd, block_mark, 1); chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); status = chip->waitfunc(mtd, chip); if (status & NAND_STATUS_FAIL) ret = -EIO; chip->select_chip(mtd, -1); } if (!ret) mtd->ecc_stats.badblocks++; return ret; } static int nand_boot_set_geometry(struct gpmi_nand_data *this) { struct boot_rom_geometry *geometry = &this->rom_geometry; /* * Set the boot block stride size. * * In principle, we should be reading this from the OTP bits, since * that's where the ROM is going to get it. In fact, we don't have any * way to read the OTP bits, so we go with the default and hope for the * best. */ geometry->stride_size_in_pages = 64; /* * Set the search area stride exponent. * * In principle, we should be reading this from the OTP bits, since * that's where the ROM is going to get it. In fact, we don't have any * way to read the OTP bits, so we go with the default and hope for the * best. */ geometry->search_area_stride_exponent = 2; return 0; } static const char *fingerprint = "STMP"; static int mx23_check_transcription_stamp(struct gpmi_nand_data *this) { struct boot_rom_geometry *rom_geo = &this->rom_geometry; struct device *dev = this->dev; struct mtd_info *mtd = &this->mtd; struct nand_chip *chip = &this->nand; unsigned int search_area_size_in_strides; unsigned int stride; unsigned int page; uint8_t *buffer = chip->buffers->databuf; int saved_chip_number; int found_an_ncb_fingerprint = false; /* Compute the number of strides in a search area. 
*/ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; saved_chip_number = this->current_chip; chip->select_chip(mtd, 0); /* * Loop through the first search area, looking for the NCB fingerprint. */ dev_dbg(dev, "Scanning for an NCB fingerprint...\n"); for (stride = 0; stride < search_area_size_in_strides; stride++) { /* Compute the page addresses. */ page = stride * rom_geo->stride_size_in_pages; dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page); /* * Read the NCB fingerprint. The fingerprint is four bytes long * and starts in the 12th byte of the page. */ chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page); chip->read_buf(mtd, buffer, strlen(fingerprint)); /* Look for the fingerprint. */ if (!memcmp(buffer, fingerprint, strlen(fingerprint))) { found_an_ncb_fingerprint = true; break; } } chip->select_chip(mtd, saved_chip_number); if (found_an_ncb_fingerprint) dev_dbg(dev, "\tFound a fingerprint\n"); else dev_dbg(dev, "\tNo fingerprint found\n"); return found_an_ncb_fingerprint; } /* Writes a transcription stamp. */ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this) { struct device *dev = this->dev; struct boot_rom_geometry *rom_geo = &this->rom_geometry; struct mtd_info *mtd = &this->mtd; struct nand_chip *chip = &this->nand; unsigned int block_size_in_pages; unsigned int search_area_size_in_strides; unsigned int search_area_size_in_pages; unsigned int search_area_size_in_blocks; unsigned int block; unsigned int stride; unsigned int page; uint8_t *buffer = chip->buffers->databuf; int saved_chip_number; int status; /* Compute the search area geometry. 
*/ block_size_in_pages = mtd->erasesize / mtd->writesize; search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; search_area_size_in_pages = search_area_size_in_strides * rom_geo->stride_size_in_pages; search_area_size_in_blocks = (search_area_size_in_pages + (block_size_in_pages - 1)) / block_size_in_pages; dev_dbg(dev, "Search Area Geometry :\n"); dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks); dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides); dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages); /* Select chip 0. */ saved_chip_number = this->current_chip; chip->select_chip(mtd, 0); /* Loop over blocks in the first search area, erasing them. */ dev_dbg(dev, "Erasing the search area...\n"); for (block = 0; block < search_area_size_in_blocks; block++) { /* Compute the page address. */ page = block * block_size_in_pages; /* Erase this block. */ dev_dbg(dev, "\tErasing block 0x%x\n", block); chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page); chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); /* Wait for the erase to finish. */ status = chip->waitfunc(mtd, chip); if (status & NAND_STATUS_FAIL) dev_err(dev, "[%s] Erase failed.\n", __func__); } /* Write the NCB fingerprint into the page buffer. */ memset(buffer, ~0, mtd->writesize); memset(chip->oob_poi, ~0, mtd->oobsize); memcpy(buffer + 12, fingerprint, strlen(fingerprint)); /* Loop through the first search area, writing NCB fingerprints. */ dev_dbg(dev, "Writing NCB fingerprints...\n"); for (stride = 0; stride < search_area_size_in_strides; stride++) { /* Compute the page addresses. */ page = stride * rom_geo->stride_size_in_pages; /* Write the first page of the current stride. */ dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page); chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); chip->ecc.write_page_raw(mtd, chip, buffer, 0); chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); /* Wait for the write to finish. 
*/ status = chip->waitfunc(mtd, chip); if (status & NAND_STATUS_FAIL) dev_err(dev, "[%s] Write failed.\n", __func__); } /* Deselect chip 0. */ chip->select_chip(mtd, saved_chip_number); return 0; } static int mx23_boot_init(struct gpmi_nand_data *this) { struct device *dev = this->dev; struct nand_chip *chip = &this->nand; struct mtd_info *mtd = &this->mtd; unsigned int block_count; unsigned int block; int chipnr; int page; loff_t byte; uint8_t block_mark; int ret = 0; /* * If control arrives here, we can't use block mark swapping, which * means we're forced to use transcription. First, scan for the * transcription stamp. If we find it, then we don't have to do * anything -- the block marks are already transcribed. */ if (mx23_check_transcription_stamp(this)) return 0; /* * If control arrives here, we couldn't find a transcription stamp, so * so we presume the block marks are in the conventional location. */ dev_dbg(dev, "Transcribing bad block marks...\n"); /* Compute the number of blocks in the entire medium. */ block_count = chip->chipsize >> chip->phys_erase_shift; /* * Loop over all the blocks in the medium, transcribing block marks as * we go. */ for (block = 0; block < block_count; block++) { /* * Compute the chip, page and byte addresses for this block's * conventional mark. */ chipnr = block >> (chip->chip_shift - chip->phys_erase_shift); page = block << (chip->phys_erase_shift - chip->page_shift); byte = block << chip->phys_erase_shift; /* Send the command to read the conventional block mark. */ chip->select_chip(mtd, chipnr); chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); block_mark = chip->read_byte(mtd); chip->select_chip(mtd, -1); /* * Check if the block is marked bad. If so, we need to mark it * again, but this time the result will be a mark in the * location where we transcribe block marks. 
*/ if (block_mark != 0xff) { dev_dbg(dev, "Transcribing mark in block %u\n", block); ret = chip->block_markbad(mtd, byte); if (ret) dev_err(dev, "Failed to mark block bad with " "ret %d\n", ret); } } /* Write the stamp that indicates we've transcribed the block marks. */ mx23_write_transcription_stamp(this); return 0; } static int nand_boot_init(struct gpmi_nand_data *this) { nand_boot_set_geometry(this); /* This is ROM arch-specific initilization before the BBT scanning. */ if (GPMI_IS_MX23(this)) return mx23_boot_init(this); return 0; } static int gpmi_set_geometry(struct gpmi_nand_data *this) { int ret; /* Free the temporary DMA memory for reading ID. */ gpmi_free_dma_buffer(this); /* Set up the NFC geometry which is used by BCH. */ ret = bch_set_geometry(this); if (ret) { pr_err("Error setting BCH geometry : %d\n", ret); return ret; } /* Alloc the new DMA buffers according to the pagesize and oobsize */ return gpmi_alloc_dma_buffer(this); } static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this) { int ret; /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ if (GPMI_IS_MX23(this)) this->swap_block_mark = false; else this->swap_block_mark = true; /* Set up the medium geometry */ ret = gpmi_set_geometry(this); if (ret) return ret; /* Adjust the ECC strength according to the chip. */ this->nand.ecc.strength = this->bch_geometry.ecc_strength; this->mtd.ecc_strength = this->bch_geometry.ecc_strength; this->mtd.bitflip_threshold = this->bch_geometry.ecc_strength; /* NAND boot init, depends on the gpmi_set_geometry(). */ return nand_boot_init(this); } static int gpmi_scan_bbt(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; int ret; /* Prepare for the BBT scan. */ ret = gpmi_pre_bbt_scan(this); if (ret) return ret; /* * Can we enable the extra features? such as EDO or Sync mode. * * We do not check the return value now. 
That's means if we fail in * enable the extra features, we still can run in the normal way. */ gpmi_extra_init(this); /* use the default BBT implementation */ return nand_default_bbt(mtd); } static void gpmi_nfc_exit(struct gpmi_nand_data *this) { nand_release(&this->mtd); gpmi_free_dma_buffer(this); } static int gpmi_nfc_init(struct gpmi_nand_data *this) { struct mtd_info *mtd = &this->mtd; struct nand_chip *chip = &this->nand; struct mtd_part_parser_data ppdata = {}; int ret; /* init current chip */ this->current_chip = -1; /* init the MTD data structures */ mtd->priv = chip; mtd->name = "gpmi-nand"; mtd->owner = THIS_MODULE; /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */ chip->priv = this; chip->select_chip = gpmi_select_chip; chip->cmd_ctrl = gpmi_cmd_ctrl; chip->dev_ready = gpmi_dev_ready; chip->read_byte = gpmi_read_byte; chip->read_buf = gpmi_read_buf; chip->write_buf = gpmi_write_buf; chip->ecc.read_page = gpmi_ecc_read_page; chip->ecc.write_page = gpmi_ecc_write_page; chip->ecc.read_oob = gpmi_ecc_read_oob; chip->ecc.write_oob = gpmi_ecc_write_oob; chip->scan_bbt = gpmi_scan_bbt; chip->badblock_pattern = &gpmi_bbt_descr; chip->block_markbad = gpmi_block_markbad; chip->options |= NAND_NO_SUBPAGE_WRITE; chip->ecc.mode = NAND_ECC_HW; chip->ecc.size = 1; chip->ecc.strength = 8; chip->ecc.layout = &gpmi_hw_ecclayout; if (of_get_nand_on_flash_bbt(this->dev->of_node)) chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */ this->bch_geometry.payload_size = 1024; this->bch_geometry.auxiliary_size = 128; ret = gpmi_alloc_dma_buffer(this); if (ret) goto err_out; ret = nand_scan(mtd, 1); if (ret) { pr_err("Chip scan failed\n"); goto err_out; } ppdata.of_node = this->pdev->dev.of_node; ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); if (ret) goto err_out; return 0; err_out: gpmi_nfc_exit(this); return ret; } static const struct platform_device_id gpmi_ids[] 
= { { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, }, { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, }, { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, }, {}, }; static const struct of_device_id gpmi_nand_id_table[] = { { .compatible = "fsl,imx23-gpmi-nand", .data = (void *)&gpmi_ids[IS_MX23] }, { .compatible = "fsl,imx28-gpmi-nand", .data = (void *)&gpmi_ids[IS_MX28] }, { .compatible = "fsl,imx6q-gpmi-nand", .data = (void *)&gpmi_ids[IS_MX6Q] }, {} }; MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); static int gpmi_nand_probe(struct platform_device *pdev) { struct gpmi_nand_data *this; const struct of_device_id *of_id; int ret; of_id = of_match_device(gpmi_nand_id_table, &pdev->dev); if (of_id) { pdev->id_entry = of_id->data; } else { pr_err("Failed to find the right device id.\n"); return -ENOMEM; } this = kzalloc(sizeof(*this), GFP_KERNEL); if (!this) { pr_err("Failed to allocate per-device memory\n"); return -ENOMEM; } platform_set_drvdata(pdev, this); this->pdev = pdev; this->dev = &pdev->dev; ret = acquire_resources(this); if (ret) goto exit_acquire_resources; ret = init_hardware(this); if (ret) goto exit_nfc_init; ret = gpmi_nfc_init(this); if (ret) goto exit_nfc_init; dev_info(this->dev, "driver registered.\n"); return 0; exit_nfc_init: release_resources(this); exit_acquire_resources: platform_set_drvdata(pdev, NULL); dev_err(this->dev, "driver registration failed: %d\n", ret); kfree(this); return ret; } static int gpmi_nand_remove(struct platform_device *pdev) { struct gpmi_nand_data *this = platform_get_drvdata(pdev); gpmi_nfc_exit(this); release_resources(this); platform_set_drvdata(pdev, NULL); kfree(this); return 0; } static struct platform_driver gpmi_nand_driver = { .driver = { .name = "gpmi-nand", .of_match_table = gpmi_nand_id_table, }, .probe = gpmi_nand_probe, .remove = gpmi_nand_remove, .id_table = gpmi_ids, }; module_platform_driver(gpmi_nand_driver); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("i.MX GPMI 
NAND Flash Controller Driver"); MODULE_LICENSE("GPL");
gpl-2.0
Carlstark/SAMA5D4-XULT
linux-at91-linux-3.10/drivers/media/usb/gspca/stv06xx/stv06xx.c
2251
17027
/* * Copyright (c) 2001 Jean-Fredric Clere, Nikolas Zimmermann, Georg Acher * Mark Cave-Ayland, Carlo E Prelz, Dick Streefland * Copyright (c) 2002, 2003 Tuukka Toivonen * Copyright (c) 2008 Erik Andrén * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * P/N 861037: Sensor HDCS1000 ASIC STV0600 * P/N 861050-0010: Sensor HDCS1000 ASIC STV0600 * P/N 861050-0020: Sensor Photobit PB100 ASIC STV0600-1 - QuickCam Express * P/N 861055: Sensor ST VV6410 ASIC STV0610 - LEGO cam * P/N 861075-0040: Sensor HDCS1000 ASIC * P/N 961179-0700: Sensor ST VV6410 ASIC STV0602 - Dexxa WebCam USB * P/N 861040-0000: Sensor ST VV6410 ASIC STV0610 - QuickCam Web */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/input.h> #include "stv06xx_sensor.h" MODULE_AUTHOR("Erik Andrén"); MODULE_DESCRIPTION("STV06XX USB Camera Driver"); MODULE_LICENSE("GPL"); static bool dump_bridge; static bool dump_sensor; int stv06xx_write_bridge(struct sd *sd, u16 address, u16 i2c_data) { int err; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; u8 len = (i2c_data > 0xff) ? 
2 : 1; buf[0] = i2c_data & 0xff; buf[1] = (i2c_data >> 8) & 0xff; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, address, 0, buf, len, STV06XX_URB_MSG_TIMEOUT); PDEBUG(D_CONF, "Written 0x%x to address 0x%x, status: %d", i2c_data, address, err); return (err < 0) ? err : 0; } int stv06xx_read_bridge(struct sd *sd, u16 address, u8 *i2c_data) { int err; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x04, 0xc0, address, 0, buf, 1, STV06XX_URB_MSG_TIMEOUT); *i2c_data = buf[0]; PDEBUG(D_CONF, "Reading 0x%x from address 0x%x, status %d", *i2c_data, address, err); return (err < 0) ? err : 0; } /* Wraps the normal write sensor bytes / words functions for writing a single value */ int stv06xx_write_sensor(struct sd *sd, u8 address, u16 value) { if (sd->sensor->i2c_len == 2) { u16 data[2] = { address, value }; return stv06xx_write_sensor_words(sd, data, 1); } else { u8 data[2] = { address, value }; return stv06xx_write_sensor_bytes(sd, data, 1); } } static int stv06xx_write_sensor_finish(struct sd *sd) { int err = 0; if (sd->bridge == BRIDGE_STV610) { struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; buf[0] = 0; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x1704, 0, buf, 1, STV06XX_URB_MSG_TIMEOUT); } return (err < 0) ? 
err : 0; } int stv06xx_write_sensor_bytes(struct sd *sd, const u8 *data, u8 len) { int err, i, j; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; PDEBUG(D_CONF, "I2C: Command buffer contains %d entries", len); for (i = 0; i < len;) { /* Build the command buffer */ memset(buf, 0, I2C_BUFFER_LENGTH); for (j = 0; j < I2C_MAX_BYTES && i < len; j++, i++) { buf[j] = data[2*i]; buf[0x10 + j] = data[2*i+1]; PDEBUG(D_CONF, "I2C: Writing 0x%02x to reg 0x%02x", data[2*i+1], data[2*i]); } buf[0x20] = sd->sensor->i2c_addr; buf[0x21] = j - 1; /* Number of commands to send - 1 */ buf[0x22] = I2C_WRITE_CMD; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x0400, 0, buf, I2C_BUFFER_LENGTH, STV06XX_URB_MSG_TIMEOUT); if (err < 0) return err; } return stv06xx_write_sensor_finish(sd); } int stv06xx_write_sensor_words(struct sd *sd, const u16 *data, u8 len) { int err, i, j; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = sd->gspca_dev.usb_buf; PDEBUG(D_CONF, "I2C: Command buffer contains %d entries", len); for (i = 0; i < len;) { /* Build the command buffer */ memset(buf, 0, I2C_BUFFER_LENGTH); for (j = 0; j < I2C_MAX_WORDS && i < len; j++, i++) { buf[j] = data[2*i]; buf[0x10 + j * 2] = data[2*i+1]; buf[0x10 + j * 2 + 1] = data[2*i+1] >> 8; PDEBUG(D_CONF, "I2C: Writing 0x%04x to reg 0x%02x", data[2*i+1], data[2*i]); } buf[0x20] = sd->sensor->i2c_addr; buf[0x21] = j - 1; /* Number of commands to send - 1 */ buf[0x22] = I2C_WRITE_CMD; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x0400, 0, buf, I2C_BUFFER_LENGTH, STV06XX_URB_MSG_TIMEOUT); if (err < 0) return err; } return stv06xx_write_sensor_finish(sd); } int stv06xx_read_sensor(struct sd *sd, const u8 address, u16 *value) { int err; struct gspca_dev *gspca_dev = (struct gspca_dev *)sd; struct usb_device *udev = sd->gspca_dev.dev; __u8 *buf = 
sd->gspca_dev.usb_buf; err = stv06xx_write_bridge(sd, STV_I2C_FLUSH, sd->sensor->i2c_flush); if (err < 0) return err; /* Clear mem */ memset(buf, 0, I2C_BUFFER_LENGTH); buf[0] = address; buf[0x20] = sd->sensor->i2c_addr; buf[0x21] = 0; /* Read I2C register */ buf[0x22] = I2C_READ_CMD; err = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x04, 0x40, 0x1400, 0, buf, I2C_BUFFER_LENGTH, STV06XX_URB_MSG_TIMEOUT); if (err < 0) { pr_err("I2C: Read error writing address: %d\n", err); return err; } err = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x04, 0xc0, 0x1410, 0, buf, sd->sensor->i2c_len, STV06XX_URB_MSG_TIMEOUT); if (sd->sensor->i2c_len == 2) *value = buf[0] | (buf[1] << 8); else *value = buf[0]; PDEBUG(D_CONF, "I2C: Read 0x%x from address 0x%x, status: %d", *value, address, err); return (err < 0) ? err : 0; } /* Dumps all bridge registers */ static void stv06xx_dump_bridge(struct sd *sd) { int i; u8 data, buf; pr_info("Dumping all stv06xx bridge registers\n"); for (i = 0x1400; i < 0x160f; i++) { stv06xx_read_bridge(sd, i, &data); pr_info("Read 0x%x from address 0x%x\n", data, i); } pr_info("Testing stv06xx bridge registers for writability\n"); for (i = 0x1400; i < 0x160f; i++) { stv06xx_read_bridge(sd, i, &data); buf = data; stv06xx_write_bridge(sd, i, 0xff); stv06xx_read_bridge(sd, i, &data); if (data == 0xff) pr_info("Register 0x%x is read/write\n", i); else if (data != buf) pr_info("Register 0x%x is read/write, but only partially\n", i); else pr_info("Register 0x%x is read-only\n", i); stv06xx_write_bridge(sd, i, buf); } } /* this function is called at probe and resume time */ static int stv06xx_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int err; PDEBUG(D_PROBE, "Initializing camera"); /* Let the usb init settle for a bit before performing the initialization */ msleep(250); err = sd->sensor->init(sd); if (dump_sensor && sd->sensor->dump) sd->sensor->dump(sd); return (err < 0) ? 
err : 0; } /* this function is called at probe time */ static int stv06xx_init_controls(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; PDEBUG(D_PROBE, "Initializing controls"); gspca_dev->vdev.ctrl_handler = &gspca_dev->ctrl_handler; return sd->sensor->init_controls(sd); } /* Start the camera */ static int stv06xx_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; struct usb_host_interface *alt; struct usb_interface *intf; int err, packet_size; intf = usb_ifnum_to_if(sd->gspca_dev.dev, sd->gspca_dev.iface); alt = usb_altnum_to_altsetting(intf, sd->gspca_dev.alt); if (!alt) { PERR("Couldn't get altsetting"); return -EIO; } packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); err = stv06xx_write_bridge(sd, STV_ISO_SIZE_L, packet_size); if (err < 0) return err; /* Prepare the sensor for start */ err = sd->sensor->start(sd); if (err < 0) goto out; /* Start isochronous streaming */ err = stv06xx_write_bridge(sd, STV_ISO_ENABLE, 1); out: if (err < 0) PDEBUG(D_STREAM, "Starting stream failed"); else PDEBUG(D_STREAM, "Started streaming"); return (err < 0) ? 
err : 0; } static int stv06xx_isoc_init(struct gspca_dev *gspca_dev) { struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; /* Start isoc bandwidth "negotiation" at max isoc bandwidth */ alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(sd->sensor->max_packet_size[gspca_dev->curr_mode]); return 0; } static int stv06xx_isoc_nego(struct gspca_dev *gspca_dev) { int ret, packet_size, min_packet_size; struct usb_host_interface *alt; struct sd *sd = (struct sd *) gspca_dev; alt = &gspca_dev->dev->actconfig->intf_cache[0]->altsetting[1]; packet_size = le16_to_cpu(alt->endpoint[0].desc.wMaxPacketSize); min_packet_size = sd->sensor->min_packet_size[gspca_dev->curr_mode]; if (packet_size <= min_packet_size) return -EIO; packet_size -= 100; if (packet_size < min_packet_size) packet_size = min_packet_size; alt->endpoint[0].desc.wMaxPacketSize = cpu_to_le16(packet_size); ret = usb_set_interface(gspca_dev->dev, gspca_dev->iface, 1); if (ret < 0) PERR("set alt 1 err %d", ret); return ret; } static void stv06xx_stopN(struct gspca_dev *gspca_dev) { int err; struct sd *sd = (struct sd *) gspca_dev; /* stop ISO-streaming */ err = stv06xx_write_bridge(sd, STV_ISO_ENABLE, 0); if (err < 0) goto out; err = sd->sensor->stop(sd); out: if (err < 0) PDEBUG(D_STREAM, "Failed to stop stream"); else PDEBUG(D_STREAM, "Stopped streaming"); } /* * Analyse an USB packet of the data stream and store it appropriately. * Each packet contains an integral number of chunks. Each chunk has * 2-bytes identification, followed by 2-bytes that describe the chunk * length. Known/guessed chunk identifications are: * 8001/8005/C001/C005 - Begin new frame * 8002/8006/C002/C006 - End frame * 0200/4200 - Contains actual image data, bayer or compressed * 0005 - 11 bytes of unknown data * 0100 - 2 bytes of unknown data * The 0005 and 0100 chunks seem to appear only in compressed stream. 
*/ static void stv06xx_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; PDEBUG(D_PACK, "Packet of length %d arrived", len); /* A packet may contain several frames loop until the whole packet is reached */ while (len) { int id, chunk_len; if (len < 4) { PDEBUG(D_PACK, "Packet is smaller than 4 bytes"); return; } /* Capture the id */ id = (data[0] << 8) | data[1]; /* Capture the chunk length */ chunk_len = (data[2] << 8) | data[3]; PDEBUG(D_PACK, "Chunk id: %x, length: %d", id, chunk_len); data += 4; len -= 4; if (len < chunk_len) { PERR("URB packet length is smaller" " than the specified chunk length"); gspca_dev->last_packet_type = DISCARD_PACKET; return; } /* First byte seem to be 02=data 2nd byte is unknown??? */ if (sd->bridge == BRIDGE_ST6422 && (id & 0xff00) == 0x0200) goto frame_data; switch (id) { case 0x0200: case 0x4200: frame_data: PDEBUG(D_PACK, "Frame data packet detected"); if (sd->to_skip) { int skip = (sd->to_skip < chunk_len) ? 
sd->to_skip : chunk_len; data += skip; len -= skip; chunk_len -= skip; sd->to_skip -= skip; } gspca_frame_add(gspca_dev, INTER_PACKET, data, chunk_len); break; case 0x8001: case 0x8005: case 0xc001: case 0xc005: PDEBUG(D_PACK, "Starting new frame"); /* Create a new frame, chunk length should be zero */ gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); if (sd->bridge == BRIDGE_ST6422) sd->to_skip = gspca_dev->width * 4; if (chunk_len) PERR("Chunk length is " "non-zero on a SOF"); break; case 0x8002: case 0x8006: case 0xc002: PDEBUG(D_PACK, "End of frame detected"); /* Complete the last frame (if any) */ gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); if (chunk_len) PERR("Chunk length is " "non-zero on a EOF"); break; case 0x0005: PDEBUG(D_PACK, "Chunk 0x005 detected"); /* Unknown chunk with 11 bytes of data, occurs just before end of each frame in compressed mode */ break; case 0x0100: PDEBUG(D_PACK, "Chunk 0x0100 detected"); /* Unknown chunk with 2 bytes of data, occurs 2-3 times per USB interrupt */ break; case 0x42ff: PDEBUG(D_PACK, "Chunk 0x42ff detected"); /* Special chunk seen sometimes on the ST6422 */ break; default: PDEBUG(D_PACK, "Unknown chunk 0x%04x detected", id); /* Unknown chunk */ } data += chunk_len; len -= chunk_len; } } #if IS_ENABLED(CONFIG_INPUT) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ int len) /* interrupt packet length */ { int ret = -EINVAL; if (len == 1 && data[0] == 0x80) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); input_sync(gspca_dev->input_dev); ret = 0; } if (len == 1 && data[0] == 0x88) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); ret = 0; } return ret; } #endif static int stv06xx_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = stv06xx_config, .init = stv06xx_init, .init_controls = 
stv06xx_init_controls, .start = stv06xx_start, .stopN = stv06xx_stopN, .pkt_scan = stv06xx_pkt_scan, .isoc_init = stv06xx_isoc_init, .isoc_nego = stv06xx_isoc_nego, #if IS_ENABLED(CONFIG_INPUT) .int_pkt_scan = sd_int_pkt_scan, #endif }; /* This function is called at probe time */ static int stv06xx_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; PDEBUG(D_PROBE, "Configuring camera"); sd->bridge = id->driver_info; gspca_dev->sd_desc = &sd_desc; if (dump_bridge) stv06xx_dump_bridge(sd); sd->sensor = &stv06xx_sensor_st6422; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_vv6410; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_hdcs1x00; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_hdcs1020; if (!sd->sensor->probe(sd)) return 0; sd->sensor = &stv06xx_sensor_pb0100; if (!sd->sensor->probe(sd)) return 0; sd->sensor = NULL; return -ENODEV; } /* -- module initialisation -- */ static const struct usb_device_id device_table[] = { /* QuickCam Express */ {USB_DEVICE(0x046d, 0x0840), .driver_info = BRIDGE_STV600 }, /* LEGO cam / QuickCam Web */ {USB_DEVICE(0x046d, 0x0850), .driver_info = BRIDGE_STV610 }, /* Dexxa WebCam USB */ {USB_DEVICE(0x046d, 0x0870), .driver_info = BRIDGE_STV602 }, /* QuickCam Messenger */ {USB_DEVICE(0x046D, 0x08F0), .driver_info = BRIDGE_ST6422 }, /* QuickCam Communicate */ {USB_DEVICE(0x046D, 0x08F5), .driver_info = BRIDGE_ST6422 }, /* QuickCam Messenger (new) */ {USB_DEVICE(0x046D, 0x08F6), .driver_info = BRIDGE_ST6422 }, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static void sd_disconnect(struct usb_interface *intf) { struct gspca_dev *gspca_dev = usb_get_intfdata(intf); struct sd *sd = (struct sd *) gspca_dev; void *priv = sd->sensor_priv; 
PDEBUG(D_PROBE, "Disconnecting the stv06xx device"); sd->sensor = NULL; gspca_disconnect(intf); kfree(priv); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = sd_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver); module_param(dump_bridge, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dump_bridge, "Dumps all usb bridge registers at startup"); module_param(dump_sensor, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(dump_sensor, "Dumps all sensor registers at startup");
gpl-2.0
YUPlayGod/android_kernel_yu_msm8916
drivers/media/usb/gspca/sq930x.c
2251
32401
/* * SQ930x subdriver * * Copyright (C) 2010 Jean-François Moine <http://moinejf.free.fr> * Copyright (C) 2006 -2008 Gerard Klaver <gerard at gkall dot hobby dot nl> * Copyright (C) 2007 Sam Revitch <samr7@cs.washington.edu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define MODULE_NAME "sq930x" #include "gspca.h" MODULE_AUTHOR("Jean-Francois Moine <http://moinejf.free.fr>\n" "Gerard Klaver <gerard at gkall dot hobby dot nl\n" "Sam Revitch <samr7@cs.washington.edu>"); MODULE_DESCRIPTION("GSPCA/SQ930x USB Camera Driver"); MODULE_LICENSE("GPL"); /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ struct { /* exposure/gain control cluster */ struct v4l2_ctrl *exposure; struct v4l2_ctrl *gain; }; u8 do_ctrl; u8 gpio[2]; u8 sensor; u8 type; #define Generic 0 #define Creative_live_motion 1 }; enum sensors { SENSOR_ICX098BQ, SENSOR_LZ24BP, SENSOR_MI0360, SENSOR_MT9V111, /* = MI360SOC */ SENSOR_OV7660, SENSOR_OV9630, }; static struct v4l2_pix_format vga_mode[] = { {320, 240, V4L2_PIX_FMT_SRGGB8, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 0}, {640, 480, V4L2_PIX_FMT_SRGGB8, V4L2_FIELD_NONE, .bytesperline = 640, .sizeimage = 640 * 480, .colorspace = V4L2_COLORSPACE_SRGB, .priv = 1}, }; /* sq930x registers */ #define SQ930_CTRL_UCBUS_IO 0x0001 #define SQ930_CTRL_I2C_IO 0x0002 #define SQ930_CTRL_GPIO 0x0005 #define SQ930_CTRL_CAP_START 0x0010 #define SQ930_CTRL_CAP_STOP 0x0011 #define SQ930_CTRL_SET_EXPOSURE 0x001d #define SQ930_CTRL_RESET 0x001e #define SQ930_CTRL_GET_DEV_INFO 0x001f /* gpio 1 (8..15) */ #define SQ930_GPIO_DFL_I2C_SDA 0x0001 #define SQ930_GPIO_DFL_I2C_SCL 0x0002 #define SQ930_GPIO_RSTBAR 0x0004 #define SQ930_GPIO_EXTRA1 0x0040 #define SQ930_GPIO_EXTRA2 0x0080 /* gpio 3 (24..31) */ #define SQ930_GPIO_POWER 0x0200 #define SQ930_GPIO_DFL_LED 0x1000 struct ucbus_write_cmd { u16 bw_addr; u8 bw_data; }; struct i2c_write_cmd { u8 reg; u16 val; }; static const struct ucbus_write_cmd icx098bq_start_0[] = { {0x0354, 0x00}, {0x03fa, 0x00}, {0xf800, 0x02}, {0xf801, 0xce}, {0xf802, 0xc1}, {0xf804, 0x00}, {0xf808, 0x00}, {0xf809, 0x0e}, {0xf80a, 0x01}, {0xf80b, 0xee}, {0xf807, 0x60}, {0xf80c, 0x02}, {0xf80d, 0xf0}, {0xf80e, 0x03}, {0xf80f, 0x0a}, {0xf81c, 0x02}, {0xf81d, 0xf0}, {0xf81e, 0x03}, {0xf81f, 0x0a}, {0xf83a, 0x00}, {0xf83b, 0x10}, {0xf83c, 0x00}, {0xf83d, 0x4e}, {0xf810, 0x04}, {0xf811, 0x00}, {0xf812, 0x02}, {0xf813, 0x10}, {0xf803, 0x00}, {0xf814, 0x01}, {0xf815, 0x18}, {0xf816, 0x00}, {0xf817, 0x48}, {0xf818, 0x00}, {0xf819, 0x25}, {0xf81a, 0x00}, {0xf81b, 0x3c}, 
{0xf82f, 0x03}, {0xf820, 0xff}, {0xf821, 0x0d}, {0xf822, 0xff}, {0xf823, 0x07}, {0xf824, 0xff}, {0xf825, 0x03}, {0xf826, 0xff}, {0xf827, 0x06}, {0xf828, 0xff}, {0xf829, 0x03}, {0xf82a, 0xff}, {0xf82b, 0x0c}, {0xf82c, 0xfd}, {0xf82d, 0x01}, {0xf82e, 0x00}, {0xf830, 0x00}, {0xf831, 0x47}, {0xf832, 0x00}, {0xf833, 0x00}, {0xf850, 0x00}, {0xf851, 0x00}, {0xf852, 0x00}, {0xf853, 0x24}, {0xf854, 0x00}, {0xf855, 0x18}, {0xf856, 0x00}, {0xf857, 0x3c}, {0xf858, 0x00}, {0xf859, 0x0c}, {0xf85a, 0x00}, {0xf85b, 0x30}, {0xf85c, 0x00}, {0xf85d, 0x0c}, {0xf85e, 0x00}, {0xf85f, 0x30}, {0xf860, 0x00}, {0xf861, 0x48}, {0xf862, 0x01}, {0xf863, 0xdc}, {0xf864, 0xff}, {0xf865, 0x98}, {0xf866, 0xff}, {0xf867, 0xc0}, {0xf868, 0xff}, {0xf869, 0x70}, {0xf86c, 0xff}, {0xf86d, 0x00}, {0xf86a, 0xff}, {0xf86b, 0x48}, {0xf86e, 0xff}, {0xf86f, 0x00}, {0xf870, 0x01}, {0xf871, 0xdb}, {0xf872, 0x01}, {0xf873, 0xfa}, {0xf874, 0x01}, {0xf875, 0xdb}, {0xf876, 0x01}, {0xf877, 0xfa}, {0xf878, 0x0f}, {0xf879, 0x0f}, {0xf87a, 0xff}, {0xf87b, 0xff}, {0xf800, 0x03} }; static const struct ucbus_write_cmd icx098bq_start_1[] = { {0xf5f0, 0x00}, {0xf5f1, 0xcd}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xc0}, {0xf5f0, 0x49}, {0xf5f1, 0xcd}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xc0}, {0xf5fa, 0x00}, {0xf5f6, 0x00}, {0xf5f7, 0x00}, {0xf5f8, 0x00}, {0xf5f9, 0x00} }; static const struct ucbus_write_cmd icx098bq_start_2[] = { {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x82}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x40}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0xcf}, {0xf806, 0xd0}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x00}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03} }; static const struct ucbus_write_cmd lz24bp_start_0[] = { {0x0354, 0x00}, {0x03fa, 0x00}, {0xf800, 0x02}, {0xf801, 0xbe}, {0xf802, 0xc6}, {0xf804, 0x00}, {0xf808, 0x00}, {0xf809, 0x06}, {0xf80a, 0x01}, {0xf80b, 
0xfe}, {0xf807, 0x84}, {0xf80c, 0x02}, {0xf80d, 0xf7}, {0xf80e, 0x03}, {0xf80f, 0x0b}, {0xf81c, 0x00}, {0xf81d, 0x49}, {0xf81e, 0x03}, {0xf81f, 0x0b}, {0xf83a, 0x00}, {0xf83b, 0x01}, {0xf83c, 0x00}, {0xf83d, 0x6b}, {0xf810, 0x03}, {0xf811, 0x10}, {0xf812, 0x02}, {0xf813, 0x6f}, {0xf803, 0x00}, {0xf814, 0x00}, {0xf815, 0x44}, {0xf816, 0x00}, {0xf817, 0x48}, {0xf818, 0x00}, {0xf819, 0x25}, {0xf81a, 0x00}, {0xf81b, 0x3c}, {0xf82f, 0x03}, {0xf820, 0xff}, {0xf821, 0x0d}, {0xf822, 0xff}, {0xf823, 0x07}, {0xf824, 0xfd}, {0xf825, 0x07}, {0xf826, 0xf0}, {0xf827, 0x0c}, {0xf828, 0xff}, {0xf829, 0x03}, {0xf82a, 0xff}, {0xf82b, 0x0c}, {0xf82c, 0xfc}, {0xf82d, 0x01}, {0xf82e, 0x00}, {0xf830, 0x00}, {0xf831, 0x47}, {0xf832, 0x00}, {0xf833, 0x00}, {0xf850, 0x00}, {0xf851, 0x00}, {0xf852, 0x00}, {0xf853, 0x24}, {0xf854, 0x00}, {0xf855, 0x0c}, {0xf856, 0x00}, {0xf857, 0x30}, {0xf858, 0x00}, {0xf859, 0x18}, {0xf85a, 0x00}, {0xf85b, 0x3c}, {0xf85c, 0x00}, {0xf85d, 0x18}, {0xf85e, 0x00}, {0xf85f, 0x3c}, {0xf860, 0xff}, {0xf861, 0x37}, {0xf862, 0xff}, {0xf863, 0x1d}, {0xf864, 0xff}, {0xf865, 0x98}, {0xf866, 0xff}, {0xf867, 0xc0}, {0xf868, 0x00}, {0xf869, 0x37}, {0xf86c, 0x02}, {0xf86d, 0x1d}, {0xf86a, 0x00}, {0xf86b, 0x37}, {0xf86e, 0x02}, {0xf86f, 0x1d}, {0xf870, 0x01}, {0xf871, 0xc6}, {0xf872, 0x02}, {0xf873, 0x04}, {0xf874, 0x01}, {0xf875, 0xc6}, {0xf876, 0x02}, {0xf877, 0x04}, {0xf878, 0x0f}, {0xf879, 0x0f}, {0xf87a, 0xff}, {0xf87b, 0xff}, {0xf800, 0x03} }; static const struct ucbus_write_cmd lz24bp_start_1_gen[] = { {0xf5f0, 0x00}, {0xf5f1, 0xff}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xb3}, {0xf5f0, 0x40}, {0xf5f1, 0xff}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xb3}, {0xf5fa, 0x00}, {0xf5f6, 0x00}, {0xf5f7, 0x00}, {0xf5f8, 0x00}, {0xf5f9, 0x00} }; static const struct ucbus_write_cmd lz24bp_start_1_clm[] = { {0xf5f0, 0x00}, {0xf5f1, 0xff}, {0xf5f2, 0x88}, {0xf5f3, 0x88}, {0xf5f4, 0xc0}, {0xf5f0, 0x40}, {0xf5f1, 0xff}, {0xf5f2, 0x88}, {0xf5f3, 0x88}, {0xf5f4, 0xc0}, {0xf5fa, 
0x00}, {0xf5f6, 0x00}, {0xf5f7, 0x00}, {0xf5f8, 0x00}, {0xf5f9, 0x00} }; static const struct ucbus_write_cmd lz24bp_start_2[] = { {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x80}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x4e}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0xc0}, {0xf806, 0x48}, {0xf807, 0x7f}, {0xf800, 0x03}, {0xf800, 0x02}, {0xf807, 0xff}, {0xf805, 0x00}, {0xf806, 0x00}, {0xf807, 0x7f}, {0xf800, 0x03} }; static const struct ucbus_write_cmd mi0360_start_0[] = { {0x0354, 0x00}, {0x03fa, 0x00}, {0xf332, 0xcc}, {0xf333, 0xcc}, {0xf334, 0xcc}, {0xf335, 0xcc}, {0xf33f, 0x00} }; static const struct i2c_write_cmd mi0360_init_23[] = { {0x30, 0x0040}, /* reserved - def 0x0005 */ {0x31, 0x0000}, /* reserved - def 0x002a */ {0x34, 0x0100}, /* reserved - def 0x0100 */ {0x3d, 0x068f}, /* reserved - def 0x068f */ }; static const struct i2c_write_cmd mi0360_init_24[] = { {0x03, 0x01e5}, /* window height */ {0x04, 0x0285}, /* window width */ }; static const struct i2c_write_cmd mi0360_init_25[] = { {0x35, 0x0020}, /* global gain */ {0x2b, 0x0020}, /* green1 gain */ {0x2c, 0x002a}, /* blue gain */ {0x2d, 0x0028}, /* red gain */ {0x2e, 0x0020}, /* green2 gain */ }; static const struct ucbus_write_cmd mi0360_start_1[] = { {0xf5f0, 0x11}, {0xf5f1, 0x99}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xa6}, {0xf5f0, 0x51}, {0xf5f1, 0x99}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xa6}, {0xf5fa, 0x00}, {0xf5f6, 0x00}, {0xf5f7, 0x00}, {0xf5f8, 0x00}, {0xf5f9, 0x00} }; static const struct i2c_write_cmd mi0360_start_2[] = { {0x62, 0x041d}, /* reserved - def 0x0418 */ }; static const struct i2c_write_cmd mi0360_start_3[] = { {0x05, 0x007b}, /* horiz blanking */ }; static const struct i2c_write_cmd mi0360_start_4[] = { {0x05, 0x03f5}, /* horiz blanking */ }; static const struct i2c_write_cmd mt9v111_init_0[] = { {0x01, 0x0001}, /* select IFP/SOC registers */ {0x06, 0x300c}, /* operating mode 
control */ {0x08, 0xcc00}, /* output format control (RGB) */ {0x01, 0x0004}, /* select sensor core registers */ }; static const struct i2c_write_cmd mt9v111_init_1[] = { {0x03, 0x01e5}, /* window height */ {0x04, 0x0285}, /* window width */ }; static const struct i2c_write_cmd mt9v111_init_2[] = { {0x30, 0x7800}, {0x31, 0x0000}, {0x07, 0x3002}, /* output control */ {0x35, 0x0020}, /* global gain */ {0x2b, 0x0020}, /* green1 gain */ {0x2c, 0x0020}, /* blue gain */ {0x2d, 0x0020}, /* red gain */ {0x2e, 0x0020}, /* green2 gain */ }; static const struct ucbus_write_cmd mt9v111_start_1[] = { {0xf5f0, 0x11}, {0xf5f1, 0x96}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xaa}, {0xf5f0, 0x51}, {0xf5f1, 0x96}, {0xf5f2, 0x80}, {0xf5f3, 0x80}, {0xf5f4, 0xaa}, {0xf5fa, 0x00}, {0xf5f6, 0x0a}, {0xf5f7, 0x0a}, {0xf5f8, 0x0a}, {0xf5f9, 0x0a} }; static const struct i2c_write_cmd mt9v111_init_3[] = { {0x62, 0x0405}, }; static const struct i2c_write_cmd mt9v111_init_4[] = { /* {0x05, 0x00ce}, */ {0x05, 0x005d}, /* horizontal blanking */ }; static const struct ucbus_write_cmd ov7660_start_0[] = { {0x0354, 0x00}, {0x03fa, 0x00}, {0xf332, 0x00}, {0xf333, 0xc0}, {0xf334, 0x39}, {0xf335, 0xe7}, {0xf33f, 0x03} }; static const struct ucbus_write_cmd ov9630_start_0[] = { {0x0354, 0x00}, {0x03fa, 0x00}, {0xf332, 0x00}, {0xf333, 0x00}, {0xf334, 0x3e}, {0xf335, 0xf8}, {0xf33f, 0x03} }; /* start parameters indexed by [sensor][mode] */ static const struct cap_s { u8 cc_sizeid; u8 cc_bytes[32]; } capconfig[4][2] = { [SENSOR_ICX098BQ] = { {2, /* Bayer 320x240 */ {0x05, 0x1f, 0x20, 0x0e, 0x00, 0x9f, 0x02, 0xee, 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, {4, /* Bayer 640x480 */ {0x01, 0x1f, 0x20, 0x0e, 0x00, 0x9f, 0x02, 0xee, 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, }, [SENSOR_LZ24BP] = { {2, /* 
Bayer 320x240 */ {0x05, 0x22, 0x20, 0x0e, 0x00, 0xa2, 0x02, 0xee, 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, {4, /* Bayer 640x480 */ {0x01, 0x22, 0x20, 0x0e, 0x00, 0xa2, 0x02, 0xee, 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, }, [SENSOR_MI0360] = { {2, /* Bayer 320x240 */ {0x05, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1, 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, {4, /* Bayer 640x480 */ {0x01, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1, 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, }, [SENSOR_MT9V111] = { {2, /* Bayer 320x240 */ {0x05, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1, 0x01, 0x01, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, {4, /* Bayer 640x480 */ {0x01, 0x02, 0x20, 0x01, 0x20, 0x82, 0x02, 0xe1, 0x01, 0x02, 0x00, 0x08, 0x18, 0x12, 0x78, 0xc8, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} }, }, }; struct sensor_s { const char *name; u8 i2c_addr; u8 i2c_dum; u8 gpio[5]; u8 cmd_len; const struct ucbus_write_cmd *cmd; }; static const struct sensor_s sensor_tb[] = { [SENSOR_ICX098BQ] = { "icx098bp", 0x00, 0x00, {0, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, SQ930_GPIO_DFL_I2C_SDA, 0, SQ930_GPIO_RSTBAR }, 8, icx098bq_start_0 }, [SENSOR_LZ24BP] = { "lz24bp", 0x00, 0x00, {0, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, SQ930_GPIO_DFL_I2C_SDA, 0, SQ930_GPIO_RSTBAR }, 8, lz24bp_start_0 }, [SENSOR_MI0360] = { "mi0360", 0x5d, 0x80, {SQ930_GPIO_RSTBAR, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, 
SQ930_GPIO_DFL_I2C_SDA, 0, 0 }, 7, mi0360_start_0 }, [SENSOR_MT9V111] = { "mt9v111", 0x5c, 0x7f, {SQ930_GPIO_RSTBAR, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, SQ930_GPIO_DFL_I2C_SDA, 0, 0 }, 7, mi0360_start_0 }, [SENSOR_OV7660] = { "ov7660", 0x21, 0x00, {0, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, SQ930_GPIO_DFL_I2C_SDA, 0, SQ930_GPIO_RSTBAR }, 7, ov7660_start_0 }, [SENSOR_OV9630] = { "ov9630", 0x30, 0x00, {0, SQ930_GPIO_DFL_I2C_SDA | SQ930_GPIO_DFL_I2C_SCL, SQ930_GPIO_DFL_I2C_SDA, 0, SQ930_GPIO_RSTBAR }, 7, ov9630_start_0 }, }; static void reg_r(struct gspca_dev *gspca_dev, u16 value, int len) { int ret; if (gspca_dev->usb_err < 0) return; ret = usb_control_msg(gspca_dev->dev, usb_rcvctrlpipe(gspca_dev->dev, 0), 0x0c, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, 0, gspca_dev->usb_buf, len, 500); if (ret < 0) { pr_err("reg_r %04x failed %d\n", value, ret); gspca_dev->usb_err = ret; } } static void reg_w(struct gspca_dev *gspca_dev, u16 value, u16 index) { int ret; if (gspca_dev->usb_err < 0) return; PDEBUG(D_USBO, "reg_w v: %04x i: %04x", value, index); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x0c, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, NULL, 0, 500); msleep(30); if (ret < 0) { pr_err("reg_w %04x %04x failed %d\n", value, index, ret); gspca_dev->usb_err = ret; } } static void reg_wb(struct gspca_dev *gspca_dev, u16 value, u16 index, const u8 *data, int len) { int ret; if (gspca_dev->usb_err < 0) return; PDEBUG(D_USBO, "reg_wb v: %04x i: %04x %02x...%02x", value, index, *data, data[len - 1]); memcpy(gspca_dev->usb_buf, data, len); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x0c, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value, index, gspca_dev->usb_buf, len, 1000); msleep(30); if (ret < 0) { pr_err("reg_wb %04x %04x failed %d\n", value, index, ret); gspca_dev->usb_err = ret; } } static void i2c_write(struct sd *sd, 
const struct i2c_write_cmd *cmd, int ncmds) { struct gspca_dev *gspca_dev = &sd->gspca_dev; const struct sensor_s *sensor; u16 val, idx; u8 *buf; int ret; if (gspca_dev->usb_err < 0) return; sensor = &sensor_tb[sd->sensor]; val = (sensor->i2c_addr << 8) | SQ930_CTRL_I2C_IO; idx = (cmd->val & 0xff00) | cmd->reg; buf = gspca_dev->usb_buf; *buf++ = sensor->i2c_dum; *buf++ = cmd->val; while (--ncmds > 0) { cmd++; *buf++ = cmd->reg; *buf++ = cmd->val >> 8; *buf++ = sensor->i2c_dum; *buf++ = cmd->val; } PDEBUG(D_USBO, "i2c_w v: %04x i: %04x %02x...%02x", val, idx, gspca_dev->usb_buf[0], buf[-1]); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x0c, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, idx, gspca_dev->usb_buf, buf - gspca_dev->usb_buf, 500); if (ret < 0) { pr_err("i2c_write failed %d\n", ret); gspca_dev->usb_err = ret; } } static void ucbus_write(struct gspca_dev *gspca_dev, const struct ucbus_write_cmd *cmd, int ncmds, int batchsize) { u8 *buf; u16 val, idx; int len, ret; if (gspca_dev->usb_err < 0) return; if ((batchsize - 1) * 3 > USB_BUF_SZ) { PERR("Bug: usb_buf overflow\n"); gspca_dev->usb_err = -ENOMEM; return; } for (;;) { len = ncmds; if (len > batchsize) len = batchsize; ncmds -= len; val = (cmd->bw_addr << 8) | SQ930_CTRL_UCBUS_IO; idx = (cmd->bw_data << 8) | (cmd->bw_addr >> 8); buf = gspca_dev->usb_buf; while (--len > 0) { cmd++; *buf++ = cmd->bw_addr; *buf++ = cmd->bw_addr >> 8; *buf++ = cmd->bw_data; } if (buf != gspca_dev->usb_buf) PDEBUG(D_USBO, "ucbus v: %04x i: %04x %02x...%02x", val, idx, gspca_dev->usb_buf[0], buf[-1]); else PDEBUG(D_USBO, "ucbus v: %04x i: %04x", val, idx); ret = usb_control_msg(gspca_dev->dev, usb_sndctrlpipe(gspca_dev->dev, 0), 0x0c, /* request */ USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE, val, idx, gspca_dev->usb_buf, buf - gspca_dev->usb_buf, 500); if (ret < 0) { pr_err("ucbus_write failed %d\n", ret); gspca_dev->usb_err = ret; return; } msleep(30); if (ncmds <= 0) 
break; cmd++; } } static void gpio_set(struct sd *sd, u16 val, u16 mask) { struct gspca_dev *gspca_dev = &sd->gspca_dev; if (mask & 0x00ff) { sd->gpio[0] &= ~mask; sd->gpio[0] |= val; reg_w(gspca_dev, 0x0100 | SQ930_CTRL_GPIO, ~sd->gpio[0] << 8); } mask >>= 8; val >>= 8; if (mask) { sd->gpio[1] &= ~mask; sd->gpio[1] |= val; reg_w(gspca_dev, 0x0300 | SQ930_CTRL_GPIO, ~sd->gpio[1] << 8); } } static void gpio_init(struct sd *sd, const u8 *gpio) { gpio_set(sd, *gpio++, 0x000f); gpio_set(sd, *gpio++, 0x000f); gpio_set(sd, *gpio++, 0x000f); gpio_set(sd, *gpio++, 0x000f); gpio_set(sd, *gpio, 0x000f); } static void bridge_init(struct sd *sd) { static const struct ucbus_write_cmd clkfreq_cmd = { 0xf031, 0 /* SQ930_CLKFREQ_60MHZ */ }; ucbus_write(&sd->gspca_dev, &clkfreq_cmd, 1, 1); gpio_set(sd, SQ930_GPIO_POWER, 0xff00); } static void cmos_probe(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i; const struct sensor_s *sensor; static const u8 probe_order[] = { /* SENSOR_LZ24BP, (tested as ccd) */ SENSOR_OV9630, SENSOR_MI0360, SENSOR_OV7660, SENSOR_MT9V111, }; for (i = 0; i < ARRAY_SIZE(probe_order); i++) { sensor = &sensor_tb[probe_order[i]]; ucbus_write(&sd->gspca_dev, sensor->cmd, sensor->cmd_len, 8); gpio_init(sd, sensor->gpio); msleep(100); reg_r(gspca_dev, (sensor->i2c_addr << 8) | 0x001c, 1); msleep(100); if (gspca_dev->usb_buf[0] != 0) break; } if (i >= ARRAY_SIZE(probe_order)) { pr_err("Unknown sensor\n"); gspca_dev->usb_err = -EINVAL; return; } sd->sensor = probe_order[i]; switch (sd->sensor) { case SENSOR_OV7660: case SENSOR_OV9630: pr_err("Sensor %s not yet treated\n", sensor_tb[sd->sensor].name); gspca_dev->usb_err = -EINVAL; break; } } static void mt9v111_init(struct gspca_dev *gspca_dev) { int i, nwait; static const u8 cmd_001b[] = { 0x00, 0x3b, 0xf6, 0x01, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00 }; static const u8 cmd_011b[][7] = { {0x10, 0x01, 0x66, 0x08, 0x00, 0x00, 0x00}, {0x01, 0x00, 0x1a, 0x04, 0x00, 0x00, 0x00}, {0x20, 0x00, 
0x10, 0x04, 0x00, 0x00, 0x00}, {0x02, 0x01, 0xae, 0x01, 0x00, 0x00, 0x00}, }; reg_wb(gspca_dev, 0x001b, 0x0000, cmd_001b, sizeof cmd_001b); for (i = 0; i < ARRAY_SIZE(cmd_011b); i++) { reg_wb(gspca_dev, 0x001b, 0x0000, cmd_011b[i], ARRAY_SIZE(cmd_011b[0])); msleep(400); nwait = 20; for (;;) { reg_r(gspca_dev, 0x031b, 1); if (gspca_dev->usb_buf[0] == 0 || gspca_dev->usb_err != 0) break; if (--nwait < 0) { PDEBUG(D_PROBE, "mt9v111_init timeout"); gspca_dev->usb_err = -ETIME; return; } msleep(50); } } } static void global_init(struct sd *sd, int first_time) { switch (sd->sensor) { case SENSOR_ICX098BQ: if (first_time) ucbus_write(&sd->gspca_dev, icx098bq_start_0, 8, 8); gpio_init(sd, sensor_tb[sd->sensor].gpio); break; case SENSOR_LZ24BP: if (sd->type != Creative_live_motion) gpio_set(sd, SQ930_GPIO_EXTRA1, 0x00ff); else gpio_set(sd, 0, 0x00ff); msleep(50); if (first_time) ucbus_write(&sd->gspca_dev, lz24bp_start_0, 8, 8); gpio_init(sd, sensor_tb[sd->sensor].gpio); break; case SENSOR_MI0360: if (first_time) ucbus_write(&sd->gspca_dev, mi0360_start_0, ARRAY_SIZE(mi0360_start_0), 8); gpio_init(sd, sensor_tb[sd->sensor].gpio); gpio_set(sd, SQ930_GPIO_EXTRA2, SQ930_GPIO_EXTRA2); break; default: /* case SENSOR_MT9V111: */ if (first_time) mt9v111_init(&sd->gspca_dev); else gpio_init(sd, sensor_tb[sd->sensor].gpio); break; } } static void lz24bp_ppl(struct sd *sd, u16 ppl) { struct ucbus_write_cmd cmds[2] = { {0xf810, ppl >> 8}, {0xf811, ppl} }; ucbus_write(&sd->gspca_dev, cmds, ARRAY_SIZE(cmds), 2); } static void setexposure(struct gspca_dev *gspca_dev, s32 expo, s32 gain) { struct sd *sd = (struct sd *) gspca_dev; int i, integclks, intstartclk, frameclks, min_frclk; const struct sensor_s *sensor; u16 cmd; u8 buf[15]; integclks = expo; i = 0; cmd = SQ930_CTRL_SET_EXPOSURE; switch (sd->sensor) { case SENSOR_ICX098BQ: /* ccd */ case SENSOR_LZ24BP: min_frclk = sd->sensor == SENSOR_ICX098BQ ? 
0x210 : 0x26f; if (integclks >= min_frclk) { intstartclk = 0; frameclks = integclks; } else { intstartclk = min_frclk - integclks; frameclks = min_frclk; } buf[i++] = intstartclk >> 8; buf[i++] = intstartclk; buf[i++] = frameclks >> 8; buf[i++] = frameclks; buf[i++] = gain; break; default: /* cmos */ /* case SENSOR_MI0360: */ /* case SENSOR_MT9V111: */ cmd |= 0x0100; sensor = &sensor_tb[sd->sensor]; buf[i++] = sensor->i2c_addr; /* i2c_slave_addr */ buf[i++] = 0x08; /* 2 * ni2c */ buf[i++] = 0x09; /* reg = shutter width */ buf[i++] = integclks >> 8; /* val H */ buf[i++] = sensor->i2c_dum; buf[i++] = integclks; /* val L */ buf[i++] = 0x35; /* reg = global gain */ buf[i++] = 0x00; /* val H */ buf[i++] = sensor->i2c_dum; buf[i++] = 0x80 + gain / 2; /* val L */ buf[i++] = 0x00; buf[i++] = 0x00; buf[i++] = 0x00; buf[i++] = 0x00; buf[i++] = 0x83; break; } reg_wb(gspca_dev, cmd, 0, buf, i); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam = &gspca_dev->cam; sd->sensor = id->driver_info >> 8; sd->type = id->driver_info; cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); cam->bulk = 1; return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; sd->gpio[0] = sd->gpio[1] = 0xff; /* force gpio rewrite */ /*fixme: is this needed for icx098bp and mi0360? if (sd->sensor != SENSOR_LZ24BP) reg_w(gspca_dev, SQ930_CTRL_RESET, 0x0000); */ reg_r(gspca_dev, SQ930_CTRL_GET_DEV_INFO, 8); if (gspca_dev->usb_err < 0) return gspca_dev->usb_err; /* it returns: * 03 00 12 93 0b f6 c9 00 live! ultra * 03 00 07 93 0b f6 ca 00 live! 
ultra for notebook * 03 00 12 93 0b fe c8 00 Trust WB-3500T * 02 00 06 93 0b fe c8 00 Joy-IT 318S * 03 00 12 93 0b f6 cf 00 icam tracer - sensor icx098bq * 02 00 12 93 0b fe cf 00 ProQ Motion Webcam * * byte * 0: 02 = usb 1.0 (12Mbit) / 03 = usb2.0 (480Mbit) * 1: 00 * 2: 06 / 07 / 12 = mode webcam? firmware?? * 3: 93 chip = 930b (930b or 930c) * 4: 0b * 5: f6 = cdd (icx098bq, lz24bp) / fe or de = cmos (i2c) (other sensors) * 6: c8 / c9 / ca / cf = mode webcam?, sensor? webcam? * 7: 00 */ PDEBUG(D_PROBE, "info: %*ph", 8, gspca_dev->usb_buf); bridge_init(sd); if (sd->sensor == SENSOR_MI0360) { /* no sensor probe for icam tracer */ if (gspca_dev->usb_buf[5] == 0xf6) /* if ccd */ sd->sensor = SENSOR_ICX098BQ; else cmos_probe(gspca_dev); } if (gspca_dev->usb_err >= 0) { PDEBUG(D_PROBE, "Sensor %s", sensor_tb[sd->sensor].name); global_init(sd, 1); } return gspca_dev->usb_err; } /* send the start/stop commands to the webcam */ static void send_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; const struct cap_s *cap; int mode; mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; cap = &capconfig[sd->sensor][mode]; reg_wb(gspca_dev, 0x0900 | SQ930_CTRL_CAP_START, 0x0a00 | cap->cc_sizeid, cap->cc_bytes, 32); } static void send_stop(struct gspca_dev *gspca_dev) { reg_w(gspca_dev, SQ930_CTRL_CAP_STOP, 0); } /* function called at start time before URB creation */ static int sd_isoc_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; gspca_dev->cam.bulk_nurbs = 1; /* there must be one URB only */ sd->do_ctrl = 0; gspca_dev->cam.bulk_size = gspca_dev->width * gspca_dev->height + 8; return 0; } /* start the capture */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int mode; bridge_init(sd); global_init(sd, 0); msleep(100); switch (sd->sensor) { case SENSOR_ICX098BQ: ucbus_write(gspca_dev, icx098bq_start_0, ARRAY_SIZE(icx098bq_start_0), 8); ucbus_write(gspca_dev, 
icx098bq_start_1, ARRAY_SIZE(icx098bq_start_1), 5); ucbus_write(gspca_dev, icx098bq_start_2, ARRAY_SIZE(icx098bq_start_2), 6); msleep(50); /* 1st start */ send_start(gspca_dev); gpio_set(sd, SQ930_GPIO_EXTRA2 | SQ930_GPIO_RSTBAR, 0x00ff); msleep(70); reg_w(gspca_dev, SQ930_CTRL_CAP_STOP, 0x0000); gpio_set(sd, 0x7f, 0x00ff); /* 2nd start */ send_start(gspca_dev); gpio_set(sd, SQ930_GPIO_EXTRA2 | SQ930_GPIO_RSTBAR, 0x00ff); goto out; case SENSOR_LZ24BP: ucbus_write(gspca_dev, lz24bp_start_0, ARRAY_SIZE(lz24bp_start_0), 8); if (sd->type != Creative_live_motion) ucbus_write(gspca_dev, lz24bp_start_1_gen, ARRAY_SIZE(lz24bp_start_1_gen), 5); else ucbus_write(gspca_dev, lz24bp_start_1_clm, ARRAY_SIZE(lz24bp_start_1_clm), 5); ucbus_write(gspca_dev, lz24bp_start_2, ARRAY_SIZE(lz24bp_start_2), 6); mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; lz24bp_ppl(sd, mode == 1 ? 0x0564 : 0x0310); msleep(10); break; case SENSOR_MI0360: ucbus_write(gspca_dev, mi0360_start_0, ARRAY_SIZE(mi0360_start_0), 8); i2c_write(sd, mi0360_init_23, ARRAY_SIZE(mi0360_init_23)); i2c_write(sd, mi0360_init_24, ARRAY_SIZE(mi0360_init_24)); i2c_write(sd, mi0360_init_25, ARRAY_SIZE(mi0360_init_25)); ucbus_write(gspca_dev, mi0360_start_1, ARRAY_SIZE(mi0360_start_1), 5); i2c_write(sd, mi0360_start_2, ARRAY_SIZE(mi0360_start_2)); i2c_write(sd, mi0360_start_3, ARRAY_SIZE(mi0360_start_3)); /* 1st start */ send_start(gspca_dev); msleep(60); send_stop(gspca_dev); i2c_write(sd, mi0360_start_4, ARRAY_SIZE(mi0360_start_4)); break; default: /* case SENSOR_MT9V111: */ ucbus_write(gspca_dev, mi0360_start_0, ARRAY_SIZE(mi0360_start_0), 8); i2c_write(sd, mt9v111_init_0, ARRAY_SIZE(mt9v111_init_0)); i2c_write(sd, mt9v111_init_1, ARRAY_SIZE(mt9v111_init_1)); i2c_write(sd, mt9v111_init_2, ARRAY_SIZE(mt9v111_init_2)); ucbus_write(gspca_dev, mt9v111_start_1, ARRAY_SIZE(mt9v111_start_1), 5); i2c_write(sd, mt9v111_init_3, ARRAY_SIZE(mt9v111_init_3)); i2c_write(sd, mt9v111_init_4, ARRAY_SIZE(mt9v111_init_4)); break; 
} send_start(gspca_dev); out: msleep(1000); if (sd->sensor == SENSOR_MT9V111) gpio_set(sd, SQ930_GPIO_DFL_LED, SQ930_GPIO_DFL_LED); sd->do_ctrl = 1; /* set the exposure */ return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->sensor == SENSOR_MT9V111) gpio_set(sd, 0, SQ930_GPIO_DFL_LED); send_stop(gspca_dev); } /* function called when the application gets a new frame */ /* It sets the exposure if required and restart the bulk transfer. */ static void sd_dq_callback(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int ret; if (!sd->do_ctrl || gspca_dev->cam.bulk_nurbs != 0) return; sd->do_ctrl = 0; setexposure(gspca_dev, v4l2_ctrl_g_ctrl(sd->exposure), v4l2_ctrl_g_ctrl(sd->gain)); gspca_dev->cam.bulk_nurbs = 1; ret = usb_submit_urb(gspca_dev->urb[0], GFP_ATOMIC); if (ret < 0) pr_err("sd_dq_callback() err %d\n", ret); /* wait a little time, otherwise the webcam crashes */ msleep(100); } static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; if (sd->do_ctrl) gspca_dev->cam.bulk_nurbs = 0; gspca_frame_add(gspca_dev, FIRST_PACKET, NULL, 0); gspca_frame_add(gspca_dev, INTER_PACKET, data, len - 8); gspca_frame_add(gspca_dev, LAST_PACKET, NULL, 0); } static int sd_s_ctrl(struct v4l2_ctrl *ctrl) { struct gspca_dev *gspca_dev = container_of(ctrl->handler, struct gspca_dev, ctrl_handler); struct sd *sd = (struct sd *) gspca_dev; gspca_dev->usb_err = 0; if (!gspca_dev->streaming) return 0; switch (ctrl->id) { case V4L2_CID_EXPOSURE: setexposure(gspca_dev, ctrl->val, sd->gain->val); break; } return gspca_dev->usb_err; } static const struct v4l2_ctrl_ops sd_ctrl_ops = { .s_ctrl = sd_s_ctrl, }; static int sd_init_controls(struct gspca_dev *gspca_dev) { struct v4l2_ctrl_handler *hdl = &gspca_dev->ctrl_handler; struct sd *sd = (struct sd *) gspca_dev; gspca_dev->vdev.ctrl_handler = 
hdl; v4l2_ctrl_handler_init(hdl, 2); sd->exposure = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_EXPOSURE, 1, 0xfff, 1, 0x356); sd->gain = v4l2_ctrl_new_std(hdl, &sd_ctrl_ops, V4L2_CID_GAIN, 1, 255, 1, 0x8d); if (hdl->error) { pr_err("Could not initialize controls\n"); return hdl->error; } v4l2_ctrl_cluster(2, &sd->exposure); return 0; } /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .init_controls = sd_init_controls, .isoc_init = sd_isoc_init, .start = sd_start, .stopN = sd_stopN, .pkt_scan = sd_pkt_scan, .dq_callback = sd_dq_callback, }; /* Table of supported USB devices */ #define ST(sensor, type) \ .driver_info = (SENSOR_ ## sensor << 8) \ | (type) static const struct usb_device_id device_table[] = { {USB_DEVICE(0x041e, 0x4038), ST(MI0360, 0)}, {USB_DEVICE(0x041e, 0x403c), ST(LZ24BP, 0)}, {USB_DEVICE(0x041e, 0x403d), ST(LZ24BP, 0)}, {USB_DEVICE(0x041e, 0x4041), ST(LZ24BP, Creative_live_motion)}, {USB_DEVICE(0x2770, 0x930b), ST(MI0360, 0)}, {USB_DEVICE(0x2770, 0x930c), ST(MI0360, 0)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, .reset_resume = gspca_resume, #endif }; module_usb_driver(sd_driver);
gpl-2.0
CAFans/android_kernel_lge_msm8974
drivers/bluetooth/hci_vhci.c
2251
6366
/* * * Bluetooth virtual HCI driver * * Copyright (C) 2000-2001 Qualcomm Incorporated * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com> * Copyright (C) 2004-2006 Marcel Holtmann <marcel@holtmann.org> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/miscdevice.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "1.3" static bool amp; struct vhci_data { struct hci_dev *hdev; unsigned long flags; wait_queue_head_t read_wait; struct sk_buff_head readq; }; static int vhci_open_dev(struct hci_dev *hdev) { set_bit(HCI_RUNNING, &hdev->flags); return 0; } static int vhci_close_dev(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags)) return 0; skb_queue_purge(&data->readq); return 0; } static int vhci_flush(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); skb_queue_purge(&data->readq); return 0; } static int vhci_send_frame(struct sk_buff *skb) { struct hci_dev* hdev = (struct hci_dev *) skb->dev; struct vhci_data *data; if (!hdev) { BT_ERR("Frame 
for unknown HCI device (hdev=NULL)"); return -ENODEV; } if (!test_bit(HCI_RUNNING, &hdev->flags)) return -EBUSY; data = hci_get_drvdata(hdev); memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); skb_queue_tail(&data->readq, skb); wake_up_interruptible(&data->read_wait); return 0; } static inline ssize_t vhci_get_user(struct vhci_data *data, const char __user *buf, size_t count) { struct sk_buff *skb; if (count > HCI_MAX_FRAME_SIZE) return -EINVAL; skb = bt_skb_alloc(count, GFP_KERNEL); if (!skb) return -ENOMEM; if (copy_from_user(skb_put(skb, count), buf, count)) { kfree_skb(skb); return -EFAULT; } skb->dev = (void *) data->hdev; bt_cb(skb)->pkt_type = *((__u8 *) skb->data); skb_pull(skb, 1); hci_recv_frame(skb); return count; } static inline ssize_t vhci_put_user(struct vhci_data *data, struct sk_buff *skb, char __user *buf, int count) { char __user *ptr = buf; int len, total = 0; len = min_t(unsigned int, skb->len, count); if (copy_to_user(ptr, skb->data, len)) return -EFAULT; total += len; data->hdev->stat.byte_tx += len; switch (bt_cb(skb)->pkt_type) { case HCI_COMMAND_PKT: data->hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: data->hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: data->hdev->stat.sco_tx++; break; } return total; } static ssize_t vhci_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct vhci_data *data = file->private_data; struct sk_buff *skb; ssize_t ret = 0; while (count) { skb = skb_dequeue(&data->readq); if (skb) { ret = vhci_put_user(data, skb, buf, count); if (ret < 0) skb_queue_head(&data->readq, skb); else kfree_skb(skb); break; } if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } ret = wait_event_interruptible(data->read_wait, !skb_queue_empty(&data->readq)); if (ret < 0) break; } return ret; } static ssize_t vhci_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct vhci_data *data = file->private_data; return vhci_get_user(data, buf, count); } static unsigned int 
vhci_poll(struct file *file, poll_table *wait) { struct vhci_data *data = file->private_data; poll_wait(file, &data->read_wait, wait); if (!skb_queue_empty(&data->readq)) return POLLIN | POLLRDNORM; return POLLOUT | POLLWRNORM; } static int vhci_open(struct inode *inode, struct file *file) { struct vhci_data *data; struct hci_dev *hdev; data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL); if (!data) return -ENOMEM; skb_queue_head_init(&data->readq); init_waitqueue_head(&data->read_wait); hdev = hci_alloc_dev(); if (!hdev) { kfree(data); return -ENOMEM; } data->hdev = hdev; hdev->bus = HCI_VIRTUAL; hci_set_drvdata(hdev, data); if (amp) hdev->dev_type = HCI_AMP; hdev->open = vhci_open_dev; hdev->close = vhci_close_dev; hdev->flush = vhci_flush; hdev->send = vhci_send_frame; if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); kfree(data); hci_free_dev(hdev); return -EBUSY; } file->private_data = data; nonseekable_open(inode, file); return 0; } static int vhci_release(struct inode *inode, struct file *file) { struct vhci_data *data = file->private_data; struct hci_dev *hdev = data->hdev; hci_unregister_dev(hdev); hci_free_dev(hdev); file->private_data = NULL; kfree(data); return 0; } static const struct file_operations vhci_fops = { .owner = THIS_MODULE, .read = vhci_read, .write = vhci_write, .poll = vhci_poll, .open = vhci_open, .release = vhci_release, .llseek = no_llseek, }; static struct miscdevice vhci_miscdev= { .name = "vhci", .fops = &vhci_fops, .minor = MISC_DYNAMIC_MINOR, }; static int __init vhci_init(void) { BT_INFO("Virtual HCI driver ver %s", VERSION); return misc_register(&vhci_miscdev); } static void __exit vhci_exit(void) { misc_deregister(&vhci_miscdev); } module_init(vhci_init); module_exit(vhci_exit); module_param(amp, bool, 0644); MODULE_PARM_DESC(amp, "Create AMP controller device"); MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>"); MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION); MODULE_VERSION(VERSION); 
MODULE_LICENSE("GPL");
gpl-2.0
iamroot12b/kernel
drivers/isdn/hisax/jade_irq.c
2251
6389
/* $Id: jade_irq.c,v 1.7.2.4 2004/02/11 13:21:34 keil Exp $
 *
 * Low level JADE IRQ stuff (derived from original hscx_irq.c)
 *
 * Author       Roland Klabunde
 * Copyright    by Roland Klabunde   <R.Klabunde@Berkom.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

/*
 * Busy-wait (up to ~50us) until the JADE command executor accepts a new
 * command on the matching (transmit or receive) channel, then warn if
 * the controller never became ready.
 */
static inline void
waitforCEC(struct IsdnCardState *cs, int jade, int reg)
{
	int to = 50;
	/* XCEC gates transmit commands, RCEC gates receive commands */
	int mask = (reg == jade_HDLC_XCMD ? jadeSTAR_XCEC : jadeSTAR_RCEC);
	while ((READJADE(cs, jade, jade_HDLC_STAR) & mask) && to) {
		udelay(1);
		to--;
	}
	if (!to)
		printk(KERN_WARNING "HiSax: waitforCEC (jade) timeout\n");
}

/* Wait for transmit FIFO write enable: intentionally a no-op here. */
static inline void
waitforXFW(struct IsdnCardState *cs, int jade)
{
	/* Does not work on older jade versions, don't care */
}

/* Issue a command to the JADE HDLC controller once it is ready. */
static inline void
WriteJADECMDR(struct IsdnCardState *cs, int jade, int reg, u_char data)
{
	waitforCEC(cs, jade, reg);
	WRITEJADE(cs, jade, reg, data);
}

/*
 * Drain <count> bytes from the receive FIFO into the B-channel receive
 * buffer and acknowledge the FIFO with an RMC command.  Oversized
 * packets are dropped (buffer index reset) rather than overflowing.
 */
static void
jade_empty_fifo(struct BCState *bcs, int count)
{
	u_char *ptr;
	struct IsdnCardState *cs = bcs->cs;

	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
		debugl1(cs, "jade_empty_fifo");

	if (bcs->hw.hscx.rcvidx + count > HSCX_BUFMAX) {
		/* would overrun the receive buffer: discard the frame */
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "jade_empty_fifo: incoming packet too large");
		WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_RCMD, jadeRCMD_RMC);
		bcs->hw.hscx.rcvidx = 0;
		return;
	}
	ptr = bcs->hw.hscx.rcvbuf + bcs->hw.hscx.rcvidx;
	bcs->hw.hscx.rcvidx += count;
	READJADEFIFO(cs, bcs->hw.hscx.hscx, ptr, count);
	/* acknowledge: release the FIFO space just read */
	WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_RCMD, jadeRCMD_RMC);
	if (cs->debug & L1_DEB_HSCX_FIFO) {
		char *t = bcs->blog;

		t += sprintf(t, "jade_empty_fifo %c cnt %d",
			     bcs->hw.hscx.hscx ? 'B' : 'A', count);
		QuickHex(t, ptr, count);
		debugl1(cs, "%s", bcs->blog);
	}
}

/*
 * Push the next chunk (at most one 32-byte FIFO) of the pending tx_skb
 * into the transmit FIFO.  XME (message end) is signalled only on the
 * final chunk of an HDLC frame; transparent mode always sets "more".
 */
static void
jade_fill_fifo(struct BCState *bcs)
{
	struct IsdnCardState *cs = bcs->cs;
	int more, count;
	int fifo_size = 32;
	u_char *ptr;

	if ((cs->debug & L1_DEB_HSCX) && !(cs->debug & L1_DEB_HSCX_FIFO))
		debugl1(cs, "jade_fill_fifo");

	if (!bcs->tx_skb)
		return;
	if (bcs->tx_skb->len <= 0)
		return;

	more = (bcs->mode == L1_MODE_TRANS) ? 1 : 0;
	if (bcs->tx_skb->len > fifo_size) {
		/* frame continues beyond this FIFO load */
		more = !0;
		count = fifo_size;
	} else
		count = bcs->tx_skb->len;

	waitforXFW(cs, bcs->hw.hscx.hscx);
	ptr = bcs->tx_skb->data;
	skb_pull(bcs->tx_skb, count);
	bcs->tx_cnt -= count;
	bcs->hw.hscx.count += count;	/* bytes sent of the current frame */
	WRITEJADEFIFO(cs, bcs->hw.hscx.hscx, ptr, count);
	WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_XCMD,
		      more ? jadeXCMD_XF : (jadeXCMD_XF | jadeXCMD_XME));
	if (cs->debug & L1_DEB_HSCX_FIFO) {
		char *t = bcs->blog;

		t += sprintf(t, "jade_fill_fifo %c cnt %d",
			     bcs->hw.hscx.hscx ? 'B' : 'A', count);
		QuickHex(t, ptr, count);
		debugl1(cs, "%s", bcs->blog);
	}
}

/*
 * Per-channel interrupt handler.  Bit layout of <val> (per the checks
 * below): 0x80 = RME (frame complete), 0x40 = RPF (FIFO full, frame
 * continues), 0x10 = XPR (transmit pool ready).
 */
static void
jade_interrupt(struct IsdnCardState *cs, u_char val, u_char jade)
{
	u_char r;
	struct BCState *bcs = cs->bcs + jade;
	struct sk_buff *skb;
	int fifo_size = 32;
	int count;
	int i_jade = (int) jade; /* To satisfy the compiler */

	if (!test_bit(BC_FLG_INIT, &bcs->Flag))
		return;

	if (val & 0x80) {	/* RME */
		r = READJADE(cs, i_jade, jade_HDLC_RSTA);
		if ((r & 0xf0) != 0xa0) {
			/* receive status not "valid frame, CRC ok": log and drop */
			if (!(r & 0x80))
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "JADE %s invalid frame",
						(jade ? "B" : "A"));
			if ((r & 0x40) && bcs->mode)
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "JADE %c RDO mode=%d",
						'A' + jade, bcs->mode);
			if (!(r & 0x20))
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "JADE %c CRC error",
						'A' + jade);
			WriteJADECMDR(cs, jade, jade_HDLC_RCMD, jadeRCMD_RMC);
		} else {
			/* RBCL holds the residual byte count; 0 means a full FIFO */
			count = READJADE(cs, i_jade, jade_HDLC_RBCL) & 0x1F;
			if (count == 0)
				count = fifo_size;
			jade_empty_fifo(bcs, count);
			/* rcvidx - 1: trailing status byte is not payload */
			if ((count = bcs->hw.hscx.rcvidx - 1) > 0) {
				if (cs->debug & L1_DEB_HSCX_FIFO)
					debugl1(cs, "HX Frame %d", count);
				if (!(skb = dev_alloc_skb(count)))
					printk(KERN_WARNING "JADE %s receive out of memory\n",
					       (jade ? "B" : "A"));
				else {
					memcpy(skb_put(skb, count),
					       bcs->hw.hscx.rcvbuf, count);
					skb_queue_tail(&bcs->rqueue, skb);
				}
			}
		}
		bcs->hw.hscx.rcvidx = 0;
		schedule_event(bcs, B_RCVBUFREADY);
	}
	if (val & 0x40) {	/* RPF */
		jade_empty_fifo(bcs, fifo_size);
		if (bcs->mode == L1_MODE_TRANS) {
			/* receive audio data */
			if (!(skb = dev_alloc_skb(fifo_size)))
				printk(KERN_WARNING "HiSax: receive out of memory\n");
			else {
				memcpy(skb_put(skb, fifo_size),
				       bcs->hw.hscx.rcvbuf, fifo_size);
				skb_queue_tail(&bcs->rqueue, skb);
			}
			bcs->hw.hscx.rcvidx = 0;
			schedule_event(bcs, B_RCVBUFREADY);
		}
	}
	if (val & 0x10) {	/* XPR */
		if (bcs->tx_skb) {
			if (bcs->tx_skb->len) {
				/* more of the current frame to send */
				jade_fill_fifo(bcs);
				return;
			} else {
				/* frame done: ack to layer 1 if requested, free skb */
				if (test_bit(FLG_LLI_L1WAKEUP, &bcs->st->lli.flag) &&
				    (PACKET_NOACK != bcs->tx_skb->pkt_type)) {
					u_long flags;
					spin_lock_irqsave(&bcs->aclock, flags);
					bcs->ackcnt += bcs->hw.hscx.count;
					spin_unlock_irqrestore(&bcs->aclock, flags);
					schedule_event(bcs, B_ACKPENDING);
				}
				dev_kfree_skb_irq(bcs->tx_skb);
				bcs->hw.hscx.count = 0;
				bcs->tx_skb = NULL;
			}
		}
		if ((bcs->tx_skb = skb_dequeue(&bcs->squeue))) {
			/* start transmitting the next queued frame */
			bcs->hw.hscx.count = 0;
			test_and_set_bit(BC_FLG_BUSY, &bcs->Flag);
			jade_fill_fifo(bcs);
		} else {
			test_and_clear_bit(BC_FLG_BUSY, &bcs->Flag);
			schedule_event(bcs, B_XMTBUFREADY);
		}
	}
}

/*
 * Error/exception interrupt entry point for one JADE channel: handle
 * RFO (receive overflow) and XDU (transmit data underrun), then forward
 * the normal RME/RPF/XPR bits to jade_interrupt().
 */
static inline void
jade_int_main(struct IsdnCardState *cs, u_char val, int jade)
{
	struct BCState *bcs;
	bcs = cs->bcs + jade;

	if (val & jadeISR_RFO) {
		/* handled with RDO */
		val &= ~jadeISR_RFO;
	}
	if (val & jadeISR_XDU) {
		/* relevant in HDLC mode only */
		/* don't reset XPR here */
		/* mode 1: presumably transparent mode — just keep feeding data */
		if (bcs->mode == 1)
			jade_fill_fifo(bcs);
		else {
			/* Here we lost an TX interrupt, so
			 * restart transmitting the whole frame.
			 */
			if (bcs->tx_skb) {
				skb_push(bcs->tx_skb, bcs->hw.hscx.count);
				bcs->tx_cnt += bcs->hw.hscx.count;
				bcs->hw.hscx.count = 0;
			}
			WriteJADECMDR(cs, bcs->hw.hscx.hscx, jade_HDLC_XCMD,
				      jadeXCMD_XRES);
			if (cs->debug & L1_DEB_WARN)
				debugl1(cs, "JADE %c EXIR %x Lost TX",
					'A' + jade, val);
		}
	}
	if (val & (jadeISR_RME | jadeISR_RPF | jadeISR_XPR)) {
		if (cs->debug & L1_DEB_HSCX)
			debugl1(cs, "JADE %c interrupt %x", 'A' + jade, val);
		jade_interrupt(cs, val, jade);
	}
}
gpl-2.0
Frontier314/frontkernel_kitkat
drivers/usb/gadget/fusb300_udc.c
2507
42556
/* * Fusb300 UDC (USB gadget) * * Copyright (C) 2010 Faraday Technology Corp. * * Author : Yuan-hsin Chen <yhchen@faraday-tech.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include "fusb300_udc.h" MODULE_DESCRIPTION("FUSB300 USB gadget driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Yuan Hsin Chen <yhchen@faraday-tech.com>"); MODULE_ALIAS("platform:fusb300_udc"); #define DRIVER_VERSION "20 October 2010" static const char udc_name[] = "fusb300_udc"; static const char * const fusb300_ep_name[] = { "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7", "ep8", "ep9", "ep10", "ep11", "ep12", "ep13", "ep14", "ep15" }; static void done(struct fusb300_ep *ep, struct fusb300_request *req, int status); static void fusb300_enable_bit(struct fusb300 *fusb300, u32 offset, u32 value) { u32 reg = ioread32(fusb300->reg + offset); reg |= value; iowrite32(reg, fusb300->reg + offset); } static void fusb300_disable_bit(struct fusb300 *fusb300, u32 offset, u32 value) { u32 reg = ioread32(fusb300->reg + offset); reg &= ~value; iowrite32(reg, fusb300->reg + offset); } static void fusb300_ep_setting(struct fusb300_ep *ep, struct fusb300_ep_info info) { ep->epnum = info.epnum; ep->type = 
info.type; } static int fusb300_ep_release(struct fusb300_ep *ep) { if (!ep->epnum) return 0; ep->epnum = 0; ep->stall = 0; ep->wedged = 0; return 0; } static void fusb300_set_fifo_entry(struct fusb300 *fusb300, u32 ep) { u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); val &= ~FUSB300_EPSET1_FIFOENTRY_MSK; val |= FUSB300_EPSET1_FIFOENTRY(FUSB300_FIFO_ENTRY_NUM); iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); } static void fusb300_set_start_entry(struct fusb300 *fusb300, u8 ep) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); u32 start_entry = fusb300->fifo_entry_num * FUSB300_FIFO_ENTRY_NUM; reg &= ~FUSB300_EPSET1_START_ENTRY_MSK ; reg |= FUSB300_EPSET1_START_ENTRY(start_entry); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); if (fusb300->fifo_entry_num == FUSB300_MAX_FIFO_ENTRY) { fusb300->fifo_entry_num = 0; fusb300->addrofs = 0; pr_err("fifo entry is over the maximum number!\n"); } else fusb300->fifo_entry_num++; } /* set fusb300_set_start_entry first before fusb300_set_epaddrofs */ static void fusb300_set_epaddrofs(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); reg &= ~FUSB300_EPSET2_ADDROFS_MSK; reg |= FUSB300_EPSET2_ADDROFS(fusb300->addrofs); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); fusb300->addrofs += (info.maxpacket + 7) / 8 * FUSB300_FIFO_ENTRY_NUM; } static void ep_fifo_setting(struct fusb300 *fusb300, struct fusb300_ep_info info) { fusb300_set_fifo_entry(fusb300, info.epnum); fusb300_set_start_entry(fusb300, info.epnum); fusb300_set_epaddrofs(fusb300, info); } static void fusb300_set_eptype(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_TYPE_MSK; reg |= FUSB300_EPSET1_TYPE(info.type); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void fusb300_set_epdir(struct fusb300 
*fusb300, struct fusb300_ep_info info) { u32 reg; if (!info.dir_in) return; reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_DIR_MSK; reg |= FUSB300_EPSET1_DIRIN; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void fusb300_set_ep_active(struct fusb300 *fusb300, u8 ep) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); reg |= FUSB300_EPSET1_ACTEN; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); } static void fusb300_set_epmps(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); reg &= ~FUSB300_EPSET2_MPS_MSK; reg |= FUSB300_EPSET2_MPS(info.maxpacket); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); } static void fusb300_set_interval(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_INTERVAL(0x7); reg |= FUSB300_EPSET1_INTERVAL(info.interval); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void fusb300_set_bwnum(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_BWNUM(0x3); reg |= FUSB300_EPSET1_BWNUM(info.bw_num); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void set_ep_reg(struct fusb300 *fusb300, struct fusb300_ep_info info) { fusb300_set_eptype(fusb300, info); fusb300_set_epdir(fusb300, info); fusb300_set_epmps(fusb300, info); if (info.interval) fusb300_set_interval(fusb300, info); if (info.bw_num) fusb300_set_bwnum(fusb300, info); fusb300_set_ep_active(fusb300, info.epnum); } static int config_ep(struct fusb300_ep *ep, const struct usb_endpoint_descriptor *desc) { struct fusb300 *fusb300 = ep->fusb300; struct fusb300_ep_info info; ep->desc = desc; info.interval = 0; info.addrofs = 0; info.bw_num = 0; info.type = desc->bmAttributes & 
USB_ENDPOINT_XFERTYPE_MASK; info.dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0; info.maxpacket = le16_to_cpu(desc->wMaxPacketSize); info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; if ((info.type == USB_ENDPOINT_XFER_INT) || (info.type == USB_ENDPOINT_XFER_ISOC)) { info.interval = desc->bInterval; if (info.type == USB_ENDPOINT_XFER_ISOC) info.bw_num = ((desc->wMaxPacketSize & 0x1800) >> 11); } ep_fifo_setting(fusb300, info); set_ep_reg(fusb300, info); fusb300_ep_setting(ep, info); fusb300->ep[info.epnum] = ep; return 0; } static int fusb300_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct fusb300_ep *ep; ep = container_of(_ep, struct fusb300_ep, ep); if (ep->fusb300->reenum) { ep->fusb300->fifo_entry_num = 0; ep->fusb300->addrofs = 0; ep->fusb300->reenum = 0; } return config_ep(ep, desc); } static int fusb300_disable(struct usb_ep *_ep) { struct fusb300_ep *ep; struct fusb300_request *req; unsigned long flags; ep = container_of(_ep, struct fusb300_ep, ep); BUG_ON(!ep); while (!list_empty(&ep->queue)) { req = list_entry(ep->queue.next, struct fusb300_request, queue); spin_lock_irqsave(&ep->fusb300->lock, flags); done(ep, req, -ECONNRESET); spin_unlock_irqrestore(&ep->fusb300->lock, flags); } return fusb300_ep_release(ep); } static struct usb_request *fusb300_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { struct fusb300_request *req; req = kzalloc(sizeof(struct fusb300_request), gfp_flags); if (!req) return NULL; INIT_LIST_HEAD(&req->queue); return &req->req; } static void fusb300_free_request(struct usb_ep *_ep, struct usb_request *_req) { struct fusb300_request *req; req = container_of(_req, struct fusb300_request, req); kfree(req); } static int enable_fifo_int(struct fusb300_ep *ep) { struct fusb300 *fusb300 = ep->fusb300; if (ep->epnum) { fusb300_enable_bit(fusb300, FUSB300_OFFSET_IGER0, FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum)); } else { pr_err("can't enable_fifo_int ep0\n"); return -EINVAL; } 
return 0; } static int disable_fifo_int(struct fusb300_ep *ep) { struct fusb300 *fusb300 = ep->fusb300; if (ep->epnum) { fusb300_disable_bit(fusb300, FUSB300_OFFSET_IGER0, FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum)); } else { pr_err("can't disable_fifo_int ep0\n"); return -EINVAL; } return 0; } static void fusb300_set_cxlen(struct fusb300 *fusb300, u32 length) { u32 reg; reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR); reg &= ~FUSB300_CSR_LEN_MSK; reg |= FUSB300_CSR_LEN(length); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_CSR); } /* write data to cx fifo */ static void fusb300_wrcxf(struct fusb300_ep *ep, struct fusb300_request *req) { int i = 0; u8 *tmp; u32 data; struct fusb300 *fusb300 = ep->fusb300; u32 length = req->req.length - req->req.actual; tmp = req->req.buf + req->req.actual; if (length > SS_CTL_MAX_PACKET_SIZE) { fusb300_set_cxlen(fusb300, SS_CTL_MAX_PACKET_SIZE); for (i = (SS_CTL_MAX_PACKET_SIZE >> 2); i > 0; i--) { data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 | *(tmp + 3) << 24; iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); tmp += 4; } req->req.actual += SS_CTL_MAX_PACKET_SIZE; } else { /* length is less than max packet size */ fusb300_set_cxlen(fusb300, length); for (i = length >> 2; i > 0; i--) { data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 | *(tmp + 3) << 24; printk(KERN_DEBUG " 0x%x\n", data); iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); tmp = tmp + 4; } switch (length % 4) { case 1: data = *tmp; printk(KERN_DEBUG " 0x%x\n", data); iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); break; case 2: data = *tmp | *(tmp + 1) << 8; printk(KERN_DEBUG " 0x%x\n", data); iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); break; case 3: data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16; printk(KERN_DEBUG " 0x%x\n", data); iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); break; default: break; } req->req.actual += length; } } static void fusb300_set_epnstall(struct fusb300 *fusb300, u8 ep) { fusb300_enable_bit(fusb300, 
FUSB300_OFFSET_EPSET0(ep), FUSB300_EPSET0_STL); } static void fusb300_clear_epnstall(struct fusb300 *fusb300, u8 ep) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); if (reg & FUSB300_EPSET0_STL) { printk(KERN_DEBUG "EP%d stall... Clear!!\n", ep); reg &= ~FUSB300_EPSET0_STL; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); } } static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req) { if (ep->fusb300->ep0_dir) { /* if IN */ if (req->req.length) { fusb300_wrcxf(ep, req); } else printk(KERN_DEBUG "%s : req->req.length = 0x%x\n", __func__, req->req.length); if ((req->req.length == req->req.actual) || (req->req.actual < ep->ep.maxpacket)) done(ep, req, 0); } else { /* OUT */ if (!req->req.length) done(ep, req, 0); else fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER1, FUSB300_IGER1_CX_OUT_INT); } } static int fusb300_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct fusb300_ep *ep; struct fusb300_request *req; unsigned long flags; int request = 0; ep = container_of(_ep, struct fusb300_ep, ep); req = container_of(_req, struct fusb300_request, req); if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; spin_lock_irqsave(&ep->fusb300->lock, flags); if (list_empty(&ep->queue)) request = 1; list_add_tail(&req->queue, &ep->queue); req->req.actual = 0; req->req.status = -EINPROGRESS; if (ep->desc == NULL) /* ep0 */ ep0_queue(ep, req); else if (request && !ep->stall) enable_fifo_int(ep); spin_unlock_irqrestore(&ep->fusb300->lock, flags); return 0; } static int fusb300_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct fusb300_ep *ep; struct fusb300_request *req; unsigned long flags; ep = container_of(_ep, struct fusb300_ep, ep); req = container_of(_req, struct fusb300_request, req); spin_lock_irqsave(&ep->fusb300->lock, flags); if (!list_empty(&ep->queue)) done(ep, req, -ECONNRESET); spin_unlock_irqrestore(&ep->fusb300->lock, flags); return 0; } static int 
fusb300_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge) { struct fusb300_ep *ep; struct fusb300 *fusb300; unsigned long flags; int ret = 0; ep = container_of(_ep, struct fusb300_ep, ep); fusb300 = ep->fusb300; spin_lock_irqsave(&ep->fusb300->lock, flags); if (!list_empty(&ep->queue)) { ret = -EAGAIN; goto out; } if (value) { fusb300_set_epnstall(fusb300, ep->epnum); ep->stall = 1; if (wedge) ep->wedged = 1; } else { fusb300_clear_epnstall(fusb300, ep->epnum); ep->stall = 0; ep->wedged = 0; } out: spin_unlock_irqrestore(&ep->fusb300->lock, flags); return ret; } static int fusb300_set_halt(struct usb_ep *_ep, int value) { return fusb300_set_halt_and_wedge(_ep, value, 0); } static int fusb300_set_wedge(struct usb_ep *_ep) { return fusb300_set_halt_and_wedge(_ep, 1, 1); } static void fusb300_fifo_flush(struct usb_ep *_ep) { } static struct usb_ep_ops fusb300_ep_ops = { .enable = fusb300_enable, .disable = fusb300_disable, .alloc_request = fusb300_alloc_request, .free_request = fusb300_free_request, .queue = fusb300_queue, .dequeue = fusb300_dequeue, .set_halt = fusb300_set_halt, .fifo_flush = fusb300_fifo_flush, .set_wedge = fusb300_set_wedge, }; /*****************************************************************************/ static void fusb300_clear_int(struct fusb300 *fusb300, u32 offset, u32 value) { iowrite32(value, fusb300->reg + offset); } static void fusb300_reset(void) { } static void fusb300_set_cxstall(struct fusb300 *fusb300) { fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR, FUSB300_CSR_STL); } static void fusb300_set_cxdone(struct fusb300 *fusb300) { fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR, FUSB300_CSR_DONE); } /* read data from cx fifo */ void fusb300_rdcxf(struct fusb300 *fusb300, u8 *buffer, u32 length) { int i = 0; u8 *tmp; u32 data; tmp = buffer; for (i = (length >> 2); i > 0; i--) { data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); printk(KERN_DEBUG " 0x%x\n", data); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; *(tmp + 
2) = (data >> 16) & 0xFF; *(tmp + 3) = (data >> 24) & 0xFF; tmp = tmp + 4; } switch (length % 4) { case 1: data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); printk(KERN_DEBUG " 0x%x\n", data); *tmp = data & 0xFF; break; case 2: data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); printk(KERN_DEBUG " 0x%x\n", data); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; break; case 3: data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); printk(KERN_DEBUG " 0x%x\n", data); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; *(tmp + 2) = (data >> 16) & 0xFF; break; default: break; } } #if 0 static void fusb300_dbg_fifo(struct fusb300_ep *ep, u8 entry, u16 length) { u32 reg; u32 i = 0; u32 j = 0; reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM); reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) | FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG); reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) | FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG); iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM); for (i = 0; i < (length >> 2); i++) { if (i * 4 == 1024) break; reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i * 4); printk(KERN_DEBUG" 0x%-8x", reg); j++; if ((j % 4) == 0) printk(KERN_DEBUG "\n"); } if (length % 4) { reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i * 4); printk(KERN_DEBUG " 0x%x\n", reg); } if ((j % 4) != 0) printk(KERN_DEBUG "\n"); fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM, FUSB300_GTM_TST_FIFO_DEG); } static void fusb300_cmp_dbg_fifo(struct fusb300_ep *ep, u8 entry, u16 length, u8 *golden) { u32 reg; u32 i = 0; u32 golden_value; u8 *tmp; tmp = golden; printk(KERN_DEBUG "fusb300_cmp_dbg_fifo (entry %d) : start\n", entry); reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM); reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) | FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG); reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) | FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG); iowrite32(reg, 
ep->fusb300->reg + FUSB300_OFFSET_GTM); for (i = 0; i < (length >> 2); i++) { if (i * 4 == 1024) break; golden_value = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 | *(tmp + 3) << 24; reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4); if (reg != golden_value) { printk(KERN_DEBUG "0x%x : ", (u32)(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4)); printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n", golden_value, reg); } tmp += 4; } switch (length % 4) { case 1: golden_value = *tmp; case 2: golden_value = *tmp | *(tmp + 1) << 8; case 3: golden_value = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16; default: break; reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4); if (reg != golden_value) { printk(KERN_DEBUG "0x%x:", (u32)(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4)); printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n", golden_value, reg); } } printk(KERN_DEBUG "fusb300_cmp_dbg_fifo : end\n"); fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM, FUSB300_GTM_TST_FIFO_DEG); } #endif static void fusb300_rdfifo(struct fusb300_ep *ep, struct fusb300_request *req, u32 length) { int i = 0; u8 *tmp; u32 data, reg; struct fusb300 *fusb300 = ep->fusb300; tmp = req->req.buf + req->req.actual; req->req.actual += length; if (req->req.actual > req->req.length) printk(KERN_DEBUG "req->req.actual > req->req.length\n"); for (i = (length >> 2); i > 0; i--) { data = ioread32(fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; *(tmp + 2) = (data >> 16) & 0xFF; *(tmp + 3) = (data >> 24) & 0xFF; tmp = tmp + 4; } switch (length % 4) { case 1: data = ioread32(fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); *tmp = data & 0xFF; break; case 2: data = ioread32(fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; break; case 3: data = ioread32(fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; *(tmp + 2) 
= (data >> 16) & 0xFF; break; default: break; } do { reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1); reg &= FUSB300_IGR1_SYNF0_EMPTY_INT; if (i) printk(KERN_INFO "sync fifo is not empty!\n"); i++; } while (!reg); } /* write data to fifo */ static void fusb300_wrfifo(struct fusb300_ep *ep, struct fusb300_request *req) { int i = 0; u8 *tmp; u32 data, reg; struct fusb300 *fusb300 = ep->fusb300; tmp = req->req.buf; req->req.actual = req->req.length; for (i = (req->req.length >> 2); i > 0; i--) { data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 | *(tmp + 3) << 24; iowrite32(data, fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); tmp += 4; } switch (req->req.length % 4) { case 1: data = *tmp; iowrite32(data, fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); break; case 2: data = *tmp | *(tmp + 1) << 8; iowrite32(data, fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); break; case 3: data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16; iowrite32(data, fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); break; default: break; } do { reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1); reg &= FUSB300_IGR1_SYNF0_EMPTY_INT; if (i) printk(KERN_INFO"sync fifo is not empty!\n"); i++; } while (!reg); } static u8 fusb300_get_epnstall(struct fusb300 *fusb300, u8 ep) { u8 value; u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); value = reg & FUSB300_EPSET0_STL; return value; } static u8 fusb300_get_cxstall(struct fusb300 *fusb300) { u8 value; u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR); value = (reg & FUSB300_CSR_STL) >> 1; return value; } static void request_error(struct fusb300 *fusb300) { fusb300_set_cxstall(fusb300); printk(KERN_DEBUG "request error!!\n"); } static void get_status(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) __releases(fusb300->lock) __acquires(fusb300->lock) { u8 ep; u16 status = 0; u16 w_index = ctrl->wIndex; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: status = 1 << USB_DEVICE_SELF_POWERED; break; case 
USB_RECIP_INTERFACE: status = 0; break; case USB_RECIP_ENDPOINT: ep = w_index & USB_ENDPOINT_NUMBER_MASK; if (ep) { if (fusb300_get_epnstall(fusb300, ep)) status = 1 << USB_ENDPOINT_HALT; } else { if (fusb300_get_cxstall(fusb300)) status = 0; } break; default: request_error(fusb300); return; /* exit */ } fusb300->ep0_data = cpu_to_le16(status); fusb300->ep0_req->buf = &fusb300->ep0_data; fusb300->ep0_req->length = 2; spin_unlock(&fusb300->lock); fusb300_queue(fusb300->gadget.ep0, fusb300->ep0_req, GFP_KERNEL); spin_lock(&fusb300->lock); } static void set_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) { u8 ep; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: fusb300_set_cxdone(fusb300); break; case USB_RECIP_INTERFACE: fusb300_set_cxdone(fusb300); break; case USB_RECIP_ENDPOINT: { u16 w_index = le16_to_cpu(ctrl->wIndex); ep = w_index & USB_ENDPOINT_NUMBER_MASK; if (ep) fusb300_set_epnstall(fusb300, ep); else fusb300_set_cxstall(fusb300); fusb300_set_cxdone(fusb300); } break; default: request_error(fusb300); break; } } static void fusb300_clear_seqnum(struct fusb300 *fusb300, u8 ep) { fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep), FUSB300_EPSET0_CLRSEQNUM); } static void clear_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) { struct fusb300_ep *ep = fusb300->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK]; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: fusb300_set_cxdone(fusb300); break; case USB_RECIP_INTERFACE: fusb300_set_cxdone(fusb300); break; case USB_RECIP_ENDPOINT: if (ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK) { if (ep->wedged) { fusb300_set_cxdone(fusb300); break; } if (ep->stall) { ep->stall = 0; fusb300_clear_seqnum(fusb300, ep->epnum); fusb300_clear_epnstall(fusb300, ep->epnum); if (!list_empty(&ep->queue)) enable_fifo_int(ep); } } fusb300_set_cxdone(fusb300); break; default: request_error(fusb300); break; } } static void fusb300_set_dev_addr(struct fusb300 *fusb300, u16 addr) { 
u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_DAR); reg &= ~FUSB300_DAR_DRVADDR_MSK; reg |= FUSB300_DAR_DRVADDR(addr); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_DAR); } static void set_address(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) { if (ctrl->wValue >= 0x0100) request_error(fusb300); else { fusb300_set_dev_addr(fusb300, ctrl->wValue); fusb300_set_cxdone(fusb300); } } #define UVC_COPY_DESCRIPTORS(mem, src) \ do { \ const struct usb_descriptor_header * const *__src; \ for (__src = src; *__src; ++__src) { \ memcpy(mem, *__src, (*__src)->bLength); \ mem += (*__src)->bLength; \ } \ } while (0) static void fusb300_ep0_complete(struct usb_ep *ep, struct usb_request *req) { } static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) { u8 *p = (u8 *)ctrl; u8 ret = 0; u8 i = 0; fusb300_rdcxf(fusb300, p, 8); fusb300->ep0_dir = ctrl->bRequestType & USB_DIR_IN; fusb300->ep0_length = ctrl->wLength; /* check request */ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (ctrl->bRequest) { case USB_REQ_GET_STATUS: get_status(fusb300, ctrl); break; case USB_REQ_CLEAR_FEATURE: clear_feature(fusb300, ctrl); break; case USB_REQ_SET_FEATURE: set_feature(fusb300, ctrl); break; case USB_REQ_SET_ADDRESS: set_address(fusb300, ctrl); break; case USB_REQ_SET_CONFIGURATION: fusb300_enable_bit(fusb300, FUSB300_OFFSET_DAR, FUSB300_DAR_SETCONFG); /* clear sequence number */ for (i = 1; i <= FUSB300_MAX_NUM_EP; i++) fusb300_clear_seqnum(fusb300, i); fusb300->reenum = 1; ret = 1; break; default: ret = 1; break; } } else ret = 1; return ret; } static void fusb300_set_ep_bycnt(struct fusb300_ep *ep, u32 bycnt) { struct fusb300 *fusb300 = ep->fusb300; u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum)); reg &= ~FUSB300_FFR_BYCNT; reg |= bycnt & FUSB300_FFR_BYCNT; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum)); } static void done(struct fusb300_ep *ep, struct fusb300_request *req, int status) { 
list_del_init(&req->queue); /* don't modify queue heads during completion callback */ if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN) req->req.status = -ESHUTDOWN; else req->req.status = status; spin_unlock(&ep->fusb300->lock); req->req.complete(&ep->ep, &req->req); spin_lock(&ep->fusb300->lock); if (ep->epnum) { disable_fifo_int(ep); if (!list_empty(&ep->queue)) enable_fifo_int(ep); } else fusb300_set_cxdone(ep->fusb300); } void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep, struct fusb300_request *req) { u32 value; u32 reg; /* wait SW owner */ do { reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum)); reg &= FUSB300_EPPRD0_H; } while (reg); iowrite32((u32) req->req.buf, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W1(ep->epnum)); value = FUSB300_EPPRD0_BTC(req->req.length) | FUSB300_EPPRD0_H | FUSB300_EPPRD0_F | FUSB300_EPPRD0_L | FUSB300_EPPRD0_I; iowrite32(value, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum)); iowrite32(0x0, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W2(ep->epnum)); fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_EPPRDRDY, FUSB300_EPPRDR_EP_PRD_RDY(ep->epnum)); } static void fusb300_wait_idma_finished(struct fusb300_ep *ep) { u32 reg; do { reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR1); if ((reg & FUSB300_IGR1_VBUS_CHG_INT) || (reg & FUSB300_IGR1_WARM_RST_INT) || (reg & FUSB300_IGR1_HOT_RST_INT) || (reg & FUSB300_IGR1_USBRST_INT) ) goto IDMA_RESET; reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR0); reg &= FUSB300_IGR0_EPn_PRD_INT(ep->epnum); } while (!reg); fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGR0, FUSB300_IGR0_EPn_PRD_INT(ep->epnum)); IDMA_RESET: fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGER0, FUSB300_IGER0_EEPn_PRD_INT(ep->epnum)); } static void fusb300_set_idma(struct fusb300_ep *ep, struct fusb300_request *req) { dma_addr_t d; u8 *tmp = NULL; d = dma_map_single(NULL, req->req.buf, req->req.length, DMA_TO_DEVICE); if (dma_mapping_error(NULL, d)) { kfree(req->req.buf); printk(KERN_DEBUG 
"dma_mapping_error\n"); } dma_sync_single_for_device(NULL, d, req->req.length, DMA_TO_DEVICE); fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0, FUSB300_IGER0_EEPn_PRD_INT(ep->epnum)); tmp = req->req.buf; req->req.buf = (u8 *)d; fusb300_fill_idma_prdtbl(ep, req); /* check idma is done */ fusb300_wait_idma_finished(ep); req->req.buf = tmp; if (d) dma_unmap_single(NULL, d, req->req.length, DMA_TO_DEVICE); } static void in_ep_fifo_handler(struct fusb300_ep *ep) { struct fusb300_request *req = list_entry(ep->queue.next, struct fusb300_request, queue); if (req->req.length) { #if 0 fusb300_set_ep_bycnt(ep, req->req.length); fusb300_wrfifo(ep, req); #else fusb300_set_idma(ep, req); #endif } done(ep, req, 0); } static void out_ep_fifo_handler(struct fusb300_ep *ep) { struct fusb300 *fusb300 = ep->fusb300; struct fusb300_request *req = list_entry(ep->queue.next, struct fusb300_request, queue); u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum)); u32 length = reg & FUSB300_FFR_BYCNT; fusb300_rdfifo(ep, req, length); /* finish out transfer */ if ((req->req.length == req->req.actual) || (length < ep->ep.maxpacket)) done(ep, req, 0); } static void check_device_mode(struct fusb300 *fusb300) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_GCR); switch (reg & FUSB300_GCR_DEVEN_MSK) { case FUSB300_GCR_DEVEN_SS: fusb300->gadget.speed = USB_SPEED_SUPER; break; case FUSB300_GCR_DEVEN_HS: fusb300->gadget.speed = USB_SPEED_HIGH; break; case FUSB300_GCR_DEVEN_FS: fusb300->gadget.speed = USB_SPEED_FULL; break; default: fusb300->gadget.speed = USB_SPEED_UNKNOWN; break; } printk(KERN_INFO "dev_mode = %d\n", (reg & FUSB300_GCR_DEVEN_MSK)); } static void fusb300_ep0out(struct fusb300 *fusb300) { struct fusb300_ep *ep = fusb300->ep[0]; u32 reg; if (!list_empty(&ep->queue)) { struct fusb300_request *req; req = list_first_entry(&ep->queue, struct fusb300_request, queue); if (req->req.length) fusb300_rdcxf(ep->fusb300, req->req.buf, req->req.length); done(ep, req, 0); reg = 
ioread32(fusb300->reg + FUSB300_OFFSET_IGER1); reg &= ~FUSB300_IGER1_CX_OUT_INT; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_IGER1); } else pr_err("%s : empty queue\n", __func__); } static void fusb300_ep0in(struct fusb300 *fusb300) { struct fusb300_request *req; struct fusb300_ep *ep = fusb300->ep[0]; if ((!list_empty(&ep->queue)) && (fusb300->ep0_dir)) { req = list_entry(ep->queue.next, struct fusb300_request, queue); if (req->req.length) fusb300_wrcxf(ep, req); if ((req->req.length - req->req.actual) < ep->ep.maxpacket) done(ep, req, 0); } else fusb300_set_cxdone(fusb300); } static void fusb300_grp2_handler(void) { } static void fusb300_grp3_handler(void) { } static void fusb300_grp4_handler(void) { } static void fusb300_grp5_handler(void) { } static irqreturn_t fusb300_irq(int irq, void *_fusb300) { struct fusb300 *fusb300 = _fusb300; u32 int_grp1 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1); u32 int_grp1_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1); u32 int_grp0 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR0); u32 int_grp0_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER0); struct usb_ctrlrequest ctrl; u8 in; u32 reg; int i; spin_lock(&fusb300->lock); int_grp1 &= int_grp1_en; int_grp0 &= int_grp0_en; if (int_grp1 & FUSB300_IGR1_WARM_RST_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_WARM_RST_INT); printk(KERN_INFO"fusb300_warmreset\n"); fusb300_reset(); } if (int_grp1 & FUSB300_IGR1_HOT_RST_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_HOT_RST_INT); printk(KERN_INFO"fusb300_hotreset\n"); fusb300_reset(); } if (int_grp1 & FUSB300_IGR1_USBRST_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_USBRST_INT); fusb300_reset(); } /* COMABT_INT has a highest priority */ if (int_grp1 & FUSB300_IGR1_CX_COMABT_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_CX_COMABT_INT); printk(KERN_INFO"fusb300_ep0abt\n"); } if (int_grp1 & FUSB300_IGR1_VBUS_CHG_INT) { fusb300_clear_int(fusb300, 
FUSB300_OFFSET_IGR1, FUSB300_IGR1_VBUS_CHG_INT); printk(KERN_INFO"fusb300_vbus_change\n"); } if (int_grp1 & FUSB300_IGR1_U3_EXIT_FAIL_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U3_EXIT_FAIL_INT); } if (int_grp1 & FUSB300_IGR1_U2_EXIT_FAIL_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U2_EXIT_FAIL_INT); } if (int_grp1 & FUSB300_IGR1_U1_EXIT_FAIL_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U1_EXIT_FAIL_INT); } if (int_grp1 & FUSB300_IGR1_U2_ENTRY_FAIL_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U2_ENTRY_FAIL_INT); } if (int_grp1 & FUSB300_IGR1_U1_ENTRY_FAIL_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U1_ENTRY_FAIL_INT); } if (int_grp1 & FUSB300_IGR1_U3_EXIT_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U3_EXIT_INT); printk(KERN_INFO "FUSB300_IGR1_U3_EXIT_INT\n"); } if (int_grp1 & FUSB300_IGR1_U2_EXIT_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U2_EXIT_INT); printk(KERN_INFO "FUSB300_IGR1_U2_EXIT_INT\n"); } if (int_grp1 & FUSB300_IGR1_U1_EXIT_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U1_EXIT_INT); printk(KERN_INFO "FUSB300_IGR1_U1_EXIT_INT\n"); } if (int_grp1 & FUSB300_IGR1_U3_ENTRY_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U3_ENTRY_INT); printk(KERN_INFO "FUSB300_IGR1_U3_ENTRY_INT\n"); fusb300_enable_bit(fusb300, FUSB300_OFFSET_SSCR1, FUSB300_SSCR1_GO_U3_DONE); } if (int_grp1 & FUSB300_IGR1_U2_ENTRY_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U2_ENTRY_INT); printk(KERN_INFO "FUSB300_IGR1_U2_ENTRY_INT\n"); } if (int_grp1 & FUSB300_IGR1_U1_ENTRY_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U1_ENTRY_INT); printk(KERN_INFO "FUSB300_IGR1_U1_ENTRY_INT\n"); } if (int_grp1 & FUSB300_IGR1_RESM_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_RESM_INT); printk(KERN_INFO "fusb300_resume\n"); } if 
(int_grp1 & FUSB300_IGR1_SUSP_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_SUSP_INT); printk(KERN_INFO "fusb300_suspend\n"); } if (int_grp1 & FUSB300_IGR1_HS_LPM_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_HS_LPM_INT); printk(KERN_INFO "fusb300_HS_LPM_INT\n"); } if (int_grp1 & FUSB300_IGR1_DEV_MODE_CHG_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_DEV_MODE_CHG_INT); check_device_mode(fusb300); } if (int_grp1 & FUSB300_IGR1_CX_COMFAIL_INT) { fusb300_set_cxstall(fusb300); printk(KERN_INFO "fusb300_ep0fail\n"); } if (int_grp1 & FUSB300_IGR1_CX_SETUP_INT) { printk(KERN_INFO "fusb300_ep0setup\n"); if (setup_packet(fusb300, &ctrl)) { spin_unlock(&fusb300->lock); if (fusb300->driver->setup(&fusb300->gadget, &ctrl) < 0) fusb300_set_cxstall(fusb300); spin_lock(&fusb300->lock); } } if (int_grp1 & FUSB300_IGR1_CX_CMDEND_INT) printk(KERN_INFO "fusb300_cmdend\n"); if (int_grp1 & FUSB300_IGR1_CX_OUT_INT) { printk(KERN_INFO "fusb300_cxout\n"); fusb300_ep0out(fusb300); } if (int_grp1 & FUSB300_IGR1_CX_IN_INT) { printk(KERN_INFO "fusb300_cxin\n"); fusb300_ep0in(fusb300); } if (int_grp1 & FUSB300_IGR1_INTGRP5) fusb300_grp5_handler(); if (int_grp1 & FUSB300_IGR1_INTGRP4) fusb300_grp4_handler(); if (int_grp1 & FUSB300_IGR1_INTGRP3) fusb300_grp3_handler(); if (int_grp1 & FUSB300_IGR1_INTGRP2) fusb300_grp2_handler(); if (int_grp0) { for (i = 1; i < FUSB300_MAX_NUM_EP; i++) { if (int_grp0 & FUSB300_IGR0_EPn_FIFO_INT(i)) { reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(i)); in = (reg & FUSB300_EPSET1_DIRIN) ? 
1 : 0; if (in) in_ep_fifo_handler(fusb300->ep[i]); else out_ep_fifo_handler(fusb300->ep[i]); } } } spin_unlock(&fusb300->lock); return IRQ_HANDLED; } static void fusb300_set_u2_timeout(struct fusb300 *fusb300, u32 time) { u32 reg; reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT); reg &= ~0xff; reg |= FUSB300_SSCR2_U2TIMEOUT(time); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT); } static void fusb300_set_u1_timeout(struct fusb300 *fusb300, u32 time) { u32 reg; reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT); reg &= ~(0xff << 8); reg |= FUSB300_SSCR2_U1TIMEOUT(time); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT); } static void init_controller(struct fusb300 *fusb300) { u32 reg; u32 mask = 0; u32 val = 0; /* split on */ mask = val = FUSB300_AHBBCR_S0_SPLIT_ON | FUSB300_AHBBCR_S1_SPLIT_ON; reg = ioread32(fusb300->reg + FUSB300_OFFSET_AHBCR); reg &= ~mask; reg |= val; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_AHBCR); /* enable high-speed LPM */ mask = val = FUSB300_HSCR_HS_LPM_PERMIT; reg = ioread32(fusb300->reg + FUSB300_OFFSET_HSCR); reg &= ~mask; reg |= val; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_HSCR); /*set u1 u2 timmer*/ fusb300_set_u2_timeout(fusb300, 0xff); fusb300_set_u1_timeout(fusb300, 0xff); /* enable all grp1 interrupt */ iowrite32(0xcfffff9f, fusb300->reg + FUSB300_OFFSET_IGER1); } /*------------------------------------------------------------------------*/ static struct fusb300 *the_controller; int usb_gadget_probe_driver(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)) { struct fusb300 *fusb300 = the_controller; int retval; if (!driver || driver->speed < USB_SPEED_FULL || !bind || !driver->setup) return -EINVAL; if (!fusb300) return -ENODEV; if (fusb300->driver) return -EBUSY; /* hook up the driver */ driver->driver.bus = NULL; fusb300->driver = driver; fusb300->gadget.dev.driver = &driver->driver; retval = device_add(&fusb300->gadget.dev); if (retval) { pr_err("device_add error (%d)\n", retval); goto error; } retval = 
bind(&fusb300->gadget); if (retval) { pr_err("bind to driver error (%d)\n", retval); device_del(&fusb300->gadget.dev); goto error; } return 0; error: fusb300->driver = NULL; fusb300->gadget.dev.driver = NULL; return retval; } EXPORT_SYMBOL(usb_gadget_probe_driver); int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) { struct fusb300 *fusb300 = the_controller; if (driver != fusb300->driver || !driver->unbind) return -EINVAL; driver->unbind(&fusb300->gadget); fusb300->gadget.dev.driver = NULL; init_controller(fusb300); device_del(&fusb300->gadget.dev); fusb300->driver = NULL; return 0; } EXPORT_SYMBOL(usb_gadget_unregister_driver); /*--------------------------------------------------------------------------*/ static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active) { return 0; } static struct usb_gadget_ops fusb300_gadget_ops = { .pullup = fusb300_udc_pullup, }; static int __exit fusb300_remove(struct platform_device *pdev) { struct fusb300 *fusb300 = dev_get_drvdata(&pdev->dev); iounmap(fusb300->reg); free_irq(platform_get_irq(pdev, 0), fusb300); fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); kfree(fusb300); return 0; } static int __init fusb300_probe(struct platform_device *pdev) { struct resource *res, *ires, *ires1; void __iomem *reg = NULL; struct fusb300 *fusb300 = NULL; struct fusb300_ep *_ep[FUSB300_MAX_NUM_EP]; int ret = 0; int i; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENODEV; pr_err("platform_get_resource error.\n"); goto clean_up; } ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!ires) { ret = -ENODEV; dev_err(&pdev->dev, "platform_get_resource IORESOURCE_IRQ error.\n"); goto clean_up; } ires1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1); if (!ires1) { ret = -ENODEV; dev_err(&pdev->dev, "platform_get_resource IORESOURCE_IRQ 1 error.\n"); goto clean_up; } reg = ioremap(res->start, resource_size(res)); if (reg == NULL) { ret = -ENOMEM; pr_err("ioremap error.\n"); 
goto clean_up; } /* initialize udc */ fusb300 = kzalloc(sizeof(struct fusb300), GFP_KERNEL); if (fusb300 == NULL) { pr_err("kzalloc error\n"); goto clean_up; } for (i = 0; i < FUSB300_MAX_NUM_EP; i++) { _ep[i] = kzalloc(sizeof(struct fusb300_ep), GFP_KERNEL); if (_ep[i] == NULL) { pr_err("_ep kzalloc error\n"); goto clean_up; } fusb300->ep[i] = _ep[i]; } spin_lock_init(&fusb300->lock); dev_set_drvdata(&pdev->dev, fusb300); fusb300->gadget.ops = &fusb300_gadget_ops; device_initialize(&fusb300->gadget.dev); dev_set_name(&fusb300->gadget.dev, "gadget"); fusb300->gadget.is_dualspeed = 1; fusb300->gadget.dev.parent = &pdev->dev; fusb300->gadget.dev.dma_mask = pdev->dev.dma_mask; fusb300->gadget.dev.release = pdev->dev.release; fusb300->gadget.name = udc_name; fusb300->reg = reg; ret = request_irq(ires->start, fusb300_irq, IRQF_DISABLED | IRQF_SHARED, udc_name, fusb300); if (ret < 0) { pr_err("request_irq error (%d)\n", ret); goto clean_up; } ret = request_irq(ires1->start, fusb300_irq, IRQF_DISABLED | IRQF_SHARED, udc_name, fusb300); if (ret < 0) { pr_err("request_irq1 error (%d)\n", ret); goto clean_up; } INIT_LIST_HEAD(&fusb300->gadget.ep_list); for (i = 0; i < FUSB300_MAX_NUM_EP ; i++) { struct fusb300_ep *ep = fusb300->ep[i]; if (i != 0) { INIT_LIST_HEAD(&fusb300->ep[i]->ep.ep_list); list_add_tail(&fusb300->ep[i]->ep.ep_list, &fusb300->gadget.ep_list); } ep->fusb300 = fusb300; INIT_LIST_HEAD(&ep->queue); ep->ep.name = fusb300_ep_name[i]; ep->ep.ops = &fusb300_ep_ops; ep->ep.maxpacket = HS_BULK_MAX_PACKET_SIZE; } fusb300->ep[0]->ep.maxpacket = HS_CTL_MAX_PACKET_SIZE; fusb300->ep[0]->epnum = 0; fusb300->gadget.ep0 = &fusb300->ep[0]->ep; INIT_LIST_HEAD(&fusb300->gadget.ep0->ep_list); the_controller = fusb300; fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep, GFP_KERNEL); if (fusb300->ep0_req == NULL) goto clean_up3; init_controller(fusb300); dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); return 0; clean_up3: free_irq(ires->start, fusb300); clean_up: 
if (fusb300) { if (fusb300->ep0_req) fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); kfree(fusb300); } if (reg) iounmap(reg); return ret; } static struct platform_driver fusb300_driver = { .remove = __exit_p(fusb300_remove), .driver = { .name = (char *) udc_name, .owner = THIS_MODULE, }, }; static int __init fusb300_udc_init(void) { return platform_driver_probe(&fusb300_driver, fusb300_probe); } module_init(fusb300_udc_init); static void __exit fusb300_udc_cleanup(void) { platform_driver_unregister(&fusb300_driver); } module_exit(fusb300_udc_cleanup);
gpl-2.0
casper-astro/linux_devel
sound/soc/codecs/ad1980.c
2763
7822
/* * ad1980.c -- ALSA Soc AD1980 codec support * * Copyright: Analog Device Inc. * Author: Roy Huang <roy.huang@analog.com> * Cliff Cai <cliff.cai@analog.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ /* * WARNING: * * Because Analog Devices Inc. discontinued the ad1980 sound chip since * Sep. 2009, this ad1980 driver is not maintained, tested and supported * by ADI now. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #include <sound/soc.h> #include "ad1980.h" /* * AD1980 register cache */ static const u16 ad1980_reg[] = { 0x0090, 0x8000, 0x8000, 0x8000, /* 0 - 6 */ 0x0000, 0x0000, 0x8008, 0x8008, /* 8 - e */ 0x8808, 0x8808, 0x0000, 0x8808, /* 10 - 16 */ 0x8808, 0x0000, 0x8000, 0x0000, /* 18 - 1e */ 0x0000, 0x0000, 0x0000, 0x0000, /* 20 - 26 */ 0x03c7, 0x0000, 0xbb80, 0xbb80, /* 28 - 2e */ 0xbb80, 0xbb80, 0x0000, 0x8080, /* 30 - 36 */ 0x8080, 0x2000, 0x0000, 0x0000, /* 38 - 3e */ 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */ 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */ 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */ 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */ 0x8080, 0x0000, 0x0000, 0x0000, /* 60 - 66 */ 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */ 0x0000, 0x0000, 0x1001, 0x0000, /* 70 - 76 */ 0x0000, 0x0000, 0x4144, 0x5370 /* 78 - 7e */ }; static const char *ad1980_rec_sel[] = {"Mic", "CD", "NC", "AUX", "Line", "Stereo Mix", "Mono Mix", "Phone"}; static const struct soc_enum ad1980_cap_src = SOC_ENUM_DOUBLE(AC97_REC_SEL, 8, 0, 7, ad1980_rec_sel); static const struct snd_kcontrol_new ad1980_snd_ac97_controls[] = { SOC_DOUBLE("Master Playback Volume", AC97_MASTER, 8, 0, 
31, 1), SOC_SINGLE("Master Playback Switch", AC97_MASTER, 15, 1, 1), SOC_DOUBLE("Headphone Playback Volume", AC97_HEADPHONE, 8, 0, 31, 1), SOC_SINGLE("Headphone Playback Switch", AC97_HEADPHONE, 15, 1, 1), SOC_DOUBLE("PCM Playback Volume", AC97_PCM, 8, 0, 31, 1), SOC_SINGLE("PCM Playback Switch", AC97_PCM, 15, 1, 1), SOC_DOUBLE("PCM Capture Volume", AC97_REC_GAIN, 8, 0, 31, 0), SOC_SINGLE("PCM Capture Switch", AC97_REC_GAIN, 15, 1, 1), SOC_SINGLE("Mono Playback Volume", AC97_MASTER_MONO, 0, 31, 1), SOC_SINGLE("Mono Playback Switch", AC97_MASTER_MONO, 15, 1, 1), SOC_SINGLE("Phone Capture Volume", AC97_PHONE, 0, 31, 1), SOC_SINGLE("Phone Capture Switch", AC97_PHONE, 15, 1, 1), SOC_SINGLE("Mic Volume", AC97_MIC, 0, 31, 1), SOC_SINGLE("Mic Switch", AC97_MIC, 15, 1, 1), SOC_SINGLE("Stereo Mic Switch", AC97_AD_MISC, 6, 1, 0), SOC_DOUBLE("Line HP Swap Switch", AC97_AD_MISC, 10, 5, 1, 0), SOC_DOUBLE("Surround Playback Volume", AC97_SURROUND_MASTER, 8, 0, 31, 1), SOC_DOUBLE("Surround Playback Switch", AC97_SURROUND_MASTER, 15, 7, 1, 1), SOC_DOUBLE("Center/LFE Playback Volume", AC97_CENTER_LFE_MASTER, 8, 0, 31, 1), SOC_DOUBLE("Center/LFE Playback Switch", AC97_CENTER_LFE_MASTER, 15, 7, 1, 1), SOC_ENUM("Capture Source", ad1980_cap_src), SOC_SINGLE("Mic Boost Switch", AC97_MIC, 6, 1, 0), }; static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg) { u16 *cache = codec->reg_cache; switch (reg) { case AC97_RESET: case AC97_INT_PAGING: case AC97_POWERDOWN: case AC97_EXTENDED_STATUS: case AC97_VENDOR_ID1: case AC97_VENDOR_ID2: return soc_ac97_ops.read(codec->ac97, reg); default: reg = reg >> 1; if (reg >= ARRAY_SIZE(ad1980_reg)) return -EINVAL; return cache[reg]; } } static int ac97_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int val) { u16 *cache = codec->reg_cache; soc_ac97_ops.write(codec->ac97, reg, val); reg = reg >> 1; if (reg < ARRAY_SIZE(ad1980_reg)) cache[reg] = val; return 0; } static struct snd_soc_dai_driver ad1980_dai = { .name = 
"ad1980-hifi", .ac97_control = 1, .playback = { .stream_name = "Playback", .channels_min = 2, .channels_max = 6, .rates = SNDRV_PCM_RATE_48000, .formats = SND_SOC_STD_AC97_FMTS, }, .capture = { .stream_name = "Capture", .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, .formats = SND_SOC_STD_AC97_FMTS, }, }; EXPORT_SYMBOL_GPL(ad1980_dai); static int ad1980_reset(struct snd_soc_codec *codec, int try_warm) { u16 retry_cnt = 0; retry: if (try_warm && soc_ac97_ops.warm_reset) { soc_ac97_ops.warm_reset(codec->ac97); if (ac97_read(codec, AC97_RESET) == 0x0090) return 1; } soc_ac97_ops.reset(codec->ac97); /* Set bit 16slot in register 74h, then every slot will has only 16 * bits. This command is sent out in 20bit mode, in which case the * first nibble of data is eaten by the addr. (Tag is always 16 bit)*/ ac97_write(codec, AC97_AD_SERIAL_CFG, 0x9900); if (ac97_read(codec, AC97_RESET) != 0x0090) goto err; return 0; err: while (retry_cnt++ < 10) goto retry; printk(KERN_ERR "AD1980 AC97 reset failed\n"); return -EIO; } static int ad1980_soc_probe(struct snd_soc_codec *codec) { int ret; u16 vendor_id2; u16 ext_status; printk(KERN_INFO "AD1980 SoC Audio Codec\n"); ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); if (ret < 0) { printk(KERN_ERR "ad1980: failed to register AC97 codec\n"); return ret; } ret = ad1980_reset(codec, 0); if (ret < 0) { printk(KERN_ERR "Failed to reset AD1980: AC97 link error\n"); goto reset_err; } /* Read out vendor ID to make sure it is ad1980 */ if (ac97_read(codec, AC97_VENDOR_ID1) != 0x4144) goto reset_err; vendor_id2 = ac97_read(codec, AC97_VENDOR_ID2); if (vendor_id2 != 0x5370) { if (vendor_id2 != 0x5374) goto reset_err; else printk(KERN_WARNING "ad1980: " "Found AD1981 - only 2/2 IN/OUT Channels " "supported\n"); } /* unmute captures and playbacks volume */ ac97_write(codec, AC97_MASTER, 0x0000); ac97_write(codec, AC97_PCM, 0x0000); ac97_write(codec, AC97_REC_GAIN, 0x0000); ac97_write(codec, AC97_CENTER_LFE_MASTER, 
0x0000); ac97_write(codec, AC97_SURROUND_MASTER, 0x0000); /*power on LFE/CENTER/Surround DACs*/ ext_status = ac97_read(codec, AC97_EXTENDED_STATUS); ac97_write(codec, AC97_EXTENDED_STATUS, ext_status&~0x3800); snd_soc_add_controls(codec, ad1980_snd_ac97_controls, ARRAY_SIZE(ad1980_snd_ac97_controls)); return 0; reset_err: snd_soc_free_ac97_codec(codec); return ret; } static int ad1980_soc_remove(struct snd_soc_codec *codec) { snd_soc_free_ac97_codec(codec); return 0; } static struct snd_soc_codec_driver soc_codec_dev_ad1980 = { .probe = ad1980_soc_probe, .remove = ad1980_soc_remove, .reg_cache_size = ARRAY_SIZE(ad1980_reg), .reg_word_size = sizeof(u16), .reg_cache_default = ad1980_reg, .reg_cache_step = 2, .write = ac97_write, .read = ac97_read, }; static __devinit int ad1980_probe(struct platform_device *pdev) { return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_ad1980, &ad1980_dai, 1); } static int __devexit ad1980_remove(struct platform_device *pdev) { snd_soc_unregister_codec(&pdev->dev); return 0; } static struct platform_driver ad1980_codec_driver = { .driver = { .name = "ad1980", .owner = THIS_MODULE, }, .probe = ad1980_probe, .remove = __devexit_p(ad1980_remove), }; static int __init ad1980_init(void) { return platform_driver_register(&ad1980_codec_driver); } module_init(ad1980_init); static void __exit ad1980_exit(void) { platform_driver_unregister(&ad1980_codec_driver); } module_exit(ad1980_exit); MODULE_DESCRIPTION("ASoC ad1980 driver (Obsolete)"); MODULE_AUTHOR("Roy Huang, Cliff Cai"); MODULE_LICENSE("GPL");
gpl-2.0
omnirom/android_kernel_samsung_smdk4412
drivers/media/video/cx23885/cx23885-vbi.c
3275
6723
/* * Driver for the Conexant CX23885 PCIe bridge * * Copyright (c) 2007 Steven Toth <stoth@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include "cx23885.h" static unsigned int vbibufs = 4; module_param(vbibufs, int, 0644); MODULE_PARM_DESC(vbibufs, "number of vbi buffers, range 2-32"); static unsigned int vbi_debug; module_param(vbi_debug, int, 0644); MODULE_PARM_DESC(vbi_debug, "enable debug messages [vbi]"); #define dprintk(level, fmt, arg...)\ do { if (vbi_debug >= level)\ printk(KERN_DEBUG "%s/0: " fmt, dev->name, ## arg);\ } while (0) /* ------------------------------------------------------------------ */ int cx23885_vbi_fmt(struct file *file, void *priv, struct v4l2_format *f) { struct cx23885_fh *fh = priv; struct cx23885_dev *dev = fh->dev; if (dev->tvnorm & V4L2_STD_525_60) { /* ntsc */ f->fmt.vbi.sampling_rate = 28636363; f->fmt.vbi.start[0] = 10; f->fmt.vbi.start[1] = 273; } else if (dev->tvnorm & V4L2_STD_625_50) { /* pal */ f->fmt.vbi.sampling_rate = 35468950; f->fmt.vbi.start[0] = 7 - 1; f->fmt.vbi.start[1] = 319 - 1; } return 0; } static int cx23885_start_vbi_dma(struct cx23885_dev *dev, struct cx23885_dmaqueue *q, struct cx23885_buffer *buf) { /* setup fifo + format */ 
cx23885_sram_channel_setup(dev, &dev->sram_channels[SRAM_CH02], buf->vb.width, buf->risc.dma); /* reset counter */ q->count = 1; /* enable irqs */ cx23885_irq_add_enable(dev, 0x01); cx_set(VID_A_INT_MSK, 0x000022); /* start dma */ cx_set(DEV_CNTRL2, (1<<5)); cx_set(VID_A_DMA_CTL, 0x00000022); return 0; } static int cx23885_restart_vbi_queue(struct cx23885_dev *dev, struct cx23885_dmaqueue *q) { struct cx23885_buffer *buf; struct list_head *item; if (list_empty(&q->active)) return 0; buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue); dprintk(2, "restart_queue [%p/%d]: restart dma\n", buf, buf->vb.i); cx23885_start_vbi_dma(dev, q, buf); list_for_each(item, &q->active) { buf = list_entry(item, struct cx23885_buffer, vb.queue); buf->count = q->count++; } mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); return 0; } void cx23885_vbi_timeout(unsigned long data) { struct cx23885_dev *dev = (struct cx23885_dev *)data; struct cx23885_dmaqueue *q = &dev->vbiq; struct cx23885_buffer *buf; unsigned long flags; cx23885_sram_channel_dump(dev, &dev->sram_channels[SRAM_CH02]); cx_clear(VID_A_DMA_CTL, 0x22); spin_lock_irqsave(&dev->slock, flags); while (!list_empty(&q->active)) { buf = list_entry(q->active.next, struct cx23885_buffer, vb.queue); list_del(&buf->vb.queue); buf->vb.state = VIDEOBUF_ERROR; wake_up(&buf->vb.done); printk("%s/0: [%p/%d] timeout - dma=0x%08lx\n", dev->name, buf, buf->vb.i, (unsigned long)buf->risc.dma); } cx23885_restart_vbi_queue(dev, q); spin_unlock_irqrestore(&dev->slock, flags); } /* ------------------------------------------------------------------ */ #define VBI_LINE_LENGTH 2048 #define VBI_LINE_COUNT 17 static int vbi_setup(struct videobuf_queue *q, unsigned int *count, unsigned int *size) { *size = VBI_LINE_COUNT * VBI_LINE_LENGTH * 2; if (0 == *count) *count = vbibufs; if (*count < 2) *count = 2; if (*count > 32) *count = 32; return 0; } static int vbi_prepare(struct videobuf_queue *q, struct videobuf_buffer *vb, enum v4l2_field 
field) { struct cx23885_fh *fh = q->priv_data; struct cx23885_dev *dev = fh->dev; struct cx23885_buffer *buf = container_of(vb, struct cx23885_buffer, vb); struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb); unsigned int size; int rc; size = VBI_LINE_COUNT * VBI_LINE_LENGTH * 2; if (0 != buf->vb.baddr && buf->vb.bsize < size) return -EINVAL; if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { buf->vb.width = VBI_LINE_LENGTH; buf->vb.height = VBI_LINE_COUNT; buf->vb.size = size; buf->vb.field = V4L2_FIELD_SEQ_TB; rc = videobuf_iolock(q, &buf->vb, NULL); if (0 != rc) goto fail; cx23885_risc_buffer(dev->pci, &buf->risc, dma->sglist, 0, buf->vb.width * buf->vb.height, buf->vb.width, 0, buf->vb.height); } buf->vb.state = VIDEOBUF_PREPARED; return 0; fail: cx23885_free_buffer(q, buf); return rc; } static void vbi_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct cx23885_buffer *buf = container_of(vb, struct cx23885_buffer, vb); struct cx23885_buffer *prev; struct cx23885_fh *fh = vq->priv_data; struct cx23885_dev *dev = fh->dev; struct cx23885_dmaqueue *q = &dev->vbiq; /* add jump to stopper */ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP | RISC_IRQ1 | RISC_CNT_INC); buf->risc.jmp[1] = cpu_to_le32(q->stopper.dma); buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */ if (list_empty(&q->active)) { list_add_tail(&buf->vb.queue, &q->active); cx23885_start_vbi_dma(dev, q, buf); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; mod_timer(&q->timeout, jiffies+BUFFER_TIMEOUT); dprintk(2, "[%p/%d] vbi_queue - first active\n", buf, buf->vb.i); } else { prev = list_entry(q->active.prev, struct cx23885_buffer, vb.queue); list_add_tail(&buf->vb.queue, &q->active); buf->vb.state = VIDEOBUF_ACTIVE; buf->count = q->count++; prev->risc.jmp[1] = cpu_to_le32(buf->risc.dma); prev->risc.jmp[2] = cpu_to_le32(0); /* Bits 63-32 */ dprintk(2, "[%p/%d] buffer_queue - append to active\n", buf, buf->vb.i); } } static void vbi_release(struct videobuf_queue *q, struct 
videobuf_buffer *vb) { struct cx23885_buffer *buf = container_of(vb, struct cx23885_buffer, vb); cx23885_free_buffer(q, buf); } struct videobuf_queue_ops cx23885_vbi_qops = { .buf_setup = vbi_setup, .buf_prepare = vbi_prepare, .buf_queue = vbi_queue, .buf_release = vbi_release, }; /* ------------------------------------------------------------------ */ /* * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
christiantroy/linux-allwinner
drivers/gpio/max7301.c
3531
2798
/*
 * drivers/gpio/max7301.c
 *
 * Copyright (C) 2006 Juergen Beisert, Pengutronix
 * Copyright (C) 2008 Guennadi Liakhovetski, Pengutronix
 * Copyright (C) 2009 Wolfram Sang, Pengutronix
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Check max730x.c for further details.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/spi/max7301.h>

/*
 * A write to the MAX7301 means one message with one transfer.
 *
 * The 16-bit frame is <R/W=0><7-bit register><8-bit value>.  It is handed
 * to the SPI core as a single in-memory u16; NOTE(review): this assumes the
 * controller shifts out native 16-bit words (bits_per_word is forced to 16
 * in probe) — confirm on controllers that only do 8-bit transfers.
 */
static int max7301_spi_write(struct device *dev, unsigned int reg,
			     unsigned int val)
{
	struct spi_device *spi = to_spi_device(dev);
	u16 word = ((reg & 0x7F) << 8) | (val & 0xFF);

	return spi_write(spi, (const u8 *)&word, sizeof(word));
}

/*
 * A read from the MAX7301 means two transfers; here, one message each.
 *
 * The first message sends the read command (MSB set + register number in
 * the high byte); the second clocks the chip's answer back in.  Returns
 * the 8-bit register value on success or a negative errno from the SPI
 * core on transfer failure.
 */
static int max7301_spi_read(struct device *dev, unsigned int reg)
{
	int ret;
	u16 word;
	struct spi_device *spi = to_spi_device(dev);

	word = 0x8000 | (reg << 8);
	ret = spi_write(spi, (const u8 *)&word, sizeof(word));
	if (ret)
		return ret;
	/*
	 * This relies on the fact, that a transfer with NULL tx_buf shifts out
	 * zero bytes (=NOOP for MAX7301)
	 */
	ret = spi_read(spi, (u8 *)&word, sizeof(word));
	if (ret)
		return ret;
	return word & 0xff;
}

/*
 * SPI-specific probe: force 16-bit words on the bus, allocate the common
 * max730x state, wire in the SPI accessors above and hand off to the
 * generic __max730x_probe() (see max730x.c).
 */
static int __devinit max7301_probe(struct spi_device *spi)
{
	struct max7301 *ts;
	int ret;

	/* bits_per_word cannot be configured in platform data */
	spi->bits_per_word = 16;
	ret = spi_setup(spi);
	if (ret < 0)
		return ret;

	ts = kzalloc(sizeof(struct max7301), GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->read = max7301_spi_read;
	ts->write = max7301_spi_write;
	ts->dev = &spi->dev;

	ret = __max730x_probe(ts);
	if (ret)
		kfree(ts);	/* generic probe failed and did not take ownership */
	return ret;
}

static int __devexit max7301_remove(struct spi_device *spi)
{
	/* Generic teardown also frees the state allocated in probe. */
	return __max730x_remove(&spi->dev);
}

static const struct spi_device_id max7301_id[] = {
	{ "max7301", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, max7301_id);

static struct spi_driver max7301_driver = {
	.driver = {
		.name = "max7301",
		.owner = THIS_MODULE,
	},
	.probe = max7301_probe,
	.remove = __devexit_p(max7301_remove),
	.id_table = max7301_id,
};

static int __init max7301_init(void)
{
	return spi_register_driver(&max7301_driver);
}
/* register after spi postcore initcall and before
 * subsys initcalls that may rely on these GPIOs
 */
subsys_initcall(max7301_init);

static void __exit max7301_exit(void)
{
	spi_unregister_driver(&max7301_driver);
}
module_exit(max7301_exit);

MODULE_AUTHOR("Juergen Beisert, Wolfram Sang");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MAX7301 GPIO-Expander");
gpl-2.0
aatjitra/sgs3jb
arch/sh/mm/tlb-sh3.c
4043
2291
/*
 * arch/sh/mm/tlb-sh3.c
 *
 * SH-3 specific TLB operations
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * Preload the given PTE into the TLB: program PTEH with VPN+ASID and
 * PTEL with the hardware PTE bits, then issue "ldtlb" so the CPU latches
 * the entry.  IRQs are disabled across the sequence because PTEH/PTEL
 * are shared, stateful MMU registers.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags, pteval, vpn;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	/* Set PTEH register */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	__raw_writel(vpn, MMU_PTEH);

	pteval = pte_val(pte);

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
	/* conveniently, we want all the software flags to be 0 anyway */
	__raw_writel(pteval, MMU_PTEL);

	/* Load the TLB */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}

/*
 * Invalidate the TLB entry for one page of one address space by writing
 * the TLB address array with the VALID bit cleared.  Without associative
 * writes every way must be written explicitly.
 */
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long addr, data;
	int i, ways = MMU_NTLB_WAYS;

	/*
	 * NOTE: PTEH.ASID should be set to this MM
	 *       _AND_ we need to write ASID to the array.
	 *
	 * It would be simple if we didn't need to set PTEH.ASID...
	 */
	addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
	data = (page & 0xfffe0000) | asid; /* VALID bit is off */

	if ((current_cpu_data.flags & CPU_HAS_MMU_PAGE_ASSOC)) {
		/* Hardware does an associative lookup for us. */
		addr |= MMU_PAGE_ASSOC_BIT;
		ways = 1;	/* we already know the way .. */
	}

	for (i = 0; i < ways; i++)
		__raw_writel(data, addr + (i << 8));
}

/* Invalidate the entire TLB on this CPU via the MMUCR flush bit. */
void local_flush_tlb_all(void)
{
	unsigned long flags, status;

	/*
	 * Flush all the TLB.
	 *
	 * Write to the MMU control register's bit:
	 *	TF-bit for SH-3, TI-bit for SH-4.
	 *	It's same position, bit #2.
	 */
	local_irq_save(flags);
	status = __raw_readl(MMUCR);
	status |= 0x04;
	__raw_writel(status, MMUCR);
	ctrl_barrier();
	local_irq_restore(flags);
}
gpl-2.0
nspierbundel/amlogic-common-3.0.8
arch/sh/mm/tlb-sh3.c
4043
2291
/*
 * arch/sh/mm/tlb-sh3.c
 *
 * SH-3 specific TLB operations
 *
 * Copyright (C) 1999  Niibe Yutaka
 * Copyright (C) 2002  Paul Mundt
 *
 * Released under the terms of the GNU GPL v2.0.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * Preload the given PTE into the TLB: program PTEH with VPN+ASID and
 * PTEL with the hardware PTE bits, then issue "ldtlb" so the CPU latches
 * the entry.  IRQs are disabled across the sequence because PTEH/PTEL
 * are shared, stateful MMU registers.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	unsigned long flags, pteval, vpn;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (vma && current->active_mm != vma->vm_mm)
		return;

	local_irq_save(flags);

	/* Set PTEH register */
	vpn = (address & MMU_VPN_MASK) | get_asid();
	__raw_writel(vpn, MMU_PTEH);

	pteval = pte_val(pte);

	/* Set PTEL register */
	pteval &= _PAGE_FLAGS_HARDWARE_MASK; /* drop software flags */
	/* conveniently, we want all the software flags to be 0 anyway */
	__raw_writel(pteval, MMU_PTEL);

	/* Load the TLB */
	asm volatile("ldtlb": /* no output */ : /* no input */ : "memory");
	local_irq_restore(flags);
}

/*
 * Invalidate the TLB entry for one page of one address space by writing
 * the TLB address array with the VALID bit cleared.  Without associative
 * writes every way must be written explicitly.
 */
void local_flush_tlb_one(unsigned long asid, unsigned long page)
{
	unsigned long addr, data;
	int i, ways = MMU_NTLB_WAYS;

	/*
	 * NOTE: PTEH.ASID should be set to this MM
	 *       _AND_ we need to write ASID to the array.
	 *
	 * It would be simple if we didn't need to set PTEH.ASID...
	 */
	addr = MMU_TLB_ADDRESS_ARRAY | (page & 0x1F000);
	data = (page & 0xfffe0000) | asid; /* VALID bit is off */

	if ((current_cpu_data.flags & CPU_HAS_MMU_PAGE_ASSOC)) {
		/* Hardware does an associative lookup for us. */
		addr |= MMU_PAGE_ASSOC_BIT;
		ways = 1;	/* we already know the way .. */
	}

	for (i = 0; i < ways; i++)
		__raw_writel(data, addr + (i << 8));
}

/* Invalidate the entire TLB on this CPU via the MMUCR flush bit. */
void local_flush_tlb_all(void)
{
	unsigned long flags, status;

	/*
	 * Flush all the TLB.
	 *
	 * Write to the MMU control register's bit:
	 *	TF-bit for SH-3, TI-bit for SH-4.
	 *	It's same position, bit #2.
	 */
	local_irq_save(flags);
	status = __raw_readl(MMUCR);
	status |= 0x04;
	__raw_writel(status, MMUCR);
	ctrl_barrier();
	local_irq_restore(flags);
}
gpl-2.0
mydongistiny/android_kernel_motorola_shamu
drivers/isdn/hardware/avm/b1.c
4299
21101
/* $Id: b1.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
 *
 * Common module for AVM B1 cards.
 *
 * Copyright 1999 by Carsten Paeth <calle@calle.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/capi.h>
#include <linux/kernelcapi.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <linux/init.h>
#include <asm/uaccess.h>
#include <linux/netdevice.h>
#include <linux/isdn/capilli.h>
#include "avmcard.h"
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>

static char *revision = "$Revision: 1.1.2.2 $";

/* ------------------------------------------------------------- */

MODULE_DESCRIPTION("CAPI4Linux: Common support for active AVM cards");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");

/* ------------------------------------------------------------- */

/*
 * Map a system IRQ number to the value the card's IRQ selection logic
 * expects; zero entries mark IRQs the hardware cannot be routed to.
 */
int b1_irq_table[16] =
{0,
 0,
 0,
 192,				/* irq 3 */
 32,				/* irq 4 */
 160,				/* irq 5 */
 96,				/* irq 6 */
 224,				/* irq 7 */
 0,
 64,				/* irq 9 */
 80,				/* irq 10 */
 208,				/* irq 11 */
 48,				/* irq 12 */
 0,
 0,
 112,				/* irq 15 */
};

/* ------------------------------------------------------------- */

/*
 * Allocate the shared card state plus one avmctrl_info per controller.
 * Returns NULL on allocation failure; on success everything is released
 * again via b1_free_card().
 */
avmcard *b1_alloc_card(int nr_controllers)
{
	avmcard *card;
	avmctrl_info *cinfo;
	int i;

	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card)
		return NULL;

	cinfo = kzalloc(sizeof(*cinfo) * nr_controllers, GFP_KERNEL);
	if (!cinfo) {
		kfree(card);
		return NULL;
	}

	card->ctrlinfo = cinfo;
	for (i = 0; i < nr_controllers; i++) {
		INIT_LIST_HEAD(&cinfo[i].ncci_head);
		cinfo[i].card = card;
	}
	spin_lock_init(&card->lock);
	card->nr_controllers = nr_controllers;

	return card;
}

/* ------------------------------------------------------------- */

/* Counterpart of b1_alloc_card(): frees the controller array and card. */
void b1_free_card(avmcard *card)
{
	kfree(card->ctrlinfo);
	kfree(card);
}

/* ------------------------------------------------------------- */

/*
 * Probe the I/O region for a B1-style card by exercising the status
 * registers and the test bit.  Returns 0 if a card answers as expected,
 * or a small positive number identifying which probe step failed.
 */
int b1_detect(unsigned int base, enum avmcardtype cardtype)
{
	int onoff, i;

	/*
	 * Statusregister 0000 00xx
	 */
	if ((inb(base + B1_INSTAT) & 0xfc)
	    || (inb(base + B1_OUTSTAT) & 0xfc))
		return 1;
	/*
	 * Statusregister 0000 001x
	 */
	b1outp(base, B1_INSTAT, 0x2);	/* enable irq */
	/* b1outp(base, B1_OUTSTAT, 0x2); */
	if ((inb(base + B1_INSTAT) & 0xfe) != 0x2
	    /* || (inb(base + B1_OUTSTAT) & 0xfe) != 0x2 */)
		return 2;
	/*
	 * Statusregister 0000 000x
	 */
	b1outp(base, B1_INSTAT, 0x0);	/* disable irq */
	b1outp(base, B1_OUTSTAT, 0x0);
	if ((inb(base + B1_INSTAT) & 0xfe)
	    || (inb(base + B1_OUTSTAT) & 0xfe))
		return 3;

	/* Toggle the test bit ten times and expect it to read back. */
	for (onoff = !0, i = 0; i < 10; i++) {
		b1_set_test_bit(base, cardtype, onoff);
		if (b1_get_test_bit(base, cardtype) != onoff)
			return 4;
		onoff = !onoff;
	}

	if (cardtype == avm_m1)
		return 0;

	if ((b1_rd_reg(base, B1_STAT1(cardtype)) & 0x0f) != 0x01)
		return 5;

	return 0;
}

/* Read hardware class/revision bytes from the card into the card struct. */
void b1_getrevision(avmcard *card)
{
	card->class = inb(card->port + B1_ANALYSE);
	card->revision = inb(card->port + B1_REVISION);
}

#define FWBUF_SIZE 256
/*
 * Push the t4 firmware image to the card, FWBUF_SIZE bytes at a time.
 * The source may live in user space (t4file->user != 0) or kernel space.
 * Returns 0, -EFAULT on a bad user copy, or -EIO if the card rejects a
 * byte (b1_save_put_byte() failure).
 */
int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
{
	unsigned char buf[FWBUF_SIZE];
	unsigned char *dp;
	int i, left;
	unsigned int base = card->port;

	dp = t4file->data;
	left = t4file->len;
	while (left > FWBUF_SIZE) {
		if (t4file->user) {
			if (copy_from_user(buf, dp, FWBUF_SIZE))
				return -EFAULT;
		} else {
			memcpy(buf, dp, FWBUF_SIZE);
		}
		for (i = 0; i < FWBUF_SIZE; i++)
			if (b1_save_put_byte(base, buf[i]) < 0) {
				printk(KERN_ERR "%s: corrupted firmware file ?\n",
				       card->name);
				return -EIO;
			}
		left -= FWBUF_SIZE;
		dp += FWBUF_SIZE;
	}
	if (left) {
		if (t4file->user) {
			if (copy_from_user(buf, dp, left))
				return -EFAULT;
		} else {
			memcpy(buf, dp, left);
		}
		for (i = 0; i < left; i++)
			if (b1_save_put_byte(base, buf[i]) < 0) {
				printk(KERN_ERR "%s: corrupted firmware file ?\n",
				       card->name);
				return -EIO;
			}
	}
	return 0;
}

/*
 * Send the configuration data to the card.  A header announces the total
 * length, then the payload is streamed as SEND_CONFIG-prefixed groups of
 * four bytes; the final group is zero-padded if the length is not a
 * multiple of four.
 */
int b1_load_config(avmcard *card, capiloaddatapart *config)
{
	unsigned char buf[FWBUF_SIZE];
	unsigned char *dp;
	unsigned int base = card->port;
	int i, j, left;

	dp = config->data;
	left = config->len;
	if (left) {
		b1_put_byte(base, SEND_CONFIG);
		b1_put_word(base, 1);
		b1_put_byte(base, SEND_CONFIG);
		b1_put_word(base, left);
	}
	while (left > FWBUF_SIZE) {
		if (config->user) {
			if (copy_from_user(buf, dp, FWBUF_SIZE))
				return -EFAULT;
		} else {
			memcpy(buf, dp, FWBUF_SIZE);
		}
		for (i = 0; i < FWBUF_SIZE; ) {
			b1_put_byte(base, SEND_CONFIG);
			for (j = 0; j < 4; j++) {
				b1_put_byte(base, buf[i++]);
			}
		}
		left -= FWBUF_SIZE;
		dp += FWBUF_SIZE;
	}
	if (left) {
		if (config->user) {
			if (copy_from_user(buf, dp, left))
				return -EFAULT;
		} else {
			memcpy(buf, dp, left);
		}
		for (i = 0; i < left; ) {
			b1_put_byte(base, SEND_CONFIG);
			for (j = 0; j < 4; j++) {
				if (i < left)
					b1_put_byte(base, buf[i++]);
				else
					b1_put_byte(base, 0);	/* pad last group */
			}
		}
	}
	return 0;
}

/*
 * Check whether the downloaded firmware came up: wait (~2s) for the
 * transmitter to drain, send SEND_POLL and wait (~2s) for RECEIVE_POLL.
 * Returns 1 if the firmware answers, 0 otherwise.
 */
int b1_loaded(avmcard *card)
{
	unsigned int base = card->port;
	unsigned long stop;
	unsigned char ans;
	unsigned long tout = 2;

	for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
		if (b1_tx_empty(base))
			break;
	}
	if (!b1_tx_empty(base)) {
		printk(KERN_ERR "%s: b1_loaded: tx err, corrupted t4 file ?\n",
		       card->name);
		return 0;
	}
	b1_put_byte(base, SEND_POLL);
	for (stop = jiffies + tout * HZ; time_before(jiffies, stop);) {
		if (b1_rx_full(base)) {
			if ((ans = b1_get_byte(base)) == RECEIVE_POLL) {
				return 1;
			}
			printk(KERN_ERR "%s: b1_loaded: got 0x%x, firmware not running\n",
			       card->name, ans);
			return 0;
		}
	}
	printk(KERN_ERR "%s: b1_loaded: firmware not running\n", card->name);
	return 0;
}

/* ------------------------------------------------------------- */

/*
 * capi_ctr load_firmware hook: reset, download t4 firmware and optional
 * configuration, verify the firmware answers, then send SEND_INIT with
 * the CAPI limits.  Returns 0 or a negative errno.
 */
int b1_load_firmware(struct capi_ctr *ctrl, capiloaddata *data)
{
	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
	avmcard *card = cinfo->card;
	unsigned int port = card->port;
	unsigned long flags;
	int retval;

	b1_reset(port);

	if ((retval = b1_load_t4file(card, &data->firmware))) {
		b1_reset(port);
		printk(KERN_ERR "%s: failed to load t4file!!\n",
		       card->name);
		return retval;
	}

	b1_disable_irq(port);

	if (data->configuration.len > 0 && data->configuration.data) {
		if ((retval = b1_load_config(card, &data->configuration))) {
			b1_reset(port);
			printk(KERN_ERR "%s: failed to load config!!\n",
			       card->name);
			return retval;
		}
	}

	if (!b1_loaded(card)) {
		printk(KERN_ERR "%s: failed to load t4file.\n", card->name);
		return -EIO;
	}

	spin_lock_irqsave(&card->lock, flags);
	b1_setinterrupt(port, card->irq, card->cardtype);
	b1_put_byte(port, SEND_INIT);
	b1_put_word(port, CAPI_MAXAPPL);
	b1_put_word(port, AVM_NCCI_PER_CHANNEL * 2);
	b1_put_word(port, ctrl->cnr - 1);
	spin_unlock_irqrestore(&card->lock, flags);

	return 0;
}

/*
 * capi_ctr reset hook: hardware-reset the card, clear the cached version
 * strings, drop all NCCI bookkeeping and mark the controller down.
 */
void b1_reset_ctr(struct capi_ctr *ctrl)
{
	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
	avmcard *card = cinfo->card;
	unsigned int port = card->port;
	unsigned long flags;

	b1_reset(port);
	b1_reset(port);

	memset(cinfo->version, 0, sizeof(cinfo->version));
	spin_lock_irqsave(&card->lock, flags);
	capilib_release(&cinfo->ncci_head);
	spin_unlock_irqrestore(&card->lock, flags);
	capi_ctr_down(ctrl);
}

/*
 * Register a CAPI application with the firmware.  A negative level3cnt
 * means "per B-channel"; 0 defaults to the number of B-channels.
 */
void b1_register_appl(struct capi_ctr *ctrl,
		      u16 appl,
		      capi_register_params *rp)
{
	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
	avmcard *card = cinfo->card;
	unsigned int port = card->port;
	unsigned long flags;
	int nconn, want = rp->level3cnt;

	if (want > 0)
		nconn = want;
	else
		nconn = ctrl->profile.nbchannel * -want;
	if (nconn == 0)
		nconn = ctrl->profile.nbchannel;

	spin_lock_irqsave(&card->lock, flags);
	b1_put_byte(port, SEND_REGISTER);
	b1_put_word(port, appl);
	b1_put_word(port, 1024 * (nconn + 1));	/* message buffer size */
	b1_put_word(port, nconn);
	b1_put_word(port, rp->datablkcnt);
	b1_put_word(port, rp->datablklen);
	spin_unlock_irqrestore(&card->lock, flags);
}

/* Tell the firmware an application is gone and drop its NCCIs. */
void b1_release_appl(struct capi_ctr *ctrl, u16 appl)
{
	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
	avmcard *card = cinfo->card;
	unsigned int port = card->port;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	capilib_release_appl(&cinfo->ncci_head, appl);
	b1_put_byte(port, SEND_RELEASE);
	b1_put_word(port, appl);
	spin_unlock_irqrestore(&card->lock, flags);
}

/*
 * capi_ctr send_message hook.  DATA_B3_REQ gets flow-control accounting
 * via capilib and is sent as header + payload slices; everything else is
 * sent verbatim.  Consumes the skb on success.
 */
u16 b1_send_message(struct capi_ctr *ctrl, struct sk_buff *skb)
{
	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
	avmcard *card = cinfo->card;
	unsigned int port = card->port;
	unsigned long flags;
	u16 len = CAPIMSG_LEN(skb->data);
	u8 cmd = CAPIMSG_COMMAND(skb->data);
	u8 subcmd = CAPIMSG_SUBCOMMAND(skb->data);
	u16 dlen, retval;

	spin_lock_irqsave(&card->lock, flags);
	if (CAPICMD(cmd, subcmd) == CAPI_DATA_B3_REQ) {
		retval = capilib_data_b3_req(&cinfo->ncci_head,
					     CAPIMSG_APPID(skb->data),
					     CAPIMSG_NCCI(skb->data),
					     CAPIMSG_MSGID(skb->data));
		if (retval != CAPI_NOERROR) {
			/* Window full or unknown NCCI: skb NOT consumed here. */
			spin_unlock_irqrestore(&card->lock, flags);
			return retval;
		}

		dlen = CAPIMSG_DATALEN(skb->data);

		b1_put_byte(port, SEND_DATA_B3_REQ);
		b1_put_slice(port, skb->data, len);
		b1_put_slice(port, skb->data + len, dlen);
	} else {
		b1_put_byte(port, SEND_MESSAGE);
		b1_put_slice(port, skb->data, len);
	}
	spin_unlock_irqrestore(&card->lock, flags);

	dev_kfree_skb_any(skb);
	return CAPI_NOERROR;
}

/* ------------------------------------------------------------- */

/*
 * Split the version blob received with RECEIVE_INIT (length-prefixed
 * strings in versionbuf) into cinfo->version[], fill in the controller's
 * serial/profile/version fields and log card capabilities.
 */
void b1_parse_version(avmctrl_info *cinfo)
{
	struct capi_ctr *ctrl = &cinfo->capi_ctrl;
	avmcard *card = cinfo->card;
	capi_profile *profp;
	u8 *dversion;
	u8 flag;
	int i, j;

	/* Default every slot to a valid empty string, then overlay. */
	for (j = 0; j < AVM_MAXVERSION; j++)
		cinfo->version[j] = "\0\0" + 1;
	for (i = 0, j = 0;
	     j < AVM_MAXVERSION && i < cinfo->versionlen;
	     j++, i += cinfo->versionbuf[i] + 1)
		cinfo->version[j] = &cinfo->versionbuf[i + 1];

	strlcpy(ctrl->serial, cinfo->version[VER_SERIAL], sizeof(ctrl->serial));
	memcpy(&ctrl->profile, cinfo->version[VER_PROFILE],
	       sizeof(capi_profile));
	strlcpy(ctrl->manu, "AVM GmbH", sizeof(ctrl->manu));
	dversion = cinfo->version[VER_DRIVER];
	ctrl->version.majorversion = 2;
	ctrl->version.minorversion = 0;
	/* Decode "d.dd-dd"-style driver version digits into BCD nibbles. */
	ctrl->version.majormanuversion = (((dversion[0] - '0') & 0xf) << 4);
	ctrl->version.majormanuversion |= ((dversion[2] - '0') & 0xf);
	ctrl->version.minormanuversion = (dversion[3] - '0') << 4;
	ctrl->version.minormanuversion |=
		(dversion[5] - '0') * 10 + ((dversion[6] - '0') & 0xf);

	profp = &ctrl->profile;

	flag = ((u8 *)(profp->manu))[1];
	switch (flag) {
	case 0: if (cinfo->version[VER_CARDTYPE])
			strcpy(cinfo->cardname, cinfo->version[VER_CARDTYPE]);
		else strcpy(cinfo->cardname, "B1");
		break;
	case 3: strcpy(cinfo->cardname, "PCMCIA B"); break;
	case 4: strcpy(cinfo->cardname, "PCMCIA M1"); break;
	case 5: strcpy(cinfo->cardname, "PCMCIA M2"); break;
	case 6: strcpy(cinfo->cardname, "B1 V3.0"); break;
	case 7: strcpy(cinfo->cardname, "B1 PCI"); break;
	default: sprintf(cinfo->cardname, "AVM?%u", (unsigned int)flag); break;
	}
	printk(KERN_NOTICE "%s: card %d \"%s\" ready.\n",
	       card->name, ctrl->cnr, cinfo->cardname);

	flag = ((u8 *)(profp->manu))[3];
	if (flag)
		printk(KERN_NOTICE "%s: card %d Protocol:%s%s%s%s%s%s%s\n",
		       card->name,
		       ctrl->cnr,
		       (flag & 0x01) ? " DSS1" : "",
		       (flag & 0x02) ? " CT1" : "",
		       (flag & 0x04) ? " VN3" : "",
		       (flag & 0x08) ? " NI1" : "",
		       (flag & 0x10) ? " AUSTEL" : "",
		       (flag & 0x20) ? " ESS" : "",
		       (flag & 0x40) ? " 1TR6" : ""
			);

	flag = ((u8 *)(profp->manu))[5];
	if (flag)
		printk(KERN_NOTICE "%s: card %d Linetype:%s%s%s%s\n",
		       card->name,
		       ctrl->cnr,
		       (flag & 0x01) ? " point to point" : "",
		       (flag & 0x02) ? " point to multipoint" : "",
		       (flag & 0x08) ? " leased line without D-channel" : "",
		       (flag & 0x04) ? " leased line with D-channel" : ""
			);
}

/* ------------------------------------------------------------- */

/*
 * Interrupt handler: read one command byte plus its payload from the
 * card and dispatch.  card->lock is taken up front and deliberately
 * dropped before calling back into the CAPI core
 * (capi_ctr_handle_message() and friends) — each case must unlock on
 * every path.
 */
irqreturn_t b1_interrupt(int interrupt, void *devptr)
{
	avmcard *card = devptr;
	avmctrl_info *cinfo = &card->ctrlinfo[0];
	struct capi_ctr *ctrl = &cinfo->capi_ctrl;
	unsigned char b1cmd;
	struct sk_buff *skb;

	unsigned ApplId;
	unsigned MsgLen;
	unsigned DataB3Len;
	unsigned NCCI;
	unsigned WindowSize;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	if (!b1_rx_full(card->port)) {
		/* Not our interrupt (shared line). */
		spin_unlock_irqrestore(&card->lock, flags);
		return IRQ_NONE;
	}

	b1cmd = b1_get_byte(card->port);

	switch (b1cmd) {

	case RECEIVE_DATA_B3_IND:

		ApplId = (unsigned) b1_get_word(card->port);
		MsgLen = b1_get_slice(card->port, card->msgbuf);
		DataB3Len = b1_get_slice(card->port, card->databuf);
		spin_unlock_irqrestore(&card->lock, flags);

		if (MsgLen < 30) { /* not CAPI 64Bit */
			/* Pad the header up to the CAPI 64-bit message size. */
			memset(card->msgbuf + MsgLen, 0, 30 - MsgLen);
			MsgLen = 30;
			CAPIMSG_SETLEN(card->msgbuf, 30);
		}
		if (!(skb = alloc_skb(DataB3Len + MsgLen, GFP_ATOMIC))) {
			printk(KERN_ERR "%s: incoming packet dropped\n",
			       card->name);
		} else {
			memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
			memcpy(skb_put(skb, DataB3Len), card->databuf, DataB3Len);
			capi_ctr_handle_message(ctrl, ApplId, skb);
		}
		break;

	case RECEIVE_MESSAGE:

		ApplId = (unsigned) b1_get_word(card->port);
		MsgLen = b1_get_slice(card->port, card->msgbuf);
		if (!(skb = alloc_skb(MsgLen, GFP_ATOMIC))) {
			printk(KERN_ERR "%s: incoming packet dropped\n",
			       card->name);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			memcpy(skb_put(skb, MsgLen), card->msgbuf, MsgLen);
			if (CAPIMSG_CMD(skb->data) == CAPI_DATA_B3_CONF)
				capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
						     CAPIMSG_NCCI(skb->data),
						     CAPIMSG_MSGID(skb->data));
			spin_unlock_irqrestore(&card->lock, flags);
			capi_ctr_handle_message(ctrl, ApplId, skb);
		}
		break;

	case RECEIVE_NEW_NCCI:

		ApplId = b1_get_word(card->port);
		NCCI = b1_get_word(card->port);
		WindowSize = b1_get_word(card->port);
		capilib_new_ncci(&cinfo->ncci_head, ApplId, NCCI, WindowSize);
		spin_unlock_irqrestore(&card->lock, flags);
		break;

	case RECEIVE_FREE_NCCI:

		ApplId = b1_get_word(card->port);
		NCCI = b1_get_word(card->port);
		if (NCCI != 0xffffffff)
			capilib_free_ncci(&cinfo->ncci_head, ApplId, NCCI);
		spin_unlock_irqrestore(&card->lock, flags);
		break;

	case RECEIVE_START:
		/* b1_put_byte(card->port, SEND_POLLACK); */
		spin_unlock_irqrestore(&card->lock, flags);
		capi_ctr_resume_output(ctrl);
		break;

	case RECEIVE_STOP:
		spin_unlock_irqrestore(&card->lock, flags);
		capi_ctr_suspend_output(ctrl);
		break;

	case RECEIVE_INIT:

		cinfo->versionlen = b1_get_slice(card->port, cinfo->versionbuf);
		spin_unlock_irqrestore(&card->lock, flags);
		b1_parse_version(cinfo);
		printk(KERN_INFO "%s: %s-card (%s) now active\n",
		       card->name,
		       cinfo->version[VER_CARDTYPE],
		       cinfo->version[VER_DRIVER]);
		capi_ctr_ready(ctrl);
		break;

	case RECEIVE_TASK_READY:
		ApplId = (unsigned) b1_get_word(card->port);
		MsgLen = b1_get_slice(card->port, card->msgbuf);
		spin_unlock_irqrestore(&card->lock, flags);
		/* NUL-terminate and strip trailing newlines for the log. */
		card->msgbuf[MsgLen] = 0;
		while (MsgLen > 0
		       && (card->msgbuf[MsgLen - 1] == '\n'
			   || card->msgbuf[MsgLen - 1] == '\r')) {
			card->msgbuf[MsgLen - 1] = 0;
			MsgLen--;
		}
		printk(KERN_INFO "%s: task %d \"%s\" ready.\n",
		       card->name, ApplId, card->msgbuf);
		break;

	case RECEIVE_DEBUGMSG:
		MsgLen = b1_get_slice(card->port, card->msgbuf);
		spin_unlock_irqrestore(&card->lock, flags);
		card->msgbuf[MsgLen] = 0;
		while (MsgLen > 0
		       && (card->msgbuf[MsgLen - 1] == '\n'
			   || card->msgbuf[MsgLen - 1] == '\r')) {
			card->msgbuf[MsgLen - 1] = 0;
			MsgLen--;
		}
		printk(KERN_INFO "%s: DEBUG: %s\n", card->name, card->msgbuf);
		break;

	case 0xff:
		spin_unlock_irqrestore(&card->lock, flags);
		printk(KERN_ERR "%s: card removed ?\n", card->name);
		return IRQ_NONE;
	default:
		spin_unlock_irqrestore(&card->lock, flags);
		printk(KERN_ERR "%s: b1_interrupt: 0x%x ???\n",
		       card->name, b1cmd);
		return IRQ_HANDLED;
	}
	return IRQ_HANDLED;
}

/* ------------------------------------------------------------- */

/* /proc single_open show routine: dump card parameters and versions. */
static int b1ctl_proc_show(struct seq_file *m, void *v)
{
	struct capi_ctr *ctrl = m->private;
	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);
	avmcard *card = cinfo->card;
	u8 flag;
	char *s;

	seq_printf(m, "%-16s %s\n", "name", card->name);
	seq_printf(m, "%-16s 0x%x\n", "io", card->port);
	seq_printf(m, "%-16s %d\n", "irq", card->irq);
	switch (card->cardtype) {
	case avm_b1isa: s = "B1 ISA"; break;
	case avm_b1pci: s = "B1 PCI"; break;
	case avm_b1pcmcia: s = "B1 PCMCIA"; break;
	case avm_m1: s = "M1"; break;
	case avm_m2: s = "M2"; break;
	case avm_t1isa: s = "T1 ISA (HEMA)"; break;
	case avm_t1pci: s = "T1 PCI"; break;
	case avm_c4: s = "C4"; break;
	case avm_c2: s = "C2"; break;
	default: s = "???"; break;
	}
	seq_printf(m, "%-16s %s\n", "type", s);
	if (card->cardtype == avm_t1isa)
		seq_printf(m, "%-16s %d\n", "cardnr", card->cardnr);
	if ((s = cinfo->version[VER_DRIVER]) != NULL)
		seq_printf(m, "%-16s %s\n", "ver_driver", s);
	if ((s = cinfo->version[VER_CARDTYPE]) != NULL)
		seq_printf(m, "%-16s %s\n", "ver_cardtype", s);
	if ((s = cinfo->version[VER_SERIAL]) != NULL)
		seq_printf(m, "%-16s %s\n", "ver_serial", s);

	if (card->cardtype != avm_m1) {
		flag = ((u8 *)(ctrl->profile.manu))[3];
		if (flag)
			seq_printf(m, "%-16s%s%s%s%s%s%s%s\n",
				   "protocol",
				   (flag & 0x01) ? " DSS1" : "",
				   (flag & 0x02) ? " CT1" : "",
				   (flag & 0x04) ? " VN3" : "",
				   (flag & 0x08) ? " NI1" : "",
				   (flag & 0x10) ? " AUSTEL" : "",
				   (flag & 0x20) ? " ESS" : "",
				   (flag & 0x40) ? " 1TR6" : ""
				);
	}
	if (card->cardtype != avm_m1) {
		flag = ((u8 *)(ctrl->profile.manu))[5];
		if (flag)
			seq_printf(m, "%-16s%s%s%s%s\n",
				   "linetype",
				   (flag & 0x01) ? " point to point" : "",
				   (flag & 0x02) ? " point to multipoint" : "",
				   (flag & 0x08) ? " leased line without D-channel" : "",
				   (flag & 0x04) ? " leased line with D-channel" : ""
				);
	}
	seq_printf(m, "%-16s %s\n", "cardname", cinfo->cardname);

	return 0;
}

static int b1ctl_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, b1ctl_proc_show, PDE_DATA(inode));
}

const struct file_operations b1ctl_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= b1ctl_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
EXPORT_SYMBOL(b1ctl_proc_fops);

/* ------------------------------------------------------------- */

#ifdef CONFIG_PCI

/*
 * Allocate receive and send DMA buffers for a PCI card.  Returns NULL on
 * failure (with a warning logged).  NOTE(review): the error path frees
 * via p->pcidev, which this function never assigns (kzalloc leaves it
 * NULL) — callers appear to set it afterwards; confirm pci_free_consistent
 * with a NULL device is intended here.
 */
avmcard_dmainfo *
avmcard_dma_alloc(char *name, struct pci_dev *pdev, long rsize, long ssize)
{
	avmcard_dmainfo *p;
	void *buf;

	p = kzalloc(sizeof(avmcard_dmainfo), GFP_KERNEL);
	if (!p) {
		printk(KERN_WARNING "%s: no memory.\n", name);
		goto err;
	}

	p->recvbuf.size = rsize;
	buf = pci_alloc_consistent(pdev, rsize, &p->recvbuf.dmaaddr);
	if (!buf) {
		printk(KERN_WARNING "%s: allocation of receive dma buffer failed.\n", name);
		goto err_kfree;
	}
	p->recvbuf.dmabuf = buf;

	p->sendbuf.size = ssize;
	buf = pci_alloc_consistent(pdev, ssize, &p->sendbuf.dmaaddr);
	if (!buf) {
		printk(KERN_WARNING "%s: allocation of send dma buffer failed.\n", name);
		goto err_free_consistent;
	}
	p->sendbuf.dmabuf = buf;
	skb_queue_head_init(&p->send_queue);

	return p;

err_free_consistent:
	pci_free_consistent(p->pcidev, p->recvbuf.size,
			    p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
err_kfree:
	kfree(p);
err:
	return NULL;
}

/* Counterpart of avmcard_dma_alloc(); also purges any queued skbs. */
void avmcard_dma_free(avmcard_dmainfo *p)
{
	pci_free_consistent(p->pcidev, p->recvbuf.size,
			    p->recvbuf.dmabuf, p->recvbuf.dmaaddr);
	pci_free_consistent(p->pcidev, p->sendbuf.size,
			    p->sendbuf.dmabuf, p->sendbuf.dmaaddr);
	skb_queue_purge(&p->send_queue);
	kfree(p);
}

EXPORT_SYMBOL(avmcard_dma_alloc);
EXPORT_SYMBOL(avmcard_dma_free);

#endif

EXPORT_SYMBOL(b1_irq_table);
EXPORT_SYMBOL(b1_alloc_card);
EXPORT_SYMBOL(b1_free_card);
EXPORT_SYMBOL(b1_detect);
EXPORT_SYMBOL(b1_getrevision);
EXPORT_SYMBOL(b1_load_t4file);
EXPORT_SYMBOL(b1_load_config);
EXPORT_SYMBOL(b1_loaded);
EXPORT_SYMBOL(b1_load_firmware); EXPORT_SYMBOL(b1_reset_ctr); EXPORT_SYMBOL(b1_register_appl); EXPORT_SYMBOL(b1_release_appl); EXPORT_SYMBOL(b1_send_message); EXPORT_SYMBOL(b1_parse_version); EXPORT_SYMBOL(b1_interrupt); static int __init b1_init(void) { char *p; char rev[32]; if ((p = strchr(revision, ':')) != NULL && p[1]) { strlcpy(rev, p + 2, 32); if ((p = strchr(rev, '$')) != NULL && p > rev) *(p - 1) = 0; } else strcpy(rev, "1.0"); printk(KERN_INFO "b1: revision %s\n", rev); return 0; } static void __exit b1_exit(void) { } module_init(b1_init); module_exit(b1_exit);
gpl-2.0
Alex-V2/One_M8_4.4.3_kernel
arch/sparc/kernel/visemul.c
4555
20079
/* visemul.c: Emulation of VIS instructions. * * Copyright (C) 2006 David S. Miller (davem@davemloft.net) */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/thread_info.h> #include <linux/perf_event.h> #include <asm/ptrace.h> #include <asm/pstate.h> #include <asm/fpumacro.h> #include <asm/uaccess.h> #include <asm/cacheflush.h> /* OPF field of various VIS instructions. */ /* 000111011 - four 16-bit packs */ #define FPACK16_OPF 0x03b /* 000111010 - two 32-bit packs */ #define FPACK32_OPF 0x03a /* 000111101 - four 16-bit packs */ #define FPACKFIX_OPF 0x03d /* 001001101 - four 16-bit expands */ #define FEXPAND_OPF 0x04d /* 001001011 - two 32-bit merges */ #define FPMERGE_OPF 0x04b /* 000110001 - 8-by-16-bit partitoned product */ #define FMUL8x16_OPF 0x031 /* 000110011 - 8-by-16-bit upper alpha partitioned product */ #define FMUL8x16AU_OPF 0x033 /* 000110101 - 8-by-16-bit lower alpha partitioned product */ #define FMUL8x16AL_OPF 0x035 /* 000110110 - upper 8-by-16-bit partitioned product */ #define FMUL8SUx16_OPF 0x036 /* 000110111 - lower 8-by-16-bit partitioned product */ #define FMUL8ULx16_OPF 0x037 /* 000111000 - upper 8-by-16-bit partitioned product */ #define FMULD8SUx16_OPF 0x038 /* 000111001 - lower unsigned 8-by-16-bit partitioned product */ #define FMULD8ULx16_OPF 0x039 /* 000101000 - four 16-bit compare; set rd if src1 > src2 */ #define FCMPGT16_OPF 0x028 /* 000101100 - two 32-bit compare; set rd if src1 > src2 */ #define FCMPGT32_OPF 0x02c /* 000100000 - four 16-bit compare; set rd if src1 <= src2 */ #define FCMPLE16_OPF 0x020 /* 000100100 - two 32-bit compare; set rd if src1 <= src2 */ #define FCMPLE32_OPF 0x024 /* 000100010 - four 16-bit compare; set rd if src1 != src2 */ #define FCMPNE16_OPF 0x022 /* 000100110 - two 32-bit compare; set rd if src1 != src2 */ #define FCMPNE32_OPF 0x026 /* 000101010 - four 16-bit compare; set rd if src1 == src2 */ #define FCMPEQ16_OPF 0x02a /* 000101110 - two 32-bit compare; set rd if src1 == src2 */ 
#define FCMPEQ32_OPF 0x02e /* 000000000 - Eight 8-bit edge boundary processing */ #define EDGE8_OPF 0x000 /* 000000001 - Eight 8-bit edge boundary processing, no CC */ #define EDGE8N_OPF 0x001 /* 000000010 - Eight 8-bit edge boundary processing, little-endian */ #define EDGE8L_OPF 0x002 /* 000000011 - Eight 8-bit edge boundary processing, little-endian, no CC */ #define EDGE8LN_OPF 0x003 /* 000000100 - Four 16-bit edge boundary processing */ #define EDGE16_OPF 0x004 /* 000000101 - Four 16-bit edge boundary processing, no CC */ #define EDGE16N_OPF 0x005 /* 000000110 - Four 16-bit edge boundary processing, little-endian */ #define EDGE16L_OPF 0x006 /* 000000111 - Four 16-bit edge boundary processing, little-endian, no CC */ #define EDGE16LN_OPF 0x007 /* 000001000 - Two 32-bit edge boundary processing */ #define EDGE32_OPF 0x008 /* 000001001 - Two 32-bit edge boundary processing, no CC */ #define EDGE32N_OPF 0x009 /* 000001010 - Two 32-bit edge boundary processing, little-endian */ #define EDGE32L_OPF 0x00a /* 000001011 - Two 32-bit edge boundary processing, little-endian, no CC */ #define EDGE32LN_OPF 0x00b /* 000111110 - distance between 8 8-bit components */ #define PDIST_OPF 0x03e /* 000010000 - convert 8-bit 3-D address to blocked byte address */ #define ARRAY8_OPF 0x010 /* 000010010 - convert 16-bit 3-D address to blocked byte address */ #define ARRAY16_OPF 0x012 /* 000010100 - convert 32-bit 3-D address to blocked byte address */ #define ARRAY32_OPF 0x014 /* 000011001 - Set the GSR.MASK field in preparation for a BSHUFFLE */ #define BMASK_OPF 0x019 /* 001001100 - Permute bytes as specified by GSR.MASK */ #define BSHUFFLE_OPF 0x04c #define VIS_OPF_SHIFT 5 #define VIS_OPF_MASK (0x1ff << VIS_OPF_SHIFT) #define RS1(INSN) (((INSN) >> 14) & 0x1f) #define RS2(INSN) (((INSN) >> 0) & 0x1f) #define RD(INSN) (((INSN) >> 25) & 0x1f) static inline void maybe_flush_windows(unsigned int rs1, unsigned int rs2, unsigned int rd, int from_kernel) { if (rs2 >= 16 || rs1 >= 16 || 
rd >= 16) { if (from_kernel != 0) __asm__ __volatile__("flushw"); else flushw_user(); } } static unsigned long fetch_reg(unsigned int reg, struct pt_regs *regs) { unsigned long value; if (reg < 16) return (!reg ? 0 : regs->u_regs[reg]); if (regs->tstate & TSTATE_PRIV) { struct reg_window *win; win = (struct reg_window *)(regs->u_regs[UREG_FP] + STACK_BIAS); value = win->locals[reg - 16]; } else if (test_thread_flag(TIF_32BIT)) { struct reg_window32 __user *win32; win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); get_user(value, &win32->locals[reg - 16]); } else { struct reg_window __user *win; win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); get_user(value, &win->locals[reg - 16]); } return value; } static inline unsigned long __user *__fetch_reg_addr_user(unsigned int reg, struct pt_regs *regs) { BUG_ON(reg < 16); BUG_ON(regs->tstate & TSTATE_PRIV); if (test_thread_flag(TIF_32BIT)) { struct reg_window32 __user *win32; win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP])); return (unsigned long __user *)&win32->locals[reg - 16]; } else { struct reg_window __user *win; win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS); return &win->locals[reg - 16]; } } static inline unsigned long *__fetch_reg_addr_kern(unsigned int reg, struct pt_regs *regs) { BUG_ON(reg >= 16); BUG_ON(regs->tstate & TSTATE_PRIV); return &regs->u_regs[reg]; } static void store_reg(struct pt_regs *regs, unsigned long val, unsigned long rd) { if (rd < 16) { unsigned long *rd_kern = __fetch_reg_addr_kern(rd, regs); *rd_kern = val; } else { unsigned long __user *rd_user = __fetch_reg_addr_user(rd, regs); if (test_thread_flag(TIF_32BIT)) __put_user((u32)val, (u32 __user *)rd_user); else __put_user(val, rd_user); } } static inline unsigned long fpd_regval(struct fpustate *f, unsigned int insn_regnum) { insn_regnum = (((insn_regnum & 1) << 5) | (insn_regnum & 0x1e)); return *(unsigned long *) 
&f->regs[insn_regnum]; } static inline unsigned long *fpd_regaddr(struct fpustate *f, unsigned int insn_regnum) { insn_regnum = (((insn_regnum & 1) << 5) | (insn_regnum & 0x1e)); return (unsigned long *) &f->regs[insn_regnum]; } static inline unsigned int fps_regval(struct fpustate *f, unsigned int insn_regnum) { return f->regs[insn_regnum]; } static inline unsigned int *fps_regaddr(struct fpustate *f, unsigned int insn_regnum) { return &f->regs[insn_regnum]; } struct edge_tab { u16 left, right; }; static struct edge_tab edge8_tab[8] = { { 0xff, 0x80 }, { 0x7f, 0xc0 }, { 0x3f, 0xe0 }, { 0x1f, 0xf0 }, { 0x0f, 0xf8 }, { 0x07, 0xfc }, { 0x03, 0xfe }, { 0x01, 0xff }, }; static struct edge_tab edge8_tab_l[8] = { { 0xff, 0x01 }, { 0xfe, 0x03 }, { 0xfc, 0x07 }, { 0xf8, 0x0f }, { 0xf0, 0x1f }, { 0xe0, 0x3f }, { 0xc0, 0x7f }, { 0x80, 0xff }, }; static struct edge_tab edge16_tab[4] = { { 0xf, 0x8 }, { 0x7, 0xc }, { 0x3, 0xe }, { 0x1, 0xf }, }; static struct edge_tab edge16_tab_l[4] = { { 0xf, 0x1 }, { 0xe, 0x3 }, { 0xc, 0x7 }, { 0x8, 0xf }, }; static struct edge_tab edge32_tab[2] = { { 0x3, 0x2 }, { 0x1, 0x3 }, }; static struct edge_tab edge32_tab_l[2] = { { 0x3, 0x1 }, { 0x2, 0x3 }, }; static void edge(struct pt_regs *regs, unsigned int insn, unsigned int opf) { unsigned long orig_rs1, rs1, orig_rs2, rs2, rd_val; u16 left, right; maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); orig_rs1 = rs1 = fetch_reg(RS1(insn), regs); orig_rs2 = rs2 = fetch_reg(RS2(insn), regs); if (test_thread_flag(TIF_32BIT)) { rs1 = rs1 & 0xffffffff; rs2 = rs2 & 0xffffffff; } switch (opf) { default: case EDGE8_OPF: case EDGE8N_OPF: left = edge8_tab[rs1 & 0x7].left; right = edge8_tab[rs2 & 0x7].right; break; case EDGE8L_OPF: case EDGE8LN_OPF: left = edge8_tab_l[rs1 & 0x7].left; right = edge8_tab_l[rs2 & 0x7].right; break; case EDGE16_OPF: case EDGE16N_OPF: left = edge16_tab[(rs1 >> 1) & 0x3].left; right = edge16_tab[(rs2 >> 1) & 0x3].right; break; case EDGE16L_OPF: case EDGE16LN_OPF: left = 
edge16_tab_l[(rs1 >> 1) & 0x3].left; right = edge16_tab_l[(rs2 >> 1) & 0x3].right; break; case EDGE32_OPF: case EDGE32N_OPF: left = edge32_tab[(rs1 >> 2) & 0x1].left; right = edge32_tab[(rs2 >> 2) & 0x1].right; break; case EDGE32L_OPF: case EDGE32LN_OPF: left = edge32_tab_l[(rs1 >> 2) & 0x1].left; right = edge32_tab_l[(rs2 >> 2) & 0x1].right; break; } if ((rs1 & ~0x7UL) == (rs2 & ~0x7UL)) rd_val = right & left; else rd_val = left; store_reg(regs, rd_val, RD(insn)); switch (opf) { case EDGE8_OPF: case EDGE8L_OPF: case EDGE16_OPF: case EDGE16L_OPF: case EDGE32_OPF: case EDGE32L_OPF: { unsigned long ccr, tstate; __asm__ __volatile__("subcc %1, %2, %%g0\n\t" "rd %%ccr, %0" : "=r" (ccr) : "r" (orig_rs1), "r" (orig_rs2) : "cc"); tstate = regs->tstate & ~(TSTATE_XCC | TSTATE_ICC); regs->tstate = tstate | (ccr << 32UL); } } } static void array(struct pt_regs *regs, unsigned int insn, unsigned int opf) { unsigned long rs1, rs2, rd_val; unsigned int bits, bits_mask; maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); rs1 = fetch_reg(RS1(insn), regs); rs2 = fetch_reg(RS2(insn), regs); bits = (rs2 > 5 ? 
5 : rs2); bits_mask = (1UL << bits) - 1UL; rd_val = ((((rs1 >> 11) & 0x3) << 0) | (((rs1 >> 33) & 0x3) << 2) | (((rs1 >> 55) & 0x1) << 4) | (((rs1 >> 13) & 0xf) << 5) | (((rs1 >> 35) & 0xf) << 9) | (((rs1 >> 56) & 0xf) << 13) | (((rs1 >> 17) & bits_mask) << 17) | (((rs1 >> 39) & bits_mask) << (17 + bits)) | (((rs1 >> 60) & 0xf) << (17 + (2*bits)))); switch (opf) { case ARRAY16_OPF: rd_val <<= 1; break; case ARRAY32_OPF: rd_val <<= 2; } store_reg(regs, rd_val, RD(insn)); } static void bmask(struct pt_regs *regs, unsigned int insn) { unsigned long rs1, rs2, rd_val, gsr; maybe_flush_windows(RS1(insn), RS2(insn), RD(insn), 0); rs1 = fetch_reg(RS1(insn), regs); rs2 = fetch_reg(RS2(insn), regs); rd_val = rs1 + rs2; store_reg(regs, rd_val, RD(insn)); gsr = current_thread_info()->gsr[0] & 0xffffffff; gsr |= rd_val << 32UL; current_thread_info()->gsr[0] = gsr; } static void bshuffle(struct pt_regs *regs, unsigned int insn) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, rd_val; unsigned long bmask, i; bmask = current_thread_info()->gsr[0] >> 32UL; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0UL; for (i = 0; i < 8; i++) { unsigned long which = (bmask >> (i * 4)) & 0xf; unsigned long byte; if (which < 8) byte = (rs1 >> (which * 8)) & 0xff; else byte = (rs2 >> ((which-8)*8)) & 0xff; rd_val |= (byte << (i * 8)); } *fpd_regaddr(f, RD(insn)) = rd_val; } static void pdist(struct pt_regs *regs, unsigned int insn) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, *rd, rd_val; unsigned long i; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd = fpd_regaddr(f, RD(insn)); rd_val = *rd; for (i = 0; i < 8; i++) { s16 s1, s2; s1 = (rs1 >> (56 - (i * 8))) & 0xff; s2 = (rs2 >> (56 - (i * 8))) & 0xff; /* Absolute value of difference. 
*/ s1 -= s2; if (s1 < 0) s1 = ~s1 + 1; rd_val += s1; } *rd = rd_val; } static void pformat(struct pt_regs *regs, unsigned int insn, unsigned int opf) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, gsr, scale, rd_val; gsr = current_thread_info()->gsr[0]; scale = (gsr >> 3) & (opf == FPACK16_OPF ? 0xf : 0x1f); switch (opf) { case FPACK16_OPF: { unsigned long byte; rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; for (byte = 0; byte < 4; byte++) { unsigned int val; s16 src = (rs2 >> (byte * 16UL)) & 0xffffUL; int scaled = src << scale; int from_fixed = scaled >> 7; val = ((from_fixed < 0) ? 0 : (from_fixed > 255) ? 255 : from_fixed); rd_val |= (val << (8 * byte)); } *fps_regaddr(f, RD(insn)) = rd_val; break; } case FPACK32_OPF: { unsigned long word; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = (rs1 << 8) & ~(0x000000ff000000ffUL); for (word = 0; word < 2; word++) { unsigned long val; s32 src = (rs2 >> (word * 32UL)); s64 scaled = src << scale; s64 from_fixed = scaled >> 23; val = ((from_fixed < 0) ? 0 : (from_fixed > 255) ? 255 : from_fixed); rd_val |= (val << (32 * word)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FPACKFIX_OPF: { unsigned long word; rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; for (word = 0; word < 2; word++) { long val; s32 src = (rs2 >> (word * 32UL)); s64 scaled = src << scale; s64 from_fixed = scaled >> 16; val = ((from_fixed < -32768) ? -32768 : (from_fixed > 32767) ? 
32767 : from_fixed); rd_val |= ((val & 0xffff) << (word * 16)); } *fps_regaddr(f, RD(insn)) = rd_val; break; } case FEXPAND_OPF: { unsigned long byte; rs2 = fps_regval(f, RS2(insn)); rd_val = 0; for (byte = 0; byte < 4; byte++) { unsigned long val; u8 src = (rs2 >> (byte * 8)) & 0xff; val = src << 4; rd_val |= (val << (byte * 16)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FPMERGE_OPF: { rs1 = fps_regval(f, RS1(insn)); rs2 = fps_regval(f, RS2(insn)); rd_val = (((rs2 & 0x000000ff) << 0) | ((rs1 & 0x000000ff) << 8) | ((rs2 & 0x0000ff00) << 8) | ((rs1 & 0x0000ff00) << 16) | ((rs2 & 0x00ff0000) << 16) | ((rs1 & 0x00ff0000) << 24) | ((rs2 & 0xff000000) << 24) | ((rs1 & 0xff000000) << 32)); *fpd_regaddr(f, RD(insn)) = rd_val; break; } } } static void pmul(struct pt_regs *regs, unsigned int insn, unsigned int opf) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, rd_val; switch (opf) { case FMUL8x16_OPF: { unsigned long byte; rs1 = fps_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; for (byte = 0; byte < 4; byte++) { u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; s16 src2 = (rs2 >> (byte * 16)) & 0xffff; u32 prod = src1 * src2; u16 scaled = ((prod & 0x00ffff00) >> 8); /* Round up. */ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FMUL8x16AU_OPF: case FMUL8x16AL_OPF: { unsigned long byte; s16 src2; rs1 = fps_regval(f, RS1(insn)); rs2 = fps_regval(f, RS2(insn)); rd_val = 0; src2 = rs2 >> (opf == FMUL8x16AU_OPF ? 16 : 0); for (byte = 0; byte < 4; byte++) { u16 src1 = (rs1 >> (byte * 8)) & 0x00ff; u32 prod = src1 * src2; u16 scaled = ((prod & 0x00ffff00) >> 8); /* Round up. 
*/ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FMUL8SUx16_OPF: case FMUL8ULx16_OPF: { unsigned long byte, ushift; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; ushift = (opf == FMUL8SUx16_OPF) ? 8 : 0; for (byte = 0; byte < 4; byte++) { u16 src1; s16 src2; u32 prod; u16 scaled; src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); src2 = ((rs2 >> (16 * byte)) & 0xffff); prod = src1 * src2; scaled = ((prod & 0x00ffff00) >> 8); /* Round up. */ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << (byte * 16UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } case FMULD8SUx16_OPF: case FMULD8ULx16_OPF: { unsigned long byte, ushift; rs1 = fps_regval(f, RS1(insn)); rs2 = fps_regval(f, RS2(insn)); rd_val = 0; ushift = (opf == FMULD8SUx16_OPF) ? 8 : 0; for (byte = 0; byte < 2; byte++) { u16 src1; s16 src2; u32 prod; u16 scaled; src1 = ((rs1 >> ((16 * byte) + ushift)) & 0x00ff); src2 = ((rs2 >> (16 * byte)) & 0xffff); prod = src1 * src2; scaled = ((prod & 0x00ffff00) >> 8); /* Round up. 
*/ if (prod & 0x80) scaled++; rd_val |= ((scaled & 0xffffUL) << ((byte * 32UL) + 7UL)); } *fpd_regaddr(f, RD(insn)) = rd_val; break; } } } static void pcmp(struct pt_regs *regs, unsigned int insn, unsigned int opf) { struct fpustate *f = FPUSTATE; unsigned long rs1, rs2, rd_val, i; rs1 = fpd_regval(f, RS1(insn)); rs2 = fpd_regval(f, RS2(insn)); rd_val = 0; switch (opf) { case FCMPGT16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a > b) rd_val |= 8 >> i; } break; case FCMPGT32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffffffff; s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a > b) rd_val |= 2 >> i; } break; case FCMPLE16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a <= b) rd_val |= 8 >> i; } break; case FCMPLE32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffffffff; s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a <= b) rd_val |= 2 >> i; } break; case FCMPNE16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a != b) rd_val |= 8 >> i; } break; case FCMPNE32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffffffff; s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a != b) rd_val |= 2 >> i; } break; case FCMPEQ16_OPF: for (i = 0; i < 4; i++) { s16 a = (rs1 >> (i * 16)) & 0xffff; s16 b = (rs2 >> (i * 16)) & 0xffff; if (a == b) rd_val |= 8 >> i; } break; case FCMPEQ32_OPF: for (i = 0; i < 2; i++) { s32 a = (rs1 >> (i * 32)) & 0xffffffff; s32 b = (rs2 >> (i * 32)) & 0xffffffff; if (a == b) rd_val |= 2 >> i; } break; } maybe_flush_windows(0, 0, RD(insn), 0); store_reg(regs, rd_val, RD(insn)); } /* Emulate the VIS instructions which are not implemented in * hardware on Niagara. 
*/ int vis_emul(struct pt_regs *regs, unsigned int insn) { unsigned long pc = regs->tpc; unsigned int opf; BUG_ON(regs->tstate & TSTATE_PRIV); perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); if (test_thread_flag(TIF_32BIT)) pc = (u32)pc; if (get_user(insn, (u32 __user *) pc)) return -EFAULT; save_and_clear_fpu(); opf = (insn & VIS_OPF_MASK) >> VIS_OPF_SHIFT; switch (opf) { default: return -EINVAL; /* Pixel Formatting Instructions. */ case FPACK16_OPF: case FPACK32_OPF: case FPACKFIX_OPF: case FEXPAND_OPF: case FPMERGE_OPF: pformat(regs, insn, opf); break; /* Partitioned Multiply Instructions */ case FMUL8x16_OPF: case FMUL8x16AU_OPF: case FMUL8x16AL_OPF: case FMUL8SUx16_OPF: case FMUL8ULx16_OPF: case FMULD8SUx16_OPF: case FMULD8ULx16_OPF: pmul(regs, insn, opf); break; /* Pixel Compare Instructions */ case FCMPGT16_OPF: case FCMPGT32_OPF: case FCMPLE16_OPF: case FCMPLE32_OPF: case FCMPNE16_OPF: case FCMPNE32_OPF: case FCMPEQ16_OPF: case FCMPEQ32_OPF: pcmp(regs, insn, opf); break; /* Edge Handling Instructions */ case EDGE8_OPF: case EDGE8N_OPF: case EDGE8L_OPF: case EDGE8LN_OPF: case EDGE16_OPF: case EDGE16N_OPF: case EDGE16L_OPF: case EDGE16LN_OPF: case EDGE32_OPF: case EDGE32N_OPF: case EDGE32L_OPF: case EDGE32LN_OPF: edge(regs, insn, opf); break; /* Pixel Component Distance */ case PDIST_OPF: pdist(regs, insn); break; /* Three-Dimensional Array Addressing Instructions */ case ARRAY8_OPF: case ARRAY16_OPF: case ARRAY32_OPF: array(regs, insn, opf); break; /* Byte Mask and Shuffle Instructions */ case BMASK_OPF: bmask(regs, insn); break; case BSHUFFLE_OPF: bshuffle(regs, insn); break; } regs->tpc = regs->tnpc; regs->tnpc += 4; return 0; }
gpl-2.0
tjbrandt/Project-Fjord
fs/btrfs/check-integrity.c
4811
95686
/* * Copyright (C) STRATO AG 2011. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License v2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ /* * This module can be used to catch cases when the btrfs kernel * code executes write requests to the disk that bring the file * system in an inconsistent state. In such a state, a power-loss * or kernel panic event would cause that the data on disk is * lost or at least damaged. * * Code is added that examines all block write requests during * runtime (including writes of the super block). Three rules * are verified and an error is printed on violation of the * rules: * 1. It is not allowed to write a disk block which is * currently referenced by the super block (either directly * or indirectly). * 2. When a super block is written, it is verified that all * referenced (directly or indirectly) blocks fulfill the * following requirements: * 2a. All referenced blocks have either been present when * the file system was mounted, (i.e., they have been * referenced by the super block) or they have been * written since then and the write completion callback * was called and a FLUSH request to the device where * these blocks are located was received and completed. * 2b. All referenced blocks need to have a generation * number which is equal to the parent's number. 
* * One issue that was found using this module was that the log * tree on disk became temporarily corrupted because disk blocks * that had been in use for the log tree had been freed and * reused too early, while being referenced by the written super * block. * * The search term in the kernel log that can be used to filter * on the existence of detected integrity issues is * "btrfs: attempt". * * The integrity check is enabled via mount options. These * mount options are only supported if the integrity check * tool is compiled by defining BTRFS_FS_CHECK_INTEGRITY. * * Example #1, apply integrity checks to all metadata: * mount /dev/sdb1 /mnt -o check_int * * Example #2, apply integrity checks to all metadata and * to data extents: * mount /dev/sdb1 /mnt -o check_int_data * * Example #3, apply integrity checks to all metadata and dump * the tree that the super block references to kernel messages * each time after a super block was written: * mount /dev/sdb1 /mnt -o check_int,check_int_print_mask=263 * * If the integrity check tool is included and activated in * the mount options, plenty of kernel memory is used, and * plenty of additional CPU cycles are spent. Enabling this * functionality is not intended for normal use. In most * cases, unless you are a btrfs developer who needs to verify * the integrity of (super)-block write requests, do not * enable the config option BTRFS_FS_CHECK_INTEGRITY to * include and compile the integrity check tool. 
*/ #include <linux/sched.h> #include <linux/slab.h> #include <linux/buffer_head.h> #include <linux/mutex.h> #include <linux/crc32c.h> #include <linux/genhd.h> #include <linux/blkdev.h> #include "ctree.h" #include "disk-io.h" #include "transaction.h" #include "extent_io.h" #include "volumes.h" #include "print-tree.h" #include "locking.h" #include "check-integrity.h" #define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 #define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 #define BTRFSIC_DEV2STATE_HASHTABLE_SIZE 0x100 #define BTRFSIC_BLOCK_MAGIC_NUMBER 0x14491051 #define BTRFSIC_BLOCK_LINK_MAGIC_NUMBER 0x11070807 #define BTRFSIC_DEV2STATE_MAGIC_NUMBER 0x20111530 #define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300 #define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6) /* in characters, * excluding " [...]" */ #define BTRFSIC_BLOCK_SIZE PAGE_SIZE #define BTRFSIC_GENERATION_UNKNOWN ((u64)-1) /* * The definition of the bitmask fields for the print_mask. * They are specified with the mount option check_integrity_print_mask. 
*/ #define BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE 0x00000001 #define BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION 0x00000002 #define BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE 0x00000004 #define BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE 0x00000008 #define BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH 0x00000010 #define BTRFSIC_PRINT_MASK_END_IO_BIO_BH 0x00000020 #define BTRFSIC_PRINT_MASK_VERBOSE 0x00000040 #define BTRFSIC_PRINT_MASK_VERY_VERBOSE 0x00000080 #define BTRFSIC_PRINT_MASK_INITIAL_TREE 0x00000100 #define BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES 0x00000200 #define BTRFSIC_PRINT_MASK_INITIAL_DATABASE 0x00000400 #define BTRFSIC_PRINT_MASK_NUM_COPIES 0x00000800 #define BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS 0x00001000 struct btrfsic_dev_state; struct btrfsic_state; struct btrfsic_block { u32 magic_num; /* only used for debug purposes */ unsigned int is_metadata:1; /* if it is meta-data, not data-data */ unsigned int is_superblock:1; /* if it is one of the superblocks */ unsigned int is_iodone:1; /* if is done by lower subsystem */ unsigned int iodone_w_error:1; /* error was indicated to endio */ unsigned int never_written:1; /* block was added because it was * referenced, not because it was * written */ unsigned int mirror_num:2; /* large enough to hold * BTRFS_SUPER_MIRROR_MAX */ struct btrfsic_dev_state *dev_state; u64 dev_bytenr; /* key, physical byte num on disk */ u64 logical_bytenr; /* logical byte num on disk */ u64 generation; struct btrfs_disk_key disk_key; /* extra info to print in case of * issues, will not always be correct */ struct list_head collision_resolving_node; /* list node */ struct list_head all_blocks_node; /* list node */ /* the following two lists contain block_link items */ struct list_head ref_to_list; /* list */ struct list_head ref_from_list; /* list */ struct btrfsic_block *next_in_same_bio; void *orig_bio_bh_private; union { bio_end_io_t *bio; bh_end_io_t *bh; } orig_bio_bh_end_io; int submit_bio_bh_rw; u64 flush_gen; /* only valid if 
!never_written */ }; /* * Elements of this type are allocated dynamically and required because * each block object can refer to and can be ref from multiple blocks. * The key to lookup them in the hashtable is the dev_bytenr of * the block ref to plus the one from the block refered from. * The fact that they are searchable via a hashtable and that a * ref_cnt is maintained is not required for the btrfs integrity * check algorithm itself, it is only used to make the output more * beautiful in case that an error is detected (an error is defined * as a write operation to a block while that block is still referenced). */ struct btrfsic_block_link { u32 magic_num; /* only used for debug purposes */ u32 ref_cnt; struct list_head node_ref_to; /* list node */ struct list_head node_ref_from; /* list node */ struct list_head collision_resolving_node; /* list node */ struct btrfsic_block *block_ref_to; struct btrfsic_block *block_ref_from; u64 parent_generation; }; struct btrfsic_dev_state { u32 magic_num; /* only used for debug purposes */ struct block_device *bdev; struct btrfsic_state *state; struct list_head collision_resolving_node; /* list node */ struct btrfsic_block dummy_block_for_bio_bh_flush; u64 last_flush_gen; char name[BDEVNAME_SIZE]; }; struct btrfsic_block_hashtable { struct list_head table[BTRFSIC_BLOCK_HASHTABLE_SIZE]; }; struct btrfsic_block_link_hashtable { struct list_head table[BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE]; }; struct btrfsic_dev_state_hashtable { struct list_head table[BTRFSIC_DEV2STATE_HASHTABLE_SIZE]; }; struct btrfsic_block_data_ctx { u64 start; /* virtual bytenr */ u64 dev_bytenr; /* physical bytenr on device */ u32 len; struct btrfsic_dev_state *dev; char *data; struct buffer_head *bh; /* do not use if set to NULL */ }; /* This structure is used to implement recursion without occupying * any stack space, refer to btrfsic_process_metablock() */ struct btrfsic_stack_frame { u32 magic; u32 nr; int error; int i; int limit_nesting; int num_copies; 
int mirror_num; struct btrfsic_block *block; struct btrfsic_block_data_ctx *block_ctx; struct btrfsic_block *next_block; struct btrfsic_block_data_ctx next_block_ctx; struct btrfs_header *hdr; struct btrfsic_stack_frame *prev; }; /* Some state per mounted filesystem */ struct btrfsic_state { u32 print_mask; int include_extent_data; int csum_size; struct list_head all_blocks_list; struct btrfsic_block_hashtable block_hashtable; struct btrfsic_block_link_hashtable block_link_hashtable; struct btrfs_root *root; u64 max_superblock_generation; struct btrfsic_block *latest_superblock; }; static void btrfsic_block_init(struct btrfsic_block *b); static struct btrfsic_block *btrfsic_block_alloc(void); static void btrfsic_block_free(struct btrfsic_block *b); static void btrfsic_block_link_init(struct btrfsic_block_link *n); static struct btrfsic_block_link *btrfsic_block_link_alloc(void); static void btrfsic_block_link_free(struct btrfsic_block_link *n); static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds); static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void); static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds); static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h); static void btrfsic_block_hashtable_add(struct btrfsic_block *b, struct btrfsic_block_hashtable *h); static void btrfsic_block_hashtable_remove(struct btrfsic_block *b); static struct btrfsic_block *btrfsic_block_hashtable_lookup( struct block_device *bdev, u64 dev_bytenr, struct btrfsic_block_hashtable *h); static void btrfsic_block_link_hashtable_init( struct btrfsic_block_link_hashtable *h); static void btrfsic_block_link_hashtable_add( struct btrfsic_block_link *l, struct btrfsic_block_link_hashtable *h); static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l); static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup( struct block_device *bdev_ref_to, u64 dev_bytenr_ref_to, struct block_device *bdev_ref_from, u64 
dev_bytenr_ref_from, struct btrfsic_block_link_hashtable *h); static void btrfsic_dev_state_hashtable_init( struct btrfsic_dev_state_hashtable *h); static void btrfsic_dev_state_hashtable_add( struct btrfsic_dev_state *ds, struct btrfsic_dev_state_hashtable *h); static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds); static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup( struct block_device *bdev, struct btrfsic_dev_state_hashtable *h); static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void); static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf); static int btrfsic_process_superblock(struct btrfsic_state *state, struct btrfs_fs_devices *fs_devices); static int btrfsic_process_metablock(struct btrfsic_state *state, struct btrfsic_block *block, struct btrfsic_block_data_ctx *block_ctx, struct btrfs_header *hdr, int limit_nesting, int force_iodone_flag); static int btrfsic_create_link_to_next_block( struct btrfsic_state *state, struct btrfsic_block *block, struct btrfsic_block_data_ctx *block_ctx, u64 next_bytenr, int limit_nesting, struct btrfsic_block_data_ctx *next_block_ctx, struct btrfsic_block **next_blockp, int force_iodone_flag, int *num_copiesp, int *mirror_nump, struct btrfs_disk_key *disk_key, u64 parent_generation); static int btrfsic_handle_extent_data(struct btrfsic_state *state, struct btrfsic_block *block, struct btrfsic_block_data_ctx *block_ctx, u32 item_offset, int force_iodone_flag); static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len, struct btrfsic_block_data_ctx *block_ctx_out, int mirror_num); static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr, u32 len, struct block_device *bdev, struct btrfsic_block_data_ctx *block_ctx_out); static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx); static int btrfsic_read_block(struct btrfsic_state *state, struct btrfsic_block_data_ctx *block_ctx); static void 
btrfsic_dump_database(struct btrfsic_state *state); static int btrfsic_test_for_metadata(struct btrfsic_state *state, const u8 *data, unsigned int size); static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, u64 dev_bytenr, u8 *mapped_data, unsigned int len, struct bio *bio, int *bio_is_patched, struct buffer_head *bh, int submit_bio_bh_rw); static int btrfsic_process_written_superblock( struct btrfsic_state *state, struct btrfsic_block *const block, struct btrfs_super_block *const super_hdr); static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status); static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate); static int btrfsic_is_block_ref_by_superblock(const struct btrfsic_state *state, const struct btrfsic_block *block, int recursion_level); static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state, struct btrfsic_block *const block, int recursion_level); static void btrfsic_print_add_link(const struct btrfsic_state *state, const struct btrfsic_block_link *l); static void btrfsic_print_rem_link(const struct btrfsic_state *state, const struct btrfsic_block_link *l); static char btrfsic_get_block_type(const struct btrfsic_state *state, const struct btrfsic_block *block); static void btrfsic_dump_tree(const struct btrfsic_state *state); static void btrfsic_dump_tree_sub(const struct btrfsic_state *state, const struct btrfsic_block *block, int indent_level); static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add( struct btrfsic_state *state, struct btrfsic_block_data_ctx *next_block_ctx, struct btrfsic_block *next_block, struct btrfsic_block *from_block, u64 parent_generation); static struct btrfsic_block *btrfsic_block_lookup_or_add( struct btrfsic_state *state, struct btrfsic_block_data_ctx *block_ctx, const char *additional_string, int is_metadata, int is_iodone, int never_written, int mirror_num, int *was_created); static int btrfsic_process_superblock_dev_mirror( struct btrfsic_state *state, 
struct btrfsic_dev_state *dev_state,
		struct btrfs_device *device,
		int superblock_mirror_num,
		struct btrfsic_dev_state **selected_dev_state,
		struct btrfs_super_block *selected_super);
static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
		struct block_device *bdev);
static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
					   u64 bytenr,
					   struct btrfsic_dev_state *dev_state,
					   u64 dev_bytenr, char *data);

/*
 * Module-global state.  btrfsic_mutex presumably serializes access to the
 * per-device state table -- its lock/unlock sites are outside this chunk,
 * confirm against the rest of the file.
 */
static struct mutex btrfsic_mutex;
static int btrfsic_is_initialized;
static struct btrfsic_dev_state_hashtable btrfsic_dev_state_hashtable;

/* Reset a btrfsic_block to a pristine, unlinked state. */
static void btrfsic_block_init(struct btrfsic_block *b)
{
	b->magic_num = BTRFSIC_BLOCK_MAGIC_NUMBER;
	b->dev_state = NULL;
	b->dev_bytenr = 0;
	b->logical_bytenr = 0;
	b->generation = BTRFSIC_GENERATION_UNKNOWN;
	b->disk_key.objectid = 0;
	b->disk_key.type = 0;
	b->disk_key.offset = 0;
	b->is_metadata = 0;
	b->is_superblock = 0;
	b->is_iodone = 0;
	b->iodone_w_error = 0;
	b->never_written = 0;
	b->mirror_num = 0;
	b->next_in_same_bio = NULL;
	b->orig_bio_bh_private = NULL;
	b->orig_bio_bh_end_io.bio = NULL;
	INIT_LIST_HEAD(&b->collision_resolving_node);
	INIT_LIST_HEAD(&b->all_blocks_node);
	INIT_LIST_HEAD(&b->ref_to_list);
	INIT_LIST_HEAD(&b->ref_from_list);
	b->submit_bio_bh_rw = 0;
	b->flush_gen = 0;
}

/* Allocate and initialize a btrfsic_block; NULL on allocation failure. */
static struct btrfsic_block *btrfsic_block_alloc(void)
{
	struct btrfsic_block *b;

	b = kzalloc(sizeof(*b), GFP_NOFS);
	if (NULL != b)
		btrfsic_block_init(b);

	return b;
}

/* Free a btrfsic_block; the magic number guards against stray pointers. */
static void btrfsic_block_free(struct btrfsic_block *b)
{
	BUG_ON(!(NULL == b || BTRFSIC_BLOCK_MAGIC_NUMBER == b->magic_num));
	kfree(b);
}

/* Reset a block link (a "block A references block B" edge) to defaults. */
static void btrfsic_block_link_init(struct btrfsic_block_link *l)
{
	l->magic_num = BTRFSIC_BLOCK_LINK_MAGIC_NUMBER;
	l->ref_cnt = 1;
	INIT_LIST_HEAD(&l->node_ref_to);
	INIT_LIST_HEAD(&l->node_ref_from);
	INIT_LIST_HEAD(&l->collision_resolving_node);
	l->block_ref_to = NULL;
	l->block_ref_from = NULL;
}

/* Allocate and initialize a block link; NULL on allocation failure. */
static struct btrfsic_block_link *btrfsic_block_link_alloc(void)
{
	struct btrfsic_block_link *l;

	l = kzalloc(sizeof(*l), GFP_NOFS);
	if (NULL != l)
		btrfsic_block_link_init(l);

	return l;
}

/* Free a block link after a magic-number sanity check. */
static void btrfsic_block_link_free(struct btrfsic_block_link *l)
{
	BUG_ON(!(NULL == l || BTRFSIC_BLOCK_LINK_MAGIC_NUMBER == l->magic_num));
	kfree(l);
}

/*
 * Reset a per-device state.  The embedded dummy block is pre-marked iodone
 * and wired back to this dev_state so flush (bio/bh) completions have a
 * block object to hang off.
 */
static void btrfsic_dev_state_init(struct btrfsic_dev_state *ds)
{
	ds->magic_num = BTRFSIC_DEV2STATE_MAGIC_NUMBER;
	ds->bdev = NULL;
	ds->state = NULL;
	ds->name[0] = '\0';
	INIT_LIST_HEAD(&ds->collision_resolving_node);
	ds->last_flush_gen = 0;
	btrfsic_block_init(&ds->dummy_block_for_bio_bh_flush);
	ds->dummy_block_for_bio_bh_flush.is_iodone = 1;
	ds->dummy_block_for_bio_bh_flush.dev_state = ds;
}

/* Allocate and initialize a per-device state; NULL on allocation failure. */
static struct btrfsic_dev_state *btrfsic_dev_state_alloc(void)
{
	struct btrfsic_dev_state *ds;

	ds = kzalloc(sizeof(*ds), GFP_NOFS);
	if (NULL != ds)
		btrfsic_dev_state_init(ds);

	return ds;
}

/* Free a per-device state after a magic-number sanity check. */
static void btrfsic_dev_state_free(struct btrfsic_dev_state *ds)
{
	BUG_ON(!(NULL == ds ||
		 BTRFSIC_DEV2STATE_MAGIC_NUMBER == ds->magic_num));
	kfree(ds);
}

/* Initialize all buckets of the block hash table. */
static void btrfsic_block_hashtable_init(struct btrfsic_block_hashtable *h)
{
	int i;

	for (i = 0; i < BTRFSIC_BLOCK_HASHTABLE_SIZE; i++)
		INIT_LIST_HEAD(h->table + i);
}

/*
 * Insert a block, hashed over (device byte offset >> 16) xor the bdev
 * pointer value -- i.e. keyed on physical location, not logical bytenr.
 */
static void btrfsic_block_hashtable_add(struct btrfsic_block *b,
					struct btrfsic_block_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(b->dev_bytenr >> 16)) ^
	     ((unsigned int)((uintptr_t)b->dev_state->bdev))) &
	     (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);

	list_add(&b->collision_resolving_node, h->table + hashval);
}

/* Unlink a block from its hash bucket. */
static void btrfsic_block_hashtable_remove(struct btrfsic_block *b)
{
	list_del(&b->collision_resolving_node);
}

/* Find a block by (bdev, device byte offset); NULL if not tracked. */
static struct btrfsic_block *btrfsic_block_hashtable_lookup(
		struct block_device *bdev,
		u64 dev_bytenr,
		struct btrfsic_block_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(dev_bytenr >> 16)) ^
	     ((unsigned int)((uintptr_t)bdev))) &
	     (BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
	struct list_head *elem;

	list_for_each(elem, h->table + hashval) {
		struct btrfsic_block *const b =
		    list_entry(elem, struct btrfsic_block,
			       collision_resolving_node);

		if
(b->dev_state->bdev == bdev &&
		    b->dev_bytenr == dev_bytenr)
			return b;
	}

	return NULL;
}

/* Initialize all buckets of the block-link hash table. */
static void btrfsic_block_link_hashtable_init(
		struct btrfsic_block_link_hashtable *h)
{
	int i;

	for (i = 0; i < BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE; i++)
		INIT_LIST_HEAD(h->table + i);
}

/*
 * Insert a link; the hash mixes both endpoints' device byte offsets and
 * bdev pointer values so a link is findable from the (to, from) pair.
 */
static void btrfsic_block_link_hashtable_add(
		struct btrfsic_block_link *l,
		struct btrfsic_block_link_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(l->block_ref_to->dev_bytenr >> 16)) ^
	     ((unsigned int)(l->block_ref_from->dev_bytenr >> 16)) ^
	     ((unsigned int)((uintptr_t)l->block_ref_to->dev_state->bdev)) ^
	     ((unsigned int)((uintptr_t)l->block_ref_from->dev_state->bdev)))
	     & (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);

	BUG_ON(NULL == l->block_ref_to);
	BUG_ON(NULL == l->block_ref_from);
	list_add(&l->collision_resolving_node, h->table + hashval);
}

/* Unlink a block link from its hash bucket. */
static void btrfsic_block_link_hashtable_remove(struct btrfsic_block_link *l)
{
	list_del(&l->collision_resolving_node);
}

/* Find the link for a (referencing, referenced) device-location pair. */
static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
		struct block_device *bdev_ref_to,
		u64 dev_bytenr_ref_to,
		struct block_device *bdev_ref_from,
		u64 dev_bytenr_ref_from,
		struct btrfsic_block_link_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)(dev_bytenr_ref_to >> 16)) ^
	     ((unsigned int)(dev_bytenr_ref_from >> 16)) ^
	     ((unsigned int)((uintptr_t)bdev_ref_to)) ^
	     ((unsigned int)((uintptr_t)bdev_ref_from))) &
	     (BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
	struct list_head *elem;

	list_for_each(elem, h->table + hashval) {
		struct btrfsic_block_link *const l =
		    list_entry(elem, struct btrfsic_block_link,
			       collision_resolving_node);

		BUG_ON(NULL == l->block_ref_to);
		BUG_ON(NULL == l->block_ref_from);
		if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
		    l->block_ref_to->dev_bytenr == dev_bytenr_ref_to &&
		    l->block_ref_from->dev_state->bdev == bdev_ref_from &&
		    l->block_ref_from->dev_bytenr == dev_bytenr_ref_from)
			return l;
	}

	return NULL;
}

/* Initialize all buckets of the per-device-state hash table. */
static void btrfsic_dev_state_hashtable_init(
		struct btrfsic_dev_state_hashtable *h)
{
	int i;

	for (i = 0; i < BTRFSIC_DEV2STATE_HASHTABLE_SIZE; i++)
		INIT_LIST_HEAD(h->table + i);
}

/* Insert a per-device state, hashed on the bdev pointer value. */
static void btrfsic_dev_state_hashtable_add(
		struct btrfsic_dev_state *ds,
		struct btrfsic_dev_state_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)((uintptr_t)ds->bdev)) &
	     (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));

	list_add(&ds->collision_resolving_node, h->table + hashval);
}

/* Unlink a per-device state from its hash bucket. */
static void btrfsic_dev_state_hashtable_remove(struct btrfsic_dev_state *ds)
{
	list_del(&ds->collision_resolving_node);
}

/* Find the per-device state for a block device; NULL if not tracked. */
static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
		struct block_device *bdev,
		struct btrfsic_dev_state_hashtable *h)
{
	const unsigned int hashval =
	    (((unsigned int)((uintptr_t)bdev)) &
	     (BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
	struct list_head *elem;

	list_for_each(elem, h->table + hashval) {
		struct btrfsic_dev_state *const ds =
		    list_entry(elem, struct btrfsic_dev_state,
			       collision_resolving_node);

		if (ds->bdev == bdev)
			return ds;
	}

	return NULL;
}

/*
 * Build the initial block-reference database at mount time: scan every
 * superblock mirror on every device, keep the one with the highest
 * generation, then walk the root, chunk and log trees it points to
 * (one pass per tree, each mirror of each tree root).
 * Returns 0 on success, non-zero on error.
 */
static int btrfsic_process_superblock(struct btrfsic_state *state,
				      struct btrfs_fs_devices *fs_devices)
{
	int ret = 0;
	struct btrfs_super_block *selected_super;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;
	struct btrfsic_dev_state *selected_dev_state = NULL;
	int pass;

	BUG_ON(NULL == state);
	selected_super = kmalloc(sizeof(*selected_super), GFP_NOFS);
	if (NULL == selected_super) {
		printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
		return -1;
	}

	list_for_each_entry(device, dev_head, dev_list) {
		int i;
		struct btrfsic_dev_state *dev_state;

		if (!device->bdev || !device->name)
			continue;

		dev_state = btrfsic_dev_state_lookup(device->bdev);
		BUG_ON(NULL == dev_state);
		for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
			ret = btrfsic_process_superblock_dev_mirror(
					state, dev_state, device, i,
					&selected_dev_state, selected_super);
			/* only a failure on mirror 0 is fatal */
			if (0 != ret && 0 == i) {
				kfree(selected_super);
				return ret;
			}
		}
	}

	if (NULL == state->latest_superblock) {
		printk(KERN_INFO "btrfsic: no superblock found!\n");
		kfree(selected_super);
		return -1;
	}

	state->csum_size = btrfs_super_csum_size(selected_super);

	/* pass 0: root tree, pass 1: chunk tree, pass 2: log tree (if any) */
	for (pass = 0; pass < 3; pass++) {
		int num_copies;
		int mirror_num;
		u64 next_bytenr;

		switch (pass) {
		case 0:
			next_bytenr = btrfs_super_root(selected_super);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "root@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		case 1:
			next_bytenr = btrfs_super_chunk_root(selected_super);
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "chunk@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		case 2:
			next_bytenr = btrfs_super_log_root(selected_super);
			if (0 == next_bytenr)
				continue;
			if (state->print_mask &
			    BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION)
				printk(KERN_INFO "log@%llu\n",
				       (unsigned long long)next_bytenr);
			break;
		}

		num_copies =
		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
				     next_bytenr, PAGE_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, num_copies);

		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block *next_block;
			struct btrfsic_block_data_ctx tmp_next_block_ctx;
			struct btrfsic_block_link *l;
			struct btrfs_header *hdr;

			ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
						&tmp_next_block_ctx,
						mirror_num);
			if (ret) {
				printk(KERN_INFO "btrfsic:"
				       " btrfsic_map_block(root @%llu,"
				       " mirror %d) failed!\n",
				       (unsigned long long)next_bytenr,
				       mirror_num);
				kfree(selected_super);
				return -1;
			}

			/*
			 * the dev_mirror pass above already registered the
			 * tree roots and their links, so both lookups must
			 * succeed here
			 */
			next_block = btrfsic_block_hashtable_lookup(
					tmp_next_block_ctx.dev->bdev,
					tmp_next_block_ctx.dev_bytenr,
					&state->block_hashtable);
			BUG_ON(NULL == next_block);

			l = btrfsic_block_link_hashtable_lookup(
					tmp_next_block_ctx.dev->bdev,
					tmp_next_block_ctx.dev_bytenr,
					state->latest_superblock->dev_state->
					bdev,
					state->latest_superblock->dev_bytenr,
					&state->block_link_hashtable);
			BUG_ON(NULL == l);

			ret = btrfsic_read_block(state, &tmp_next_block_ctx);
			if (ret < (int)BTRFSIC_BLOCK_SIZE) {
				printk(KERN_INFO
				       "btrfsic: read @logical %llu failed!\n",
				       (unsigned long long)
				       tmp_next_block_ctx.start);
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				kfree(selected_super);
				return -1;
			}

			hdr = (struct btrfs_header *)tmp_next_block_ctx.data;
			ret = btrfsic_process_metablock(state,
							next_block,
							&tmp_next_block_ctx,
							hdr,
							BTRFS_MAX_LEVEL + 3, 1);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
		}
	}

	kfree(selected_super);
	return ret;
}

/*
 * Read one superblock mirror from one device and fold it into the state:
 * validate magic/bytenr/uuid, register (or re-find) its tracking block,
 * remember it as the latest superblock if its generation is the highest
 * seen so far, and register blocks + links for the root/chunk/log tree
 * roots it references.
 * Returns 0 on success or on a mirror that is simply absent/invalid,
 * -1 on hard errors (I/O or allocation failure).
 */
static int btrfsic_process_superblock_dev_mirror(
		struct btrfsic_state *state,
		struct btrfsic_dev_state *dev_state,
		struct btrfs_device *device,
		int superblock_mirror_num,
		struct btrfsic_dev_state **selected_dev_state,
		struct btrfs_super_block *selected_super)
{
	struct btrfs_super_block *super_tmp;
	u64 dev_bytenr;
	struct buffer_head *bh;
	struct btrfsic_block *superblock_tmp;
	int pass;
	struct block_device *const superblock_bdev = device->bdev;

	/* super block bytenr is always the unmapped device bytenr */
	dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
	bh = __bread(superblock_bdev, dev_bytenr / 4096, 4096);
	if (NULL == bh)
		return -1;
	super_tmp = (struct btrfs_super_block *)
	    (bh->b_data + (dev_bytenr & 4095));

	if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
	    strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
		    sizeof(super_tmp->magic)) ||
	    memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE)) {
		/* not a valid superblock for this device: skip, not an error */
		brelse(bh);
		return 0;
	}

	superblock_tmp =
	    btrfsic_block_hashtable_lookup(superblock_bdev,
					   dev_bytenr,
					   &state->block_hashtable);
	if (NULL == superblock_tmp) {
		superblock_tmp = btrfsic_block_alloc();
		if (NULL == superblock_tmp) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			brelse(bh);
			return -1;
		}
		/* for superblock, only the dev_bytenr makes sense */
		superblock_tmp->dev_bytenr = dev_bytenr;
		superblock_tmp->dev_state = dev_state;
		superblock_tmp->logical_bytenr = dev_bytenr;
		superblock_tmp->generation = btrfs_super_generation(super_tmp);
superblock_tmp->is_metadata = 1;
		superblock_tmp->is_superblock = 1;
		superblock_tmp->is_iodone = 1;
		superblock_tmp->never_written = 0;
		superblock_tmp->mirror_num = 1 + superblock_mirror_num;
		if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
			printk(KERN_INFO "New initial S-block (bdev %p, %s)"
			       " @%llu (%s/%llu/%d)\n",
			       superblock_bdev, device->name,
			       (unsigned long long)dev_bytenr,
			       dev_state->name,
			       (unsigned long long)dev_bytenr,
			       superblock_mirror_num);
		list_add(&superblock_tmp->all_blocks_node,
			 &state->all_blocks_list);
		btrfsic_block_hashtable_add(superblock_tmp,
					    &state->block_hashtable);
	}

	/* select the one with the highest generation field */
	if (btrfs_super_generation(super_tmp) >
	    state->max_superblock_generation ||
	    0 == state->max_superblock_generation) {
		memcpy(selected_super, super_tmp, sizeof(*selected_super));
		*selected_dev_state = dev_state;
		state->max_superblock_generation =
		    btrfs_super_generation(super_tmp);
		state->latest_superblock = superblock_tmp;
	}

	/* register the root/chunk/log tree roots referenced by this mirror */
	for (pass = 0; pass < 3; pass++) {
		u64 next_bytenr;
		int num_copies;
		int mirror_num;
		const char *additional_string = NULL;
		struct btrfs_disk_key tmp_disk_key;

		tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY;
		tmp_disk_key.offset = 0;

		switch (pass) {
		case 0:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID);
			additional_string = "initial root ";
			next_bytenr = btrfs_super_root(super_tmp);
			break;
		case 1:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID);
			additional_string = "initial chunk ";
			next_bytenr = btrfs_super_chunk_root(super_tmp);
			break;
		case 2:
			tmp_disk_key.objectid =
			    cpu_to_le64(BTRFS_TREE_LOG_OBJECTID);
			additional_string = "initial log ";
			next_bytenr = btrfs_super_log_root(super_tmp);
			if (0 == next_bytenr)
				continue;	/* no log tree present */
			break;
		}

		num_copies =
		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
				     next_bytenr, PAGE_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block *next_block;
			struct btrfsic_block_data_ctx tmp_next_block_ctx;
			struct btrfsic_block_link *l;

			if (btrfsic_map_block(state, next_bytenr, PAGE_SIZE,
					      &tmp_next_block_ctx,
					      mirror_num)) {
				printk(KERN_INFO "btrfsic: btrfsic_map_block("
				       "bytenr @%llu, mirror %d) failed!\n",
				       (unsigned long long)next_bytenr,
				       mirror_num);
				brelse(bh);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state, &tmp_next_block_ctx,
					additional_string, 1, 1, 0,
					mirror_num, NULL);
			if (NULL == next_block) {
				btrfsic_release_block_ctx(&tmp_next_block_ctx);
				brelse(bh);
				return -1;
			}

			next_block->disk_key = tmp_disk_key;
			next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
			l = btrfsic_block_link_lookup_or_add(
					state, &tmp_next_block_ctx,
					next_block, superblock_tmp,
					BTRFSIC_GENERATION_UNKNOWN);
			btrfsic_release_block_ctx(&tmp_next_block_ctx);
			if (NULL == l) {
				brelse(bh);
				return -1;
			}
		}
	}
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_ALL_TREES)
		btrfsic_dump_tree_sub(state, superblock_tmp, 0);

	brelse(bh);
	return 0;
}

/* Allocate a stack frame for the iterative tree walk; NULL on failure. */
static struct btrfsic_stack_frame *btrfsic_stack_frame_alloc(void)
{
	struct btrfsic_stack_frame *sf;

	sf = kzalloc(sizeof(*sf), GFP_NOFS);
	if (NULL == sf)
		printk(KERN_INFO "btrfsic: alloc memory failed!\n");
	else
		sf->magic = BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER;
	return sf;
}

/* Free a stack frame after a magic-number sanity check. */
static void btrfsic_stack_frame_free(struct btrfsic_stack_frame *sf)
{
	BUG_ON(!(NULL == sf ||
		 BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER == sf->magic));
	kfree(sf);
}

/*
 * Walk a metadata tree starting at first_block, recording reference links
 * for every node/leaf reached.  Recursion is simulated with an explicit
 * stack of heap-allocated frames (the first frame lives on the C stack)
 * so arbitrarily deep trees cannot overflow the kernel stack.
 */
static int btrfsic_process_metablock(
		struct btrfsic_state *state,
		struct btrfsic_block *const first_block,
		struct btrfsic_block_data_ctx *const first_block_ctx,
		struct btrfs_header *const first_hdr,
		int first_limit_nesting,
		int force_iodone_flag)
{
	struct btrfsic_stack_frame initial_stack_frame = { 0 };
	struct btrfsic_stack_frame *sf;
	struct btrfsic_stack_frame *next_stack;

	sf = &initial_stack_frame;
	sf->error = 0;
	sf->i = -1;
	sf->limit_nesting = first_limit_nesting;
	sf->block =
first_block;	/* completes the assignment begun on the previous line */
	sf->block_ctx = first_block_ctx;
	sf->next_block = NULL;
	sf->hdr = first_hdr;
	sf->prev = NULL;

continue_with_new_stack_frame:
	sf->block->generation = le64_to_cpu(sf->hdr->generation);
	if (0 == sf->hdr->level) {
		/* level 0: a leaf -- scan its items */
		struct btrfs_leaf *const leafhdr =
		    (struct btrfs_leaf *)sf->hdr;

		if (-1 == sf->i) {
			/* first visit of this frame */
			sf->nr = le32_to_cpu(leafhdr->header.nritems);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO
				       "leaf %llu items %d generation %llu"
				       " owner %llu\n",
				       (unsigned long long)
				       sf->block_ctx->start,
				       sf->nr,
				       (unsigned long long)
				       le64_to_cpu(leafhdr->header.generation),
				       (unsigned long long)
				       le64_to_cpu(leafhdr->header.owner));
		}

continue_with_current_leaf_stack_frame:
		/* advance to the next item once all mirrors were handled */
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_item *disk_item = leafhdr->items + sf->i;
			struct btrfs_disk_key *disk_key = &disk_item->key;
			u8 type;
			const u32 item_offset = le32_to_cpu(disk_item->offset);

			type = disk_key->type;

			if (BTRFS_ROOT_ITEM_KEY == type) {
				const struct btrfs_root_item *const root_item =
				    (struct btrfs_root_item *)
				    (sf->block_ctx->data +
				     offsetof(struct btrfs_leaf, items) +
				     item_offset);
				const u64 next_bytenr =
				    le64_to_cpu(root_item->bytenr);

				sf->error =
				    btrfsic_create_link_to_next_block(
						state,
						sf->block,
						sf->block_ctx,
						next_bytenr,
						sf->limit_nesting,
						&sf->next_block_ctx,
						&sf->next_block,
						force_iodone_flag,
						&sf->num_copies,
						&sf->mirror_num,
						disk_key,
						le64_to_cpu(root_item->
						generation));
				if (sf->error)
					goto one_stack_frame_backwards;

				if (NULL != sf->next_block) {
					/* descend into the referenced block */
					struct btrfs_header *const next_hdr =
					    (struct btrfs_header *)
					    sf->next_block_ctx.data;

					next_stack =
					    btrfsic_stack_frame_alloc();
					if (NULL == next_stack) {
						btrfsic_release_block_ctx(
								&sf->
								next_block_ctx);
						goto one_stack_frame_backwards;
					}

					next_stack->i = -1;
					next_stack->block = sf->next_block;
					next_stack->block_ctx =
					    &sf->next_block_ctx;
					next_stack->next_block = NULL;
					next_stack->hdr = next_hdr;
					next_stack->limit_nesting =
					    sf->limit_nesting - 1;
					next_stack->prev = sf;
					sf = next_stack;
					goto continue_with_new_stack_frame;
				}
			} else if (BTRFS_EXTENT_DATA_KEY == type &&
				   state->include_extent_data) {
				/* data extents are handled without descending */
				sf->error = btrfsic_handle_extent_data(
						state,
						sf->block,
						sf->block_ctx,
						item_offset,
						force_iodone_flag);
				if (sf->error)
					goto one_stack_frame_backwards;
			}

			goto continue_with_current_leaf_stack_frame;
		}
	} else {
		/* level > 0: an internal node -- follow its key pointers */
		struct btrfs_node *const nodehdr = (struct btrfs_node *)sf->hdr;

		if (-1 == sf->i) {
			/* first visit of this frame */
			sf->nr = le32_to_cpu(nodehdr->header.nritems);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO "node %llu level %d items %d"
				       " generation %llu owner %llu\n",
				       (unsigned long long)
				       sf->block_ctx->start,
				       nodehdr->header.level, sf->nr,
				       (unsigned long long)
				       le64_to_cpu(nodehdr->header.generation),
				       (unsigned long long)
				       le64_to_cpu(nodehdr->header.owner));
		}

continue_with_current_node_stack_frame:
		/* advance to the next key pointer once all mirrors were done */
		if (0 == sf->num_copies || sf->mirror_num > sf->num_copies) {
			sf->i++;
			sf->num_copies = 0;
		}

		if (sf->i < sf->nr) {
			struct btrfs_key_ptr *disk_key_ptr =
			    nodehdr->ptrs + sf->i;
			const u64 next_bytenr =
			    le64_to_cpu(disk_key_ptr->blockptr);

			sf->error = btrfsic_create_link_to_next_block(
					state,
					sf->block,
					sf->block_ctx,
					next_bytenr,
					sf->limit_nesting,
					&sf->next_block_ctx,
					&sf->next_block,
					force_iodone_flag,
					&sf->num_copies,
					&sf->mirror_num,
					&disk_key_ptr->key,
					le64_to_cpu(disk_key_ptr->generation));
			if (sf->error)
				goto one_stack_frame_backwards;

			if (NULL != sf->next_block) {
				/* descend into the referenced block */
				struct btrfs_header *const next_hdr =
				    (struct btrfs_header *)
				    sf->next_block_ctx.data;

				next_stack = btrfsic_stack_frame_alloc();
				if (NULL == next_stack)
					goto one_stack_frame_backwards;

				next_stack->i = -1;
				next_stack->block = sf->next_block;
				next_stack->block_ctx = &sf->next_block_ctx;
				next_stack->next_block = NULL;
				next_stack->hdr = next_hdr;
				next_stack->limit_nesting =
				    sf->limit_nesting - 1;
				next_stack->prev = sf;
				sf = next_stack;
				goto continue_with_new_stack_frame;
			}

			goto continue_with_current_node_stack_frame;
		}
	}

one_stack_frame_backwards:
	/* this frame is exhausted (or errored): pop back to the parent */
	if (NULL != sf->prev) {
		struct btrfsic_stack_frame *const prev = sf->prev;

		/* the one for the initial block is freed in the caller */
		btrfsic_release_block_ctx(sf->block_ctx);

		if (sf->error) {
			/* propagate the error upward and keep unwinding */
			prev->error = sf->error;
			btrfsic_stack_frame_free(sf);
			sf = prev;
			goto one_stack_frame_backwards;
		}

		btrfsic_stack_frame_free(sf);
		sf = prev;
		goto continue_with_new_stack_frame;
	} else {
		BUG_ON(&initial_stack_frame != sf);
	}

	return sf->error;
}

/*
 * Register (or re-find) the block at next_bytenr and the link from
 * 'block' to it, iterating one mirror per call via *mirror_nump.
 * On the first call for a bytenr (*num_copiesp == 0) the copy count is
 * established.  When the callee should descend into the new block
 * (limit_nesting > 0 and the link is new), the block is read and
 * *next_blockp is set; otherwise *next_blockp stays NULL.
 * Returns 0 on success (including "all mirrors done"), -1 on error.
 */
static int btrfsic_create_link_to_next_block(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u64 next_bytenr,
		int limit_nesting,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block **next_blockp,
		int force_iodone_flag,
		int *num_copiesp, int *mirror_nump,
		struct btrfs_disk_key *disk_key,
		u64 parent_generation)
{
	struct btrfsic_block *next_block = NULL;
	int ret;
	struct btrfsic_block_link *l;
	int did_alloc_block_link;
	int block_was_created;

	*next_blockp = NULL;
	if (0 == *num_copiesp) {
		/* first mirror for this bytenr: establish the copy count */
		*num_copiesp =
		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
				     next_bytenr, PAGE_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, *num_copiesp);
		*mirror_nump = 1;
	}

	if (*mirror_nump > *num_copiesp)
		return 0;

	if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
		printk(KERN_INFO
		       "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
		       *mirror_nump);
	ret = btrfsic_map_block(state, next_bytenr,
				BTRFSIC_BLOCK_SIZE,
				next_block_ctx, *mirror_nump);
	if (ret) {
		printk(KERN_INFO
		       "btrfsic: btrfsic_map_block(@%llu, mirror=%d) failed!\n",
		       (unsigned long long)next_bytenr, *mirror_nump);
		btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}

	next_block = btrfsic_block_lookup_or_add(state,
						 next_block_ctx, "referenced ",
						 1, force_iodone_flag,
						 !force_iodone_flag,
						 *mirror_nump,
						 &block_was_created);
	if (NULL == next_block) {
btrfsic_release_block_ctx(next_block_ctx);
		*next_blockp = NULL;
		return -1;
	}

	if (block_was_created) {
		l = NULL;
		next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
	} else {
		/* existing block: warn if its logical bytenr disagrees */
		if (next_block->logical_bytenr != next_bytenr &&
		    !(!next_block->is_metadata &&
		      0 == next_block->logical_bytenr)) {
			printk(KERN_INFO
			       "Referenced block @%llu (%s/%llu/%d)"
			       " found in hash table, %c,"
			       " bytenr mismatch (!= stored %llu).\n",
			       (unsigned long long)next_bytenr,
			       next_block_ctx->dev->name,
			       (unsigned long long)next_block_ctx->dev_bytenr,
			       *mirror_nump,
			       btrfsic_get_block_type(state, next_block),
			       (unsigned long long)next_block->logical_bytenr);
		} else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "Referenced block @%llu (%s/%llu/%d)"
			       " found in hash table, %c.\n",
			       (unsigned long long)next_bytenr,
			       next_block_ctx->dev->name,
			       (unsigned long long)next_block_ctx->dev_bytenr,
			       *mirror_nump,
			       btrfsic_get_block_type(state, next_block));
		next_block->logical_bytenr = next_bytenr;
		next_block->mirror_num = *mirror_nump;
		l = btrfsic_block_link_hashtable_lookup(
				next_block_ctx->dev->bdev,
				next_block_ctx->dev_bytenr,
				block_ctx->dev->bdev,
				block_ctx->dev_bytenr,
				&state->block_link_hashtable);
	}

	next_block->disk_key = *disk_key;
	if (NULL == l) {
		/* no link between these two blocks yet: create one */
		l = btrfsic_block_link_alloc();
		if (NULL == l) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		did_alloc_block_link = 1;
		l->block_ref_to = next_block;
		l->block_ref_from = block;
		l->ref_cnt = 1;
		l->parent_generation = parent_generation;

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);

		list_add(&l->node_ref_to, &block->ref_to_list);
		list_add(&l->node_ref_from, &next_block->ref_from_list);

		btrfsic_block_link_hashtable_add(l,
						 &state->block_link_hashtable);
	} else {
		did_alloc_block_link = 0;
		if (0 == limit_nesting) {
			/* at the nesting limit: only bump the refcount */
			l->ref_cnt++;
			l->parent_generation = parent_generation;
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_add_link(state, l);
		}
	}

	if (limit_nesting > 0 && did_alloc_block_link) {
		/* new link and nesting allowed: read the block for descent */
		ret = btrfsic_read_block(state, next_block_ctx);
		if (ret < (int)BTRFSIC_BLOCK_SIZE) {
			printk(KERN_INFO
			       "btrfsic: read block @logical %llu failed!\n",
			       (unsigned long long)next_bytenr);
			btrfsic_release_block_ctx(next_block_ctx);
			*next_blockp = NULL;
			return -1;
		}

		*next_blockp = next_block;
	} else {
		*next_blockp = NULL;
	}
	(*mirror_nump)++;

	return 0;
}

/*
 * Register the data blocks referenced by an EXTENT_DATA leaf item,
 * walking the extent in BTRFSIC_BLOCK_SIZE-sized chunks and covering
 * every mirror of every chunk.  Inline and hole extents are skipped.
 * Returns 0 on success, -1 on error.
 */
static int btrfsic_handle_extent_data(
		struct btrfsic_state *state,
		struct btrfsic_block *block,
		struct btrfsic_block_data_ctx *block_ctx,
		u32 item_offset, int force_iodone_flag)
{
	int ret;
	struct btrfs_file_extent_item *file_extent_item =
	    (struct btrfs_file_extent_item *)(block_ctx->data +
					      offsetof(struct btrfs_leaf,
						       items) + item_offset);
	u64 next_bytenr =
	    le64_to_cpu(file_extent_item->disk_bytenr) +
	    le64_to_cpu(file_extent_item->offset);
	u64 num_bytes = le64_to_cpu(file_extent_item->num_bytes);
	u64 generation = le64_to_cpu(file_extent_item->generation);
	struct btrfsic_block_link *l;

	if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
		printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
		       " offset = %llu, num_bytes = %llu\n",
		       file_extent_item->type,
		       (unsigned long long)
		       le64_to_cpu(file_extent_item->disk_bytenr),
		       (unsigned long long)
		       le64_to_cpu(file_extent_item->offset),
		       (unsigned long long)
		       le64_to_cpu(file_extent_item->num_bytes));
	/* only regular extents with a real disk location are tracked */
	if (BTRFS_FILE_EXTENT_REG != file_extent_item->type ||
	    ((u64)0) == le64_to_cpu(file_extent_item->disk_bytenr))
		return 0;
	while (num_bytes > 0) {
		u32 chunk_len;
		int num_copies;
		int mirror_num;

		if (num_bytes > BTRFSIC_BLOCK_SIZE)
			chunk_len = BTRFSIC_BLOCK_SIZE;
		else
			chunk_len = num_bytes;

		num_copies =
		    btrfs_num_copies(&state->root->fs_info->mapping_tree,
				     next_bytenr, PAGE_SIZE);
		if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
			printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
			       (unsigned long long)next_bytenr, num_copies);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			struct btrfsic_block_data_ctx next_block_ctx;
			struct btrfsic_block *next_block;
			int block_was_created;

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				printk(KERN_INFO "btrfsic_handle_extent_data("
				       "mirror_num=%d)\n", mirror_num);
			if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
				printk(KERN_INFO
				       "\tdisk_bytenr = %llu, num_bytes %u\n",
				       (unsigned long long)next_bytenr,
				       chunk_len);
			ret = btrfsic_map_block(state, next_bytenr,
						chunk_len, &next_block_ctx,
						mirror_num);
			if (ret) {
				printk(KERN_INFO
				       "btrfsic: btrfsic_map_block(@%llu,"
				       " mirror=%d) failed!\n",
				       (unsigned long long)next_bytenr,
				       mirror_num);
				return -1;
			}

			next_block = btrfsic_block_lookup_or_add(
					state,
					&next_block_ctx,
					"referenced ",
					0,
					force_iodone_flag,
					!force_iodone_flag,
					mirror_num,
					&block_was_created);
			if (NULL == next_block) {
				printk(KERN_INFO
				       "btrfsic: error, kmalloc failed!\n");
				btrfsic_release_block_ctx(&next_block_ctx);
				return -1;
			}
			if (!block_was_created) {
				/* existing data block: note bytenr mismatches */
				if (next_block->logical_bytenr != next_bytenr &&
				    !(!next_block->is_metadata &&
				      0 == next_block->logical_bytenr)) {
					printk(KERN_INFO
					       "Referenced block"
					       " @%llu (%s/%llu/%d)"
					       " found in hash table, D,"
					       " bytenr mismatch"
					       " (!= stored %llu).\n",
					       (unsigned long long)next_bytenr,
					       next_block_ctx.dev->name,
					       (unsigned long long)
					       next_block_ctx.dev_bytenr,
					       mirror_num,
					       (unsigned long long)
					       next_block->logical_bytenr);
				}
				next_block->logical_bytenr = next_bytenr;
				next_block->mirror_num = mirror_num;
			}

			l = btrfsic_block_link_lookup_or_add(state,
							     &next_block_ctx,
							     next_block, block,
							     generation);
			btrfsic_release_block_ctx(&next_block_ctx);
			if (NULL == l)
				return -1;
		}

		next_bytenr += chunk_len;
		num_bytes -= chunk_len;
	}

	return 0;
}

/*
 * Translate a logical bytenr + mirror number into a device location
 * via btrfs_map_block().  (Definition continues past this chunk.)
 */
static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
			     struct btrfsic_block_data_ctx *block_ctx_out,
			     int mirror_num)
{
	int ret;
	u64 length;
	struct btrfs_bio *multi = NULL;
	struct btrfs_device *device;

	length = len;
	ret = btrfs_map_block(&state->root->fs_info->mapping_tree, READ,
			      bytenr, &length, &multi,
			      mirror_num);
	/*
	 * NOTE(review): 'ret' is not checked before 'multi' is
	 * dereferenced below; if btrfs_map_block() failed, 'multi' may
	 * be unset/NULL — confirm against the callers/upstream fix.
	 */
	device = multi->stripes[0].dev;
	block_ctx_out->dev = btrfsic_dev_state_lookup(device->bdev);
	block_ctx_out->dev_bytenr = multi->stripes[0].physical;
	block_ctx_out->start = bytenr;
	block_ctx_out->len = len;
	block_ctx_out->data = NULL;
	block_ctx_out->bh = NULL;

	/* btrfs_map_block() allocated 'multi' only on success */
	if (0 == ret)
		kfree(multi);
	if (NULL == block_ctx_out->dev) {
		ret = -ENXIO;
		printk(KERN_INFO "btrfsic: error, cannot lookup dev (#1)!\n");
	}

	return ret;
}

/*
 * Fill a block data context for a superblock copy. Unlike
 * btrfsic_map_block(), no chunk mapping is involved: the superblock is
 * addressed directly by device + physical byte offset, so logical and
 * physical bytenr are identical here.
 * Returns 0 on success, -ENXIO when the device is not registered in the
 * dev_state hashtable.
 */
static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
				  u32 len, struct block_device *bdev,
				  struct btrfsic_block_data_ctx *block_ctx_out)
{
	block_ctx_out->dev = btrfsic_dev_state_lookup(bdev);
	block_ctx_out->dev_bytenr = bytenr;
	block_ctx_out->start = bytenr;
	block_ctx_out->len = len;
	block_ctx_out->data = NULL;
	block_ctx_out->bh = NULL;

	if (NULL != block_ctx_out->dev) {
		return 0;
	} else {
		printk(KERN_INFO "btrfsic: error, cannot lookup dev (#2)!\n");
		return -ENXIO;
	}
}

/*
 * Release the buffer_head (if any) attached to a block data context by
 * btrfsic_read_block(). Safe to call on a context that was never read.
 */
static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
{
	if (NULL != block_ctx->bh) {
		brelse(block_ctx->bh);
		block_ctx->bh = NULL;
	}
}

/*
 * Read the block described by block_ctx from disk into a buffer_head and
 * expose its data via block_ctx->data.
 * Only single 4K-aligned, at-most-4K-sized reads are supported (the
 * module works on one page-sized block at a time).
 * Returns the number of bytes made available (block_ctx->len) on
 * success, -1 on any failure. The caller must later call
 * btrfsic_release_block_ctx() to drop the buffer_head reference.
 */
static int btrfsic_read_block(struct btrfsic_state *state,
			      struct btrfsic_block_data_ctx *block_ctx)
{
	block_ctx->bh = NULL;
	if (block_ctx->dev_bytenr & 4095) {
		printk(KERN_INFO
		       "btrfsic: read_block() with unaligned bytenr %llu\n",
		       (unsigned long long)block_ctx->dev_bytenr);
		return -1;
	}
	if (block_ctx->len > 4096) {
		printk(KERN_INFO
		       "btrfsic: read_block() with too huge size %d\n",
		       block_ctx->len);
		return -1;
	}

	/* dev_bytenr >> 12 converts the byte offset to a 4K block number */
	block_ctx->bh = __bread(block_ctx->dev->bdev,
				block_ctx->dev_bytenr >> 12, 4096);
	if (NULL == block_ctx->bh)
		return -1;
	block_ctx->data = block_ctx->bh->b_data;

	return block_ctx->len;
}

/*
 * Debug helper: dump every block known to the integrity checker together
 * with its outgoing (ref_to) and incoming (ref_from) link lists.
 */
static void btrfsic_dump_database(struct btrfsic_state *state)
{
	struct list_head *elem_all;

	BUG_ON(NULL == state);
	printk(KERN_INFO "all_blocks_list:\n");
	list_for_each(elem_all, &state->all_blocks_list) {
		const struct btrfsic_block *const b_all =
		    list_entry(elem_all, struct btrfsic_block, all_blocks_node);
		struct
list_head *elem_ref_to; struct list_head *elem_ref_from; printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n", btrfsic_get_block_type(state, b_all), (unsigned long long)b_all->logical_bytenr, b_all->dev_state->name, (unsigned long long)b_all->dev_bytenr, b_all->mirror_num); list_for_each(elem_ref_to, &b_all->ref_to_list) { const struct btrfsic_block_link *const l = list_entry(elem_ref_to, struct btrfsic_block_link, node_ref_to); printk(KERN_INFO " %c @%llu (%s/%llu/%d)" " refers %u* to" " %c @%llu (%s/%llu/%d)\n", btrfsic_get_block_type(state, b_all), (unsigned long long)b_all->logical_bytenr, b_all->dev_state->name, (unsigned long long)b_all->dev_bytenr, b_all->mirror_num, l->ref_cnt, btrfsic_get_block_type(state, l->block_ref_to), (unsigned long long) l->block_ref_to->logical_bytenr, l->block_ref_to->dev_state->name, (unsigned long long)l->block_ref_to->dev_bytenr, l->block_ref_to->mirror_num); } list_for_each(elem_ref_from, &b_all->ref_from_list) { const struct btrfsic_block_link *const l = list_entry(elem_ref_from, struct btrfsic_block_link, node_ref_from); printk(KERN_INFO " %c @%llu (%s/%llu/%d)" " is ref %u* from" " %c @%llu (%s/%llu/%d)\n", btrfsic_get_block_type(state, b_all), (unsigned long long)b_all->logical_bytenr, b_all->dev_state->name, (unsigned long long)b_all->dev_bytenr, b_all->mirror_num, l->ref_cnt, btrfsic_get_block_type(state, l->block_ref_from), (unsigned long long) l->block_ref_from->logical_bytenr, l->block_ref_from->dev_state->name, (unsigned long long) l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num); } printk(KERN_INFO "\n"); } } /* * Test whether the disk block contains a tree block (leaf or node) * (note that this test fails for the super block) */ static int btrfsic_test_for_metadata(struct btrfsic_state *state, const u8 *data, unsigned int size) { struct btrfs_header *h; u8 csum[BTRFS_CSUM_SIZE]; u32 crc = ~(u32)0; int fail = 0; int crc_fail = 0; h = (struct btrfs_header *)data; if (memcmp(h->fsid, 
state->root->fs_info->fsid, BTRFS_UUID_SIZE)) fail++; crc = crc32c(crc, data + BTRFS_CSUM_SIZE, PAGE_SIZE - BTRFS_CSUM_SIZE); btrfs_csum_final(crc, csum); if (memcmp(csum, h->csum, state->csum_size)) crc_fail++; return fail || crc_fail; } static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, u64 dev_bytenr, u8 *mapped_data, unsigned int len, struct bio *bio, int *bio_is_patched, struct buffer_head *bh, int submit_bio_bh_rw) { int is_metadata; struct btrfsic_block *block; struct btrfsic_block_data_ctx block_ctx; int ret; struct btrfsic_state *state = dev_state->state; struct block_device *bdev = dev_state->bdev; WARN_ON(len > PAGE_SIZE); is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_data, len)); if (NULL != bio_is_patched) *bio_is_patched = 0; block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr, &state->block_hashtable); if (NULL != block) { u64 bytenr = 0; struct list_head *elem_ref_to; struct list_head *tmp_ref_to; if (block->is_superblock) { bytenr = le64_to_cpu(((struct btrfs_super_block *) mapped_data)->bytenr); is_metadata = 1; if (state->print_mask & BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) { printk(KERN_INFO "[before new superblock is written]:\n"); btrfsic_dump_tree_sub(state, block, 0); } } if (is_metadata) { if (!block->is_superblock) { bytenr = le64_to_cpu(((struct btrfs_header *) mapped_data)->bytenr); btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state, dev_bytenr, mapped_data); } if (block->logical_bytenr != bytenr) { printk(KERN_INFO "Written block @%llu (%s/%llu/%d)" " found in hash table, %c," " bytenr mismatch" " (!= stored %llu).\n", (unsigned long long)bytenr, dev_state->name, (unsigned long long)dev_bytenr, block->mirror_num, btrfsic_get_block_type(state, block), (unsigned long long) block->logical_bytenr); block->logical_bytenr = bytenr; } else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) printk(KERN_INFO "Written block @%llu (%s/%llu/%d)" " found in hash table, %c.\n", (unsigned long 
long)bytenr, dev_state->name, (unsigned long long)dev_bytenr, block->mirror_num, btrfsic_get_block_type(state, block)); } else { bytenr = block->logical_bytenr; if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) printk(KERN_INFO "Written block @%llu (%s/%llu/%d)" " found in hash table, %c.\n", (unsigned long long)bytenr, dev_state->name, (unsigned long long)dev_bytenr, block->mirror_num, btrfsic_get_block_type(state, block)); } if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) printk(KERN_INFO "ref_to_list: %cE, ref_from_list: %cE\n", list_empty(&block->ref_to_list) ? ' ' : '!', list_empty(&block->ref_from_list) ? ' ' : '!'); if (btrfsic_is_block_ref_by_superblock(state, block, 0)) { printk(KERN_INFO "btrfs: attempt to overwrite %c-block" " @%llu (%s/%llu/%d), old(gen=%llu," " objectid=%llu, type=%d, offset=%llu)," " new(gen=%llu)," " which is referenced by most recent superblock" " (superblockgen=%llu)!\n", btrfsic_get_block_type(state, block), (unsigned long long)bytenr, dev_state->name, (unsigned long long)dev_bytenr, block->mirror_num, (unsigned long long)block->generation, (unsigned long long) le64_to_cpu(block->disk_key.objectid), block->disk_key.type, (unsigned long long) le64_to_cpu(block->disk_key.offset), (unsigned long long) le64_to_cpu(((struct btrfs_header *) mapped_data)->generation), (unsigned long long) state->max_superblock_generation); btrfsic_dump_tree(state); } if (!block->is_iodone && !block->never_written) { printk(KERN_INFO "btrfs: attempt to overwrite %c-block" " @%llu (%s/%llu/%d), oldgen=%llu, newgen=%llu," " which is not yet iodone!\n", btrfsic_get_block_type(state, block), (unsigned long long)bytenr, dev_state->name, (unsigned long long)dev_bytenr, block->mirror_num, (unsigned long long)block->generation, (unsigned long long) le64_to_cpu(((struct btrfs_header *) mapped_data)->generation)); /* it would not be safe to go on */ btrfsic_dump_tree(state); return; } /* * Clear all references of this block. 
Do not free * the block itself even if is not referenced anymore * because it still carries valueable information * like whether it was ever written and IO completed. */ list_for_each_safe(elem_ref_to, tmp_ref_to, &block->ref_to_list) { struct btrfsic_block_link *const l = list_entry(elem_ref_to, struct btrfsic_block_link, node_ref_to); if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) btrfsic_print_rem_link(state, l); l->ref_cnt--; if (0 == l->ref_cnt) { list_del(&l->node_ref_to); list_del(&l->node_ref_from); btrfsic_block_link_hashtable_remove(l); btrfsic_block_link_free(l); } } if (block->is_superblock) ret = btrfsic_map_superblock(state, bytenr, len, bdev, &block_ctx); else ret = btrfsic_map_block(state, bytenr, len, &block_ctx, 0); if (ret) { printk(KERN_INFO "btrfsic: btrfsic_map_block(root @%llu)" " failed!\n", (unsigned long long)bytenr); return; } block_ctx.data = mapped_data; /* the following is required in case of writes to mirrors, * use the same that was used for the lookup */ block_ctx.dev = dev_state; block_ctx.dev_bytenr = dev_bytenr; if (is_metadata || state->include_extent_data) { block->never_written = 0; block->iodone_w_error = 0; if (NULL != bio) { block->is_iodone = 0; BUG_ON(NULL == bio_is_patched); if (!*bio_is_patched) { block->orig_bio_bh_private = bio->bi_private; block->orig_bio_bh_end_io.bio = bio->bi_end_io; block->next_in_same_bio = NULL; bio->bi_private = block; bio->bi_end_io = btrfsic_bio_end_io; *bio_is_patched = 1; } else { struct btrfsic_block *chained_block = (struct btrfsic_block *) bio->bi_private; BUG_ON(NULL == chained_block); block->orig_bio_bh_private = chained_block->orig_bio_bh_private; block->orig_bio_bh_end_io.bio = chained_block->orig_bio_bh_end_io. 
bio; block->next_in_same_bio = chained_block; bio->bi_private = block; } } else if (NULL != bh) { block->is_iodone = 0; block->orig_bio_bh_private = bh->b_private; block->orig_bio_bh_end_io.bh = bh->b_end_io; block->next_in_same_bio = NULL; bh->b_private = block; bh->b_end_io = btrfsic_bh_end_io; } else { block->is_iodone = 1; block->orig_bio_bh_private = NULL; block->orig_bio_bh_end_io.bio = NULL; block->next_in_same_bio = NULL; } } block->flush_gen = dev_state->last_flush_gen + 1; block->submit_bio_bh_rw = submit_bio_bh_rw; if (is_metadata) { block->logical_bytenr = bytenr; block->is_metadata = 1; if (block->is_superblock) { ret = btrfsic_process_written_superblock( state, block, (struct btrfs_super_block *) mapped_data); if (state->print_mask & BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) { printk(KERN_INFO "[after new superblock is written]:\n"); btrfsic_dump_tree_sub(state, block, 0); } } else { block->mirror_num = 0; /* unknown */ ret = btrfsic_process_metablock( state, block, &block_ctx, (struct btrfs_header *) block_ctx.data, 0, 0); } if (ret) printk(KERN_INFO "btrfsic: btrfsic_process_metablock" "(root @%llu) failed!\n", (unsigned long long)dev_bytenr); } else { block->is_metadata = 0; block->mirror_num = 0; /* unknown */ block->generation = BTRFSIC_GENERATION_UNKNOWN; if (!state->include_extent_data && list_empty(&block->ref_from_list)) { /* * disk block is overwritten with extent * data (not meta data) and we are configured * to not include extent data: take the * chance and free the block's memory */ btrfsic_block_hashtable_remove(block); list_del(&block->all_blocks_node); btrfsic_block_free(block); } } btrfsic_release_block_ctx(&block_ctx); } else { /* block has not been found in hash table */ u64 bytenr; if (!is_metadata) { if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) printk(KERN_INFO "Written block (%s/%llu/?)" " !found in hash table, D.\n", dev_state->name, (unsigned long long)dev_bytenr); if (!state->include_extent_data) return; /* ignore that 
written D block */ /* this is getting ugly for the * include_extent_data case... */ bytenr = 0; /* unknown */ block_ctx.start = bytenr; block_ctx.len = len; block_ctx.bh = NULL; } else { bytenr = le64_to_cpu(((struct btrfs_header *) mapped_data)->bytenr); btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state, dev_bytenr, mapped_data); if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) printk(KERN_INFO "Written block @%llu (%s/%llu/?)" " !found in hash table, M.\n", (unsigned long long)bytenr, dev_state->name, (unsigned long long)dev_bytenr); ret = btrfsic_map_block(state, bytenr, len, &block_ctx, 0); if (ret) { printk(KERN_INFO "btrfsic: btrfsic_map_block(root @%llu)" " failed!\n", (unsigned long long)dev_bytenr); return; } } block_ctx.data = mapped_data; /* the following is required in case of writes to mirrors, * use the same that was used for the lookup */ block_ctx.dev = dev_state; block_ctx.dev_bytenr = dev_bytenr; block = btrfsic_block_alloc(); if (NULL == block) { printk(KERN_INFO "btrfsic: error, kmalloc failed!\n"); btrfsic_release_block_ctx(&block_ctx); return; } block->dev_state = dev_state; block->dev_bytenr = dev_bytenr; block->logical_bytenr = bytenr; block->is_metadata = is_metadata; block->never_written = 0; block->iodone_w_error = 0; block->mirror_num = 0; /* unknown */ block->flush_gen = dev_state->last_flush_gen + 1; block->submit_bio_bh_rw = submit_bio_bh_rw; if (NULL != bio) { block->is_iodone = 0; BUG_ON(NULL == bio_is_patched); if (!*bio_is_patched) { block->orig_bio_bh_private = bio->bi_private; block->orig_bio_bh_end_io.bio = bio->bi_end_io; block->next_in_same_bio = NULL; bio->bi_private = block; bio->bi_end_io = btrfsic_bio_end_io; *bio_is_patched = 1; } else { struct btrfsic_block *chained_block = (struct btrfsic_block *) bio->bi_private; BUG_ON(NULL == chained_block); block->orig_bio_bh_private = chained_block->orig_bio_bh_private; block->orig_bio_bh_end_io.bio = chained_block->orig_bio_bh_end_io.bio; block->next_in_same_bio = 
chained_block; bio->bi_private = block; } } else if (NULL != bh) { block->is_iodone = 0; block->orig_bio_bh_private = bh->b_private; block->orig_bio_bh_end_io.bh = bh->b_end_io; block->next_in_same_bio = NULL; bh->b_private = block; bh->b_end_io = btrfsic_bh_end_io; } else { block->is_iodone = 1; block->orig_bio_bh_private = NULL; block->orig_bio_bh_end_io.bio = NULL; block->next_in_same_bio = NULL; } if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) printk(KERN_INFO "New written %c-block @%llu (%s/%llu/%d)\n", is_metadata ? 'M' : 'D', (unsigned long long)block->logical_bytenr, block->dev_state->name, (unsigned long long)block->dev_bytenr, block->mirror_num); list_add(&block->all_blocks_node, &state->all_blocks_list); btrfsic_block_hashtable_add(block, &state->block_hashtable); if (is_metadata) { ret = btrfsic_process_metablock(state, block, &block_ctx, (struct btrfs_header *) block_ctx.data, 0, 0); if (ret) printk(KERN_INFO "btrfsic: process_metablock(root @%llu)" " failed!\n", (unsigned long long)dev_bytenr); } btrfsic_release_block_ctx(&block_ctx); } } static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status) { struct btrfsic_block *block = (struct btrfsic_block *)bp->bi_private; int iodone_w_error; /* mutex is not held! 
This is not safe if IO is not yet completed
	 * on umount */
	iodone_w_error = 0;
	if (bio_error_status)
		iodone_w_error = 1;

	BUG_ON(NULL == block);
	/* restore the original bio private data and completion handler */
	bp->bi_private = block->orig_bio_bh_private;
	bp->bi_end_io = block->orig_bio_bh_end_io.bio;

	/*
	 * One bio may carry several btrfsic blocks chained via
	 * next_in_same_bio; mark each of them as done.
	 */
	do {
		struct btrfsic_block *next_block;
		struct btrfsic_dev_state *const dev_state = block->dev_state;

		if ((dev_state->state->print_mask &
		     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
			printk(KERN_INFO
			       "bio_end_io(err=%d) for %c @%llu (%s/%llu/%d)\n",
			       bio_error_status,
			       btrfsic_get_block_type(dev_state->state, block),
			       (unsigned long long)block->logical_bytenr,
			       dev_state->name,
			       (unsigned long long)block->dev_bytenr,
			       block->mirror_num);
		next_block = block->next_in_same_bio;
		block->iodone_w_error = iodone_w_error;
		/* a completed FLUSH advances the device flush generation */
		if (block->submit_bio_bh_rw & REQ_FLUSH) {
			dev_state->last_flush_gen++;
			if ((dev_state->state->print_mask &
			     BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
				printk(KERN_INFO
				       "bio_end_io() new %s flush_gen=%llu\n",
				       dev_state->name,
				       (unsigned long long)
				       dev_state->last_flush_gen);
		}
		if (block->submit_bio_bh_rw & REQ_FUA)
			block->flush_gen = 0; /* FUA completed means block is
					       * on disk */
		block->is_iodone = 1; /* for FLUSH, this releases the block */
		block = next_block;
	} while (NULL != block);

	/* chain to the original completion handler */
	bp->bi_end_io(bp, bio_error_status);
}

/*
 * Completion handler installed on intercepted buffer_heads; mirrors
 * btrfsic_bio_end_io() for the submit_bh() path (a bh carries exactly
 * one block, so no chain walking is needed here).
 */
static void btrfsic_bh_end_io(struct buffer_head *bh, int uptodate)
{
	struct btrfsic_block *block = (struct btrfsic_block *)bh->b_private;
	int iodone_w_error = !uptodate;
	struct btrfsic_dev_state *dev_state;

	BUG_ON(NULL == block);
	dev_state = block->dev_state;
	if ((dev_state->state->print_mask & BTRFSIC_PRINT_MASK_END_IO_BIO_BH))
		printk(KERN_INFO
		       "bh_end_io(error=%d) for %c @%llu (%s/%llu/%d)\n",
		       iodone_w_error,
		       btrfsic_get_block_type(dev_state->state, block),
		       (unsigned long long)block->logical_bytenr,
		       block->dev_state->name,
		       (unsigned long long)block->dev_bytenr,
		       block->mirror_num);

	block->iodone_w_error = iodone_w_error;
	/* a completed FLUSH advances the device flush generation */
	if (block->submit_bio_bh_rw & REQ_FLUSH) {
		dev_state->last_flush_gen++;
		if ((dev_state->state->print_mask &
BTRFSIC_PRINT_MASK_END_IO_BIO_BH)) printk(KERN_INFO "bh_end_io() new %s flush_gen=%llu\n", dev_state->name, (unsigned long long)dev_state->last_flush_gen); } if (block->submit_bio_bh_rw & REQ_FUA) block->flush_gen = 0; /* FUA completed means block is on disk */ bh->b_private = block->orig_bio_bh_private; bh->b_end_io = block->orig_bio_bh_end_io.bh; block->is_iodone = 1; /* for FLUSH, this releases the block */ bh->b_end_io(bh, uptodate); } static int btrfsic_process_written_superblock( struct btrfsic_state *state, struct btrfsic_block *const superblock, struct btrfs_super_block *const super_hdr) { int pass; superblock->generation = btrfs_super_generation(super_hdr); if (!(superblock->generation > state->max_superblock_generation || 0 == state->max_superblock_generation)) { if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) printk(KERN_INFO "btrfsic: superblock @%llu (%s/%llu/%d)" " with old gen %llu <= %llu\n", (unsigned long long)superblock->logical_bytenr, superblock->dev_state->name, (unsigned long long)superblock->dev_bytenr, superblock->mirror_num, (unsigned long long) btrfs_super_generation(super_hdr), (unsigned long long) state->max_superblock_generation); } else { if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) printk(KERN_INFO "btrfsic: got new superblock @%llu (%s/%llu/%d)" " with new gen %llu > %llu\n", (unsigned long long)superblock->logical_bytenr, superblock->dev_state->name, (unsigned long long)superblock->dev_bytenr, superblock->mirror_num, (unsigned long long) btrfs_super_generation(super_hdr), (unsigned long long) state->max_superblock_generation); state->max_superblock_generation = btrfs_super_generation(super_hdr); state->latest_superblock = superblock; } for (pass = 0; pass < 3; pass++) { int ret; u64 next_bytenr; struct btrfsic_block *next_block; struct btrfsic_block_data_ctx tmp_next_block_ctx; struct btrfsic_block_link *l; int num_copies; int mirror_num; const char *additional_string = NULL; struct btrfs_disk_key 
tmp_disk_key; tmp_disk_key.type = BTRFS_ROOT_ITEM_KEY; tmp_disk_key.offset = 0; switch (pass) { case 0: tmp_disk_key.objectid = cpu_to_le64(BTRFS_ROOT_TREE_OBJECTID); additional_string = "root "; next_bytenr = btrfs_super_root(super_hdr); if (state->print_mask & BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) printk(KERN_INFO "root@%llu\n", (unsigned long long)next_bytenr); break; case 1: tmp_disk_key.objectid = cpu_to_le64(BTRFS_CHUNK_TREE_OBJECTID); additional_string = "chunk "; next_bytenr = btrfs_super_chunk_root(super_hdr); if (state->print_mask & BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) printk(KERN_INFO "chunk@%llu\n", (unsigned long long)next_bytenr); break; case 2: tmp_disk_key.objectid = cpu_to_le64(BTRFS_TREE_LOG_OBJECTID); additional_string = "log "; next_bytenr = btrfs_super_log_root(super_hdr); if (0 == next_bytenr) continue; if (state->print_mask & BTRFSIC_PRINT_MASK_ROOT_CHUNK_LOG_TREE_LOCATION) printk(KERN_INFO "log@%llu\n", (unsigned long long)next_bytenr); break; } num_copies = btrfs_num_copies(&state->root->fs_info->mapping_tree, next_bytenr, PAGE_SIZE); if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n", (unsigned long long)next_bytenr, num_copies); for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { int was_created; if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) printk(KERN_INFO "btrfsic_process_written_superblock(" "mirror_num=%d)\n", mirror_num); ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE, &tmp_next_block_ctx, mirror_num); if (ret) { printk(KERN_INFO "btrfsic: btrfsic_map_block(@%llu," " mirror=%d) failed!\n", (unsigned long long)next_bytenr, mirror_num); return -1; } next_block = btrfsic_block_lookup_or_add( state, &tmp_next_block_ctx, additional_string, 1, 0, 1, mirror_num, &was_created); if (NULL == next_block) { printk(KERN_INFO "btrfsic: error, kmalloc failed!\n"); btrfsic_release_block_ctx(&tmp_next_block_ctx); return -1; } next_block->disk_key 
= tmp_disk_key; if (was_created) next_block->generation = BTRFSIC_GENERATION_UNKNOWN; l = btrfsic_block_link_lookup_or_add( state, &tmp_next_block_ctx, next_block, superblock, BTRFSIC_GENERATION_UNKNOWN); btrfsic_release_block_ctx(&tmp_next_block_ctx); if (NULL == l) return -1; } } if (-1 == btrfsic_check_all_ref_blocks(state, superblock, 0)) { WARN_ON(1); btrfsic_dump_tree(state); } return 0; } static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state, struct btrfsic_block *const block, int recursion_level) { struct list_head *elem_ref_to; int ret = 0; if (recursion_level >= 3 + BTRFS_MAX_LEVEL) { /* * Note that this situation can happen and does not * indicate an error in regular cases. It happens * when disk blocks are freed and later reused. * The check-integrity module is not aware of any * block free operations, it just recognizes block * write operations. Therefore it keeps the linkage * information for a block until a block is * rewritten. This can temporarily cause incorrect * and even circular linkage informations. This * causes no harm unless such blocks are referenced * by the most recent super block. */ if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) printk(KERN_INFO "btrfsic: abort cyclic linkage (case 1).\n"); return ret; } /* * This algorithm is recursive because the amount of used stack * space is very small and the max recursion depth is limited. 
*/ list_for_each(elem_ref_to, &block->ref_to_list) { const struct btrfsic_block_link *const l = list_entry(elem_ref_to, struct btrfsic_block_link, node_ref_to); if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) printk(KERN_INFO "rl=%d, %c @%llu (%s/%llu/%d)" " %u* refers to %c @%llu (%s/%llu/%d)\n", recursion_level, btrfsic_get_block_type(state, block), (unsigned long long)block->logical_bytenr, block->dev_state->name, (unsigned long long)block->dev_bytenr, block->mirror_num, l->ref_cnt, btrfsic_get_block_type(state, l->block_ref_to), (unsigned long long) l->block_ref_to->logical_bytenr, l->block_ref_to->dev_state->name, (unsigned long long)l->block_ref_to->dev_bytenr, l->block_ref_to->mirror_num); if (l->block_ref_to->never_written) { printk(KERN_INFO "btrfs: attempt to write superblock" " which references block %c @%llu (%s/%llu/%d)" " which is never written!\n", btrfsic_get_block_type(state, l->block_ref_to), (unsigned long long) l->block_ref_to->logical_bytenr, l->block_ref_to->dev_state->name, (unsigned long long)l->block_ref_to->dev_bytenr, l->block_ref_to->mirror_num); ret = -1; } else if (!l->block_ref_to->is_iodone) { printk(KERN_INFO "btrfs: attempt to write superblock" " which references block %c @%llu (%s/%llu/%d)" " which is not yet iodone!\n", btrfsic_get_block_type(state, l->block_ref_to), (unsigned long long) l->block_ref_to->logical_bytenr, l->block_ref_to->dev_state->name, (unsigned long long)l->block_ref_to->dev_bytenr, l->block_ref_to->mirror_num); ret = -1; } else if (l->parent_generation != l->block_ref_to->generation && BTRFSIC_GENERATION_UNKNOWN != l->parent_generation && BTRFSIC_GENERATION_UNKNOWN != l->block_ref_to->generation) { printk(KERN_INFO "btrfs: attempt to write superblock" " which references block %c @%llu (%s/%llu/%d)" " with generation %llu !=" " parent generation %llu!\n", btrfsic_get_block_type(state, l->block_ref_to), (unsigned long long) l->block_ref_to->logical_bytenr, l->block_ref_to->dev_state->name, (unsigned long 
long)l->block_ref_to->dev_bytenr, l->block_ref_to->mirror_num, (unsigned long long)l->block_ref_to->generation, (unsigned long long)l->parent_generation); ret = -1; } else if (l->block_ref_to->flush_gen > l->block_ref_to->dev_state->last_flush_gen) { printk(KERN_INFO "btrfs: attempt to write superblock" " which references block %c @%llu (%s/%llu/%d)" " which is not flushed out of disk's write cache" " (block flush_gen=%llu," " dev->flush_gen=%llu)!\n", btrfsic_get_block_type(state, l->block_ref_to), (unsigned long long) l->block_ref_to->logical_bytenr, l->block_ref_to->dev_state->name, (unsigned long long)l->block_ref_to->dev_bytenr, l->block_ref_to->mirror_num, (unsigned long long)block->flush_gen, (unsigned long long) l->block_ref_to->dev_state->last_flush_gen); ret = -1; } else if (-1 == btrfsic_check_all_ref_blocks(state, l->block_ref_to, recursion_level + 1)) { ret = -1; } } return ret; } static int btrfsic_is_block_ref_by_superblock( const struct btrfsic_state *state, const struct btrfsic_block *block, int recursion_level) { struct list_head *elem_ref_from; if (recursion_level >= 3 + BTRFS_MAX_LEVEL) { /* refer to comment at "abort cyclic linkage (case 1)" */ if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) printk(KERN_INFO "btrfsic: abort cyclic linkage (case 2).\n"); return 0; } /* * This algorithm is recursive because the amount of used stack space * is very small and the max recursion depth is limited. 
*/ list_for_each(elem_ref_from, &block->ref_from_list) { const struct btrfsic_block_link *const l = list_entry(elem_ref_from, struct btrfsic_block_link, node_ref_from); if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) printk(KERN_INFO "rl=%d, %c @%llu (%s/%llu/%d)" " is ref %u* from %c @%llu (%s/%llu/%d)\n", recursion_level, btrfsic_get_block_type(state, block), (unsigned long long)block->logical_bytenr, block->dev_state->name, (unsigned long long)block->dev_bytenr, block->mirror_num, l->ref_cnt, btrfsic_get_block_type(state, l->block_ref_from), (unsigned long long) l->block_ref_from->logical_bytenr, l->block_ref_from->dev_state->name, (unsigned long long) l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num); if (l->block_ref_from->is_superblock && state->latest_superblock->dev_bytenr == l->block_ref_from->dev_bytenr && state->latest_superblock->dev_state->bdev == l->block_ref_from->dev_state->bdev) return 1; else if (btrfsic_is_block_ref_by_superblock(state, l->block_ref_from, recursion_level + 1)) return 1; } return 0; } static void btrfsic_print_add_link(const struct btrfsic_state *state, const struct btrfsic_block_link *l) { printk(KERN_INFO "Add %u* link from %c @%llu (%s/%llu/%d)" " to %c @%llu (%s/%llu/%d).\n", l->ref_cnt, btrfsic_get_block_type(state, l->block_ref_from), (unsigned long long)l->block_ref_from->logical_bytenr, l->block_ref_from->dev_state->name, (unsigned long long)l->block_ref_from->dev_bytenr, l->block_ref_from->mirror_num, btrfsic_get_block_type(state, l->block_ref_to), (unsigned long long)l->block_ref_to->logical_bytenr, l->block_ref_to->dev_state->name, (unsigned long long)l->block_ref_to->dev_bytenr, l->block_ref_to->mirror_num); } static void btrfsic_print_rem_link(const struct btrfsic_state *state, const struct btrfsic_block_link *l) { printk(KERN_INFO "Rem %u* link from %c @%llu (%s/%llu/%d)" " to %c @%llu (%s/%llu/%d).\n", l->ref_cnt, btrfsic_get_block_type(state, l->block_ref_from), (unsigned long 
long)l->block_ref_from->logical_bytenr,
	       l->block_ref_from->dev_state->name,
	       (unsigned long long)l->block_ref_from->dev_bytenr,
	       l->block_ref_from->mirror_num,
	       btrfsic_get_block_type(state, l->block_ref_to),
	       (unsigned long long)l->block_ref_to->logical_bytenr,
	       l->block_ref_to->dev_state->name,
	       (unsigned long long)l->block_ref_to->dev_bytenr,
	       l->block_ref_to->mirror_num);
}

/*
 * Single-character classification of a block, used in log messages:
 *   'S' - the most recently written superblock copy
 *   's' - any other superblock copy
 *   'M' - metadata block
 *   'D' - data block
 */
static char btrfsic_get_block_type(const struct btrfsic_state *state,
				   const struct btrfsic_block *block)
{
	if (block->is_superblock &&
	    state->latest_superblock->dev_bytenr == block->dev_bytenr &&
	    state->latest_superblock->dev_state->bdev == block->dev_state->bdev)
		return 'S';
	else if (block->is_superblock)
		return 's';
	else if (block->is_metadata)
		return 'M';
	else
		return 'D';
}

/*
 * Debug helper: dump the whole reference tree starting at the most
 * recent superblock.
 */
static void btrfsic_dump_tree(const struct btrfsic_state *state)
{
	btrfsic_dump_tree_sub(state, state->latest_superblock, 0);
}

/*
 * Recursively print one block and the blocks it refers to as an
 * indented tree, capped at BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL columns.
 * Note: 'buf' is static, so this is not reentrant/thread-safe; callers
 * hold btrfsic_mutex on the write path.
 */
static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
				  const struct btrfsic_block *block,
				  int indent_level)
{
	struct list_head *elem_ref_to;
	int indent_add;
	static char buf[80];
	int cursor_position;

	/*
	 * Should better fill an on-stack buffer with a complete line and
	 * dump it at once when it is time to print a newline character.
	 */

	/*
	 * This algorithm is recursive because the amount of used stack space
	 * is very small and the max recursion depth is limited.
*/
	/* Print this block's summary; bail out if the dump line would exceed
	 * the maximum indent budget. */
	indent_add = sprintf(buf, "%c-%llu(%s/%llu/%d)",
			     btrfsic_get_block_type(state, block),
			     (unsigned long long)block->logical_bytenr,
			     block->dev_state->name,
			     (unsigned long long)block->dev_bytenr,
			     block->mirror_num);
	if (indent_level + indent_add > BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
		printk("[...]\n");
		return;
	}
	printk(buf);
	indent_level += indent_add;
	if (list_empty(&block->ref_to_list)) {
		/* leaf: nothing referenced by this block */
		printk("\n");
		return;
	}
	if (block->mirror_num > 1 &&
	    !(state->print_mask & BTRFSIC_PRINT_MASK_TREE_WITH_ALL_MIRRORS)) {
		/* only dump the first mirror unless explicitly requested */
		printk(" [...]\n");
		return;
	}

	/* Recurse into every block this block references, re-indenting each
	 * sibling line to the current column. */
	cursor_position = indent_level;
	list_for_each(elem_ref_to, &block->ref_to_list) {
		const struct btrfsic_block_link *const l =
		    list_entry(elem_ref_to, struct btrfsic_block_link,
			       node_ref_to);

		while (cursor_position < indent_level) {
			printk(" ");
			cursor_position++;
		}
		if (l->ref_cnt > 1)
			indent_add = sprintf(buf, " %d*--> ", l->ref_cnt);
		else
			indent_add = sprintf(buf, " --> ");
		if (indent_level + indent_add >
		    BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL) {
			printk("[...]\n");
			cursor_position = 0;
			continue;
		}
		printk(buf);

		btrfsic_dump_tree_sub(state, l->block_ref_to,
				      indent_level + indent_add);
		cursor_position = 0;
	}
}

/*
 * Find the link from_block -> next_block in the link hashtable, or create
 * and hash a new one.  On a hit the refcount is bumped and the parent
 * generation refreshed.  Returns NULL only on allocation failure.
 */
static struct btrfsic_block_link *btrfsic_block_link_lookup_or_add(
		struct btrfsic_state *state,
		struct btrfsic_block_data_ctx *next_block_ctx,
		struct btrfsic_block *next_block,
		struct btrfsic_block *from_block,
		u64 parent_generation)
{
	struct btrfsic_block_link *l;

	l = btrfsic_block_link_hashtable_lookup(next_block_ctx->dev->bdev,
						next_block_ctx->dev_bytenr,
						from_block->dev_state->bdev,
						from_block->dev_bytenr,
						&state->block_link_hashtable);
	if (NULL == l) {
		l = btrfsic_block_link_alloc();
		if (NULL == l) {
			printk(KERN_INFO
			       "btrfsic: error, kmalloc" " failed!\n");
			return NULL;
		}

		l->block_ref_to = next_block;
		l->block_ref_from = from_block;
		l->ref_cnt = 1;
		l->parent_generation = parent_generation;

		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);

		/* link is threaded onto both blocks' lists and hashed */
		list_add(&l->node_ref_to, &from_block->ref_to_list);
		list_add(&l->node_ref_from, &next_block->ref_from_list);

		btrfsic_block_link_hashtable_add(l,
						 &state->block_link_hashtable);
	} else {
		l->ref_cnt++;
		l->parent_generation = parent_generation;
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			btrfsic_print_add_link(state, l);
	}

	return l;
}

/*
 * Look up a block by (bdev, dev_bytenr) in the block hashtable, or allocate,
 * initialize and hash a new one from block_ctx.  *was_created (if non-NULL)
 * reports whether a new block was made.  Returns NULL on allocation or
 * dev_state lookup failure.
 */
static struct btrfsic_block *btrfsic_block_lookup_or_add(
		struct btrfsic_state *state,
		struct btrfsic_block_data_ctx *block_ctx,
		const char *additional_string,
		int is_metadata,
		int is_iodone,
		int never_written,
		int mirror_num,
		int *was_created)
{
	struct btrfsic_block *block;

	block = btrfsic_block_hashtable_lookup(block_ctx->dev->bdev,
					       block_ctx->dev_bytenr,
					       &state->block_hashtable);
	if (NULL == block) {
		struct btrfsic_dev_state *dev_state;

		block = btrfsic_block_alloc();
		if (NULL == block) {
			printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
			return NULL;
		}
		dev_state = btrfsic_dev_state_lookup(block_ctx->dev->bdev);
		if (NULL == dev_state) {
			printk(KERN_INFO
			       "btrfsic: error, lookup dev_state failed!\n");
			btrfsic_block_free(block);
			return NULL;
		}
		block->dev_state = dev_state;
		block->dev_bytenr = block_ctx->dev_bytenr;
		block->logical_bytenr = block_ctx->start;
		block->is_metadata = is_metadata;
		block->is_iodone = is_iodone;
		block->never_written = never_written;
		block->mirror_num = mirror_num;
		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
			printk(KERN_INFO
			       "New %s%c-block @%llu (%s/%llu/%d)\n",
			       additional_string,
			       btrfsic_get_block_type(state, block),
			       (unsigned long long)block->logical_bytenr,
			       dev_state->name,
			       (unsigned long long)block->dev_bytenr,
			       mirror_num);
		list_add(&block->all_blocks_node, &state->all_blocks_list);
		btrfsic_block_hashtable_add(block, &state->block_hashtable);
		if (NULL != was_created)
			*was_created = 1;
	} else {
		if (NULL != was_created)
			*was_created = 0;
	}

	return block;
}

/*
 * Sanity check: verify that the logical bytenr really maps (via one of its
 * mirrors) to the physical (dev, dev_bytenr) that the bio/bh was submitted
 * to.  On mismatch, print all mirror mappings and WARN.
 */
static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
					   u64 bytenr,
					   struct btrfsic_dev_state *dev_state,
					   u64 dev_bytenr, char *data)
{
	int num_copies;
	int mirror_num;
	int ret;
	struct btrfsic_block_data_ctx block_ctx;
	int match = 0;

	num_copies = btrfs_num_copies(&state->root->fs_info->mapping_tree,
				      bytenr, PAGE_SIZE);

	for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
		ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
					&block_ctx, mirror_num);
		if (ret) {
			printk(KERN_INFO "btrfsic:"
			       " btrfsic_map_block(logical @%llu,"
			       " mirror %d) failed!\n",
			       (unsigned long long)bytenr, mirror_num);
			continue;
		}

		if (dev_state->bdev == block_ctx.dev->bdev &&
		    dev_bytenr == block_ctx.dev_bytenr) {
			match++;
			btrfsic_release_block_ctx(&block_ctx);
			break;
		}
		btrfsic_release_block_ctx(&block_ctx);
	}

	if (!match) {
		printk(KERN_INFO "btrfs: attempt to write M-block which contains logical bytenr that doesn't map to dev+physical bytenr of submit_bio,"
		       " buffer->log_bytenr=%llu, submit_bio(bdev=%s,"
		       " phys_bytenr=%llu)!\n",
		       (unsigned long long)bytenr, dev_state->name,
		       (unsigned long long)dev_bytenr);
		for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
			ret = btrfsic_map_block(state, bytenr, PAGE_SIZE,
						&block_ctx, mirror_num);
			if (ret)
				continue;

			printk(KERN_INFO "Read logical bytenr @%llu maps to"
			       " (%s/%llu/%d)\n",
			       (unsigned long long)bytenr,
			       block_ctx.dev->name,
			       (unsigned long long)block_ctx.dev_bytenr,
			       mirror_num);
		}
		WARN_ON(1);
	}
}

/* Look up per-device check-integrity state by block device. */
static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
		struct block_device *bdev)
{
	struct btrfsic_dev_state *ds;

	ds = btrfsic_dev_state_hashtable_lookup(bdev,
						&btrfsic_dev_state_hashtable);
	return ds;
}

/*
 * Wrapper around submit_bh() that intercepts superblock writes and FLUSH
 * requests for integrity checking before passing the bh to the block layer.
 */
int btrfsic_submit_bh(int rw, struct buffer_head *bh)
{
	struct btrfsic_dev_state *dev_state;

	if (!btrfsic_is_initialized)
		return submit_bh(rw, bh);

	mutex_lock(&btrfsic_mutex);
	/* since btrfsic_submit_bh() might also be called before
	 * btrfsic_mount(), this might return NULL */
	dev_state = btrfsic_dev_state_lookup(bh->b_bdev);

	/* Only called to write the superblock (incl. FLUSH/FUA) */
	if (NULL != dev_state &&
	    (rw & WRITE) && bh->b_size > 0) {
		u64 dev_bytenr;

		/* 4096: assumes superblock buffer_heads use a 4 KiB block
		 * size here — TODO confirm against the submitting caller */
		dev_bytenr = 4096 * bh->b_blocknr;
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bh(rw=0x%x, blocknr=%lu (bytenr %llu),"
			       " size=%lu, data=%p, bdev=%p)\n",
			       rw, (unsigned long)bh->b_blocknr,
			       (unsigned long long)dev_bytenr,
			       (unsigned long)bh->b_size, bh->b_data,
			       bh->b_bdev);
		btrfsic_process_written_block(dev_state, dev_bytenr,
					      bh->b_data, bh->b_size, NULL,
					      NULL, bh, rw);
	} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bh(rw=0x%x) FLUSH, bdev=%p)\n",
			       rw, bh->b_bdev);
		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
			/* dummy flush tracking block still in flight */
			if ((dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				printk(KERN_INFO
				       "btrfsic_submit_bh(%s) with FLUSH"
				       " but dummy block already in use"
				       " (ignored)!\n",
				       dev_state->name);
		} else {
			/* hijack the bh's end_io so the flush completion can
			 * be observed; original private/end_io are saved and
			 * restored by the end_io handler */
			struct btrfsic_block *const block =
				&dev_state->dummy_block_for_bio_bh_flush;

			block->is_iodone = 0;
			block->never_written = 0;
			block->iodone_w_error = 0;
			block->flush_gen = dev_state->last_flush_gen + 1;
			block->submit_bio_bh_rw = rw;
			block->orig_bio_bh_private = bh->b_private;
			block->orig_bio_bh_end_io.bh = bh->b_end_io;
			block->next_in_same_bio = NULL;
			bh->b_private = block;
			bh->b_end_io = btrfsic_bh_end_io;
		}
	}
	mutex_unlock(&btrfsic_mutex);
	return submit_bh(rw, bh);
}

/*
 * Wrapper around submit_bio() that records every written page (and FLUSH
 * requests) for integrity checking before submitting the bio.
 */
void btrfsic_submit_bio(int rw, struct bio *bio)
{
	struct btrfsic_dev_state *dev_state;

	if (!btrfsic_is_initialized) {
		submit_bio(rw, bio);
		return;
	}

	mutex_lock(&btrfsic_mutex);
	/* since btrfsic_submit_bio() is also called before
	 * btrfsic_mount(), this might return NULL */
	dev_state = btrfsic_dev_state_lookup(bio->bi_bdev);
	if (NULL != dev_state &&
	    (rw & WRITE) && NULL != bio->bi_io_vec) {
		unsigned int i;
		u64 dev_bytenr;
		int bio_is_patched;

		/* bi_sector is in 512-byte units */
		dev_bytenr = 512 * bio->bi_sector;
		bio_is_patched = 0;
		if (dev_state->state->print_mask
		    & BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bio(rw=0x%x, bi_vcnt=%u,"
			       " bi_sector=%lu (bytenr %llu), bi_bdev=%p)\n",
			       rw, bio->bi_vcnt, (unsigned long)bio->bi_sector,
			       (unsigned long long)dev_bytenr,
			       bio->bi_bdev);

		/* walk every segment of the bio and feed it through the
		 * written-block bookkeeping */
		for (i = 0; i < bio->bi_vcnt; i++) {
			u8 *mapped_data;

			mapped_data = kmap(bio->bi_io_vec[i].bv_page);
			if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			     BTRFSIC_PRINT_MASK_VERBOSE) ==
			    (dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				printk(KERN_INFO
				       "#%u: page=%p, mapped=%p, len=%u,"
				       " offset=%u\n",
				       i, bio->bi_io_vec[i].bv_page,
				       mapped_data,
				       bio->bi_io_vec[i].bv_len,
				       bio->bi_io_vec[i].bv_offset);
			btrfsic_process_written_block(dev_state, dev_bytenr,
						      mapped_data,
						      bio->bi_io_vec[i].bv_len,
						      bio, &bio_is_patched,
						      NULL, rw);
			kunmap(bio->bi_io_vec[i].bv_page);
			dev_bytenr += bio->bi_io_vec[i].bv_len;
		}
	} else if (NULL != dev_state && (rw & REQ_FLUSH)) {
		if (dev_state->state->print_mask &
		    BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
			printk(KERN_INFO
			       "submit_bio(rw=0x%x) FLUSH, bdev=%p)\n",
			       rw, bio->bi_bdev);
		if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
			if ((dev_state->state->print_mask &
			     (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
			      BTRFSIC_PRINT_MASK_VERBOSE)))
				printk(KERN_INFO
				       "btrfsic_submit_bio(%s) with FLUSH"
				       " but dummy block already in use"
				       " (ignored)!\n",
				       dev_state->name);
		} else {
			/* hijack bi_end_io to observe flush completion; the
			 * end_io handler restores the saved values */
			struct btrfsic_block *const block =
				&dev_state->dummy_block_for_bio_bh_flush;

			block->is_iodone = 0;
			block->never_written = 0;
			block->iodone_w_error = 0;
			block->flush_gen = dev_state->last_flush_gen + 1;
			block->submit_bio_bh_rw = rw;
			block->orig_bio_bh_private = bio->bi_private;
			block->orig_bio_bh_end_io.bio = bio->bi_end_io;
			block->next_in_same_bio = NULL;
			bio->bi_private = block;
			bio->bi_end_io = btrfsic_bio_end_io;
		}
	}
	mutex_unlock(&btrfsic_mutex);
	submit_bio(rw, bio);
}

/*
 * Set up check-integrity state for a mounting filesystem: allocate the
 * global state, hash a dev_state per device (named after the basename of
 * the device path) and scan the superblocks.  Returns 0 or a negative
 * error / -1 on allocation failure.
 */
int btrfsic_mount(struct btrfs_root *root,
		  struct btrfs_fs_devices *fs_devices,
		  int including_extent_data, u32 print_mask)
{
	int ret;
	struct btrfsic_state *state;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;

	state = kzalloc(sizeof(*state), GFP_NOFS);
	if (NULL == state) {
		printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
		return -1;
	}

	if (!btrfsic_is_initialized) {
		mutex_init(&btrfsic_mutex);
		btrfsic_dev_state_hashtable_init(&btrfsic_dev_state_hashtable);
		btrfsic_is_initialized = 1;
	}
	mutex_lock(&btrfsic_mutex);
	state->root = root;
	state->print_mask = print_mask;
	state->include_extent_data = including_extent_data;
	state->csum_size = 0;
	INIT_LIST_HEAD(&state->all_blocks_list);
	btrfsic_block_hashtable_init(&state->block_hashtable);
	btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
	state->max_superblock_generation = 0;
	state->latest_superblock = NULL;

	list_for_each_entry(device, dev_head, dev_list) {
		struct btrfsic_dev_state *ds;
		char *p;

		if (!device->bdev || !device->name)
			continue;

		ds = btrfsic_dev_state_alloc();
		if (NULL == ds) {
			printk(KERN_INFO
			       "btrfs check-integrity: kmalloc() failed!\n");
			mutex_unlock(&btrfsic_mutex);
			return -1;
		}
		ds->bdev = device->bdev;
		ds->state = state;
		bdevname(ds->bdev, ds->name);
		ds->name[BDEVNAME_SIZE - 1] = '\0';
		/* strip everything up to the last '/' so only the device
		 * basename remains */
		for (p = ds->name; *p != '\0'; p++);
		while (p > ds->name && *p != '/')
			p--;
		if (*p == '/')
			p++;
		strlcpy(ds->name, p, sizeof(ds->name));
		btrfsic_dev_state_hashtable_add(ds,
						&btrfsic_dev_state_hashtable);
	}

	ret = btrfsic_process_superblock(state, fs_devices);
	if (0 != ret) {
		mutex_unlock(&btrfsic_mutex);
		btrfsic_unmount(root, fs_devices);
		return ret;
	}

	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_DATABASE)
		btrfsic_dump_database(state);
	if (state->print_mask & BTRFSIC_PRINT_MASK_INITIAL_TREE)
		btrfsic_dump_tree(state);

	mutex_unlock(&btrfsic_mutex);
	return 0;
}

/*
 * Tear down check-integrity state on unmount: remove and free each device's
 * dev_state, then free all blocks and block_links.  Blocks with I/O still
 * outstanding are reported and leaked rather than freed.
 */
void btrfsic_unmount(struct btrfs_root *root,
		     struct btrfs_fs_devices *fs_devices)
{
	struct list_head *elem_all;
	struct list_head *tmp_all;
	struct btrfsic_state *state;
	struct list_head *dev_head = &fs_devices->devices;
	struct btrfs_device *device;

	if (!btrfsic_is_initialized)
		return;

	mutex_lock(&btrfsic_mutex);
	state = NULL;
	list_for_each_entry(device, dev_head, dev_list) {
		struct btrfsic_dev_state *ds;

		if (!device->bdev || !device->name)
			continue;

		ds = btrfsic_dev_state_hashtable_lookup(
				device->bdev,
				&btrfsic_dev_state_hashtable);
		if (NULL != ds) {
			state = ds->state;
			btrfsic_dev_state_hashtable_remove(ds);
			btrfsic_dev_state_free(ds);
		}
	}

	if (NULL == state) {
		printk(KERN_INFO
		       "btrfsic: error, cannot find state information"
		       " on umount!\n");
		mutex_unlock(&btrfsic_mutex);
		return;
	}

	/*
	 * Don't care about keeping the lists' state up to date,
	 * just free all memory that was allocated dynamically.
	 * Free the blocks and the block_links.
	 */
	list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
		struct btrfsic_block *const b_all =
		    list_entry(elem_all, struct btrfsic_block,
			       all_blocks_node);
		struct list_head *elem_ref_to;
		struct list_head *tmp_ref_to;

		list_for_each_safe(elem_ref_to, tmp_ref_to,
				   &b_all->ref_to_list) {
			struct btrfsic_block_link *const l =
			    list_entry(elem_ref_to,
				       struct btrfsic_block_link,
				       node_ref_to);

			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
				btrfsic_print_rem_link(state, l);

			l->ref_cnt--;
			if (0 == l->ref_cnt)
				btrfsic_block_link_free(l);
		}

		if (b_all->is_iodone)
			btrfsic_block_free(b_all);
		else
			printk(KERN_INFO "btrfs: attempt to free %c-block"
			       " @%llu (%s/%llu/%d) on umount which is"
			       " not yet iodone!\n",
			       btrfsic_get_block_type(state, b_all),
			       (unsigned long long)b_all->logical_bytenr,
			       b_all->dev_state->name,
			       (unsigned long long)b_all->dev_bytenr,
			       b_all->mirror_num);
	}

	mutex_unlock(&btrfsic_mutex);
	kfree(state);
}
gpl-2.0
percy-g2/android_kernel_motorola_msm8610
net/tipc/config.c
4811
14798
/* * net/tipc/config.c: TIPC configuration management code * * Copyright (c) 2002-2006, Ericsson AB * Copyright (c) 2004-2007, 2010-2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/

#include "core.h"
#include "port.h"
#include "name_table.h"
#include "config.h"

static u32 config_port_ref;		/* port used to receive cfg requests */
static DEFINE_SPINLOCK(config_lock);	/* protects the request state below */

static const void *req_tlv_area;	/* request message TLV area */
static int req_tlv_space;		/* request message TLV area size */
static int rep_headroom;		/* reply message headroom to use */

/* Allocate a reply skb with the configured headroom reserved. */
struct sk_buff *tipc_cfg_reply_alloc(int payload_size)
{
	struct sk_buff *buf;

	buf = alloc_skb(rep_headroom + payload_size, GFP_ATOMIC);
	if (buf)
		skb_reserve(buf, rep_headroom);
	return buf;
}

/* Append one TLV to a reply skb; returns 1 on success, 0 if it won't fit. */
int tipc_cfg_append_tlv(struct sk_buff *buf, int tlv_type,
			void *tlv_data, int tlv_data_size)
{
	struct tlv_desc *tlv = (struct tlv_desc *)skb_tail_pointer(buf);
	int new_tlv_space = TLV_SPACE(tlv_data_size);

	if (skb_tailroom(buf) < new_tlv_space)
		return 0;
	skb_put(buf, new_tlv_space);
	tlv->tlv_type = htons(tlv_type);
	tlv->tlv_len = htons(TLV_LENGTH(tlv_data_size));
	if (tlv_data_size && tlv_data)
		memcpy(TLV_DATA(tlv), tlv_data, tlv_data_size);
	return 1;
}

/* Build a reply carrying a single u32 TLV (network byte order). */
static struct sk_buff *tipc_cfg_reply_unsigned_type(u16 tlv_type, u32 value)
{
	struct sk_buff *buf;
	__be32 value_net;

	buf = tipc_cfg_reply_alloc(TLV_SPACE(sizeof(value)));
	if (buf) {
		value_net = htonl(value);
		tipc_cfg_append_tlv(buf, tlv_type, &value_net,
				    sizeof(value_net));
	}
	return buf;
}

static struct sk_buff *tipc_cfg_reply_unsigned(u32 value)
{
	return tipc_cfg_reply_unsigned_type(TIPC_TLV_UNSIGNED, value);
}

/* Build a reply carrying one NUL-terminated string TLV. */
struct sk_buff *tipc_cfg_reply_string_type(u16 tlv_type, char *string)
{
	struct sk_buff *buf;
	int string_len = strlen(string) + 1;

	buf = tipc_cfg_reply_alloc(TLV_SPACE(string_len));
	if (buf)
		tipc_cfg_append_tlv(buf, tlv_type, string, string_len);
	return buf;
}

#define MAX_STATS_INFO 2000

/* TIPC_CMD_SHOW_STATS handler: currently only reports the TIPC version. */
static struct sk_buff *tipc_show_stats(void)
{
	struct sk_buff *buf;
	struct tlv_desc *rep_tlv;
	struct print_buf pb;
	int str_len;
	u32 value;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	value = ntohl(*(u32 *)TLV_DATA(req_tlv_area));
	if (value != 0)
		return tipc_cfg_reply_error_string("unsupported argument");

	buf = tipc_cfg_reply_alloc(TLV_SPACE(MAX_STATS_INFO));
	if (buf == NULL)
		return NULL;

	rep_tlv = (struct tlv_desc *)buf->data;
	tipc_printbuf_init(&pb, (char *)TLV_DATA(rep_tlv), MAX_STATS_INFO);
	tipc_printf(&pb, "TIPC version " TIPC_MOD_VER "\n");
	/* Use additional tipc_printf()'s to return more info ... */
	str_len = tipc_printbuf_validate(&pb);
	skb_put(buf, TLV_SPACE(str_len));
	TLV_SET(rep_tlv, TIPC_TLV_ULTRA_STRING, NULL, str_len);

	return buf;
}

/* TIPC_CMD_ENABLE_BEARER handler. */
static struct sk_buff *cfg_enable_bearer(void)
{
	struct tipc_bearer_config *args;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_CONFIG))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	args = (struct tipc_bearer_config *)TLV_DATA(req_tlv_area);
	if (tipc_enable_bearer(args->name,
			       ntohl(args->disc_domain),
			       ntohl(args->priority)))
		return tipc_cfg_reply_error_string("unable to enable bearer");

	return tipc_cfg_reply_none();
}

/* TIPC_CMD_DISABLE_BEARER handler. */
static struct sk_buff *cfg_disable_bearer(void)
{
	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_BEARER_NAME))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	if (tipc_disable_bearer((char *)TLV_DATA(req_tlv_area)))
		return tipc_cfg_reply_error_string("unable to disable bearer");

	return tipc_cfg_reply_none();
}

/* TIPC_CMD_SET_NODE_ADDR handler: assign the node address exactly once. */
static struct sk_buff *cfg_set_own_addr(void)
{
	u32 addr;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	addr = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (addr == tipc_own_addr)
		return tipc_cfg_reply_none();
	if (!tipc_addr_node_valid(addr))
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (node address)");
	if (tipc_own_addr)
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
						   " (cannot change node address once assigned)");

	/*
	 * Must temporarily release configuration spinlock while switching into
	 * networking mode as it calls tipc_eth_media_start(), which may sleep.
* Releasing the lock is harmless as other locally-issued configuration * commands won't occur until this one completes, and remotely-issued * configuration commands can't be received until a local configuration * command to enable the first bearer is received and processed. */ spin_unlock_bh(&config_lock); tipc_core_start_net(addr); spin_lock_bh(&config_lock); return tipc_cfg_reply_none(); } static struct sk_buff *cfg_set_remote_mng(void) { u32 value; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); tipc_remote_management = (value != 0); return tipc_cfg_reply_none(); } static struct sk_buff *cfg_set_max_publications(void) { u32 value; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); if (value < 1 || value > 65535) return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE " (max publications must be 1-65535)"); tipc_max_publications = value; return tipc_cfg_reply_none(); } static struct sk_buff *cfg_set_max_subscriptions(void) { u32 value; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); if (value < 1 || value > 65535) return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE " (max subscriptions must be 1-65535"); tipc_max_subscriptions = value; return tipc_cfg_reply_none(); } static struct sk_buff *cfg_set_max_ports(void) { u32 value; if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED)) return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR); value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area)); if (value == tipc_max_ports) return tipc_cfg_reply_none(); if (value < 127 || value > 65535) return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE " (max ports must be 127-65535)"); return 
	tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
				    " (cannot change max ports while TIPC is active)");
}

/* TIPC_CMD_SET_NETID handler: range 1-9999, only changeable before the
 * node has joined a network. */
static struct sk_buff *cfg_set_netid(void)
{
	u32 value;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_UNSIGNED))
		return tipc_cfg_reply_error_string(TIPC_CFG_TLV_ERROR);
	value = ntohl(*(__be32 *)TLV_DATA(req_tlv_area));
	if (value == tipc_net_id)
		return tipc_cfg_reply_none();
	if (value < 1 || value > 9999)
		return tipc_cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
						   " (network id must be 1-9999)");
	if (tipc_own_addr)
		return tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
			" (cannot change network id once TIPC has joined a network)");
	tipc_net_id = value;
	return tipc_cfg_reply_none();
}

/*
 * Central configuration command dispatcher.  Stores the request TLV area in
 * module-level state (under config_lock), authorizes the command based on
 * its origin, then dispatches to the per-command handler.  Returns the reply
 * skb (may be NULL if a handler fails to allocate).
 */
struct sk_buff *tipc_cfg_do_cmd(u32 orig_node, u16 cmd,
				const void *request_area,
				int request_space, int reply_headroom)
{
	struct sk_buff *rep_tlv_buf;

	spin_lock_bh(&config_lock);

	/* Save request and reply details in a well-known location */
	req_tlv_area = request_area;
	req_tlv_space = request_space;
	rep_headroom = reply_headroom;

	/* Check command authorization */
	if (likely(orig_node == tipc_own_addr)) {
		/* command is permitted */
	} else if (cmd >= 0x8000) {
		/* 0x8000+: local-only commands */
		rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
							  " (cannot be done remotely)");
		goto exit;
	} else if (!tipc_remote_management) {
		rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NO_REMOTE);
		goto exit;
	} else if (cmd >= 0x4000) {
		/* 0x4000+: only the zone master may issue these remotely */
		u32 domain = 0;

		if ((tipc_nametbl_translate(TIPC_ZM_SRV, 0, &domain) == 0) ||
		    (domain != orig_node)) {
			rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_ZONE_MSTR);
			goto exit;
		}
	}

	/* Call appropriate processing routine */
	switch (cmd) {
	case TIPC_CMD_NOOP:
		rep_tlv_buf = tipc_cfg_reply_none();
		break;
	case TIPC_CMD_GET_NODES:
		rep_tlv_buf = tipc_node_get_nodes(req_tlv_area,
						  req_tlv_space);
		break;
	case TIPC_CMD_GET_LINKS:
		rep_tlv_buf = tipc_node_get_links(req_tlv_area,
						  req_tlv_space);
		break;
	case TIPC_CMD_SHOW_LINK_STATS:
		rep_tlv_buf = tipc_link_cmd_show_stats(req_tlv_area,
						       req_tlv_space);
		break;
	case TIPC_CMD_RESET_LINK_STATS:
		rep_tlv_buf = tipc_link_cmd_reset_stats(req_tlv_area,
							req_tlv_space);
		break;
	case TIPC_CMD_SHOW_NAME_TABLE:
		rep_tlv_buf = tipc_nametbl_get(req_tlv_area, req_tlv_space);
		break;
	case TIPC_CMD_GET_BEARER_NAMES:
		rep_tlv_buf = tipc_bearer_get_names();
		break;
	case TIPC_CMD_GET_MEDIA_NAMES:
		rep_tlv_buf = tipc_media_get_names();
		break;
	case TIPC_CMD_SHOW_PORTS:
		rep_tlv_buf = tipc_port_get_ports();
		break;
	case TIPC_CMD_SET_LOG_SIZE:
		rep_tlv_buf = tipc_log_resize_cmd(req_tlv_area,
						  req_tlv_space);
		break;
	case TIPC_CMD_DUMP_LOG:
		rep_tlv_buf = tipc_log_dump();
		break;
	case TIPC_CMD_SHOW_STATS:
		rep_tlv_buf = tipc_show_stats();
		break;
	case TIPC_CMD_SET_LINK_TOL:
	case TIPC_CMD_SET_LINK_PRI:
	case TIPC_CMD_SET_LINK_WINDOW:
		rep_tlv_buf = tipc_link_cmd_config(req_tlv_area,
						   req_tlv_space, cmd);
		break;
	case TIPC_CMD_ENABLE_BEARER:
		rep_tlv_buf = cfg_enable_bearer();
		break;
	case TIPC_CMD_DISABLE_BEARER:
		rep_tlv_buf = cfg_disable_bearer();
		break;
	case TIPC_CMD_SET_NODE_ADDR:
		rep_tlv_buf = cfg_set_own_addr();
		break;
	case TIPC_CMD_SET_REMOTE_MNG:
		rep_tlv_buf = cfg_set_remote_mng();
		break;
	case TIPC_CMD_SET_MAX_PORTS:
		rep_tlv_buf = cfg_set_max_ports();
		break;
	case TIPC_CMD_SET_MAX_PUBL:
		rep_tlv_buf = cfg_set_max_publications();
		break;
	case TIPC_CMD_SET_MAX_SUBSCR:
		rep_tlv_buf = cfg_set_max_subscriptions();
		break;
	case TIPC_CMD_SET_NETID:
		rep_tlv_buf = cfg_set_netid();
		break;
	case TIPC_CMD_GET_REMOTE_MNG:
		rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_remote_management);
		break;
	case TIPC_CMD_GET_MAX_PORTS:
		rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_ports);
		break;
	case TIPC_CMD_GET_MAX_PUBL:
		rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_publications);
		break;
	case TIPC_CMD_GET_MAX_SUBSCR:
		rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_max_subscriptions);
		break;
	case TIPC_CMD_GET_NETID:
		rep_tlv_buf = tipc_cfg_reply_unsigned(tipc_net_id);
		break;
	case TIPC_CMD_NOT_NET_ADMIN:
		rep_tlv_buf =
			tipc_cfg_reply_error_string(TIPC_CFG_NOT_NET_ADMIN);
		break;
	case TIPC_CMD_SET_MAX_ZONES:
	case TIPC_CMD_GET_MAX_ZONES:
	case TIPC_CMD_SET_MAX_SLAVES:
	case TIPC_CMD_GET_MAX_SLAVES:
	case TIPC_CMD_SET_MAX_CLUSTERS:
	case TIPC_CMD_GET_MAX_CLUSTERS:
	case TIPC_CMD_SET_MAX_NODES:
	case TIPC_CMD_GET_MAX_NODES:
		rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
							  " (obsolete command)");
		break;
	default:
		rep_tlv_buf = tipc_cfg_reply_error_string(TIPC_CFG_NOT_SUPPORTED
							  " (unknown command)");
		break;
	}

	/* Return reply buffer */
exit:
	spin_unlock_bh(&config_lock);
	return rep_tlv_buf;
}

/*
 * Port callback for configuration requests received as named messages.
 * Validates the request header, runs the command, and sends back either the
 * generated reply or (on failure to build one) the original request buffer.
 */
static void cfg_named_msg_event(void *userdata,
				u32 port_ref,
				struct sk_buff **buf,
				const unchar *msg,
				u32 size,
				u32 importance,
				struct tipc_portid const *orig,
				struct tipc_name_seq const *dest)
{
	struct tipc_cfg_msg_hdr *req_hdr;
	struct tipc_cfg_msg_hdr *rep_hdr;
	struct sk_buff *rep_buf;

	/* Validate configuration message header (ignore invalid message) */
	req_hdr = (struct tipc_cfg_msg_hdr *)msg;
	if ((size < sizeof(*req_hdr)) ||
	    (size != TCM_ALIGN(ntohl(req_hdr->tcm_len))) ||
	    (ntohs(req_hdr->tcm_flags) != TCM_F_REQUEST)) {
		warn("Invalid configuration message discarded\n");
		return;
	}

	/* Generate reply for request (if can't, return request) */
	rep_buf = tipc_cfg_do_cmd(orig->node,
				  ntohs(req_hdr->tcm_type),
				  msg + sizeof(*req_hdr),
				  size - sizeof(*req_hdr),
				  BUF_HEADROOM + MAX_H_SIZE + sizeof(*rep_hdr));
	if (rep_buf) {
		/* prepend a reply header copied from the request, with the
		 * REQUEST flag cleared and the length updated */
		skb_push(rep_buf, sizeof(*rep_hdr));
		rep_hdr = (struct tipc_cfg_msg_hdr *)rep_buf->data;
		memcpy(rep_hdr, req_hdr, sizeof(*rep_hdr));
		rep_hdr->tcm_len = htonl(rep_buf->len);
		rep_hdr->tcm_flags &= htons(~TCM_F_REQUEST);
	} else {
		/* echo the request back; ownership of *buf is taken */
		rep_buf = *buf;
		*buf = NULL;
	}

	/* NEED TO ADD CODE TO HANDLE FAILED SEND (SUCH AS CONGESTION) */
	tipc_send_buf2port(port_ref, orig, rep_buf, rep_buf->len);
}

/*
 * Create the configuration service port and publish it under TIPC_CFG_SRV
 * at this node's address.  Returns 0 on success, error code otherwise.
 */
int tipc_cfg_init(void)
{
	struct tipc_name_seq seq;
	int res;

	res = tipc_createport(NULL, TIPC_CRITICAL_IMPORTANCE,
			      NULL, NULL, NULL,
			      NULL, cfg_named_msg_event, NULL,
			      NULL, &config_port_ref);
	if (res)
		goto failed;

	seq.type = TIPC_CFG_SRV;
	seq.lower = seq.upper = tipc_own_addr;
	res = tipc_publish(config_port_ref, TIPC_ZONE_SCOPE, &seq);
	if (res)
		goto failed;

	return 0;

failed:
	err("Unable to create configuration service\n");
	return res;
}

/* Tear down the configuration service port (idempotent). */
void tipc_cfg_stop(void)
{
	if (config_port_ref) {
		tipc_deleteport(config_port_ref);
		config_port_ref = 0;
	}
}
gpl-2.0
TheTypoMaster/android_kernel_samsung_smdk4412
drivers/staging/media/as102/as102_usb_drv.c
4811
12223
/*
 * Abilis Systems Single DVB-T Receiver
 * Copyright (C) 2008 Pierrick Hascoet <pierrick.hascoet@abilis.com>
 * Copyright (C) 2010 Devin Heitmueller <dheitmueller@kernellabs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/usb.h>

#include "as102_drv.h"
#include "as102_usb_drv.h"
#include "as102_fw.h"

static void as102_usb_disconnect(struct usb_interface *interface);
static int as102_usb_probe(struct usb_interface *interface,
			   const struct usb_device_id *id);

static int as102_usb_start_stream(struct as102_dev_t *dev);
static void as102_usb_stop_stream(struct as102_dev_t *dev);

static int as102_open(struct inode *inode, struct file *file);
static int as102_release(struct inode *inode, struct file *file);

static struct usb_device_id as102_usb_id_table[] = {
	{ USB_DEVICE(AS102_USB_DEVICE_VENDOR_ID, AS102_USB_DEVICE_PID_0001) },
	{ USB_DEVICE(PCTV_74E_USB_VID, PCTV_74E_USB_PID) },
	{ USB_DEVICE(ELGATO_EYETV_DTT_USB_VID, ELGATO_EYETV_DTT_USB_PID) },
	{ USB_DEVICE(NBOX_DVBT_DONGLE_USB_VID, NBOX_DVBT_DONGLE_USB_PID) },
	{ USB_DEVICE(SKY_IT_DIGITAL_KEY_USB_VID, SKY_IT_DIGITAL_KEY_USB_PID) },
	{ } /* Terminating entry */
};

/* Note that this table must always have the same number of entries as the
 * as102_usb_id_table struct (probe() indexes one with the other) */
static const char * const as102_device_names[] = {
	AS102_REFERENCE_DESIGN,
	AS102_PCTV_74E,
	AS102_ELGATO_EYETV_DTT_NAME,
	AS102_NBOX_DVBT_DONGLE_NAME,
	AS102_SKY_IT_DIGITAL_KEY_NAME,
	NULL /* Terminating entry */
};

/* eLNA configuration: devices built on the reference design work best
   with 0xA0, while custom designs seem to require 0xC0 */
static uint8_t const as102_elna_cfg[] = {
	0xA0,
	0xC0,
	0xC0,
	0xA0,
	0xA0,
	0x00 /* Terminating entry */
};

struct usb_driver as102_usb_driver = {
	.name		= DRIVER_FULL_NAME,
	.probe		= as102_usb_probe,
	.disconnect	= as102_usb_disconnect,
	.id_table	= as102_usb_id_table
};

static const struct file_operations as102_dev_fops = {
	.owner		= THIS_MODULE,
	.open		= as102_open,
	.release	= as102_release,
};

static struct usb_class_driver as102_usb_class_driver = {
	.name		= "aton2-%d",
	.fops		= &as102_dev_fops,
	.minor_base	= AS102_DEVICE_MAJOR,
};

/*
 * Exchange a command with the device over endpoint 0: optionally send
 * send_buf as a vendor control OUT transfer, then optionally read
 * recv_buf_len bytes back with a vendor control IN transfer.  Returns the
 * last usb_control_msg() result (bytes transferred), a negative USB error,
 * or -1 on a short write.
 */
static int as102_usb_xfer_cmd(struct as10x_bus_adapter_t *bus_adap,
			      unsigned char *send_buf, int send_buf_len,
			      unsigned char *recv_buf, int recv_buf_len)
{
	int ret = 0;
	ENTER();

	if (send_buf != NULL) {
		ret = usb_control_msg(bus_adap->usb_dev,
				      usb_sndctrlpipe(bus_adap->usb_dev, 0),
				      AS102_USB_DEVICE_TX_CTRL_CMD,
				      USB_DIR_OUT | USB_TYPE_VENDOR |
				      USB_RECIP_DEVICE,
				      bus_adap->cmd_xid, /* value */
				      0, /* index */
				      send_buf, send_buf_len,
				      USB_CTRL_SET_TIMEOUT /* 200 */);
		if (ret < 0) {
			dprintk(debug, "usb_control_msg(send) failed, err %i\n",
				ret);
			return ret;
		}

		if (ret != send_buf_len) {
			dprintk(debug, "only wrote %d of %d bytes\n",
				ret, send_buf_len);
			return -1;
		}
	}

	if (recv_buf != NULL) {
#ifdef TRACE
		dprintk(debug, "want to read: %d bytes\n", recv_buf_len);
#endif
		ret = usb_control_msg(bus_adap->usb_dev,
				      usb_rcvctrlpipe(bus_adap->usb_dev, 0),
				      AS102_USB_DEVICE_RX_CTRL_CMD,
				      USB_DIR_IN | USB_TYPE_VENDOR |
				      USB_RECIP_DEVICE,
				      bus_adap->cmd_xid, /* value */
				      0, /* index */
				      recv_buf, recv_buf_len,
				      USB_CTRL_GET_TIMEOUT /* 200 */);
		if (ret < 0) {
			dprintk(debug, "usb_control_msg(recv) failed, err %i\n",
				ret);
			return ret;
		}
#ifdef TRACE
		dprintk(debug, "read %d bytes\n", recv_buf_len);
#endif
	}

	LEAVE();
	return ret;
}

/*
 * Bulk-send a firmware packet on endpoint 1.  Returns the number of bytes
 * written, a negative USB error, or -1 on a short write.  The swap32
 * argument is accepted for the upload_fw_pkt interface but is not used
 * here.
 */
static int as102_send_ep1(struct as10x_bus_adapter_t *bus_adap,
			  unsigned char *send_buf,
			  int send_buf_len,
			  int swap32)
{
	int ret = 0, actual_len;

	ret = usb_bulk_msg(bus_adap->usb_dev,
			   usb_sndbulkpipe(bus_adap->usb_dev, 1),
			   send_buf, send_buf_len, &actual_len, 200);
	if (ret) {
		dprintk(debug, "usb_bulk_msg(send) failed, err %i\n", ret);
		return ret;
	}

	if (actual_len != send_buf_len) {
		dprintk(debug, "only wrote %d of %d bytes\n",
			actual_len, send_buf_len);
		return -1;
	}
	return ret ? ret : actual_len;
}

/*
 * Bulk-read from endpoint 2.  Returns the number of bytes read, a negative
 * USB error, -EINVAL on a NULL buffer, or -1 on a short read.
 */
static int as102_read_ep2(struct as10x_bus_adapter_t *bus_adap,
			  unsigned char *recv_buf, int recv_buf_len)
{
	int ret = 0, actual_len;

	if (recv_buf == NULL)
		return -EINVAL;

	ret = usb_bulk_msg(bus_adap->usb_dev,
			   usb_rcvbulkpipe(bus_adap->usb_dev, 2),
			   recv_buf, recv_buf_len, &actual_len, 200);
	if (ret) {
		dprintk(debug, "usb_bulk_msg(recv) failed, err %i\n", ret);
		return ret;
	}

	if (actual_len != recv_buf_len) {
		dprintk(debug, "only read %d of %d bytes\n",
			actual_len, recv_buf_len);
		return -1;
	}
	return ret ? ret : actual_len;
}

/* Bus operations plugged into the generic as102 core. */
struct as102_priv_ops_t as102_priv_ops = {
	.upload_fw_pkt	= as102_send_ep1,
	.xfer_cmd	= as102_usb_xfer_cmd,
	.as102_read_ep2	= as102_read_ep2,
	.start_stream	= as102_usb_start_stream,
	.stop_stream	= as102_usb_stop_stream,
};

/* (Re)fill and submit one streaming bulk URB on endpoint 2. */
static int as102_submit_urb_stream(struct as102_dev_t *dev, struct urb *urb)
{
	int err;

	usb_fill_bulk_urb(urb,
			  dev->bus_adap.usb_dev,
			  usb_rcvbulkpipe(dev->bus_adap.usb_dev, 0x2),
			  urb->transfer_buffer,
			  AS102_USB_BUF_SIZE,
			  as102_urb_stream_irq,
			  dev);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err)
		dprintk(debug, "%s: usb_submit_urb failed\n", __func__);

	return err;
}

/*
 * Completion handler for streaming URBs: feed received TS data into the
 * DVB demux and, while streaming is active, resubmit the URB.
 */
void as102_urb_stream_irq(struct urb *urb)
{
	struct as102_dev_t *as102_dev = urb->context;

	if (urb->actual_length > 0) {
		dvb_dmx_swfilter(&as102_dev->dvb_dmx,
				 urb->transfer_buffer,
				 urb->actual_length);
	} else {
		if (urb->actual_length == 0)
			memset(urb->transfer_buffer, 0, AS102_USB_BUF_SIZE);
	}

	/* is not stopped, re-submit urb */
	if (as102_dev->streaming)
		as102_submit_urb_stream(as102_dev, urb);
}

/* Free all streaming URBs and the shared coherent DMA buffer. */
static void as102_free_usb_stream_buffer(struct as102_dev_t *dev)
{
	int i;
	ENTER();

	for (i = 0; i < MAX_STREAM_URB; i++)
		usb_free_urb(dev->stream_urb[i]);

	usb_free_coherent(dev->bus_adap.usb_dev,
			  MAX_STREAM_URB * AS102_USB_BUF_SIZE,
			  dev->stream,
			  dev->dma_addr);
	LEAVE();
}

/*
 * Allocate one coherent DMA buffer covering MAX_STREAM_URB slots and one
 * URB per slot.  On partial failure everything allocated so far is freed.
 * Returns 0 or -ENOMEM.
 */
static int as102_alloc_usb_stream_buffer(struct as102_dev_t *dev)
{
	int i, ret = 0;
	ENTER();

	dev->stream = usb_alloc_coherent(dev->bus_adap.usb_dev,
					 MAX_STREAM_URB * AS102_USB_BUF_SIZE,
					 GFP_KERNEL,
					 &dev->dma_addr);
	if (!dev->stream) {
		dprintk(debug, "%s: usb_buffer_alloc failed\n", __func__);
		return -ENOMEM;
	}

	memset(dev->stream, 0, MAX_STREAM_URB * AS102_USB_BUF_SIZE);

	/* init urb buffers */
	for (i = 0; i < MAX_STREAM_URB; i++) {
		struct urb *urb;

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (urb == NULL) {
			dprintk(debug, "%s: usb_alloc_urb failed\n", __func__);
			as102_free_usb_stream_buffer(dev);
			return -ENOMEM;
		}

		/* each URB gets its own slice of the coherent buffer */
		urb->transfer_buffer = dev->stream + (i * AS102_USB_BUF_SIZE);
		urb->transfer_dma = dev->dma_addr + (i * AS102_USB_BUF_SIZE);
		urb->transfer_flags = URB_NO_TRANSFER_DMA_MAP;
		urb->transfer_buffer_length = AS102_USB_BUF_SIZE;

		dev->stream_urb[i] = urb;
	}
	LEAVE();
	return ret;
}

/* Cancel all in-flight streaming URBs (synchronous). */
static void as102_usb_stop_stream(struct as102_dev_t *dev)
{
	int i;

	for (i = 0; i < MAX_STREAM_URB; i++)
		usb_kill_urb(dev->stream_urb[i]);
}

/* Submit all streaming URBs; on any failure, stop everything again. */
static int as102_usb_start_stream(struct as102_dev_t *dev)
{
	int i, ret = 0;

	for (i = 0; i < MAX_STREAM_URB; i++) {
		ret = as102_submit_urb_stream(dev, dev->stream_urb[i]);
		if (ret) {
			as102_usb_stop_stream(dev);
			return ret;
		}
	}
	return 0;
}

/* kref release: drop the usb_device reference and free the device struct. */
static void as102_usb_release(struct kref *kref)
{
	struct as102_dev_t *as102_dev;
	ENTER();

	as102_dev = container_of(kref, struct as102_dev_t, kref);
	if (as102_dev != NULL) {
		usb_put_dev(as102_dev->bus_adap.usb_dev);
		kfree(as102_dev);
	}
	LEAVE();
}

/* USB disconnect: unwind everything probe() set up and drop the kref. */
static void as102_usb_disconnect(struct usb_interface *intf)
{
	struct as102_dev_t *as102_dev;
	ENTER();

	/* extract as102_dev_t from usb_device private data */
	as102_dev = usb_get_intfdata(intf);

	/* unregister dvb layer */
	as102_dvb_unregister(as102_dev);

	/* free usb buffers */
	as102_free_usb_stream_buffer(as102_dev);

	usb_set_intfdata(intf, NULL);

	/* usb unregister device */
	usb_deregister_dev(intf, &as102_usb_class_driver);

	/* decrement usage counter */
	kref_put(&as102_dev->kref, as102_usb_release);

	pr_info("%s: device has been disconnected\n", DRIVER_NAME);

	LEAVE();
}

/*
 * USB probe: allocate and initialize the per-device structure, pick the
 * user-friendly name/eLNA config matching the USB id, register the
 * character device, allocate streaming buffers and register the DVB layer.
 */
static int as102_usb_probe(struct usb_interface *intf,
			   const struct usb_device_id *id)
{
	int ret;
	struct as102_dev_t *as102_dev;
	int i;
	ENTER();

	/* This should never actually happen */
	if ((sizeof(as102_usb_id_table) / sizeof(struct usb_device_id)) !=
	    (sizeof(as102_device_names) / sizeof(const char *))) {
		pr_err("Device names table invalid size");
		return -EINVAL;
	}

	as102_dev = kzalloc(sizeof(struct as102_dev_t), GFP_KERNEL);
	if (as102_dev == NULL) {
		err("%s: kzalloc failed", __func__);
		return -ENOMEM;
	}

	/* Assign the user-friendly device name */
	for (i = 0; i < (sizeof(as102_usb_id_table) /
			 sizeof(struct usb_device_id)); i++) {
		if (id == &as102_usb_id_table[i]) {
			as102_dev->name = as102_device_names[i];
			as102_dev->elna_cfg = as102_elna_cfg[i];
		}
	}

	if (as102_dev->name == NULL)
		as102_dev->name = "Unknown AS102 device";

	/* set private callback functions */
	as102_dev->bus_adap.ops = &as102_priv_ops;

	/* init cmd token for usb bus */
	as102_dev->bus_adap.cmd = &as102_dev->bus_adap.token.usb.c;
	as102_dev->bus_adap.rsp = &as102_dev->bus_adap.token.usb.r;

	/* init kernel device reference */
	kref_init(&as102_dev->kref);

	/* store as102 device to usb_device private data */
	usb_set_intfdata(intf, (void *) as102_dev);

	/* store in as102 device the usb_device pointer */
	as102_dev->bus_adap.usb_dev = usb_get_dev(interface_to_usbdev(intf));

	/* we can register the device now, as it is ready */
	ret = usb_register_dev(intf, &as102_usb_class_driver);
	if (ret < 0) {
		/* something prevented us from registering this driver */
		err("%s: usb_register_dev() failed (errno = %d)",
		    __func__, ret);
		goto failed;
	}

	pr_info("%s: device has been detected\n", DRIVER_NAME);

	/* request buffer allocation for streaming */
	ret = as102_alloc_usb_stream_buffer(as102_dev);
	if (ret != 0)
		goto failed;

	/* register dvb layer */
	ret = as102_dvb_register(as102_dev);

	LEAVE();
	return ret;

failed:
	/* NOTE(review): this path kfree()s as102_dev without a matching
	 * usb_put_dev() for the reference taken via usb_get_dev() above,
	 * and without deregistering the class device after a successful
	 * usb_register_dev() — verify refcount/registration balance */
	usb_set_intfdata(intf, NULL);
	kfree(as102_dev);
	return ret;
}

/*
 * Character-device open: locate the interface from the minor number, stash
 * the device in file->private_data and take a kref.
 */
static int as102_open(struct inode *inode, struct file *file)
{
	int ret = 0, minor = 0;
	struct usb_interface *intf = NULL;
	struct as102_dev_t *dev = NULL;
	ENTER();

	/* read minor from inode */
	minor = iminor(inode);

	/* fetch device from usb interface */
	intf = usb_find_interface(&as102_usb_driver, minor);
	if (intf == NULL) {
		pr_err("%s: can't find device for minor %d\n",
		       __func__, minor);
		ret = -ENODEV;
		goto exit;
	}

	/* get our device */
	dev = usb_get_intfdata(intf);
	if (dev == NULL) {
		ret = -EFAULT;
		goto exit;
	}

	/* save our device object in the file's private structure */
	file->private_data = dev;

	/* increment our usage count for the device */
	kref_get(&dev->kref);

exit:
	LEAVE();
	return ret;
}

/* Character-device release: drop the kref taken in as102_open(). */
static int as102_release(struct inode *inode, struct file *file)
{
	int ret = 0;
	struct as102_dev_t *dev = NULL;
	ENTER();

	dev = file->private_data;
	if (dev != NULL) {
		/* decrement the count on our device */
		kref_put(&dev->kref, as102_usb_release);
	}
	LEAVE();
	return ret;
}

MODULE_DEVICE_TABLE(usb, as102_usb_id_table);
gpl-2.0
boa19861105/android_444_KitKat_kernel_htc_B2_UHL
drivers/video/via/viafbdev.c
4811
61265
/* * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/via-core.h> #include <linux/via_i2c.h> #include <asm/olpc.h> #define _MASTER_FILE #include "global.h" static char *viafb_name = "Via"; static u32 pseudo_pal[17]; /* video mode */ static char *viafb_mode; static char *viafb_mode1; static int viafb_bpp = 32; static int viafb_bpp1 = 32; static unsigned int viafb_second_offset; static int viafb_second_size; static int viafb_accel = 1; /* Added for specifying active devices.*/ static char *viafb_active_dev; /*Added for specify lcd output port*/ static char *viafb_lcd_port = ""; static char *viafb_dvi_port = ""; static void retrieve_device_setting(struct viafb_ioctl_setting *setting_info); static int viafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info); static struct fb_ops viafb_ops; /* supported output devices on each IGP * only CX700, VX800, VX855, VX900 were documented * VIA_CRT should be everywhere * VIA_6C can be onle pre-CX700 (probably only on CLE266) as 6C is used for PLL * source selection on CX700 and later * K400 seems to support VIA_96, VIA_DVP1, 
VIA_LVDS{1,2} as in viamode.c */ static const u32 supported_odev_map[] = { [UNICHROME_CLE266] = VIA_CRT | VIA_LDVP0 | VIA_LDVP1, [UNICHROME_K400] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_K800] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_PM800] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_CN700] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_CX700] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_CN750] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_K8M890] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_P4M890] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_P4M900] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_VX800] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_VX855] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_VX900] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, }; static void viafb_fill_var_color_info(struct fb_var_screeninfo *var, u8 depth) { var->grayscale = 0; var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.offset = 0; var->transp.length = 0; var->transp.msb_right = 0; var->nonstd = 0; switch (depth) { case 8: var->bits_per_pixel = 8; var->red.offset = 0; var->green.offset = 0; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; break; case 15: var->bits_per_pixel = 16; var->red.offset = 10; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 5; var->blue.length = 5; break; case 16: var->bits_per_pixel = 16; var->red.offset = 11; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 6; var->blue.length = 5; break; case 24: var->bits_per_pixel = 32; var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; break; case 30: var->bits_per_pixel = 32; var->red.offset = 20; 
var->green.offset = 10; var->blue.offset = 0; var->red.length = 10; var->green.length = 10; var->blue.length = 10; break; } } static void viafb_update_fix(struct fb_info *info) { u32 bpp = info->var.bits_per_pixel; info->fix.visual = bpp == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; info->fix.line_length = ALIGN(info->var.xres_virtual * bpp / 8, VIA_PITCH_SIZE); } static void viafb_setup_fixinfo(struct fb_fix_screeninfo *fix, struct viafb_par *viaparinfo) { memset(fix, 0, sizeof(struct fb_fix_screeninfo)); strcpy(fix->id, viafb_name); fix->smem_start = viaparinfo->fbmem; fix->smem_len = viaparinfo->fbmem_free; fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; fix->visual = FB_VISUAL_TRUECOLOR; fix->xpanstep = fix->ywrapstep = 0; fix->ypanstep = 1; /* Just tell the accel name */ viafbinfo->fix.accel = FB_ACCEL_VIA_UNICHROME; } static int viafb_open(struct fb_info *info, int user) { DEBUG_MSG(KERN_INFO "viafb_open!\n"); return 0; } static int viafb_release(struct fb_info *info, int user) { DEBUG_MSG(KERN_INFO "viafb_release!\n"); return 0; } static inline int get_var_refresh(struct fb_var_screeninfo *var) { u32 htotal, vtotal; htotal = var->left_margin + var->xres + var->right_margin + var->hsync_len; vtotal = var->upper_margin + var->yres + var->lower_margin + var->vsync_len; return PICOS2KHZ(var->pixclock) * 1000 / (htotal * vtotal); } static int viafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { int depth, refresh; struct viafb_par *ppar = info->par; u32 line; DEBUG_MSG(KERN_INFO "viafb_check_var!\n"); /* Sanity check */ /* HW neither support interlacte nor double-scaned mode */ if (var->vmode & FB_VMODE_INTERLACED || var->vmode & FB_VMODE_DOUBLE) return -EINVAL; /* the refresh rate is not important here, as we only want to know * whether the resolution exists */ if (!viafb_get_best_mode(var->xres, var->yres, 60)) { DEBUG_MSG(KERN_INFO "viafb: Mode %dx%dx%d not supported!!\n", var->xres, var->yres, var->bits_per_pixel); return 
-EINVAL; } depth = fb_get_color_depth(var, &info->fix); if (!depth) depth = var->bits_per_pixel; if (depth < 0 || depth > 32) return -EINVAL; else if (!depth) depth = 24; else if (depth == 15 && viafb_dual_fb && ppar->iga_path == IGA1) depth = 15; else if (depth == 30) depth = 30; else if (depth <= 8) depth = 8; else if (depth <= 16) depth = 16; else depth = 24; viafb_fill_var_color_info(var, depth); if (var->xres_virtual < var->xres) var->xres_virtual = var->xres; line = ALIGN(var->xres_virtual * var->bits_per_pixel / 8, VIA_PITCH_SIZE); if (line > VIA_PITCH_MAX || line * var->yres_virtual > ppar->memsize) return -EINVAL; /* Based on var passed in to calculate the refresh, * because our driver use some modes special. */ refresh = viafb_get_refresh(var->xres, var->yres, get_var_refresh(var)); /* Adjust var according to our driver's own table */ viafb_fill_var_timing_info(var, viafb_get_best_mode(var->xres, var->yres, refresh)); if (var->accel_flags & FB_ACCELF_TEXT && !ppar->shared->vdev->engine_mmio) var->accel_flags = 0; return 0; } static int viafb_set_par(struct fb_info *info) { struct viafb_par *viapar = info->par; int refresh; DEBUG_MSG(KERN_INFO "viafb_set_par!\n"); viafb_update_fix(info); viapar->depth = fb_get_color_depth(&info->var, &info->fix); viafb_update_device_setting(viafbinfo->var.xres, viafbinfo->var.yres, viafbinfo->var.bits_per_pixel, 0); if (viafb_dual_fb) { viafb_update_device_setting(viafbinfo1->var.xres, viafbinfo1->var.yres, viafbinfo1->var.bits_per_pixel, 1); } else if (viafb_SAMM_ON == 1) { DEBUG_MSG(KERN_INFO "viafb_second_xres = %d, viafb_second_yres = %d, bpp = %d\n", viafb_second_xres, viafb_second_yres, viafb_bpp1); viafb_update_device_setting(viafb_second_xres, viafb_second_yres, viafb_bpp1, 1); } refresh = get_var_refresh(&info->var); if (viafb_dual_fb && viapar->iga_path == IGA2) { viafb_bpp1 = info->var.bits_per_pixel; viafb_refresh1 = refresh; } else { viafb_bpp = info->var.bits_per_pixel; viafb_refresh = refresh; } if 
(info->var.accel_flags & FB_ACCELF_TEXT) info->flags &= ~FBINFO_HWACCEL_DISABLED; else info->flags |= FBINFO_HWACCEL_DISABLED; viafb_setmode(); viafb_pan_display(&info->var, info); return 0; } /* Set one color register */ static int viafb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct viafb_par *viapar = info->par; u32 r, g, b; if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR) { if (regno > 255) return -EINVAL; if (!viafb_dual_fb || viapar->iga_path == IGA1) viafb_set_primary_color_register(regno, red >> 8, green >> 8, blue >> 8); if (!viafb_dual_fb || viapar->iga_path == IGA2) viafb_set_secondary_color_register(regno, red >> 8, green >> 8, blue >> 8); } else { if (regno > 15) return -EINVAL; r = (red >> (16 - info->var.red.length)) << info->var.red.offset; b = (blue >> (16 - info->var.blue.length)) << info->var.blue.offset; g = (green >> (16 - info->var.green.length)) << info->var.green.offset; ((u32 *) info->pseudo_palette)[regno] = r | g | b; } return 0; } static int viafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct viafb_par *viapar = info->par; u32 vram_addr = viapar->vram_addr + var->yoffset * info->fix.line_length + var->xoffset * info->var.bits_per_pixel / 8; DEBUG_MSG(KERN_DEBUG "viafb_pan_display, address = %d\n", vram_addr); if (!viafb_dual_fb) { via_set_primary_address(vram_addr); via_set_secondary_address(vram_addr); } else if (viapar->iga_path == IGA1) via_set_primary_address(vram_addr); else via_set_secondary_address(vram_addr); return 0; } static int viafb_blank(int blank_mode, struct fb_info *info) { DEBUG_MSG(KERN_INFO "viafb_blank!\n"); /* clear DPMS setting */ switch (blank_mode) { case FB_BLANK_UNBLANK: /* Screen: On, HSync: On, VSync: On */ /* control CRT monitor power management */ via_set_state(VIA_CRT, VIA_STATE_ON); break; case FB_BLANK_HSYNC_SUSPEND: /* Screen: Off, HSync: Off, VSync: On */ /* control CRT monitor power management */ 
via_set_state(VIA_CRT, VIA_STATE_STANDBY); break; case FB_BLANK_VSYNC_SUSPEND: /* Screen: Off, HSync: On, VSync: Off */ /* control CRT monitor power management */ via_set_state(VIA_CRT, VIA_STATE_SUSPEND); break; case FB_BLANK_POWERDOWN: /* Screen: Off, HSync: Off, VSync: Off */ /* control CRT monitor power management */ via_set_state(VIA_CRT, VIA_STATE_OFF); break; } return 0; } static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg) { union { struct viafb_ioctl_mode viamode; struct viafb_ioctl_samm viasamm; struct viafb_driver_version driver_version; struct fb_var_screeninfo sec_var; struct _panel_size_pos_info panel_pos_size_para; struct viafb_ioctl_setting viafb_setting; struct device_t active_dev; } u; u32 state_info = 0; u32 *viafb_gamma_table; char driver_name[] = "viafb"; u32 __user *argp = (u32 __user *) arg; u32 gpu32; DEBUG_MSG(KERN_INFO "viafb_ioctl: 0x%X !!\n", cmd); printk(KERN_WARNING "viafb_ioctl: Please avoid this interface as it is unstable and might change or vanish at any time!\n"); memset(&u, 0, sizeof(u)); switch (cmd) { case VIAFB_GET_CHIP_INFO: if (copy_to_user(argp, viaparinfo->chip_info, sizeof(struct chip_information))) return -EFAULT; break; case VIAFB_GET_INFO_SIZE: return put_user((u32)sizeof(struct viafb_ioctl_info), argp); case VIAFB_GET_INFO: return viafb_ioctl_get_viafb_info(arg); case VIAFB_HOTPLUG: return put_user(viafb_ioctl_hotplug(info->var.xres, info->var.yres, info->var.bits_per_pixel), argp); case VIAFB_SET_HOTPLUG_FLAG: if (copy_from_user(&gpu32, argp, sizeof(gpu32))) return -EFAULT; viafb_hotplug = (gpu32) ? 1 : 0; break; case VIAFB_GET_RESOLUTION: u.viamode.xres = (u32) viafb_hotplug_Xres; u.viamode.yres = (u32) viafb_hotplug_Yres; u.viamode.refresh = (u32) viafb_hotplug_refresh; u.viamode.bpp = (u32) viafb_hotplug_bpp; if (viafb_SAMM_ON == 1) { u.viamode.xres_sec = viafb_second_xres; u.viamode.yres_sec = viafb_second_yres; u.viamode.virtual_xres_sec = viafb_dual_fb ? 
viafbinfo1->var.xres_virtual : viafbinfo->var.xres_virtual; u.viamode.virtual_yres_sec = viafb_dual_fb ? viafbinfo1->var.yres_virtual : viafbinfo->var.yres_virtual; u.viamode.refresh_sec = viafb_refresh1; u.viamode.bpp_sec = viafb_bpp1; } else { u.viamode.xres_sec = 0; u.viamode.yres_sec = 0; u.viamode.virtual_xres_sec = 0; u.viamode.virtual_yres_sec = 0; u.viamode.refresh_sec = 0; u.viamode.bpp_sec = 0; } if (copy_to_user(argp, &u.viamode, sizeof(u.viamode))) return -EFAULT; break; case VIAFB_GET_SAMM_INFO: u.viasamm.samm_status = viafb_SAMM_ON; if (viafb_SAMM_ON == 1) { if (viafb_dual_fb) { u.viasamm.size_prim = viaparinfo->fbmem_free; u.viasamm.size_sec = viaparinfo1->fbmem_free; } else { if (viafb_second_size) { u.viasamm.size_prim = viaparinfo->fbmem_free - viafb_second_size * 1024 * 1024; u.viasamm.size_sec = viafb_second_size * 1024 * 1024; } else { u.viasamm.size_prim = viaparinfo->fbmem_free >> 1; u.viasamm.size_sec = (viaparinfo->fbmem_free >> 1); } } u.viasamm.mem_base = viaparinfo->fbmem; u.viasamm.offset_sec = viafb_second_offset; } else { u.viasamm.size_prim = viaparinfo->memsize - viaparinfo->fbmem_used; u.viasamm.size_sec = 0; u.viasamm.mem_base = viaparinfo->fbmem; u.viasamm.offset_sec = 0; } if (copy_to_user(argp, &u.viasamm, sizeof(u.viasamm))) return -EFAULT; break; case VIAFB_TURN_ON_OUTPUT_DEVICE: if (copy_from_user(&gpu32, argp, sizeof(gpu32))) return -EFAULT; if (gpu32 & CRT_Device) via_set_state(VIA_CRT, VIA_STATE_ON); if (gpu32 & DVI_Device) viafb_dvi_enable(); if (gpu32 & LCD_Device) viafb_lcd_enable(); break; case VIAFB_TURN_OFF_OUTPUT_DEVICE: if (copy_from_user(&gpu32, argp, sizeof(gpu32))) return -EFAULT; if (gpu32 & CRT_Device) via_set_state(VIA_CRT, VIA_STATE_OFF); if (gpu32 & DVI_Device) viafb_dvi_disable(); if (gpu32 & LCD_Device) viafb_lcd_disable(); break; case VIAFB_GET_DEVICE: u.active_dev.crt = viafb_CRT_ON; u.active_dev.dvi = viafb_DVI_ON; u.active_dev.lcd = viafb_LCD_ON; u.active_dev.samm = viafb_SAMM_ON; 
u.active_dev.primary_dev = viafb_primary_dev; u.active_dev.lcd_dsp_cent = viafb_lcd_dsp_method; u.active_dev.lcd_panel_id = viafb_lcd_panel_id; u.active_dev.lcd_mode = viafb_lcd_mode; u.active_dev.xres = viafb_hotplug_Xres; u.active_dev.yres = viafb_hotplug_Yres; u.active_dev.xres1 = viafb_second_xres; u.active_dev.yres1 = viafb_second_yres; u.active_dev.bpp = viafb_bpp; u.active_dev.bpp1 = viafb_bpp1; u.active_dev.refresh = viafb_refresh; u.active_dev.refresh1 = viafb_refresh1; u.active_dev.epia_dvi = viafb_platform_epia_dvi; u.active_dev.lcd_dual_edge = viafb_device_lcd_dualedge; u.active_dev.bus_width = viafb_bus_width; if (copy_to_user(argp, &u.active_dev, sizeof(u.active_dev))) return -EFAULT; break; case VIAFB_GET_DRIVER_VERSION: u.driver_version.iMajorNum = VERSION_MAJOR; u.driver_version.iKernelNum = VERSION_KERNEL; u.driver_version.iOSNum = VERSION_OS; u.driver_version.iMinorNum = VERSION_MINOR; if (copy_to_user(argp, &u.driver_version, sizeof(u.driver_version))) return -EFAULT; break; case VIAFB_GET_DEVICE_INFO: retrieve_device_setting(&u.viafb_setting); if (copy_to_user(argp, &u.viafb_setting, sizeof(u.viafb_setting))) return -EFAULT; break; case VIAFB_GET_DEVICE_SUPPORT: viafb_get_device_support_state(&state_info); if (put_user(state_info, argp)) return -EFAULT; break; case VIAFB_GET_DEVICE_CONNECT: viafb_get_device_connect_state(&state_info); if (put_user(state_info, argp)) return -EFAULT; break; case VIAFB_GET_PANEL_SUPPORT_EXPAND: state_info = viafb_lcd_get_support_expand_state(info->var.xres, info->var.yres); if (put_user(state_info, argp)) return -EFAULT; break; case VIAFB_GET_DRIVER_NAME: if (copy_to_user(argp, driver_name, sizeof(driver_name))) return -EFAULT; break; case VIAFB_SET_GAMMA_LUT: viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32)); if (IS_ERR(viafb_gamma_table)) return PTR_ERR(viafb_gamma_table); viafb_set_gamma_table(viafb_bpp, viafb_gamma_table); kfree(viafb_gamma_table); break; case VIAFB_GET_GAMMA_LUT: viafb_gamma_table = 
kmalloc(256 * sizeof(u32), GFP_KERNEL); if (!viafb_gamma_table) return -ENOMEM; viafb_get_gamma_table(viafb_gamma_table); if (copy_to_user(argp, viafb_gamma_table, 256 * sizeof(u32))) { kfree(viafb_gamma_table); return -EFAULT; } kfree(viafb_gamma_table); break; case VIAFB_GET_GAMMA_SUPPORT_STATE: viafb_get_gamma_support_state(viafb_bpp, &state_info); if (put_user(state_info, argp)) return -EFAULT; break; case VIAFB_SYNC_SURFACE: DEBUG_MSG(KERN_INFO "lobo VIAFB_SYNC_SURFACE\n"); break; case VIAFB_GET_DRIVER_CAPS: break; case VIAFB_GET_PANEL_MAX_SIZE: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0; if (copy_to_user(argp, &u.panel_pos_size_para, sizeof(u.panel_pos_size_para))) return -EFAULT; break; case VIAFB_GET_PANEL_MAX_POSITION: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0; if (copy_to_user(argp, &u.panel_pos_size_para, sizeof(u.panel_pos_size_para))) return -EFAULT; break; case VIAFB_GET_PANEL_POSITION: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0; if (copy_to_user(argp, &u.panel_pos_size_para, sizeof(u.panel_pos_size_para))) return -EFAULT; break; case VIAFB_GET_PANEL_SIZE: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0; if (copy_to_user(argp, &u.panel_pos_size_para, sizeof(u.panel_pos_size_para))) return -EFAULT; break; case VIAFB_SET_PANEL_POSITION: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; break; case VIAFB_SET_PANEL_SIZE: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; break; default: return -EINVAL; } return 0; } static void 
viafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct viafb_par *viapar = info->par; struct viafb_shared *shared = viapar->shared; u32 fg_color; u8 rop; if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt) { cfb_fillrect(info, rect); return; } if (!rect->width || !rect->height) return; if (info->fix.visual == FB_VISUAL_TRUECOLOR) fg_color = ((u32 *)info->pseudo_palette)[rect->color]; else fg_color = rect->color; if (rect->rop == ROP_XOR) rop = 0x5A; else rop = 0xF0; DEBUG_MSG(KERN_DEBUG "viafb 2D engine: fillrect\n"); if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_FILL, rect->width, rect->height, info->var.bits_per_pixel, viapar->vram_addr, info->fix.line_length, rect->dx, rect->dy, NULL, 0, 0, 0, 0, fg_color, 0, rop)) cfb_fillrect(info, rect); } static void viafb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct viafb_par *viapar = info->par; struct viafb_shared *shared = viapar->shared; if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt) { cfb_copyarea(info, area); return; } if (!area->width || !area->height) return; DEBUG_MSG(KERN_DEBUG "viafb 2D engine: copyarea\n"); if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_COLOR, area->width, area->height, info->var.bits_per_pixel, viapar->vram_addr, info->fix.line_length, area->dx, area->dy, NULL, viapar->vram_addr, info->fix.line_length, area->sx, area->sy, 0, 0, 0)) cfb_copyarea(info, area); } static void viafb_imageblit(struct fb_info *info, const struct fb_image *image) { struct viafb_par *viapar = info->par; struct viafb_shared *shared = viapar->shared; u32 fg_color = 0, bg_color = 0; u8 op; if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt || (image->depth != 1 && image->depth != viapar->depth)) { cfb_imageblit(info, image); return; } if (image->depth == 1) { op = VIA_BITBLT_MONO; if (info->fix.visual == FB_VISUAL_TRUECOLOR) { fg_color = ((u32 *)info->pseudo_palette)[image->fg_color]; bg_color = ((u32 
*)info->pseudo_palette)[image->bg_color]; } else { fg_color = image->fg_color; bg_color = image->bg_color; } } else op = VIA_BITBLT_COLOR; DEBUG_MSG(KERN_DEBUG "viafb 2D engine: imageblit\n"); if (shared->hw_bitblt(shared->vdev->engine_mmio, op, image->width, image->height, info->var.bits_per_pixel, viapar->vram_addr, info->fix.line_length, image->dx, image->dy, (u32 *)image->data, 0, 0, 0, 0, fg_color, bg_color, 0)) cfb_imageblit(info, image); } static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor) { struct viafb_par *viapar = info->par; void __iomem *engine = viapar->shared->vdev->engine_mmio; u32 temp, xx, yy, bg_color = 0, fg_color = 0, chip_name = viapar->shared->chip_info.gfx_chip_name; int i, j = 0, cur_size = 64; if (info->flags & FBINFO_HWACCEL_DISABLED || info != viafbinfo) return -ENODEV; /* LCD ouput does not support hw cursors (at least on VN896) */ if ((chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2) || viafb_LCD_ON) return -ENODEV; viafb_show_hw_cursor(info, HW_Cursor_OFF); if (cursor->set & FB_CUR_SETHOT) { temp = (cursor->hot.x << 16) + cursor->hot.y; writel(temp, engine + VIA_REG_CURSOR_ORG); } if (cursor->set & FB_CUR_SETPOS) { yy = cursor->image.dy - info->var.yoffset; xx = cursor->image.dx - info->var.xoffset; temp = yy & 0xFFFF; temp |= (xx << 16); writel(temp, engine + VIA_REG_CURSOR_POS); } if (cursor->image.width <= 32 && cursor->image.height <= 32) cur_size = 32; else if (cursor->image.width <= 64 && cursor->image.height <= 64) cur_size = 64; else { printk(KERN_WARNING "viafb_cursor: The cursor is too large " "%dx%d", cursor->image.width, cursor->image.height); return -ENXIO; } if (cursor->set & FB_CUR_SETSIZE) { temp = readl(engine + VIA_REG_CURSOR_MODE); if (cur_size == 32) temp |= 0x2; else temp &= ~0x2; writel(temp, engine + VIA_REG_CURSOR_MODE); } if (cursor->set & FB_CUR_SETCMAP) { fg_color = cursor->image.fg_color; bg_color = cursor->image.bg_color; if (chip_name == UNICHROME_CX700 || chip_name == 
UNICHROME_VX800 || chip_name == UNICHROME_VX855 || chip_name == UNICHROME_VX900) { fg_color = ((info->cmap.red[fg_color] & 0xFFC0) << 14) | ((info->cmap.green[fg_color] & 0xFFC0) << 4) | ((info->cmap.blue[fg_color] & 0xFFC0) >> 6); bg_color = ((info->cmap.red[bg_color] & 0xFFC0) << 14) | ((info->cmap.green[bg_color] & 0xFFC0) << 4) | ((info->cmap.blue[bg_color] & 0xFFC0) >> 6); } else { fg_color = ((info->cmap.red[fg_color] & 0xFF00) << 8) | (info->cmap.green[fg_color] & 0xFF00) | ((info->cmap.blue[fg_color] & 0xFF00) >> 8); bg_color = ((info->cmap.red[bg_color] & 0xFF00) << 8) | (info->cmap.green[bg_color] & 0xFF00) | ((info->cmap.blue[bg_color] & 0xFF00) >> 8); } writel(bg_color, engine + VIA_REG_CURSOR_BG); writel(fg_color, engine + VIA_REG_CURSOR_FG); } if (cursor->set & FB_CUR_SETSHAPE) { struct { u8 data[CURSOR_SIZE]; u32 bak[CURSOR_SIZE / 4]; } *cr_data = kzalloc(sizeof(*cr_data), GFP_ATOMIC); int size = ((cursor->image.width + 7) >> 3) * cursor->image.height; if (!cr_data) return -ENOMEM; if (cur_size == 32) { for (i = 0; i < (CURSOR_SIZE / 4); i++) { cr_data->bak[i] = 0x0; cr_data->bak[i + 1] = 0xFFFFFFFF; i += 1; } } else { for (i = 0; i < (CURSOR_SIZE / 4); i++) { cr_data->bak[i] = 0x0; cr_data->bak[i + 1] = 0x0; cr_data->bak[i + 2] = 0xFFFFFFFF; cr_data->bak[i + 3] = 0xFFFFFFFF; i += 3; } } switch (cursor->rop) { case ROP_XOR: for (i = 0; i < size; i++) cr_data->data[i] = cursor->mask[i]; break; case ROP_COPY: for (i = 0; i < size; i++) cr_data->data[i] = cursor->mask[i]; break; default: break; } if (cur_size == 32) { for (i = 0; i < size; i++) { cr_data->bak[j] = (u32) cr_data->data[i]; cr_data->bak[j + 1] = ~cr_data->bak[j]; j += 2; } } else { for (i = 0; i < size; i++) { cr_data->bak[j] = (u32) cr_data->data[i]; cr_data->bak[j + 1] = 0x0; cr_data->bak[j + 2] = ~cr_data->bak[j]; cr_data->bak[j + 3] = ~cr_data->bak[j + 1]; j += 4; } } memcpy_toio(viafbinfo->screen_base + viapar->shared-> cursor_vram_addr, cr_data->bak, CURSOR_SIZE); kfree(cr_data); } 
if (cursor->enable) viafb_show_hw_cursor(info, HW_Cursor_ON); return 0; } static int viafb_sync(struct fb_info *info) { if (!(info->flags & FBINFO_HWACCEL_DISABLED)) viafb_wait_engine_idle(info); return 0; } static int get_primary_device(void) { int primary_device = 0; /* Rule: device on iga1 path are the primary device. */ if (viafb_SAMM_ON) { if (viafb_CRT_ON) { if (viaparinfo->shared->iga1_devices & VIA_CRT) { DEBUG_MSG(KERN_INFO "CRT IGA Path:%d\n", IGA1); primary_device = CRT_Device; } } if (viafb_DVI_ON) { if (viaparinfo->tmds_setting_info->iga_path == IGA1) { DEBUG_MSG(KERN_INFO "DVI IGA Path:%d\n", viaparinfo-> tmds_setting_info->iga_path); primary_device = DVI_Device; } } if (viafb_LCD_ON) { if (viaparinfo->lvds_setting_info->iga_path == IGA1) { DEBUG_MSG(KERN_INFO "LCD IGA Path:%d\n", viaparinfo-> lvds_setting_info->iga_path); primary_device = LCD_Device; } } if (viafb_LCD2_ON) { if (viaparinfo->lvds_setting_info2->iga_path == IGA1) { DEBUG_MSG(KERN_INFO "LCD2 IGA Path:%d\n", viaparinfo-> lvds_setting_info2->iga_path); primary_device = LCD2_Device; } } } return primary_device; } static void retrieve_device_setting(struct viafb_ioctl_setting *setting_info) { /* get device status */ if (viafb_CRT_ON == 1) setting_info->device_status = CRT_Device; if (viafb_DVI_ON == 1) setting_info->device_status |= DVI_Device; if (viafb_LCD_ON == 1) setting_info->device_status |= LCD_Device; if (viafb_LCD2_ON == 1) setting_info->device_status |= LCD2_Device; setting_info->samm_status = viafb_SAMM_ON; setting_info->primary_device = get_primary_device(); setting_info->first_dev_bpp = viafb_bpp; setting_info->second_dev_bpp = viafb_bpp1; setting_info->first_dev_refresh = viafb_refresh; setting_info->second_dev_refresh = viafb_refresh1; setting_info->first_dev_hor_res = viafb_hotplug_Xres; setting_info->first_dev_ver_res = viafb_hotplug_Yres; setting_info->second_dev_hor_res = viafb_second_xres; setting_info->second_dev_ver_res = viafb_second_yres; /* Get lcd attributes */ 
setting_info->lcd_attributes.display_center = viafb_lcd_dsp_method; setting_info->lcd_attributes.panel_id = viafb_lcd_panel_id; setting_info->lcd_attributes.lcd_mode = viafb_lcd_mode; } static int __init parse_active_dev(void) { viafb_CRT_ON = STATE_OFF; viafb_DVI_ON = STATE_OFF; viafb_LCD_ON = STATE_OFF; viafb_LCD2_ON = STATE_OFF; /* 1. Modify the active status of devices. */ /* 2. Keep the order of devices, so we can set corresponding IGA path to devices in SAMM case. */ /* Note: The previous of active_dev is primary device, and the following is secondary device. */ if (!viafb_active_dev) { if (machine_is_olpc()) { /* LCD only */ viafb_LCD_ON = STATE_ON; viafb_SAMM_ON = STATE_OFF; } else { viafb_CRT_ON = STATE_ON; viafb_SAMM_ON = STATE_OFF; } } else if (!strcmp(viafb_active_dev, "CRT+DVI")) { /* CRT+DVI */ viafb_CRT_ON = STATE_ON; viafb_DVI_ON = STATE_ON; viafb_primary_dev = CRT_Device; } else if (!strcmp(viafb_active_dev, "DVI+CRT")) { /* DVI+CRT */ viafb_CRT_ON = STATE_ON; viafb_DVI_ON = STATE_ON; viafb_primary_dev = DVI_Device; } else if (!strcmp(viafb_active_dev, "CRT+LCD")) { /* CRT+LCD */ viafb_CRT_ON = STATE_ON; viafb_LCD_ON = STATE_ON; viafb_primary_dev = CRT_Device; } else if (!strcmp(viafb_active_dev, "LCD+CRT")) { /* LCD+CRT */ viafb_CRT_ON = STATE_ON; viafb_LCD_ON = STATE_ON; viafb_primary_dev = LCD_Device; } else if (!strcmp(viafb_active_dev, "DVI+LCD")) { /* DVI+LCD */ viafb_DVI_ON = STATE_ON; viafb_LCD_ON = STATE_ON; viafb_primary_dev = DVI_Device; } else if (!strcmp(viafb_active_dev, "LCD+DVI")) { /* LCD+DVI */ viafb_DVI_ON = STATE_ON; viafb_LCD_ON = STATE_ON; viafb_primary_dev = LCD_Device; } else if (!strcmp(viafb_active_dev, "LCD+LCD2")) { viafb_LCD_ON = STATE_ON; viafb_LCD2_ON = STATE_ON; viafb_primary_dev = LCD_Device; } else if (!strcmp(viafb_active_dev, "LCD2+LCD")) { viafb_LCD_ON = STATE_ON; viafb_LCD2_ON = STATE_ON; viafb_primary_dev = LCD2_Device; } else if (!strcmp(viafb_active_dev, "CRT")) { /* CRT only */ viafb_CRT_ON = STATE_ON; 
viafb_SAMM_ON = STATE_OFF; } else if (!strcmp(viafb_active_dev, "DVI")) { /* DVI only */ viafb_DVI_ON = STATE_ON; viafb_SAMM_ON = STATE_OFF; } else if (!strcmp(viafb_active_dev, "LCD")) { /* LCD only */ viafb_LCD_ON = STATE_ON; viafb_SAMM_ON = STATE_OFF; } else return -EINVAL; return 0; } static int __devinit parse_port(char *opt_str, int *output_interface) { if (!strncmp(opt_str, "DVP0", 4)) *output_interface = INTERFACE_DVP0; else if (!strncmp(opt_str, "DVP1", 4)) *output_interface = INTERFACE_DVP1; else if (!strncmp(opt_str, "DFP_HIGHLOW", 11)) *output_interface = INTERFACE_DFP; else if (!strncmp(opt_str, "DFP_HIGH", 8)) *output_interface = INTERFACE_DFP_HIGH; else if (!strncmp(opt_str, "DFP_LOW", 7)) *output_interface = INTERFACE_DFP_LOW; else *output_interface = INTERFACE_NONE; return 0; } static void __devinit parse_lcd_port(void) { parse_port(viafb_lcd_port, &viaparinfo->chip_info->lvds_chip_info. output_interface); /*Initialize to avoid unexpected behavior */ viaparinfo->chip_info->lvds_chip_info2.output_interface = INTERFACE_NONE; DEBUG_MSG(KERN_INFO "parse_lcd_port: viafb_lcd_port:%s,interface:%d\n", viafb_lcd_port, viaparinfo->chip_info->lvds_chip_info. output_interface); } static void __devinit parse_dvi_port(void) { parse_port(viafb_dvi_port, &viaparinfo->chip_info->tmds_chip_info. output_interface); DEBUG_MSG(KERN_INFO "parse_dvi_port: viafb_dvi_port:%s,interface:%d\n", viafb_dvi_port, viaparinfo->chip_info->tmds_chip_info. 
output_interface); } #ifdef CONFIG_FB_VIA_DIRECT_PROCFS /* * The proc filesystem read/write function, a simple proc implement to * get/set the value of DPA DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1, * DVP1Driving, DFPHigh, DFPLow CR96, SR2A[5], SR1B[1], SR2A[4], SR1E[2], * CR9B, SR65, CR97, CR99 */ static int viafb_dvp0_proc_show(struct seq_file *m, void *v) { u8 dvp0_data_dri = 0, dvp0_clk_dri = 0, dvp0 = 0; dvp0_data_dri = (viafb_read_reg(VIASR, SR2A) & BIT5) >> 4 | (viafb_read_reg(VIASR, SR1B) & BIT1) >> 1; dvp0_clk_dri = (viafb_read_reg(VIASR, SR2A) & BIT4) >> 3 | (viafb_read_reg(VIASR, SR1E) & BIT2) >> 2; dvp0 = viafb_read_reg(VIACR, CR96) & 0x0f; seq_printf(m, "%x %x %x\n", dvp0, dvp0_data_dri, dvp0_clk_dri); return 0; } static int viafb_dvp0_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_dvp0_proc_show, NULL); } static ssize_t viafb_dvp0_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char buf[20], *value, *pbuf; u8 reg_val = 0; unsigned long length, i; if (count < 1) return -EINVAL; length = count > 20 ? 
20 : count; if (copy_from_user(&buf[0], buffer, length)) return -EFAULT; buf[length - 1] = '\0'; /*Ensure end string */ pbuf = &buf[0]; for (i = 0; i < 3; i++) { value = strsep(&pbuf, " "); if (value != NULL) { if (kstrtou8(value, 0, &reg_val) < 0) return -EINVAL; DEBUG_MSG(KERN_INFO "DVP0:reg_val[%l]=:%x\n", i, reg_val); switch (i) { case 0: viafb_write_reg_mask(CR96, VIACR, reg_val, 0x0f); break; case 1: viafb_write_reg_mask(SR2A, VIASR, reg_val << 4, BIT5); viafb_write_reg_mask(SR1B, VIASR, reg_val << 1, BIT1); break; case 2: viafb_write_reg_mask(SR2A, VIASR, reg_val << 3, BIT4); viafb_write_reg_mask(SR1E, VIASR, reg_val << 2, BIT2); break; default: break; } } else { break; } } return count; } static const struct file_operations viafb_dvp0_proc_fops = { .owner = THIS_MODULE, .open = viafb_dvp0_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = viafb_dvp0_proc_write, }; static int viafb_dvp1_proc_show(struct seq_file *m, void *v) { u8 dvp1 = 0, dvp1_data_dri = 0, dvp1_clk_dri = 0; dvp1 = viafb_read_reg(VIACR, CR9B) & 0x0f; dvp1_data_dri = (viafb_read_reg(VIASR, SR65) & 0x0c) >> 2; dvp1_clk_dri = viafb_read_reg(VIASR, SR65) & 0x03; seq_printf(m, "%x %x %x\n", dvp1, dvp1_data_dri, dvp1_clk_dri); return 0; } static int viafb_dvp1_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_dvp1_proc_show, NULL); } static ssize_t viafb_dvp1_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char buf[20], *value, *pbuf; u8 reg_val = 0; unsigned long length, i; if (count < 1) return -EINVAL; length = count > 20 ? 
	20 : count;
	if (copy_from_user(&buf[0], buffer, length))
		return -EFAULT;
	buf[length - 1] = '\0'; /* ensure the copied user string is terminated */
	pbuf = &buf[0];
	/*
	 * Parse up to three space-separated byte values:
	 *   [0] DVP1 setting  -> CR9B[3:0]
	 *   [1] data driving  -> SR65[3:2]
	 *   [2] clock driving -> SR65[1:0]
	 */
	for (i = 0; i < 3; i++) {
		value = strsep(&pbuf, " ");
		if (value != NULL) {
			if (kstrtou8(value, 0, &reg_val) < 0)
				return -EINVAL;
			switch (i) {
			case 0:
				viafb_write_reg_mask(CR9B, VIACR, reg_val,
					0x0f);
				break;
			case 1:
				viafb_write_reg_mask(SR65, VIASR,
					reg_val << 2, 0x0c);
				break;
			case 2:
				viafb_write_reg_mask(SR65, VIASR, reg_val,
					0x03);
				break;
			default:
				break;
			}
		} else {
			break;
		}
	}
	return count;
}

static const struct file_operations viafb_dvp1_proc_fops = {
	.owner = THIS_MODULE,
	.open = viafb_dvp1_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = viafb_dvp1_proc_write,
};

/* Show the DFP-high setting (CR97[3:0]). */
static int viafb_dfph_proc_show(struct seq_file *m, void *v)
{
	u8 dfp_high = 0;

	dfp_high = viafb_read_reg(VIACR, CR97) & 0x0f;
	seq_printf(m, "%x\n", dfp_high);
	return 0;
}

static int viafb_dfph_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, viafb_dfph_proc_show, NULL);
}

/* Write handler: accept one byte value and program it into CR97[3:0]. */
static ssize_t viafb_dfph_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	char buf[20];
	u8 reg_val = 0;
	unsigned long length;

	if (count < 1)
		return -EINVAL;
	length = count > 20 ?
	20 : count;
	if (copy_from_user(&buf[0], buffer, length))
		return -EFAULT;
	buf[length - 1] = '\0'; /* ensure the copied user string is terminated */
	if (kstrtou8(buf, 0, &reg_val) < 0)
		return -EINVAL;
	viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f);
	return count;
}

static const struct file_operations viafb_dfph_proc_fops = {
	.owner = THIS_MODULE,
	.open = viafb_dfph_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = viafb_dfph_proc_write,
};

/* Show the DFP-low setting (CR99[3:0]). */
static int viafb_dfpl_proc_show(struct seq_file *m, void *v)
{
	u8 dfp_low = 0;

	dfp_low = viafb_read_reg(VIACR, CR99) & 0x0f;
	seq_printf(m, "%x\n", dfp_low);
	return 0;
}

static int viafb_dfpl_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, viafb_dfpl_proc_show, NULL);
}

/* Write handler: accept one byte value and program it into CR99[3:0]. */
static ssize_t viafb_dfpl_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	char buf[20];
	u8 reg_val = 0;
	unsigned long length;

	if (count < 1)
		return -EINVAL;
	length = count > 20 ? 20 : count;
	if (copy_from_user(&buf[0], buffer, length))
		return -EFAULT;
	buf[length - 1] = '\0'; /* ensure the copied user string is terminated */
	if (kstrtou8(buf, 0, &reg_val) < 0)
		return -EINVAL;
	viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f);
	return count;
}

static const struct file_operations viafb_dfpl_proc_fops = {
	.owner = THIS_MODULE,
	.open = viafb_dfpl_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = viafb_dfpl_proc_write,
};

/*
 * Show the VT1636 LVDS transmitter registers 0x08/0x09 (read over GPIO
 * I2C) for whichever of the two LVDS chip slots holds a VT1636.
 */
static int viafb_vt1636_proc_show(struct seq_file *m, void *v)
{
	u8 vt1636_08 = 0, vt1636_09 = 0;

	switch (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) {
	case VT1636_LVDS:
		vt1636_08 =
		    viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info,
		    &viaparinfo->chip_info->lvds_chip_info, 0x08) & 0x0f;

		vt1636_09 =
		    viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info,
		    &viaparinfo->chip_info->lvds_chip_info, 0x09) & 0x1f;

		seq_printf(m, "%x %x\n", vt1636_08, vt1636_09);
		break;
	default:
		break;
	}

	switch (viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) {
	case VT1636_LVDS:
vt1636_08 = viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info2, &viaparinfo->chip_info->lvds_chip_info2, 0x08) & 0x0f; vt1636_09 = viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info2, &viaparinfo->chip_info->lvds_chip_info2, 0x09) & 0x1f; seq_printf(m, " %x %x\n", vt1636_08, vt1636_09); break; default: break; } return 0; } static int viafb_vt1636_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_vt1636_proc_show, NULL); } static ssize_t viafb_vt1636_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char buf[30], *value, *pbuf; struct IODATA reg_val; unsigned long length, i; if (count < 1) return -EINVAL; length = count > 30 ? 30 : count; if (copy_from_user(&buf[0], buffer, length)) return -EFAULT; buf[length - 1] = '\0'; /*Ensure end string */ pbuf = &buf[0]; switch (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) { case VT1636_LVDS: for (i = 0; i < 2; i++) { value = strsep(&pbuf, " "); if (value != NULL) { if (kstrtou8(value, 0, &reg_val.Data) < 0) return -EINVAL; switch (i) { case 0: reg_val.Index = 0x08; reg_val.Mask = 0x0f; viafb_gpio_i2c_write_mask_lvds (viaparinfo->lvds_setting_info, &viaparinfo-> chip_info->lvds_chip_info, reg_val); break; case 1: reg_val.Index = 0x09; reg_val.Mask = 0x1f; viafb_gpio_i2c_write_mask_lvds (viaparinfo->lvds_setting_info, &viaparinfo-> chip_info->lvds_chip_info, reg_val); break; default: break; } } else { break; } } break; default: break; } switch (viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) { case VT1636_LVDS: for (i = 0; i < 2; i++) { value = strsep(&pbuf, " "); if (value != NULL) { if (kstrtou8(value, 0, &reg_val.Data) < 0) return -EINVAL; switch (i) { case 0: reg_val.Index = 0x08; reg_val.Mask = 0x0f; viafb_gpio_i2c_write_mask_lvds (viaparinfo->lvds_setting_info2, &viaparinfo-> chip_info->lvds_chip_info2, reg_val); break; case 1: reg_val.Index = 0x09; reg_val.Mask = 0x1f; viafb_gpio_i2c_write_mask_lvds 
(viaparinfo->lvds_setting_info2, &viaparinfo-> chip_info->lvds_chip_info2, reg_val); break; default: break; } } else { break; } } break; default: break; } return count; } static const struct file_operations viafb_vt1636_proc_fops = { .owner = THIS_MODULE, .open = viafb_vt1636_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = viafb_vt1636_proc_write, }; #endif /* CONFIG_FB_VIA_DIRECT_PROCFS */ static int viafb_sup_odev_proc_show(struct seq_file *m, void *v) { via_odev_to_seq(m, supported_odev_map[ viaparinfo->shared->chip_info.gfx_chip_name]); return 0; } static int viafb_sup_odev_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_sup_odev_proc_show, NULL); } static const struct file_operations viafb_sup_odev_proc_fops = { .owner = THIS_MODULE, .open = viafb_sup_odev_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static ssize_t odev_update(const char __user *buffer, size_t count, u32 *odev) { char buf[64], *ptr = buf; u32 devices; bool add, sub; if (count < 1 || count > 63) return -EINVAL; if (copy_from_user(&buf[0], buffer, count)) return -EFAULT; buf[count] = '\0'; add = buf[0] == '+'; sub = buf[0] == '-'; if (add || sub) ptr++; devices = via_parse_odev(ptr, &ptr); if (*ptr == '\n') ptr++; if (*ptr != 0) return -EINVAL; if (add) *odev |= devices; else if (sub) *odev &= ~devices; else *odev = devices; return count; } static int viafb_iga1_odev_proc_show(struct seq_file *m, void *v) { via_odev_to_seq(m, viaparinfo->shared->iga1_devices); return 0; } static int viafb_iga1_odev_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_iga1_odev_proc_show, NULL); } static ssize_t viafb_iga1_odev_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { u32 dev_on, dev_off, dev_old, dev_new; ssize_t res; dev_old = dev_new = viaparinfo->shared->iga1_devices; res = odev_update(buffer, count, &dev_new); if (res != count) 
return res; dev_off = dev_old & ~dev_new; dev_on = dev_new & ~dev_old; viaparinfo->shared->iga1_devices = dev_new; viaparinfo->shared->iga2_devices &= ~dev_new; via_set_state(dev_off, VIA_STATE_OFF); via_set_source(dev_new, IGA1); via_set_state(dev_on, VIA_STATE_ON); return res; } static const struct file_operations viafb_iga1_odev_proc_fops = { .owner = THIS_MODULE, .open = viafb_iga1_odev_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = viafb_iga1_odev_proc_write, }; static int viafb_iga2_odev_proc_show(struct seq_file *m, void *v) { via_odev_to_seq(m, viaparinfo->shared->iga2_devices); return 0; } static int viafb_iga2_odev_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_iga2_odev_proc_show, NULL); } static ssize_t viafb_iga2_odev_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { u32 dev_on, dev_off, dev_old, dev_new; ssize_t res; dev_old = dev_new = viaparinfo->shared->iga2_devices; res = odev_update(buffer, count, &dev_new); if (res != count) return res; dev_off = dev_old & ~dev_new; dev_on = dev_new & ~dev_old; viaparinfo->shared->iga2_devices = dev_new; viaparinfo->shared->iga1_devices &= ~dev_new; via_set_state(dev_off, VIA_STATE_OFF); via_set_source(dev_new, IGA2); via_set_state(dev_on, VIA_STATE_ON); return res; } static const struct file_operations viafb_iga2_odev_proc_fops = { .owner = THIS_MODULE, .open = viafb_iga2_odev_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = viafb_iga2_odev_proc_write, }; #define IS_VT1636(lvds_chip) ((lvds_chip).lvds_chip_name == VT1636_LVDS) static void viafb_init_proc(struct viafb_shared *shared) { struct proc_dir_entry *iga1_entry, *iga2_entry, *viafb_entry = proc_mkdir("viafb", NULL); shared->proc_entry = viafb_entry; if (viafb_entry) { #ifdef CONFIG_FB_VIA_DIRECT_PROCFS proc_create("dvp0", 0, viafb_entry, &viafb_dvp0_proc_fops); proc_create("dvp1", 0, viafb_entry, 
&viafb_dvp1_proc_fops); proc_create("dfph", 0, viafb_entry, &viafb_dfph_proc_fops); proc_create("dfpl", 0, viafb_entry, &viafb_dfpl_proc_fops); if (IS_VT1636(shared->chip_info.lvds_chip_info) || IS_VT1636(shared->chip_info.lvds_chip_info2)) proc_create("vt1636", 0, viafb_entry, &viafb_vt1636_proc_fops); #endif /* CONFIG_FB_VIA_DIRECT_PROCFS */ proc_create("supported_output_devices", 0, viafb_entry, &viafb_sup_odev_proc_fops); iga1_entry = proc_mkdir("iga1", viafb_entry); shared->iga1_proc_entry = iga1_entry; proc_create("output_devices", 0, iga1_entry, &viafb_iga1_odev_proc_fops); iga2_entry = proc_mkdir("iga2", viafb_entry); shared->iga2_proc_entry = iga2_entry; proc_create("output_devices", 0, iga2_entry, &viafb_iga2_odev_proc_fops); } } static void viafb_remove_proc(struct viafb_shared *shared) { struct proc_dir_entry *viafb_entry = shared->proc_entry, *iga1_entry = shared->iga1_proc_entry, *iga2_entry = shared->iga2_proc_entry; if (!viafb_entry) return; remove_proc_entry("output_devices", iga2_entry); remove_proc_entry("iga2", viafb_entry); remove_proc_entry("output_devices", iga1_entry); remove_proc_entry("iga1", viafb_entry); remove_proc_entry("supported_output_devices", viafb_entry); #ifdef CONFIG_FB_VIA_DIRECT_PROCFS remove_proc_entry("dvp0", viafb_entry);/* parent dir */ remove_proc_entry("dvp1", viafb_entry); remove_proc_entry("dfph", viafb_entry); remove_proc_entry("dfpl", viafb_entry); if (IS_VT1636(shared->chip_info.lvds_chip_info) || IS_VT1636(shared->chip_info.lvds_chip_info2)) remove_proc_entry("vt1636", viafb_entry); #endif /* CONFIG_FB_VIA_DIRECT_PROCFS */ remove_proc_entry("viafb", NULL); } #undef IS_VT1636 static int parse_mode(const char *str, u32 devices, u32 *xres, u32 *yres) { const struct fb_videomode *mode = NULL; char *ptr; if (!str) { if (devices == VIA_CRT) mode = via_aux_get_preferred_mode( viaparinfo->shared->i2c_26); else if (devices == VIA_DVP1) mode = via_aux_get_preferred_mode( viaparinfo->shared->i2c_31); if (mode) { *xres = 
mode->xres; *yres = mode->yres; } else if (machine_is_olpc()) { *xres = 1200; *yres = 900; } else { *xres = 640; *yres = 480; } return 0; } *xres = simple_strtoul(str, &ptr, 10); if (ptr[0] != 'x') return -EINVAL; *yres = simple_strtoul(&ptr[1], &ptr, 10); if (ptr[0]) return -EINVAL; return 0; } #ifdef CONFIG_PM static int viafb_suspend(void *unused) { console_lock(); fb_set_suspend(viafbinfo, 1); viafb_sync(viafbinfo); console_unlock(); return 0; } static int viafb_resume(void *unused) { console_lock(); if (viaparinfo->shared->vdev->engine_mmio) viafb_reset_engine(viaparinfo); viafb_set_par(viafbinfo); if (viafb_dual_fb) viafb_set_par(viafbinfo1); fb_set_suspend(viafbinfo, 0); console_unlock(); return 0; } static struct viafb_pm_hooks viafb_fb_pm_hooks = { .suspend = viafb_suspend, .resume = viafb_resume }; #endif static void __devinit i2c_bus_probe(struct viafb_shared *shared) { /* should be always CRT */ printk(KERN_INFO "viafb: Probing I2C bus 0x26\n"); shared->i2c_26 = via_aux_probe(viafb_find_i2c_adapter(VIA_PORT_26)); /* seems to be usually DVP1 */ printk(KERN_INFO "viafb: Probing I2C bus 0x31\n"); shared->i2c_31 = via_aux_probe(viafb_find_i2c_adapter(VIA_PORT_31)); /* FIXME: what is this? 
*/ if (!machine_is_olpc()) { printk(KERN_INFO "viafb: Probing I2C bus 0x2C\n"); shared->i2c_2C = via_aux_probe(viafb_find_i2c_adapter(VIA_PORT_2C)); } printk(KERN_INFO "viafb: Finished I2C bus probing"); } static void i2c_bus_free(struct viafb_shared *shared) { via_aux_free(shared->i2c_26); via_aux_free(shared->i2c_31); via_aux_free(shared->i2c_2C); } int __devinit via_fb_pci_probe(struct viafb_dev *vdev) { u32 default_xres, default_yres; struct fb_var_screeninfo default_var; int rc; u32 viafb_par_length; DEBUG_MSG(KERN_INFO "VIAFB PCI Probe!!\n"); memset(&default_var, 0, sizeof(default_var)); viafb_par_length = ALIGN(sizeof(struct viafb_par), BITS_PER_LONG/8); /* Allocate fb_info and ***_par here, also including some other needed * variables */ viafbinfo = framebuffer_alloc(viafb_par_length + ALIGN(sizeof(struct viafb_shared), BITS_PER_LONG/8), &vdev->pdev->dev); if (!viafbinfo) { printk(KERN_ERR"Could not allocate memory for viafb_info.\n"); return -ENOMEM; } viaparinfo = (struct viafb_par *)viafbinfo->par; viaparinfo->shared = viafbinfo->par + viafb_par_length; viaparinfo->shared->vdev = vdev; viaparinfo->vram_addr = 0; viaparinfo->tmds_setting_info = &viaparinfo->shared->tmds_setting_info; viaparinfo->lvds_setting_info = &viaparinfo->shared->lvds_setting_info; viaparinfo->lvds_setting_info2 = &viaparinfo->shared->lvds_setting_info2; viaparinfo->chip_info = &viaparinfo->shared->chip_info; i2c_bus_probe(viaparinfo->shared); if (viafb_dual_fb) viafb_SAMM_ON = 1; parse_lcd_port(); parse_dvi_port(); viafb_init_chip_info(vdev->chip_type); /* * The framebuffer will have been successfully mapped by * the core (or we'd not be here), but we still need to * set up our own accounting. 
*/ viaparinfo->fbmem = vdev->fbmem_start; viaparinfo->memsize = vdev->fbmem_len; viaparinfo->fbmem_free = viaparinfo->memsize; viaparinfo->fbmem_used = 0; viafbinfo->screen_base = vdev->fbmem; viafbinfo->fix.mmio_start = vdev->engine_start; viafbinfo->fix.mmio_len = vdev->engine_len; viafbinfo->node = 0; viafbinfo->fbops = &viafb_ops; viafbinfo->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; viafbinfo->pseudo_palette = pseudo_pal; if (viafb_accel && !viafb_setup_engine(viafbinfo)) { viafbinfo->flags |= FBINFO_HWACCEL_COPYAREA | FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT; default_var.accel_flags = FB_ACCELF_TEXT; } else { viafbinfo->flags |= FBINFO_HWACCEL_DISABLED; default_var.accel_flags = 0; } if (viafb_second_size && (viafb_second_size < 8)) { viafb_second_offset = viaparinfo->fbmem_free - viafb_second_size * 1024 * 1024; } else { viafb_second_size = 8; viafb_second_offset = viaparinfo->fbmem_free - viafb_second_size * 1024 * 1024; } parse_mode(viafb_mode, viaparinfo->shared->iga1_devices, &default_xres, &default_yres); if (viafb_SAMM_ON == 1) parse_mode(viafb_mode1, viaparinfo->shared->iga2_devices, &viafb_second_xres, &viafb_second_yres); default_var.xres = default_xres; default_var.yres = default_yres; default_var.xres_virtual = default_xres; default_var.yres_virtual = default_yres; default_var.bits_per_pixel = viafb_bpp; viafb_fill_var_timing_info(&default_var, viafb_get_best_mode( default_var.xres, default_var.yres, viafb_refresh)); viafb_setup_fixinfo(&viafbinfo->fix, viaparinfo); viafbinfo->var = default_var; if (viafb_dual_fb) { viafbinfo1 = framebuffer_alloc(viafb_par_length, &vdev->pdev->dev); if (!viafbinfo1) { printk(KERN_ERR "allocate the second framebuffer struct error\n"); rc = -ENOMEM; goto out_fb_release; } viaparinfo1 = viafbinfo1->par; memcpy(viaparinfo1, viaparinfo, viafb_par_length); viaparinfo1->vram_addr = viafb_second_offset; viaparinfo1->memsize = viaparinfo->memsize - viafb_second_offset; viaparinfo->memsize = 
viafb_second_offset; viaparinfo1->fbmem = viaparinfo->fbmem + viafb_second_offset; viaparinfo1->fbmem_used = viaparinfo->fbmem_used; viaparinfo1->fbmem_free = viaparinfo1->memsize - viaparinfo1->fbmem_used; viaparinfo->fbmem_free = viaparinfo->memsize; viaparinfo->fbmem_used = 0; viaparinfo->iga_path = IGA1; viaparinfo1->iga_path = IGA2; memcpy(viafbinfo1, viafbinfo, sizeof(struct fb_info)); viafbinfo1->par = viaparinfo1; viafbinfo1->screen_base = viafbinfo->screen_base + viafb_second_offset; default_var.xres = viafb_second_xres; default_var.yres = viafb_second_yres; default_var.xres_virtual = viafb_second_xres; default_var.yres_virtual = viafb_second_yres; default_var.bits_per_pixel = viafb_bpp1; viafb_fill_var_timing_info(&default_var, viafb_get_best_mode( default_var.xres, default_var.yres, viafb_refresh1)); viafb_setup_fixinfo(&viafbinfo1->fix, viaparinfo1); viafb_check_var(&default_var, viafbinfo1); viafbinfo1->var = default_var; viafb_update_fix(viafbinfo1); viaparinfo1->depth = fb_get_color_depth(&viafbinfo1->var, &viafbinfo1->fix); } viafb_check_var(&viafbinfo->var, viafbinfo); viafb_update_fix(viafbinfo); viaparinfo->depth = fb_get_color_depth(&viafbinfo->var, &viafbinfo->fix); default_var.activate = FB_ACTIVATE_NOW; rc = fb_alloc_cmap(&viafbinfo->cmap, 256, 0); if (rc) goto out_fb1_release; if (viafb_dual_fb && (viafb_primary_dev == LCD_Device) && (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)) { rc = register_framebuffer(viafbinfo1); if (rc) goto out_dealloc_cmap; } rc = register_framebuffer(viafbinfo); if (rc) goto out_fb1_unreg_lcd_cle266; if (viafb_dual_fb && ((viafb_primary_dev != LCD_Device) || (viaparinfo->chip_info->gfx_chip_name != UNICHROME_CLE266))) { rc = register_framebuffer(viafbinfo1); if (rc) goto out_fb_unreg; } DEBUG_MSG(KERN_INFO "fb%d: %s frame buffer device %dx%d-%dbpp\n", viafbinfo->node, viafbinfo->fix.id, default_var.xres, default_var.yres, default_var.bits_per_pixel); viafb_init_proc(viaparinfo->shared); 
viafb_init_dac(IGA2); #ifdef CONFIG_PM viafb_pm_register(&viafb_fb_pm_hooks); #endif return 0; out_fb_unreg: unregister_framebuffer(viafbinfo); out_fb1_unreg_lcd_cle266: if (viafb_dual_fb && (viafb_primary_dev == LCD_Device) && (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)) unregister_framebuffer(viafbinfo1); out_dealloc_cmap: fb_dealloc_cmap(&viafbinfo->cmap); out_fb1_release: if (viafbinfo1) framebuffer_release(viafbinfo1); out_fb_release: i2c_bus_free(viaparinfo->shared); framebuffer_release(viafbinfo); return rc; } void __devexit via_fb_pci_remove(struct pci_dev *pdev) { DEBUG_MSG(KERN_INFO "via_pci_remove!\n"); fb_dealloc_cmap(&viafbinfo->cmap); unregister_framebuffer(viafbinfo); if (viafb_dual_fb) unregister_framebuffer(viafbinfo1); viafb_remove_proc(viaparinfo->shared); i2c_bus_free(viaparinfo->shared); framebuffer_release(viafbinfo); if (viafb_dual_fb) framebuffer_release(viafbinfo1); } #ifndef MODULE static int __init viafb_setup(void) { char *this_opt; char *options; DEBUG_MSG(KERN_INFO "viafb_setup!\n"); if (fb_get_options("viafb", &options)) return -ENODEV; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; if (!strncmp(this_opt, "viafb_mode1=", 12)) { viafb_mode1 = kstrdup(this_opt + 12, GFP_KERNEL); } else if (!strncmp(this_opt, "viafb_mode=", 11)) { viafb_mode = kstrdup(this_opt + 11, GFP_KERNEL); } else if (!strncmp(this_opt, "viafb_bpp1=", 11)) { if (kstrtouint(this_opt + 11, 0, &viafb_bpp1) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_bpp=", 10)) { if (kstrtouint(this_opt + 10, 0, &viafb_bpp) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_refresh1=", 15)) { if (kstrtoint(this_opt + 15, 0, &viafb_refresh1) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_refresh=", 14)) { if (kstrtoint(this_opt + 14, 0, &viafb_refresh) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_lcd_dsp_method=", 21)) { if (kstrtoint(this_opt + 21, 0, 
&viafb_lcd_dsp_method) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_lcd_panel_id=", 19)) { if (kstrtoint(this_opt + 19, 0, &viafb_lcd_panel_id) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_accel=", 12)) { if (kstrtoint(this_opt + 12, 0, &viafb_accel) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_SAMM_ON=", 14)) { if (kstrtoint(this_opt + 14, 0, &viafb_SAMM_ON) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_active_dev=", 17)) { viafb_active_dev = kstrdup(this_opt + 17, GFP_KERNEL); } else if (!strncmp(this_opt, "viafb_display_hardware_layout=", 30)) { if (kstrtoint(this_opt + 30, 0, &viafb_display_hardware_layout) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_second_size=", 18)) { if (kstrtoint(this_opt + 18, 0, &viafb_second_size) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_platform_epia_dvi=", 24)) { if (kstrtoint(this_opt + 24, 0, &viafb_platform_epia_dvi) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_device_lcd_dualedge=", 26)) { if (kstrtoint(this_opt + 26, 0, &viafb_device_lcd_dualedge) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_bus_width=", 16)) { if (kstrtoint(this_opt + 16, 0, &viafb_bus_width) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_lcd_mode=", 15)) { if (kstrtoint(this_opt + 15, 0, &viafb_lcd_mode) < 0) return -EINVAL; } else if (!strncmp(this_opt, "viafb_lcd_port=", 15)) { viafb_lcd_port = kstrdup(this_opt + 15, GFP_KERNEL); } else if (!strncmp(this_opt, "viafb_dvi_port=", 15)) { viafb_dvi_port = kstrdup(this_opt + 15, GFP_KERNEL); } } return 0; } #endif /* * These are called out of via-core for now. */ int __init viafb_init(void) { u32 dummy_x, dummy_y; int r = 0; if (machine_is_olpc()) /* Apply XO-1.5-specific configuration. 
*/ viafb_lcd_panel_id = 23; #ifndef MODULE r = viafb_setup(); if (r < 0) return r; #endif if (parse_mode(viafb_mode, 0, &dummy_x, &dummy_y) || !viafb_get_best_mode(dummy_x, dummy_y, viafb_refresh) || parse_mode(viafb_mode1, 0, &dummy_x, &dummy_y) || !viafb_get_best_mode(dummy_x, dummy_y, viafb_refresh1) || viafb_bpp < 0 || viafb_bpp > 32 || viafb_bpp1 < 0 || viafb_bpp1 > 32 || parse_active_dev()) return -EINVAL; printk(KERN_INFO "VIA Graphics Integration Chipset framebuffer %d.%d initializing\n", VERSION_MAJOR, VERSION_MINOR); return r; } void __exit viafb_exit(void) { DEBUG_MSG(KERN_INFO "viafb_exit!\n"); } static struct fb_ops viafb_ops = { .owner = THIS_MODULE, .fb_open = viafb_open, .fb_release = viafb_release, .fb_check_var = viafb_check_var, .fb_set_par = viafb_set_par, .fb_setcolreg = viafb_setcolreg, .fb_pan_display = viafb_pan_display, .fb_blank = viafb_blank, .fb_fillrect = viafb_fillrect, .fb_copyarea = viafb_copyarea, .fb_imageblit = viafb_imageblit, .fb_cursor = viafb_cursor, .fb_ioctl = viafb_ioctl, .fb_sync = viafb_sync, }; #ifdef MODULE module_param(viafb_mode, charp, S_IRUSR); MODULE_PARM_DESC(viafb_mode, "Set resolution (default=640x480)"); module_param(viafb_mode1, charp, S_IRUSR); MODULE_PARM_DESC(viafb_mode1, "Set resolution (default=640x480)"); module_param(viafb_bpp, int, S_IRUSR); MODULE_PARM_DESC(viafb_bpp, "Set color depth (default=32bpp)"); module_param(viafb_bpp1, int, S_IRUSR); MODULE_PARM_DESC(viafb_bpp1, "Set color depth (default=32bpp)"); module_param(viafb_refresh, int, S_IRUSR); MODULE_PARM_DESC(viafb_refresh, "Set CRT viafb_refresh rate (default = 60)"); module_param(viafb_refresh1, int, S_IRUSR); MODULE_PARM_DESC(viafb_refresh1, "Set CRT refresh rate (default = 60)"); module_param(viafb_lcd_panel_id, int, S_IRUSR); MODULE_PARM_DESC(viafb_lcd_panel_id, "Set Flat Panel type(Default=1024x768)"); module_param(viafb_lcd_dsp_method, int, S_IRUSR); MODULE_PARM_DESC(viafb_lcd_dsp_method, "Set Flat Panel display scaling 
method.(Default=Expandsion)"); module_param(viafb_SAMM_ON, int, S_IRUSR); MODULE_PARM_DESC(viafb_SAMM_ON, "Turn on/off flag of SAMM(Default=OFF)"); module_param(viafb_accel, int, S_IRUSR); MODULE_PARM_DESC(viafb_accel, "Set 2D Hardware Acceleration: 0 = OFF, 1 = ON (default)"); module_param(viafb_active_dev, charp, S_IRUSR); MODULE_PARM_DESC(viafb_active_dev, "Specify active devices."); module_param(viafb_display_hardware_layout, int, S_IRUSR); MODULE_PARM_DESC(viafb_display_hardware_layout, "Display Hardware Layout (LCD Only, DVI Only...,etc)"); module_param(viafb_second_size, int, S_IRUSR); MODULE_PARM_DESC(viafb_second_size, "Set secondary device memory size"); module_param(viafb_dual_fb, int, S_IRUSR); MODULE_PARM_DESC(viafb_dual_fb, "Turn on/off flag of dual framebuffer devices.(Default = OFF)"); module_param(viafb_platform_epia_dvi, int, S_IRUSR); MODULE_PARM_DESC(viafb_platform_epia_dvi, "Turn on/off flag of DVI devices on EPIA board.(Default = OFF)"); module_param(viafb_device_lcd_dualedge, int, S_IRUSR); MODULE_PARM_DESC(viafb_device_lcd_dualedge, "Turn on/off flag of dual edge panel.(Default = OFF)"); module_param(viafb_bus_width, int, S_IRUSR); MODULE_PARM_DESC(viafb_bus_width, "Set bus width of panel.(Default = 12)"); module_param(viafb_lcd_mode, int, S_IRUSR); MODULE_PARM_DESC(viafb_lcd_mode, "Set Flat Panel mode(Default=OPENLDI)"); module_param(viafb_lcd_port, charp, S_IRUSR); MODULE_PARM_DESC(viafb_lcd_port, "Specify LCD output port."); module_param(viafb_dvi_port, charp, S_IRUSR); MODULE_PARM_DESC(viafb_dvi_port, "Specify DVI output port."); MODULE_LICENSE("GPL"); #endif
gpl-2.0
bagnz0r/GT-I8160_Kernel
net/dsa/mv88e6123_61_65.c
4811
11195
/* * net/dsa/mv88e6123_61_65.c - Marvell 88e6123/6161/6165 switch chip support * Copyright (c) 2008-2009 Marvell Semiconductor * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/list.h> #include <linux/netdevice.h> #include <linux/phy.h> #include "dsa_priv.h" #include "mv88e6xxx.h" static char *mv88e6123_61_65_probe(struct mii_bus *bus, int sw_addr) { int ret; ret = __mv88e6xxx_reg_read(bus, sw_addr, REG_PORT(0), 0x03); if (ret >= 0) { ret &= 0xfff0; if (ret == 0x1210) return "Marvell 88E6123"; if (ret == 0x1610) return "Marvell 88E6161"; if (ret == 0x1650) return "Marvell 88E6165"; } return NULL; } static int mv88e6123_61_65_switch_reset(struct dsa_switch *ds) { int i; int ret; /* * Set all ports to the disabled state. */ for (i = 0; i < 8; i++) { ret = REG_READ(REG_PORT(i), 0x04); REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc); } /* * Wait for transmit queues to drain. */ msleep(2); /* * Reset the switch. */ REG_WRITE(REG_GLOBAL, 0x04, 0xc400); /* * Wait up to one second for reset to complete. */ for (i = 0; i < 1000; i++) { ret = REG_READ(REG_GLOBAL, 0x00); if ((ret & 0xc800) == 0xc800) break; msleep(1); } if (i == 1000) return -ETIMEDOUT; return 0; } static int mv88e6123_61_65_setup_global(struct dsa_switch *ds) { int ret; int i; /* * Disable the PHY polling unit (since there won't be any * external PHYs to poll), don't discard packets with * excessive collisions, and mask all interrupt sources. */ REG_WRITE(REG_GLOBAL, 0x04, 0x0000); /* * Set the default address aging time to 5 minutes, and * enable address learn messages to be sent to all message * ports. */ REG_WRITE(REG_GLOBAL, 0x0a, 0x0148); /* * Configure the priority mapping registers. 
*/ ret = mv88e6xxx_config_prio(ds); if (ret < 0) return ret; /* * Configure the upstream port, and configure the upstream * port as the port to which ingress and egress monitor frames * are to be sent. */ REG_WRITE(REG_GLOBAL, 0x1a, (dsa_upstream_port(ds) * 0x1110)); /* * Disable remote management for now, and set the switch's * DSA device number. */ REG_WRITE(REG_GLOBAL, 0x1c, ds->index & 0x1f); /* * Send all frames with destination addresses matching * 01:80:c2:00:00:2x to the CPU port. */ REG_WRITE(REG_GLOBAL2, 0x02, 0xffff); /* * Send all frames with destination addresses matching * 01:80:c2:00:00:0x to the CPU port. */ REG_WRITE(REG_GLOBAL2, 0x03, 0xffff); /* * Disable the loopback filter, disable flow control * messages, disable flood broadcast override, disable * removing of provider tags, disable ATU age violation * interrupts, disable tag flow control, force flow * control priority to the highest, and send all special * multicast frames to the CPU at the highest priority. */ REG_WRITE(REG_GLOBAL2, 0x05, 0x00ff); /* * Program the DSA routing table. */ for (i = 0; i < 32; i++) { int nexthop; nexthop = 0x1f; if (i != ds->index && i < ds->dst->pd->nr_chips) nexthop = ds->pd->rtable[i] & 0x1f; REG_WRITE(REG_GLOBAL2, 0x06, 0x8000 | (i << 8) | nexthop); } /* * Clear all trunk masks. */ for (i = 0; i < 8; i++) REG_WRITE(REG_GLOBAL2, 0x07, 0x8000 | (i << 12) | 0xff); /* * Clear all trunk mappings. */ for (i = 0; i < 16; i++) REG_WRITE(REG_GLOBAL2, 0x08, 0x8000 | (i << 11)); /* * Disable ingress rate limiting by resetting all ingress * rate limit registers to their initial state. */ for (i = 0; i < 6; i++) REG_WRITE(REG_GLOBAL2, 0x09, 0x9000 | (i << 8)); /* * Initialise cross-chip port VLAN table to reset defaults. */ REG_WRITE(REG_GLOBAL2, 0x0b, 0x9000); /* * Clear the priority override table. 
*/ for (i = 0; i < 16; i++) REG_WRITE(REG_GLOBAL2, 0x0f, 0x8000 | (i << 8)); /* @@@ initialise AVB (22/23) watchdog (27) sdet (29) registers */ return 0; } static int mv88e6123_61_65_setup_port(struct dsa_switch *ds, int p) { int addr = REG_PORT(p); u16 val; /* * MAC Forcing register: don't force link, speed, duplex * or flow control state to any particular values on physical * ports, but force the CPU port and all DSA ports to 1000 Mb/s * full duplex. */ if (dsa_is_cpu_port(ds, p) || ds->dsa_port_mask & (1 << p)) REG_WRITE(addr, 0x01, 0x003e); else REG_WRITE(addr, 0x01, 0x0003); /* * Do not limit the period of time that this port can be * paused for by the remote end or the period of time that * this port can pause the remote end. */ REG_WRITE(addr, 0x02, 0x0000); /* * Port Control: disable Drop-on-Unlock, disable Drop-on-Lock, * disable Header mode, enable IGMP/MLD snooping, disable VLAN * tunneling, determine priority by looking at 802.1p and IP * priority fields (IP prio has precedence), and set STP state * to Forwarding. * * If this is the CPU link, use DSA or EDSA tagging depending * on which tagging mode was configured. * * If this is a link to another switch, use DSA tagging mode. * * If this is the upstream port for this switch, enable * forwarding of unknown unicasts and multicasts. */ val = 0x0433; if (dsa_is_cpu_port(ds, p)) { if (ds->dst->tag_protocol == htons(ETH_P_EDSA)) val |= 0x3300; else val |= 0x0100; } if (ds->dsa_port_mask & (1 << p)) val |= 0x0100; if (p == dsa_upstream_port(ds)) val |= 0x000c; REG_WRITE(addr, 0x04, val); /* * Port Control 1: disable trunking. Also, if this is the * CPU port, enable learn messages to be sent to this port. */ REG_WRITE(addr, 0x05, dsa_is_cpu_port(ds, p) ? 0x8000 : 0x0000); /* * Port based VLAN map: give each port its own address * database, allow the CPU port to talk to each of the 'real' * ports, and allow each of the 'real' ports to only talk to * the upstream port. 
*/ val = (p & 0xf) << 12; if (dsa_is_cpu_port(ds, p)) val |= ds->phys_port_mask; else val |= 1 << dsa_upstream_port(ds); REG_WRITE(addr, 0x06, val); /* * Default VLAN ID and priority: don't set a default VLAN * ID, and set the default packet priority to zero. */ REG_WRITE(addr, 0x07, 0x0000); /* * Port Control 2: don't force a good FCS, set the maximum * frame size to 10240 bytes, don't let the switch add or * strip 802.1q tags, don't discard tagged or untagged frames * on this port, do a destination address lookup on all * received packets as usual, disable ARP mirroring and don't * send a copy of all transmitted/received frames on this port * to the CPU. */ REG_WRITE(addr, 0x08, 0x2080); /* * Egress rate control: disable egress rate control. */ REG_WRITE(addr, 0x09, 0x0001); /* * Egress rate control 2: disable egress rate control. */ REG_WRITE(addr, 0x0a, 0x0000); /* * Port Association Vector: when learning source addresses * of packets, add the address to the address database using * a port bitmap that has only the bit for this port set and * the other bits clear. */ REG_WRITE(addr, 0x0b, 1 << p); /* * Port ATU control: disable limiting the number of address * database entries that this port is allowed to use. */ REG_WRITE(addr, 0x0c, 0x0000); /* * Priorit Override: disable DA, SA and VTU priority override. */ REG_WRITE(addr, 0x0d, 0x0000); /* * Port Ethertype: use the Ethertype DSA Ethertype value. */ REG_WRITE(addr, 0x0f, ETH_P_EDSA); /* * Tag Remap: use an identity 802.1p prio -> switch prio * mapping. */ REG_WRITE(addr, 0x18, 0x3210); /* * Tag Remap 2: use an identity 802.1p prio -> switch prio * mapping. 
*/ REG_WRITE(addr, 0x19, 0x7654); return 0; } static int mv88e6123_61_65_setup(struct dsa_switch *ds) { struct mv88e6xxx_priv_state *ps = (void *)(ds + 1); int i; int ret; mutex_init(&ps->smi_mutex); mutex_init(&ps->stats_mutex); ret = mv88e6123_61_65_switch_reset(ds); if (ret < 0) return ret; /* @@@ initialise vtu and atu */ ret = mv88e6123_61_65_setup_global(ds); if (ret < 0) return ret; for (i = 0; i < 6; i++) { ret = mv88e6123_61_65_setup_port(ds, i); if (ret < 0) return ret; } return 0; } static int mv88e6123_61_65_port_to_phy_addr(int port) { if (port >= 0 && port <= 4) return port; return -1; } static int mv88e6123_61_65_phy_read(struct dsa_switch *ds, int port, int regnum) { int addr = mv88e6123_61_65_port_to_phy_addr(port); return mv88e6xxx_phy_read(ds, addr, regnum); } static int mv88e6123_61_65_phy_write(struct dsa_switch *ds, int port, int regnum, u16 val) { int addr = mv88e6123_61_65_port_to_phy_addr(port); return mv88e6xxx_phy_write(ds, addr, regnum, val); } static struct mv88e6xxx_hw_stat mv88e6123_61_65_hw_stats[] = { { "in_good_octets", 8, 0x00, }, { "in_bad_octets", 4, 0x02, }, { "in_unicast", 4, 0x04, }, { "in_broadcasts", 4, 0x06, }, { "in_multicasts", 4, 0x07, }, { "in_pause", 4, 0x16, }, { "in_undersize", 4, 0x18, }, { "in_fragments", 4, 0x19, }, { "in_oversize", 4, 0x1a, }, { "in_jabber", 4, 0x1b, }, { "in_rx_error", 4, 0x1c, }, { "in_fcs_error", 4, 0x1d, }, { "out_octets", 8, 0x0e, }, { "out_unicast", 4, 0x10, }, { "out_broadcasts", 4, 0x13, }, { "out_multicasts", 4, 0x12, }, { "out_pause", 4, 0x15, }, { "excessive", 4, 0x11, }, { "collisions", 4, 0x1e, }, { "deferred", 4, 0x05, }, { "single", 4, 0x14, }, { "multiple", 4, 0x17, }, { "out_fcs_error", 4, 0x03, }, { "late", 4, 0x1f, }, { "hist_64bytes", 4, 0x08, }, { "hist_65_127bytes", 4, 0x09, }, { "hist_128_255bytes", 4, 0x0a, }, { "hist_256_511bytes", 4, 0x0b, }, { "hist_512_1023bytes", 4, 0x0c, }, { "hist_1024_max_bytes", 4, 0x0d, }, }; static void mv88e6123_61_65_get_strings(struct 
dsa_switch *ds, int port, uint8_t *data) { mv88e6xxx_get_strings(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats), mv88e6123_61_65_hw_stats, port, data); } static void mv88e6123_61_65_get_ethtool_stats(struct dsa_switch *ds, int port, uint64_t *data) { mv88e6xxx_get_ethtool_stats(ds, ARRAY_SIZE(mv88e6123_61_65_hw_stats), mv88e6123_61_65_hw_stats, port, data); } static int mv88e6123_61_65_get_sset_count(struct dsa_switch *ds) { return ARRAY_SIZE(mv88e6123_61_65_hw_stats); } static struct dsa_switch_driver mv88e6123_61_65_switch_driver = { .tag_protocol = cpu_to_be16(ETH_P_EDSA), .priv_size = sizeof(struct mv88e6xxx_priv_state), .probe = mv88e6123_61_65_probe, .setup = mv88e6123_61_65_setup, .set_addr = mv88e6xxx_set_addr_indirect, .phy_read = mv88e6123_61_65_phy_read, .phy_write = mv88e6123_61_65_phy_write, .poll_link = mv88e6xxx_poll_link, .get_strings = mv88e6123_61_65_get_strings, .get_ethtool_stats = mv88e6123_61_65_get_ethtool_stats, .get_sset_count = mv88e6123_61_65_get_sset_count, }; static int __init mv88e6123_61_65_init(void) { register_switch_driver(&mv88e6123_61_65_switch_driver); return 0; } module_init(mv88e6123_61_65_init); static void __exit mv88e6123_61_65_cleanup(void) { unregister_switch_driver(&mv88e6123_61_65_switch_driver); } module_exit(mv88e6123_61_65_cleanup);
gpl-2.0
AscendG630-DEV/kernel_huawei_msm8610
drivers/media/video/tuner-core.c
4811
34570
/* * i2c tv tuner chip device driver * core core, i.e. kernel interfaces, registering and so on * * Copyright(c) by Ralph Metzler, Gerd Knorr, Gunther Mayer * * Copyright(c) 2005-2011 by Mauro Carvalho Chehab * - Added support for a separate Radio tuner * - Major rework and cleanups at the code * * This driver supports many devices and the idea is to let the driver * detect which device is present. So rather than listing all supported * devices here, we pretend to support a single, fake device type that will * handle both radio and analog TV tuning. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/poll.h> #include <linux/i2c.h> #include <linux/types.h> #include <linux/init.h> #include <linux/videodev2.h> #include <media/tuner.h> #include <media/tuner-types.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include "mt20xx.h" #include "tda8290.h" #include "tea5761.h" #include "tea5767.h" #include "tuner-xc2028.h" #include "tuner-simple.h" #include "tda9887.h" #include "xc5000.h" #include "tda18271.h" #include "xc4000.h" #define UNSET (-1U) #define PREFIX (t->i2c->driver->driver.name) /* * Driver modprobe parameters */ /* insmod options used at init time => read/only */ static unsigned int addr; static unsigned int no_autodetect; static unsigned int show_i2c; module_param(addr, int, 0444); module_param(no_autodetect, int, 0444); module_param(show_i2c, int, 0444); /* insmod options used at runtime => read/write */ static int tuner_debug; static unsigned int tv_range[2] = { 44, 958 }; static unsigned int radio_range[2] = { 65, 108 }; static char pal[] = "--"; static char secam[] = "--"; static char ntsc[] = "-"; module_param_named(debug, tuner_debug, int, 0644); module_param_array(tv_range, int, NULL, 0644); module_param_array(radio_range, int, NULL, 0644); module_param_string(pal, pal, sizeof(pal), 0644); 
module_param_string(secam, secam, sizeof(secam), 0644); module_param_string(ntsc, ntsc, sizeof(ntsc), 0644); /* * Static vars */ static LIST_HEAD(tuner_list); static const struct v4l2_subdev_ops tuner_ops; /* * Debug macros */ #define tuner_warn(fmt, arg...) do { \ printk(KERN_WARNING "%s %d-%04x: " fmt, PREFIX, \ i2c_adapter_id(t->i2c->adapter), \ t->i2c->addr, ##arg); \ } while (0) #define tuner_info(fmt, arg...) do { \ printk(KERN_INFO "%s %d-%04x: " fmt, PREFIX, \ i2c_adapter_id(t->i2c->adapter), \ t->i2c->addr, ##arg); \ } while (0) #define tuner_err(fmt, arg...) do { \ printk(KERN_ERR "%s %d-%04x: " fmt, PREFIX, \ i2c_adapter_id(t->i2c->adapter), \ t->i2c->addr, ##arg); \ } while (0) #define tuner_dbg(fmt, arg...) do { \ if (tuner_debug) \ printk(KERN_DEBUG "%s %d-%04x: " fmt, PREFIX, \ i2c_adapter_id(t->i2c->adapter), \ t->i2c->addr, ##arg); \ } while (0) /* * Internal struct used inside the driver */ struct tuner { /* device */ struct dvb_frontend fe; struct i2c_client *i2c; struct v4l2_subdev sd; struct list_head list; /* keep track of the current settings */ v4l2_std_id std; unsigned int tv_freq; unsigned int radio_freq; unsigned int audmode; enum v4l2_tuner_type mode; unsigned int mode_mask; /* Combination of allowable modes */ bool standby; /* Standby mode */ unsigned int type; /* chip type id */ unsigned int config; const char *name; }; /* * Function prototypes */ static void set_tv_freq(struct i2c_client *c, unsigned int freq); static void set_radio_freq(struct i2c_client *c, unsigned int freq); /* * tuner attach/detach logic */ /* This macro allows us to probe dynamically, avoiding static links */ #ifdef CONFIG_MEDIA_ATTACH #define tuner_symbol_probe(FUNCTION, ARGS...) 
({ \ int __r = -EINVAL; \ typeof(&FUNCTION) __a = symbol_request(FUNCTION); \ if (__a) { \ __r = (int) __a(ARGS); \ symbol_put(FUNCTION); \ } else { \ printk(KERN_ERR "TUNER: Unable to find " \ "symbol "#FUNCTION"()\n"); \ } \ __r; \ }) static void tuner_detach(struct dvb_frontend *fe) { if (fe->ops.tuner_ops.release) { fe->ops.tuner_ops.release(fe); symbol_put_addr(fe->ops.tuner_ops.release); } if (fe->ops.analog_ops.release) { fe->ops.analog_ops.release(fe); symbol_put_addr(fe->ops.analog_ops.release); } } #else #define tuner_symbol_probe(FUNCTION, ARGS...) ({ \ FUNCTION(ARGS); \ }) static void tuner_detach(struct dvb_frontend *fe) { if (fe->ops.tuner_ops.release) fe->ops.tuner_ops.release(fe); if (fe->ops.analog_ops.release) fe->ops.analog_ops.release(fe); } #endif static inline struct tuner *to_tuner(struct v4l2_subdev *sd) { return container_of(sd, struct tuner, sd); } /* * struct analog_demod_ops callbacks */ static void fe_set_params(struct dvb_frontend *fe, struct analog_parameters *params) { struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; struct tuner *t = fe->analog_demod_priv; if (NULL == fe_tuner_ops->set_analog_params) { tuner_warn("Tuner frontend module has no way to set freq\n"); return; } fe_tuner_ops->set_analog_params(fe, params); } static void fe_standby(struct dvb_frontend *fe) { struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; if (fe_tuner_ops->sleep) fe_tuner_ops->sleep(fe); } static int fe_has_signal(struct dvb_frontend *fe) { u16 strength = 0; if (fe->ops.tuner_ops.get_rf_strength) fe->ops.tuner_ops.get_rf_strength(fe, &strength); return strength; } static int fe_set_config(struct dvb_frontend *fe, void *priv_cfg) { struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; struct tuner *t = fe->analog_demod_priv; if (fe_tuner_ops->set_config) return fe_tuner_ops->set_config(fe, priv_cfg); tuner_warn("Tuner frontend module has no way to set config\n"); return 0; } static void tuner_status(struct dvb_frontend *fe); static 
struct analog_demod_ops tuner_analog_ops = { .set_params = fe_set_params, .standby = fe_standby, .has_signal = fe_has_signal, .set_config = fe_set_config, .tuner_status = tuner_status }; /* * Functions to select between radio and TV and tuner probe/remove functions */ /** * set_type - Sets the tuner type for a given device * * @c: i2c_client descriptoy * @type: type of the tuner (e. g. tuner number) * @new_mode_mask: Indicates if tuner supports TV and/or Radio * @new_config: an optional parameter ranging from 0-255 used by a few tuners to adjust an internal parameter, like LNA mode * @tuner_callback: an optional function to be called when switching * to analog mode * * This function applys the tuner config to tuner specified * by tun_setup structure. It contains several per-tuner initialization "magic" */ static void set_type(struct i2c_client *c, unsigned int type, unsigned int new_mode_mask, unsigned int new_config, int (*tuner_callback) (void *dev, int component, int cmd, int arg)) { struct tuner *t = to_tuner(i2c_get_clientdata(c)); struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; unsigned char buffer[4]; int tune_now = 1; if (type == UNSET || type == TUNER_ABSENT) { tuner_dbg("tuner 0x%02x: Tuner type absent\n", c->addr); return; } t->type = type; /* prevent invalid config values */ t->config = new_config < 256 ? 
new_config : 0; if (tuner_callback != NULL) { tuner_dbg("defining GPIO callback\n"); t->fe.callback = tuner_callback; } /* discard private data, in case set_type() was previously called */ tuner_detach(&t->fe); t->fe.analog_demod_priv = NULL; switch (t->type) { case TUNER_MT2032: if (!dvb_attach(microtune_attach, &t->fe, t->i2c->adapter, t->i2c->addr)) goto attach_failed; break; case TUNER_PHILIPS_TDA8290: { struct tda829x_config cfg = { .lna_cfg = t->config, }; if (!dvb_attach(tda829x_attach, &t->fe, t->i2c->adapter, t->i2c->addr, &cfg)) goto attach_failed; break; } case TUNER_TEA5767: if (!dvb_attach(tea5767_attach, &t->fe, t->i2c->adapter, t->i2c->addr)) goto attach_failed; t->mode_mask = T_RADIO; break; case TUNER_TEA5761: if (!dvb_attach(tea5761_attach, &t->fe, t->i2c->adapter, t->i2c->addr)) goto attach_failed; t->mode_mask = T_RADIO; break; case TUNER_PHILIPS_FMD1216ME_MK3: case TUNER_PHILIPS_FMD1216MEX_MK3: buffer[0] = 0x0b; buffer[1] = 0xdc; buffer[2] = 0x9c; buffer[3] = 0x60; i2c_master_send(c, buffer, 4); mdelay(1); buffer[2] = 0x86; buffer[3] = 0x54; i2c_master_send(c, buffer, 4); if (!dvb_attach(simple_tuner_attach, &t->fe, t->i2c->adapter, t->i2c->addr, t->type)) goto attach_failed; break; case TUNER_PHILIPS_TD1316: buffer[0] = 0x0b; buffer[1] = 0xdc; buffer[2] = 0x86; buffer[3] = 0xa4; i2c_master_send(c, buffer, 4); if (!dvb_attach(simple_tuner_attach, &t->fe, t->i2c->adapter, t->i2c->addr, t->type)) goto attach_failed; break; case TUNER_XC2028: { struct xc2028_config cfg = { .i2c_adap = t->i2c->adapter, .i2c_addr = t->i2c->addr, }; if (!dvb_attach(xc2028_attach, &t->fe, &cfg)) goto attach_failed; tune_now = 0; break; } case TUNER_TDA9887: if (!dvb_attach(tda9887_attach, &t->fe, t->i2c->adapter, t->i2c->addr)) goto attach_failed; break; case TUNER_XC5000: { struct xc5000_config xc5000_cfg = { .i2c_address = t->i2c->addr, /* if_khz will be set at dvb_attach() */ .if_khz = 0, }; if (!dvb_attach(xc5000_attach, &t->fe, t->i2c->adapter, &xc5000_cfg)) goto 
attach_failed; tune_now = 0; break; } case TUNER_XC5000C: { struct xc5000_config xc5000c_cfg = { .i2c_address = t->i2c->addr, /* if_khz will be set at dvb_attach() */ .if_khz = 0, .chip_id = XC5000C, }; if (!dvb_attach(xc5000_attach, &t->fe, t->i2c->adapter, &xc5000c_cfg)) goto attach_failed; tune_now = 0; break; } case TUNER_NXP_TDA18271: { struct tda18271_config cfg = { .config = t->config, .small_i2c = TDA18271_03_BYTE_CHUNK_INIT, }; if (!dvb_attach(tda18271_attach, &t->fe, t->i2c->addr, t->i2c->adapter, &cfg)) goto attach_failed; tune_now = 0; break; } case TUNER_XC4000: { struct xc4000_config xc4000_cfg = { .i2c_address = t->i2c->addr, /* FIXME: the correct parameters will be set */ /* only when the digital dvb_attach() occurs */ .default_pm = 0, .dvb_amplitude = 0, .set_smoothedcvbs = 0, .if_khz = 0 }; if (!dvb_attach(xc4000_attach, &t->fe, t->i2c->adapter, &xc4000_cfg)) goto attach_failed; tune_now = 0; break; } default: if (!dvb_attach(simple_tuner_attach, &t->fe, t->i2c->adapter, t->i2c->addr, t->type)) goto attach_failed; break; } if ((NULL == analog_ops->set_params) && (fe_tuner_ops->set_analog_params)) { t->name = fe_tuner_ops->info.name; t->fe.analog_demod_priv = t; memcpy(analog_ops, &tuner_analog_ops, sizeof(struct analog_demod_ops)); } else { t->name = analog_ops->info.name; } tuner_dbg("type set to %s\n", t->name); t->mode_mask = new_mode_mask; /* Some tuners require more initialization setup before use, such as firmware download or device calibration. trying to set a frequency here will just fail FIXME: better to move set_freq to the tuner code. 
This is needed on analog tuners for PLL to properly work */ if (tune_now) { if (V4L2_TUNER_RADIO == t->mode) set_radio_freq(c, t->radio_freq); else set_tv_freq(c, t->tv_freq); } tuner_dbg("%s %s I2C addr 0x%02x with type %d used for 0x%02x\n", c->adapter->name, c->driver->driver.name, c->addr << 1, type, t->mode_mask); return; attach_failed: tuner_dbg("Tuner attach for type = %d failed.\n", t->type); t->type = TUNER_ABSENT; return; } /** * tuner_s_type_addr - Sets the tuner type for a device * * @sd: subdev descriptor * @tun_setup: type to be associated to a given tuner i2c address * * This function applys the tuner config to tuner specified * by tun_setup structure. * If tuner I2C address is UNSET, then it will only set the device * if the tuner supports the mode specified in the call. * If the address is specified, the change will be applied only if * tuner I2C address matches. * The call can change the tuner number and the tuner mode. */ static int tuner_s_type_addr(struct v4l2_subdev *sd, struct tuner_setup *tun_setup) { struct tuner *t = to_tuner(sd); struct i2c_client *c = v4l2_get_subdevdata(sd); tuner_dbg("Calling set_type_addr for type=%d, addr=0x%02x, mode=0x%02x, config=0x%02x\n", tun_setup->type, tun_setup->addr, tun_setup->mode_mask, tun_setup->config); if ((t->type == UNSET && ((tun_setup->addr == ADDR_UNSET) && (t->mode_mask & tun_setup->mode_mask))) || (tun_setup->addr == c->addr)) { set_type(c, tun_setup->type, tun_setup->mode_mask, tun_setup->config, tun_setup->tuner_callback); } else tuner_dbg("set addr discarded for type %i, mask %x. 
" "Asked to change tuner at addr 0x%02x, with mask %x\n", t->type, t->mode_mask, tun_setup->addr, tun_setup->mode_mask); return 0; } /** * tuner_s_config - Sets tuner configuration * * @sd: subdev descriptor * @cfg: tuner configuration * * Calls tuner set_config() private function to set some tuner-internal * parameters */ static int tuner_s_config(struct v4l2_subdev *sd, const struct v4l2_priv_tun_config *cfg) { struct tuner *t = to_tuner(sd); struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; if (t->type != cfg->tuner) return 0; if (analog_ops->set_config) { analog_ops->set_config(&t->fe, cfg->priv); return 0; } tuner_dbg("Tuner frontend module has no way to set config\n"); return 0; } /** * tuner_lookup - Seek for tuner adapters * * @adap: i2c_adapter struct * @radio: pointer to be filled if the adapter is radio * @tv: pointer to be filled if the adapter is TV * * Search for existing radio and/or TV tuners on the given I2C adapter, * discarding demod-only adapters (tda9887). * * Note that when this function is called from tuner_probe you can be * certain no other devices will be added/deleted at the same time, I2C * core protects against that. */ static void tuner_lookup(struct i2c_adapter *adap, struct tuner **radio, struct tuner **tv) { struct tuner *pos; *radio = NULL; *tv = NULL; list_for_each_entry(pos, &tuner_list, list) { int mode_mask; if (pos->i2c->adapter != adap || strcmp(pos->i2c->driver->driver.name, "tuner")) continue; mode_mask = pos->mode_mask; if (*radio == NULL && mode_mask == T_RADIO) *radio = pos; /* Note: currently TDA9887 is the only demod-only device. If other devices appear then we need to make this test more general. */ else if (*tv == NULL && pos->type != TUNER_TDA9887 && (pos->mode_mask & T_ANALOG_TV)) *tv = pos; } } /** *tuner_probe - Probes the existing tuners on an I2C bus * * @client: i2c_client descriptor * @id: not used * * This routine probes for tuners at the expected I2C addresses. 
On most * cases, if a device answers to a given I2C address, it assumes that the * device is a tuner. On a few cases, however, an additional logic is needed * to double check if the device is really a tuner, or to identify the tuner * type, like on tea5767/5761 devices. * * During client attach, set_type is called by adapter's attach_inform callback. * set_type must then be completed by tuner_probe. */ static int tuner_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct tuner *t; struct tuner *radio; struct tuner *tv; t = kzalloc(sizeof(struct tuner), GFP_KERNEL); if (NULL == t) return -ENOMEM; v4l2_i2c_subdev_init(&t->sd, client, &tuner_ops); t->i2c = client; t->name = "(tuner unset)"; t->type = UNSET; t->audmode = V4L2_TUNER_MODE_STEREO; t->standby = 1; t->radio_freq = 87.5 * 16000; /* Initial freq range */ t->tv_freq = 400 * 16; /* Sets freq to VHF High - needed for some PLL's to properly start */ if (show_i2c) { unsigned char buffer[16]; int i, rc; memset(buffer, 0, sizeof(buffer)); rc = i2c_master_recv(client, buffer, sizeof(buffer)); tuner_info("I2C RECV = "); for (i = 0; i < rc; i++) printk(KERN_CONT "%02x ", buffer[i]); printk("\n"); } /* autodetection code based on the i2c addr */ if (!no_autodetect) { switch (client->addr) { case 0x10: if (tuner_symbol_probe(tea5761_autodetection, t->i2c->adapter, t->i2c->addr) >= 0) { t->type = TUNER_TEA5761; t->mode_mask = T_RADIO; tuner_lookup(t->i2c->adapter, &radio, &tv); if (tv) tv->mode_mask &= ~T_RADIO; goto register_client; } kfree(t); return -ENODEV; case 0x42: case 0x43: case 0x4a: case 0x4b: /* If chip is not tda8290, don't register. 
since it can be tda9887*/ if (tuner_symbol_probe(tda829x_probe, t->i2c->adapter, t->i2c->addr) >= 0) { tuner_dbg("tda829x detected\n"); } else { /* Default is being tda9887 */ t->type = TUNER_TDA9887; t->mode_mask = T_RADIO | T_ANALOG_TV; goto register_client; } break; case 0x60: if (tuner_symbol_probe(tea5767_autodetection, t->i2c->adapter, t->i2c->addr) >= 0) { t->type = TUNER_TEA5767; t->mode_mask = T_RADIO; /* Sets freq to FM range */ tuner_lookup(t->i2c->adapter, &radio, &tv); if (tv) tv->mode_mask &= ~T_RADIO; goto register_client; } break; } } /* Initializes only the first TV tuner on this adapter. Why only the first? Because there are some devices (notably the ones with TI tuners) that have more than one i2c address for the *same* device. Experience shows that, except for just one case, the first address is the right one. The exception is a Russian tuner (ACORP_Y878F). So, the desired behavior is just to enable the first found TV tuner. */ tuner_lookup(t->i2c->adapter, &radio, &tv); if (tv == NULL) { t->mode_mask = T_ANALOG_TV; if (radio == NULL) t->mode_mask |= T_RADIO; tuner_dbg("Setting mode_mask to 0x%02x\n", t->mode_mask); } /* Should be just before return */ register_client: /* Sets a default mode */ if (t->mode_mask & T_ANALOG_TV) t->mode = V4L2_TUNER_ANALOG_TV; else t->mode = V4L2_TUNER_RADIO; set_type(client, t->type, t->mode_mask, t->config, t->fe.callback); list_add_tail(&t->list, &tuner_list); tuner_info("Tuner %d found with type(s)%s%s.\n", t->type, t->mode_mask & T_RADIO ? " Radio" : "", t->mode_mask & T_ANALOG_TV ? 
" TV" : ""); return 0; } /** * tuner_remove - detaches a tuner * * @client: i2c_client descriptor */ static int tuner_remove(struct i2c_client *client) { struct tuner *t = to_tuner(i2c_get_clientdata(client)); v4l2_device_unregister_subdev(&t->sd); tuner_detach(&t->fe); t->fe.analog_demod_priv = NULL; list_del(&t->list); kfree(t); return 0; } /* * Functions to switch between Radio and TV * * A few cards have a separate I2C tuner for radio. Those routines * take care of switching between TV/Radio mode, filtering only the * commands that apply to the Radio or TV tuner. */ /** * check_mode - Verify if tuner supports the requested mode * @t: a pointer to the module's internal struct_tuner * * This function checks if the tuner is capable of tuning analog TV, * digital TV or radio, depending on what the caller wants. If the * tuner can't support that mode, it returns -EINVAL. Otherwise, it * returns 0. * This function is needed for boards that have a separate tuner for * radio (like devices with tea5767). * NOTE: mt20xx uses V4L2_TUNER_DIGITAL_TV and calls set_tv_freq to * select a TV frequency. So, t_mode = T_ANALOG_TV could actually * be used to represent a Digital TV too. */ static inline int check_mode(struct tuner *t, enum v4l2_tuner_type mode) { int t_mode; if (mode == V4L2_TUNER_RADIO) t_mode = T_RADIO; else t_mode = T_ANALOG_TV; if ((t_mode & t->mode_mask) == 0) return -EINVAL; return 0; } /** * set_mode - Switch tuner to other mode. * @t: a pointer to the module's internal struct_tuner * @mode: enum v4l2_type (radio or TV) * * If tuner doesn't support the needed mode (radio or TV), prints a * debug message and returns -EINVAL, changing its state to standby. * Otherwise, changes the mode and returns 0. */ static int set_mode(struct tuner *t, enum v4l2_tuner_type mode) { struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; if (mode != t->mode) { if (check_mode(t, mode) == -EINVAL) { tuner_dbg("Tuner doesn't support mode %d. 
" "Putting tuner to sleep\n", mode); t->standby = true; if (analog_ops->standby) analog_ops->standby(&t->fe); return -EINVAL; } t->mode = mode; tuner_dbg("Changing to mode %d\n", mode); } return 0; } /** * set_freq - Set the tuner to the desired frequency. * @t: a pointer to the module's internal struct_tuner * @freq: frequency to set (0 means to use the current frequency) */ static void set_freq(struct tuner *t, unsigned int freq) { struct i2c_client *client = v4l2_get_subdevdata(&t->sd); if (t->mode == V4L2_TUNER_RADIO) { if (!freq) freq = t->radio_freq; set_radio_freq(client, freq); } else { if (!freq) freq = t->tv_freq; set_tv_freq(client, freq); } } /* * Functions that are specific for TV mode */ /** * set_tv_freq - Set tuner frequency, freq in Units of 62.5 kHz = 1/16MHz * * @c: i2c_client descriptor * @freq: frequency */ static void set_tv_freq(struct i2c_client *c, unsigned int freq) { struct tuner *t = to_tuner(i2c_get_clientdata(c)); struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; struct analog_parameters params = { .mode = t->mode, .audmode = t->audmode, .std = t->std }; if (t->type == UNSET) { tuner_warn("tuner type not set\n"); return; } if (NULL == analog_ops->set_params) { tuner_warn("Tuner has no way to set tv freq\n"); return; } if (freq < tv_range[0] * 16 || freq > tv_range[1] * 16) { tuner_dbg("TV freq (%d.%02d) out of range (%d-%d)\n", freq / 16, freq % 16 * 100 / 16, tv_range[0], tv_range[1]); /* V4L2 spec: if the freq is not possible then the closest possible value should be selected */ if (freq < tv_range[0] * 16) freq = tv_range[0] * 16; else freq = tv_range[1] * 16; } params.frequency = freq; tuner_dbg("tv freq set to %d.%02d\n", freq / 16, freq % 16 * 100 / 16); t->tv_freq = freq; t->standby = false; analog_ops->set_params(&t->fe, &params); } /** * tuner_fixup_std - force a given video standard variant * * @t: tuner internal struct * @std: TV standard * * A few devices or drivers have problem to detect some standard 
variations. * On other operational systems, the drivers generally have a per-country * code, and some logic to apply per-country hacks. V4L2 API doesn't provide * such hacks. Instead, it relies on a proper video standard selection from * the userspace application. However, as some apps are buggy, not allowing * to distinguish all video standard variations, a modprobe parameter can * be used to force a video standard match. */ static v4l2_std_id tuner_fixup_std(struct tuner *t, v4l2_std_id std) { if (pal[0] != '-' && (std & V4L2_STD_PAL) == V4L2_STD_PAL) { switch (pal[0]) { case '6': return V4L2_STD_PAL_60; case 'b': case 'B': case 'g': case 'G': return V4L2_STD_PAL_BG; case 'i': case 'I': return V4L2_STD_PAL_I; case 'd': case 'D': case 'k': case 'K': return V4L2_STD_PAL_DK; case 'M': case 'm': return V4L2_STD_PAL_M; case 'N': case 'n': if (pal[1] == 'c' || pal[1] == 'C') return V4L2_STD_PAL_Nc; return V4L2_STD_PAL_N; default: tuner_warn("pal= argument not recognised\n"); break; } } if (secam[0] != '-' && (std & V4L2_STD_SECAM) == V4L2_STD_SECAM) { switch (secam[0]) { case 'b': case 'B': case 'g': case 'G': case 'h': case 'H': return V4L2_STD_SECAM_B | V4L2_STD_SECAM_G | V4L2_STD_SECAM_H; case 'd': case 'D': case 'k': case 'K': return V4L2_STD_SECAM_DK; case 'l': case 'L': if ((secam[1] == 'C') || (secam[1] == 'c')) return V4L2_STD_SECAM_LC; return V4L2_STD_SECAM_L; default: tuner_warn("secam= argument not recognised\n"); break; } } if (ntsc[0] != '-' && (std & V4L2_STD_NTSC) == V4L2_STD_NTSC) { switch (ntsc[0]) { case 'm': case 'M': return V4L2_STD_NTSC_M; case 'j': case 'J': return V4L2_STD_NTSC_M_JP; case 'k': case 'K': return V4L2_STD_NTSC_M_KR; default: tuner_info("ntsc= argument not recognised\n"); break; } } return std; } /* * Functions that are specific for Radio mode */ /** * set_radio_freq - Set tuner frequency, freq in Units of 62.5 Hz = 1/16kHz * * @c: i2c_client descriptor * @freq: frequency */ static void set_radio_freq(struct i2c_client *c, unsigned 
int freq) { struct tuner *t = to_tuner(i2c_get_clientdata(c)); struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; struct analog_parameters params = { .mode = t->mode, .audmode = t->audmode, .std = t->std }; if (t->type == UNSET) { tuner_warn("tuner type not set\n"); return; } if (NULL == analog_ops->set_params) { tuner_warn("tuner has no way to set radio frequency\n"); return; } if (freq < radio_range[0] * 16000 || freq > radio_range[1] * 16000) { tuner_dbg("radio freq (%d.%02d) out of range (%d-%d)\n", freq / 16000, freq % 16000 * 100 / 16000, radio_range[0], radio_range[1]); /* V4L2 spec: if the freq is not possible then the closest possible value should be selected */ if (freq < radio_range[0] * 16000) freq = radio_range[0] * 16000; else freq = radio_range[1] * 16000; } params.frequency = freq; tuner_dbg("radio freq set to %d.%02d\n", freq / 16000, freq % 16000 * 100 / 16000); t->radio_freq = freq; t->standby = false; analog_ops->set_params(&t->fe, &params); } /* * Debug function for reporting tuner status to userspace */ /** * tuner_status - Dumps the current tuner status at dmesg * @fe: pointer to struct dvb_frontend * * This callback is used only for driver debug purposes, answering to * VIDIOC_LOG_STATUS. No changes should happen on this call. */ static void tuner_status(struct dvb_frontend *fe) { struct tuner *t = fe->analog_demod_priv; unsigned long freq, freq_fraction; struct dvb_tuner_ops *fe_tuner_ops = &fe->ops.tuner_ops; struct analog_demod_ops *analog_ops = &fe->ops.analog_ops; const char *p; switch (t->mode) { case V4L2_TUNER_RADIO: p = "radio"; break; case V4L2_TUNER_DIGITAL_TV: /* Used by mt20xx */ p = "digital TV"; break; case V4L2_TUNER_ANALOG_TV: default: p = "analog TV"; break; } if (t->mode == V4L2_TUNER_RADIO) { freq = t->radio_freq / 16000; freq_fraction = (t->radio_freq % 16000) * 100 / 16000; } else { freq = t->tv_freq / 16; freq_fraction = (t->tv_freq % 16) * 100 / 16; } tuner_info("Tuner mode: %s%s\n", p, t->standby ? 
" on standby mode" : ""); tuner_info("Frequency: %lu.%02lu MHz\n", freq, freq_fraction); tuner_info("Standard: 0x%08lx\n", (unsigned long)t->std); if (t->mode != V4L2_TUNER_RADIO) return; if (fe_tuner_ops->get_status) { u32 tuner_status; fe_tuner_ops->get_status(&t->fe, &tuner_status); if (tuner_status & TUNER_STATUS_LOCKED) tuner_info("Tuner is locked.\n"); if (tuner_status & TUNER_STATUS_STEREO) tuner_info("Stereo: yes\n"); } if (analog_ops->has_signal) tuner_info("Signal strength: %d\n", analog_ops->has_signal(fe)); } /* * Function to splicitly change mode to radio. Probably not needed anymore */ static int tuner_s_radio(struct v4l2_subdev *sd) { struct tuner *t = to_tuner(sd); if (set_mode(t, V4L2_TUNER_RADIO) == 0) set_freq(t, 0); return 0; } /* * Tuner callbacks to handle userspace ioctl's */ /** * tuner_s_power - controls the power state of the tuner * @sd: pointer to struct v4l2_subdev * @on: a zero value puts the tuner to sleep, non-zero wakes it up */ static int tuner_s_power(struct v4l2_subdev *sd, int on) { struct tuner *t = to_tuner(sd); struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; if (on) { if (t->standby && set_mode(t, t->mode) == 0) { tuner_dbg("Waking up tuner\n"); set_freq(t, 0); } return 0; } tuner_dbg("Putting tuner to sleep\n"); t->standby = true; if (analog_ops->standby) analog_ops->standby(&t->fe); return 0; } static int tuner_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct tuner *t = to_tuner(sd); if (set_mode(t, V4L2_TUNER_ANALOG_TV)) return 0; t->std = tuner_fixup_std(t, std); if (t->std != std) tuner_dbg("Fixup standard %llx to %llx\n", std, t->std); set_freq(t, 0); return 0; } static int tuner_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f) { struct tuner *t = to_tuner(sd); if (set_mode(t, f->type) == 0) set_freq(t, f->frequency); return 0; } /** * tuner_g_frequency - Get the tuned frequency for the tuner * @sd: pointer to struct v4l2_subdev * @f: pointer to struct v4l2_frequency * * At return, the 
structure f will be filled with tuner frequency * if the tuner matches the f->type. * Note: f->type should be initialized before calling it. * This is done by either video_ioctl2 or by the bridge driver. */ static int tuner_g_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *f) { struct tuner *t = to_tuner(sd); struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; if (check_mode(t, f->type) == -EINVAL) return 0; if (f->type == t->mode && fe_tuner_ops->get_frequency && !t->standby) { u32 abs_freq; fe_tuner_ops->get_frequency(&t->fe, &abs_freq); f->frequency = (V4L2_TUNER_RADIO == t->mode) ? DIV_ROUND_CLOSEST(abs_freq * 2, 125) : DIV_ROUND_CLOSEST(abs_freq, 62500); } else { f->frequency = (V4L2_TUNER_RADIO == f->type) ? t->radio_freq : t->tv_freq; } return 0; } /** * tuner_g_tuner - Fill in tuner information * @sd: pointer to struct v4l2_subdev * @vt: pointer to struct v4l2_tuner * * At return, the structure vt will be filled with tuner information * if the tuner matches vt->type. * Note: vt->type should be initialized before calling it. * This is done by either video_ioctl2 or by the bridge driver. */ static int tuner_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct tuner *t = to_tuner(sd); struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; struct dvb_tuner_ops *fe_tuner_ops = &t->fe.ops.tuner_ops; if (check_mode(t, vt->type) == -EINVAL) return 0; if (vt->type == t->mode && analog_ops->get_afc) vt->afc = analog_ops->get_afc(&t->fe); if (t->mode != V4L2_TUNER_RADIO) { vt->capability |= V4L2_TUNER_CAP_NORM; vt->rangelow = tv_range[0] * 16; vt->rangehigh = tv_range[1] * 16; return 0; } /* radio mode */ if (vt->type == t->mode) { vt->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO; if (fe_tuner_ops->get_status) { u32 tuner_status; fe_tuner_ops->get_status(&t->fe, &tuner_status); vt->rxsubchans = (tuner_status & TUNER_STATUS_STEREO) ? 
V4L2_TUNER_SUB_STEREO : V4L2_TUNER_SUB_MONO; } if (analog_ops->has_signal) vt->signal = analog_ops->has_signal(&t->fe); vt->audmode = t->audmode; } vt->capability |= V4L2_TUNER_CAP_LOW | V4L2_TUNER_CAP_STEREO; vt->rangelow = radio_range[0] * 16000; vt->rangehigh = radio_range[1] * 16000; return 0; } /** * tuner_s_tuner - Set the tuner's audio mode * @sd: pointer to struct v4l2_subdev * @vt: pointer to struct v4l2_tuner * * Sets the audio mode if the tuner matches vt->type. * Note: vt->type should be initialized before calling it. * This is done by either video_ioctl2 or by the bridge driver. */ static int tuner_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct tuner *t = to_tuner(sd); if (set_mode(t, vt->type)) return 0; if (t->mode == V4L2_TUNER_RADIO) t->audmode = vt->audmode; set_freq(t, 0); return 0; } static int tuner_log_status(struct v4l2_subdev *sd) { struct tuner *t = to_tuner(sd); struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; if (analog_ops->tuner_status) analog_ops->tuner_status(&t->fe); return 0; } static int tuner_suspend(struct i2c_client *c, pm_message_t state) { struct tuner *t = to_tuner(i2c_get_clientdata(c)); struct analog_demod_ops *analog_ops = &t->fe.ops.analog_ops; tuner_dbg("suspend\n"); if (!t->standby && analog_ops->standby) analog_ops->standby(&t->fe); return 0; } static int tuner_resume(struct i2c_client *c) { struct tuner *t = to_tuner(i2c_get_clientdata(c)); tuner_dbg("resume\n"); if (!t->standby) if (set_mode(t, t->mode) == 0) set_freq(t, 0); return 0; } static int tuner_command(struct i2c_client *client, unsigned cmd, void *arg) { struct v4l2_subdev *sd = i2c_get_clientdata(client); /* TUNER_SET_CONFIG is still called by tuner-simple.c, so we have to handle it here. There must be a better way of doing this... 
*/ switch (cmd) { case TUNER_SET_CONFIG: return tuner_s_config(sd, arg); } return -ENOIOCTLCMD; } /* * Callback structs */ static const struct v4l2_subdev_core_ops tuner_core_ops = { .log_status = tuner_log_status, .s_std = tuner_s_std, .s_power = tuner_s_power, }; static const struct v4l2_subdev_tuner_ops tuner_tuner_ops = { .s_radio = tuner_s_radio, .g_tuner = tuner_g_tuner, .s_tuner = tuner_s_tuner, .s_frequency = tuner_s_frequency, .g_frequency = tuner_g_frequency, .s_type_addr = tuner_s_type_addr, .s_config = tuner_s_config, }; static const struct v4l2_subdev_ops tuner_ops = { .core = &tuner_core_ops, .tuner = &tuner_tuner_ops, }; /* * I2C structs and module init functions */ static const struct i2c_device_id tuner_id[] = { { "tuner", }, /* autodetect */ { } }; MODULE_DEVICE_TABLE(i2c, tuner_id); static struct i2c_driver tuner_driver = { .driver = { .owner = THIS_MODULE, .name = "tuner", }, .probe = tuner_probe, .remove = tuner_remove, .command = tuner_command, .suspend = tuner_suspend, .resume = tuner_resume, .id_table = tuner_id, }; module_i2c_driver(tuner_driver); MODULE_DESCRIPTION("device driver for various TV and TV+FM radio tuners"); MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer"); MODULE_LICENSE("GPL");
gpl-2.0
skelitonlord/android_kernel_samsung_matissewifi
arch/arm/mach-ux500/id.c
4811
2324
/* * Copyright (C) ST-Ericsson SA 2010 * * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson * License terms: GNU General Public License (GPL) version 2 */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <asm/cputype.h> #include <asm/tlbflush.h> #include <asm/cacheflush.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include <mach/setup.h> struct dbx500_asic_id dbx500_id; static unsigned int ux500_read_asicid(phys_addr_t addr) { phys_addr_t base = addr & ~0xfff; struct map_desc desc = { .virtual = IO_ADDRESS(base), .pfn = __phys_to_pfn(base), .length = SZ_16K, .type = MT_DEVICE, }; iotable_init(&desc, 1); /* As in devicemaps_init() */ local_flush_tlb_all(); flush_cache_all(); return readl(__io_address(addr)); } static void ux500_print_soc_info(unsigned int asicid) { unsigned int rev = dbx500_revision(); pr_info("DB%4x ", dbx500_partnumber()); if (rev == 0x01) pr_cont("Early Drop"); else if (rev >= 0xA0) pr_cont("v%d.%d" , (rev >> 4) - 0xA + 1, rev & 0xf); else pr_cont("Unknown"); pr_cont(" [%#010x]\n", asicid); } static unsigned int partnumber(unsigned int asicid) { return (asicid >> 8) & 0xffff; } /* * SOC MIDR ASICID ADDRESS ASICID VALUE * DB8500ed 0x410fc090 0x9001FFF4 0x00850001 * DB8500v1 0x411fc091 0x9001FFF4 0x008500A0 * DB8500v1.1 0x411fc091 0x9001FFF4 0x008500A1 * DB8500v2 0x412fc091 0x9001DBF4 0x008500B0 * DB8520v2.2 0x412fc091 0x9001DBF4 0x008500B2 * DB5500v1 0x412fc091 0x9001FFF4 0x005500A0 */ void __init ux500_map_io(void) { unsigned int cpuid = read_cpuid_id(); unsigned int asicid = 0; phys_addr_t addr = 0; switch (cpuid) { case 0x410fc090: /* DB8500ed */ case 0x411fc091: /* DB8500v1 */ addr = 0x9001FFF4; break; case 0x412fc091: /* DB8520 / DB8500v2 / DB5500v1 */ asicid = ux500_read_asicid(0x9001DBF4); if (partnumber(asicid) == 0x8500 || partnumber(asicid) == 0x8520) /* DB8500v2 */ break; /* DB5500v1 */ addr = 0x9001FFF4; break; } if (addr) asicid = ux500_read_asicid(addr); if (!asicid) { 
pr_err("Unable to identify SoC\n"); ux500_unknown_soc(); } dbx500_id.process = asicid >> 24; dbx500_id.partnumber = partnumber(asicid); dbx500_id.revision = asicid & 0xff; ux500_print_soc_info(asicid); }
gpl-2.0
kbc-developers/android_kernel_samsung_jfdcm
drivers/net/ethernet/dnet.c
4811
25701
/* * Dave DNET Ethernet Controller driver * * Copyright (C) 2008 Dave S.r.l. <www.dave.eu> * Copyright (C) 2009 Ilya Yanok, Emcraft Systems Ltd, <yanok@emcraft.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/io.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/dma-mapping.h> #include <linux/platform_device.h> #include <linux/phy.h> #include "dnet.h" #undef DEBUG /* function for reading internal MAC register */ static u16 dnet_readw_mac(struct dnet *bp, u16 reg) { u16 data_read; /* issue a read */ dnet_writel(bp, reg, MACREG_ADDR); /* since a read/write op to the MAC is very slow, * we must wait before reading the data */ ndelay(500); /* read data read from the MAC register */ data_read = dnet_readl(bp, MACREG_DATA); /* all done */ return data_read; } /* function for writing internal MAC register */ static void dnet_writew_mac(struct dnet *bp, u16 reg, u16 val) { /* load data to write */ dnet_writel(bp, val, MACREG_DATA); /* issue a write */ dnet_writel(bp, reg | DNET_INTERNAL_WRITE, MACREG_ADDR); /* since a read/write op to the MAC is very slow, * we must wait before exiting */ ndelay(500); } static void __dnet_set_hwaddr(struct dnet *bp) { u16 tmp; tmp = be16_to_cpup((__be16 *)bp->dev->dev_addr); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG, tmp); tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 2)); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG, tmp); tmp = be16_to_cpup((__be16 *)(bp->dev->dev_addr + 4)); dnet_writew_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG, tmp); } static void __devinit dnet_get_hwaddr(struct dnet *bp) { u16 tmp; u8 addr[6]; /* * from MAC docs: * 
"Note that the MAC address is stored in the registers in Hexadecimal * form. For example, to set the MAC Address to: AC-DE-48-00-00-80 * would require writing 0xAC (octet 0) to address 0x0B (high byte of * Mac_addr[15:0]), 0xDE (octet 1) to address 0x0A (Low byte of * Mac_addr[15:0]), 0x48 (octet 2) to address 0x0D (high byte of * Mac_addr[15:0]), 0x00 (octet 3) to address 0x0C (Low byte of * Mac_addr[15:0]), 0x00 (octet 4) to address 0x0F (high byte of * Mac_addr[15:0]), and 0x80 (octet 5) to address * 0x0E (Low byte of * Mac_addr[15:0]). */ tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_0_REG); *((__be16 *)addr) = cpu_to_be16(tmp); tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_1_REG); *((__be16 *)(addr + 2)) = cpu_to_be16(tmp); tmp = dnet_readw_mac(bp, DNET_INTERNAL_MAC_ADDR_2_REG); *((__be16 *)(addr + 4)) = cpu_to_be16(tmp); if (is_valid_ether_addr(addr)) memcpy(bp->dev->dev_addr, addr, sizeof(addr)); } static int dnet_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct dnet *bp = bus->priv; u16 value; while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) & DNET_INTERNAL_GMII_MNG_CMD_FIN)) cpu_relax(); /* only 5 bits allowed for phy-addr and reg_offset */ mii_id &= 0x1f; regnum &= 0x1f; /* prepare reg_value for a read */ value = (mii_id << 8); value |= regnum; /* write control word */ dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, value); /* wait for end of transfer */ while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) & DNET_INTERNAL_GMII_MNG_CMD_FIN)) cpu_relax(); value = dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG); pr_debug("mdio_read %02x:%02x <- %04x\n", mii_id, regnum, value); return value; } static int dnet_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct dnet *bp = bus->priv; u16 tmp; pr_debug("mdio_write %02x:%02x <- %04x\n", mii_id, regnum, value); while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) & DNET_INTERNAL_GMII_MNG_CMD_FIN)) cpu_relax(); /* prepare for a write 
operation */ tmp = (1 << 13); /* only 5 bits allowed for phy-addr and reg_offset */ mii_id &= 0x1f; regnum &= 0x1f; /* only 16 bits on data */ value &= 0xffff; /* prepare reg_value for a write */ tmp |= (mii_id << 8); tmp |= regnum; /* write data to write first */ dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_DAT_REG, value); /* write control word */ dnet_writew_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG, tmp); while (!(dnet_readw_mac(bp, DNET_INTERNAL_GMII_MNG_CTL_REG) & DNET_INTERNAL_GMII_MNG_CMD_FIN)) cpu_relax(); return 0; } static int dnet_mdio_reset(struct mii_bus *bus) { return 0; } static void dnet_handle_link_change(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; unsigned long flags; u32 mode_reg, ctl_reg; int status_change = 0; spin_lock_irqsave(&bp->lock, flags); mode_reg = dnet_readw_mac(bp, DNET_INTERNAL_MODE_REG); ctl_reg = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); if (phydev->link) { if (bp->duplex != phydev->duplex) { if (phydev->duplex) ctl_reg &= ~(DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP); else ctl_reg |= DNET_INTERNAL_RXTX_CONTROL_ENABLEHALFDUP; bp->duplex = phydev->duplex; status_change = 1; } if (bp->speed != phydev->speed) { status_change = 1; switch (phydev->speed) { case 1000: mode_reg |= DNET_INTERNAL_MODE_GBITEN; break; case 100: case 10: mode_reg &= ~DNET_INTERNAL_MODE_GBITEN; break; default: printk(KERN_WARNING "%s: Ack! 
Speed (%d) is not " "10/100/1000!\n", dev->name, phydev->speed); break; } bp->speed = phydev->speed; } } if (phydev->link != bp->link) { if (phydev->link) { mode_reg |= (DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN); } else { mode_reg &= ~(DNET_INTERNAL_MODE_RXEN | DNET_INTERNAL_MODE_TXEN); bp->speed = 0; bp->duplex = -1; } bp->link = phydev->link; status_change = 1; } if (status_change) { dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, ctl_reg); dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, mode_reg); } spin_unlock_irqrestore(&bp->lock, flags); if (status_change) { if (phydev->link) printk(KERN_INFO "%s: link up (%d/%s)\n", dev->name, phydev->speed, DUPLEX_FULL == phydev->duplex ? "Full" : "Half"); else printk(KERN_INFO "%s: link down\n", dev->name); } } static int dnet_mii_probe(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = NULL; int phy_addr; /* find the first phy */ for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { if (bp->mii_bus->phy_map[phy_addr]) { phydev = bp->mii_bus->phy_map[phy_addr]; break; } } if (!phydev) { printk(KERN_ERR "%s: no PHY found\n", dev->name); return -ENODEV; } /* TODO : add pin_irq */ /* attach the mac to the phy */ if (bp->capabilities & DNET_HAS_RMII) { phydev = phy_connect(dev, dev_name(&phydev->dev), &dnet_handle_link_change, 0, PHY_INTERFACE_MODE_RMII); } else { phydev = phy_connect(dev, dev_name(&phydev->dev), &dnet_handle_link_change, 0, PHY_INTERFACE_MODE_MII); } if (IS_ERR(phydev)) { printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); return PTR_ERR(phydev); } /* mask with MAC supported features */ if (bp->capabilities & DNET_HAS_GIGABIT) phydev->supported &= PHY_GBIT_FEATURES; else phydev->supported &= PHY_BASIC_FEATURES; phydev->supported |= SUPPORTED_Asym_Pause | SUPPORTED_Pause; phydev->advertising = phydev->supported; bp->link = 0; bp->speed = 0; bp->duplex = -1; bp->phy_dev = phydev; return 0; } static int dnet_mii_init(struct dnet *bp) { int err, i; 
bp->mii_bus = mdiobus_alloc(); if (bp->mii_bus == NULL) return -ENOMEM; bp->mii_bus->name = "dnet_mii_bus"; bp->mii_bus->read = &dnet_mdio_read; bp->mii_bus->write = &dnet_mdio_write; bp->mii_bus->reset = &dnet_mdio_reset; snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", bp->pdev->name, bp->pdev->id); bp->mii_bus->priv = bp; bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!bp->mii_bus->irq) { err = -ENOMEM; goto err_out; } for (i = 0; i < PHY_MAX_ADDR; i++) bp->mii_bus->irq[i] = PHY_POLL; if (mdiobus_register(bp->mii_bus)) { err = -ENXIO; goto err_out_free_mdio_irq; } if (dnet_mii_probe(bp->dev) != 0) { err = -ENXIO; goto err_out_unregister_bus; } return 0; err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); err_out_free_mdio_irq: kfree(bp->mii_bus->irq); err_out: mdiobus_free(bp->mii_bus); return err; } /* For Neptune board: LINK1000 as Link LED and TX as activity LED */ static int dnet_phy_marvell_fixup(struct phy_device *phydev) { return phy_write(phydev, 0x18, 0x4148); } static void dnet_update_stats(struct dnet *bp) { u32 __iomem *reg = bp->regs + DNET_RX_PKT_IGNR_CNT; u32 *p = &bp->hw_stats.rx_pkt_ignr; u32 *end = &bp->hw_stats.rx_byte + 1; WARN_ON((unsigned long)(end - p - 1) != (DNET_RX_BYTE_CNT - DNET_RX_PKT_IGNR_CNT) / 4); for (; p < end; p++, reg++) *p += readl(reg); reg = bp->regs + DNET_TX_UNICAST_CNT; p = &bp->hw_stats.tx_unicast; end = &bp->hw_stats.tx_byte + 1; WARN_ON((unsigned long)(end - p - 1) != (DNET_TX_BYTE_CNT - DNET_TX_UNICAST_CNT) / 4); for (; p < end; p++, reg++) *p += readl(reg); } static int dnet_poll(struct napi_struct *napi, int budget) { struct dnet *bp = container_of(napi, struct dnet, napi); struct net_device *dev = bp->dev; int npackets = 0; unsigned int pkt_len; struct sk_buff *skb; unsigned int *data_ptr; u32 int_enable; u32 cmd_word; int i; while (npackets < budget) { /* * break out of while loop if there are no more * packets waiting */ if (!(dnet_readl(bp, RX_FIFO_WCNT) >> 16)) { 
napi_complete(napi); int_enable = dnet_readl(bp, INTR_ENB); int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); return 0; } cmd_word = dnet_readl(bp, RX_LEN_FIFO); pkt_len = cmd_word & 0xFFFF; if (cmd_word & 0xDF180000) printk(KERN_ERR "%s packet receive error %x\n", __func__, cmd_word); skb = netdev_alloc_skb(dev, pkt_len + 5); if (skb != NULL) { /* Align IP on 16 byte boundaries */ skb_reserve(skb, 2); /* * 'skb_put()' points to the start of sk_buff * data area. */ data_ptr = (unsigned int *)skb_put(skb, pkt_len); for (i = 0; i < (pkt_len + 3) >> 2; i++) *data_ptr++ = dnet_readl(bp, RX_DATA_FIFO); skb->protocol = eth_type_trans(skb, dev); netif_receive_skb(skb); npackets++; } else printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of " "size %u.\n", dev->name, pkt_len); } budget -= npackets; if (npackets < budget) { /* We processed all packets available. Tell NAPI it can * stop polling then re-enable rx interrupts */ napi_complete(napi); int_enable = dnet_readl(bp, INTR_ENB); int_enable |= DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); return 0; } /* There are still packets waiting */ return 1; } static irqreturn_t dnet_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct dnet *bp = netdev_priv(dev); u32 int_src, int_enable, int_current; unsigned long flags; unsigned int handled = 0; spin_lock_irqsave(&bp->lock, flags); /* read and clear the DNET irq (clear on read) */ int_src = dnet_readl(bp, INTR_SRC); int_enable = dnet_readl(bp, INTR_ENB); int_current = int_src & int_enable; /* restart the queue if we had stopped it for TX fifo almost full */ if (int_current & DNET_INTR_SRC_TX_FIFOAE) { int_enable = dnet_readl(bp, INTR_ENB); int_enable &= ~DNET_INTR_ENB_TX_FIFOAE; dnet_writel(bp, int_enable, INTR_ENB); netif_wake_queue(dev); handled = 1; } /* RX FIFO error checking */ if (int_current & (DNET_INTR_SRC_RX_CMDFIFOFF | DNET_INTR_SRC_RX_DATAFIFOFF)) { printk(KERN_ERR "%s: RX fifo error %x, 
irq %x\n", __func__, dnet_readl(bp, RX_STATUS), int_current); /* we can only flush the RX FIFOs */ dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH, SYS_CTL); ndelay(500); dnet_writel(bp, 0, SYS_CTL); handled = 1; } /* TX FIFO error checking */ if (int_current & (DNET_INTR_SRC_TX_FIFOFULL | DNET_INTR_SRC_TX_DISCFRM)) { printk(KERN_ERR "%s: TX fifo error %x, irq %x\n", __func__, dnet_readl(bp, TX_STATUS), int_current); /* we can only flush the TX FIFOs */ dnet_writel(bp, DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL); ndelay(500); dnet_writel(bp, 0, SYS_CTL); handled = 1; } if (int_current & DNET_INTR_SRC_RX_CMDFIFOAF) { if (napi_schedule_prep(&bp->napi)) { /* * There's no point taking any more interrupts * until we have processed the buffers */ /* Disable Rx interrupts and schedule NAPI poll */ int_enable = dnet_readl(bp, INTR_ENB); int_enable &= ~DNET_INTR_SRC_RX_CMDFIFOAF; dnet_writel(bp, int_enable, INTR_ENB); __napi_schedule(&bp->napi); } handled = 1; } if (!handled) pr_debug("%s: irq %x remains\n", __func__, int_current); spin_unlock_irqrestore(&bp->lock, flags); return IRQ_RETVAL(handled); } #ifdef DEBUG static inline void dnet_print_skb(struct sk_buff *skb) { int k; printk(KERN_DEBUG PFX "data:"); for (k = 0; k < skb->len; k++) printk(" %02x", (unsigned int)skb->data[k]); printk("\n"); } #else #define dnet_print_skb(skb) do {} while (0) #endif static netdev_tx_t dnet_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct dnet *bp = netdev_priv(dev); u32 tx_status, irq_enable; unsigned int len, i, tx_cmd, wrsz; unsigned long flags; unsigned int *bufp; tx_status = dnet_readl(bp, TX_STATUS); pr_debug("start_xmit: len %u head %p data %p\n", skb->len, skb->head, skb->data); dnet_print_skb(skb); /* frame size (words) */ len = (skb->len + 3) >> 2; spin_lock_irqsave(&bp->lock, flags); tx_status = dnet_readl(bp, TX_STATUS); bufp = (unsigned int *)(((unsigned long) skb->data) & ~0x3UL); wrsz = (u32) skb->len + 3; wrsz += ((unsigned long) skb->data) & 0x3; wrsz >>= 2; tx_cmd = 
((((unsigned long)(skb->data)) & 0x03) << 16) | (u32) skb->len; /* check if there is enough room for the current frame */ if (wrsz < (DNET_FIFO_SIZE - dnet_readl(bp, TX_FIFO_WCNT))) { for (i = 0; i < wrsz; i++) dnet_writel(bp, *bufp++, TX_DATA_FIFO); /* * inform MAC that a packet's written and ready to be * shipped out */ dnet_writel(bp, tx_cmd, TX_LEN_FIFO); } if (dnet_readl(bp, TX_FIFO_WCNT) > DNET_FIFO_TX_DATA_AF_TH) { netif_stop_queue(dev); tx_status = dnet_readl(bp, INTR_SRC); irq_enable = dnet_readl(bp, INTR_ENB); irq_enable |= DNET_INTR_ENB_TX_FIFOAE; dnet_writel(bp, irq_enable, INTR_ENB); } skb_tx_timestamp(skb); /* free the buffer */ dev_kfree_skb(skb); spin_unlock_irqrestore(&bp->lock, flags); return NETDEV_TX_OK; } static void dnet_reset_hw(struct dnet *bp) { /* put ts_mac in IDLE state i.e. disable rx/tx */ dnet_writew_mac(bp, DNET_INTERNAL_MODE_REG, DNET_INTERNAL_MODE_FCEN); /* * RX FIFO almost full threshold: only cmd FIFO almost full is * implemented for RX side */ dnet_writel(bp, DNET_FIFO_RX_CMD_AF_TH, RX_FIFO_TH); /* * TX FIFO almost empty threshold: only data FIFO almost empty * is implemented for TX side */ dnet_writel(bp, DNET_FIFO_TX_DATA_AE_TH, TX_FIFO_TH); /* flush rx/tx fifos */ dnet_writel(bp, DNET_SYS_CTL_RXFIFOFLUSH | DNET_SYS_CTL_TXFIFOFLUSH, SYS_CTL); msleep(1); dnet_writel(bp, 0, SYS_CTL); } static void dnet_init_hw(struct dnet *bp) { u32 config; dnet_reset_hw(bp); __dnet_set_hwaddr(bp); config = dnet_readw_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG); if (bp->dev->flags & IFF_PROMISC) /* Copy All Frames */ config |= DNET_INTERNAL_RXTX_CONTROL_ENPROMISC; if (!(bp->dev->flags & IFF_BROADCAST)) /* No BroadCast */ config |= DNET_INTERNAL_RXTX_CONTROL_RXMULTICAST; config |= DNET_INTERNAL_RXTX_CONTROL_RXPAUSE | DNET_INTERNAL_RXTX_CONTROL_RXBROADCAST | DNET_INTERNAL_RXTX_CONTROL_DROPCONTROL | DNET_INTERNAL_RXTX_CONTROL_DISCFXFCS; dnet_writew_mac(bp, DNET_INTERNAL_RXTX_CONTROL_REG, config); /* clear irq before enabling them */ config = 
dnet_readl(bp, INTR_SRC); /* enable RX/TX interrupt, recv packet ready interrupt */ dnet_writel(bp, DNET_INTR_ENB_GLOBAL_ENABLE | DNET_INTR_ENB_RX_SUMMARY | DNET_INTR_ENB_TX_SUMMARY | DNET_INTR_ENB_RX_FIFOERR | DNET_INTR_ENB_RX_ERROR | DNET_INTR_ENB_RX_FIFOFULL | DNET_INTR_ENB_TX_FIFOFULL | DNET_INTR_ENB_TX_DISCFRM | DNET_INTR_ENB_RX_PKTRDY, INTR_ENB); } static int dnet_open(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); /* if the phy is not yet register, retry later */ if (!bp->phy_dev) return -EAGAIN; if (!is_valid_ether_addr(dev->dev_addr)) return -EADDRNOTAVAIL; napi_enable(&bp->napi); dnet_init_hw(bp); phy_start_aneg(bp->phy_dev); /* schedule a link state check */ phy_start(bp->phy_dev); netif_start_queue(dev); return 0; } static int dnet_close(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); netif_stop_queue(dev); napi_disable(&bp->napi); if (bp->phy_dev) phy_stop(bp->phy_dev); dnet_reset_hw(bp); netif_carrier_off(dev); return 0; } static inline void dnet_print_pretty_hwstats(struct dnet_stats *hwstat) { pr_debug("%s\n", __func__); pr_debug("----------------------------- RX statistics " "-------------------------------\n"); pr_debug("RX_PKT_IGNR_CNT %-8x\n", hwstat->rx_pkt_ignr); pr_debug("RX_LEN_CHK_ERR_CNT %-8x\n", hwstat->rx_len_chk_err); pr_debug("RX_LNG_FRM_CNT %-8x\n", hwstat->rx_lng_frm); pr_debug("RX_SHRT_FRM_CNT %-8x\n", hwstat->rx_shrt_frm); pr_debug("RX_IPG_VIOL_CNT %-8x\n", hwstat->rx_ipg_viol); pr_debug("RX_CRC_ERR_CNT %-8x\n", hwstat->rx_crc_err); pr_debug("RX_OK_PKT_CNT %-8x\n", hwstat->rx_ok_pkt); pr_debug("RX_CTL_FRM_CNT %-8x\n", hwstat->rx_ctl_frm); pr_debug("RX_PAUSE_FRM_CNT %-8x\n", hwstat->rx_pause_frm); pr_debug("RX_MULTICAST_CNT %-8x\n", hwstat->rx_multicast); pr_debug("RX_BROADCAST_CNT %-8x\n", hwstat->rx_broadcast); pr_debug("RX_VLAN_TAG_CNT %-8x\n", hwstat->rx_vlan_tag); pr_debug("RX_PRE_SHRINK_CNT %-8x\n", hwstat->rx_pre_shrink); pr_debug("RX_DRIB_NIB_CNT %-8x\n", hwstat->rx_drib_nib); 
pr_debug("RX_UNSUP_OPCD_CNT %-8x\n", hwstat->rx_unsup_opcd); pr_debug("RX_BYTE_CNT %-8x\n", hwstat->rx_byte); pr_debug("----------------------------- TX statistics " "-------------------------------\n"); pr_debug("TX_UNICAST_CNT %-8x\n", hwstat->tx_unicast); pr_debug("TX_PAUSE_FRM_CNT %-8x\n", hwstat->tx_pause_frm); pr_debug("TX_MULTICAST_CNT %-8x\n", hwstat->tx_multicast); pr_debug("TX_BRDCAST_CNT %-8x\n", hwstat->tx_brdcast); pr_debug("TX_VLAN_TAG_CNT %-8x\n", hwstat->tx_vlan_tag); pr_debug("TX_BAD_FCS_CNT %-8x\n", hwstat->tx_bad_fcs); pr_debug("TX_JUMBO_CNT %-8x\n", hwstat->tx_jumbo); pr_debug("TX_BYTE_CNT %-8x\n", hwstat->tx_byte); } static struct net_device_stats *dnet_get_stats(struct net_device *dev) { struct dnet *bp = netdev_priv(dev); struct net_device_stats *nstat = &dev->stats; struct dnet_stats *hwstat = &bp->hw_stats; /* read stats from hardware */ dnet_update_stats(bp); /* Convert HW stats into netdevice stats */ nstat->rx_errors = (hwstat->rx_len_chk_err + hwstat->rx_lng_frm + hwstat->rx_shrt_frm + /* ignore IGP violation error hwstat->rx_ipg_viol + */ hwstat->rx_crc_err + hwstat->rx_pre_shrink + hwstat->rx_drib_nib + hwstat->rx_unsup_opcd); nstat->tx_errors = hwstat->tx_bad_fcs; nstat->rx_length_errors = (hwstat->rx_len_chk_err + hwstat->rx_lng_frm + hwstat->rx_shrt_frm + hwstat->rx_pre_shrink); nstat->rx_crc_errors = hwstat->rx_crc_err; nstat->rx_frame_errors = hwstat->rx_pre_shrink + hwstat->rx_drib_nib; nstat->rx_packets = hwstat->rx_ok_pkt; nstat->tx_packets = (hwstat->tx_unicast + hwstat->tx_multicast + hwstat->tx_brdcast); nstat->rx_bytes = hwstat->rx_byte; nstat->tx_bytes = hwstat->tx_byte; nstat->multicast = hwstat->rx_multicast; nstat->rx_missed_errors = hwstat->rx_pkt_ignr; dnet_print_pretty_hwstats(hwstat); return nstat; } static int dnet_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; if (!phydev) return -ENODEV; return phy_ethtool_gset(phydev, 
cmd); } static int dnet_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; if (!phydev) return -ENODEV; return phy_ethtool_sset(phydev, cmd); } static int dnet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct dnet *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; if (!netif_running(dev)) return -EINVAL; if (!phydev) return -ENODEV; return phy_mii_ioctl(phydev, rq, cmd); } static void dnet_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->bus_info, "0", sizeof(info->bus_info)); } static const struct ethtool_ops dnet_ethtool_ops = { .get_settings = dnet_get_settings, .set_settings = dnet_set_settings, .get_drvinfo = dnet_get_drvinfo, .get_link = ethtool_op_get_link, }; static const struct net_device_ops dnet_netdev_ops = { .ndo_open = dnet_open, .ndo_stop = dnet_close, .ndo_get_stats = dnet_get_stats, .ndo_start_xmit = dnet_start_xmit, .ndo_do_ioctl = dnet_ioctl, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, }; static int __devinit dnet_probe(struct platform_device *pdev) { struct resource *res; struct net_device *dev; struct dnet *bp; struct phy_device *phydev; int err = -ENXIO; unsigned int mem_base, mem_size, irq; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "no mmio resource defined\n"); goto err_out; } mem_base = res->start; mem_size = resource_size(res); irq = platform_get_irq(pdev, 0); if (!request_mem_region(mem_base, mem_size, DRV_NAME)) { dev_err(&pdev->dev, "no memory region available\n"); err = -EBUSY; goto err_out; } err = -ENOMEM; dev = alloc_etherdev(sizeof(*bp)); if (!dev) goto err_out_release_mem; /* TODO: Actually, we have some interesting features... 
*/ dev->features |= 0; bp = netdev_priv(dev); bp->dev = dev; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); spin_lock_init(&bp->lock); bp->regs = ioremap(mem_base, mem_size); if (!bp->regs) { dev_err(&pdev->dev, "failed to map registers, aborting.\n"); err = -ENOMEM; goto err_out_free_dev; } dev->irq = irq; err = request_irq(dev->irq, dnet_interrupt, 0, DRV_NAME, dev); if (err) { dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", irq, err); goto err_out_iounmap; } dev->netdev_ops = &dnet_netdev_ops; netif_napi_add(dev, &bp->napi, dnet_poll, 64); dev->ethtool_ops = &dnet_ethtool_ops; dev->base_addr = (unsigned long)bp->regs; bp->capabilities = dnet_readl(bp, VERCAPS) & DNET_CAPS_MASK; dnet_get_hwaddr(bp); if (!is_valid_ether_addr(dev->dev_addr)) { /* choose a random ethernet address */ eth_hw_addr_random(dev); __dnet_set_hwaddr(bp); } err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); goto err_out_free_irq; } /* register the PHY board fixup (for Marvell 88E1111) */ err = phy_register_fixup_for_uid(0x01410cc0, 0xfffffff0, dnet_phy_marvell_fixup); /* we can live without it, so just issue a warning */ if (err) dev_warn(&pdev->dev, "Cannot register PHY board fixup.\n"); err = dnet_mii_init(bp); if (err) goto err_out_unregister_netdev; dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n", bp->regs, mem_base, dev->irq, dev->dev_addr); dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n", (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ", (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ", (bp->capabilities & DNET_HAS_GIGABIT) ? "" : "no ", (bp->capabilities & DNET_HAS_DMA) ? 
"" : "no "); phydev = bp->phy_dev; dev_info(&pdev->dev, "attached PHY driver [%s] " "(mii_bus:phy_addr=%s, irq=%d)\n", phydev->drv->name, dev_name(&phydev->dev), phydev->irq); return 0; err_out_unregister_netdev: unregister_netdev(dev); err_out_free_irq: free_irq(dev->irq, dev); err_out_iounmap: iounmap(bp->regs); err_out_free_dev: free_netdev(dev); err_out_release_mem: release_mem_region(mem_base, mem_size); err_out: return err; } static int __devexit dnet_remove(struct platform_device *pdev) { struct net_device *dev; struct dnet *bp; dev = platform_get_drvdata(pdev); if (dev) { bp = netdev_priv(dev); if (bp->phy_dev) phy_disconnect(bp->phy_dev); mdiobus_unregister(bp->mii_bus); kfree(bp->mii_bus->irq); mdiobus_free(bp->mii_bus); unregister_netdev(dev); free_irq(dev->irq, dev); iounmap(bp->regs); free_netdev(dev); } return 0; } static struct platform_driver dnet_driver = { .probe = dnet_probe, .remove = __devexit_p(dnet_remove), .driver = { .name = "dnet", }, }; module_platform_driver(dnet_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Dave DNET Ethernet driver"); MODULE_AUTHOR("Ilya Yanok <yanok@emcraft.com>, " "Matteo Vit <matteo.vit@dave.eu>");
gpl-2.0
drewx2/android_kernel_htc_dlx
drivers/net/ethernet/emulex/benet/be_cmds.c
4811
62969
/* * Copyright (C) 2005 - 2011 Emulex * All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. The full GNU General * Public License is included in this distribution in the file called COPYING. * * Contact Information: * linux-drivers@emulex.com * * Emulex * 3333 Susan Street * Costa Mesa, CA 92626 */ #include "be.h" #include "be_cmds.h" /* Must be a power of 2 or else MODULO will BUG_ON */ static int be_get_temp_freq = 64; static inline void *embedded_payload(struct be_mcc_wrb *wrb) { return wrb->payload.embedded_payload; } static void be_mcc_notify(struct be_adapter *adapter) { struct be_queue_info *mccq = &adapter->mcc_obj.q; u32 val = 0; if (be_error(adapter)) return; val |= mccq->id & DB_MCCQ_RING_ID_MASK; val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT; wmb(); iowrite32(val, adapter->db + DB_MCCQ_OFFSET); } /* To check if valid bit is set, check the entire word as we don't know * the endianness of the data (old entry is host endian while a new entry is * little endian) */ static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl) { if (compl->flags != 0) { compl->flags = le32_to_cpu(compl->flags); BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0); return true; } else { return false; } } /* Need to reset the entire word that houses the valid bit */ static inline void be_mcc_compl_use(struct be_mcc_compl *compl) { compl->flags = 0; } static int be_mcc_compl_process(struct be_adapter *adapter, struct be_mcc_compl *compl) { u16 compl_status, extd_status; /* Just swap the status to host endian; mcc tag is opaquely copied * from mcc_wrb */ be_dws_le_to_cpu(compl, 4); compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) & CQE_STATUS_COMPL_MASK; if (((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) || (compl->tag0 == OPCODE_COMMON_WRITE_OBJECT)) && (compl->tag1 == CMD_SUBSYSTEM_COMMON)) { adapter->flash_status = 
compl_status; complete(&adapter->flash_compl); } if (compl_status == MCC_STATUS_SUCCESS) { if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) || (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) && (compl->tag1 == CMD_SUBSYSTEM_ETH)) { be_parse_stats(adapter); adapter->stats_cmd_sent = false; } if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) { struct be_mcc_wrb *mcc_wrb = queue_index_node(&adapter->mcc_obj.q, compl->tag1); struct be_cmd_resp_get_cntl_addnl_attribs *resp = embedded_payload(mcc_wrb); adapter->drv_stats.be_on_die_temperature = resp->on_die_temperature; } } else { if (compl->tag0 == OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES) be_get_temp_freq = 0; if (compl_status == MCC_STATUS_NOT_SUPPORTED || compl_status == MCC_STATUS_ILLEGAL_REQUEST) goto done; if (compl_status == MCC_STATUS_UNAUTHORIZED_REQUEST) { dev_warn(&adapter->pdev->dev, "This domain(VM) is not " "permitted to execute this cmd (opcode %d)\n", compl->tag0); } else { extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) & CQE_STATUS_EXTD_MASK; dev_err(&adapter->pdev->dev, "Cmd (opcode %d) failed:" "status %d, extd-status %d\n", compl->tag0, compl_status, extd_status); } } done: return compl_status; } /* Link state evt is a string of bytes; no need for endian swapping */ static void be_async_link_state_process(struct be_adapter *adapter, struct be_async_event_link_state *evt) { /* When link status changes, link speed must be re-queried from FW */ adapter->link_speed = -1; /* For the initial link status do not rely on the ASYNC event as * it may not be received in some cases. 
*/ if (adapter->flags & BE_FLAGS_LINK_STATUS_INIT) be_link_status_update(adapter, evt->port_link_status); } /* Grp5 CoS Priority evt */ static void be_async_grp5_cos_priority_process(struct be_adapter *adapter, struct be_async_event_grp5_cos_priority *evt) { if (evt->valid) { adapter->vlan_prio_bmap = evt->available_priority_bmap; adapter->recommended_prio &= ~VLAN_PRIO_MASK; adapter->recommended_prio = evt->reco_default_priority << VLAN_PRIO_SHIFT; } } /* Grp5 QOS Speed evt */ static void be_async_grp5_qos_speed_process(struct be_adapter *adapter, struct be_async_event_grp5_qos_link_speed *evt) { if (evt->physical_port == adapter->port_num) { /* qos_link_speed is in units of 10 Mbps */ adapter->link_speed = evt->qos_link_speed * 10; } } /*Grp5 PVID evt*/ static void be_async_grp5_pvid_state_process(struct be_adapter *adapter, struct be_async_event_grp5_pvid_state *evt) { if (evt->enabled) adapter->pvid = le16_to_cpu(evt->tag) & VLAN_VID_MASK; else adapter->pvid = 0; } static void be_async_grp5_evt_process(struct be_adapter *adapter, u32 trailer, struct be_mcc_compl *evt) { u8 event_type = 0; event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) & ASYNC_TRAILER_EVENT_TYPE_MASK; switch (event_type) { case ASYNC_EVENT_COS_PRIORITY: be_async_grp5_cos_priority_process(adapter, (struct be_async_event_grp5_cos_priority *)evt); break; case ASYNC_EVENT_QOS_SPEED: be_async_grp5_qos_speed_process(adapter, (struct be_async_event_grp5_qos_link_speed *)evt); break; case ASYNC_EVENT_PVID_STATE: be_async_grp5_pvid_state_process(adapter, (struct be_async_event_grp5_pvid_state *)evt); break; default: dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n"); break; } } static inline bool is_link_state_evt(u32 trailer) { return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & ASYNC_TRAILER_EVENT_CODE_MASK) == ASYNC_EVENT_CODE_LINK_STATE; } static inline bool is_grp5_evt(u32 trailer) { return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) & ASYNC_TRAILER_EVENT_CODE_MASK) == 
ASYNC_EVENT_CODE_GRP_5); } static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter) { struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq; struct be_mcc_compl *compl = queue_tail_node(mcc_cq); if (be_mcc_compl_is_new(compl)) { queue_tail_inc(mcc_cq); return compl; } return NULL; } void be_async_mcc_enable(struct be_adapter *adapter) { spin_lock_bh(&adapter->mcc_cq_lock); be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0); adapter->mcc_obj.rearm_cq = true; spin_unlock_bh(&adapter->mcc_cq_lock); } void be_async_mcc_disable(struct be_adapter *adapter) { adapter->mcc_obj.rearm_cq = false; } int be_process_mcc(struct be_adapter *adapter) { struct be_mcc_compl *compl; int num = 0, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; spin_lock_bh(&adapter->mcc_cq_lock); while ((compl = be_mcc_compl_get(adapter))) { if (compl->flags & CQE_FLAGS_ASYNC_MASK) { /* Interpret flags as an async trailer */ if (is_link_state_evt(compl->flags)) be_async_link_state_process(adapter, (struct be_async_event_link_state *) compl); else if (is_grp5_evt(compl->flags)) be_async_grp5_evt_process(adapter, compl->flags, compl); } else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) { status = be_mcc_compl_process(adapter, compl); atomic_dec(&mcc_obj->q.used); } be_mcc_compl_use(compl); num++; } if (num) be_cq_notify(adapter, mcc_obj->cq.id, mcc_obj->rearm_cq, num); spin_unlock_bh(&adapter->mcc_cq_lock); return status; } /* Wait till no more pending mcc requests are present */ static int be_mcc_wait_compl(struct be_adapter *adapter) { #define mcc_timeout 120000 /* 12s timeout */ int i, status = 0; struct be_mcc_obj *mcc_obj = &adapter->mcc_obj; for (i = 0; i < mcc_timeout; i++) { if (be_error(adapter)) return -EIO; status = be_process_mcc(adapter); if (atomic_read(&mcc_obj->q.used) == 0) break; udelay(100); } if (i == mcc_timeout) { dev_err(&adapter->pdev->dev, "FW not responding\n"); adapter->fw_timeout = true; return -1; } return status; } /* Notify MCC requests and 
wait for completion */ static int be_mcc_notify_wait(struct be_adapter *adapter) { be_mcc_notify(adapter); return be_mcc_wait_compl(adapter); } static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db) { int msecs = 0; u32 ready; do { if (be_error(adapter)) return -EIO; ready = ioread32(db); if (ready == 0xffffffff) return -1; ready &= MPU_MAILBOX_DB_RDY_MASK; if (ready) break; if (msecs > 4000) { dev_err(&adapter->pdev->dev, "FW not responding\n"); adapter->fw_timeout = true; be_detect_dump_ue(adapter); return -1; } msleep(1); msecs++; } while (true); return 0; } /* * Insert the mailbox address into the doorbell in two steps * Polls on the mbox doorbell till a command completion (or a timeout) occurs */ static int be_mbox_notify_wait(struct be_adapter *adapter) { int status; u32 val = 0; void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET; struct be_dma_mem *mbox_mem = &adapter->mbox_mem; struct be_mcc_mailbox *mbox = mbox_mem->va; struct be_mcc_compl *compl = &mbox->compl; /* wait for ready to be set */ status = be_mbox_db_ready_wait(adapter, db); if (status != 0) return status; val |= MPU_MAILBOX_DB_HI_MASK; /* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */ val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2; iowrite32(val, db); /* wait for ready to be set */ status = be_mbox_db_ready_wait(adapter, db); if (status != 0) return status; val = 0; /* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */ val |= (u32)(mbox_mem->dma >> 4) << 2; iowrite32(val, db); status = be_mbox_db_ready_wait(adapter, db); if (status != 0) return status; /* A cq entry has been made now */ if (be_mcc_compl_is_new(compl)) { status = be_mcc_compl_process(adapter, &mbox->compl); be_mcc_compl_use(compl); if (status) return status; } else { dev_err(&adapter->pdev->dev, "invalid mailbox completion\n"); return -1; } return 0; } static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) { u32 sem; if (lancer_chip(adapter)) sem = ioread32(adapter->db + 
MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET); else sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) return -1; else return 0; } int be_cmd_POST(struct be_adapter *adapter) { u16 stage; int status, timeout = 0; struct device *dev = &adapter->pdev->dev; do { status = be_POST_stage_get(adapter, &stage); if (status) { dev_err(dev, "POST error; stage=0x%x\n", stage); return -1; } else if (stage != POST_STAGE_ARMFW_RDY) { if (msleep_interruptible(2000)) { dev_err(dev, "Waiting for POST aborted\n"); return -EINTR; } timeout += 2; } else { return 0; } } while (timeout < 60); dev_err(dev, "POST timeout; stage=0x%x\n", stage); return -1; } static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb) { return &wrb->payload.sgl[0]; } /* Don't touch the hdr after it's prepared */ /* mem will be NULL for embedded commands */ static void be_wrb_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr, u8 subsystem, u8 opcode, int cmd_len, struct be_mcc_wrb *wrb, struct be_dma_mem *mem) { struct be_sge *sge; req_hdr->opcode = opcode; req_hdr->subsystem = subsystem; req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr)); req_hdr->version = 0; wrb->tag0 = opcode; wrb->tag1 = subsystem; wrb->payload_length = cmd_len; if (mem) { wrb->embedded |= (1 & MCC_WRB_SGE_CNT_MASK) << MCC_WRB_SGE_CNT_SHIFT; sge = nonembedded_sgl(wrb); sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma)); sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF); sge->len = cpu_to_le32(mem->size); } else wrb->embedded |= MCC_WRB_EMBEDDED_MASK; be_dws_cpu_to_le(wrb, 8); } static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages, struct be_dma_mem *mem) { int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages); u64 dma = (u64)mem->dma; for (i = 0; i < buf_pages; i++) { pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF); pages[i].hi = cpu_to_le32(upper_32_bits(dma)); dma += 
PAGE_SIZE_4K; } } /* Converts interrupt delay in microseconds to multiplier value */ static u32 eq_delay_to_mult(u32 usec_delay) { #define MAX_INTR_RATE 651042 const u32 round = 10; u32 multiplier; if (usec_delay == 0) multiplier = 0; else { u32 interrupt_rate = 1000000 / usec_delay; /* Max delay, corresponding to the lowest interrupt rate */ if (interrupt_rate == 0) multiplier = 1023; else { multiplier = (MAX_INTR_RATE - interrupt_rate) * round; multiplier /= interrupt_rate; /* Round the multiplier to the closest value.*/ multiplier = (multiplier + round/2) / round; multiplier = min(multiplier, (u32)1023); } } return multiplier; } static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter) { struct be_dma_mem *mbox_mem = &adapter->mbox_mem; struct be_mcc_wrb *wrb = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb; memset(wrb, 0, sizeof(*wrb)); return wrb; } static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter) { struct be_queue_info *mccq = &adapter->mcc_obj.q; struct be_mcc_wrb *wrb; if (atomic_read(&mccq->used) >= mccq->len) { dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n"); return NULL; } wrb = queue_head_node(mccq); queue_head_inc(mccq); atomic_inc(&mccq->used); memset(wrb, 0, sizeof(*wrb)); return wrb; } /* Tell fw we're about to start firing cmds by writing a * special pattern across the wrb hdr; uses mbox */ int be_cmd_fw_init(struct be_adapter *adapter) { u8 *wrb; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = (u8 *)wrb_from_mbox(adapter); *wrb++ = 0xFF; *wrb++ = 0x12; *wrb++ = 0x34; *wrb++ = 0xFF; *wrb++ = 0xFF; *wrb++ = 0x56; *wrb++ = 0x78; *wrb = 0xFF; status = be_mbox_notify_wait(adapter); mutex_unlock(&adapter->mbox_lock); return status; } /* Tell fw we're done with firing cmds by writing a * special pattern across the wrb hdr; uses mbox */ int be_cmd_fw_clean(struct be_adapter *adapter) { u8 *wrb; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = (u8 
*)wrb_from_mbox(adapter); *wrb++ = 0xFF; *wrb++ = 0xAA; *wrb++ = 0xBB; *wrb++ = 0xFF; *wrb++ = 0xFF; *wrb++ = 0xCC; *wrb++ = 0xDD; *wrb = 0xFF; status = be_mbox_notify_wait(adapter); mutex_unlock(&adapter->mbox_lock); return status; } int be_cmd_eq_create(struct be_adapter *adapter, struct be_queue_info *eq, int eq_delay) { struct be_mcc_wrb *wrb; struct be_cmd_req_eq_create *req; struct be_dma_mem *q_mem = &eq->dma_mem; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_EQ_CREATE, sizeof(*req), wrb, NULL); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1); /* 4byte eqe*/ AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0); AMAP_SET_BITS(struct amap_eq_context, count, req->context, __ilog2_u32(eq->len/256)); AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context, eq_delay_to_mult(eq_delay)); be_dws_cpu_to_le(req->context, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_eq_create *resp = embedded_payload(wrb); eq->id = le16_to_cpu(resp->eq_id); eq->created = true; } mutex_unlock(&adapter->mbox_lock); return status; } /* Use MCC */ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr, u8 type, bool permanent, u32 if_handle, u32 pmac_id) { struct be_mcc_wrb *wrb; struct be_cmd_req_mac_query *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req), wrb, NULL); req->type = type; if (permanent) { req->permanent = 1; } else { req->if_id = cpu_to_le16((u16) if_handle); req->pmac_id = cpu_to_le32(pmac_id); 
req->permanent = 0; } status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_mac_query *resp = embedded_payload(wrb); memcpy(mac_addr, resp->mac.addr, ETH_ALEN); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses synchronous MCCQ */ int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr, u32 if_id, u32 *pmac_id, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_pmac_add *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req), wrb, NULL); req->hdr.domain = domain; req->if_id = cpu_to_le32(if_id); memcpy(req->mac_address, mac_addr, ETH_ALEN); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb); *pmac_id = le32_to_cpu(resp->pmac_id); } err: spin_unlock_bh(&adapter->mcc_lock); if (status == MCC_STATUS_UNAUTHORIZED_REQUEST) status = -EPERM; return status; } /* Uses synchronous MCCQ */ int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, int pmac_id, u32 dom) { struct be_mcc_wrb *wrb; struct be_cmd_req_pmac_del *req; int status; if (pmac_id == -1) return 0; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req), wrb, NULL); req->hdr.domain = dom; req->if_id = cpu_to_le32(if_id); req->pmac_id = cpu_to_le32(pmac_id); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses Mbox */ int be_cmd_cq_create(struct be_adapter *adapter, struct be_queue_info *cq, struct be_queue_info *eq, bool no_delay, int coalesce_wm) { struct be_mcc_wrb *wrb; struct be_cmd_req_cq_create *req; struct be_dma_mem *q_mem = &cq->dma_mem; void *ctxt; int status; if 
(mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_CQ_CREATE, sizeof(*req), wrb, NULL); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); if (lancer_chip(adapter)) { req->hdr.version = 2; req->page_size = 1; /* 1 for 4K */ AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt, no_delay); AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt, __ilog2_u32(cq->len/256)); AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_lancer, eventable, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_lancer, eqid, ctxt, eq->id); } else { AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt, coalesce_wm); AMAP_SET_BITS(struct amap_cq_context_be, nodelay, ctxt, no_delay); AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt, __ilog2_u32(cq->len/256)); AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1); AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id); } be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_cq_create *resp = embedded_payload(wrb); cq->id = le16_to_cpu(resp->cq_id); cq->created = true; } mutex_unlock(&adapter->mbox_lock); return status; } static u32 be_encoded_q_len(int q_len) { u32 len_encoded = fls(q_len); /* log2(len) + 1 */ if (len_encoded == 16) len_encoded = 0; return len_encoded; } int be_cmd_mccq_ext_create(struct be_adapter *adapter, struct be_queue_info *mccq, struct be_queue_info *cq) { struct be_mcc_wrb *wrb; struct be_cmd_req_mcc_ext_create *req; struct be_dma_mem *q_mem = &mccq->dma_mem; void *ctxt; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = 
embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req), wrb, NULL); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); if (lancer_chip(adapter)) { req->hdr.version = 1; req->cq_id = cpu_to_le16(cq->id); AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt, be_encoded_q_len(mccq->len)); AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1); AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id, ctxt, cq->id); AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid, ctxt, 1); } else { AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, be_encoded_q_len(mccq->len)); AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); } /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ req->async_event_bitmap[0] = cpu_to_le32(0x00000022); be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); mccq->id = le16_to_cpu(resp->id); mccq->created = true; } mutex_unlock(&adapter->mbox_lock); return status; } int be_cmd_mccq_org_create(struct be_adapter *adapter, struct be_queue_info *mccq, struct be_queue_info *cq) { struct be_mcc_wrb *wrb; struct be_cmd_req_mcc_create *req; struct be_dma_mem *q_mem = &mccq->dma_mem; void *ctxt; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MCC_CREATE, sizeof(*req), wrb, NULL); req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1); AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt, 
be_encoded_q_len(mccq->len)); AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id); be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb); mccq->id = le16_to_cpu(resp->id); mccq->created = true; } mutex_unlock(&adapter->mbox_lock); return status; } int be_cmd_mccq_create(struct be_adapter *adapter, struct be_queue_info *mccq, struct be_queue_info *cq) { int status; status = be_cmd_mccq_ext_create(adapter, mccq, cq); if (status && !lancer_chip(adapter)) { dev_warn(&adapter->pdev->dev, "Upgrade to F/W ver 2.102.235.0 " "or newer to avoid conflicting priorities between NIC " "and FCoE traffic"); status = be_cmd_mccq_org_create(adapter, mccq, cq); } return status; } int be_cmd_txq_create(struct be_adapter *adapter, struct be_queue_info *txq, struct be_queue_info *cq) { struct be_mcc_wrb *wrb; struct be_cmd_req_eth_tx_create *req; struct be_dma_mem *q_mem = &txq->dma_mem; void *ctxt; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE, sizeof(*req), wrb, NULL); if (lancer_chip(adapter)) { req->hdr.version = 1; AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt, adapter->if_handle); } req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size); req->ulp_num = BE_ULP1_NUM; req->type = BE_ETH_TX_RING_TYPE_STANDARD; AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt, be_encoded_q_len(txq->len)); AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1); AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id); be_dws_cpu_to_le(ctxt, sizeof(req->context)); be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); status = be_mcc_notify_wait(adapter); if (!status) { struct 
be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb); txq->id = le16_to_cpu(resp->cid); txq->created = true; } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses MCC */ int be_cmd_rxq_create(struct be_adapter *adapter, struct be_queue_info *rxq, u16 cq_id, u16 frag_size, u32 if_id, u32 rss, u8 *rss_id) { struct be_mcc_wrb *wrb; struct be_cmd_req_eth_rx_create *req; struct be_dma_mem *q_mem = &rxq->dma_mem; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE, sizeof(*req), wrb, NULL); req->cq_id = cpu_to_le16(cq_id); req->frag_size = fls(frag_size) - 1; req->num_pages = 2; be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); req->interface_id = cpu_to_le32(if_id); req->max_frame_size = cpu_to_le16(BE_MAX_JUMBO_FRAME_SIZE); req->rss_queue = cpu_to_le32(rss); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb); rxq->id = le16_to_cpu(resp->id); rxq->created = true; *rss_id = resp->rss_id; } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Generic destroyer function for all types of queues * Uses Mbox */ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, int queue_type) { struct be_mcc_wrb *wrb; struct be_cmd_req_q_destroy *req; u8 subsys = 0, opcode = 0; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); switch (queue_type) { case QTYPE_EQ: subsys = CMD_SUBSYSTEM_COMMON; opcode = OPCODE_COMMON_EQ_DESTROY; break; case QTYPE_CQ: subsys = CMD_SUBSYSTEM_COMMON; opcode = OPCODE_COMMON_CQ_DESTROY; break; case QTYPE_TXQ: subsys = CMD_SUBSYSTEM_ETH; opcode = OPCODE_ETH_TX_DESTROY; break; case QTYPE_RXQ: subsys = CMD_SUBSYSTEM_ETH; opcode = OPCODE_ETH_RX_DESTROY; break; case QTYPE_MCCQ: subsys = 
CMD_SUBSYSTEM_COMMON; opcode = OPCODE_COMMON_MCC_DESTROY; break; default: BUG(); } be_wrb_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req), wrb, NULL); req->id = cpu_to_le16(q->id); status = be_mbox_notify_wait(adapter); if (!status) q->created = false; mutex_unlock(&adapter->mbox_lock); return status; } /* Uses MCC */ int be_cmd_rxq_destroy(struct be_adapter *adapter, struct be_queue_info *q) { struct be_mcc_wrb *wrb; struct be_cmd_req_q_destroy *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_DESTROY, sizeof(*req), wrb, NULL); req->id = cpu_to_le16(q->id); status = be_mcc_notify_wait(adapter); if (!status) q->created = false; err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Create an rx filtering policy configuration on an i/f * Uses MCCQ */ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags, u8 *mac, u32 *if_handle, u32 *pmac_id, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_if_create *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req), wrb, NULL); req->hdr.domain = domain; req->capability_flags = cpu_to_le32(cap_flags); req->enable_flags = cpu_to_le32(en_flags); if (mac) memcpy(req->mac_addr, mac, ETH_ALEN); else req->pmac_invalid = true; status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_if_create *resp = embedded_payload(wrb); *if_handle = le32_to_cpu(resp->interface_id); if (mac) *pmac_id = le32_to_cpu(resp->pmac_id); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses MCCQ */ int be_cmd_if_destroy(struct be_adapter *adapter, int interface_id, u32 domain) { struct be_mcc_wrb *wrb; struct 
be_cmd_req_if_destroy *req; int status; if (interface_id == -1) return 0; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req), wrb, NULL); req->hdr.domain = domain; req->interface_id = cpu_to_le32(interface_id); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Get stats is a non embedded command: the request is not embedded inside * WRB but is a separate dma memory block * Uses asynchronous MCC */ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) { struct be_mcc_wrb *wrb; struct be_cmd_req_hdr *hdr; int status = 0; if (MODULO(adapter->work_counter, be_get_temp_freq) == 0) be_cmd_get_die_temperature(adapter); spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } hdr = nonemb_cmd->va; be_wrb_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size, wrb, nonemb_cmd); if (adapter->generation == BE_GEN3) hdr->version = 1; be_mcc_notify(adapter); adapter->stats_cmd_sent = true; err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Lancer Stats */ int lancer_cmd_get_pport_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) { struct be_mcc_wrb *wrb; struct lancer_cmd_req_pport_stats *req; int status = 0; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = nonemb_cmd->va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size, wrb, nonemb_cmd); req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num); req->cmd_params.params.reset_stats = 0; be_mcc_notify(adapter); adapter->stats_cmd_sent = true; err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses synchronous mcc */ int 
be_cmd_link_status_query(struct be_adapter *adapter, u8 *mac_speed, u16 *link_speed, u8 *link_status, u32 dom) { struct be_mcc_wrb *wrb; struct be_cmd_req_link_status *req; int status; spin_lock_bh(&adapter->mcc_lock); if (link_status) *link_status = LINK_DOWN; wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req), wrb, NULL); if (adapter->generation == BE_GEN3 || lancer_chip(adapter)) req->hdr.version = 1; req->hdr.domain = dom; status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_link_status *resp = embedded_payload(wrb); if (resp->mac_speed != PHY_LINK_SPEED_ZERO) { if (link_speed) *link_speed = le16_to_cpu(resp->link_speed); if (mac_speed) *mac_speed = resp->mac_speed; } if (link_status) *link_status = resp->logical_link_status; } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses synchronous mcc */ int be_cmd_get_die_temperature(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_cntl_addnl_attribs *req; u16 mccq_index; int status; spin_lock_bh(&adapter->mcc_lock); mccq_index = adapter->mcc_obj.q.head; wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req), wrb, NULL); wrb->tag1 = mccq_index; be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses synchronous mcc */ int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_fat *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MANAGE_FAT, sizeof(*req), wrb, NULL); req->fat_operation = 
cpu_to_le32(QUERY_FAT); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fat *resp = embedded_payload(wrb); if (log_size && resp->log_size) *log_size = le32_to_cpu(resp->log_size) - sizeof(u32); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf) { struct be_dma_mem get_fat_cmd; struct be_mcc_wrb *wrb; struct be_cmd_req_get_fat *req; u32 offset = 0, total_size, buf_size, log_offset = sizeof(u32), payload_len; int status; if (buf_len == 0) return; total_size = buf_len; get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024; get_fat_cmd.va = pci_alloc_consistent(adapter->pdev, get_fat_cmd.size, &get_fat_cmd.dma); if (!get_fat_cmd.va) { status = -ENOMEM; dev_err(&adapter->pdev->dev, "Memory allocation failure while retrieving FAT data\n"); return; } spin_lock_bh(&adapter->mcc_lock); while (total_size) { buf_size = min(total_size, (u32)60*1024); total_size -= buf_size; wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = get_fat_cmd.va; payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MANAGE_FAT, payload_len, wrb, &get_fat_cmd); req->fat_operation = cpu_to_le32(RETRIEVE_FAT); req->read_log_offset = cpu_to_le32(log_offset); req->read_log_length = cpu_to_le32(buf_size); req->data_buffer_size = cpu_to_le32(buf_size); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fat *resp = get_fat_cmd.va; memcpy(buf + offset, resp->data_buffer, le32_to_cpu(resp->read_log_length)); } else { dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n"); goto err; } offset += buf_size; log_offset += buf_size; } err: pci_free_consistent(adapter->pdev, get_fat_cmd.size, get_fat_cmd.va, get_fat_cmd.dma); spin_unlock_bh(&adapter->mcc_lock); } /* Uses synchronous mcc */ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver, char *fw_on_flash) 
{ struct be_mcc_wrb *wrb; struct be_cmd_req_get_fw_version *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_FW_VERSION, sizeof(*req), wrb, NULL); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb); strcpy(fw_ver, resp->firmware_version_string); if (fw_on_flash) strcpy(fw_on_flash, resp->fw_on_flash_version_string); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* set the EQ delay interval of an EQ to specified value * Uses async mcc */ int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd) { struct be_mcc_wrb *wrb; struct be_cmd_req_modify_eq_delay *req; int status = 0; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req), wrb, NULL); req->num_eq = cpu_to_le32(1); req->delay[0].eq_id = cpu_to_le32(eq_id); req->delay[0].phase = 0; req->delay[0].delay_multiplier = cpu_to_le32(eqd); be_mcc_notify(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses sycnhronous mcc */ int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array, u32 num, bool untagged, bool promiscuous) { struct be_mcc_wrb *wrb; struct be_cmd_req_vlan_config *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req), wrb, NULL); req->interface_id = if_id; req->promiscuous = promiscuous; req->untagged = untagged; req->num_vlan = num; if (!promiscuous) { memcpy(req->normal_vlan, vtag_array, req->num_vlan * sizeof(vtag_array[0])); 
} status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 value) { struct be_mcc_wrb *wrb; struct be_dma_mem *mem = &adapter->rx_filter; struct be_cmd_req_rx_filter *req = mem->va; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } memset(req, 0, sizeof(*req)); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req), wrb, mem); req->if_id = cpu_to_le32(adapter->if_handle); if (flags & IFF_PROMISC) { req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | BE_IF_FLAGS_VLAN_PROMISCUOUS); if (value == ON) req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS | BE_IF_FLAGS_VLAN_PROMISCUOUS); } else if (flags & IFF_ALLMULTI) { req->if_flags_mask = req->if_flags = cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); } else { struct netdev_hw_addr *ha; int i = 0; req->if_flags_mask = req->if_flags = cpu_to_le32(BE_IF_FLAGS_MULTICAST); /* Reset mcast promisc mode if already set by setting mask * and not setting flags field */ req->if_flags_mask |= cpu_to_le32(BE_IF_FLAGS_MCAST_PROMISCUOUS); req->mcast_num = cpu_to_le32(netdev_mc_count(adapter->netdev)); netdev_for_each_mc_addr(ha, adapter->netdev) memcpy(req->mcast_mac[i++].byte, ha->addr, ETH_ALEN); } status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses synchrounous mcc */ int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_flow_control *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req), wrb, NULL); req->tx_flow_control = cpu_to_le16((u16)tx_fc); req->rx_flow_control = cpu_to_le16((u16)rx_fc); status 
= be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses sycn mcc */ int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_flow_control *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req), wrb, NULL); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_flow_control *resp = embedded_payload(wrb); *tx_fc = le16_to_cpu(resp->tx_flow_control); *rx_fc = le16_to_cpu(resp->rx_flow_control); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses mbox */ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num, u32 *mode, u32 *caps) { struct be_mcc_wrb *wrb; struct be_cmd_req_query_fw_cfg *req; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req), wrb, NULL); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb); *port_num = le32_to_cpu(resp->phys_port); *mode = le32_to_cpu(resp->function_mode); *caps = le32_to_cpu(resp->function_caps); } mutex_unlock(&adapter->mbox_lock); return status; } /* Uses mbox */ int be_cmd_reset_function(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_hdr *req; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_FUNCTION_RESET, sizeof(*req), wrb, NULL); status = be_mbox_notify_wait(adapter); mutex_unlock(&adapter->mbox_lock); return status; } int be_cmd_rss_config(struct be_adapter 
*adapter, u8 *rsstable, u16 table_size) { struct be_mcc_wrb *wrb; struct be_cmd_req_rss_config *req; u32 myhash[10] = {0x15d43fa5, 0x2534685a, 0x5f87693a, 0x5668494e, 0x33cf6a53, 0x383334c6, 0x76ac4257, 0x59b242b2, 0x3ea83c02, 0x4a110304}; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RSS_CONFIG, sizeof(*req), wrb, NULL); req->if_id = cpu_to_le32(adapter->if_handle); req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV6 | RSS_ENABLE_IPV6); req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1); memcpy(req->cpu_table, rsstable, table_size); memcpy(req->hash, myhash, sizeof(myhash)); be_dws_cpu_to_le(req->hash, sizeof(req->hash)); status = be_mbox_notify_wait(adapter); mutex_unlock(&adapter->mbox_lock); return status; } /* Uses sync mcc */ int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num, u8 bcn, u8 sts, u8 state) { struct be_mcc_wrb *wrb; struct be_cmd_req_enable_disable_beacon *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req), wrb, NULL); req->port_num = port_num; req->beacon_state = state; req->beacon_duration = bcn; req->status_duration = sts; status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Uses sync mcc */ int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_beacon_state *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req), wrb, 
NULL); req->port_num = port_num; status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_beacon_state *resp = embedded_payload(wrb); *state = resp->beacon_state; } err: spin_unlock_bh(&adapter->mcc_lock); return status; } int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 data_size, u32 data_offset, const char *obj_name, u32 *data_written, u8 *addn_status) { struct be_mcc_wrb *wrb; struct lancer_cmd_req_write_object *req; struct lancer_cmd_resp_write_object *resp; void *ctxt = NULL; int status; spin_lock_bh(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err_unlock; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_WRITE_OBJECT, sizeof(struct lancer_cmd_req_write_object), wrb, NULL); ctxt = &req->context; AMAP_SET_BITS(struct amap_lancer_write_obj_context, write_length, ctxt, data_size); if (data_size == 0) AMAP_SET_BITS(struct amap_lancer_write_obj_context, eof, ctxt, 1); else AMAP_SET_BITS(struct amap_lancer_write_obj_context, eof, ctxt, 0); be_dws_cpu_to_le(ctxt, sizeof(req->context)); req->write_offset = cpu_to_le32(data_offset); strcpy(req->object_name, obj_name); req->descriptor_count = cpu_to_le32(1); req->buf_len = cpu_to_le32(data_size); req->addr_low = cpu_to_le32((cmd->dma + sizeof(struct lancer_cmd_req_write_object)) & 0xFFFFFFFF); req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma + sizeof(struct lancer_cmd_req_write_object))); be_mcc_notify(adapter); spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->flash_compl, msecs_to_jiffies(12000))) status = -1; else status = adapter->flash_status; resp = embedded_payload(wrb); if (!status) { *data_written = le32_to_cpu(resp->actual_write_len); } else { *addn_status = resp->additional_status; status = resp->status; } return status; err_unlock: spin_unlock_bh(&adapter->mcc_lock); return status; } int 
lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 data_size, u32 data_offset, const char *obj_name, u32 *data_read, u32 *eof, u8 *addn_status) { struct be_mcc_wrb *wrb; struct lancer_cmd_req_read_object *req; struct lancer_cmd_resp_read_object *resp; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err_unlock; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_READ_OBJECT, sizeof(struct lancer_cmd_req_read_object), wrb, NULL); req->desired_read_len = cpu_to_le32(data_size); req->read_offset = cpu_to_le32(data_offset); strcpy(req->object_name, obj_name); req->descriptor_count = cpu_to_le32(1); req->buf_len = cpu_to_le32(data_size); req->addr_low = cpu_to_le32((cmd->dma & 0xFFFFFFFF)); req->addr_high = cpu_to_le32(upper_32_bits(cmd->dma)); status = be_mcc_notify_wait(adapter); resp = embedded_payload(wrb); if (!status) { *data_read = le32_to_cpu(resp->actual_read_len); *eof = le32_to_cpu(resp->eof); } else { *addn_status = resp->additional_status; } err_unlock: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, u32 flash_type, u32 flash_opcode, u32 buf_size) { struct be_mcc_wrb *wrb; struct be_cmd_write_flashrom *req; int status; spin_lock_bh(&adapter->mcc_lock); adapter->flash_status = 0; wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err_unlock; } req = cmd->va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_WRITE_FLASHROM, cmd->size, wrb, cmd); req->params.op_type = cpu_to_le32(flash_type); req->params.op_code = cpu_to_le32(flash_opcode); req->params.data_buf_size = cpu_to_le32(buf_size); be_mcc_notify(adapter); spin_unlock_bh(&adapter->mcc_lock); if (!wait_for_completion_timeout(&adapter->flash_compl, msecs_to_jiffies(40000))) status = -1; else status = adapter->flash_status; return status; err_unlock: 
spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc, int offset) { struct be_mcc_wrb *wrb; struct be_cmd_write_flashrom *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4, wrb, NULL); req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT); req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT); req->params.offset = cpu_to_le32(offset); req->params.data_buf_size = cpu_to_le32(0x4); status = be_mcc_notify_wait(adapter); if (!status) memcpy(flashed_crc, req->params.data_buf, 4); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac, struct be_dma_mem *nonemb_cmd) { struct be_mcc_wrb *wrb; struct be_cmd_req_acpi_wol_magic_config *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = nonemb_cmd->va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req), wrb, nonemb_cmd); memcpy(req->magic_mac, mac, ETH_ALEN); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num, u8 loopback_type, u8 enable) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_lmode *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, OPCODE_LOWLEVEL_SET_LOOPBACK_MODE, sizeof(*req), wrb, NULL); req->src_port = port_num; req->dest_port = port_num; req->loopback_type = loopback_type; req->loopback_state = enable; status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return 
status; } int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num, u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern) { struct be_mcc_wrb *wrb; struct be_cmd_req_loopback_test *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req), wrb, NULL); req->hdr.timeout = cpu_to_le32(4); req->pattern = cpu_to_le64(pattern); req->src_port = cpu_to_le32(port_num); req->dest_port = cpu_to_le32(port_num); req->pkt_size = cpu_to_le32(pkt_size); req->num_pkts = cpu_to_le32(num_pkts); req->loopback_type = cpu_to_le32(loopback_type); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb); status = le32_to_cpu(resp->status); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern, u32 byte_cnt, struct be_dma_mem *cmd) { struct be_mcc_wrb *wrb; struct be_cmd_req_ddrdma_test *req; int status; int i, j = 0; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = cmd->va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL, OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size, wrb, cmd); req->pattern = cpu_to_le64(pattern); req->byte_count = cpu_to_le32(byte_cnt); for (i = 0; i < byte_cnt; i++) { req->snd_buff[i] = (u8)(pattern >> (j*8)); j++; if (j > 7) j = 0; } status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_ddrdma_test *resp; resp = cmd->va; if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) || resp->snd_err) { status = -1; } } err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_get_seeprom_data(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd) { struct be_mcc_wrb *wrb; struct be_cmd_req_seeprom_read *req; struct be_sge *sge; int status; 
spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = nonemb_cmd->va; sge = nonembedded_sgl(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SEEPROM_READ, sizeof(*req), wrb, nonemb_cmd); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_phy_info *phy_info) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_phy_info *req; struct be_dma_mem cmd; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } cmd.size = sizeof(struct be_cmd_req_get_phy_info); cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); status = -ENOMEM; goto err; } req = cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_PHY_DETAILS, sizeof(*req), wrb, &cmd); status = be_mcc_notify_wait(adapter); if (!status) { struct be_phy_info *resp_phy_info = cmd.va + sizeof(struct be_cmd_req_hdr); phy_info->phy_type = le16_to_cpu(resp_phy_info->phy_type); phy_info->interface_type = le16_to_cpu(resp_phy_info->interface_type); } pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_qos *req; int status; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_QOS, sizeof(*req), wrb, NULL); req->hdr.domain = domain; req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC); req->max_bps_nic = cpu_to_le32(bps); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } int 
be_cmd_get_cntl_attributes(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_cntl_attribs *req; struct be_cmd_resp_cntl_attribs *resp; int status; int payload_len = max(sizeof(*req), sizeof(*resp)); struct mgmt_controller_attrib *attribs; struct be_dma_mem attribs_cmd; memset(&attribs_cmd, 0, sizeof(struct be_dma_mem)); attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs); attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size, &attribs_cmd.dma); if (!attribs_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); return -ENOMEM; } if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); if (!wrb) { status = -EBUSY; goto err; } req = attribs_cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len, wrb, &attribs_cmd); status = be_mbox_notify_wait(adapter); if (!status) { attribs = attribs_cmd.va + sizeof(struct be_cmd_resp_hdr); adapter->hba_port_num = attribs->hba_attribs.phy_port; } err: mutex_unlock(&adapter->mbox_lock); pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va, attribs_cmd.dma); return status; } /* Uses mbox */ int be_cmd_req_native_mode(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_func_cap *req; int status; if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req), wrb, NULL); req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS | CAPABILITY_BE3_NATIVE_ERX_API); req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API); status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb); adapter->be3_native = le32_to_cpu(resp->cap_flags) & CAPABILITY_BE3_NATIVE_ERX_API; } err: 
mutex_unlock(&adapter->mbox_lock); return status; } /* Uses synchronous MCCQ */ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u32 domain, bool *pmac_id_active, u32 *pmac_id, u8 *mac) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_mac_list *req; int status; int mac_count; struct be_dma_mem get_mac_list_cmd; int i; memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem)); get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list); get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev, get_mac_list_cmd.size, &get_mac_list_cmd.dma); if (!get_mac_list_cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure during GET_MAC_LIST\n"); return -ENOMEM; } spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto out; } req = get_mac_list_cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_MAC_LIST, sizeof(*req), wrb, &get_mac_list_cmd); req->hdr.domain = domain; req->mac_type = MAC_ADDRESS_TYPE_NETWORK; req->perm_override = 1; status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_mac_list *resp = get_mac_list_cmd.va; mac_count = resp->true_mac_count + resp->pseudo_mac_count; /* Mac list returned could contain one or more active mac_ids * or one or more pseudo permanant mac addresses. 
If an active * mac_id is present, return first active mac_id found */ for (i = 0; i < mac_count; i++) { struct get_list_macaddr *mac_entry; u16 mac_addr_size; u32 mac_id; mac_entry = &resp->macaddr_list[i]; mac_addr_size = le16_to_cpu(mac_entry->mac_addr_size); /* mac_id is a 32 bit value and mac_addr size * is 6 bytes */ if (mac_addr_size == sizeof(u32)) { *pmac_id_active = true; mac_id = mac_entry->mac_addr_id.s_mac_id.mac_id; *pmac_id = le32_to_cpu(mac_id); goto out; } } /* If no active mac_id found, return first pseudo mac addr */ *pmac_id_active = false; memcpy(mac, resp->macaddr_list[0].mac_addr_id.macaddr, ETH_ALEN); } out: spin_unlock_bh(&adapter->mcc_lock); pci_free_consistent(adapter->pdev, get_mac_list_cmd.size, get_mac_list_cmd.va, get_mac_list_cmd.dma); return status; } /* Uses synchronous MCCQ */ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array, u8 mac_count, u32 domain) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_mac_list *req; int status; struct be_dma_mem cmd; memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct be_cmd_req_set_mac_list); cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma, GFP_KERNEL); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory alloc failure\n"); return -ENOMEM; } spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_MAC_LIST, sizeof(*req), wrb, &cmd); req->hdr.domain = domain; req->mac_count = mac_count; if (mac_count) memcpy(req->mac, mac_array, ETH_ALEN*mac_count); status = be_mcc_notify_wait(adapter); err: dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma); spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_set_hsw_config(struct be_adapter *adapter, u16 pvid, u32 domain, u16 intf_id) { struct be_mcc_wrb *wrb; struct be_cmd_req_set_hsw_config *req; void *ctxt; int status; 
spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_SET_HSW_CONFIG, sizeof(*req), wrb, NULL); req->hdr.domain = domain; AMAP_SET_BITS(struct amap_set_hsw_context, interface_id, ctxt, intf_id); if (pvid) { AMAP_SET_BITS(struct amap_set_hsw_context, pvid_valid, ctxt, 1); AMAP_SET_BITS(struct amap_set_hsw_context, pvid, ctxt, pvid); } be_dws_cpu_to_le(req->context, sizeof(req->context)); status = be_mcc_notify_wait(adapter); err: spin_unlock_bh(&adapter->mcc_lock); return status; } /* Get Hyper switch config */ int be_cmd_get_hsw_config(struct be_adapter *adapter, u16 *pvid, u32 domain, u16 intf_id) { struct be_mcc_wrb *wrb; struct be_cmd_req_get_hsw_config *req; void *ctxt; int status; u16 vid; spin_lock_bh(&adapter->mcc_lock); wrb = wrb_from_mccq(adapter); if (!wrb) { status = -EBUSY; goto err; } req = embedded_payload(wrb); ctxt = &req->context; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON, OPCODE_COMMON_GET_HSW_CONFIG, sizeof(*req), wrb, NULL); req->hdr.domain = domain; AMAP_SET_BITS(struct amap_get_hsw_req_context, interface_id, ctxt, intf_id); AMAP_SET_BITS(struct amap_get_hsw_req_context, pvid_valid, ctxt, 1); be_dws_cpu_to_le(req->context, sizeof(req->context)); status = be_mcc_notify_wait(adapter); if (!status) { struct be_cmd_resp_get_hsw_config *resp = embedded_payload(wrb); be_dws_le_to_cpu(&resp->context, sizeof(resp->context)); vid = AMAP_GET_BITS(struct amap_get_hsw_resp_context, pvid, &resp->context); *pvid = le16_to_cpu(vid); } err: spin_unlock_bh(&adapter->mcc_lock); return status; } int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter) { struct be_mcc_wrb *wrb; struct be_cmd_req_acpi_wol_magic_config_v1 *req; int status; int payload_len = sizeof(*req); struct be_dma_mem cmd; memset(&cmd, 0, sizeof(struct be_dma_mem)); cmd.size = sizeof(struct 
be_cmd_resp_acpi_wol_magic_config_v1); cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma); if (!cmd.va) { dev_err(&adapter->pdev->dev, "Memory allocation failure\n"); return -ENOMEM; } if (mutex_lock_interruptible(&adapter->mbox_lock)) return -1; wrb = wrb_from_mbox(adapter); if (!wrb) { status = -EBUSY; goto err; } req = cmd.va; be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, payload_len, wrb, &cmd); req->hdr.version = 1; req->query_options = BE_GET_WOL_CAP; status = be_mbox_notify_wait(adapter); if (!status) { struct be_cmd_resp_acpi_wol_magic_config_v1 *resp; resp = (struct be_cmd_resp_acpi_wol_magic_config_v1 *) cmd.va; /* the command could succeed misleadingly on old f/w * which is not aware of the V1 version. fake an error. */ if (resp->hdr.response_length < payload_len) { status = -1; goto err; } adapter->wol_cap = resp->wol_settings; } err: mutex_unlock(&adapter->mbox_lock); pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma); return status; }
gpl-2.0
mseskir/android_kernel_vestel_g55
sound/soc/codecs/wm8988.c
4811
27756
/* * wm8988.c -- WM8988 ALSA SoC audio driver * * Copyright 2009 Wolfson Microelectronics plc * Copyright 2005 Openedhand Ltd. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/tlv.h> #include <sound/soc.h> #include <sound/initval.h> #include "wm8988.h" /* * wm8988 register cache * We can't read the WM8988 register space when we * are using 2 wire for device control, so we cache them instead. */ static const struct reg_default wm8988_reg_defaults[] = { { 0, 0x0097 }, { 1, 0x0097 }, { 2, 0x0079 }, { 3, 0x0079 }, { 5, 0x0008 }, { 7, 0x000a }, { 8, 0x0000 }, { 10, 0x00ff }, { 11, 0x00ff }, { 12, 0x000f }, { 13, 0x000f }, { 16, 0x0000 }, { 17, 0x007b }, { 18, 0x0000 }, { 19, 0x0032 }, { 20, 0x0000 }, { 21, 0x00c3 }, { 22, 0x00c3 }, { 23, 0x00c0 }, { 24, 0x0000 }, { 25, 0x0000 }, { 26, 0x0000 }, { 27, 0x0000 }, { 31, 0x0000 }, { 32, 0x0000 }, { 33, 0x0000 }, { 34, 0x0050 }, { 35, 0x0050 }, { 36, 0x0050 }, { 37, 0x0050 }, { 40, 0x0079 }, { 41, 0x0079 }, { 42, 0x0079 }, }; static bool wm8988_writeable(struct device *dev, unsigned int reg) { switch (reg) { case WM8988_LINVOL: case WM8988_RINVOL: case WM8988_LOUT1V: case WM8988_ROUT1V: case WM8988_ADCDAC: case WM8988_IFACE: case WM8988_SRATE: case WM8988_LDAC: case WM8988_RDAC: case WM8988_BASS: case WM8988_TREBLE: case WM8988_RESET: case WM8988_3D: case WM8988_ALC1: case WM8988_ALC2: case WM8988_ALC3: case WM8988_NGATE: case WM8988_LADC: case WM8988_RADC: case WM8988_ADCTL1: case WM8988_ADCTL2: case WM8988_PWR1: case WM8988_PWR2: case 
WM8988_ADCTL3: case WM8988_ADCIN: case WM8988_LADCIN: case WM8988_RADCIN: case WM8988_LOUTM1: case WM8988_LOUTM2: case WM8988_ROUTM1: case WM8988_ROUTM2: case WM8988_LOUT2V: case WM8988_ROUT2V: case WM8988_LPPB: return true; default: return false; } } /* codec private data */ struct wm8988_priv { struct regmap *regmap; unsigned int sysclk; struct snd_pcm_hw_constraint_list *sysclk_constraints; }; #define wm8988_reset(c) snd_soc_write(c, WM8988_RESET, 0) /* * WM8988 Controls */ static const char *bass_boost_txt[] = {"Linear Control", "Adaptive Boost"}; static const struct soc_enum bass_boost = SOC_ENUM_SINGLE(WM8988_BASS, 7, 2, bass_boost_txt); static const char *bass_filter_txt[] = { "130Hz @ 48kHz", "200Hz @ 48kHz" }; static const struct soc_enum bass_filter = SOC_ENUM_SINGLE(WM8988_BASS, 6, 2, bass_filter_txt); static const char *treble_txt[] = {"8kHz", "4kHz"}; static const struct soc_enum treble = SOC_ENUM_SINGLE(WM8988_TREBLE, 6, 2, treble_txt); static const char *stereo_3d_lc_txt[] = {"200Hz", "500Hz"}; static const struct soc_enum stereo_3d_lc = SOC_ENUM_SINGLE(WM8988_3D, 5, 2, stereo_3d_lc_txt); static const char *stereo_3d_uc_txt[] = {"2.2kHz", "1.5kHz"}; static const struct soc_enum stereo_3d_uc = SOC_ENUM_SINGLE(WM8988_3D, 6, 2, stereo_3d_uc_txt); static const char *stereo_3d_func_txt[] = {"Capture", "Playback"}; static const struct soc_enum stereo_3d_func = SOC_ENUM_SINGLE(WM8988_3D, 7, 2, stereo_3d_func_txt); static const char *alc_func_txt[] = {"Off", "Right", "Left", "Stereo"}; static const struct soc_enum alc_func = SOC_ENUM_SINGLE(WM8988_ALC1, 7, 4, alc_func_txt); static const char *ng_type_txt[] = {"Constant PGA Gain", "Mute ADC Output"}; static const struct soc_enum ng_type = SOC_ENUM_SINGLE(WM8988_NGATE, 1, 2, ng_type_txt); static const char *deemph_txt[] = {"None", "32Khz", "44.1Khz", "48Khz"}; static const struct soc_enum deemph = SOC_ENUM_SINGLE(WM8988_ADCDAC, 1, 4, deemph_txt); static const char *adcpol_txt[] = {"Normal", "L Invert", "R 
Invert", "L + R Invert"}; static const struct soc_enum adcpol = SOC_ENUM_SINGLE(WM8988_ADCDAC, 5, 4, adcpol_txt); static const DECLARE_TLV_DB_SCALE(pga_tlv, -1725, 75, 0); static const DECLARE_TLV_DB_SCALE(adc_tlv, -9750, 50, 1); static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1); static const DECLARE_TLV_DB_SCALE(out_tlv, -12100, 100, 1); static const DECLARE_TLV_DB_SCALE(bypass_tlv, -1500, 300, 0); static const struct snd_kcontrol_new wm8988_snd_controls[] = { SOC_ENUM("Bass Boost", bass_boost), SOC_ENUM("Bass Filter", bass_filter), SOC_SINGLE("Bass Volume", WM8988_BASS, 0, 15, 1), SOC_SINGLE("Treble Volume", WM8988_TREBLE, 0, 15, 0), SOC_ENUM("Treble Cut-off", treble), SOC_SINGLE("3D Switch", WM8988_3D, 0, 1, 0), SOC_SINGLE("3D Volume", WM8988_3D, 1, 15, 0), SOC_ENUM("3D Lower Cut-off", stereo_3d_lc), SOC_ENUM("3D Upper Cut-off", stereo_3d_uc), SOC_ENUM("3D Mode", stereo_3d_func), SOC_SINGLE("ALC Capture Target Volume", WM8988_ALC1, 0, 7, 0), SOC_SINGLE("ALC Capture Max Volume", WM8988_ALC1, 4, 7, 0), SOC_ENUM("ALC Capture Function", alc_func), SOC_SINGLE("ALC Capture ZC Switch", WM8988_ALC2, 7, 1, 0), SOC_SINGLE("ALC Capture Hold Time", WM8988_ALC2, 0, 15, 0), SOC_SINGLE("ALC Capture Decay Time", WM8988_ALC3, 4, 15, 0), SOC_SINGLE("ALC Capture Attack Time", WM8988_ALC3, 0, 15, 0), SOC_SINGLE("ALC Capture NG Threshold", WM8988_NGATE, 3, 31, 0), SOC_ENUM("ALC Capture NG Type", ng_type), SOC_SINGLE("ALC Capture NG Switch", WM8988_NGATE, 0, 1, 0), SOC_SINGLE("ZC Timeout Switch", WM8988_ADCTL1, 0, 1, 0), SOC_DOUBLE_R_TLV("Capture Digital Volume", WM8988_LADC, WM8988_RADC, 0, 255, 0, adc_tlv), SOC_DOUBLE_R_TLV("Capture Volume", WM8988_LINVOL, WM8988_RINVOL, 0, 63, 0, pga_tlv), SOC_DOUBLE_R("Capture ZC Switch", WM8988_LINVOL, WM8988_RINVOL, 6, 1, 0), SOC_DOUBLE_R("Capture Switch", WM8988_LINVOL, WM8988_RINVOL, 7, 1, 1), SOC_ENUM("Playback De-emphasis", deemph), SOC_ENUM("Capture Polarity", adcpol), SOC_SINGLE("Playback 6dB Attenuate", WM8988_ADCDAC, 7, 1, 0), 
SOC_SINGLE("Capture 6dB Attenuate", WM8988_ADCDAC, 8, 1, 0), SOC_DOUBLE_R_TLV("PCM Volume", WM8988_LDAC, WM8988_RDAC, 0, 255, 0, dac_tlv), SOC_SINGLE_TLV("Left Mixer Left Bypass Volume", WM8988_LOUTM1, 4, 7, 1, bypass_tlv), SOC_SINGLE_TLV("Left Mixer Right Bypass Volume", WM8988_LOUTM2, 4, 7, 1, bypass_tlv), SOC_SINGLE_TLV("Right Mixer Left Bypass Volume", WM8988_ROUTM1, 4, 7, 1, bypass_tlv), SOC_SINGLE_TLV("Right Mixer Right Bypass Volume", WM8988_ROUTM2, 4, 7, 1, bypass_tlv), SOC_DOUBLE_R("Output 1 Playback ZC Switch", WM8988_LOUT1V, WM8988_ROUT1V, 7, 1, 0), SOC_DOUBLE_R_TLV("Output 1 Playback Volume", WM8988_LOUT1V, WM8988_ROUT1V, 0, 127, 0, out_tlv), SOC_DOUBLE_R("Output 2 Playback ZC Switch", WM8988_LOUT2V, WM8988_ROUT2V, 7, 1, 0), SOC_DOUBLE_R_TLV("Output 2 Playback Volume", WM8988_LOUT2V, WM8988_ROUT2V, 0, 127, 0, out_tlv), }; /* * DAPM Controls */ static int wm8988_lrc_control(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = w->codec; u16 adctl2 = snd_soc_read(codec, WM8988_ADCTL2); /* Use the DAC to gate LRC if active, otherwise use ADC */ if (snd_soc_read(codec, WM8988_PWR2) & 0x180) adctl2 &= ~0x4; else adctl2 |= 0x4; return snd_soc_write(codec, WM8988_ADCTL2, adctl2); } static const char *wm8988_line_texts[] = { "Line 1", "Line 2", "PGA", "Differential"}; static const unsigned int wm8988_line_values[] = { 0, 1, 3, 4}; static const struct soc_enum wm8988_lline_enum = SOC_VALUE_ENUM_SINGLE(WM8988_LOUTM1, 0, 7, ARRAY_SIZE(wm8988_line_texts), wm8988_line_texts, wm8988_line_values); static const struct snd_kcontrol_new wm8988_left_line_controls = SOC_DAPM_VALUE_ENUM("Route", wm8988_lline_enum); static const struct soc_enum wm8988_rline_enum = SOC_VALUE_ENUM_SINGLE(WM8988_ROUTM1, 0, 7, ARRAY_SIZE(wm8988_line_texts), wm8988_line_texts, wm8988_line_values); static const struct snd_kcontrol_new wm8988_right_line_controls = SOC_DAPM_VALUE_ENUM("Route", wm8988_lline_enum); /* Left Mixer */ static const struct 
snd_kcontrol_new wm8988_left_mixer_controls[] = { SOC_DAPM_SINGLE("Playback Switch", WM8988_LOUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_LOUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8988_LOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_LOUTM2, 7, 1, 0), }; /* Right Mixer */ static const struct snd_kcontrol_new wm8988_right_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", WM8988_ROUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8988_ROUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Playback Switch", WM8988_ROUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8988_ROUTM2, 7, 1, 0), }; static const char *wm8988_pga_sel[] = {"Line 1", "Line 2", "Differential"}; static const unsigned int wm8988_pga_val[] = { 0, 1, 3 }; /* Left PGA Mux */ static const struct soc_enum wm8988_lpga_enum = SOC_VALUE_ENUM_SINGLE(WM8988_LADCIN, 6, 3, ARRAY_SIZE(wm8988_pga_sel), wm8988_pga_sel, wm8988_pga_val); static const struct snd_kcontrol_new wm8988_left_pga_controls = SOC_DAPM_VALUE_ENUM("Route", wm8988_lpga_enum); /* Right PGA Mux */ static const struct soc_enum wm8988_rpga_enum = SOC_VALUE_ENUM_SINGLE(WM8988_RADCIN, 6, 3, ARRAY_SIZE(wm8988_pga_sel), wm8988_pga_sel, wm8988_pga_val); static const struct snd_kcontrol_new wm8988_right_pga_controls = SOC_DAPM_VALUE_ENUM("Route", wm8988_rpga_enum); /* Differential Mux */ static const char *wm8988_diff_sel[] = {"Line 1", "Line 2"}; static const struct soc_enum diffmux = SOC_ENUM_SINGLE(WM8988_ADCIN, 8, 2, wm8988_diff_sel); static const struct snd_kcontrol_new wm8988_diffmux_controls = SOC_DAPM_ENUM("Route", diffmux); /* Mono ADC Mux */ static const char *wm8988_mono_mux[] = {"Stereo", "Mono (Left)", "Mono (Right)", "Digital Mono"}; static const struct soc_enum monomux = SOC_ENUM_SINGLE(WM8988_ADCIN, 6, 4, wm8988_mono_mux); static const struct snd_kcontrol_new wm8988_monomux_controls = SOC_DAPM_ENUM("Route", monomux); static const struct snd_soc_dapm_widget wm8988_dapm_widgets[] = { 
SND_SOC_DAPM_SUPPLY("Mic Bias", WM8988_PWR1, 1, 0, NULL, 0), SND_SOC_DAPM_MUX("Differential Mux", SND_SOC_NOPM, 0, 0, &wm8988_diffmux_controls), SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0, &wm8988_monomux_controls), SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0, &wm8988_monomux_controls), SND_SOC_DAPM_MUX("Left PGA Mux", WM8988_PWR1, 5, 0, &wm8988_left_pga_controls), SND_SOC_DAPM_MUX("Right PGA Mux", WM8988_PWR1, 4, 0, &wm8988_right_pga_controls), SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0, &wm8988_left_line_controls), SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0, &wm8988_right_line_controls), SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8988_PWR1, 2, 0), SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8988_PWR1, 3, 0), SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8988_PWR2, 7, 0), SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8988_PWR2, 8, 0), SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, &wm8988_left_mixer_controls[0], ARRAY_SIZE(wm8988_left_mixer_controls)), SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, &wm8988_right_mixer_controls[0], ARRAY_SIZE(wm8988_right_mixer_controls)), SND_SOC_DAPM_PGA("Right Out 2", WM8988_PWR2, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 2", WM8988_PWR2, 4, 0, NULL, 0), SND_SOC_DAPM_PGA("Right Out 1", WM8988_PWR2, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 1", WM8988_PWR2, 6, 0, NULL, 0), SND_SOC_DAPM_POST("LRC control", wm8988_lrc_control), SND_SOC_DAPM_OUTPUT("LOUT1"), SND_SOC_DAPM_OUTPUT("ROUT1"), SND_SOC_DAPM_OUTPUT("LOUT2"), SND_SOC_DAPM_OUTPUT("ROUT2"), SND_SOC_DAPM_OUTPUT("VREF"), SND_SOC_DAPM_INPUT("LINPUT1"), SND_SOC_DAPM_INPUT("LINPUT2"), SND_SOC_DAPM_INPUT("RINPUT1"), SND_SOC_DAPM_INPUT("RINPUT2"), }; static const struct snd_soc_dapm_route wm8988_dapm_routes[] = { { "Left Line Mux", "Line 1", "LINPUT1" }, { "Left Line Mux", "Line 2", "LINPUT2" }, { "Left Line Mux", "PGA", "Left PGA Mux" }, { "Left Line Mux", "Differential", "Differential Mux" }, { "Right Line Mux", 
"Line 1", "RINPUT1" }, { "Right Line Mux", "Line 2", "RINPUT2" }, { "Right Line Mux", "PGA", "Right PGA Mux" }, { "Right Line Mux", "Differential", "Differential Mux" }, { "Left PGA Mux", "Line 1", "LINPUT1" }, { "Left PGA Mux", "Line 2", "LINPUT2" }, { "Left PGA Mux", "Differential", "Differential Mux" }, { "Right PGA Mux", "Line 1", "RINPUT1" }, { "Right PGA Mux", "Line 2", "RINPUT2" }, { "Right PGA Mux", "Differential", "Differential Mux" }, { "Differential Mux", "Line 1", "LINPUT1" }, { "Differential Mux", "Line 1", "RINPUT1" }, { "Differential Mux", "Line 2", "LINPUT2" }, { "Differential Mux", "Line 2", "RINPUT2" }, { "Left ADC Mux", "Stereo", "Left PGA Mux" }, { "Left ADC Mux", "Mono (Left)", "Left PGA Mux" }, { "Left ADC Mux", "Digital Mono", "Left PGA Mux" }, { "Right ADC Mux", "Stereo", "Right PGA Mux" }, { "Right ADC Mux", "Mono (Right)", "Right PGA Mux" }, { "Right ADC Mux", "Digital Mono", "Right PGA Mux" }, { "Left ADC", NULL, "Left ADC Mux" }, { "Right ADC", NULL, "Right ADC Mux" }, { "Left Line Mux", "Line 1", "LINPUT1" }, { "Left Line Mux", "Line 2", "LINPUT2" }, { "Left Line Mux", "PGA", "Left PGA Mux" }, { "Left Line Mux", "Differential", "Differential Mux" }, { "Right Line Mux", "Line 1", "RINPUT1" }, { "Right Line Mux", "Line 2", "RINPUT2" }, { "Right Line Mux", "PGA", "Right PGA Mux" }, { "Right Line Mux", "Differential", "Differential Mux" }, { "Left Mixer", "Playback Switch", "Left DAC" }, { "Left Mixer", "Left Bypass Switch", "Left Line Mux" }, { "Left Mixer", "Right Playback Switch", "Right DAC" }, { "Left Mixer", "Right Bypass Switch", "Right Line Mux" }, { "Right Mixer", "Left Playback Switch", "Left DAC" }, { "Right Mixer", "Left Bypass Switch", "Left Line Mux" }, { "Right Mixer", "Playback Switch", "Right DAC" }, { "Right Mixer", "Right Bypass Switch", "Right Line Mux" }, { "Left Out 1", NULL, "Left Mixer" }, { "LOUT1", NULL, "Left Out 1" }, { "Right Out 1", NULL, "Right Mixer" }, { "ROUT1", NULL, "Right Out 1" }, { "Left Out 2", NULL, 
"Left Mixer" }, { "LOUT2", NULL, "Left Out 2" }, { "Right Out 2", NULL, "Right Mixer" }, { "ROUT2", NULL, "Right Out 2" }, }; struct _coeff_div { u32 mclk; u32 rate; u16 fs; u8 sr:5; u8 usb:1; }; /* codec hifi mclk clock divider coefficients */ static const struct _coeff_div coeff_div[] = { /* 8k */ {12288000, 8000, 1536, 0x6, 0x0}, {11289600, 8000, 1408, 0x16, 0x0}, {18432000, 8000, 2304, 0x7, 0x0}, {16934400, 8000, 2112, 0x17, 0x0}, {12000000, 8000, 1500, 0x6, 0x1}, /* 11.025k */ {11289600, 11025, 1024, 0x18, 0x0}, {16934400, 11025, 1536, 0x19, 0x0}, {12000000, 11025, 1088, 0x19, 0x1}, /* 16k */ {12288000, 16000, 768, 0xa, 0x0}, {18432000, 16000, 1152, 0xb, 0x0}, {12000000, 16000, 750, 0xa, 0x1}, /* 22.05k */ {11289600, 22050, 512, 0x1a, 0x0}, {16934400, 22050, 768, 0x1b, 0x0}, {12000000, 22050, 544, 0x1b, 0x1}, /* 32k */ {12288000, 32000, 384, 0xc, 0x0}, {18432000, 32000, 576, 0xd, 0x0}, {12000000, 32000, 375, 0xa, 0x1}, /* 44.1k */ {11289600, 44100, 256, 0x10, 0x0}, {16934400, 44100, 384, 0x11, 0x0}, {12000000, 44100, 272, 0x11, 0x1}, /* 48k */ {12288000, 48000, 256, 0x0, 0x0}, {18432000, 48000, 384, 0x1, 0x0}, {12000000, 48000, 250, 0x0, 0x1}, /* 88.2k */ {11289600, 88200, 128, 0x1e, 0x0}, {16934400, 88200, 192, 0x1f, 0x0}, {12000000, 88200, 136, 0x1f, 0x1}, /* 96k */ {12288000, 96000, 128, 0xe, 0x0}, {18432000, 96000, 192, 0xf, 0x0}, {12000000, 96000, 125, 0xe, 0x1}, }; static inline int get_coeff(int mclk, int rate) { int i; for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk) return i; } return -EINVAL; } /* The set of rates we can generate from the above for each SYSCLK */ static unsigned int rates_12288[] = { 8000, 12000, 16000, 24000, 24000, 32000, 48000, 96000, }; static struct snd_pcm_hw_constraint_list constraints_12288 = { .count = ARRAY_SIZE(rates_12288), .list = rates_12288, }; static unsigned int rates_112896[] = { 8000, 11025, 22050, 44100, }; static struct snd_pcm_hw_constraint_list 
constraints_112896 = { .count = ARRAY_SIZE(rates_112896), .list = rates_112896, }; static unsigned int rates_12[] = { 8000, 11025, 12000, 16000, 22050, 2400, 32000, 41100, 48000, 48000, 88235, 96000, }; static struct snd_pcm_hw_constraint_list constraints_12 = { .count = ARRAY_SIZE(rates_12), .list = rates_12, }; /* * Note that this should be called from init rather than from hw_params. */ static int wm8988_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec); switch (freq) { case 11289600: case 18432000: case 22579200: case 36864000: wm8988->sysclk_constraints = &constraints_112896; wm8988->sysclk = freq; return 0; case 12288000: case 16934400: case 24576000: case 33868800: wm8988->sysclk_constraints = &constraints_12288; wm8988->sysclk = freq; return 0; case 12000000: case 24000000: wm8988->sysclk_constraints = &constraints_12; wm8988->sysclk = freq; return 0; } return -EINVAL; } static int wm8988_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 iface = 0; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: iface = 0x0040; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: iface |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: iface |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: iface |= 0x0003; break; case SND_SOC_DAIFMT_DSP_B: iface |= 0x0013; break; default: return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: iface |= 0x0090; break; case SND_SOC_DAIFMT_IB_NF: iface |= 0x0080; break; case SND_SOC_DAIFMT_NB_IF: iface |= 0x0010; break; default: return -EINVAL; } 
snd_soc_write(codec, WM8988_IFACE, iface); return 0; } static int wm8988_pcm_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec); /* The set of sample rates that can be supported depends on the * MCLK supplied to the CODEC - enforce this. */ if (!wm8988->sysclk) { dev_err(codec->dev, "No MCLK configured, call set_sysclk() on init\n"); return -EINVAL; } snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, wm8988->sysclk_constraints); return 0; } static int wm8988_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec); u16 iface = snd_soc_read(codec, WM8988_IFACE) & 0x1f3; u16 srate = snd_soc_read(codec, WM8988_SRATE) & 0x180; int coeff; coeff = get_coeff(wm8988->sysclk, params_rate(params)); if (coeff < 0) { coeff = get_coeff(wm8988->sysclk / 2, params_rate(params)); srate |= 0x40; } if (coeff < 0) { dev_err(codec->dev, "Unable to configure sample rate %dHz with %dHz MCLK\n", params_rate(params), wm8988->sysclk); return coeff; } /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: iface |= 0x0004; break; case SNDRV_PCM_FORMAT_S24_LE: iface |= 0x0008; break; case SNDRV_PCM_FORMAT_S32_LE: iface |= 0x000c; break; } /* set iface & srate */ snd_soc_write(codec, WM8988_IFACE, iface); if (coeff >= 0) snd_soc_write(codec, WM8988_SRATE, srate | (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb); return 0; } static int wm8988_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 mute_reg = snd_soc_read(codec, WM8988_ADCDAC) & 0xfff7; if (mute) snd_soc_write(codec, WM8988_ADCDAC, mute_reg | 0x8); else snd_soc_write(codec, 
WM8988_ADCDAC, mute_reg); return 0; } static int wm8988_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec); u16 pwr_reg = snd_soc_read(codec, WM8988_PWR1) & ~0x1c1; switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: /* VREF, VMID=2x50k, digital enabled */ snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x00c0); break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { regcache_sync(wm8988->regmap); /* VREF, VMID=2x5k */ snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x1c1); /* Charge caps */ msleep(100); } /* VREF, VMID=2*500k, digital stopped */ snd_soc_write(codec, WM8988_PWR1, pwr_reg | 0x0141); break; case SND_SOC_BIAS_OFF: snd_soc_write(codec, WM8988_PWR1, 0x0000); break; } codec->dapm.bias_level = level; return 0; } #define WM8988_RATES SNDRV_PCM_RATE_8000_96000 #define WM8988_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops wm8988_ops = { .startup = wm8988_pcm_startup, .hw_params = wm8988_pcm_hw_params, .set_fmt = wm8988_set_dai_fmt, .set_sysclk = wm8988_set_dai_sysclk, .digital_mute = wm8988_mute, }; static struct snd_soc_dai_driver wm8988_dai = { .name = "wm8988-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = WM8988_RATES, .formats = WM8988_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8988_RATES, .formats = WM8988_FORMATS, }, .ops = &wm8988_ops, .symmetric_rates = 1, }; static int wm8988_suspend(struct snd_soc_codec *codec) { struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec); wm8988_set_bias_level(codec, SND_SOC_BIAS_OFF); regcache_mark_dirty(wm8988->regmap); return 0; } static int wm8988_resume(struct snd_soc_codec *codec) { wm8988_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int wm8988_probe(struct snd_soc_codec *codec) { 
struct wm8988_priv *wm8988 = snd_soc_codec_get_drvdata(codec); int ret = 0; codec->control_data = wm8988->regmap; ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP); if (ret < 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } ret = wm8988_reset(codec); if (ret < 0) { dev_err(codec->dev, "Failed to issue reset\n"); return ret; } /* set the update bits (we always update left then right) */ snd_soc_update_bits(codec, WM8988_RADC, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8988_RDAC, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8988_ROUT1V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8988_ROUT2V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8988_RINVOL, 0x0100, 0x0100); wm8988_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static int wm8988_remove(struct snd_soc_codec *codec) { wm8988_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8988 = { .probe = wm8988_probe, .remove = wm8988_remove, .suspend = wm8988_suspend, .resume = wm8988_resume, .set_bias_level = wm8988_set_bias_level, .controls = wm8988_snd_controls, .num_controls = ARRAY_SIZE(wm8988_snd_controls), .dapm_widgets = wm8988_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8988_dapm_widgets), .dapm_routes = wm8988_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm8988_dapm_routes), }; static struct regmap_config wm8988_regmap = { .reg_bits = 7, .val_bits = 9, .max_register = WM8988_LPPB, .writeable_reg = wm8988_writeable, .cache_type = REGCACHE_RBTREE, .reg_defaults = wm8988_reg_defaults, .num_reg_defaults = ARRAY_SIZE(wm8988_reg_defaults), }; #if defined(CONFIG_SPI_MASTER) static int __devinit wm8988_spi_probe(struct spi_device *spi) { struct wm8988_priv *wm8988; int ret; wm8988 = devm_kzalloc(&spi->dev, sizeof(struct wm8988_priv), GFP_KERNEL); if (wm8988 == NULL) return -ENOMEM; wm8988->regmap = regmap_init_spi(spi, &wm8988_regmap); if (IS_ERR(wm8988->regmap)) { ret = PTR_ERR(wm8988->regmap); 
dev_err(&spi->dev, "Failed to init regmap: %d\n", ret); return ret; } spi_set_drvdata(spi, wm8988); ret = snd_soc_register_codec(&spi->dev, &soc_codec_dev_wm8988, &wm8988_dai, 1); if (ret != 0) regmap_exit(wm8988->regmap); return ret; } static int __devexit wm8988_spi_remove(struct spi_device *spi) { struct wm8988_priv *wm8988 = spi_get_drvdata(spi); snd_soc_unregister_codec(&spi->dev); regmap_exit(wm8988->regmap); return 0; } static struct spi_driver wm8988_spi_driver = { .driver = { .name = "wm8988", .owner = THIS_MODULE, }, .probe = wm8988_spi_probe, .remove = __devexit_p(wm8988_spi_remove), }; #endif /* CONFIG_SPI_MASTER */ #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static __devinit int wm8988_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8988_priv *wm8988; int ret; wm8988 = devm_kzalloc(&i2c->dev, sizeof(struct wm8988_priv), GFP_KERNEL); if (wm8988 == NULL) return -ENOMEM; i2c_set_clientdata(i2c, wm8988); wm8988->regmap = regmap_init_i2c(i2c, &wm8988_regmap); if (IS_ERR(wm8988->regmap)) { ret = PTR_ERR(wm8988->regmap); dev_err(&i2c->dev, "Failed to init regmap: %d\n", ret); return ret; } ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8988, &wm8988_dai, 1); if (ret != 0) regmap_exit(wm8988->regmap); return ret; } static __devexit int wm8988_i2c_remove(struct i2c_client *client) { struct wm8988_priv *wm8988 = i2c_get_clientdata(client); snd_soc_unregister_codec(&client->dev); regmap_exit(wm8988->regmap); return 0; } static const struct i2c_device_id wm8988_i2c_id[] = { { "wm8988", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8988_i2c_id); static struct i2c_driver wm8988_i2c_driver = { .driver = { .name = "wm8988", .owner = THIS_MODULE, }, .probe = wm8988_i2c_probe, .remove = __devexit_p(wm8988_i2c_remove), .id_table = wm8988_i2c_id, }; #endif static int __init wm8988_modinit(void) { int ret = 0; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ret = i2c_add_driver(&wm8988_i2c_driver); if (ret != 0) { 
printk(KERN_ERR "Failed to register WM8988 I2C driver: %d\n", ret); } #endif #if defined(CONFIG_SPI_MASTER) ret = spi_register_driver(&wm8988_spi_driver); if (ret != 0) { printk(KERN_ERR "Failed to register WM8988 SPI driver: %d\n", ret); } #endif return ret; } module_init(wm8988_modinit); static void __exit wm8988_exit(void) { #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) i2c_del_driver(&wm8988_i2c_driver); #endif #if defined(CONFIG_SPI_MASTER) spi_unregister_driver(&wm8988_spi_driver); #endif } module_exit(wm8988_exit); MODULE_DESCRIPTION("ASoC WM8988 driver"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL");
gpl-2.0
IllusionRom-deprecated/android_kernel_lge_msm8974
drivers/uwb/lc-rc.c
5323
11139
/* * Ultra Wide Band * Life cycle of radio controllers * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: docs * * A UWB radio controller is also a UWB device, so it embeds one... * * List of RCs comes from the 'struct class uwb_rc_class'. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/device.h> #include <linux/err.h> #include <linux/random.h> #include <linux/kdev_t.h> #include <linux/etherdevice.h> #include <linux/usb.h> #include <linux/slab.h> #include <linux/export.h> #include "uwb-internal.h" static int uwb_rc_index_match(struct device *dev, void *data) { int *index = data; struct uwb_rc *rc = dev_get_drvdata(dev); if (rc->index == *index) return 1; return 0; } static struct uwb_rc *uwb_rc_find_by_index(int index) { struct device *dev; struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, &index, uwb_rc_index_match); if (dev) rc = dev_get_drvdata(dev); return rc; } static int uwb_rc_new_index(void) { int index = 0; for (;;) { if (!uwb_rc_find_by_index(index)) return index; if (++index < 0) index = 0; } } /** * Release the backing device of a uwb_rc that has been dynamically allocated. 
*/ static void uwb_rc_sys_release(struct device *dev) { struct uwb_dev *uwb_dev = container_of(dev, struct uwb_dev, dev); struct uwb_rc *rc = container_of(uwb_dev, struct uwb_rc, uwb_dev); uwb_rc_ie_release(rc); kfree(rc); } void uwb_rc_init(struct uwb_rc *rc) { struct uwb_dev *uwb_dev = &rc->uwb_dev; uwb_dev_init(uwb_dev); rc->uwb_dev.dev.class = &uwb_rc_class; rc->uwb_dev.dev.release = uwb_rc_sys_release; uwb_rc_neh_create(rc); rc->beaconing = -1; rc->scan_type = UWB_SCAN_DISABLED; INIT_LIST_HEAD(&rc->notifs_chain.list); mutex_init(&rc->notifs_chain.mutex); INIT_LIST_HEAD(&rc->uwb_beca.list); mutex_init(&rc->uwb_beca.mutex); uwb_drp_avail_init(rc); uwb_rc_ie_init(rc); uwb_rsv_init(rc); uwb_rc_pal_init(rc); } EXPORT_SYMBOL_GPL(uwb_rc_init); struct uwb_rc *uwb_rc_alloc(void) { struct uwb_rc *rc; rc = kzalloc(sizeof(*rc), GFP_KERNEL); if (rc == NULL) return NULL; uwb_rc_init(rc); return rc; } EXPORT_SYMBOL_GPL(uwb_rc_alloc); static struct attribute *rc_attrs[] = { &dev_attr_mac_address.attr, &dev_attr_scan.attr, &dev_attr_beacon.attr, NULL, }; static struct attribute_group rc_attr_group = { .attrs = rc_attrs, }; /* * Registration of sysfs specific stuff */ static int uwb_rc_sys_add(struct uwb_rc *rc) { return sysfs_create_group(&rc->uwb_dev.dev.kobj, &rc_attr_group); } static void __uwb_rc_sys_rm(struct uwb_rc *rc) { sysfs_remove_group(&rc->uwb_dev.dev.kobj, &rc_attr_group); } /** * uwb_rc_mac_addr_setup - get an RC's EUI-48 address or set it * @rc: the radio controller. * * If the EUI-48 address is 00:00:00:00:00:00 or FF:FF:FF:FF:FF:FF * then a random locally administered EUI-48 is generated and set on * the device. The probability of address collisions is sufficiently * unlikely (1/2^40 = 9.1e-13) that they're not checked for. 
*/ static int uwb_rc_mac_addr_setup(struct uwb_rc *rc) { int result; struct device *dev = &rc->uwb_dev.dev; struct uwb_dev *uwb_dev = &rc->uwb_dev; char devname[UWB_ADDR_STRSIZE]; struct uwb_mac_addr addr; result = uwb_rc_mac_addr_get(rc, &addr); if (result < 0) { dev_err(dev, "cannot retrieve UWB EUI-48 address: %d\n", result); return result; } if (uwb_mac_addr_unset(&addr) || uwb_mac_addr_bcast(&addr)) { addr.data[0] = 0x02; /* locally administered and unicast */ get_random_bytes(&addr.data[1], sizeof(addr.data)-1); result = uwb_rc_mac_addr_set(rc, &addr); if (result < 0) { uwb_mac_addr_print(devname, sizeof(devname), &addr); dev_err(dev, "cannot set EUI-48 address %s: %d\n", devname, result); return result; } } uwb_dev->mac_addr = addr; return 0; } static int uwb_rc_setup(struct uwb_rc *rc) { int result; struct device *dev = &rc->uwb_dev.dev; result = uwb_radio_setup(rc); if (result < 0) { dev_err(dev, "cannot setup UWB radio: %d\n", result); goto error; } result = uwb_rc_mac_addr_setup(rc); if (result < 0) { dev_err(dev, "cannot setup UWB MAC address: %d\n", result); goto error; } result = uwb_rc_dev_addr_assign(rc); if (result < 0) { dev_err(dev, "cannot assign UWB DevAddr: %d\n", result); goto error; } result = uwb_rc_ie_setup(rc); if (result < 0) { dev_err(dev, "cannot setup IE subsystem: %d\n", result); goto error_ie_setup; } result = uwb_rsv_setup(rc); if (result < 0) { dev_err(dev, "cannot setup reservation subsystem: %d\n", result); goto error_rsv_setup; } uwb_dbg_add_rc(rc); return 0; error_rsv_setup: uwb_rc_ie_release(rc); error_ie_setup: error: return result; } /** * Register a new UWB radio controller * * Did you call uwb_rc_init() on your rc? * * We assume that this is being called with a > 0 refcount on * it [through ops->{get|put}_device(). We'll take our own, though. 
* * @parent_dev is our real device, the one that provides the actual UWB device */ int uwb_rc_add(struct uwb_rc *rc, struct device *parent_dev, void *priv) { int result; struct device *dev; char macbuf[UWB_ADDR_STRSIZE], devbuf[UWB_ADDR_STRSIZE]; rc->index = uwb_rc_new_index(); dev = &rc->uwb_dev.dev; dev_set_name(dev, "uwb%d", rc->index); rc->priv = priv; init_waitqueue_head(&rc->uwbd.wq); INIT_LIST_HEAD(&rc->uwbd.event_list); spin_lock_init(&rc->uwbd.event_list_lock); uwbd_start(rc); result = rc->start(rc); if (result < 0) goto error_rc_start; result = uwb_rc_setup(rc); if (result < 0) { dev_err(dev, "cannot setup UWB radio controller: %d\n", result); goto error_rc_setup; } result = uwb_dev_add(&rc->uwb_dev, parent_dev, rc); if (result < 0 && result != -EADDRNOTAVAIL) goto error_dev_add; result = uwb_rc_sys_add(rc); if (result < 0) { dev_err(parent_dev, "cannot register UWB radio controller " "dev attributes: %d\n", result); goto error_sys_add; } uwb_mac_addr_print(macbuf, sizeof(macbuf), &rc->uwb_dev.mac_addr); uwb_dev_addr_print(devbuf, sizeof(devbuf), &rc->uwb_dev.dev_addr); dev_info(dev, "new uwb radio controller (mac %s dev %s) on %s %s\n", macbuf, devbuf, parent_dev->bus->name, dev_name(parent_dev)); rc->ready = 1; return 0; error_sys_add: uwb_dev_rm(&rc->uwb_dev); error_dev_add: error_rc_setup: rc->stop(rc); error_rc_start: uwbd_stop(rc); return result; } EXPORT_SYMBOL_GPL(uwb_rc_add); static int uwb_dev_offair_helper(struct device *dev, void *priv) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); return __uwb_dev_offair(uwb_dev, uwb_dev->rc); } /* * Remove a Radio Controller; stop beaconing/scanning, disconnect all children */ void uwb_rc_rm(struct uwb_rc *rc) { rc->ready = 0; uwb_dbg_del_rc(rc); uwb_rsv_remove_all(rc); uwb_radio_shutdown(rc); rc->stop(rc); uwbd_stop(rc); uwb_rc_neh_destroy(rc); uwb_dev_lock(&rc->uwb_dev); rc->priv = NULL; rc->cmd = NULL; uwb_dev_unlock(&rc->uwb_dev); mutex_lock(&rc->uwb_beca.mutex); uwb_dev_for_each(rc, 
uwb_dev_offair_helper, NULL); __uwb_rc_sys_rm(rc); mutex_unlock(&rc->uwb_beca.mutex); uwb_rsv_cleanup(rc); uwb_beca_release(rc); uwb_dev_rm(&rc->uwb_dev); } EXPORT_SYMBOL_GPL(uwb_rc_rm); static int find_rc_try_get(struct device *dev, void *data) { struct uwb_rc *target_rc = data; struct uwb_rc *rc = dev_get_drvdata(dev); if (rc == NULL) { WARN_ON(1); return 0; } if (rc == target_rc) { if (rc->ready == 0) return 0; else return 1; } return 0; } /** * Given a radio controller descriptor, validate and refcount it * * @returns NULL if the rc does not exist or is quiescing; the ptr to * it otherwise. */ struct uwb_rc *__uwb_rc_try_get(struct uwb_rc *target_rc) { struct device *dev; struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, target_rc, find_rc_try_get); if (dev) { rc = dev_get_drvdata(dev); __uwb_rc_get(rc); } return rc; } EXPORT_SYMBOL_GPL(__uwb_rc_try_get); /* * RC get for external refcount acquirers... * * Increments the refcount of the device and it's backend modules */ static inline struct uwb_rc *uwb_rc_get(struct uwb_rc *rc) { if (rc->ready == 0) return NULL; uwb_dev_get(&rc->uwb_dev); return rc; } static int find_rc_grandpa(struct device *dev, void *data) { struct device *grandpa_dev = data; struct uwb_rc *rc = dev_get_drvdata(dev); if (rc->uwb_dev.dev.parent->parent == grandpa_dev) { rc = uwb_rc_get(rc); return 1; } return 0; } /** * Locate and refcount a radio controller given a common grand-parent * * @grandpa_dev Pointer to the 'grandparent' device structure. * @returns NULL If the rc does not exist or is quiescing; the ptr to * it otherwise, properly referenced. * * The Radio Control interface (or the UWB Radio Controller) is always * an interface of a device. The parent is the interface, the * grandparent is the device that encapsulates the interface. 
* * There is no need to lock around as the "grandpa" would be * refcounted by the target, and to remove the referemes, the * uwb_rc_class->sem would have to be taken--we hold it, ergo we * should be safe. */ struct uwb_rc *uwb_rc_get_by_grandpa(const struct device *grandpa_dev) { struct device *dev; struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, (void *)grandpa_dev, find_rc_grandpa); if (dev) rc = dev_get_drvdata(dev); return rc; } EXPORT_SYMBOL_GPL(uwb_rc_get_by_grandpa); /** * Find a radio controller by device address * * @returns the pointer to the radio controller, properly referenced */ static int find_rc_dev(struct device *dev, void *data) { struct uwb_dev_addr *addr = data; struct uwb_rc *rc = dev_get_drvdata(dev); if (rc == NULL) { WARN_ON(1); return 0; } if (!uwb_dev_addr_cmp(&rc->uwb_dev.dev_addr, addr)) { rc = uwb_rc_get(rc); return 1; } return 0; } struct uwb_rc *uwb_rc_get_by_dev(const struct uwb_dev_addr *addr) { struct device *dev; struct uwb_rc *rc = NULL; dev = class_find_device(&uwb_rc_class, NULL, (void *)addr, find_rc_dev); if (dev) rc = dev_get_drvdata(dev); return rc; } EXPORT_SYMBOL_GPL(uwb_rc_get_by_dev); /** * Drop a reference on a radio controller * * This is the version that should be done by entities external to the * UWB Radio Control stack (ie: clients of the API). */ void uwb_rc_put(struct uwb_rc *rc) { __uwb_rc_put(rc); } EXPORT_SYMBOL_GPL(uwb_rc_put);
gpl-2.0
NamelessRom/android_kernel_oppo_n3
drivers/staging/rtl8192u/ieee80211/ieee80211_crypt_tkip.c
7883
20452
/* * Host AP crypt: host-based TKIP encryption implementation for Host AP driver * * Copyright (c) 2003-2004, Jouni Malinen <jkmaline@cc.hut.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. */ //#include <linux/config.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/if_ether.h> #include <linux/if_arp.h> #include <asm/string.h> #include "ieee80211.h" #include <linux/crypto.h> #include <linux/scatterlist.h> #include <linux/crc32.h> MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Host AP crypt: TKIP"); MODULE_LICENSE("GPL"); struct ieee80211_tkip_data { #define TKIP_KEY_LEN 32 u8 key[TKIP_KEY_LEN]; int key_set; u32 tx_iv32; u16 tx_iv16; u16 tx_ttak[5]; int tx_phase1_done; u32 rx_iv32; u16 rx_iv16; u16 rx_ttak[5]; int rx_phase1_done; u32 rx_iv32_new; u16 rx_iv16_new; u32 dot11RSNAStatsTKIPReplays; u32 dot11RSNAStatsTKIPICVErrors; u32 dot11RSNAStatsTKIPLocalMICFailures; int key_idx; struct crypto_blkcipher *rx_tfm_arc4; struct crypto_hash *rx_tfm_michael; struct crypto_blkcipher *tx_tfm_arc4; struct crypto_hash *tx_tfm_michael; /* scratch buffers for virt_to_page() (crypto API) */ u8 rx_hdr[16], tx_hdr[16]; }; static void * ieee80211_tkip_init(int key_idx) { struct ieee80211_tkip_data *priv; priv = kzalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; priv->key_idx = key_idx; priv->tx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tx_tfm_arc4)) { printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " "crypto API arc4\n"); priv->tx_tfm_arc4 = NULL; goto fail; } priv->tx_tfm_michael = crypto_alloc_hash("michael_mic", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->tx_tfm_michael)) { printk(KERN_DEBUG 
"ieee80211_crypt_tkip: could not allocate " "crypto API michael_mic\n"); priv->tx_tfm_michael = NULL; goto fail; } priv->rx_tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->rx_tfm_arc4)) { printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " "crypto API arc4\n"); priv->rx_tfm_arc4 = NULL; goto fail; } priv->rx_tfm_michael = crypto_alloc_hash("michael_mic", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(priv->rx_tfm_michael)) { printk(KERN_DEBUG "ieee80211_crypt_tkip: could not allocate " "crypto API michael_mic\n"); priv->rx_tfm_michael = NULL; goto fail; } return priv; fail: if (priv) { if (priv->tx_tfm_michael) crypto_free_hash(priv->tx_tfm_michael); if (priv->tx_tfm_arc4) crypto_free_blkcipher(priv->tx_tfm_arc4); if (priv->rx_tfm_michael) crypto_free_hash(priv->rx_tfm_michael); if (priv->rx_tfm_arc4) crypto_free_blkcipher(priv->rx_tfm_arc4); kfree(priv); } return NULL; } static void ieee80211_tkip_deinit(void *priv) { struct ieee80211_tkip_data *_priv = priv; if (_priv) { if (_priv->tx_tfm_michael) crypto_free_hash(_priv->tx_tfm_michael); if (_priv->tx_tfm_arc4) crypto_free_blkcipher(_priv->tx_tfm_arc4); if (_priv->rx_tfm_michael) crypto_free_hash(_priv->rx_tfm_michael); if (_priv->rx_tfm_arc4) crypto_free_blkcipher(_priv->rx_tfm_arc4); } kfree(priv); } static inline u16 RotR1(u16 val) { return (val >> 1) | (val << 15); } static inline u8 Lo8(u16 val) { return val & 0xff; } static inline u8 Hi8(u16 val) { return val >> 8; } static inline u16 Lo16(u32 val) { return val & 0xffff; } static inline u16 Hi16(u32 val) { return val >> 16; } static inline u16 Mk16(u8 hi, u8 lo) { return lo | (((u16) hi) << 8); } static inline u16 Mk16_le(u16 *v) { return le16_to_cpu(*v); } static const u16 Sbox[256] = { 0xC6A5, 0xF884, 0xEE99, 0xF68D, 0xFF0D, 0xD6BD, 0xDEB1, 0x9154, 0x6050, 0x0203, 0xCEA9, 0x567D, 0xE719, 0xB562, 0x4DE6, 0xEC9A, 0x8F45, 0x1F9D, 0x8940, 0xFA87, 0xEF15, 0xB2EB, 0x8EC9, 0xFB0B, 0x41EC, 0xB367, 0x5FFD, 0x45EA, 0x23BF, 0x53F7, 
0xE496, 0x9B5B, 0x75C2, 0xE11C, 0x3DAE, 0x4C6A, 0x6C5A, 0x7E41, 0xF502, 0x834F, 0x685C, 0x51F4, 0xD134, 0xF908, 0xE293, 0xAB73, 0x6253, 0x2A3F, 0x080C, 0x9552, 0x4665, 0x9D5E, 0x3028, 0x37A1, 0x0A0F, 0x2FB5, 0x0E09, 0x2436, 0x1B9B, 0xDF3D, 0xCD26, 0x4E69, 0x7FCD, 0xEA9F, 0x121B, 0x1D9E, 0x5874, 0x342E, 0x362D, 0xDCB2, 0xB4EE, 0x5BFB, 0xA4F6, 0x764D, 0xB761, 0x7DCE, 0x527B, 0xDD3E, 0x5E71, 0x1397, 0xA6F5, 0xB968, 0x0000, 0xC12C, 0x4060, 0xE31F, 0x79C8, 0xB6ED, 0xD4BE, 0x8D46, 0x67D9, 0x724B, 0x94DE, 0x98D4, 0xB0E8, 0x854A, 0xBB6B, 0xC52A, 0x4FE5, 0xED16, 0x86C5, 0x9AD7, 0x6655, 0x1194, 0x8ACF, 0xE910, 0x0406, 0xFE81, 0xA0F0, 0x7844, 0x25BA, 0x4BE3, 0xA2F3, 0x5DFE, 0x80C0, 0x058A, 0x3FAD, 0x21BC, 0x7048, 0xF104, 0x63DF, 0x77C1, 0xAF75, 0x4263, 0x2030, 0xE51A, 0xFD0E, 0xBF6D, 0x814C, 0x1814, 0x2635, 0xC32F, 0xBEE1, 0x35A2, 0x88CC, 0x2E39, 0x9357, 0x55F2, 0xFC82, 0x7A47, 0xC8AC, 0xBAE7, 0x322B, 0xE695, 0xC0A0, 0x1998, 0x9ED1, 0xA37F, 0x4466, 0x547E, 0x3BAB, 0x0B83, 0x8CCA, 0xC729, 0x6BD3, 0x283C, 0xA779, 0xBCE2, 0x161D, 0xAD76, 0xDB3B, 0x6456, 0x744E, 0x141E, 0x92DB, 0x0C0A, 0x486C, 0xB8E4, 0x9F5D, 0xBD6E, 0x43EF, 0xC4A6, 0x39A8, 0x31A4, 0xD337, 0xF28B, 0xD532, 0x8B43, 0x6E59, 0xDAB7, 0x018C, 0xB164, 0x9CD2, 0x49E0, 0xD8B4, 0xACFA, 0xF307, 0xCF25, 0xCAAF, 0xF48E, 0x47E9, 0x1018, 0x6FD5, 0xF088, 0x4A6F, 0x5C72, 0x3824, 0x57F1, 0x73C7, 0x9751, 0xCB23, 0xA17C, 0xE89C, 0x3E21, 0x96DD, 0x61DC, 0x0D86, 0x0F85, 0xE090, 0x7C42, 0x71C4, 0xCCAA, 0x90D8, 0x0605, 0xF701, 0x1C12, 0xC2A3, 0x6A5F, 0xAEF9, 0x69D0, 0x1791, 0x9958, 0x3A27, 0x27B9, 0xD938, 0xEB13, 0x2BB3, 0x2233, 0xD2BB, 0xA970, 0x0789, 0x33A7, 0x2DB6, 0x3C22, 0x1592, 0xC920, 0x8749, 0xAAFF, 0x5078, 0xA57A, 0x038F, 0x59F8, 0x0980, 0x1A17, 0x65DA, 0xD731, 0x84C6, 0xD0B8, 0x82C3, 0x29B0, 0x5A77, 0x1E11, 0x7BCB, 0xA8FC, 0x6DD6, 0x2C3A, }; static inline u16 _S_(u16 v) { u16 t = Sbox[Hi8(v)]; return Sbox[Lo8(v)] ^ ((t << 8) | (t >> 8)); } #define PHASE1_LOOP_COUNT 8 static void tkip_mixing_phase1(u16 *TTAK, const u8 *TK, 
const u8 *TA, u32 IV32) { int i, j; /* Initialize the 80-bit TTAK from TSC (IV32) and TA[0..5] */ TTAK[0] = Lo16(IV32); TTAK[1] = Hi16(IV32); TTAK[2] = Mk16(TA[1], TA[0]); TTAK[3] = Mk16(TA[3], TA[2]); TTAK[4] = Mk16(TA[5], TA[4]); for (i = 0; i < PHASE1_LOOP_COUNT; i++) { j = 2 * (i & 1); TTAK[0] += _S_(TTAK[4] ^ Mk16(TK[1 + j], TK[0 + j])); TTAK[1] += _S_(TTAK[0] ^ Mk16(TK[5 + j], TK[4 + j])); TTAK[2] += _S_(TTAK[1] ^ Mk16(TK[9 + j], TK[8 + j])); TTAK[3] += _S_(TTAK[2] ^ Mk16(TK[13 + j], TK[12 + j])); TTAK[4] += _S_(TTAK[3] ^ Mk16(TK[1 + j], TK[0 + j])) + i; } } static void tkip_mixing_phase2(u8 *WEPSeed, const u8 *TK, const u16 *TTAK, u16 IV16) { /* Make temporary area overlap WEP seed so that the final copy can be * avoided on little endian hosts. */ u16 *PPK = (u16 *) &WEPSeed[4]; /* Step 1 - make copy of TTAK and bring in TSC */ PPK[0] = TTAK[0]; PPK[1] = TTAK[1]; PPK[2] = TTAK[2]; PPK[3] = TTAK[3]; PPK[4] = TTAK[4]; PPK[5] = TTAK[4] + IV16; /* Step 2 - 96-bit bijective mixing using S-box */ PPK[0] += _S_(PPK[5] ^ Mk16_le((u16 *) &TK[0])); PPK[1] += _S_(PPK[0] ^ Mk16_le((u16 *) &TK[2])); PPK[2] += _S_(PPK[1] ^ Mk16_le((u16 *) &TK[4])); PPK[3] += _S_(PPK[2] ^ Mk16_le((u16 *) &TK[6])); PPK[4] += _S_(PPK[3] ^ Mk16_le((u16 *) &TK[8])); PPK[5] += _S_(PPK[4] ^ Mk16_le((u16 *) &TK[10])); PPK[0] += RotR1(PPK[5] ^ Mk16_le((u16 *) &TK[12])); PPK[1] += RotR1(PPK[0] ^ Mk16_le((u16 *) &TK[14])); PPK[2] += RotR1(PPK[1]); PPK[3] += RotR1(PPK[2]); PPK[4] += RotR1(PPK[3]); PPK[5] += RotR1(PPK[4]); /* Step 3 - bring in last of TK bits, assign 24-bit WEP IV value * WEPSeed[0..2] is transmitted as WEP IV */ WEPSeed[0] = Hi8(IV16); WEPSeed[1] = (Hi8(IV16) | 0x20) & 0x7F; WEPSeed[2] = Lo8(IV16); WEPSeed[3] = Lo8((PPK[5] ^ Mk16_le((u16 *) &TK[0])) >> 1); #ifdef __BIG_ENDIAN { int i; for (i = 0; i < 6; i++) PPK[i] = (PPK[i] << 8) | (PPK[i] >> 8); } #endif } static int ieee80211_tkip_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_tkip_data *tkey = priv; int 
len; u8 *pos; struct ieee80211_hdr_4addr *hdr; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); struct blkcipher_desc desc = {.tfm = tkey->tx_tfm_arc4}; int ret = 0; u8 rc4key[16], *icv; u32 crc; struct scatterlist sg; if (skb_headroom(skb) < 8 || skb_tailroom(skb) < 4 || skb->len < hdr_len) return -1; hdr = (struct ieee80211_hdr_4addr *) skb->data; if (!tcb_desc->bHwSec) { if (!tkey->tx_phase1_done) { tkip_mixing_phase1(tkey->tx_ttak, tkey->key, hdr->addr2, tkey->tx_iv32); tkey->tx_phase1_done = 1; } tkip_mixing_phase2(rc4key, tkey->key, tkey->tx_ttak, tkey->tx_iv16); } else tkey->tx_phase1_done = 1; len = skb->len - hdr_len; pos = skb_push(skb, 8); memmove(pos, pos + 8, hdr_len); pos += hdr_len; if (tcb_desc->bHwSec) { *pos++ = Hi8(tkey->tx_iv16); *pos++ = (Hi8(tkey->tx_iv16) | 0x20) & 0x7F; *pos++ = Lo8(tkey->tx_iv16); } else { *pos++ = rc4key[0]; *pos++ = rc4key[1]; *pos++ = rc4key[2]; } *pos++ = (tkey->key_idx << 6) | (1 << 5) /* Ext IV included */; *pos++ = tkey->tx_iv32 & 0xff; *pos++ = (tkey->tx_iv32 >> 8) & 0xff; *pos++ = (tkey->tx_iv32 >> 16) & 0xff; *pos++ = (tkey->tx_iv32 >> 24) & 0xff; if (!tcb_desc->bHwSec) { icv = skb_put(skb, 4); crc = ~crc32_le(~0, pos, len); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; crypto_blkcipher_setkey(tkey->tx_tfm_arc4, rc4key, 16); sg_init_one(&sg, pos, len+4); ret= crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); } tkey->tx_iv16++; if (tkey->tx_iv16 == 0) { tkey->tx_phase1_done = 0; tkey->tx_iv32++; } if (!tcb_desc->bHwSec) return ret; else return 0; } static int ieee80211_tkip_decrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_tkip_data *tkey = priv; u8 keyidx, *pos; u32 iv32; u16 iv16; struct ieee80211_hdr_4addr *hdr; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); struct blkcipher_desc desc = {.tfm = tkey->rx_tfm_arc4}; u8 rc4key[16]; u8 icv[4]; u32 crc; struct scatterlist sg; int plen; if (skb->len < hdr_len + 8 + 4) return -1; hdr = 
(struct ieee80211_hdr_4addr *) skb->data; pos = skb->data + hdr_len; keyidx = pos[3]; if (!(keyidx & (1 << 5))) { if (net_ratelimit()) { printk(KERN_DEBUG "TKIP: received packet without ExtIV" " flag from %pM\n", hdr->addr2); } return -2; } keyidx >>= 6; if (tkey->key_idx != keyidx) { printk(KERN_DEBUG "TKIP: RX tkey->key_idx=%d frame " "keyidx=%d priv=%p\n", tkey->key_idx, keyidx, priv); return -6; } if (!tkey->key_set) { if (net_ratelimit()) { printk(KERN_DEBUG "TKIP: received packet from %pM" " with keyid=%d that does not have a configured" " key\n", hdr->addr2, keyidx); } return -3; } iv16 = (pos[0] << 8) | pos[2]; iv32 = pos[4] | (pos[5] << 8) | (pos[6] << 16) | (pos[7] << 24); pos += 8; if (!tcb_desc->bHwSec) { if (iv32 < tkey->rx_iv32 || (iv32 == tkey->rx_iv32 && iv16 <= tkey->rx_iv16)) { if (net_ratelimit()) { printk(KERN_DEBUG "TKIP: replay detected: STA=%pM" " previous TSC %08x%04x received TSC " "%08x%04x\n", hdr->addr2, tkey->rx_iv32, tkey->rx_iv16, iv32, iv16); } tkey->dot11RSNAStatsTKIPReplays++; return -4; } if (iv32 != tkey->rx_iv32 || !tkey->rx_phase1_done) { tkip_mixing_phase1(tkey->rx_ttak, tkey->key, hdr->addr2, iv32); tkey->rx_phase1_done = 1; } tkip_mixing_phase2(rc4key, tkey->key, tkey->rx_ttak, iv16); plen = skb->len - hdr_len - 12; crypto_blkcipher_setkey(tkey->rx_tfm_arc4, rc4key, 16); sg_init_one(&sg, pos, plen+4); if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4)) { if (net_ratelimit()) { printk(KERN_DEBUG ": TKIP: failed to decrypt " "received packet from %pM\n", hdr->addr2); } return -7; } crc = ~crc32_le(~0, pos, plen); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; if (memcmp(icv, pos + plen, 4) != 0) { if (iv32 != tkey->rx_iv32) { /* Previously cached Phase1 result was already lost, so * it needs to be recalculated for the next packet. 
*/ tkey->rx_phase1_done = 0; } if (net_ratelimit()) { printk(KERN_DEBUG "TKIP: ICV error detected: STA=" "%pM\n", hdr->addr2); } tkey->dot11RSNAStatsTKIPICVErrors++; return -5; } } /* Update real counters only after Michael MIC verification has * completed */ tkey->rx_iv32_new = iv32; tkey->rx_iv16_new = iv16; /* Remove IV and ICV */ memmove(skb->data + 8, skb->data, hdr_len); skb_pull(skb, 8); skb_trim(skb, skb->len - 4); return keyidx; } static int michael_mic(struct crypto_hash *tfm_michael, u8 * key, u8 * hdr, u8 * data, size_t data_len, u8 * mic) { struct hash_desc desc; struct scatterlist sg[2]; if (tfm_michael == NULL) { printk(KERN_WARNING "michael_mic: tfm_michael == NULL\n"); return -1; } sg_init_table(sg, 2); sg_set_buf(&sg[0], hdr, 16); sg_set_buf(&sg[1], data, data_len); if (crypto_hash_setkey(tfm_michael, key, 8)) return -1; desc.tfm = tfm_michael; desc.flags = 0; return crypto_hash_digest(&desc, sg, data_len + 16, mic); } static void michael_mic_hdr(struct sk_buff *skb, u8 *hdr) { struct ieee80211_hdr_4addr *hdr11; hdr11 = (struct ieee80211_hdr_4addr *) skb->data; switch (le16_to_cpu(hdr11->frame_ctl) & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { case IEEE80211_FCTL_TODS: memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ break; case IEEE80211_FCTL_FROMDS: memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr3, ETH_ALEN); /* SA */ break; case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: memcpy(hdr, hdr11->addr3, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr4, ETH_ALEN); /* SA */ break; case 0: memcpy(hdr, hdr11->addr1, ETH_ALEN); /* DA */ memcpy(hdr + ETH_ALEN, hdr11->addr2, ETH_ALEN); /* SA */ break; } hdr[12] = 0; /* priority */ hdr[13] = hdr[14] = hdr[15] = 0; /* reserved */ } static int ieee80211_michael_mic_add(struct sk_buff *skb, int hdr_len, void *priv) { struct ieee80211_tkip_data *tkey = priv; u8 *pos; struct ieee80211_hdr_4addr *hdr; hdr = (struct 
ieee80211_hdr_4addr *) skb->data; if (skb_tailroom(skb) < 8 || skb->len < hdr_len) { printk(KERN_DEBUG "Invalid packet for Michael MIC add " "(tailroom=%d hdr_len=%d skb->len=%d)\n", skb_tailroom(skb), hdr_len, skb->len); return -1; } michael_mic_hdr(skb, tkey->tx_hdr); // { david, 2006.9.1 // fix the wpa process with wmm enabled. if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) { tkey->tx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07; } // } pos = skb_put(skb, 8); if (michael_mic(tkey->tx_tfm_michael, &tkey->key[16], tkey->tx_hdr, skb->data + hdr_len, skb->len - 8 - hdr_len, pos)) return -1; return 0; } static void ieee80211_michael_mic_failure(struct net_device *dev, struct ieee80211_hdr_4addr *hdr, int keyidx) { union iwreq_data wrqu; struct iw_michaelmicfailure ev; /* TODO: needed parameters: count, keyid, key type, TSC */ memset(&ev, 0, sizeof(ev)); ev.flags = keyidx & IW_MICFAILURE_KEY_ID; if (hdr->addr1[0] & 0x01) ev.flags |= IW_MICFAILURE_GROUP; else ev.flags |= IW_MICFAILURE_PAIRWISE; ev.src_addr.sa_family = ARPHRD_ETHER; memcpy(ev.src_addr.sa_data, hdr->addr2, ETH_ALEN); memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = sizeof(ev); wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &ev); } static int ieee80211_michael_mic_verify(struct sk_buff *skb, int keyidx, int hdr_len, void *priv) { struct ieee80211_tkip_data *tkey = priv; u8 mic[8]; struct ieee80211_hdr_4addr *hdr; hdr = (struct ieee80211_hdr_4addr *) skb->data; if (!tkey->key_set) return -1; michael_mic_hdr(skb, tkey->rx_hdr); // { david, 2006.9.1 // fix the wpa process with wmm enabled. 
if(IEEE80211_QOS_HAS_SEQ(le16_to_cpu(hdr->frame_ctl))) { tkey->rx_hdr[12] = *(skb->data + hdr_len - 2) & 0x07; } // } if (michael_mic(tkey->rx_tfm_michael, &tkey->key[24], tkey->rx_hdr, skb->data + hdr_len, skb->len - 8 - hdr_len, mic)) return -1; if (memcmp(mic, skb->data + skb->len - 8, 8) != 0) { struct ieee80211_hdr_4addr *hdr; hdr = (struct ieee80211_hdr_4addr *) skb->data; printk(KERN_DEBUG "%s: Michael MIC verification failed for " "MSDU from %pM keyidx=%d\n", skb->dev ? skb->dev->name : "N/A", hdr->addr2, keyidx); if (skb->dev) ieee80211_michael_mic_failure(skb->dev, hdr, keyidx); tkey->dot11RSNAStatsTKIPLocalMICFailures++; return -1; } /* Update TSC counters for RX now that the packet verification has * completed. */ tkey->rx_iv32 = tkey->rx_iv32_new; tkey->rx_iv16 = tkey->rx_iv16_new; skb_trim(skb, skb->len - 8); return 0; } static int ieee80211_tkip_set_key(void *key, int len, u8 *seq, void *priv) { struct ieee80211_tkip_data *tkey = priv; int keyidx; struct crypto_hash *tfm = tkey->tx_tfm_michael; struct crypto_blkcipher *tfm2 = tkey->tx_tfm_arc4; struct crypto_hash *tfm3 = tkey->rx_tfm_michael; struct crypto_blkcipher *tfm4 = tkey->rx_tfm_arc4; keyidx = tkey->key_idx; memset(tkey, 0, sizeof(*tkey)); tkey->key_idx = keyidx; tkey->tx_tfm_michael = tfm; tkey->tx_tfm_arc4 = tfm2; tkey->rx_tfm_michael = tfm3; tkey->rx_tfm_arc4 = tfm4; if (len == TKIP_KEY_LEN) { memcpy(tkey->key, key, TKIP_KEY_LEN); tkey->key_set = 1; tkey->tx_iv16 = 1; /* TSC is initialized to 1 */ if (seq) { tkey->rx_iv32 = (seq[5] << 24) | (seq[4] << 16) | (seq[3] << 8) | seq[2]; tkey->rx_iv16 = (seq[1] << 8) | seq[0]; } } else if (len == 0) tkey->key_set = 0; else return -1; return 0; } static int ieee80211_tkip_get_key(void *key, int len, u8 *seq, void *priv) { struct ieee80211_tkip_data *tkey = priv; if (len < TKIP_KEY_LEN) return -1; if (!tkey->key_set) return 0; memcpy(key, tkey->key, TKIP_KEY_LEN); if (seq) { /* Return the sequence number of the last transmitted frame. 
*/ u16 iv16 = tkey->tx_iv16; u32 iv32 = tkey->tx_iv32; if (iv16 == 0) iv32--; iv16--; seq[0] = tkey->tx_iv16; seq[1] = tkey->tx_iv16 >> 8; seq[2] = tkey->tx_iv32; seq[3] = tkey->tx_iv32 >> 8; seq[4] = tkey->tx_iv32 >> 16; seq[5] = tkey->tx_iv32 >> 24; } return TKIP_KEY_LEN; } static char * ieee80211_tkip_print_stats(char *p, void *priv) { struct ieee80211_tkip_data *tkip = priv; p += sprintf(p, "key[%d] alg=TKIP key_set=%d " "tx_pn=%02x%02x%02x%02x%02x%02x " "rx_pn=%02x%02x%02x%02x%02x%02x " "replays=%d icv_errors=%d local_mic_failures=%d\n", tkip->key_idx, tkip->key_set, (tkip->tx_iv32 >> 24) & 0xff, (tkip->tx_iv32 >> 16) & 0xff, (tkip->tx_iv32 >> 8) & 0xff, tkip->tx_iv32 & 0xff, (tkip->tx_iv16 >> 8) & 0xff, tkip->tx_iv16 & 0xff, (tkip->rx_iv32 >> 24) & 0xff, (tkip->rx_iv32 >> 16) & 0xff, (tkip->rx_iv32 >> 8) & 0xff, tkip->rx_iv32 & 0xff, (tkip->rx_iv16 >> 8) & 0xff, tkip->rx_iv16 & 0xff, tkip->dot11RSNAStatsTKIPReplays, tkip->dot11RSNAStatsTKIPICVErrors, tkip->dot11RSNAStatsTKIPLocalMICFailures); return p; } static struct ieee80211_crypto_ops ieee80211_crypt_tkip = { .name = "TKIP", .init = ieee80211_tkip_init, .deinit = ieee80211_tkip_deinit, .encrypt_mpdu = ieee80211_tkip_encrypt, .decrypt_mpdu = ieee80211_tkip_decrypt, .encrypt_msdu = ieee80211_michael_mic_add, .decrypt_msdu = ieee80211_michael_mic_verify, .set_key = ieee80211_tkip_set_key, .get_key = ieee80211_tkip_get_key, .print_stats = ieee80211_tkip_print_stats, .extra_prefix_len = 4 + 4, /* IV + ExtIV */ .extra_postfix_len = 8 + 4, /* MIC + ICV */ .owner = THIS_MODULE, }; int __init ieee80211_crypto_tkip_init(void) { return ieee80211_register_crypto_ops(&ieee80211_crypt_tkip); } void __exit ieee80211_crypto_tkip_exit(void) { ieee80211_unregister_crypto_ops(&ieee80211_crypt_tkip); } void ieee80211_tkip_null(void) { // printk("============>%s()\n", __FUNCTION__); return; }
gpl-2.0
bilalliberty/android_kernel_htc_memul
sound/oss/uart401.c
9419
10693
/* * sound/oss/uart401.c * * MPU-401 UART driver (formerly uart401_midi.c) * * * Copyright (C) by Hannu Savolainen 1993-1997 * * OSS/Free for Linux is distributed under the GNU GENERAL PUBLIC LICENSE (GPL) * Version 2 (June 1991). See the "COPYING" file distributed with this software * for more info. * * Changes: * Alan Cox Reformatted, removed sound_mem usage, use normal Linux * interrupt allocation. Protect against bogus unload * Fixed to allow IRQ > 15 * Christoph Hellwig Adapted to module_init/module_exit * Arnaldo C. de Melo got rid of check_region * * Status: * Untested */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/spinlock.h> #include "sound_config.h" #include "mpu401.h" typedef struct uart401_devc { int base; int irq; int *osp; void (*midi_input_intr) (int dev, unsigned char data); int opened, disabled; volatile unsigned char input_byte; int my_dev; int share_irq; spinlock_t lock; } uart401_devc; #define DATAPORT (devc->base) #define COMDPORT (devc->base+1) #define STATPORT (devc->base+1) static int uart401_status(uart401_devc * devc) { return inb(STATPORT); } #define input_avail(devc) (!(uart401_status(devc)&INPUT_AVAIL)) #define output_ready(devc) (!(uart401_status(devc)&OUTPUT_READY)) static void uart401_cmd(uart401_devc * devc, unsigned char cmd) { outb((cmd), COMDPORT); } static int uart401_read(uart401_devc * devc) { return inb(DATAPORT); } static void uart401_write(uart401_devc * devc, unsigned char byte) { outb((byte), DATAPORT); } #define OUTPUT_READY 0x40 #define INPUT_AVAIL 0x80 #define MPU_ACK 0xFE #define MPU_RESET 0xFF #define UART_MODE_ON 0x3F static int reset_uart401(uart401_devc * devc); static void enter_uart_mode(uart401_devc * devc); static void uart401_input_loop(uart401_devc * devc) { int work_limit=30000; while (input_avail(devc) && --work_limit) { unsigned char c = uart401_read(devc); if (c == MPU_ACK) devc->input_byte = c; else if (devc->opened & OPEN_READ 
&& devc->midi_input_intr) devc->midi_input_intr(devc->my_dev, c); } if(work_limit==0) printk(KERN_WARNING "Too much work in interrupt on uart401 (0x%X). UART jabbering ??\n", devc->base); } irqreturn_t uart401intr(int irq, void *dev_id) { uart401_devc *devc = dev_id; if (devc == NULL) { printk(KERN_ERR "uart401: bad devc\n"); return IRQ_NONE; } if (input_avail(devc)) uart401_input_loop(devc); return IRQ_HANDLED; } static int uart401_open(int dev, int mode, void (*input) (int dev, unsigned char data), void (*output) (int dev) ) { uart401_devc *devc = (uart401_devc *) midi_devs[dev]->devc; if (devc->opened) return -EBUSY; /* Flush the UART */ while (input_avail(devc)) uart401_read(devc); devc->midi_input_intr = input; devc->opened = mode; enter_uart_mode(devc); devc->disabled = 0; return 0; } static void uart401_close(int dev) { uart401_devc *devc = (uart401_devc *) midi_devs[dev]->devc; reset_uart401(devc); devc->opened = 0; } static int uart401_out(int dev, unsigned char midi_byte) { int timeout; unsigned long flags; uart401_devc *devc = (uart401_devc *) midi_devs[dev]->devc; if (devc->disabled) return 1; /* * Test for input since pending input seems to block the output. */ spin_lock_irqsave(&devc->lock,flags); if (input_avail(devc)) uart401_input_loop(devc); spin_unlock_irqrestore(&devc->lock,flags); /* * Sometimes it takes about 13000 loops before the output becomes ready * (After reset). Normally it takes just about 10 loops. 
*/ for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--); if (!output_ready(devc)) { printk(KERN_WARNING "uart401: Timeout - Device not responding\n"); devc->disabled = 1; reset_uart401(devc); enter_uart_mode(devc); return 1; } uart401_write(devc, midi_byte); return 1; } static inline int uart401_start_read(int dev) { return 0; } static inline int uart401_end_read(int dev) { return 0; } static inline void uart401_kick(int dev) { } static inline int uart401_buffer_status(int dev) { return 0; } #define MIDI_SYNTH_NAME "MPU-401 UART" #define MIDI_SYNTH_CAPS SYNTH_CAP_INPUT #include "midi_synth.h" static const struct midi_operations uart401_operations = { .owner = THIS_MODULE, .info = {"MPU-401 (UART) MIDI", 0, 0, SNDCARD_MPU401}, .converter = &std_midi_synth, .in_info = {0}, .open = uart401_open, .close = uart401_close, .outputc = uart401_out, .start_read = uart401_start_read, .end_read = uart401_end_read, .kick = uart401_kick, .buffer_status = uart401_buffer_status, }; static void enter_uart_mode(uart401_devc * devc) { int ok, timeout; unsigned long flags; spin_lock_irqsave(&devc->lock,flags); for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--); devc->input_byte = 0; uart401_cmd(devc, UART_MODE_ON); ok = 0; for (timeout = 50000; timeout > 0 && !ok; timeout--) if (devc->input_byte == MPU_ACK) ok = 1; else if (input_avail(devc)) if (uart401_read(devc) == MPU_ACK) ok = 1; spin_unlock_irqrestore(&devc->lock,flags); } static int reset_uart401(uart401_devc * devc) { int ok, timeout, n; /* * Send the RESET command. Try again if no success at the first time. */ ok = 0; for (n = 0; n < 2 && !ok; n++) { for (timeout = 30000; timeout > 0 && !output_ready(devc); timeout--); devc->input_byte = 0; uart401_cmd(devc, MPU_RESET); /* * Wait at least 25 msec. This method is not accurate so let's make the * loop bit longer. Cannot sleep since this is called during boot. 
*/ for (timeout = 50000; timeout > 0 && !ok; timeout--) { if (devc->input_byte == MPU_ACK) /* Interrupt */ ok = 1; else if (input_avail(devc)) { if (uart401_read(devc) == MPU_ACK) ok = 1; } } } if (ok) { DEB(printk("Reset UART401 OK\n")); } else DDB(printk("Reset UART401 failed - No hardware detected.\n")); if (ok) uart401_input_loop(devc); /* * Flush input before enabling interrupts */ return ok; } int probe_uart401(struct address_info *hw_config, struct module *owner) { uart401_devc *devc; char *name = "MPU-401 (UART) MIDI"; int ok = 0; unsigned long flags; DDB(printk("Entered probe_uart401()\n")); /* Default to "not found" */ hw_config->slots[4] = -1; if (!request_region(hw_config->io_base, 4, "MPU-401 UART")) { printk(KERN_INFO "uart401: could not request_region(%d, 4)\n", hw_config->io_base); return 0; } devc = kmalloc(sizeof(uart401_devc), GFP_KERNEL); if (!devc) { printk(KERN_WARNING "uart401: Can't allocate memory\n"); goto cleanup_region; } devc->base = hw_config->io_base; devc->irq = hw_config->irq; devc->osp = hw_config->osp; devc->midi_input_intr = NULL; devc->opened = 0; devc->input_byte = 0; devc->my_dev = 0; devc->share_irq = 0; spin_lock_init(&devc->lock); spin_lock_irqsave(&devc->lock,flags); ok = reset_uart401(devc); spin_unlock_irqrestore(&devc->lock,flags); if (!ok) goto cleanup_devc; if (hw_config->name) name = hw_config->name; if (devc->irq < 0) { devc->share_irq = 1; devc->irq *= -1; } else devc->share_irq = 0; if (!devc->share_irq) if (request_irq(devc->irq, uart401intr, 0, "MPU-401 UART", devc) < 0) { printk(KERN_WARNING "uart401: Failed to allocate IRQ%d\n", devc->irq); devc->share_irq = 1; } devc->my_dev = sound_alloc_mididev(); enter_uart_mode(devc); if (devc->my_dev == -1) { printk(KERN_INFO "uart401: Too many midi devices detected\n"); goto cleanup_irq; } conf_printf(name, hw_config); midi_devs[devc->my_dev] = kmalloc(sizeof(struct midi_operations), GFP_KERNEL); if (!midi_devs[devc->my_dev]) { printk(KERN_ERR "uart401: Failed to 
allocate memory\n"); goto cleanup_unload_mididev; } memcpy(midi_devs[devc->my_dev], &uart401_operations, sizeof(struct midi_operations)); if (owner) midi_devs[devc->my_dev]->owner = owner; midi_devs[devc->my_dev]->devc = devc; midi_devs[devc->my_dev]->converter = kmalloc(sizeof(struct synth_operations), GFP_KERNEL); if (!midi_devs[devc->my_dev]->converter) { printk(KERN_WARNING "uart401: Failed to allocate memory\n"); goto cleanup_midi_devs; } memcpy(midi_devs[devc->my_dev]->converter, &std_midi_synth, sizeof(struct synth_operations)); strcpy(midi_devs[devc->my_dev]->info.name, name); midi_devs[devc->my_dev]->converter->id = "UART401"; midi_devs[devc->my_dev]->converter->midi_dev = devc->my_dev; if (owner) midi_devs[devc->my_dev]->converter->owner = owner; hw_config->slots[4] = devc->my_dev; sequencer_init(); devc->opened = 0; return 1; cleanup_midi_devs: kfree(midi_devs[devc->my_dev]); cleanup_unload_mididev: sound_unload_mididev(devc->my_dev); cleanup_irq: if (!devc->share_irq) free_irq(devc->irq, devc); cleanup_devc: kfree(devc); cleanup_region: release_region(hw_config->io_base, 4); return 0; } void unload_uart401(struct address_info *hw_config) { uart401_devc *devc; int n=hw_config->slots[4]; /* Not set up */ if(n==-1 || midi_devs[n]==NULL) return; /* Not allocated (erm ??) 
*/ devc = midi_devs[hw_config->slots[4]]->devc; if (devc == NULL) return; reset_uart401(devc); release_region(hw_config->io_base, 4); if (!devc->share_irq) free_irq(devc->irq, devc); if (devc) { kfree(midi_devs[devc->my_dev]->converter); kfree(midi_devs[devc->my_dev]); kfree(devc); devc = NULL; } /* This kills midi_devs[x] */ sound_unload_mididev(hw_config->slots[4]); } EXPORT_SYMBOL(probe_uart401); EXPORT_SYMBOL(unload_uart401); EXPORT_SYMBOL(uart401intr); static struct address_info cfg_mpu; static int io = -1; static int irq = -1; module_param(io, int, 0444); module_param(irq, int, 0444); static int __init init_uart401(void) { cfg_mpu.irq = irq; cfg_mpu.io_base = io; /* Can be loaded either for module use or to provide functions to others */ if (cfg_mpu.io_base != -1 && cfg_mpu.irq != -1) { printk(KERN_INFO "MPU-401 UART driver Copyright (C) Hannu Savolainen 1993-1997"); if (!probe_uart401(&cfg_mpu, THIS_MODULE)) return -ENODEV; } return 0; } static void __exit cleanup_uart401(void) { if (cfg_mpu.io_base != -1 && cfg_mpu.irq != -1) unload_uart401(&cfg_mpu); } module_init(init_uart401); module_exit(cleanup_uart401); #ifndef MODULE static int __init setup_uart401(char *str) { /* io, irq */ int ints[3]; str = get_options(str, ARRAY_SIZE(ints), ints); io = ints[1]; irq = ints[2]; return 1; } __setup("uart401=", setup_uart401); #endif MODULE_LICENSE("GPL");
gpl-2.0
TeamHorizon/android_kernel_samsung_hlte
arch/m68k/sun3/sun3dvma.c
10699
6997
/*
 * linux/arch/m68k/sun3/sun3dvma.c
 *
 * Copyright (C) 2000 Sam Creasey
 *
 * Contains common routines for sun3/sun3x DVMA management.
 *
 * DVMA (bus) address space is managed as a list of free "holes"
 * (hole_list) plus a cache of unused hole descriptors (hole_cache).
 * Allocations carve space out of a hole; frees merge the range back
 * in, consuming a descriptor from the cache when no neighbor merge
 * is possible.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/dvma.h>

#undef DVMA_DEBUG

#ifdef CONFIG_SUN3X
extern void dvma_unmap_iommu(unsigned long baddr, int len);
#else
/* sun3 (non-3x) has no IOMMU unmap step; stub it out. */
static inline void dvma_unmap_iommu(unsigned long a, int b)
{
}
#endif

#ifdef CONFIG_SUN3
extern void sun3_dvma_init(void);
#endif

/* Per-DVMA-page usage table: 0 = free, otherwise the byte length that
 * was recorded for the allocation anchored at that page (see get_baddr).
 */
static unsigned long iommu_use[IOMMU_TOTAL_ENTRIES];

#define dvma_index(baddr) ((baddr - DVMA_START) >> DVMA_PAGE_SHIFT)

#define dvma_entry_use(baddr)		(iommu_use[dvma_index(baddr)])

/* A free range of DVMA space: [start, end), size == end - start. */
struct hole {
	unsigned long start;
	unsigned long end;
	unsigned long size;
	struct list_head list;
};

static struct list_head hole_list;	/* live free-space holes */
static struct list_head hole_cache;	/* spare hole descriptors */
static struct hole initholes[64];	/* static backing for the cache */

#ifdef DVMA_DEBUG

static unsigned long dvma_allocs;
static unsigned long dvma_frees;
static unsigned long long dvma_alloc_bytes;
static unsigned long long dvma_free_bytes;

/* Dump the per-page usage table and alloc/free counters. */
static void print_use(void)
{

	int i;
	int j = 0;

	printk("dvma entry usage:\n");

	for(i = 0; i < IOMMU_TOTAL_ENTRIES; i++) {
		if(!iommu_use[i])
			continue;

		j++;

		printk("dvma entry: %08lx len %08lx\n",
		       ( i << DVMA_PAGE_SHIFT) + DVMA_START,
		       iommu_use[i]);
	}

	printk("%d entries in use total\n", j);

	printk("allocation/free calls: %lu/%lu\n", dvma_allocs, dvma_frees);
	printk("allocation/free bytes: %Lx/%Lx\n", dvma_alloc_bytes,
	       dvma_free_bytes);
}

/* Dump every (non-zeroed) hole on the given list. */
static void print_holes(struct list_head *holes)
{

	struct list_head *cur;
	struct hole *hole;

	printk("listing dvma holes\n");
	list_for_each(cur, holes) {
		hole = list_entry(cur, struct hole, list);

		if((hole->start == 0) && (hole->end == 0) && (hole->size == 0))
			continue;

		printk("hole: start %08lx end %08lx size %08lx\n", hole->start, hole->end, hole->size);
	}

	printk("end of hole listing...\n");

}
#endif /* DVMA_DEBUG */

/*
 * Walk hole_list merging each hole whose end abuts the previous
 * entry's start into one larger hole, returning the freed-up
 * descriptors to hole_cache.  Returns the number of merges performed
 * (0 means the cache could not be refilled).
 */
static inline int refill(void)
{

	struct hole *hole;
	struct hole *prev = NULL;
	struct list_head *cur;
	int ret = 0;

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if(!prev) {
			prev = hole;
			continue;
		}

		if(hole->end == prev->start) {
			hole->size += prev->size;
			hole->end = prev->end;
			list_move(&(prev->list), &hole_cache);
			ret++;
		}

	}

	return ret;
}

/*
 * Pop one spare hole descriptor from hole_cache, attempting a
 * coalescing pass (refill) first if the cache is empty.  BUGs if no
 * descriptor can be recovered.
 */
static inline struct hole *rmcache(void)
{
	struct hole *ret;

	if(list_empty(&hole_cache)) {
		if(!refill()) {
			printk("out of dvma hole cache!\n");
			BUG();
		}
	}

	ret = list_entry(hole_cache.next, struct hole, list);
	list_del(&(ret->list));

	return ret;

}

/*
 * Allocate `len` bytes of DVMA space with the requested alignment,
 * carving the range off the end (or consuming the whole) of the first
 * hole large enough.  Records the allocated length in iommu_use[] so
 * free_baddr() can recover it.  BUGs when no hole fits.
 */
static inline unsigned long get_baddr(int len, unsigned long align)
{

	struct list_head *cur;
	struct hole *hole;

	if(list_empty(&hole_list)) {
#ifdef DVMA_DEBUG
		printk("out of dvma holes! (printing hole cache)\n");
		print_holes(&hole_cache);
		print_use();
#endif
		BUG();
	}

	list_for_each(cur, &hole_list) {
		unsigned long newlen;

		hole = list_entry(cur, struct hole, list);

		/* pad the request so the carved range lands on the
		   requested alignment boundary */
		if(align > DVMA_PAGE_SIZE)
			newlen = len + ((hole->end - len) & (align-1));
		else
			newlen = len;

		if(hole->size > newlen) {
			hole->end -= newlen;
			hole->size -= newlen;
			dvma_entry_use(hole->end) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->end;
		} else if(hole->size == newlen) {
			/* exact fit: the hole disappears entirely,
			   so recycle its descriptor */
			list_move(&(hole->list), &hole_cache);
			dvma_entry_use(hole->start) = newlen;
#ifdef DVMA_DEBUG
			dvma_allocs++;
			dvma_alloc_bytes += newlen;
#endif
			return hole->start;
		}

	}

	printk("unable to find dvma hole!\n");
	BUG();
	return 0;
}

/*
 * Return the range anchored at `baddr` to the free list.  The length
 * comes from the iommu_use[] entry written by get_baddr().  The range
 * is merged into an adjacent hole when one exists; otherwise a fresh
 * descriptor is inserted.  NOTE(review): after a full traversal `cur`
 * is the list head, so the new hole is inserted at the front of
 * hole_list (the commented-out list_add_tail shows the older intent).
 */
static inline int free_baddr(unsigned long baddr)
{

	unsigned long len;
	struct hole *hole;
	struct list_head *cur;
	unsigned long orig_baddr;

	orig_baddr = baddr;
	len = dvma_entry_use(baddr);
	dvma_entry_use(baddr) = 0;
	baddr &= DVMA_PAGE_MASK;
	dvma_unmap_iommu(baddr, len);

#ifdef DVMA_DEBUG
	dvma_frees++;
	dvma_free_bytes += len;
#endif

	list_for_each(cur, &hole_list) {
		hole = list_entry(cur, struct hole, list);

		if(hole->end == baddr) {
			hole->end += len;
			hole->size += len;
			return 0;
		} else if(hole->start == (baddr + len)) {
			hole->start = baddr;
			hole->size += len;
			return 0;
		}

	}

	hole = rmcache();

	hole->start = baddr;
	hole->end = baddr + len;
	hole->size = len;

//	list_add_tail(&(hole->list), cur);
	list_add(&(hole->list), cur);

	return 0;

}

/*
 * One-time setup: seed hole_cache with the 64 static descriptors,
 * create a single hole spanning all of DVMA space, and clear any
 * stale IOMMU state.
 */
void dvma_init(void)
{

	struct hole *hole;
	int i;

	INIT_LIST_HEAD(&hole_list);
	INIT_LIST_HEAD(&hole_cache);

	/* prepare the hole cache */
	for(i = 0; i < 64; i++)
		list_add(&(initholes[i].list), &hole_cache);

	hole = rmcache();
	hole->start = DVMA_START;
	hole->end = DVMA_END;
	hole->size = DVMA_SIZE;

	list_add(&(hole->list), &hole_list);

	memset(iommu_use, 0, sizeof(iommu_use));

	dvma_unmap_iommu(DVMA_START, DVMA_SIZE);

#ifdef CONFIG_SUN3
	sun3_dvma_init();
#endif

}

/*
 * Map `len` bytes of kernel memory starting at `kaddr` into DVMA
 * space with the given alignment; returns the bus address (plus the
 * sub-page offset of kaddr), or 0 on bad input.  A zero len defaults
 * to 0x800 bytes.  NOTE(review): because len is forced nonzero above,
 * the !len half of the guard below can never trigger.
 */
inline unsigned long dvma_map_align(unsigned long kaddr, int len, int align)
{

	unsigned long baddr;
	unsigned long off;

	if(!len)
		len = 0x800;

	if(!kaddr || !len) {
//		printk("error: kaddr %lx len %x\n", kaddr, len);
//		*(int *)4 = 0;
		return 0;
	}

#ifdef DEBUG
	printk("dvma_map request %08lx bytes from %08lx\n",
	       len, kaddr);
#endif
	off = kaddr & ~DVMA_PAGE_MASK;
	kaddr &= PAGE_MASK;
	len += off;
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if(align == 0)
		align = DVMA_PAGE_SIZE;
	else
		align = ((align + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	baddr = get_baddr(len, align);
//	printk("using baddr %lx\n", baddr);

	if(!dvma_map_iommu(kaddr, baddr, len))
		return (baddr + off);

	printk("dvma_map failed kaddr %lx baddr %lx len %x\n", kaddr, baddr, len);
	BUG();
	return 0;
}
EXPORT_SYMBOL(dvma_map_align);

/*
 * Release a DVMA mapping previously returned by dvma_map_align().
 * Addresses without the 0x00f00000 bits set are assumed to be VME
 * mappings and are normalized back into DVMA space first.
 */
void dvma_unmap(void *baddr)
{
	unsigned long addr;

	addr = (unsigned long)baddr;
	/* check if this is a vme mapping */
	if(!(addr & 0x00f00000))
		addr |= 0xf00000;

	free_baddr(addr);

	return;

}
EXPORT_SYMBOL(dvma_unmap);

/*
 * Allocate page-backed memory that is visible at both a CPU virtual
 * address (returned) and a DVMA bus address.  Unwinds the bus mapping
 * and the pages on failure.  Returns NULL on any error or zero len.
 */
void *dvma_malloc_align(unsigned long len, unsigned long align)
{
	unsigned long kaddr;
	unsigned long baddr;
	unsigned long vaddr;

	if(!len)
		return NULL;

#ifdef DEBUG
	printk("dvma_malloc request %lx bytes\n", len);
#endif
	len = ((len + (DVMA_PAGE_SIZE-1)) & DVMA_PAGE_MASK);

	if((kaddr = __get_free_pages(GFP_ATOMIC, get_order(len))) == 0)
		return NULL;

	if((baddr = (unsigned long)dvma_map_align(kaddr, len, align)) == 0) {
		free_pages(kaddr, get_order(len));
		return NULL;
	}

	vaddr = dvma_btov(baddr);

	if(dvma_map_cpu(kaddr, vaddr, len) < 0) {
		dvma_unmap((void *)baddr);
		free_pages(kaddr, get_order(len));
		return NULL;
	}

#ifdef DEBUG
	printk("mapped %08lx bytes %08lx kern -> %08lx bus\n",
	       len, kaddr, baddr);
#endif

	return (void *)vaddr;

}
EXPORT_SYMBOL(dvma_malloc_align);

/* Intentional no-op: DVMA allocations are never reclaimed here. */
void dvma_free(void *vaddr)
{

	return;

}
EXPORT_SYMBOL(dvma_free);
gpl-2.0
Nothing-Dev/android_kernel_motorola_msm8610
drivers/media/video/cx18/cx18-av-audio.c
12747
13482
/* * cx18 ADEC audio functions * * Derived from cx25840-audio.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@md.metrocast.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include "cx18-driver.h" static int set_audclk_freq(struct cx18 *cx, u32 freq) { struct cx18_av_state *state = &cx->av_state; if (freq != 32000 && freq != 44100 && freq != 48000) return -EINVAL; /* * The PLL parameters are based on the external crystal frequency that * would ideally be: * * NTSC Color subcarrier freq * 8 = * 4.5 MHz/286 * 455/2 * 8 = 28.63636363... MHz * * The accidents of history and rationale that explain from where this * combination of magic numbers originate can be found in: * * [1] Abrahams, I. C., "Choice of Chrominance Subcarrier Frequency in * the NTSC Standards", Proceedings of the I-R-E, January 1954, pp 79-80 * * [2] Abrahams, I. C., "The 'Frequency Interleaving' Principle in the * NTSC Standards", Proceedings of the I-R-E, January 1954, pp 81-83 * * As Mike Bradley has rightly pointed out, it's not the exact crystal * frequency that matters, only that all parts of the driver and * firmware are using the same value (close to the ideal value). 
* * Since I have a strong suspicion that, if the firmware ever assumes a * crystal value at all, it will assume 28.636360 MHz, the crystal * freq used in calculations in this driver will be: * * xtal_freq = 28.636360 MHz * * an error of less than 0.13 ppm which is way, way better than any off * the shelf crystal will have for accuracy anyway. * * Below I aim to run the PLLs' VCOs near 400 MHz to minimze error. * * Many thanks to Jeff Campbell and Mike Bradley for their extensive * investigation, experimentation, testing, and suggested solutions of * of audio/video sync problems with SVideo and CVBS captures. */ if (state->aud_input > CX18_AV_AUDIO_SERIAL2) { switch (freq) { case 32000: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x0d, AUX PLL Post Divider = 0x20 */ cx18_av_write4(cx, 0x108, 0x200d040f); /* VID_PLL Fraction = 0x2be2fe */ /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/ cx18_av_write4(cx, 0x10c, 0x002be2fe); /* AUX_PLL Fraction = 0x176740c */ /* xtal * 0xd.bb3a060/0x20 = 32000 * 384: 393 MHz p-pd*/ cx18_av_write4(cx, 0x110, 0x0176740c); /* src3/4/6_ctl */ /* 0x1.f77f = (4 * xtal/8*2/455) / 32000 */ cx18_av_write4(cx, 0x900, 0x0801f77f); cx18_av_write4(cx, 0x904, 0x0801f77f); cx18_av_write4(cx, 0x90c, 0x0801f77f); /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x20 */ cx18_av_write(cx, 0x127, 0x60); /* AUD_COUNT = 0x2fff = 8 samples * 4 * 384 - 1 */ cx18_av_write4(cx, 0x12c, 0x11202fff); /* * EN_AV_LOCK = 0 * VID_COUNT = 0x0d2ef8 = 107999.000 * 8 = * ((8 samples/32,000) * (13,500,000 * 8) * 4 - 1) * 8 */ cx18_av_write4(cx, 0x128, 0xa00d2ef8); break; case 44100: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x0e, AUX PLL Post Divider = 0x18 */ cx18_av_write4(cx, 0x108, 0x180e040f); /* VID_PLL Fraction = 0x2be2fe */ /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/ cx18_av_write4(cx, 0x10c, 0x002be2fe); /* AUX_PLL Fraction = 0x062a1f2 */ /* xtal * 0xe.3150f90/0x18 = 44100 * 384: 406 MHz 
p-pd*/ cx18_av_write4(cx, 0x110, 0x0062a1f2); /* src3/4/6_ctl */ /* 0x1.6d59 = (4 * xtal/8*2/455) / 44100 */ cx18_av_write4(cx, 0x900, 0x08016d59); cx18_av_write4(cx, 0x904, 0x08016d59); cx18_av_write4(cx, 0x90c, 0x08016d59); /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x18 */ cx18_av_write(cx, 0x127, 0x58); /* AUD_COUNT = 0x92ff = 49 samples * 2 * 384 - 1 */ cx18_av_write4(cx, 0x12c, 0x112092ff); /* * EN_AV_LOCK = 0 * VID_COUNT = 0x1d4bf8 = 239999.000 * 8 = * ((49 samples/44,100) * (13,500,000 * 8) * 2 - 1) * 8 */ cx18_av_write4(cx, 0x128, 0xa01d4bf8); break; case 48000: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x0e, AUX PLL Post Divider = 0x16 */ cx18_av_write4(cx, 0x108, 0x160e040f); /* VID_PLL Fraction = 0x2be2fe */ /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/ cx18_av_write4(cx, 0x10c, 0x002be2fe); /* AUX_PLL Fraction = 0x05227ad */ /* xtal * 0xe.2913d68/0x16 = 48000 * 384: 406 MHz p-pd*/ cx18_av_write4(cx, 0x110, 0x005227ad); /* src3/4/6_ctl */ /* 0x1.4faa = (4 * xtal/8*2/455) / 48000 */ cx18_av_write4(cx, 0x900, 0x08014faa); cx18_av_write4(cx, 0x904, 0x08014faa); cx18_av_write4(cx, 0x90c, 0x08014faa); /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x16 */ cx18_av_write(cx, 0x127, 0x56); /* AUD_COUNT = 0x5fff = 4 samples * 16 * 384 - 1 */ cx18_av_write4(cx, 0x12c, 0x11205fff); /* * EN_AV_LOCK = 0 * VID_COUNT = 0x1193f8 = 143999.000 * 8 = * ((4 samples/48,000) * (13,500,000 * 8) * 16 - 1) * 8 */ cx18_av_write4(cx, 0x128, 0xa01193f8); break; } } else { switch (freq) { case 32000: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x0d, AUX PLL Post Divider = 0x30 */ cx18_av_write4(cx, 0x108, 0x300d040f); /* VID_PLL Fraction = 0x2be2fe */ /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/ cx18_av_write4(cx, 0x10c, 0x002be2fe); /* AUX_PLL Fraction = 0x176740c */ /* xtal * 0xd.bb3a060/0x30 = 32000 * 256: 393 MHz p-pd*/ cx18_av_write4(cx, 0x110, 0x0176740c); /* src1_ctl */ /* 0x1.0000 = 32000/32000 */ 
cx18_av_write4(cx, 0x8f8, 0x08010000); /* src3/4/6_ctl */ /* 0x2.0000 = 2 * (32000/32000) */ cx18_av_write4(cx, 0x900, 0x08020000); cx18_av_write4(cx, 0x904, 0x08020000); cx18_av_write4(cx, 0x90c, 0x08020000); /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x30 */ cx18_av_write(cx, 0x127, 0x70); /* AUD_COUNT = 0x1fff = 8 samples * 4 * 256 - 1 */ cx18_av_write4(cx, 0x12c, 0x11201fff); /* * EN_AV_LOCK = 0 * VID_COUNT = 0x0d2ef8 = 107999.000 * 8 = * ((8 samples/32,000) * (13,500,000 * 8) * 4 - 1) * 8 */ cx18_av_write4(cx, 0x128, 0xa00d2ef8); break; case 44100: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x0e, AUX PLL Post Divider = 0x24 */ cx18_av_write4(cx, 0x108, 0x240e040f); /* VID_PLL Fraction = 0x2be2fe */ /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/ cx18_av_write4(cx, 0x10c, 0x002be2fe); /* AUX_PLL Fraction = 0x062a1f2 */ /* xtal * 0xe.3150f90/0x24 = 44100 * 256: 406 MHz p-pd*/ cx18_av_write4(cx, 0x110, 0x0062a1f2); /* src1_ctl */ /* 0x1.60cd = 44100/32000 */ cx18_av_write4(cx, 0x8f8, 0x080160cd); /* src3/4/6_ctl */ /* 0x1.7385 = 2 * (32000/44100) */ cx18_av_write4(cx, 0x900, 0x08017385); cx18_av_write4(cx, 0x904, 0x08017385); cx18_av_write4(cx, 0x90c, 0x08017385); /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x24 */ cx18_av_write(cx, 0x127, 0x64); /* AUD_COUNT = 0x61ff = 49 samples * 2 * 256 - 1 */ cx18_av_write4(cx, 0x12c, 0x112061ff); /* * EN_AV_LOCK = 0 * VID_COUNT = 0x1d4bf8 = 239999.000 * 8 = * ((49 samples/44,100) * (13,500,000 * 8) * 2 - 1) * 8 */ cx18_av_write4(cx, 0x128, 0xa01d4bf8); break; case 48000: /* * VID_PLL Integer = 0x0f, VID_PLL Post Divider = 0x04 * AUX_PLL Integer = 0x0d, AUX PLL Post Divider = 0x20 */ cx18_av_write4(cx, 0x108, 0x200d040f); /* VID_PLL Fraction = 0x2be2fe */ /* xtal * 0xf.15f17f0/4 = 108 MHz: 432 MHz pre-postdiv*/ cx18_av_write4(cx, 0x10c, 0x002be2fe); /* AUX_PLL Fraction = 0x176740c */ /* xtal * 0xd.bb3a060/0x20 = 48000 * 256: 393 MHz p-pd*/ cx18_av_write4(cx, 0x110, 0x0176740c); /* src1_ctl */ /* 0x1.8000 = 
48000/32000 */ cx18_av_write4(cx, 0x8f8, 0x08018000); /* src3/4/6_ctl */ /* 0x1.5555 = 2 * (32000/48000) */ cx18_av_write4(cx, 0x900, 0x08015555); cx18_av_write4(cx, 0x904, 0x08015555); cx18_av_write4(cx, 0x90c, 0x08015555); /* SA_MCLK_SEL=1, SA_MCLK_DIV=0x20 */ cx18_av_write(cx, 0x127, 0x60); /* AUD_COUNT = 0x3fff = 4 samples * 16 * 256 - 1 */ cx18_av_write4(cx, 0x12c, 0x11203fff); /* * EN_AV_LOCK = 0 * VID_COUNT = 0x1193f8 = 143999.000 * 8 = * ((4 samples/48,000) * (13,500,000 * 8) * 16 - 1) * 8 */ cx18_av_write4(cx, 0x128, 0xa01193f8); break; } } state->audclk_freq = freq; return 0; } void cx18_av_audio_set_path(struct cx18 *cx) { struct cx18_av_state *state = &cx->av_state; u8 v; /* stop microcontroller */ v = cx18_av_read(cx, 0x803) & ~0x10; cx18_av_write_expect(cx, 0x803, v, v, 0x1f); /* assert soft reset */ v = cx18_av_read(cx, 0x810) | 0x01; cx18_av_write_expect(cx, 0x810, v, v, 0x0f); /* Mute everything to prevent the PFFT! */ cx18_av_write(cx, 0x8d3, 0x1f); if (state->aud_input <= CX18_AV_AUDIO_SERIAL2) { /* Set Path1 to Serial Audio Input */ cx18_av_write4(cx, 0x8d0, 0x01011012); /* The microcontroller should not be started for the * non-tuner inputs: autodetection is specific for * TV audio. 
*/ } else { /* Set Path1 to Analog Demod Main Channel */ cx18_av_write4(cx, 0x8d0, 0x1f063870); } set_audclk_freq(cx, state->audclk_freq); /* deassert soft reset */ v = cx18_av_read(cx, 0x810) & ~0x01; cx18_av_write_expect(cx, 0x810, v, v, 0x0f); if (state->aud_input > CX18_AV_AUDIO_SERIAL2) { /* When the microcontroller detects the * audio format, it will unmute the lines */ v = cx18_av_read(cx, 0x803) | 0x10; cx18_av_write_expect(cx, 0x803, v, v, 0x1f); } } static void set_volume(struct cx18 *cx, int volume) { /* First convert the volume to msp3400 values (0-127) */ int vol = volume >> 9; /* now scale it up to cx18_av values * -114dB to -96dB maps to 0 * this should be 19, but in my testing that was 4dB too loud */ if (vol <= 23) vol = 0; else vol -= 23; /* PATH1_VOLUME */ cx18_av_write(cx, 0x8d4, 228 - (vol * 2)); } static void set_bass(struct cx18 *cx, int bass) { /* PATH1_EQ_BASS_VOL */ cx18_av_and_or(cx, 0x8d9, ~0x3f, 48 - (bass * 48 / 0xffff)); } static void set_treble(struct cx18 *cx, int treble) { /* PATH1_EQ_TREBLE_VOL */ cx18_av_and_or(cx, 0x8db, ~0x3f, 48 - (treble * 48 / 0xffff)); } static void set_balance(struct cx18 *cx, int balance) { int bal = balance >> 8; if (bal > 0x80) { /* PATH1_BAL_LEFT */ cx18_av_and_or(cx, 0x8d5, 0x7f, 0x80); /* PATH1_BAL_LEVEL */ cx18_av_and_or(cx, 0x8d5, ~0x7f, bal & 0x7f); } else { /* PATH1_BAL_LEFT */ cx18_av_and_or(cx, 0x8d5, 0x7f, 0x00); /* PATH1_BAL_LEVEL */ cx18_av_and_or(cx, 0x8d5, ~0x7f, 0x80 - bal); } } static void set_mute(struct cx18 *cx, int mute) { struct cx18_av_state *state = &cx->av_state; u8 v; if (state->aud_input > CX18_AV_AUDIO_SERIAL2) { /* Must turn off microcontroller in order to mute sound. * Not sure if this is the best method, but it does work. * If the microcontroller is running, then it will undo any * changes to the mute register. 
*/ v = cx18_av_read(cx, 0x803); if (mute) { /* disable microcontroller */ v &= ~0x10; cx18_av_write_expect(cx, 0x803, v, v, 0x1f); cx18_av_write(cx, 0x8d3, 0x1f); } else { /* enable microcontroller */ v |= 0x10; cx18_av_write_expect(cx, 0x803, v, v, 0x1f); } } else { /* SRC1_MUTE_EN */ cx18_av_and_or(cx, 0x8d3, ~0x2, mute ? 0x02 : 0x00); } } int cx18_av_s_clock_freq(struct v4l2_subdev *sd, u32 freq) { struct cx18 *cx = v4l2_get_subdevdata(sd); struct cx18_av_state *state = &cx->av_state; int retval; u8 v; if (state->aud_input > CX18_AV_AUDIO_SERIAL2) { v = cx18_av_read(cx, 0x803) & ~0x10; cx18_av_write_expect(cx, 0x803, v, v, 0x1f); cx18_av_write(cx, 0x8d3, 0x1f); } v = cx18_av_read(cx, 0x810) | 0x1; cx18_av_write_expect(cx, 0x810, v, v, 0x0f); retval = set_audclk_freq(cx, freq); v = cx18_av_read(cx, 0x810) & ~0x1; cx18_av_write_expect(cx, 0x810, v, v, 0x0f); if (state->aud_input > CX18_AV_AUDIO_SERIAL2) { v = cx18_av_read(cx, 0x803) | 0x10; cx18_av_write_expect(cx, 0x803, v, v, 0x1f); } return retval; } static int cx18_av_audio_s_ctrl(struct v4l2_ctrl *ctrl) { struct v4l2_subdev *sd = to_sd(ctrl); struct cx18 *cx = v4l2_get_subdevdata(sd); switch (ctrl->id) { case V4L2_CID_AUDIO_VOLUME: set_volume(cx, ctrl->val); break; case V4L2_CID_AUDIO_BASS: set_bass(cx, ctrl->val); break; case V4L2_CID_AUDIO_TREBLE: set_treble(cx, ctrl->val); break; case V4L2_CID_AUDIO_BALANCE: set_balance(cx, ctrl->val); break; case V4L2_CID_AUDIO_MUTE: set_mute(cx, ctrl->val); break; default: return -EINVAL; } return 0; } const struct v4l2_ctrl_ops cx18_av_audio_ctrl_ops = { .s_ctrl = cx18_av_audio_s_ctrl, };
gpl-2.0
crseanpaul/staging
arch/m32r/kernel/signal.c
460
7515
/* * linux/arch/m32r/kernel/signal.c * * Copyright (c) 2003 Hitoshi Yamamoto * * Taken from i386 version. * Copyright (C) 1991, 1992 Linus Torvalds * * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson * 2000-06-20 Pentium III FXSR, SSE support by Gareth Hughes */ #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/unistd.h> #include <linux/stddef.h> #include <linux/personality.h> #include <linux/tracehook.h> #include <asm/cacheflush.h> #include <asm/ucontext.h> #include <asm/uaccess.h> #define DEBUG_SIG 0 /* * Do a signal return; undo the signal stack. */ struct rt_sigframe { int sig; struct siginfo __user *pinfo; void __user *puc; struct siginfo info; struct ucontext uc; // struct _fpstate fpstate; }; static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc, int *r0_p) { unsigned int err = 0; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; #define COPY(x) err |= __get_user(regs->x, &sc->sc_##x) COPY(r4); COPY(r5); COPY(r6); COPY(pt_regs); /* COPY(r0); Skip r0 */ COPY(r1); COPY(r2); COPY(r3); COPY(r7); COPY(r8); COPY(r9); COPY(r10); COPY(r11); COPY(r12); COPY(acc0h); COPY(acc0l); COPY(acc1h); /* ISA_DSP_LEVEL2 only */ COPY(acc1l); /* ISA_DSP_LEVEL2 only */ COPY(psw); COPY(bpc); COPY(bbpsw); COPY(bbpc); COPY(spu); COPY(fp); COPY(lr); COPY(spi); #undef COPY regs->syscall_nr = -1; /* disable syscall checks */ err |= __get_user(*r0_p, &sc->sc_r0); return err; } asmlinkage int sys_rt_sigreturn(unsigned long r0, unsigned long r1, unsigned long r2, unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, struct pt_regs *regs) { struct rt_sigframe __user *frame = (struct rt_sigframe __user *)regs->spu; sigset_t set; int result; if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if 
(__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto badframe; set_current_blocked(&set); if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result)) goto badframe; if (restore_altstack(&frame->uc.uc_stack)) goto badframe; return result; badframe: force_sig(SIGSEGV, current); return 0; } /* * Set up a signal frame. */ static int setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask) { int err = 0; #define COPY(x) err |= __put_user(regs->x, &sc->sc_##x) COPY(r4); COPY(r5); COPY(r6); COPY(pt_regs); COPY(r0); COPY(r1); COPY(r2); COPY(r3); COPY(r7); COPY(r8); COPY(r9); COPY(r10); COPY(r11); COPY(r12); COPY(acc0h); COPY(acc0l); COPY(acc1h); /* ISA_DSP_LEVEL2 only */ COPY(acc1l); /* ISA_DSP_LEVEL2 only */ COPY(psw); COPY(bpc); COPY(bbpsw); COPY(bbpc); COPY(spu); COPY(fp); COPY(lr); COPY(spi); #undef COPY err |= __put_user(mask, &sc->oldmask); return err; } /* * Determine which stack to use.. */ static inline void __user * get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size) { return (void __user *)((sigsp(sp, ksig) - frame_size) & -8ul); } static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { struct rt_sigframe __user *frame; int err = 0; int signal, sig = ksig->sig; frame = get_sigframe(ksig, regs->spu, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return -EFAULT; signal = current_thread_info()->exec_domain && current_thread_info()->exec_domain->signal_invmap && sig < 32 ? current_thread_info()->exec_domain->signal_invmap[sig] : sig; err |= __put_user(signal, &frame->sig); if (err) return -EFAULT; err |= __put_user(&frame->info, &frame->pinfo); err |= __put_user(&frame->uc, &frame->puc); err |= copy_siginfo_to_user(&frame->info, &ksig->info); if (err) return -EFAULT; /* Create the ucontext. 
*/ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __save_altstack(&frame->uc.uc_stack, regs->spu); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0]); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) return -EFAULT; /* Set up to return from userspace. */ regs->lr = (unsigned long)ksig->ka.sa.sa_restorer; /* Set up registers for signal handler */ regs->spu = (unsigned long)frame; regs->r0 = signal; /* Arg for signal handler */ regs->r1 = (unsigned long)&frame->info; regs->r2 = (unsigned long)&frame->uc; regs->bpc = (unsigned long)ksig->ka.sa.sa_handler; set_fs(USER_DS); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p\n", current->comm, current->pid, frame, regs->pc); #endif return 0; } static int prev_insn(struct pt_regs *regs) { u16 inst; if (get_user(inst, (u16 __user *)(regs->bpc - 2))) return -EFAULT; if ((inst & 0xfff0) == 0x10f0) /* trap ? */ regs->bpc -= 2; else regs->bpc -= 4; regs->syscall_nr = -1; return 0; } /* * OK, we're invoking a handler */ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { int ret; /* Are we from a system call? */ if (regs->syscall_nr >= 0) { /* If so, check system call restarting.. */ switch (regs->r0) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->r0 = -EINTR; break; case -ERESTARTSYS: if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { regs->r0 = -EINTR; break; } /* fallthrough */ case -ERESTARTNOINTR: regs->r0 = regs->orig_r0; if (prev_insn(regs) < 0) return; } } /* Set up the stack frame */ ret = setup_rt_frame(ksig, sigmask_to_save(), regs); signal_setup_done(ret, ksig, 0); } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. */ static void do_signal(struct pt_regs *regs) { struct ksignal ksig; /* * We want the common case to go fast, which * is why we may in certain cases get here from * kernel mode. 
Just return without doing anything * if so. */ if (!user_mode(regs)) return; if (get_signal(&ksig)) { /* Re-enable any watchpoints before delivering the * signal to user space. The processor register will * have been cleared if the watchpoint triggered * inside the kernel. */ /* Whee! Actually deliver the signal. */ handle_signal(&ksig, regs); return; } /* Did we come from a system call? */ if (regs->syscall_nr >= 0) { /* Restart the system call - no handlers present */ if (regs->r0 == -ERESTARTNOHAND || regs->r0 == -ERESTARTSYS || regs->r0 == -ERESTARTNOINTR) { regs->r0 = regs->orig_r0; prev_insn(regs); } else if (regs->r0 == -ERESTART_RESTARTBLOCK){ regs->r0 = regs->orig_r0; regs->r7 = __NR_restart_syscall; prev_insn(regs); } } restore_saved_sigmask(); } /* * notification of userspace execution resumption * - triggered by current->work.notify_resume */ void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags) { /* Pending single-step? */ if (thread_info_flags & _TIF_SINGLESTEP) clear_thread_flag(TIF_SINGLESTEP); /* deal with pending signal delivery */ if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); } }
gpl-2.0
Framework43/touchpad-kernel
arch/arm/mach-fsm/sirc.c
460
5034
/* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */
/*
 * Secondary Interrupt Controller (SIRC) driver for MSM.  SIRC
 * interrupts cascade into the primary controller; this file provides
 * the irq_chip for the SIRC lines, the chained handler that redrives
 * a pending SIRC interrupt onto its Linux irq number, and
 * enter/exit-sleep hooks that swap the enable mask across power
 * collapse.  Uses the legacy (pre-irq_data) genirq API.
 */

#include <linux/io.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/irq.h>

static void sirc_irq_mask(unsigned int irq);
static void sirc_irq_unmask(unsigned int irq);
static void sirc_irq_ack(unsigned int irq);
static int sirc_irq_set_wake(unsigned int irq, unsigned int on);
static int sirc_irq_set_type(unsigned int irq, unsigned int flow_type);
static void sirc_irq_handler(unsigned int irq, struct irq_desc *desc);

/* Shadow copies of the enable register: normal-operation mask and the
 * wake-source mask programmed during sleep (see msm_sirc_enter_sleep).
 */
static unsigned int int_enable;
static unsigned int wake_enable;

/* Register addresses for the SIRC block. */
static struct sirc_regs_t sirc_regs = {
	.int_enable       = SPSS_SIRC_INT_ENABLE,
	.int_enable_clear = SPSS_SIRC_INT_ENABLE_CLEAR,
	.int_enable_set   = SPSS_SIRC_INT_ENABLE_SET,
	.int_type         = SPSS_SIRC_INT_TYPE,
	.int_polarity     = SPSS_SIRC_INT_POLARITY,
	.int_clear        = SPSS_SIRC_INT_CLEAR,
};

/* Maps each cascade irq on the primary controller to the SIRC status
 * register holding its pending bits (single cascade on this SoC).
 */
static struct sirc_cascade_regs sirc_reg_table[] = {
	{
		.int_status  = SPSS_SIRC_IRQ_STATUS,
		.cascade_irq = INT_SIRC_0,
	}
};

/* Type/polarity register snapshots taken at sleep entry. */
static unsigned int save_type;
static unsigned int save_polarity;

/* Mask off the given interrupt. Keep the int_enable mask in sync with
   the enable reg, so it can be restored after power collapse. */
static void sirc_irq_mask(unsigned int irq)
{
	unsigned int mask;


	mask = 1 << (irq - FIRST_SIRC_IRQ);
	writel(mask, sirc_regs.int_enable_clear);
	int_enable &= ~mask;
	return;
}

/* Unmask the given interrupt. Keep the int_enable mask in sync with
   the enable reg, so it can be restored after power collapse. */
static void sirc_irq_unmask(unsigned int irq)
{
	unsigned int mask;

	mask = 1 << (irq - FIRST_SIRC_IRQ);
	writel(mask, sirc_regs.int_enable_set);
	int_enable |= mask;
	return;
}

/* Clear the latched pending bit for an edge interrupt. */
static void sirc_irq_ack(unsigned int irq)
{
	unsigned int mask;

	mask = 1 << (irq - FIRST_SIRC_IRQ);
	writel(mask, sirc_regs.int_clear);
	return;
}

/* Record whether this line may wake the system; the accumulated
 * wake_enable mask is programmed at sleep entry, not here.
 */
static int sirc_irq_set_wake(unsigned int irq, unsigned int on)
{
	unsigned int mask;

	/* Used to set the interrupt enable mask during power collapse. */
	mask = 1 << (irq - FIRST_SIRC_IRQ);
	if (on)
		wake_enable |= mask;
	else
		wake_enable &= ~mask;

	return 0;
}

/* Program trigger polarity and edge/level type for the line, and
 * switch the flow handler to match (edge vs level).
 */
static int sirc_irq_set_type(unsigned int irq, unsigned int flow_type)
{
	unsigned int mask;
	unsigned int val;

	mask = 1 << (irq - FIRST_SIRC_IRQ);
	val = readl(sirc_regs.int_polarity);

	if (flow_type & (IRQF_TRIGGER_LOW | IRQF_TRIGGER_FALLING))
		val |= mask;
	else
		val &= ~mask;

	writel(val, sirc_regs.int_polarity);

	val = readl(sirc_regs.int_type);
	if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
		val |= mask;
		irq_desc[irq].handle_irq = handle_edge_irq;
	} else {
		val &= ~mask;
		irq_desc[irq].handle_irq = handle_level_irq;
	}

	writel(val, sirc_regs.int_type);

	return 0;
}

/* Finds the pending interrupt on the passed cascade irq and redrives it */
/* Chained handler: looks up the status register for this cascade irq,
 * dispatches the lowest pending SIRC line via generic_handle_irq(),
 * then acks the cascade on the parent controller.
 */
static void sirc_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	unsigned int reg = 0;
	unsigned int sirq;
	unsigned int status;

	while ((reg < ARRAY_SIZE(sirc_reg_table)) &&
	       (sirc_reg_table[reg].cascade_irq != irq))
		reg++;

	status = readl(sirc_reg_table[reg].int_status);
	status &= SIRC_MASK;
	if (status == 0)
		return;

	/* find the first (lowest-numbered) pending bit */
	for (sirq = 0;
	     (sirq < NR_SIRC_IRQS) && ((status & (1U << sirq)) == 0);
	     sirq++)
		;
	generic_handle_irq(sirq+FIRST_SIRC_IRQ);

	desc->chip->ack(irq);
}

/* Sleep entry: snapshot type/polarity and switch the enable register
 * to the wake-source mask so only wake interrupts fire during
 * power collapse.
 */
void msm_sirc_enter_sleep(void)
{
	save_type     = readl(sirc_regs.int_type);
	save_polarity = readl(sirc_regs.int_polarity);
	writel(wake_enable, sirc_regs.int_enable);
	return;
}

/* Sleep exit: restore the snapshotted configuration and the normal
 * run-time enable mask.
 */
void msm_sirc_exit_sleep(void)
{
	writel(save_type, sirc_regs.int_type);
	writel(save_polarity, sirc_regs.int_polarity);
	writel(int_enable, sirc_regs.int_enable);
	return;
}

static struct irq_chip sirc_irq_chip = {
	.name      = "sirc",
	.ack       = sirc_irq_ack,
	.mask      = sirc_irq_mask,
	.unmask    = sirc_irq_unmask,
	.set_wake  = sirc_irq_set_wake,
	.set_type  = sirc_irq_set_type,
};

/* Register the SIRC irq_chip for every SIRC line (edge flow by
 * default; set_type may switch individual lines to level) and install
 * the chained handler on each cascade irq, marking it wake-capable.
 */
void __init msm_init_sirc(void)
{
	int i;

	int_enable = 0;
	wake_enable = 0;

	for (i = FIRST_SIRC_IRQ; i < LAST_SIRC_IRQ; i++) {
		set_irq_chip(i, &sirc_irq_chip);
		set_irq_handler(i, handle_edge_irq);
		set_irq_flags(i, IRQF_VALID);
	}

	for (i = 0; i < ARRAY_SIZE(sirc_reg_table); i++) {
		set_irq_chained_handler(sirc_reg_table[i].cascade_irq,
					sirc_irq_handler);
		set_irq_wake(sirc_reg_table[i].cascade_irq, 1);
	}
	return;
}
gpl-2.0
TeamAlto45/android_kernel_tcl_alto45
arch/mips/ath79/mach-ap136.c
1996
4070
/* * Qualcomm Atheros AP136 reference board support * * Copyright (c) 2012 Qualcomm Atheros * Copyright (c) 2012-2013 Gabor Juhos <juhosg@openwrt.org> * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * */ #include <linux/pci.h> #include <linux/ath9k_platform.h> #include "machtypes.h" #include "dev-gpio-buttons.h" #include "dev-leds-gpio.h" #include "dev-spi.h" #include "dev-usb.h" #include "dev-wmac.h" #include "pci.h" #define AP136_GPIO_LED_STATUS_RED 14 #define AP136_GPIO_LED_STATUS_GREEN 19 #define AP136_GPIO_LED_USB 4 #define AP136_GPIO_LED_WLAN_2G 13 #define AP136_GPIO_LED_WLAN_5G 12 #define AP136_GPIO_LED_WPS_RED 15 #define AP136_GPIO_LED_WPS_GREEN 20 #define AP136_GPIO_BTN_WPS 16 #define AP136_GPIO_BTN_RFKILL 21 #define AP136_KEYS_POLL_INTERVAL 20 /* msecs */ #define AP136_KEYS_DEBOUNCE_INTERVAL (3 * AP136_KEYS_POLL_INTERVAL) #define AP136_WMAC_CALDATA_OFFSET 0x1000 #define AP136_PCIE_CALDATA_OFFSET 0x5000 static struct gpio_led ap136_leds_gpio[] __initdata = { { .name = "qca:green:status", .gpio = AP136_GPIO_LED_STATUS_GREEN, .active_low = 1, }, { .name = "qca:red:status", .gpio = AP136_GPIO_LED_STATUS_RED, .active_low = 1, }, { .name = "qca:green:wps", .gpio = AP136_GPIO_LED_WPS_GREEN, .active_low = 1, }, { .name = "qca:red:wps", .gpio = AP136_GPIO_LED_WPS_RED, .active_low = 1, }, { .name = 
"qca:red:wlan-2g", .gpio = AP136_GPIO_LED_WLAN_2G, .active_low = 1, }, { .name = "qca:red:usb", .gpio = AP136_GPIO_LED_USB, .active_low = 1, } }; static struct gpio_keys_button ap136_gpio_keys[] __initdata = { { .desc = "WPS button", .type = EV_KEY, .code = KEY_WPS_BUTTON, .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL, .gpio = AP136_GPIO_BTN_WPS, .active_low = 1, }, { .desc = "RFKILL button", .type = EV_KEY, .code = KEY_RFKILL, .debounce_interval = AP136_KEYS_DEBOUNCE_INTERVAL, .gpio = AP136_GPIO_BTN_RFKILL, .active_low = 1, }, }; static struct spi_board_info ap136_spi_info[] = { { .bus_num = 0, .chip_select = 0, .max_speed_hz = 25000000, .modalias = "mx25l6405d", } }; static struct ath79_spi_platform_data ap136_spi_data = { .bus_num = 0, .num_chipselect = 1, }; #ifdef CONFIG_PCI static struct ath9k_platform_data ap136_ath9k_data; static int ap136_pci_plat_dev_init(struct pci_dev *dev) { if (dev->bus->number == 1 && (PCI_SLOT(dev->devfn)) == 0) dev->dev.platform_data = &ap136_ath9k_data; return 0; } static void __init ap136_pci_init(u8 *eeprom) { memcpy(ap136_ath9k_data.eeprom_data, eeprom, sizeof(ap136_ath9k_data.eeprom_data)); ath79_pci_set_plat_dev_init(ap136_pci_plat_dev_init); ath79_register_pci(); } #else static inline void ap136_pci_init(void) {} #endif /* CONFIG_PCI */ static void __init ap136_setup(void) { u8 *art = (u8 *) KSEG1ADDR(0x1fff0000); ath79_register_leds_gpio(-1, ARRAY_SIZE(ap136_leds_gpio), ap136_leds_gpio); ath79_register_gpio_keys_polled(-1, AP136_KEYS_POLL_INTERVAL, ARRAY_SIZE(ap136_gpio_keys), ap136_gpio_keys); ath79_register_spi(&ap136_spi_data, ap136_spi_info, ARRAY_SIZE(ap136_spi_info)); ath79_register_usb(); ath79_register_wmac(art + AP136_WMAC_CALDATA_OFFSET); ap136_pci_init(art + AP136_PCIE_CALDATA_OFFSET); } MIPS_MACHINE(ATH79_MACH_AP136_010, "AP136-010", "Atheros AP136-010 reference board", ap136_setup);
gpl-2.0
peyo-hd/kernel_odrc
drivers/staging/cxt1e1/musycc.c
2252
54727
unsigned int max_intcnt = 0; unsigned int max_bh = 0; /*----------------------------------------------------------------------------- * musycc.c - * * Copyright (C) 2007 One Stop Systems, Inc. * Copyright (C) 2003-2006 SBE, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * For further information, contact via email: support@onestopsystems.com * One Stop Systems, Inc. Escondido, California U.S.A. *----------------------------------------------------------------------------- */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include "pmcc4_sysdep.h" #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include "sbecom_inline_linux.h" #include "libsbew.h" #include "pmcc4_private.h" #include "pmcc4.h" #include "musycc.h" #ifdef SBE_INCLUDE_SYMBOLS #define STATIC #else #define STATIC static #endif #define sd_find_chan(ci,ch) c4_find_chan(ch) /*******************************************************************/ /* global driver variables */ extern ci_t *c4_list; extern int drvr_state; extern int cxt1e1_log_level; extern int cxt1e1_max_mru; extern int cxt1e1_max_mtu; extern int max_rxdesc_used; extern int max_txdesc_used; extern ci_t *CI; /* dummy pointr to board ZEROE's data - DEBUG * USAGE */ /*******************************************************************/ /* forward references */ void c4_fifo_free(mpi_t *, int); void c4_wk_chan_restart(mch_t *); void musycc_bh_tx_eom(mpi_t *, int); int musycc_chan_up(ci_t *, int); status_t __init musycc_init(ci_t *); STATIC void __init 
musycc_init_port(mpi_t *); void musycc_intr_bh_tasklet(ci_t *); void musycc_serv_req(mpi_t *, u_int32_t); void musycc_update_timeslots(mpi_t *); /*******************************************************************/ #if 1 STATIC int musycc_dump_rxbuffer_ring(mch_t * ch, int lockit) { struct mdesc *m; unsigned long flags = 0; u_int32_t status; int n; if (lockit) spin_lock_irqsave(&ch->ch_rxlock, flags); if (ch->rxd_num == 0) pr_info(" ZERO receive buffers allocated for this channel."); else { FLUSH_MEM_READ(); m = &ch->mdr[ch->rxix_irq_srv]; for (n = ch->rxd_num; n; n--) { status = le32_to_cpu(m->status); { pr_info("%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n", (m == &ch->mdr[ch->rxix_irq_srv]) ? 'F' : ' ', (unsigned long) m, n, status, m->data ? (status & HOST_RX_OWNED ? 'H' : 'M') : '-', status & POLL_DISABLED ? 'P' : '-', status & EOBIRQ_ENABLE ? 'b' : '-', status & EOMIRQ_ENABLE ? 'm' : '-', status & LENGTH_MASK, le32_to_cpu(m->data), le32_to_cpu(m->next)); #ifdef RLD_DUMP_BUFDATA { u_int32_t *dp; int len = status & LENGTH_MASK; #if 1 if (m->data && (status & HOST_RX_OWNED)) #else if (m->data) /* always dump regardless of valid RX * data */ #endif { dp = (u_int32_t *) OS_phystov((void *) (le32_to_cpu(m->data))); if (len >= 0x10) pr_info(" %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len, *dp, *(dp + 1), *(dp + 2), *(dp + 3)); else if (len >= 0x08) pr_info(" %x[%x]: %08X %08X\n", (u_int32_t) dp, len, *dp, *(dp + 1)); else pr_info(" %x[%x]: %08X\n", (u_int32_t) dp, len, *dp); } } #endif } m = m->snext; } } /* -for- */ pr_info("\n"); if (lockit) spin_unlock_irqrestore(&ch->ch_rxlock, flags); return 0; } #endif #if 1 STATIC int musycc_dump_txbuffer_ring(mch_t * ch, int lockit) { struct mdesc *m; unsigned long flags = 0; u_int32_t status; int n; if (lockit) spin_lock_irqsave(&ch->ch_txlock, flags); if (ch->txd_num == 0) pr_info(" ZERO transmit buffers allocated for this channel."); else { FLUSH_MEM_READ(); m = ch->txd_irq_srv; for (n = 
ch->txd_num; n; n--) { status = le32_to_cpu(m->status); { pr_info("%c%c %08lx[%2d]: sts %08x (%c%c%c%c:%d.) Data [%08x] Next [%08x]\n", (m == ch->txd_usr_add) ? 'F' : ' ', (m == ch->txd_irq_srv) ? 'L' : ' ', (unsigned long) m, n, status, m->data ? (status & MUSYCC_TX_OWNED ? 'M' : 'H') : '-', status & POLL_DISABLED ? 'P' : '-', status & EOBIRQ_ENABLE ? 'b' : '-', status & EOMIRQ_ENABLE ? 'm' : '-', status & LENGTH_MASK, le32_to_cpu(m->data), le32_to_cpu(m->next)); #ifdef RLD_DUMP_BUFDATA { u_int32_t *dp; int len = status & LENGTH_MASK; if (m->data) { dp = (u_int32_t *) OS_phystov((void *) (le32_to_cpu(m->data))); if (len >= 0x10) pr_info(" %x[%x]: %08X %08X %08X %08x\n", (u_int32_t) dp, len, *dp, *(dp + 1), *(dp + 2), *(dp + 3)); else if (len >= 0x08) pr_info(" %x[%x]: %08X %08X\n", (u_int32_t) dp, len, *dp, *(dp + 1)); else pr_info(" %x[%x]: %08X\n", (u_int32_t) dp, len, *dp); } } #endif } m = m->snext; } } /* -for- */ pr_info("\n"); if (lockit) spin_unlock_irqrestore(&ch->ch_txlock, flags); return 0; } #endif /* * The following supports a backdoor debug facility which can be used to * display the state of a board's channel. */ status_t musycc_dump_ring(ci_t * ci, unsigned int chan) { mch_t *ch; if (chan >= MAX_CHANS_USED) return SBE_DRVR_FAIL; /* E2BIG */ { int bh; bh = atomic_read(&ci->bh_pending); pr_info(">> bh_pend %d [%d] ihead %d itail %d [%d] th_cnt %d bh_cnt %d wdcnt %d note %d\n", bh, max_bh, ci->iqp_headx, ci->iqp_tailx, max_intcnt, ci->intlog.drvr_intr_thcount, ci->intlog.drvr_intr_bhcount, ci->wdcount, ci->wd_notify); max_bh = 0; /* reset counter */ max_intcnt = 0; /* reset counter */ } if (!(ch = sd_find_chan(dummy, chan))) { pr_info(">> musycc_dump_ring: channel %d not up.\n", chan); return ENOENT; } pr_info(">> CI %p CHANNEL %3d @ %p: state %x status/p %x/%x\n", ci, chan, ch, ch->state, ch->status, ch->p.status); pr_info("--------------------------------\nTX Buffer Ring - Channel %d, txd_num %d. 
(bd/ch pend %d %d), TXD required %d, txpkt %lu\n", chan, ch->txd_num, (u_int32_t) atomic_read(&ci->tx_pending), (u_int32_t) atomic_read(&ch->tx_pending), ch->txd_required, ch->s.tx_packets); pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n", ch->user, ch->txd_irq_srv, ch->txd_usr_add, sd_queue_stopped(ch->user), ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode); musycc_dump_txbuffer_ring(ch, 1); pr_info("RX Buffer Ring - Channel %d, rxd_num %d. IRQ_SRV[%d] 0x%p, start_rx %x rxpkt %lu\n", chan, ch->rxd_num, ch->rxix_irq_srv, &ch->mdr[ch->rxix_irq_srv], ch->ch_start_rx, ch->s.rx_packets); musycc_dump_rxbuffer_ring(ch, 1); return SBE_DRVR_SUCCESS; } status_t musycc_dump_rings(ci_t * ci, unsigned int start_chan) { unsigned int chan; for (chan = start_chan; chan < (start_chan + 5); chan++) musycc_dump_ring(ci, chan); return SBE_DRVR_SUCCESS; } /* * NOTE on musycc_init_mdt(): These MUSYCC writes are only operational after * a MUSYCC GROUP_INIT command has been issued. */ void musycc_init_mdt(mpi_t * pi) { u_int32_t *addr, cfg; int i; /* * This Idle Code insertion takes effect prior to channel's first * transmitted message. After that, each message contains its own Idle * Code information which is to be issued after the message is * transmitted (Ref.MUSYCC 5.2.2.3: MCENBL bit in Group Configuration * Descriptor). 
*/
	/* Tail of musycc_init_mdt(): program all 32 per-channel Message
	 * Descriptor Table idle-code entries with the 7E flag pattern. */
	addr = (u_int32_t *) ((u_long) pi->reg + MUSYCC_MDT_BASE03_ADDR);
	cfg = CFG_CH_FLAG_7E << IDLE_CODE;
	for (i = 0; i < 32; addr++, i++)
		pci_write_32(addr, cfg);
}


/* Set TX thp to the next unprocessed md */
/*
 * Advance the channel's Transmit Head Pointer (thp) past every
 * host-owned (already transmitted) descriptor, reclaiming each via
 * musycc_bh_tx_eom(), and point the hardware at the first descriptor
 * still owned by the MUSYCC. Runs under ch_txlock for the whole walk.
 */
void
musycc_update_tx_thp(mch_t * ch)
{
	struct mdesc *md;
	unsigned long flags;

	spin_lock_irqsave(&ch->ch_txlock, flags);
	while (1) {
		md = ch->txd_irq_srv;
		FLUSH_MEM_READ();
		if (!md->data) {
			/* No MDs with buffers to process */
			spin_unlock_irqrestore(&ch->ch_txlock, flags);
			return;
		}
		if ((le32_to_cpu(md->status)) & MUSYCC_TX_OWNED) {
			/* this is the MD to restart TX with */
			break;
		}
		/*
		 * Otherwise, we have a valid, host-owned message descriptor which
		 * has been successfully transmitted and whose buffer can be freed,
		 * so... process this MD, it's owned by the host. (This might give
		 * as a new, updated txd_irq_srv.)
		 */
		musycc_bh_tx_eom(ch->up, ch->gchan);
	}
	/* Hand the hardware the physical address of the restart point. */
	md = ch->txd_irq_srv;
	ch->up->regram->thp[ch->gchan] = cpu_to_le32(OS_vtophys(md));
	FLUSH_MEM_WRITE();

	if (ch->tx_full) {
		/* Descriptors were reclaimed above, so flow control can
		 * be lifted and the stack's queue restarted. */
		ch->tx_full = 0;
		ch->txd_required = 0;
		sd_enable_xmit(ch->user);	/* re-enable to catch flow controlled
						 * channel */
	}
	spin_unlock_irqrestore(&ch->ch_txlock, flags);

#ifdef RLD_TRANS_DEBUG
	pr_info("++ musycc_update_tx_thp[%d]: setting thp = %p, sts %x\n",
		ch->channum, md, md->status);
#endif
}


/*
 * This is the workq task executed by the OS when our queue_work() is
 * scheduled and run. It can fire off either RX or TX ACTIVATION depending
 * upon the channel's ch_start_tx and ch_start_rx variables. This routine
 * is implemented as a work queue so that the call to the service request is
 * able to sleep, awaiting an interrupt acknowledgment response (SACK) from
 * the hardware.
*/ void musycc_wq_chan_restart(void *arg) /* channel private structure */ { mch_t *ch; mpi_t *pi; struct mdesc *md; #if 0 unsigned long flags; #endif ch = container_of(arg, struct c4_chan_info, ch_work); pi = ch->up; #ifdef RLD_TRANS_DEBUG pr_info("wq_chan_restart[%d]: start_RT[%d/%d] status %x\n", ch->channum, ch->ch_start_rx, ch->ch_start_tx, ch->status); #endif /**********************************/ /** check for RX restart request **/ /**********************************/ if ((ch->ch_start_rx) && (ch->status & RX_ENABLED)) { ch->ch_start_rx = 0; #if defined(RLD_TRANS_DEBUG) || defined(RLD_RXACT_DEBUG) { static int hereb4 = 7; if (hereb4) { /* RLD DEBUG */ hereb4--; #ifdef RLD_TRANS_DEBUG md = &ch->mdr[ch->rxix_irq_srv]; pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n", ch->channum, ch->rxix_irq_srv, md, le32_to_cpu(md->status), ch->s.rx_packets); #elif defined(RLD_RXACT_DEBUG) md = &ch->mdr[ch->rxix_irq_srv]; pr_info("++ musycc_wq_chan_restart[%d] CHAN RX ACTIVATE: rxix_irq_srv %d, md %p sts %x, rxpkt %lu\n", ch->channum, ch->rxix_irq_srv, md, le32_to_cpu(md->status), ch->s.rx_packets); musycc_dump_rxbuffer_ring(ch, 1); /* RLD DEBUG */ #endif } } #endif musycc_serv_req(pi, SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION | ch->gchan); } /**********************************/ /** check for TX restart request **/ /**********************************/ if ((ch->ch_start_tx) && (ch->status & TX_ENABLED)) { /* find next unprocessed message, then set TX thp to it */ musycc_update_tx_thp(ch); #if 0 spin_lock_irqsave(&ch->ch_txlock, flags); #endif md = ch->txd_irq_srv; if (!md) { #ifdef RLD_TRANS_DEBUG pr_info("-- musycc_wq_chan_restart[%d]: WARNING, starting NULL md\n", ch->channum); #endif #if 0 spin_unlock_irqrestore(&ch->ch_txlock, flags); #endif } else if (md->data && ((le32_to_cpu(md->status)) & MUSYCC_TX_OWNED)) { ch->ch_start_tx = 0; #if 0 spin_unlock_irqrestore(&ch->ch_txlock, flags); /* allow interrupts for service request */ 
#endif #ifdef RLD_TRANS_DEBUG pr_info("++ musycc_wq_chan_restart() CHAN TX ACTIVATE: chan %d txd_irq_srv %p = sts %x, txpkt %lu\n", ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status, ch->s.tx_packets); #endif musycc_serv_req(pi, SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION | ch->gchan); } #ifdef RLD_RESTART_DEBUG else { /* retain request to start until retried and we have data to xmit */ pr_info("-- musycc_wq_chan_restart[%d]: DELAYED due to md %p sts %x data %x, start_tx %x\n", ch->channum, md, le32_to_cpu(md->status), le32_to_cpu(md->data), ch->ch_start_tx); musycc_dump_txbuffer_ring(ch, 0); #if 0 spin_unlock_irqrestore(&ch->ch_txlock, flags); /* allow interrupts for service request */ #endif } #endif } } /* * Channel restart either fires of a workqueue request (2.6) or lodges a * watchdog activation sequence (2.4). */ void musycc_chan_restart(mch_t * ch) { #ifdef RLD_RESTART_DEBUG pr_info("++ musycc_chan_restart[%d]: txd_irq_srv @ %p = sts %x\n", ch->channum, ch->txd_irq_srv, ch->txd_irq_srv->status); #endif /* 2.6 - find next unprocessed message, then set TX thp to it */ #ifdef RLD_RESTART_DEBUG pr_info(">> musycc_chan_restart: scheduling Chan %x workQ @ %p\n", ch->channum, &ch->ch_work); #endif c4_wk_chan_restart(ch); /* work queue mechanism fires off: Ref: * musycc_wq_chan_restart () */ } void rld_put_led(mpi_t * pi, u_int32_t ledval) { static u_int32_t led = 0; if (ledval == 0) led = 0; else led |= ledval; pci_write_32((u_int32_t *) &pi->up->cpldbase->leds, led); /* RLD DEBUG TRANHANG */ } #define MUSYCC_SR_RETRY_CNT 9 void musycc_serv_req(mpi_t * pi, u_int32_t req) { volatile u_int32_t r; int rcnt; /* * PORT NOTE: Semaphore protect service loop guarantees only a single * operation at a time. Per MUSYCC Manual - "Issuing service requests to * the same channel group without first receiving ACK from each request * may cause the host to lose track of which service request has been * acknowledged." 
*/ SD_SEM_TAKE(&pi->sr_sem_busy, "serv"); /* only 1 thru here, per * group */ if (pi->sr_last == req) { #ifdef RLD_TRANS_DEBUG pr_info(">> same SR, Port %d Req %x\n", pi->portnum, req); #endif /* * The most likely repeated request is the channel activation command * which follows the occurrence of a Transparent mode TX ONR or a * BUFF error. If the previous command was a CHANNEL ACTIVATE, * precede it with a NOOP command in order maintain coherent control * of this current (re)ACTIVATE. */ r = (pi->sr_last & ~SR_GCHANNEL_MASK); if ((r == (SR_CHANNEL_ACTIVATE | SR_TX_DIRECTION)) || (r == (SR_CHANNEL_ACTIVATE | SR_RX_DIRECTION))) { #ifdef RLD_TRANS_DEBUG pr_info(">> same CHAN ACT SR, Port %d Req %x => issue SR_NOOP CMD\n", pi->portnum, req); #endif SD_SEM_GIVE(&pi->sr_sem_busy); /* allow this next request */ musycc_serv_req(pi, SR_NOOP); SD_SEM_TAKE(&pi->sr_sem_busy, "serv"); /* relock & continue w/ * original req */ } else if (req == SR_NOOP) { /* no need to issue back-to-back SR_NOOP commands at this time */ #ifdef RLD_TRANS_DEBUG pr_info(">> same Port SR_NOOP skipped, Port %d\n", pi->portnum); #endif SD_SEM_GIVE(&pi->sr_sem_busy); /* allow this next request */ return; } } rcnt = 0; pi->sr_last = req; rewrite: pci_write_32((u_int32_t *) &pi->reg->srd, req); FLUSH_MEM_WRITE(); /* * Per MUSYCC Manual, Section 6.1,2 - "When writing an SCR service * request, the host must ensure at least one PCI bus clock cycle has * elapsed before writing another service request. To meet this minimum * elapsed service request write timing interval, it is recommended that * the host follow any SCR write with another operation which reads from * the same address." 
*/ r = pci_read_32((u_int32_t *) &pi->reg->srd); /* adhere to write * timing imposition */ if ((r != req) && (req != SR_CHIP_RESET) && (++rcnt <= MUSYCC_SR_RETRY_CNT)) { if (cxt1e1_log_level >= LOG_MONITOR) pr_info("%s: %d - reissue srv req/last %x/%x (hdw reads %x), Chan %d.\n", pi->up->devname, rcnt, req, pi->sr_last, r, (pi->portnum * MUSYCC_NCHANS) + (req & 0x1f)); OS_uwait_dummy(); /* this delay helps reduce reissue counts * (reason not yet researched) */ goto rewrite; } if (rcnt > MUSYCC_SR_RETRY_CNT) { pr_warning("%s: failed service request (#%d)= %x, group %d.\n", pi->up->devname, MUSYCC_SR_RETRY_CNT, req, pi->portnum); SD_SEM_GIVE(&pi->sr_sem_busy); /* allow any next request */ return; } if (req == SR_CHIP_RESET) { /* * PORT NOTE: the CHIP_RESET command is NOT ack'd by the MUSYCC, thus * the upcoming delay is used. Though the MUSYCC documentation * suggests a read-after-write would supply the required delay, it's * unclear what CPU/BUS clock speeds might have been assumed when * suggesting this 'lack of ACK' workaround. Thus the use of uwait. 
*/ OS_uwait(100000, "icard"); /* 100ms */ } else { FLUSH_MEM_READ(); SD_SEM_TAKE(&pi->sr_sem_wait, "sakack"); /* sleep until SACK * interrupt occurs */ } SD_SEM_GIVE(&pi->sr_sem_busy); /* allow any next request */ } #ifdef SBE_PMCC4_ENABLE void musycc_update_timeslots(mpi_t * pi) { int i, ch; char e1mode = IS_FRAME_ANY_E1(pi->p.port_mode); for (i = 0; i < 32; i++) { int usedby = 0, last = 0, ts, j, bits[8]; u_int8_t lastval = 0; if (((i == 0) && e1mode) || /* disable if E1 mode */ ((i == 16) && ((pi->p.port_mode == CFG_FRAME_E1CRC_CAS) || (pi->p.port_mode == CFG_FRAME_E1CRC_CAS_AMI))) || ((i > 23) && (!e1mode))) /* disable if T1 mode */ pi->tsm[i] = 0xff; /* make tslot unavailable for this mode */ else pi->tsm[i] = 0x00; /* make tslot available for assignment */ for (j = 0; j < 8; j++) bits[j] = -1; for (ch = 0; ch < MUSYCC_NCHANS; ch++) { if ((pi->chan[ch]->state == UP) && (pi->chan[ch]->p.bitmask[i])) { usedby++; last = ch; lastval = pi->chan[ch]->p.bitmask[i]; for (j = 0; j < 8; j++) if (lastval & (1 << j)) bits[j] = ch; pi->tsm[i] |= lastval; } } if (!usedby) ts = 0; else if ((usedby == 1) && (lastval == 0xff)) ts = (4 << 5) | last; else if ((usedby == 1) && (lastval == 0x7f)) ts = (5 << 5) | last; else { int idx; if (bits[0] < 0) ts = (6 << 5) | (idx = last); else ts = (7 << 5) | (idx = bits[0]); for (j = 1; j < 8; j++) { pi->regram->rscm[idx * 8 + j] = (bits[j] < 0) ? 0 : (0x80 | bits[j]); pi->regram->tscm[idx * 8 + j] = (bits[j] < 0) ? 
0 : (0x80 | bits[j]); } } pi->regram->rtsm[i] = ts; pi->regram->ttsm[i] = ts; } FLUSH_MEM_WRITE(); musycc_serv_req(pi, SR_TIMESLOT_MAP | SR_RX_DIRECTION); musycc_serv_req(pi, SR_TIMESLOT_MAP | SR_TX_DIRECTION); musycc_serv_req(pi, SR_SUBCHANNEL_MAP | SR_RX_DIRECTION); musycc_serv_req(pi, SR_SUBCHANNEL_MAP | SR_TX_DIRECTION); } #endif #ifdef SBE_WAN256T3_ENABLE void musycc_update_timeslots(mpi_t * pi) { mch_t *ch; u_int8_t ts, hmask, tsen; int gchan; int i; #ifdef SBE_PMCC4_ENABLE hmask = (0x1f << pi->up->p.hypersize) & 0x1f; #endif #ifdef SBE_WAN256T3_ENABLE hmask = (0x1f << hyperdummy) & 0x1f; #endif for (i = 0; i < 128; i++) { gchan = ((pi->portnum * MUSYCC_NCHANS) + (i & hmask)) % MUSYCC_NCHANS; ch = pi->chan[gchan]; if (ch->p.mode_56k) tsen = MODE_56KBPS; else tsen = MODE_64KBPS; /* also the default */ ts = ((pi->portnum % 4) == (i / 32)) ? (tsen << 5) | (i & hmask) : 0; pi->regram->rtsm[i] = ts; pi->regram->ttsm[i] = ts; } FLUSH_MEM_WRITE(); musycc_serv_req(pi, SR_TIMESLOT_MAP | SR_RX_DIRECTION); musycc_serv_req(pi, SR_TIMESLOT_MAP | SR_TX_DIRECTION); } #endif /* * This routine converts a generic library channel configuration parameter * into a hardware specific register value (IE. MUSYCC CCD Register). 
*/
/*
 * @proto: generic CFG_CH_PROTO_* channel protocol code from the library.
 * Returns the corresponding MUSYCC CCD protocol field value. Unknown
 * codes (and ISLP mode) deliberately fall through to HDLC/FCS16 via the
 * combined default/case labels below.
 */
u_int32_t
musycc_chan_proto(int proto)
{
	int reg;

	switch (proto) {
	case CFG_CH_PROTO_TRANS:	/* 0 */
		reg = MUSYCC_CCD_TRANS;
		break;
	case CFG_CH_PROTO_SS7:		/* 1 */
		reg = MUSYCC_CCD_SS7;
		break;
	default:			/* fall through */
	case CFG_CH_PROTO_ISLP_MODE:	/* 4 */
	case CFG_CH_PROTO_HDLC_FCS16:	/* 2 */
		reg = MUSYCC_CCD_HDLC_FCS16;
		break;
	case CFG_CH_PROTO_HDLC_FCS32:	/* 3 */
		reg = MUSYCC_CCD_HDLC_FCS32;
		break;
	}

	return reg;
}

#ifdef SBE_WAN256T3_ENABLE
/*
 * One-time group (port) initialization for the WAN256T3 build: set the
 * group base pointer, the group/port/message-length configuration
 * descriptors, then issue GROUP_INIT in both directions and program the
 * MDT idle codes and timeslot maps.
 */
STATIC void __init
musycc_init_port(mpi_t * pi)
{
	pci_write_32((u_int32_t *) &pi->reg->gbp, OS_vtophys(pi->regram));

	pi->regram->grcd =
		__constant_cpu_to_le32(MUSYCC_GRCD_RX_ENABLE |
				       MUSYCC_GRCD_TX_ENABLE |
				       MUSYCC_GRCD_SF_ALIGN |
				       MUSYCC_GRCD_SUBCHAN_DISABLE |
				       MUSYCC_GRCD_OOFMP_DISABLE |
				       MUSYCC_GRCD_COFAIRQ_DISABLE |
				       MUSYCC_GRCD_MC_ENABLE |
				       (MUSYCC_GRCD_POLLTH_32 << MUSYCC_GRCD_POLLTH_SHIFT));

	pi->regram->pcd =
		__constant_cpu_to_le32(MUSYCC_PCD_E1X4_MODE |
				       MUSYCC_PCD_TXDATA_RISING |
				       MUSYCC_PCD_TX_DRIVEN);

	/* Message length descriptor */
	pi->regram->mld = __constant_cpu_to_le32(cxt1e1_max_mru | (cxt1e1_max_mru << 16));
	FLUSH_MEM_WRITE();

	musycc_serv_req(pi, SR_GROUP_INIT | SR_RX_DIRECTION);
	musycc_serv_req(pi, SR_GROUP_INIT | SR_TX_DIRECTION);
	musycc_init_mdt(pi);
	musycc_update_timeslots(pi);
}
#endif

/*
 * Board-level MUSYCC initialization: allocate the (dword-aligned)
 * interrupt queue and the per-port (2KB-aligned) group register images.
 * Continues below with chip reset, global configuration and the
 * GLOBAL_INIT service request.
 */
status_t __init
musycc_init(ci_t * ci)
{
	char *regaddr;		/* temp for address boundary calculations */
	int i, gchan;

	OS_sem_init(&ci->sem_wdbusy, SEM_AVAILABLE);	/* watchdog exclusion */

	/*
	 * Per MUSYCC manual, Section 6.3.4 - "The host must allocate a dword
	 * aligned memory segment for interrupt queue pointers.
*/ #define INT_QUEUE_BOUNDARY 4 regaddr = OS_kmalloc((INT_QUEUE_SIZE + 1) * sizeof(u_int32_t)); if (regaddr == 0) return ENOMEM; ci->iqd_p_saved = regaddr; /* save orig value for free's usage */ ci->iqd_p = (u_int32_t *) ((unsigned long) (regaddr + INT_QUEUE_BOUNDARY - 1) & (~(INT_QUEUE_BOUNDARY - 1))); /* this calculates * closest boundary */ for (i = 0; i < INT_QUEUE_SIZE; i++) ci->iqd_p[i] = __constant_cpu_to_le32(INT_EMPTY_ENTRY); for (i = 0; i < ci->max_port; i++) { mpi_t *pi = &ci->port[i]; /* * Per MUSYCC manual, Section 6.3.2 - "The host must allocate a 2KB * bound memory segment for Channel Group 0." */ #define GROUP_BOUNDARY 0x800 regaddr = OS_kmalloc(sizeof(struct musycc_groupr) + GROUP_BOUNDARY); if (regaddr == 0) { for (gchan = 0; gchan < i; gchan++) { pi = &ci->port[gchan]; OS_kfree(pi->reg); pi->reg = 0; } return ENOMEM; } pi->regram_saved = regaddr; /* save orig value for free's usage */ pi->regram = (struct musycc_groupr *) ((unsigned long) (regaddr + GROUP_BOUNDARY - 1) & (~(GROUP_BOUNDARY - 1))); /* this calculates * closest boundary */ } /* any board centric MUSYCC commands will use group ZERO as its "home" */ ci->regram = ci->port[0].regram; musycc_serv_req(&ci->port[0], SR_CHIP_RESET); pci_write_32((u_int32_t *) &ci->reg->gbp, OS_vtophys(ci->regram)); pci_flush_write(ci); #ifdef CONFIG_SBE_PMCC4_NCOMM ci->regram->__glcd = __constant_cpu_to_le32(GCD_MAGIC); #else /* standard driver POLLS for INTB via CPLD register */ ci->regram->__glcd = __constant_cpu_to_le32(GCD_MAGIC | MUSYCC_GCD_INTB_DISABLE); #endif ci->regram->__iqp = cpu_to_le32(OS_vtophys(&ci->iqd_p[0])); ci->regram->__iql = __constant_cpu_to_le32(INT_QUEUE_SIZE - 1); pci_write_32((u_int32_t *) &ci->reg->dacbp, 0); FLUSH_MEM_WRITE(); ci->state = C_RUNNING; /* mark as full interrupt processing * available */ musycc_serv_req(&ci->port[0], SR_GLOBAL_INIT); /* FIRST INTERRUPT ! 
*/ /* sanity check settable parameters */ if (cxt1e1_max_mru > 0xffe) { pr_warning("Maximum allowed MRU exceeded, resetting %d to %d.\n", cxt1e1_max_mru, 0xffe); cxt1e1_max_mru = 0xffe; } if (cxt1e1_max_mtu > 0xffe) { pr_warning("Maximum allowed MTU exceeded, resetting %d to %d.\n", cxt1e1_max_mtu, 0xffe); cxt1e1_max_mtu = 0xffe; } #ifdef SBE_WAN256T3_ENABLE for (i = 0; i < MUSYCC_NPORTS; i++) musycc_init_port(&ci->port[i]); #endif return SBE_DRVR_SUCCESS; /* no error */ } void musycc_bh_tx_eom(mpi_t * pi, int gchan) { mch_t *ch; struct mdesc *md; #if 0 #ifndef SBE_ISR_INLINE unsigned long flags; #endif #endif volatile u_int32_t status; ch = pi->chan[gchan]; if (ch == 0 || ch->state != UP) { if (cxt1e1_log_level >= LOG_ERROR) pr_info("%s: intr: xmit EOM on uninitialized channel %d\n", pi->up->devname, gchan); } if (ch == 0 || ch->mdt == 0) return; /* note: mdt==0 implies a malloc() * failure w/in chan_up() routine */ #if 0 #ifdef SBE_ISR_INLINE spin_lock_irq(&ch->ch_txlock); #else spin_lock_irqsave(&ch->ch_txlock, flags); #endif #endif do { FLUSH_MEM_READ(); md = ch->txd_irq_srv; status = le32_to_cpu(md->status); /* * Note: Per MUSYCC Ref 6.4.9, the host does not poll a host-owned * Transmit Buffer Descriptor during Transparent Mode. */ if (status & MUSYCC_TX_OWNED) { int readCount, loopCount; /***********************************************************/ /* HW Bug Fix */ /* ---------- */ /* Under certain PCI Bus loading conditions, the data */ /* associated with an update of Shared Memory is delayed */ /* relative to its PCI Interrupt. This is caught when */ /* the host determines it does not yet OWN the descriptor. 
*/ /***********************************************************/ readCount = 0; while (status & MUSYCC_TX_OWNED) { for (loopCount = 0; loopCount < 0x30; loopCount++) OS_uwait_dummy(); /* use call to avoid optimization * removal of dummy delay */ FLUSH_MEM_READ(); status = le32_to_cpu(md->status); if (readCount++ > 40) break; /* don't wait any longer */ } if (status & MUSYCC_TX_OWNED) { if (cxt1e1_log_level >= LOG_MONITOR) { pr_info("%s: Port %d Chan %2d - unexpected TX msg ownership intr (md %p sts %x)\n", pi->up->devname, pi->portnum, ch->channum, md, status); pr_info("++ User 0x%p IRQ_SRV 0x%p USR_ADD 0x%p QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n", ch->user, ch->txd_irq_srv, ch->txd_usr_add, sd_queue_stopped(ch->user), ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode); musycc_dump_txbuffer_ring(ch, 0); } break; /* Not our mdesc, done */ } else { if (cxt1e1_log_level >= LOG_MONITOR) pr_info("%s: Port %d Chan %2d - recovered TX msg ownership [%d] (md %p sts %x)\n", pi->up->devname, pi->portnum, ch->channum, readCount, md, status); } } ch->txd_irq_srv = md->snext; md->data = 0; if (md->mem_token != 0) { /* upcount channel */ atomic_sub(OS_mem_token_tlen(md->mem_token), &ch->tx_pending); /* upcount card */ atomic_sub(OS_mem_token_tlen(md->mem_token), &pi->up->tx_pending); #ifdef SBE_WAN256T3_ENABLE if (!atomic_read(&pi->up->tx_pending)) wan256t3_led(pi->up, LED_TX, 0); #endif #ifdef CONFIG_SBE_WAN256T3_NCOMM /* callback that our packet was sent */ { int hdlcnum = (pi->portnum * 32 + gchan); if (hdlcnum >= 228) { if (nciProcess_TX_complete) (*nciProcess_TX_complete) (hdlcnum, getuserbychan(gchan)); } } #endif /*** CONFIG_SBE_WAN256T3_NCOMM ***/ OS_mem_token_free_irq(md->mem_token); md->mem_token = 0; } md->status = 0; #ifdef RLD_TXFULL_DEBUG if (cxt1e1_log_level >= LOG_MONITOR2) pr_info("~~ tx_eom: tx_full %x txd_free %d -> %d\n", ch->tx_full, ch->txd_free, ch->txd_free + 1); #endif ++ch->txd_free; FLUSH_MEM_WRITE(); if ((ch->p.chan_mode != 
CFG_CH_PROTO_TRANS) && (status & EOBIRQ_ENABLE)) { if (cxt1e1_log_level >= LOG_MONITOR) pr_info("%s: Mode (%x) incorrect EOB status (%x)\n", pi->up->devname, ch->p.chan_mode, status); if ((status & EOMIRQ_ENABLE) == 0) break; } } while ((ch->p.chan_mode != CFG_CH_PROTO_TRANS) && ((status & EOMIRQ_ENABLE) == 0)); /* * NOTE: (The above 'while' is coupled w/ previous 'do', way above.) Each * Transparent data buffer has the EOB bit, and NOT the EOM bit, set and * will furthermore have a separate IQD associated with each messages * buffer. */ FLUSH_MEM_READ(); /* * Smooth flow control hysterisis by maintaining task stoppage until half * the available write buffers are available. */ if (ch->tx_full && (ch->txd_free >= (ch->txd_num / 2))) { /* * Then, only releave task stoppage if we actually have enough * buffers to service the last requested packet. It may require MORE * than half the available! */ if (ch->txd_free >= ch->txd_required) { #ifdef RLD_TXFULL_DEBUG if (cxt1e1_log_level >= LOG_MONITOR2) pr_info("tx_eom[%d]: enable xmit tx_full no more, txd_free %d txd_num/2 %d\n", ch->channum, ch->txd_free, ch->txd_num / 2); #endif ch->tx_full = 0; ch->txd_required = 0; sd_enable_xmit(ch->user); /* re-enable to catch flow controlled * channel */ } } #ifdef RLD_TXFULL_DEBUG else if (ch->tx_full) { if (cxt1e1_log_level >= LOG_MONITOR2) pr_info("tx_eom[%d]: bypass TX enable though room available? 
(txd_free %d txd_num/2 %d)\n", ch->channum, ch->txd_free, ch->txd_num / 2); } #endif FLUSH_MEM_WRITE(); #if 0 #ifdef SBE_ISR_INLINE spin_unlock_irq(&ch->ch_txlock); #else spin_unlock_irqrestore(&ch->ch_txlock, flags); #endif #endif } STATIC void musycc_bh_rx_eom(mpi_t * pi, int gchan) { mch_t *ch; void *m, *m2; struct mdesc *md; volatile u_int32_t status; u_int32_t error; ch = pi->chan[gchan]; if (ch == 0 || ch->state != UP) { if (cxt1e1_log_level > LOG_ERROR) pr_info("%s: intr: receive EOM on uninitialized channel %d\n", pi->up->devname, gchan); return; } if (ch->mdr == 0) return; /* can this happen ? */ for (;;) { FLUSH_MEM_READ(); md = &ch->mdr[ch->rxix_irq_srv]; status = le32_to_cpu(md->status); if (!(status & HOST_RX_OWNED)) break; /* Not our mdesc, done */ m = md->mem_token; error = (status >> 16) & 0xf; if (error == 0) { #ifdef CONFIG_SBE_WAN256T3_NCOMM int hdlcnum = (pi->portnum * 32 + gchan); /* * if the packet number belongs to NCOMM, then send it to the TMS * driver */ if (hdlcnum >= 228) { if (nciProcess_RX_packet) (*nciProcess_RX_packet) (hdlcnum, status & 0x3fff, m, ch->user); } else #endif /*** CONFIG_SBE_WAN256T3_NCOMM ***/ { if ((m2 = OS_mem_token_alloc(cxt1e1_max_mru))) { /* substitute the mbuf+cluster */ md->mem_token = m2; md->data = cpu_to_le32(OS_vtophys(OS_mem_token_data(m2))); /* pass the received mbuf upward */ sd_recv_consume(m, status & LENGTH_MASK, ch->user); ch->s.rx_packets++; ch->s.rx_bytes += status & LENGTH_MASK; } else ch->s.rx_dropped++; } } else if (error == ERR_FCS) ch->s.rx_crc_errors++; else if (error == ERR_ALIGN) ch->s.rx_missed_errors++; else if (error == ERR_ABT) ch->s.rx_missed_errors++; else if (error == ERR_LNG) ch->s.rx_length_errors++; else if (error == ERR_SHT) ch->s.rx_length_errors++; FLUSH_MEM_WRITE(); status = cxt1e1_max_mru; if (ch->p.chan_mode == CFG_CH_PROTO_TRANS) status |= EOBIRQ_ENABLE; md->status = cpu_to_le32(status); /* Check next mdesc in the ring */ if (++ch->rxix_irq_srv >= ch->rxd_num) 
ch->rxix_irq_srv = 0; FLUSH_MEM_WRITE(); } } irqreturn_t musycc_intr_th_handler(void *devp) { ci_t *ci = (ci_t *) devp; volatile u_int32_t status, currInt = 0; u_int32_t nextInt, intCnt; /* * Hardware not available, potential interrupt hang. But since interrupt * might be shared, just return. */ if (ci->state == C_INIT) return IRQ_NONE; /* * Marked as hardware available. Don't service interrupts, just clear the * event. */ if (ci->state == C_IDLE) { status = pci_read_32((u_int32_t *) &ci->reg->isd); /* clear the interrupt but process nothing else */ pci_write_32((u_int32_t *) &ci->reg->isd, status); return IRQ_HANDLED; } FLUSH_PCI_READ(); FLUSH_MEM_READ(); status = pci_read_32((u_int32_t *) &ci->reg->isd); nextInt = INTRPTS_NEXTINT(status); intCnt = INTRPTS_INTCNT(status); ci->intlog.drvr_intr_thcount++; /*********************************************************/ /* HW Bug Fix */ /* ---------- */ /* Under certain PCI Bus loading conditions, the */ /* MUSYCC looses the data associated with an update */ /* of its ISD and erroneously returns the immediately */ /* preceding 'nextInt' value. However, the 'intCnt' */ /* value appears to be correct. By not starting service */ /* where the 'missing' 'nextInt' SHOULD point causes */ /* the IQD not to be serviced - the 'not serviced' */ /* entries then remain and continue to increase as more */ /* incorrect ISD's are encountered. */ /*********************************************************/ if (nextInt != INTRPTS_NEXTINT(ci->intlog.this_status_new)) { if (cxt1e1_log_level >= LOG_MONITOR) { pr_info("%s: note - updated ISD from %08x to %08x\n", ci->devname, status, (status & (~INTRPTS_NEXTINT_M)) | ci->intlog.this_status_new); } /* * Replace bogus status with software corrected value. * * It's not known whether, during this problem occurrence, if the * INTFULL bit is correctly reported or not. 
*/ status = (status & (~INTRPTS_NEXTINT_M)) | (ci->intlog.this_status_new); nextInt = INTRPTS_NEXTINT(status); } /**********************************************/ /* Cn847x Bug Fix */ /* -------------- */ /* Fix for inability to write back same index */ /* as read for a full interrupt queue. */ /**********************************************/ if (intCnt == INT_QUEUE_SIZE) currInt = ((intCnt - 1) + nextInt) & (INT_QUEUE_SIZE - 1); else /************************************************/ /* Interrupt Write Location Issues */ /* ------------------------------- */ /* When the interrupt status descriptor is */ /* written, the interrupt line is de-asserted */ /* by the Cn847x. In the case of MIPS */ /* microprocessors, this must occur at the */ /* beginning of the interrupt handler so that */ /* the interrupt handle is not re-entered due */ /* to interrupt dis-assertion latency. */ /* In the case of all other processors, this */ /* action should occur at the end of the */ /* interrupt handler to avoid overwriting the */ /* interrupt queue. */ /************************************************/ if (intCnt) currInt = (intCnt + nextInt) & (INT_QUEUE_SIZE - 1); else { /* * NOTE: Servicing an interrupt whose ISD contains a count of ZERO * can be indicative of a Shared Interrupt chain. Our driver can be * called from the system's interrupt handler as a matter of the OS * walking the chain. As the chain is walked, the interrupt will * eventually be serviced by the correct driver/handler. 
*/ #if 0 /* chained interrupt = not ours */ pr_info(">> %s: intCnt NULL, sts %x, possibly a chained interrupt!\n", ci->devname, status); #endif return IRQ_NONE; } ci->iqp_tailx = currInt; currInt <<= INTRPTS_NEXTINT_S; ci->intlog.last_status_new = ci->intlog.this_status_new; ci->intlog.this_status_new = currInt; if ((cxt1e1_log_level >= LOG_WARN) && (status & INTRPTS_INTFULL_M)) pr_info("%s: Interrupt queue full condition occurred\n", ci->devname); if (cxt1e1_log_level >= LOG_DEBUG) pr_info("%s: interrupts pending, isd @ 0x%p: %x curr %d cnt %d NEXT %d\n", ci->devname, &ci->reg->isd, status, nextInt, intCnt, (intCnt + nextInt) & (INT_QUEUE_SIZE - 1)); FLUSH_MEM_WRITE(); #if defined(SBE_ISR_TASKLET) pci_write_32((u_int32_t *) &ci->reg->isd, currInt); atomic_inc(&ci->bh_pending); tasklet_schedule(&ci->ci_musycc_isr_tasklet); #elif defined(SBE_ISR_IMMEDIATE) pci_write_32((u_int32_t *) &ci->reg->isd, currInt); atomic_inc(&ci->bh_pending); queue_task(&ci->ci_musycc_isr_tq, &tq_immediate); mark_bh(IMMEDIATE_BH); #elif defined(SBE_ISR_INLINE) (void) musycc_intr_bh_tasklet(ci); pci_write_32((u_int32_t *) &ci->reg->isd, currInt); #endif return IRQ_HANDLED; } #if defined(SBE_ISR_IMMEDIATE) unsigned long #else void #endif musycc_intr_bh_tasklet(ci_t * ci) { mpi_t *pi; mch_t *ch; unsigned int intCnt; volatile u_int32_t currInt = 0; volatile unsigned int headx, tailx; int readCount, loopCount; int group, gchan, event, err, tx; u_int32_t badInt = INT_EMPTY_ENTRY; u_int32_t badInt2 = INT_EMPTY_ENTRY2; /* * Hardware not available, potential interrupt hang. But since interrupt * might be shared, just return. 
*/ if ((drvr_state != SBE_DRVR_AVAILABLE) || (ci->state == C_INIT)) { #if defined(SBE_ISR_IMMEDIATE) return 0L; #else return; #endif } #if defined(SBE_ISR_TASKLET) || defined(SBE_ISR_IMMEDIATE) if (drvr_state != SBE_DRVR_AVAILABLE) { #if defined(SBE_ISR_TASKLET) return; #elif defined(SBE_ISR_IMMEDIATE) return 0L; #endif } #elif defined(SBE_ISR_INLINE) /* no semaphore taken, no double checks */ #endif ci->intlog.drvr_intr_bhcount++; FLUSH_MEM_READ(); { unsigned int bh = atomic_read(&ci->bh_pending); max_bh = max(bh, max_bh); } atomic_set(&ci->bh_pending, 0);/* if here, no longer pending */ while ((headx = ci->iqp_headx) != (tailx = ci->iqp_tailx)) { intCnt = (tailx >= headx) ? (tailx - headx) : (tailx - headx + INT_QUEUE_SIZE); currInt = le32_to_cpu(ci->iqd_p[headx]); max_intcnt = max(intCnt, max_intcnt); /* RLD DEBUG */ /**************************************************/ /* HW Bug Fix */ /* ---------- */ /* The following code checks for the condition */ /* of interrupt assertion before interrupt */ /* queue update. This is a problem on several */ /* PCI-Local bridge chips found on some products. */ /**************************************************/ readCount = 0; if ((currInt == badInt) || (currInt == badInt2)) ci->intlog.drvr_int_failure++; while ((currInt == badInt) || (currInt == badInt2)) { for (loopCount = 0; loopCount < 0x30; loopCount++) OS_uwait_dummy(); /* use call to avoid optimization removal * of dummy delay */ FLUSH_MEM_READ(); currInt = le32_to_cpu(ci->iqd_p[headx]); if (readCount++ > 20) break; } if ((currInt == badInt) || (currInt == badInt2)) { /* catch failure of Bug * Fix checking */ if (cxt1e1_log_level >= LOG_WARN) pr_info("%s: Illegal Interrupt Detected @ 0x%p, mod %d.)\n", ci->devname, &ci->iqd_p[headx], headx); /* * If the descriptor has not recovered, then leaving the EMPTY * entry set will not signal to the MUSYCC that this descriptor * has been serviced. 
The Interrupt Queue can then start losing * available descriptors and MUSYCC eventually encounters and * reports the INTFULL condition. Per manual, changing any bit * marks descriptor as available, thus the use of different * EMPTY_ENTRY values. */ if (currInt == badInt) ci->iqd_p[headx] = __constant_cpu_to_le32(INT_EMPTY_ENTRY2); else ci->iqd_p[headx] = __constant_cpu_to_le32(INT_EMPTY_ENTRY); ci->iqp_headx = (headx + 1) & (INT_QUEUE_SIZE - 1); /* insure wrapness */ FLUSH_MEM_WRITE(); FLUSH_MEM_READ(); continue; } group = INTRPT_GRP(currInt); gchan = INTRPT_CH(currInt); event = INTRPT_EVENT(currInt); err = INTRPT_ERROR(currInt); tx = currInt & INTRPT_DIR_M; ci->iqd_p[headx] = __constant_cpu_to_le32(INT_EMPTY_ENTRY); FLUSH_MEM_WRITE(); if (cxt1e1_log_level >= LOG_DEBUG) { if (err != 0) pr_info(" %08x -> err: %2d,", currInt, err); pr_info("+ interrupt event: %d, grp: %d, chan: %2d, side: %cX\n", event, group, gchan, tx ? 'T' : 'R'); } pi = &ci->port[group]; /* notice that here we assume 1-1 group - * port mapping */ ch = pi->chan[gchan]; switch (event) { case EVE_SACK: /* Service Request Acknowledge */ if (cxt1e1_log_level >= LOG_DEBUG) { volatile u_int32_t r; r = pci_read_32((u_int32_t *) &pi->reg->srd); pr_info("- SACK cmd: %08x (hdw= %08x)\n", pi->sr_last, r); } SD_SEM_GIVE(&pi->sr_sem_wait); /* wake up waiting process */ break; case EVE_CHABT: /* Change To Abort Code (0x7e -> 0xff) */ case EVE_CHIC: /* Change To Idle Code (0xff -> 0x7e) */ break; case EVE_EOM: /* End Of Message */ case EVE_EOB: /* End Of Buffer (Transparent mode) */ if (tx) musycc_bh_tx_eom(pi, gchan); else musycc_bh_rx_eom(pi, gchan); #if 0 break; #else /* * MUSYCC Interrupt Descriptor section states that EOB and EOM * can be combined with the NONE error (as well as others). So * drop thru to catch this... 
*/ #endif case EVE_NONE: if (err == ERR_SHT) ch->s.rx_length_errors++; break; default: if (cxt1e1_log_level >= LOG_WARN) pr_info("%s: unexpected interrupt event: %d, iqd[%d]: %08x, port: %d\n", ci->devname, event, headx, currInt, group); break; } /* switch on event */ /* * Per MUSYCC Manual, Section 6.4.8.3 [Transmit Errors], TX errors * are service-affecting and require action to resume normal * bit-level processing. */ switch (err) { case ERR_ONR: /* * Per MUSYCC manual, Section 6.4.8.3 [Transmit Errors], this * error requires Transmit channel reactivation. * * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors], this error * requires Receive channel reactivation. */ if (tx) { /* * TX ONR Error only occurs when channel is configured for * Transparent Mode. However, this code will catch and * re-activate on ANY TX ONR error. */ /* * Set flag to re-enable on any next transmit attempt. */ ch->ch_start_tx = CH_START_TX_ONR; { #ifdef RLD_TRANS_DEBUG if (1 || cxt1e1_log_level >= LOG_MONITOR) #else if (cxt1e1_log_level >= LOG_MONITOR) #endif { pr_info("%s: TX buffer underflow [ONR] on channel %d, mode %x QStopped %x free %d\n", ci->devname, ch->channum, ch->p.chan_mode, sd_queue_stopped(ch->user), ch->txd_free); #ifdef RLD_DEBUG if (ch->p.chan_mode == 2) { /* problem = ONR on HDLC * mode */ pr_info("++ Failed Last %x Next %x QStopped %x, start_tx %x tx_full %d txd_free %d mode %x\n", (u_int32_t) ch->txd_irq_srv, (u_int32_t) ch->txd_usr_add, sd_queue_stopped(ch->user), ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->p.chan_mode); musycc_dump_txbuffer_ring(ch, 0); } #endif } } } else { /* RX buffer overrun */ /* * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors], * channel recovery for this RX ONR error IS required. It is * also suggested to increase the number of receive buffers * for this channel. Receive channel reactivation IS * required, and data has been lost. 
*/ ch->s.rx_over_errors++; ch->ch_start_rx = CH_START_RX_ONR; if (cxt1e1_log_level >= LOG_WARN) { pr_info("%s: RX buffer overflow [ONR] on channel %d, mode %x\n", ci->devname, ch->channum, ch->p.chan_mode); //musycc_dump_rxbuffer_ring (ch, 0); /* RLD DEBUG */ } } musycc_chan_restart(ch); break; case ERR_BUF: if (tx) { ch->s.tx_fifo_errors++; ch->ch_start_tx = CH_START_TX_BUF; /* * Per MUSYCC manual, Section 6.4.8.3 [Transmit Errors], * this BUFF error requires Transmit channel reactivation. */ if (cxt1e1_log_level >= LOG_MONITOR) pr_info("%s: TX buffer underrun [BUFF] on channel %d, mode %x\n", ci->devname, ch->channum, ch->p.chan_mode); } else { /* RX buffer overrun */ ch->s.rx_over_errors++; /* * Per MUSYCC manual, Section 6.4.8.4 [Receive Errors], HDLC * mode requires NO recovery for this RX BUFF error is * required. It is suggested to increase the FIFO buffer * space for this channel. Receive channel reactivation is * not required, but data has been lost. */ if (cxt1e1_log_level >= LOG_WARN) pr_info("%s: RX buffer overrun [BUFF] on channel %d, mode %x\n", ci->devname, ch->channum, ch->p.chan_mode); /* * Per MUSYCC manual, Section 6.4.9.4 [Receive Errors], * Transparent mode DOES require recovery for the RX BUFF * error. It is suggested to increase the FIFO buffer space * for this channel. Receive channel reactivation IS * required and data has been lost. 
*/ if (ch->p.chan_mode == CFG_CH_PROTO_TRANS) ch->ch_start_rx = CH_START_RX_BUF; } if (tx || (ch->p.chan_mode == CFG_CH_PROTO_TRANS)) musycc_chan_restart(ch); break; default: break; } /* switch on err */ /* Check for interrupt lost condition */ if ((currInt & INTRPT_ILOST_M) && (cxt1e1_log_level >= LOG_ERROR)) pr_info("%s: Interrupt queue overflow - ILOST asserted\n", ci->devname); ci->iqp_headx = (headx + 1) & (INT_QUEUE_SIZE - 1); /* insure wrapness */ FLUSH_MEM_WRITE(); FLUSH_MEM_READ(); } /* while */ if ((cxt1e1_log_level >= LOG_MONITOR2) && (ci->iqp_headx != ci->iqp_tailx)) { int bh; bh = atomic_read(&CI->bh_pending); pr_info("_bh_: late arrivals, head %d != tail %d, pending %d\n", ci->iqp_headx, ci->iqp_tailx, bh); } #if defined(SBE_ISR_IMMEDIATE) return 0L; #endif /* else, nothing returned */ } #if 0 int __init musycc_new_chan(ci_t * ci, int channum, void *user) { mch_t *ch; ch = ci->port[channum / MUSYCC_NCHANS].chan[channum % MUSYCC_NCHANS]; if (ch->state != UNASSIGNED) return EEXIST; /* NOTE: mch_t already cleared during OS_kmalloc() */ ch->state = DOWN; ch->user = user; #if 0 ch->status = 0; ch->p.status = 0; ch->p.intr_mask = 0; #endif ch->p.chan_mode = CFG_CH_PROTO_HDLC_FCS16; ch->p.idlecode = CFG_CH_FLAG_7E; ch->p.pad_fill_count = 2; spin_lock_init(&ch->ch_rxlock); spin_lock_init(&ch->ch_txlock); return 0; } #endif #ifdef SBE_PMCC4_ENABLE status_t musycc_chan_down(ci_t * dummy, int channum) { mpi_t *pi; mch_t *ch; int i, gchan; if (!(ch = sd_find_chan(dummy, channum))) return EINVAL; pi = ch->up; gchan = ch->gchan; /* Deactivate the channel */ musycc_serv_req(pi, SR_CHANNEL_DEACTIVATE | SR_RX_DIRECTION | gchan); ch->ch_start_rx = 0; musycc_serv_req(pi, SR_CHANNEL_DEACTIVATE | SR_TX_DIRECTION | gchan); ch->ch_start_tx = 0; if (ch->state == DOWN) return 0; ch->state = DOWN; pi->regram->thp[gchan] = 0; pi->regram->tmp[gchan] = 0; pi->regram->rhp[gchan] = 0; pi->regram->rmp[gchan] = 0; FLUSH_MEM_WRITE(); for (i = 0; i < ch->txd_num; i++) if 
(ch->mdt[i].mem_token != 0) OS_mem_token_free(ch->mdt[i].mem_token); for (i = 0; i < ch->rxd_num; i++) if (ch->mdr[i].mem_token != 0) OS_mem_token_free(ch->mdr[i].mem_token); OS_kfree(ch->mdr); ch->mdr = 0; ch->rxd_num = 0; OS_kfree(ch->mdt); ch->mdt = 0; ch->txd_num = 0; musycc_update_timeslots(pi); c4_fifo_free(pi, ch->gchan); pi->openchans--; return 0; } #endif int musycc_del_chan(ci_t * ci, int channum) { mch_t *ch; if ((channum < 0) || (channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS))) /* sanity chk param */ return ECHRNG; if (!(ch = sd_find_chan(ci, channum))) return ENOENT; if (ch->state == UP) musycc_chan_down(ci, channum); ch->state = UNASSIGNED; return 0; } int musycc_del_chan_stats(ci_t * ci, int channum) { mch_t *ch; if (channum < 0 || channum >= (MUSYCC_NPORTS * MUSYCC_NCHANS)) /* sanity chk param */ return ECHRNG; if (!(ch = sd_find_chan(ci, channum))) return ENOENT; memset(&ch->s, 0, sizeof(struct sbecom_chan_stats)); return 0; } int musycc_start_xmit(ci_t * ci, int channum, void *mem_token) { mch_t *ch; struct mdesc *md; void *m2; #if 0 unsigned long flags; #endif int txd_need_cnt; u_int32_t len; if (!(ch = sd_find_chan(ci, channum))) return -ENOENT; if (ci->state != C_RUNNING) /* full interrupt processing available */ return -EINVAL; if (ch->state != UP) return -EINVAL; if (!(ch->status & TX_ENABLED)) return -EROFS; /* how else to flag unwritable state ? 
*/ #ifdef RLD_TRANS_DEBUGx if (1 || cxt1e1_log_level >= LOG_MONITOR2) #else if (cxt1e1_log_level >= LOG_MONITOR2) #endif { pr_info("++ start_xmt[%d]: state %x start %x full %d free %d required %d stopped %x\n", channum, ch->state, ch->ch_start_tx, ch->tx_full, ch->txd_free, ch->txd_required, sd_queue_stopped(ch->user)); } /***********************************************/ /** Determine total amount of data to be sent **/ /***********************************************/ m2 = mem_token; txd_need_cnt = 0; for (len = OS_mem_token_tlen(m2); len > 0; m2 = (void *) OS_mem_token_next(m2)) { if (!OS_mem_token_len(m2)) continue; txd_need_cnt++; len -= OS_mem_token_len(m2); } if (txd_need_cnt == 0) { if (cxt1e1_log_level >= LOG_MONITOR2) pr_info("%s channel %d: no TX data in User buffer\n", ci->devname, channum); OS_mem_token_free(mem_token); return 0; /* no data to send */ } /*************************************************/ /** Are there sufficient descriptors available? **/ /*************************************************/ if (txd_need_cnt > ch->txd_num) { /* never enough descriptors for this * large a buffer */ if (cxt1e1_log_level >= LOG_DEBUG) pr_info("start_xmit: discarding buffer, insufficient descriptor cnt %d, need %d.\n", ch->txd_num, txd_need_cnt + 1); ch->s.tx_dropped++; OS_mem_token_free(mem_token); return 0; } #if 0 spin_lock_irqsave(&ch->ch_txlock, flags); #endif /************************************************************/ /** flow control the line if not enough descriptors remain **/ /************************************************************/ if (txd_need_cnt > ch->txd_free) { if (cxt1e1_log_level >= LOG_MONITOR2) pr_info("start_xmit[%d]: EBUSY - need more descriptors, have %d of %d need %d\n", channum, ch->txd_free, ch->txd_num, txd_need_cnt); ch->tx_full = 1; ch->txd_required = txd_need_cnt; sd_disable_xmit(ch->user); #if 0 spin_unlock_irqrestore(&ch->ch_txlock, flags); #endif return -EBUSY; /* tell user to try again later */ } 
/**************************************************/ /** Put the user data into MUSYCC data buffer(s) **/ /**************************************************/ m2 = mem_token; md = ch->txd_usr_add; /* get current available descriptor */ for (len = OS_mem_token_tlen(m2); len > 0; m2 = OS_mem_token_next(m2)) { int u = OS_mem_token_len(m2); if (!u) continue; len -= u; /* * Enable following chunks, yet wait to enable the FIRST chunk until * after ALL subsequent chunks are setup. */ if (md != ch->txd_usr_add) /* not first chunk */ u |= MUSYCC_TX_OWNED; /* transfer ownership from HOST to MUSYCC */ if (len) /* not last chunk */ u |= EOBIRQ_ENABLE; else if (ch->p.chan_mode == CFG_CH_PROTO_TRANS) { /* * Per MUSYCC Ref 6.4.9 for Transparent Mode, the host must * always clear EOMIRQ_ENABLE in every Transmit Buffer Descriptor * (IE. don't set herein). */ u |= EOBIRQ_ENABLE; } else u |= EOMIRQ_ENABLE; /* EOM, last HDLC chunk */ /* last chunk in hdlc mode */ u |= (ch->p.idlecode << IDLE_CODE); if (ch->p.pad_fill_count) { #if 0 /* NOOP NOTE: u_int8_t cannot be > 0xFF */ /* sanitize pad_fill_count for maximums allowed by hardware */ if (ch->p.pad_fill_count > EXTRA_FLAGS_MASK) ch->p.pad_fill_count = EXTRA_FLAGS_MASK; #endif u |= (PADFILL_ENABLE | (ch->p.pad_fill_count << EXTRA_FLAGS)); } md->mem_token = len ? 0 : mem_token; /* Fill in mds on last * segment, others set ZERO * so that entire token is * removed ONLY when ALL * segments have been * transmitted. */ md->data = cpu_to_le32(OS_vtophys(OS_mem_token_data(m2))); FLUSH_MEM_WRITE(); md->status = cpu_to_le32(u); --ch->txd_free; md = md->snext; } FLUSH_MEM_WRITE(); /* * Now transfer ownership of first chunk from HOST to MUSYCC in order to * fire-off this XMIT. 
*/ ch->txd_usr_add->status |= __constant_cpu_to_le32(MUSYCC_TX_OWNED); FLUSH_MEM_WRITE(); ch->txd_usr_add = md; len = OS_mem_token_tlen(mem_token); atomic_add(len, &ch->tx_pending); atomic_add(len, &ci->tx_pending); ch->s.tx_packets++; ch->s.tx_bytes += len; /* * If an ONR was seen, then channel requires poking to restart * transmission. */ if (ch->ch_start_tx) musycc_chan_restart(ch); #ifdef SBE_WAN256T3_ENABLE wan256t3_led(ci, LED_TX, LEDV_G); #endif return 0; } /*** End-of-File ***/
gpl-2.0
apasricha/KVMTrace-kernel-mod
drivers/staging/comedi/drivers/addi-data/APCI1710_Dig_io.c
2252
35490
/** @verbatim Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier Tel: +19(0)7223/9493-0 Fax: +49(0)7223/9493-92 http://www.addi-data.com info@addi-data.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA You should also find the complete GPL in the COPYING file accompanying this source code. @endverbatim */ /* +-----------------------------------------------------------------------+ | (C) ADDI-DATA GmbH Dieselstraße 3 D-77833 Ottersweier | +-----------------------------------------------------------------------+ | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com | | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com | +-----------------------------------------------------------------------+ | Project : API APCI1710 | Compiler : gcc | | Module name : DIG_IO.C | Version : 2.96 | +-------------------------------+---------------------------------------+ | Project manager: Eric Stolz | Date : 02/12/2002 | +-----------------------------------------------------------------------+ | Description : APCI-1710 digital I/O module | | | | | +-----------------------------------------------------------------------+ | UPDATES | +-----------------------------------------------------------------------+ | Date | Author | Description of updates | 
+----------+-----------+------------------------------------------------+ | 16/06/98 | S. Weber | Digital input / output implementation | |----------|-----------|------------------------------------------------| | 08/05/00 | Guinot C | - 0400/0228 All Function in RING 0 | | | | available | +-----------------------------------------------------------------------+ | | | | | | | | +-----------------------------------------------------------------------+ */ /* Digital Output ON or OFF */ #define APCI1710_ON 1 #define APCI1710_OFF 0 /* Digital I/O */ #define APCI1710_INPUT 0 #define APCI1710_OUTPUT 1 #define APCI1710_DIGIO_MEMORYONOFF 0x10 #define APCI1710_DIGIO_INIT 0x11 /* +----------------------------------------------------------------------------+ | Function Name : int i_APCI1710_InsnConfigDigitalIO(struct comedi_device *dev, | | struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data)| +----------------------------------------------------------------------------+ | Task : Configure the digital I/O operating mode from selected | | module (b_ModulNbr). You must calling this function be| | for you call any other function witch access of digital| | I/O. | +----------------------------------------------------------------------------+ | Input Parameters : | | unsigned char_ b_ModulNbr data[0]: Module number to | | configure (0 to 3) | | unsigned char_ b_ChannelAMode data[1] : Channel A mode selection | | 0 : Channel used for digital | | input | | 1 : Channel used for digital | | output | | unsigned char_ b_ChannelBMode data[2] : Channel B mode selection | | 0 : Channel used for digital | | input | | 1 : Channel used for digital | | output | data[0] memory on/off Activates and deactivates the digital output memory. 
After having | | called up this function with memory on,the output you have previously| | activated with the function are not reset +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: The module parameter is wrong | | -3: The module is not a digital I/O module | | -4: Bi-directional channel A configuration error | | -5: Bi-directional channel B configuration error | +----------------------------------------------------------------------------+ */ static int i_APCI1710_InsnConfigDigitalIO(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct addi_private *devpriv = dev->private; unsigned char b_ModulNbr, b_ChannelAMode, b_ChannelBMode; unsigned char b_MemoryOnOff, b_ConfigType; int i_ReturnValue = 0; unsigned int dw_WriteConfig = 0; b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec); b_ConfigType = (unsigned char) data[0]; /* Memory or Init */ b_ChannelAMode = (unsigned char) data[1]; b_ChannelBMode = (unsigned char) data[2]; b_MemoryOnOff = (unsigned char) data[1]; /* if memory operation */ i_ReturnValue = insn->n; /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr >= 4) { DPRINTK("Module Number invalid\n"); i_ReturnValue = -2; return i_ReturnValue; } switch (b_ConfigType) { case APCI1710_DIGIO_MEMORYONOFF: if (b_MemoryOnOff) /* If Memory ON */ { /****************************/ /* Set the output memory on */ /****************************/ devpriv->s_ModuleInfo[b_ModulNbr]. s_DigitalIOInfo.b_OutputMemoryEnabled = 1; /***************************/ /* Clear the output memory */ /***************************/ devpriv->s_ModuleInfo[b_ModulNbr]. 
s_DigitalIOInfo.dw_OutputMemory = 0; } else /* If memory off */ { /*****************************/ /* Set the output memory off */ /*****************************/ devpriv->s_ModuleInfo[b_ModulNbr]. s_DigitalIOInfo.b_OutputMemoryEnabled = 0; } break; case APCI1710_DIGIO_INIT: /*******************************/ /* Test if digital I/O counter */ /*******************************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_DIGITAL_IO) { /***************************************************/ /* Test the bi-directional channel A configuration */ /***************************************************/ if ((b_ChannelAMode == 0) || (b_ChannelAMode == 1)) { /***************************************************/ /* Test the bi-directional channel B configuration */ /***************************************************/ if ((b_ChannelBMode == 0) || (b_ChannelBMode == 1)) { devpriv->s_ModuleInfo[b_ModulNbr]. s_DigitalIOInfo.b_DigitalInit = 1; /********************************/ /* Save channel A configuration */ /********************************/ devpriv->s_ModuleInfo[b_ModulNbr]. s_DigitalIOInfo. b_ChannelAMode = b_ChannelAMode; /********************************/ /* Save channel B configuration */ /********************************/ devpriv->s_ModuleInfo[b_ModulNbr]. s_DigitalIOInfo. b_ChannelBMode = b_ChannelBMode; /*****************************************/ /* Set the channel A and B configuration */ /*****************************************/ dw_WriteConfig = (unsigned int) (b_ChannelAMode | (b_ChannelBMode * 2)); /***************************/ /* Write the configuration */ /***************************/ outl(dw_WriteConfig, devpriv->s_BoardInfos. 
ui_Address + 4 + (64 * b_ModulNbr)); } else { /************************************************/ /* Bi-directional channel B configuration error */ /************************************************/ DPRINTK("Bi-directional channel B configuration error\n"); i_ReturnValue = -5; } } else { /************************************************/ /* Bi-directional channel A configuration error */ /************************************************/ DPRINTK("Bi-directional channel A configuration error\n"); i_ReturnValue = -4; } } else { /******************************************/ /* The module is not a digital I/O module */ /******************************************/ DPRINTK("The module is not a digital I/O module\n"); i_ReturnValue = -3; } } /* end of Switch */ printk("Return Value %d\n", i_ReturnValue); return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | INPUT FUNCTIONS | +----------------------------------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ |INT i_APCI1710_InsnReadDigitalIOChlValue(struct comedi_device *dev,comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) +----------------------------------------------------------------------------+ | Task : Read the status from selected digital I/O digital input| | (b_InputChannel) | +----------------------------------------------------------------------------| | | unsigned char_ b_ModulNbr CR_AREF(chanspec) : Selected module number | | (0 to 3) | | unsigned char_ b_InputChannel CR_CHAN(chanspec) : Selection from digital | | input ( 0 to 6) | | 0 : Channel C | | 1 : Channel D | | 2 : Channel E | | 3 : Channel F | | 4 : Channel G | | 5 : Channel A | | 6 : Channel B | +----------------------------------------------------------------------------+ | Output Parameters : data[0] : Digital input channel | | status | | 0 : Channle is not active| | 1 : Channle is active | 
+----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: The module parameter is wrong | | -3: The module is not a digital I/O module | | -4: The selected digital I/O digital input is wrong | | -5: Digital I/O not initialised | | -6: The digital channel A is used for output | | -7: The digital channel B is used for output | +----------------------------------------------------------------------------+ */ /* _INT_ i_APCI1710_ReadDigitalIOChlValue (unsigned char_ b_BoardHandle, */ /* * unsigned char_ b_ModulNbr, unsigned char_ b_InputChannel, * unsigned char *_ pb_ChannelStatus) */ static int i_APCI1710_InsnReadDigitalIOChlValue(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct addi_private *devpriv = dev->private; int i_ReturnValue = 0; unsigned int dw_StatusReg; unsigned char b_ModulNbr, b_InputChannel; unsigned char *pb_ChannelStatus; b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec); b_InputChannel = (unsigned char) CR_CHAN(insn->chanspec); data[0] = 0; pb_ChannelStatus = (unsigned char *) &data[0]; i_ReturnValue = insn->n; /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /*******************************/ /* Test if digital I/O counter */ /*******************************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_DIGITAL_IO) { /******************************************/ /* Test the digital imnput channel number */ /******************************************/ if (b_InputChannel <= 6) { /**********************************************/ /* Test if the digital I/O module initialised */ /**********************************************/ if (devpriv->s_ModuleInfo[b_ModulNbr]. 
s_DigitalIOInfo.b_DigitalInit == 1) { /**********************************/ /* Test if channel A or channel B */ /**********************************/ if (b_InputChannel > 4) { /*********************/ /* Test if channel A */ /*********************/ if (b_InputChannel == 5) { /***************************/ /* Test the channel A mode */ /***************************/ if (devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. b_ChannelAMode != 0) { /********************************************/ /* The digital channel A is used for output */ /********************************************/ i_ReturnValue = -6; } } /* if (b_InputChannel == 5) */ else { /***************************/ /* Test the channel B mode */ /***************************/ if (devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. b_ChannelBMode != 0) { /********************************************/ /* The digital channel B is used for output */ /********************************************/ i_ReturnValue = -7; } } /* if (b_InputChannel == 5) */ } /* if (b_InputChannel > 4) */ /***********************/ /* Test if error occur */ /***********************/ if (i_ReturnValue >= 0) { /**************************/ /* Read all digital input */ /**************************/ /* * INPDW (ps_APCI1710Variable-> s_Board [b_BoardHandle]. * s_BoardInfos. ui_Address + (64 * b_ModulNbr), &dw_StatusReg); */ dw_StatusReg = inl(devpriv-> s_BoardInfos. 
ui_Address + (64 * b_ModulNbr)); *pb_ChannelStatus = (unsigned char) ((dw_StatusReg ^ 0x1C) >> b_InputChannel) & 1; } /* if (i_ReturnValue == 0) */ } else { /*******************************/ /* Digital I/O not initialised */ /*******************************/ DPRINTK("Digital I/O not initialised\n"); i_ReturnValue = -5; } } else { /********************************/ /* Selected digital input error */ /********************************/ DPRINTK("Selected digital input error\n"); i_ReturnValue = -4; } } else { /******************************************/ /* The module is not a digital I/O module */ /******************************************/ DPRINTK("The module is not a digital I/O module\n"); i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ DPRINTK("Module number error\n"); i_ReturnValue = -2; } return i_ReturnValue; } /* +----------------------------------------------------------------------------+ | OUTPUT FUNCTIONS | +----------------------------------------------------------------------------+ */ /* +----------------------------------------------------------------------------+ | Function Name : int i_APCI1710_InsnWriteDigitalIOChlOnOff(comedi_device |*dev,struct comedi_subdevice *s,struct comedi_insn *insn,unsigned int *data) +----------------------------------------------------------------------------+ | Task : Sets or resets the output witch has been passed with the | | parameter b_Channel. Setting an output means setting | | an ouput high. 
| +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_ModulNbr (aref ) : Selected module number (0 to 3)| | unsigned char_ b_OutputChannel (CR_CHAN) : Selection from digital output | | channel (0 to 2) | | 0 : Channel H | | 1 : Channel A | | 2 : Channel B | +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : 0: No error | | -1: The handle parameter of the board is wrong | | -2: The module parameter is wrong | | -3: The module is not a digital I/O module | | -4: The selected digital output is wrong | | -5: digital I/O not initialised see function | | " i_APCI1710_InitDigitalIO" | | -6: The digital channel A is used for input | | -7: The digital channel B is used for input -8: Digital Output Memory OFF. | | Use previously the function | | "i_APCI1710_SetDigitalIOMemoryOn". | +----------------------------------------------------------------------------+ */ /* * _INT_ i_APCI1710_SetDigitalIOChlOn (unsigned char_ b_BoardHandle, * unsigned char_ b_ModulNbr, unsigned char_ b_OutputChannel) */ static int i_APCI1710_InsnWriteDigitalIOChlOnOff(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct addi_private *devpriv = dev->private; int i_ReturnValue = 0; unsigned int dw_WriteValue = 0; unsigned char b_ModulNbr, b_OutputChannel; i_ReturnValue = insn->n; b_ModulNbr = CR_AREF(insn->chanspec); b_OutputChannel = CR_CHAN(insn->chanspec); /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /*******************************/ /* Test if digital I/O counter */ /*******************************/ if ((devpriv->s_BoardInfos. 
dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_DIGITAL_IO) { /**********************************************/ /* Test if the digital I/O module initialised */ /**********************************************/ if (devpriv->s_ModuleInfo[b_ModulNbr]. s_DigitalIOInfo.b_DigitalInit == 1) { /******************************************/ /* Test the digital output channel number */ /******************************************/ switch (b_OutputChannel) { /*************/ /* Channel H */ /*************/ case 0: break; /*************/ /* Channel A */ /*************/ case 1: if (devpriv->s_ModuleInfo[b_ModulNbr]. s_DigitalIOInfo. b_ChannelAMode != 1) { /*******************************************/ /* The digital channel A is used for input */ /*******************************************/ i_ReturnValue = -6; } break; /*************/ /* Channel B */ /*************/ case 2: if (devpriv->s_ModuleInfo[b_ModulNbr]. s_DigitalIOInfo. b_ChannelBMode != 1) { /*******************************************/ /* The digital channel B is used for input */ /*******************************************/ i_ReturnValue = -7; } break; default: /****************************************/ /* The selected digital output is wrong */ /****************************************/ i_ReturnValue = -4; break; } /***********************/ /* Test if error occur */ /***********************/ if (i_ReturnValue >= 0) { /*********************************/ /* Test if set channel ON */ /*********************************/ if (data[0]) { /*********************************/ /* Test if output memory enabled */ /*********************************/ if (devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. b_OutputMemoryEnabled == 1) { dw_WriteValue = devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. dw_OutputMemory | (1 << b_OutputChannel); devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. 
dw_OutputMemory = dw_WriteValue; } else { dw_WriteValue = 1 << b_OutputChannel; } } /* set channel off */ else { if (devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. b_OutputMemoryEnabled == 1) { dw_WriteValue = devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. dw_OutputMemory & (0xFFFFFFFFUL - (1 << b_OutputChannel)); devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. dw_OutputMemory = dw_WriteValue; } else { /*****************************/ /* Digital Output Memory OFF */ /*****************************/ /* +Use previously the function "i_APCI1710_SetDigitalIOMemoryOn" */ i_ReturnValue = -8; } } /*******************/ /* Write the value */ /*******************/ /* OUTPDW (ps_APCI1710Variable-> * s_Board [b_BoardHandle]. * s_BoardInfos. ui_Address + (64 * b_ModulNbr), * dw_WriteValue); */ */ outl(dw_WriteValue, devpriv->s_BoardInfos. ui_Address + (64 * b_ModulNbr)); } } else { /*******************************/ /* Digital I/O not initialised */ /*******************************/ i_ReturnValue = -5; } } else { /******************************************/ /* The module is not a digital I/O module */ /******************************************/ i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ i_ReturnValue = -2; } return i_ReturnValue; } /* +----------------------------------------------------------------------------+ |INT i_APCI1710_InsnBitsDigitalIOPortOnOff(struct comedi_device *dev,comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) +----------------------------------------------------------------------------+ | Task : write: Sets or resets one or several outputs from port. | | Setting an output means setting an output high. | | If you have switched OFF the digital output memory | | (OFF), all the other output are set to "0". 
| read: Read the status from digital input port | | from selected digital I/O module (b_ModulNbr) +----------------------------------------------------------------------------+ | Input Parameters : unsigned char_ b_BoardHandle : Handle of board APCI-1710 | | unsigned char_ b_ModulNbr CR_AREF(aref) : Selected module number (0 to 3)| | unsigned char_ b_PortValue CR_CHAN(chanspec) : Output Value ( 0 To 7 ) | data[0] read or write port | data[1] if write then indicate ON or OFF | if read : data[1] will return port status. +----------------------------------------------------------------------------+ | Output Parameters : - | +----------------------------------------------------------------------------+ | Return Value : | INPUT : 0: No error | | -1: The handle parameter of the board is wrong | | -2: The module parameter is wrong | | -3: The module is not a digital I/O module | | -4: Digital I/O not initialised OUTPUT: 0: No error | | -1: The handle parameter of the board is wrong | | -2: The module parameter is wrong | | -3: The module is not a digital I/O module | | -4: Output value wrong | | -5: digital I/O not initialised see function | | " i_APCI1710_InitDigitalIO" | | -6: The digital channel A is used for input | | -7: The digital channel B is used for input -8: Digital Output Memory OFF. | | Use previously the function | | "i_APCI1710_SetDigitalIOMemoryOn". 
| +----------------------------------------------------------------------------+ */ /* * _INT_ i_APCI1710_SetDigitalIOPortOn (unsigned char_ * b_BoardHandle, unsigned char_ b_ModulNbr, unsigned char_ * b_PortValue) */ static int i_APCI1710_InsnBitsDigitalIOPortOnOff(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { struct addi_private *devpriv = dev->private; int i_ReturnValue = 0; unsigned int dw_WriteValue = 0; unsigned int dw_StatusReg; unsigned char b_ModulNbr, b_PortValue; unsigned char b_PortOperation, b_PortOnOFF; unsigned char *pb_PortValue; b_ModulNbr = (unsigned char) CR_AREF(insn->chanspec); b_PortOperation = (unsigned char) data[0]; /* Input or output */ b_PortOnOFF = (unsigned char) data[1]; /* if output then On or Off */ b_PortValue = (unsigned char) data[2]; /* if out put then Value */ i_ReturnValue = insn->n; pb_PortValue = (unsigned char *) &data[0]; /* if input then read value */ switch (b_PortOperation) { case APCI1710_INPUT: /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /*******************************/ /* Test if digital I/O counter */ /*******************************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_DIGITAL_IO) { /**********************************************/ /* Test if the digital I/O module initialised */ /**********************************************/ if (devpriv->s_ModuleInfo[b_ModulNbr]. s_DigitalIOInfo.b_DigitalInit == 1) { /**************************/ /* Read all digital input */ /**************************/ /* INPDW (ps_APCI1710Variable-> * s_Board [b_BoardHandle]. * s_BoardInfos. * ui_Address + (64 * b_ModulNbr), * &dw_StatusReg); */ dw_StatusReg = inl(devpriv->s_BoardInfos. 
ui_Address + (64 * b_ModulNbr)); *pb_PortValue = (unsigned char) (dw_StatusReg ^ 0x1C); } else { /*******************************/ /* Digital I/O not initialised */ /*******************************/ i_ReturnValue = -4; } } else { /******************************************/ /* The module is not a digital I/O module */ /******************************************/ i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ i_ReturnValue = -2; } break; case APCI1710_OUTPUT: /**************************/ /* Test the module number */ /**************************/ if (b_ModulNbr < 4) { /*******************************/ /* Test if digital I/O counter */ /*******************************/ if ((devpriv->s_BoardInfos. dw_MolduleConfiguration[b_ModulNbr] & 0xFFFF0000UL) == APCI1710_DIGITAL_IO) { /**********************************************/ /* Test if the digital I/O module initialised */ /**********************************************/ if (devpriv->s_ModuleInfo[b_ModulNbr]. s_DigitalIOInfo.b_DigitalInit == 1) { /***********************/ /* Test the port value */ /***********************/ if (b_PortValue <= 7) { /***********************************/ /* Test the digital output channel */ /***********************************/ /**************************/ /* Test if channel A used */ /**************************/ if ((b_PortValue & 2) == 2) { if (devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. b_ChannelAMode != 1) { /*******************************************/ /* The digital channel A is used for input */ /*******************************************/ i_ReturnValue = -6; } } /* if ((b_PortValue & 2) == 2) */ /**************************/ /* Test if channel B used */ /**************************/ if ((b_PortValue & 4) == 4) { if (devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. 
b_ChannelBMode != 1) { /*******************************************/ /* The digital channel B is used for input */ /*******************************************/ i_ReturnValue = -7; } } /* if ((b_PortValue & 4) == 4) */ /***********************/ /* Test if error occur */ /***********************/ if (i_ReturnValue >= 0) { /* if(data[1]) { */ switch (b_PortOnOFF) { /*********************************/ /* Test if set Port ON */ /*********************************/ case APCI1710_ON: /*********************************/ /* Test if output memory enabled */ /*********************************/ if (devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. b_OutputMemoryEnabled == 1) { dw_WriteValue = devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. dw_OutputMemory | b_PortValue; devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. dw_OutputMemory = dw_WriteValue; } else { dw_WriteValue = b_PortValue; } break; /* If Set PORT OFF */ case APCI1710_OFF: /*********************************/ /* Test if output memory enabled */ /*********************************/ if (devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. b_OutputMemoryEnabled == 1) { dw_WriteValue = devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. dw_OutputMemory & (0xFFFFFFFFUL - b_PortValue); devpriv-> s_ModuleInfo [b_ModulNbr]. s_DigitalIOInfo. dw_OutputMemory = dw_WriteValue; } else { /*****************************/ /* Digital Output Memory OFF */ /*****************************/ i_ReturnValue = -8; } } /* switch */ /*******************/ /* Write the value */ /*******************/ /* OUTPDW (ps_APCI1710Variable-> * s_Board [b_BoardHandle]. * s_BoardInfos. * ui_Address + (64 * b_ModulNbr), * dw_WriteValue); */ outl(dw_WriteValue, devpriv-> s_BoardInfos. 
ui_Address + (64 * b_ModulNbr)); } } else { /**********************/ /* Output value wrong */ /**********************/ i_ReturnValue = -4; } } else { /*******************************/ /* Digital I/O not initialised */ /*******************************/ i_ReturnValue = -5; } } else { /******************************************/ /* The module is not a digital I/O module */ /******************************************/ i_ReturnValue = -3; } } else { /***********************/ /* Module number error */ /***********************/ i_ReturnValue = -2; } break; default: i_ReturnValue = -9; DPRINTK("NO INPUT/OUTPUT specified\n"); } /* switch INPUT / OUTPUT */ return i_ReturnValue; }
gpl-2.0
Umang88/Radon-Kenzo
drivers/infiniband/hw/cxgb4/mem.c
2252
24525
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/module.h> #include <linux/moduleparam.h> #include <rdma/ib_umem.h> #include <linux/atomic.h> #include "iw_cxgb4.h" int use_dsgl = 1; module_param(use_dsgl, int, 0644); MODULE_PARM_DESC(use_dsgl, "Use DSGL for PBL/FastReg (default=1)"); #define T4_ULPTX_MIN_IO 32 #define C4IW_MAX_INLINE_SIZE 96 #define T4_ULPTX_MAX_DMA 1024 #define C4IW_INLINE_THRESHOLD 128 static int inline_threshold = C4IW_INLINE_THRESHOLD; module_param(inline_threshold, int, 0644); MODULE_PARM_DESC(inline_threshold, "inline vs dsgl threshold (default=128)"); static int _c4iw_write_mem_dma_aligned(struct c4iw_rdev *rdev, u32 addr, u32 len, dma_addr_t data, int wait) { struct sk_buff *skb; struct ulp_mem_io *req; struct ulptx_sgl *sgl; u8 wr_len; int ret = 0; struct c4iw_wr_wait wr_wait; addr &= 0x7FFFFFF; if (wait) c4iw_init_wr_wait(&wr_wait); wr_len = roundup(sizeof(*req) + sizeof(*sgl), 16); skb = alloc_skb(wr_len, GFP_KERNEL | __GFP_NOFAIL); if (!skb) return -ENOMEM; set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); req = (struct ulp_mem_io *)__skb_put(skb, wr_len); memset(req, 0, wr_len); INIT_ULPTX_WR(req, wr_len, 0, 0); req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) | (wait ? FW_WR_COMPL(1) : 0)); req->wr.wr_lo = wait ? 
(__force __be64)&wr_wait : 0; req->wr.wr_mid = cpu_to_be32(FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16))); req->cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE)); req->cmd |= cpu_to_be32(V_T5_ULP_MEMIO_ORDER(1)); req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN(len>>5)); req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16)); req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr)); sgl = (struct ulptx_sgl *)(req + 1); sgl->cmd_nsge = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(1)); sgl->len0 = cpu_to_be32(len); sgl->addr0 = cpu_to_be64(data); ret = c4iw_ofld_send(rdev, skb); if (ret) return ret; if (wait) ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__); return ret; } static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) { struct sk_buff *skb; struct ulp_mem_io *req; struct ulptx_idata *sc; u8 wr_len, *to_dp, *from_dp; int copy_len, num_wqe, i, ret = 0; struct c4iw_wr_wait wr_wait; __be32 cmd = cpu_to_be32(ULPTX_CMD(ULP_TX_MEM_WRITE)); if (is_t4(rdev->lldi.adapter_type)) cmd |= cpu_to_be32(ULP_MEMIO_ORDER(1)); else cmd |= cpu_to_be32(V_T5_ULP_MEMIO_IMM(1)); addr &= 0x7FFFFFF; PDBG("%s addr 0x%x len %u\n", __func__, addr, len); num_wqe = DIV_ROUND_UP(len, C4IW_MAX_INLINE_SIZE); c4iw_init_wr_wait(&wr_wait); for (i = 0; i < num_wqe; i++) { copy_len = len > C4IW_MAX_INLINE_SIZE ? 
C4IW_MAX_INLINE_SIZE : len; wr_len = roundup(sizeof *req + sizeof *sc + roundup(copy_len, T4_ULPTX_MIN_IO), 16); skb = alloc_skb(wr_len, GFP_KERNEL); if (!skb) return -ENOMEM; set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0); req = (struct ulp_mem_io *)__skb_put(skb, wr_len); memset(req, 0, wr_len); INIT_ULPTX_WR(req, wr_len, 0, 0); if (i == (num_wqe-1)) { req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR) | FW_WR_COMPL(1)); req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait; } else req->wr.wr_hi = cpu_to_be32(FW_WR_OP(FW_ULPTX_WR)); req->wr.wr_mid = cpu_to_be32( FW_WR_LEN16(DIV_ROUND_UP(wr_len, 16))); req->cmd = cmd; req->dlen = cpu_to_be32(ULP_MEMIO_DATA_LEN( DIV_ROUND_UP(copy_len, T4_ULPTX_MIN_IO))); req->len16 = cpu_to_be32(DIV_ROUND_UP(wr_len-sizeof(req->wr), 16)); req->lock_addr = cpu_to_be32(ULP_MEMIO_ADDR(addr + i * 3)); sc = (struct ulptx_idata *)(req + 1); sc->cmd_more = cpu_to_be32(ULPTX_CMD(ULP_TX_SC_IMM)); sc->len = cpu_to_be32(roundup(copy_len, T4_ULPTX_MIN_IO)); to_dp = (u8 *)(sc + 1); from_dp = (u8 *)data + i * C4IW_MAX_INLINE_SIZE; if (data) memcpy(to_dp, from_dp, copy_len); else memset(to_dp, 0, copy_len); if (copy_len % T4_ULPTX_MIN_IO) memset(to_dp + copy_len, 0, T4_ULPTX_MIN_IO - (copy_len % T4_ULPTX_MIN_IO)); ret = c4iw_ofld_send(rdev, skb); if (ret) return ret; len -= C4IW_MAX_INLINE_SIZE; } ret = c4iw_wait_for_reply(rdev, &wr_wait, 0, 0, __func__); return ret; } int _c4iw_write_mem_dma(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) { u32 remain = len; u32 dmalen; int ret = 0; dma_addr_t daddr; dma_addr_t save; daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE); if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr)) return -1; save = daddr; while (remain > inline_threshold) { if (remain < T4_ULPTX_MAX_DMA) { if (remain & ~T4_ULPTX_MIN_IO) dmalen = remain & ~(T4_ULPTX_MIN_IO-1); else dmalen = remain; } else dmalen = T4_ULPTX_MAX_DMA; remain -= dmalen; ret = _c4iw_write_mem_dma_aligned(rdev, addr, dmalen, daddr, 
/* End of _c4iw_write_mem_dma(): advance per chunk (addr is in 32B units, hence >> 5) and hand any sub-threshold tail to the inline path; then write_adapter_mem() dispatches DSGL vs inline, and write_tpt_entry() begins. */
!remain); if (ret) goto out; addr += dmalen >> 5; data += dmalen; daddr += dmalen; } if (remain) ret = _c4iw_write_mem_inline(rdev, addr, remain, data); out: dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE); return ret; } /* * write len bytes of data into addr (32B aligned address) * If data is NULL, clear len byte of memory to zero. */ static int write_adapter_mem(struct c4iw_rdev *rdev, u32 addr, u32 len, void *data) { if (is_t5(rdev->lldi.adapter_type) && use_dsgl) { if (len > inline_threshold) { if (_c4iw_write_mem_dma(rdev, addr, len, data)) { printk_ratelimited(KERN_WARNING "%s: dma map" " failure (non fatal)\n", pci_name(rdev->lldi.pdev)); return _c4iw_write_mem_inline(rdev, addr, len, data); } else return 0; } else return _c4iw_write_mem_inline(rdev, addr, len, data); } else return _c4iw_write_mem_inline(rdev, addr, len, data); } /* * Build and write a TPT entry. * IN: stag key, pdid, perm, bind_enabled, zbva, to, len, page_size, * pbl_size and pbl_addr * OUT: stag index */ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry, u32 *stag, u8 stag_state, u32 pdid, enum fw_ri_stag_type type, enum fw_ri_mem_perms perm, int bind_enabled, u32 zbva, u64 to, u64 len, u8 page_size, u32 pbl_size, u32 pbl_addr) { int err; struct fw_ri_tpte tpt; u32 stag_idx; static atomic_t key; if (c4iw_fatal_error(rdev)) return -EIO; stag_state = stag_state > 0; stag_idx = (*stag) >> 8; if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) { stag_idx = c4iw_get_resource(&rdev->resource.tpt_table); if (!stag_idx) return -ENOMEM; mutex_lock(&rdev->stats.lock); rdev->stats.stag.cur += 32; if (rdev->stats.stag.cur > rdev->stats.stag.max) rdev->stats.stag.max = rdev->stats.stag.cur; mutex_unlock(&rdev->stats.lock); *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff); } PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n", __func__, stag_state, type, pdid, stag_idx); /* write TPT entry */ if (reset_tpt_entry) memset(&tpt, 0,
/* (cont.) write_tpt_entry(): fill the TPTE (valid/perm/address-type/page-size bits, PBL address, 64-bit len and VA split hi/lo) and push it into adapter memory; on reset, return the stag index to the pool. Then the small helpers write_pbl / dereg_mem / allocate_window / deallocate_window / allocate_stag. */
sizeof(tpt)); else { tpt.valid_to_pdid = cpu_to_be32(F_FW_RI_TPTE_VALID | V_FW_RI_TPTE_STAGKEY((*stag & M_FW_RI_TPTE_STAGKEY)) | V_FW_RI_TPTE_STAGSTATE(stag_state) | V_FW_RI_TPTE_STAGTYPE(type) | V_FW_RI_TPTE_PDID(pdid)); tpt.locread_to_qpid = cpu_to_be32(V_FW_RI_TPTE_PERM(perm) | (bind_enabled ? F_FW_RI_TPTE_MWBINDEN : 0) | V_FW_RI_TPTE_ADDRTYPE((zbva ? FW_RI_ZERO_BASED_TO : FW_RI_VA_BASED_TO))| V_FW_RI_TPTE_PS(page_size)); tpt.nosnoop_pbladdr = !pbl_size ? 0 : cpu_to_be32( V_FW_RI_TPTE_PBLADDR(PBL_OFF(rdev, pbl_addr)>>3)); tpt.len_lo = cpu_to_be32((u32)(len & 0xffffffffUL)); tpt.va_hi = cpu_to_be32((u32)(to >> 32)); tpt.va_lo_fbo = cpu_to_be32((u32)(to & 0xffffffffUL)); tpt.dca_mwbcnt_pstag = cpu_to_be32(0); tpt.len_hi = cpu_to_be32((u32)(len >> 32)); } err = write_adapter_mem(rdev, stag_idx + (rdev->lldi.vr->stag.start >> 5), sizeof(tpt), &tpt); if (reset_tpt_entry) { c4iw_put_resource(&rdev->resource.tpt_table, stag_idx); mutex_lock(&rdev->stats.lock); rdev->stats.stag.cur -= 32; mutex_unlock(&rdev->stats.lock); } return err; } static int write_pbl(struct c4iw_rdev *rdev, __be64 *pbl, u32 pbl_addr, u32 pbl_size) { int err; PDBG("%s *pdb_addr 0x%x, pbl_base 0x%x, pbl_size %d\n", __func__, pbl_addr, rdev->lldi.vr->pbl.start, pbl_size); err = write_adapter_mem(rdev, pbl_addr >> 5, pbl_size << 3, pbl); return err; } static int dereg_mem(struct c4iw_rdev *rdev, u32 stag, u32 pbl_size, u32 pbl_addr) { return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, pbl_size, pbl_addr); } static int allocate_window(struct c4iw_rdev *rdev, u32 * stag, u32 pdid) { *stag = T4_STAG_UNSET; return write_tpt_entry(rdev, 0, stag, 0, pdid, FW_RI_STAG_MW, 0, 0, 0, 0UL, 0, 0, 0, 0); } static int deallocate_window(struct c4iw_rdev *rdev, u32 stag) { return write_tpt_entry(rdev, 1, &stag, 0, 0, 0, 0, 0, 0, 0UL, 0, 0, 0, 0); } static int allocate_stag(struct c4iw_rdev *rdev, u32 *stag, u32 pdid, u32 pbl_size, u32 pbl_addr) { *stag = T4_STAG_UNSET; return write_tpt_entry(rdev, 0,
/* (cont.) allocate_stag() tail; finish_mem_reg() publishes the MR in the mmidr table; register_mem()/reregister_mem() write a TPT entry then publish (with dereg on failure); alloc_pbl() reserves PBL pool space; build_phys_page_list() begins (validates the buffer list). */
stag, 0, pdid, FW_RI_STAG_NSMR, 0, 0, 0, 0UL, 0, 0, pbl_size, pbl_addr); } static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag) { u32 mmid; mhp->attr.state = 1; mhp->attr.stag = stag; mmid = stag >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp); return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid); } static int register_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, struct c4iw_mr *mhp, int shift) { u32 stag = T4_STAG_UNSET; int ret; ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, FW_RI_STAG_NSMR, mhp->attr.perms, mhp->attr.mw_bind_enable, mhp->attr.zbva, mhp->attr.va_fbo, mhp->attr.len, shift - 12, mhp->attr.pbl_size, mhp->attr.pbl_addr); if (ret) return ret; ret = finish_mem_reg(mhp, stag); if (ret) dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); return ret; } static int reregister_mem(struct c4iw_dev *rhp, struct c4iw_pd *php, struct c4iw_mr *mhp, int shift, int npages) { u32 stag; int ret; if (npages > mhp->attr.pbl_size) return -ENOMEM; stag = mhp->attr.stag; ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, mhp->attr.pdid, FW_RI_STAG_NSMR, mhp->attr.perms, mhp->attr.mw_bind_enable, mhp->attr.zbva, mhp->attr.va_fbo, mhp->attr.len, shift - 12, mhp->attr.pbl_size, mhp->attr.pbl_addr); if (ret) return ret; ret = finish_mem_reg(mhp, stag); if (ret) dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); return ret; } static int alloc_pbl(struct c4iw_mr *mhp, int npages) { mhp->attr.pbl_addr = c4iw_pblpool_alloc(&mhp->rhp->rdev, npages << 3); if (!mhp->attr.pbl_addr) return -ENOMEM; mhp->attr.pbl_size = npages; return 0; } static int build_phys_page_list(struct ib_phys_buf *buffer_list, int num_phys_buf, u64 *iova_start, u64 *total_size, int *npages, int *shift, __be64 **page_list) { u64 mask; int i, j, n; mask = 0; *total_size = 0; for (i = 0; i < num_phys_buf; ++i) { if (i != 0 && buffer_list[i].addr & ~PAGE_MASK) return -EINVAL; if (i != 0
/* (cont.) build_phys_page_list(): accumulate total size and an alignment mask, cap total at 4GB, pick the largest page shift compatible with the mask, then build the big-endian page array. Afterwards c4iw_reregister_phys_mem() begins. */
&& i != num_phys_buf - 1 && (buffer_list[i].size & ~PAGE_MASK)) return -EINVAL; *total_size += buffer_list[i].size; if (i > 0) mask |= buffer_list[i].addr; else mask |= buffer_list[i].addr & PAGE_MASK; if (i != num_phys_buf - 1) mask |= buffer_list[i].addr + buffer_list[i].size; else mask |= (buffer_list[i].addr + buffer_list[i].size + PAGE_SIZE - 1) & PAGE_MASK; } if (*total_size > 0xFFFFFFFFULL) return -ENOMEM; /* Find largest page shift we can use to cover buffers */ for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift)) if ((1ULL << *shift) & mask) break; buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1); buffer_list[0].addr &= ~0ull << *shift; *npages = 0; for (i = 0; i < num_phys_buf; ++i) *npages += (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift; if (!*npages) return -EINVAL; *page_list = kmalloc(sizeof(u64) * *npages, GFP_KERNEL); if (!*page_list) return -ENOMEM; n = 0; for (i = 0; i < num_phys_buf; ++i) for (j = 0; j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift; ++j) (*page_list)[n++] = cpu_to_be64(buffer_list[i].addr + ((u64) j << *shift)); PDBG("%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d\n", __func__, (unsigned long long)*iova_start, (unsigned long long)mask, *shift, (unsigned long long)*total_size, *npages); return 0; } int c4iw_reregister_phys_mem(struct ib_mr *mr, int mr_rereg_mask, struct ib_pd *pd, struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, u64 *iova_start) { struct c4iw_mr mh, *mhp; struct c4iw_pd *php; struct c4iw_dev *rhp; __be64 *page_list = NULL; int shift = 0; u64 total_size; int npages; int ret; PDBG("%s ib_mr %p ib_pd %p\n", __func__, mr, pd); /* There can be no memory windows */ if (atomic_read(&mr->usecnt)) return -EINVAL; mhp = to_c4iw_mr(mr); rhp = mhp->rhp; php = to_c4iw_pd(mr->pd); /* make sure we are on the same adapter */ if (rhp != php->rhp) return -EINVAL; memcpy(&mh, mhp, sizeof *mhp); if (mr_rereg_mask & IB_MR_REREG_PD) php = to_c4iw_pd(pd); if (mr_rereg_mask &
/* (cont.) c4iw_reregister_phys_mem(): apply PD/ACCESS/TRANS re-registration bits on a scratch copy (mh) and commit to mhp only on success; then c4iw_register_phys_mem() begins (alignment checks, page list build, PBL alloc + write). */
IB_MR_REREG_ACCESS) { mh.attr.perms = c4iw_ib_to_tpt_access(acc); mh.attr.mw_bind_enable = (acc & IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND; } if (mr_rereg_mask & IB_MR_REREG_TRANS) { ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start, &total_size, &npages, &shift, &page_list); if (ret) return ret; } ret = reregister_mem(rhp, php, &mh, shift, npages); kfree(page_list); if (ret) return ret; if (mr_rereg_mask & IB_MR_REREG_PD) mhp->attr.pdid = php->pdid; if (mr_rereg_mask & IB_MR_REREG_ACCESS) mhp->attr.perms = c4iw_ib_to_tpt_access(acc); if (mr_rereg_mask & IB_MR_REREG_TRANS) { mhp->attr.zbva = 0; mhp->attr.va_fbo = *iova_start; mhp->attr.page_size = shift - 12; mhp->attr.len = (u32) total_size; mhp->attr.pbl_size = npages; } return 0; } struct ib_mr *c4iw_register_phys_mem(struct ib_pd *pd, struct ib_phys_buf *buffer_list, int num_phys_buf, int acc, u64 *iova_start) { __be64 *page_list; int shift; u64 total_size; int npages; struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mr *mhp; int ret; PDBG("%s ib_pd %p\n", __func__, pd); php = to_c4iw_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) return ERR_PTR(-ENOMEM); mhp->rhp = rhp; /* First check that we have enough alignment */ if ((*iova_start & ~PAGE_MASK) != (buffer_list[0].addr & ~PAGE_MASK)) { ret = -EINVAL; goto err; } if (num_phys_buf > 1 && ((buffer_list[0].addr + buffer_list[0].size) & ~PAGE_MASK)) { ret = -EINVAL; goto err; } ret = build_phys_page_list(buffer_list, num_phys_buf, iova_start, &total_size, &npages, &shift, &page_list); if (ret) goto err; ret = alloc_pbl(mhp, npages); if (ret) { kfree(page_list); goto err; } ret = write_pbl(&mhp->rhp->rdev, page_list, mhp->attr.pbl_addr, npages); kfree(page_list); if (ret) goto err_pbl; mhp->attr.pdid = php->pdid; mhp->attr.zbva = 0; mhp->attr.perms = c4iw_ib_to_tpt_access(acc); mhp->attr.va_fbo = *iova_start; mhp->attr.page_size = shift - 12; mhp->attr.len = (u32) total_size; mhp->attr.pbl_size = npages; ret =
/* (cont.) c4iw_register_phys_mem() tail: register_mem() plus goto-based unwind; c4iw_get_dma_mr() builds a whole-memory MR (len ~0UL, no PBL); c4iw_reg_user_mr() pins the user region and counts its pages before allocating the PBL. */
register_mem(rhp, php, mhp, shift); if (ret) goto err_pbl; return &mhp->ibmr; err_pbl: c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3); err: kfree(mhp); return ERR_PTR(ret); } struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc) { struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mr *mhp; int ret; u32 stag = T4_STAG_UNSET; PDBG("%s ib_pd %p\n", __func__, pd); php = to_c4iw_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) return ERR_PTR(-ENOMEM); mhp->rhp = rhp; mhp->attr.pdid = php->pdid; mhp->attr.perms = c4iw_ib_to_tpt_access(acc); mhp->attr.mw_bind_enable = (acc&IB_ACCESS_MW_BIND) == IB_ACCESS_MW_BIND; mhp->attr.zbva = 0; mhp->attr.va_fbo = 0; mhp->attr.page_size = 0; mhp->attr.len = ~0UL; mhp->attr.pbl_size = 0; ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid, FW_RI_STAG_NSMR, mhp->attr.perms, mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0); if (ret) goto err1; ret = finish_mem_reg(mhp, stag); if (ret) goto err2; return &mhp->ibmr; err2: dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); err1: kfree(mhp); return ERR_PTR(ret); } struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt, int acc, struct ib_udata *udata) { __be64 *pages; int shift, n, len; int i, j, k; int err = 0; struct ib_umem_chunk *chunk; struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mr *mhp; PDBG("%s ib_pd %p\n", __func__, pd); if (length == ~0ULL) return ERR_PTR(-EINVAL); if ((length + start) < start) return ERR_PTR(-EINVAL); php = to_c4iw_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) return ERR_PTR(-ENOMEM); mhp->rhp = rhp; mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0); if (IS_ERR(mhp->umem)) { err = PTR_ERR(mhp->umem); kfree(mhp); return ERR_PTR(err); } shift = ffs(mhp->umem->page_size) - 1; n = 0; list_for_each_entry(chunk, &mhp->umem->chunk_list, list) n += chunk->nents; err = alloc_pbl(mhp, n); if
/* (cont.) c4iw_reg_user_mr(): walk the umem chunks and batch PBL writes one page of entries at a time; c4iw_alloc_mw() allocates a type-1 memory window; c4iw_dealloc_mw() begins. */
(err) goto err; pages = (__be64 *) __get_free_page(GFP_KERNEL); if (!pages) { err = -ENOMEM; goto err_pbl; } i = n = 0; list_for_each_entry(chunk, &mhp->umem->chunk_list, list) for (j = 0; j < chunk->nmap; ++j) { len = sg_dma_len(&chunk->page_list[j]) >> shift; for (k = 0; k < len; ++k) { pages[i++] = cpu_to_be64(sg_dma_address( &chunk->page_list[j]) + mhp->umem->page_size * k); if (i == PAGE_SIZE / sizeof *pages) { err = write_pbl(&mhp->rhp->rdev, pages, mhp->attr.pbl_addr + (n << 3), i); if (err) goto pbl_done; n += i; i = 0; } } } if (i) err = write_pbl(&mhp->rhp->rdev, pages, mhp->attr.pbl_addr + (n << 3), i); pbl_done: free_page((unsigned long) pages); if (err) goto err_pbl; mhp->attr.pdid = php->pdid; mhp->attr.zbva = 0; mhp->attr.perms = c4iw_ib_to_tpt_access(acc); mhp->attr.va_fbo = virt; mhp->attr.page_size = shift - 12; mhp->attr.len = length; err = register_mem(rhp, php, mhp, shift); if (err) goto err_pbl; return &mhp->ibmr; err_pbl: c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3); err: ib_umem_release(mhp->umem); kfree(mhp); return ERR_PTR(err); } struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) { struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mw *mhp; u32 mmid; u32 stag = 0; int ret; if (type != IB_MW_TYPE_1) return ERR_PTR(-EINVAL); php = to_c4iw_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) return ERR_PTR(-ENOMEM); ret = allocate_window(&rhp->rdev, &stag, php->pdid); if (ret) { kfree(mhp); return ERR_PTR(ret); } mhp->rhp = rhp; mhp->attr.pdid = php->pdid; mhp->attr.type = FW_RI_STAG_MW; mhp->attr.stag = stag; mmid = (stag) >> 8; mhp->ibmw.rkey = stag; if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) { deallocate_window(&rhp->rdev, mhp->attr.stag); kfree(mhp); return ERR_PTR(-ENOMEM); } PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); return &(mhp->ibmw); } int c4iw_dealloc_mw(struct ib_mw *mw) { struct c4iw_dev *rhp; struct c4iw_mw *mhp; u32
/* (cont.) c4iw_dealloc_mw() tail; c4iw_alloc_fast_reg_mr() allocates PBL + stag and publishes in mmidr, with labelled unwind; c4iw_alloc_fastreg_pbl() allocates a DMA-coherent page list; c4iw_free_fastreg_pbl() begins. */
mmid; mhp = to_c4iw_mw(mw); rhp = mhp->rhp; mmid = (mw->rkey) >> 8; remove_handle(rhp, &rhp->mmidr, mmid); deallocate_window(&rhp->rdev, mhp->attr.stag); kfree(mhp); PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp); return 0; } struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth) { struct c4iw_dev *rhp; struct c4iw_pd *php; struct c4iw_mr *mhp; u32 mmid; u32 stag = 0; int ret = 0; php = to_c4iw_pd(pd); rhp = php->rhp; mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); if (!mhp) { ret = -ENOMEM; goto err; } mhp->rhp = rhp; ret = alloc_pbl(mhp, pbl_depth); if (ret) goto err1; mhp->attr.pbl_size = pbl_depth; ret = allocate_stag(&rhp->rdev, &stag, php->pdid, mhp->attr.pbl_size, mhp->attr.pbl_addr); if (ret) goto err2; mhp->attr.pdid = php->pdid; mhp->attr.type = FW_RI_STAG_NSMR; mhp->attr.stag = stag; mhp->attr.state = 1; mmid = (stag) >> 8; mhp->ibmr.rkey = mhp->ibmr.lkey = stag; if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) { ret = -ENOMEM; goto err3; } PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag); return &(mhp->ibmr); err3: dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); err2: c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3); err1: kfree(mhp); err: return ERR_PTR(ret); } struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(struct ib_device *device, int page_list_len) { struct c4iw_fr_page_list *c4pl; struct c4iw_dev *dev = to_c4iw_dev(device); dma_addr_t dma_addr; int pll_len = roundup(page_list_len * sizeof(u64), 32); c4pl = kmalloc(sizeof(*c4pl), GFP_KERNEL); if (!c4pl) return ERR_PTR(-ENOMEM); c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev, pll_len, &dma_addr, GFP_KERNEL); if (!c4pl->ibpl.page_list) { kfree(c4pl); return ERR_PTR(-ENOMEM); } dma_unmap_addr_set(c4pl, mapping, dma_addr); c4pl->dma_addr = dma_addr; c4pl->dev = dev; c4pl->ibpl.max_page_list_len = pll_len; return &c4pl->ibpl; } void c4iw_free_fastreg_pbl(struct ib_fast_reg_page_list
/* (cont.) c4iw_free_fastreg_pbl() frees the coherent buffer (max_page_list_len here holds the rounded byte length set above); c4iw_dereg_mr() unpublishes the MR, writes a reset TPT entry, and releases PBL/kva/umem. */
*ibpl) { struct c4iw_fr_page_list *c4pl = to_c4iw_fr_page_list(ibpl); dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev, c4pl->ibpl.max_page_list_len, c4pl->ibpl.page_list, dma_unmap_addr(c4pl, mapping)); kfree(c4pl); } int c4iw_dereg_mr(struct ib_mr *ib_mr) { struct c4iw_dev *rhp; struct c4iw_mr *mhp; u32 mmid; PDBG("%s ib_mr %p\n", __func__, ib_mr); /* There can be no memory windows */ if (atomic_read(&ib_mr->usecnt)) return -EINVAL; mhp = to_c4iw_mr(ib_mr); rhp = mhp->rhp; mmid = mhp->attr.stag >> 8; remove_handle(rhp, &rhp->mmidr, mmid); dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size, mhp->attr.pbl_addr); if (mhp->attr.pbl_size) c4iw_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr, mhp->attr.pbl_size << 3); if (mhp->kva) kfree((void *) (unsigned long) mhp->kva); if (mhp->umem) ib_umem_release(mhp->umem); PDBG("%s mmid 0x%x ptr %p\n", __func__, mmid, mhp); kfree(mhp); return 0; }
gpl-2.0
tyeo098/MK908-Kernel-NAND
fs/ext3/acl.c
2508
10889
/* * linux/fs/ext3/acl.c * * Copyright (C) 2001-2003 Andreas Gruenbacher, <agruen@suse.de> */ #include <linux/init.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/capability.h> #include <linux/fs.h> #include <linux/ext3_jbd.h> #include <linux/ext3_fs.h> #include "xattr.h" #include "acl.h" /* * Convert from filesystem to in-memory representation. */ static struct posix_acl * ext3_acl_from_disk(const void *value, size_t size) { const char *end = (char *)value + size; int n, count; struct posix_acl *acl; if (!value) return NULL; if (size < sizeof(ext3_acl_header)) return ERR_PTR(-EINVAL); if (((ext3_acl_header *)value)->a_version != cpu_to_le32(EXT3_ACL_VERSION)) return ERR_PTR(-EINVAL); value = (char *)value + sizeof(ext3_acl_header); count = ext3_acl_count(size); if (count < 0) return ERR_PTR(-EINVAL); if (count == 0) return NULL; acl = posix_acl_alloc(count, GFP_NOFS); if (!acl) return ERR_PTR(-ENOMEM); for (n=0; n < count; n++) { ext3_acl_entry *entry = (ext3_acl_entry *)value; if ((char *)value + sizeof(ext3_acl_entry_short) > end) goto fail; acl->a_entries[n].e_tag = le16_to_cpu(entry->e_tag); acl->a_entries[n].e_perm = le16_to_cpu(entry->e_perm); switch(acl->a_entries[n].e_tag) { case ACL_USER_OBJ: case ACL_GROUP_OBJ: case ACL_MASK: case ACL_OTHER: value = (char *)value + sizeof(ext3_acl_entry_short); acl->a_entries[n].e_id = ACL_UNDEFINED_ID; break; case ACL_USER: case ACL_GROUP: value = (char *)value + sizeof(ext3_acl_entry); if ((char *)value > end) goto fail; acl->a_entries[n].e_id = le32_to_cpu(entry->e_id); break; default: goto fail; } } if (value != end) goto fail; return acl; fail: posix_acl_release(acl); return ERR_PTR(-EINVAL); } /* * Convert from in-memory to filesystem representation. 
*/ static void * ext3_acl_to_disk(const struct posix_acl *acl, size_t *size) { ext3_acl_header *ext_acl; char *e; size_t n; *size = ext3_acl_size(acl->a_count); ext_acl = kmalloc(sizeof(ext3_acl_header) + acl->a_count * sizeof(ext3_acl_entry), GFP_NOFS); if (!ext_acl) return ERR_PTR(-ENOMEM); ext_acl->a_version = cpu_to_le32(EXT3_ACL_VERSION); e = (char *)ext_acl + sizeof(ext3_acl_header); for (n=0; n < acl->a_count; n++) { ext3_acl_entry *entry = (ext3_acl_entry *)e; entry->e_tag = cpu_to_le16(acl->a_entries[n].e_tag); entry->e_perm = cpu_to_le16(acl->a_entries[n].e_perm); switch(acl->a_entries[n].e_tag) { case ACL_USER: case ACL_GROUP: entry->e_id = cpu_to_le32(acl->a_entries[n].e_id); e += sizeof(ext3_acl_entry); break; case ACL_USER_OBJ: case ACL_GROUP_OBJ: case ACL_MASK: case ACL_OTHER: e += sizeof(ext3_acl_entry_short); break; default: goto fail; } } return (char *)ext_acl; fail: kfree(ext_acl); return ERR_PTR(-EINVAL); } /* * Inode operation get_posix_acl(). * * inode->i_mutex: don't care */ static struct posix_acl * ext3_get_acl(struct inode *inode, int type) { int name_index; char *value = NULL; struct posix_acl *acl; int retval; if (!test_opt(inode->i_sb, POSIX_ACL)) return NULL; acl = get_cached_acl(inode, type); if (acl != ACL_NOT_CACHED) return acl; switch (type) { case ACL_TYPE_ACCESS: name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS; break; case ACL_TYPE_DEFAULT: name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT; break; default: BUG(); } retval = ext3_xattr_get(inode, name_index, "", NULL, 0); if (retval > 0) { value = kmalloc(retval, GFP_NOFS); if (!value) return ERR_PTR(-ENOMEM); retval = ext3_xattr_get(inode, name_index, "", value, retval); } if (retval > 0) acl = ext3_acl_from_disk(value, retval); else if (retval == -ENODATA || retval == -ENOSYS) acl = NULL; else acl = ERR_PTR(retval); kfree(value); if (!IS_ERR(acl)) set_cached_acl(inode, type, acl); return acl; } /* * Set the access or default ACL of an inode. 
* * inode->i_mutex: down unless called from ext3_new_inode */ static int ext3_set_acl(handle_t *handle, struct inode *inode, int type, struct posix_acl *acl) { int name_index; void *value = NULL; size_t size = 0; int error; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; switch(type) { case ACL_TYPE_ACCESS: name_index = EXT3_XATTR_INDEX_POSIX_ACL_ACCESS; if (acl) { mode_t mode = inode->i_mode; error = posix_acl_equiv_mode(acl, &mode); if (error < 0) return error; else { inode->i_mode = mode; inode->i_ctime = CURRENT_TIME_SEC; ext3_mark_inode_dirty(handle, inode); if (error == 0) acl = NULL; } } break; case ACL_TYPE_DEFAULT: name_index = EXT3_XATTR_INDEX_POSIX_ACL_DEFAULT; if (!S_ISDIR(inode->i_mode)) return acl ? -EACCES : 0; break; default: return -EINVAL; } if (acl) { value = ext3_acl_to_disk(acl, &size); if (IS_ERR(value)) return (int)PTR_ERR(value); } error = ext3_xattr_set_handle(handle, inode, name_index, "", value, size, 0); kfree(value); if (!error) set_cached_acl(inode, type, acl); return error; } int ext3_check_acl(struct inode *inode, int mask, unsigned int flags) { struct posix_acl *acl; if (flags & IPERM_FLAG_RCU) { if (!negative_cached_acl(inode, ACL_TYPE_ACCESS)) return -ECHILD; return -EAGAIN; } acl = ext3_get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl) { int error = posix_acl_permission(inode, acl, mask); posix_acl_release(acl); return error; } return -EAGAIN; } /* * Initialize the ACLs of a new inode. Called from ext3_new_inode. 
* * dir->i_mutex: down * inode->i_mutex: up (access to inode is still exclusive) */ int ext3_init_acl(handle_t *handle, struct inode *inode, struct inode *dir) { struct posix_acl *acl = NULL; int error = 0; if (!S_ISLNK(inode->i_mode)) { if (test_opt(dir->i_sb, POSIX_ACL)) { acl = ext3_get_acl(dir, ACL_TYPE_DEFAULT); if (IS_ERR(acl)) return PTR_ERR(acl); } if (!acl) inode->i_mode &= ~current_umask(); } if (test_opt(inode->i_sb, POSIX_ACL) && acl) { struct posix_acl *clone; mode_t mode; if (S_ISDIR(inode->i_mode)) { error = ext3_set_acl(handle, inode, ACL_TYPE_DEFAULT, acl); if (error) goto cleanup; } clone = posix_acl_clone(acl, GFP_NOFS); error = -ENOMEM; if (!clone) goto cleanup; mode = inode->i_mode; error = posix_acl_create_masq(clone, &mode); if (error >= 0) { inode->i_mode = mode; if (error > 0) { /* This is an extended ACL */ error = ext3_set_acl(handle, inode, ACL_TYPE_ACCESS, clone); } } posix_acl_release(clone); } cleanup: posix_acl_release(acl); return error; } /* * Does chmod for an inode that may have an Access Control List. The * inode->i_mode field must be updated to the desired value by the caller * before calling this function. * Returns 0 on success, or a negative error number. * * We change the ACL rather than storing some ACL entries in the file * mode permission bits (which would be more efficient), because that * would break once additional permissions (like ACL_APPEND, ACL_DELETE * for directories) are added. There are no more bits available in the * file mode. 
* * inode->i_mutex: down */ int ext3_acl_chmod(struct inode *inode) { struct posix_acl *acl, *clone; int error; if (S_ISLNK(inode->i_mode)) return -EOPNOTSUPP; if (!test_opt(inode->i_sb, POSIX_ACL)) return 0; acl = ext3_get_acl(inode, ACL_TYPE_ACCESS); if (IS_ERR(acl) || !acl) return PTR_ERR(acl); clone = posix_acl_clone(acl, GFP_KERNEL); posix_acl_release(acl); if (!clone) return -ENOMEM; error = posix_acl_chmod_masq(clone, inode->i_mode); if (!error) { handle_t *handle; int retries = 0; retry: handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb)); if (IS_ERR(handle)) { error = PTR_ERR(handle); ext3_std_error(inode->i_sb, error); goto out; } error = ext3_set_acl(handle, inode, ACL_TYPE_ACCESS, clone); ext3_journal_stop(handle); if (error == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) goto retry; } out: posix_acl_release(clone); return error; } /* * Extended attribute handlers */ static size_t ext3_xattr_list_acl_access(struct dentry *dentry, char *list, size_t list_len, const char *name, size_t name_len, int type) { const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS); if (!test_opt(dentry->d_sb, POSIX_ACL)) return 0; if (list && size <= list_len) memcpy(list, POSIX_ACL_XATTR_ACCESS, size); return size; } static size_t ext3_xattr_list_acl_default(struct dentry *dentry, char *list, size_t list_len, const char *name, size_t name_len, int type) { const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT); if (!test_opt(dentry->d_sb, POSIX_ACL)) return 0; if (list && size <= list_len) memcpy(list, POSIX_ACL_XATTR_DEFAULT, size); return size; } static int ext3_xattr_get_acl(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { struct posix_acl *acl; int error; if (strcmp(name, "") != 0) return -EINVAL; if (!test_opt(dentry->d_sb, POSIX_ACL)) return -EOPNOTSUPP; acl = ext3_get_acl(dentry->d_inode, type); if (IS_ERR(acl)) return PTR_ERR(acl); if (acl == NULL) return -ENODATA; error = posix_acl_to_xattr(acl, buffer, size); 
posix_acl_release(acl); return error; } static int ext3_xattr_set_acl(struct dentry *dentry, const char *name, const void *value, size_t size, int flags, int type) { struct inode *inode = dentry->d_inode; handle_t *handle; struct posix_acl *acl; int error, retries = 0; if (strcmp(name, "") != 0) return -EINVAL; if (!test_opt(inode->i_sb, POSIX_ACL)) return -EOPNOTSUPP; if (!inode_owner_or_capable(inode)) return -EPERM; if (value) { acl = posix_acl_from_xattr(value, size); if (IS_ERR(acl)) return PTR_ERR(acl); else if (acl) { error = posix_acl_valid(acl); if (error) goto release_and_out; } } else acl = NULL; retry: handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb)); if (IS_ERR(handle)) return PTR_ERR(handle); error = ext3_set_acl(handle, inode, type, acl); ext3_journal_stop(handle); if (error == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries)) goto retry; release_and_out: posix_acl_release(acl); return error; } const struct xattr_handler ext3_xattr_acl_access_handler = { .prefix = POSIX_ACL_XATTR_ACCESS, .flags = ACL_TYPE_ACCESS, .list = ext3_xattr_list_acl_access, .get = ext3_xattr_get_acl, .set = ext3_xattr_set_acl, }; const struct xattr_handler ext3_xattr_acl_default_handler = { .prefix = POSIX_ACL_XATTR_DEFAULT, .flags = ACL_TYPE_DEFAULT, .list = ext3_xattr_list_acl_default, .get = ext3_xattr_get_acl, .set = ext3_xattr_set_acl, };
gpl-2.0
ehalls/at100-kernel
net/bridge/netfilter/ebtable_broute.c
3020
2344
/* * ebtable_broute * * Authors: * Bart De Schuymer <bdschuym@pandora.be> * * April, 2002 * * This table lets you choose between routing and bridging for frames * entering on a bridge enslaved nic. This table is traversed before any * other ebtables table. See net/bridge/br_input.c. */ #include <linux/netfilter_bridge/ebtables.h> #include <linux/module.h> #include <linux/if_bridge.h> /* EBT_ACCEPT means the frame will be bridged * EBT_DROP means the frame will be routed */ static struct ebt_entries initial_chain = { .name = "BROUTING", .policy = EBT_ACCEPT, }; static struct ebt_replace_kernel initial_table = { .name = "broute", .valid_hooks = 1 << NF_BR_BROUTING, .entries_size = sizeof(struct ebt_entries), .hook_entry = { [NF_BR_BROUTING] = &initial_chain, }, .entries = (char *)&initial_chain, }; static int check(const struct ebt_table_info *info, unsigned int valid_hooks) { if (valid_hooks & ~(1 << NF_BR_BROUTING)) return -EINVAL; return 0; } static const struct ebt_table broute_table = { .name = "broute", .table = &initial_table, .valid_hooks = 1 << NF_BR_BROUTING, .check = check, .me = THIS_MODULE, }; static int ebt_broute(struct sk_buff *skb) { int ret; ret = ebt_do_table(NF_BR_BROUTING, skb, skb->dev, NULL, dev_net(skb->dev)->xt.broute_table); if (ret == NF_DROP) return 1; /* route it */ return 0; /* bridge it */ } static int __net_init broute_net_init(struct net *net) { net->xt.broute_table = ebt_register_table(net, &broute_table); if (IS_ERR(net->xt.broute_table)) return PTR_ERR(net->xt.broute_table); return 0; } static void __net_exit broute_net_exit(struct net *net) { ebt_unregister_table(net, net->xt.broute_table); } static struct pernet_operations broute_net_ops = { .init = broute_net_init, .exit = broute_net_exit, }; static int __init ebtable_broute_init(void) { int ret; ret = register_pernet_subsys(&broute_net_ops); if (ret < 0) return ret; /* see br_input.c */ rcu_assign_pointer(br_should_route_hook, (br_should_route_hook_t *)ebt_broute); return 0; } 
static void __exit ebtable_broute_fini(void) { rcu_assign_pointer(br_should_route_hook, NULL); synchronize_net(); unregister_pernet_subsys(&broute_net_ops); } module_init(ebtable_broute_init); module_exit(ebtable_broute_fini); MODULE_LICENSE("GPL");
gpl-2.0
InfinitiveOS-Devices/kernel_xiaomi_armani
drivers/regulator/wm831x-dcdc.c
3276
26421
/* * wm831x-dcdc.c -- DC-DC buck convertor driver for the WM831x series * * Copyright 2009 Wolfson Microelectronics PLC. * * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/bitops.h> #include <linux/err.h> #include <linux/i2c.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/regulator.h> #include <linux/mfd/wm831x/pdata.h> #define WM831X_BUCKV_MAX_SELECTOR 0x68 #define WM831X_BUCKP_MAX_SELECTOR 0x66 #define WM831X_DCDC_MODE_FAST 0 #define WM831X_DCDC_MODE_NORMAL 1 #define WM831X_DCDC_MODE_IDLE 2 #define WM831X_DCDC_MODE_STANDBY 3 #define WM831X_DCDC_MAX_NAME 6 /* Register offsets in control block */ #define WM831X_DCDC_CONTROL_1 0 #define WM831X_DCDC_CONTROL_2 1 #define WM831X_DCDC_ON_CONFIG 2 #define WM831X_DCDC_SLEEP_CONTROL 3 #define WM831X_DCDC_DVS_CONTROL 4 /* * Shared */ struct wm831x_dcdc { char name[WM831X_DCDC_MAX_NAME]; struct regulator_desc desc; int base; struct wm831x *wm831x; struct regulator_dev *regulator; int dvs_gpio; int dvs_gpio_state; int on_vsel; int dvs_vsel; }; static int wm831x_dcdc_is_enabled(struct regulator_dev *rdev) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; int mask = 1 << rdev_get_id(rdev); int reg; reg = wm831x_reg_read(wm831x, WM831X_DCDC_ENABLE); if (reg < 0) return reg; if (reg & mask) return 1; else return 0; } static int wm831x_dcdc_enable(struct regulator_dev *rdev) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; int 
mask = 1 << rdev_get_id(rdev); return wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, mask, mask); } static int wm831x_dcdc_disable(struct regulator_dev *rdev) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; int mask = 1 << rdev_get_id(rdev); return wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, mask, 0); } static unsigned int wm831x_dcdc_get_mode(struct regulator_dev *rdev) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; int val; val = wm831x_reg_read(wm831x, reg); if (val < 0) return val; val = (val & WM831X_DC1_ON_MODE_MASK) >> WM831X_DC1_ON_MODE_SHIFT; switch (val) { case WM831X_DCDC_MODE_FAST: return REGULATOR_MODE_FAST; case WM831X_DCDC_MODE_NORMAL: return REGULATOR_MODE_NORMAL; case WM831X_DCDC_MODE_STANDBY: return REGULATOR_MODE_STANDBY; case WM831X_DCDC_MODE_IDLE: return REGULATOR_MODE_IDLE; default: BUG(); return -EINVAL; } } static int wm831x_dcdc_set_mode_int(struct wm831x *wm831x, int reg, unsigned int mode) { int val; switch (mode) { case REGULATOR_MODE_FAST: val = WM831X_DCDC_MODE_FAST; break; case REGULATOR_MODE_NORMAL: val = WM831X_DCDC_MODE_NORMAL; break; case REGULATOR_MODE_STANDBY: val = WM831X_DCDC_MODE_STANDBY; break; case REGULATOR_MODE_IDLE: val = WM831X_DCDC_MODE_IDLE; break; default: return -EINVAL; } return wm831x_set_bits(wm831x, reg, WM831X_DC1_ON_MODE_MASK, val << WM831X_DC1_ON_MODE_SHIFT); } static int wm831x_dcdc_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; return wm831x_dcdc_set_mode_int(wm831x, reg, mode); } static int wm831x_dcdc_set_suspend_mode(struct regulator_dev *rdev, unsigned int mode) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; return 
wm831x_dcdc_set_mode_int(wm831x, reg, mode); } static int wm831x_dcdc_get_status(struct regulator_dev *rdev) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; int ret; /* First, check for errors */ ret = wm831x_reg_read(wm831x, WM831X_DCDC_UV_STATUS); if (ret < 0) return ret; if (ret & (1 << rdev_get_id(rdev))) { dev_dbg(wm831x->dev, "DCDC%d under voltage\n", rdev_get_id(rdev) + 1); return REGULATOR_STATUS_ERROR; } /* DCDC1 and DCDC2 can additionally detect high voltage/current */ if (rdev_get_id(rdev) < 2) { if (ret & (WM831X_DC1_OV_STS << rdev_get_id(rdev))) { dev_dbg(wm831x->dev, "DCDC%d over voltage\n", rdev_get_id(rdev) + 1); return REGULATOR_STATUS_ERROR; } if (ret & (WM831X_DC1_HC_STS << rdev_get_id(rdev))) { dev_dbg(wm831x->dev, "DCDC%d over current\n", rdev_get_id(rdev) + 1); return REGULATOR_STATUS_ERROR; } } /* Is the regulator on? */ ret = wm831x_reg_read(wm831x, WM831X_DCDC_STATUS); if (ret < 0) return ret; if (!(ret & (1 << rdev_get_id(rdev)))) return REGULATOR_STATUS_OFF; /* TODO: When we handle hardware control modes so we can report the * current mode. 
*/ return REGULATOR_STATUS_ON; } static irqreturn_t wm831x_dcdc_uv_irq(int irq, void *data) { struct wm831x_dcdc *dcdc = data; regulator_notifier_call_chain(dcdc->regulator, REGULATOR_EVENT_UNDER_VOLTAGE, NULL); return IRQ_HANDLED; } static irqreturn_t wm831x_dcdc_oc_irq(int irq, void *data) { struct wm831x_dcdc *dcdc = data; regulator_notifier_call_chain(dcdc->regulator, REGULATOR_EVENT_OVER_CURRENT, NULL); return IRQ_HANDLED; } /* * BUCKV specifics */ static int wm831x_buckv_list_voltage(struct regulator_dev *rdev, unsigned selector) { if (selector <= 0x8) return 600000; if (selector <= WM831X_BUCKV_MAX_SELECTOR) return 600000 + ((selector - 0x8) * 12500); return -EINVAL; } static int wm831x_buckv_select_min_voltage(struct regulator_dev *rdev, int min_uV, int max_uV) { u16 vsel; if (min_uV < 600000) vsel = 0; else if (min_uV <= 1800000) vsel = ((min_uV - 600000) / 12500) + 8; else return -EINVAL; if (wm831x_buckv_list_voltage(rdev, vsel) > max_uV) return -EINVAL; return vsel; } static int wm831x_buckv_set_dvs(struct regulator_dev *rdev, int state) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); if (state == dcdc->dvs_gpio_state) return 0; dcdc->dvs_gpio_state = state; gpio_set_value(dcdc->dvs_gpio, state); /* Should wait for DVS state change to be asserted if we have * a GPIO for it, for now assume the device is configured * for the fastest possible transition. 
*/ return 0; } static int wm831x_buckv_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; int on_reg = dcdc->base + WM831X_DCDC_ON_CONFIG; int dvs_reg = dcdc->base + WM831X_DCDC_DVS_CONTROL; int vsel, ret; vsel = wm831x_buckv_select_min_voltage(rdev, min_uV, max_uV); if (vsel < 0) return vsel; *selector = vsel; /* If this value is already set then do a GPIO update if we can */ if (dcdc->dvs_gpio && dcdc->on_vsel == vsel) return wm831x_buckv_set_dvs(rdev, 0); if (dcdc->dvs_gpio && dcdc->dvs_vsel == vsel) return wm831x_buckv_set_dvs(rdev, 1); /* Always set the ON status to the minimum voltage */ ret = wm831x_set_bits(wm831x, on_reg, WM831X_DC1_ON_VSEL_MASK, vsel); if (ret < 0) return ret; dcdc->on_vsel = vsel; if (!dcdc->dvs_gpio) return ret; /* Kick the voltage transition now */ ret = wm831x_buckv_set_dvs(rdev, 0); if (ret < 0) return ret; /* * If this VSEL is higher than the last one we've seen then * remember it as the DVS VSEL. This is optimised for CPUfreq * usage where we want to get to the highest voltage very * quickly. 
*/ if (vsel > dcdc->dvs_vsel) { ret = wm831x_set_bits(wm831x, dvs_reg, WM831X_DC1_DVS_VSEL_MASK, dcdc->dvs_vsel); if (ret == 0) dcdc->dvs_vsel = vsel; else dev_warn(wm831x->dev, "Failed to set DCDC DVS VSEL: %d\n", ret); } return 0; } static int wm831x_buckv_set_suspend_voltage(struct regulator_dev *rdev, int uV) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; int vsel; vsel = wm831x_buckv_select_min_voltage(rdev, uV, uV); if (vsel < 0) return vsel; return wm831x_set_bits(wm831x, reg, WM831X_DC1_SLP_VSEL_MASK, vsel); } static int wm831x_buckv_get_voltage_sel(struct regulator_dev *rdev) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); if (dcdc->dvs_gpio && dcdc->dvs_gpio_state) return dcdc->dvs_vsel; else return dcdc->on_vsel; } /* Current limit options */ static u16 wm831x_dcdc_ilim[] = { 125, 250, 375, 500, 625, 750, 875, 1000 }; static int wm831x_buckv_set_current_limit(struct regulator_dev *rdev, int min_uA, int max_uA) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2; int i; for (i = 0; i < ARRAY_SIZE(wm831x_dcdc_ilim); i++) { if ((min_uA <= wm831x_dcdc_ilim[i]) && (wm831x_dcdc_ilim[i] <= max_uA)) break; } if (i == ARRAY_SIZE(wm831x_dcdc_ilim)) return -EINVAL; return wm831x_set_bits(wm831x, reg, WM831X_DC1_HC_THR_MASK, i << WM831X_DC1_HC_THR_SHIFT); } static int wm831x_buckv_get_current_limit(struct regulator_dev *rdev) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; u16 reg = dcdc->base + WM831X_DCDC_CONTROL_2; int val; val = wm831x_reg_read(wm831x, reg); if (val < 0) return val; val = (val & WM831X_DC1_HC_THR_MASK) >> WM831X_DC1_HC_THR_SHIFT; return wm831x_dcdc_ilim[val]; } static struct regulator_ops wm831x_buckv_ops = { .set_voltage = wm831x_buckv_set_voltage, .get_voltage_sel = wm831x_buckv_get_voltage_sel, .list_voltage = 
wm831x_buckv_list_voltage, .set_suspend_voltage = wm831x_buckv_set_suspend_voltage, .set_current_limit = wm831x_buckv_set_current_limit, .get_current_limit = wm831x_buckv_get_current_limit, .is_enabled = wm831x_dcdc_is_enabled, .enable = wm831x_dcdc_enable, .disable = wm831x_dcdc_disable, .get_status = wm831x_dcdc_get_status, .get_mode = wm831x_dcdc_get_mode, .set_mode = wm831x_dcdc_set_mode, .set_suspend_mode = wm831x_dcdc_set_suspend_mode, }; /* * Set up DVS control. We just log errors since we can still run * (with reduced performance) if we fail. */ static __devinit void wm831x_buckv_dvs_init(struct wm831x_dcdc *dcdc, struct wm831x_buckv_pdata *pdata) { struct wm831x *wm831x = dcdc->wm831x; int ret; u16 ctrl; if (!pdata || !pdata->dvs_gpio) return; ret = gpio_request(pdata->dvs_gpio, "DCDC DVS"); if (ret < 0) { dev_err(wm831x->dev, "Failed to get %s DVS GPIO: %d\n", dcdc->name, ret); return; } /* gpiolib won't let us read the GPIO status so pick the higher * of the two existing voltages so we take it as platform data. */ dcdc->dvs_gpio_state = pdata->dvs_init_state; ret = gpio_direction_output(pdata->dvs_gpio, dcdc->dvs_gpio_state); if (ret < 0) { dev_err(wm831x->dev, "Failed to enable %s DVS GPIO: %d\n", dcdc->name, ret); gpio_free(pdata->dvs_gpio); return; } dcdc->dvs_gpio = pdata->dvs_gpio; switch (pdata->dvs_control_src) { case 1: ctrl = 2 << WM831X_DC1_DVS_SRC_SHIFT; break; case 2: ctrl = 3 << WM831X_DC1_DVS_SRC_SHIFT; break; default: dev_err(wm831x->dev, "Invalid DVS control source %d for %s\n", pdata->dvs_control_src, dcdc->name); return; } /* If DVS_VSEL is set to the minimum value then raise it to ON_VSEL * to make bootstrapping a bit smoother. 
*/ if (!dcdc->dvs_vsel) { ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL, WM831X_DC1_DVS_VSEL_MASK, dcdc->on_vsel); if (ret == 0) dcdc->dvs_vsel = dcdc->on_vsel; else dev_warn(wm831x->dev, "Failed to set DVS_VSEL: %d\n", ret); } ret = wm831x_set_bits(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL, WM831X_DC1_DVS_SRC_MASK, ctrl); if (ret < 0) { dev_err(wm831x->dev, "Failed to set %s DVS source: %d\n", dcdc->name, ret); } } static __devinit int wm831x_buckv_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = wm831x->dev->platform_data; int id; struct wm831x_dcdc *dcdc; struct resource *res; int ret, irq; if (pdata && pdata->wm831x_num) id = (pdata->wm831x_num * 10) + 1; else id = 0; id = pdev->id - id; dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); if (pdata == NULL || pdata->dcdc[id] == NULL) return -ENODEV; dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc), GFP_KERNEL); if (dcdc == NULL) { dev_err(&pdev->dev, "Unable to allocate private data\n"); return -ENOMEM; } dcdc->wm831x = wm831x; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (res == NULL) { dev_err(&pdev->dev, "No I/O resource\n"); ret = -EINVAL; goto err; } dcdc->base = res->start; snprintf(dcdc->name, sizeof(dcdc->name), "DCDC%d", id + 1); dcdc->desc.name = dcdc->name; dcdc->desc.id = id; dcdc->desc.type = REGULATOR_VOLTAGE; dcdc->desc.n_voltages = WM831X_BUCKV_MAX_SELECTOR + 1; dcdc->desc.ops = &wm831x_buckv_ops; dcdc->desc.owner = THIS_MODULE; ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_ON_CONFIG); if (ret < 0) { dev_err(wm831x->dev, "Failed to read ON VSEL: %d\n", ret); goto err; } dcdc->on_vsel = ret & WM831X_DC1_ON_VSEL_MASK; ret = wm831x_reg_read(wm831x, dcdc->base + WM831X_DCDC_DVS_CONTROL); if (ret < 0) { dev_err(wm831x->dev, "Failed to read DVS VSEL: %d\n", ret); goto err; } dcdc->dvs_vsel = ret & WM831X_DC1_DVS_VSEL_MASK; if (pdata->dcdc[id]) wm831x_buckv_dvs_init(dcdc, 
pdata->dcdc[id]->driver_data); dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev, pdata->dcdc[id], dcdc, NULL); if (IS_ERR(dcdc->regulator)) { ret = PTR_ERR(dcdc->regulator); dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n", id + 1, ret); goto err; } irq = platform_get_irq_byname(pdev, "UV"); ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, IRQF_TRIGGER_RISING, dcdc->name, dcdc); if (ret != 0) { dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", irq, ret); goto err_regulator; } irq = platform_get_irq_byname(pdev, "HC"); ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq, IRQF_TRIGGER_RISING, dcdc->name, dcdc); if (ret != 0) { dev_err(&pdev->dev, "Failed to request HC IRQ %d: %d\n", irq, ret); goto err_uv; } platform_set_drvdata(pdev, dcdc); return 0; err_uv: free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); err_regulator: regulator_unregister(dcdc->regulator); err: if (dcdc->dvs_gpio) gpio_free(dcdc->dvs_gpio); return ret; } static __devexit int wm831x_buckv_remove(struct platform_device *pdev) { struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); free_irq(platform_get_irq_byname(pdev, "HC"), dcdc); free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); regulator_unregister(dcdc->regulator); if (dcdc->dvs_gpio) gpio_free(dcdc->dvs_gpio); return 0; } static struct platform_driver wm831x_buckv_driver = { .probe = wm831x_buckv_probe, .remove = __devexit_p(wm831x_buckv_remove), .driver = { .name = "wm831x-buckv", .owner = THIS_MODULE, }, }; /* * BUCKP specifics */ static int wm831x_buckp_list_voltage(struct regulator_dev *rdev, unsigned selector) { if (selector <= WM831X_BUCKP_MAX_SELECTOR) return 850000 + (selector * 25000); else return -EINVAL; } static int wm831x_buckp_set_voltage_int(struct regulator_dev *rdev, int reg, int min_uV, int max_uV, int *selector) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; u16 vsel; if (min_uV <= 34000000) vsel = 
(min_uV - 850000) / 25000; else return -EINVAL; if (wm831x_buckp_list_voltage(rdev, vsel) > max_uV) return -EINVAL; *selector = vsel; return wm831x_set_bits(wm831x, reg, WM831X_DC3_ON_VSEL_MASK, vsel); } static int wm831x_buckp_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, unsigned *selector) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; return wm831x_buckp_set_voltage_int(rdev, reg, min_uV, max_uV, selector); } static int wm831x_buckp_set_suspend_voltage(struct regulator_dev *rdev, int uV) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); u16 reg = dcdc->base + WM831X_DCDC_SLEEP_CONTROL; unsigned selector; return wm831x_buckp_set_voltage_int(rdev, reg, uV, uV, &selector); } static int wm831x_buckp_get_voltage_sel(struct regulator_dev *rdev) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; u16 reg = dcdc->base + WM831X_DCDC_ON_CONFIG; int val; val = wm831x_reg_read(wm831x, reg); if (val < 0) return val; return val & WM831X_DC3_ON_VSEL_MASK; } static struct regulator_ops wm831x_buckp_ops = { .set_voltage = wm831x_buckp_set_voltage, .get_voltage_sel = wm831x_buckp_get_voltage_sel, .list_voltage = wm831x_buckp_list_voltage, .set_suspend_voltage = wm831x_buckp_set_suspend_voltage, .is_enabled = wm831x_dcdc_is_enabled, .enable = wm831x_dcdc_enable, .disable = wm831x_dcdc_disable, .get_status = wm831x_dcdc_get_status, .get_mode = wm831x_dcdc_get_mode, .set_mode = wm831x_dcdc_set_mode, .set_suspend_mode = wm831x_dcdc_set_suspend_mode, }; static __devinit int wm831x_buckp_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = wm831x->dev->platform_data; int id; struct wm831x_dcdc *dcdc; struct resource *res; int ret, irq; if (pdata && pdata->wm831x_num) id = (pdata->wm831x_num * 10) + 1; else id = 0; id = pdev->id - id; dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); if (pdata == NULL || 
pdata->dcdc[id] == NULL) return -ENODEV; dcdc = devm_kzalloc(&pdev->dev, sizeof(struct wm831x_dcdc), GFP_KERNEL); if (dcdc == NULL) { dev_err(&pdev->dev, "Unable to allocate private data\n"); return -ENOMEM; } dcdc->wm831x = wm831x; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (res == NULL) { dev_err(&pdev->dev, "No I/O resource\n"); ret = -EINVAL; goto err; } dcdc->base = res->start; snprintf(dcdc->name, sizeof(dcdc->name), "DCDC%d", id + 1); dcdc->desc.name = dcdc->name; dcdc->desc.id = id; dcdc->desc.type = REGULATOR_VOLTAGE; dcdc->desc.n_voltages = WM831X_BUCKP_MAX_SELECTOR + 1; dcdc->desc.ops = &wm831x_buckp_ops; dcdc->desc.owner = THIS_MODULE; dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev, pdata->dcdc[id], dcdc, NULL); if (IS_ERR(dcdc->regulator)) { ret = PTR_ERR(dcdc->regulator); dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n", id + 1, ret); goto err; } irq = platform_get_irq_byname(pdev, "UV"); ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, IRQF_TRIGGER_RISING, dcdc->name, dcdc); if (ret != 0) { dev_err(&pdev->dev, "Failed to request UV IRQ %d: %d\n", irq, ret); goto err_regulator; } platform_set_drvdata(pdev, dcdc); return 0; err_regulator: regulator_unregister(dcdc->regulator); err: return ret; } static __devexit int wm831x_buckp_remove(struct platform_device *pdev) { struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); regulator_unregister(dcdc->regulator); return 0; } static struct platform_driver wm831x_buckp_driver = { .probe = wm831x_buckp_probe, .remove = __devexit_p(wm831x_buckp_remove), .driver = { .name = "wm831x-buckp", .owner = THIS_MODULE, }, }; /* * DCDC boost convertors */ static int wm831x_boostp_get_status(struct regulator_dev *rdev) { struct wm831x_dcdc *dcdc = rdev_get_drvdata(rdev); struct wm831x *wm831x = dcdc->wm831x; int ret; /* First, check for errors */ ret = wm831x_reg_read(wm831x, 
WM831X_DCDC_UV_STATUS); if (ret < 0) return ret; if (ret & (1 << rdev_get_id(rdev))) { dev_dbg(wm831x->dev, "DCDC%d under voltage\n", rdev_get_id(rdev) + 1); return REGULATOR_STATUS_ERROR; } /* Is the regulator on? */ ret = wm831x_reg_read(wm831x, WM831X_DCDC_STATUS); if (ret < 0) return ret; if (ret & (1 << rdev_get_id(rdev))) return REGULATOR_STATUS_ON; else return REGULATOR_STATUS_OFF; } static struct regulator_ops wm831x_boostp_ops = { .get_status = wm831x_boostp_get_status, .is_enabled = wm831x_dcdc_is_enabled, .enable = wm831x_dcdc_enable, .disable = wm831x_dcdc_disable, }; static __devinit int wm831x_boostp_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = wm831x->dev->platform_data; int id = pdev->id % ARRAY_SIZE(pdata->dcdc); struct wm831x_dcdc *dcdc; struct resource *res; int ret, irq; dev_dbg(&pdev->dev, "Probing DCDC%d\n", id + 1); if (pdata == NULL || pdata->dcdc[id] == NULL) return -ENODEV; dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL); if (dcdc == NULL) { dev_err(&pdev->dev, "Unable to allocate private data\n"); return -ENOMEM; } dcdc->wm831x = wm831x; res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (res == NULL) { dev_err(&pdev->dev, "No I/O resource\n"); ret = -EINVAL; goto err; } dcdc->base = res->start; snprintf(dcdc->name, sizeof(dcdc->name), "DCDC%d", id + 1); dcdc->desc.name = dcdc->name; dcdc->desc.id = id; dcdc->desc.type = REGULATOR_VOLTAGE; dcdc->desc.ops = &wm831x_boostp_ops; dcdc->desc.owner = THIS_MODULE; dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev, pdata->dcdc[id], dcdc, NULL); if (IS_ERR(dcdc->regulator)) { ret = PTR_ERR(dcdc->regulator); dev_err(wm831x->dev, "Failed to register DCDC%d: %d\n", id + 1, ret); goto err; } irq = platform_get_irq_byname(pdev, "UV"); ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, IRQF_TRIGGER_RISING, dcdc->name, dcdc); if (ret != 0) { dev_err(&pdev->dev, "Failed to request UV IRQ %d: 
%d\n", irq, ret); goto err_regulator; } platform_set_drvdata(pdev, dcdc); return 0; err_regulator: regulator_unregister(dcdc->regulator); err: kfree(dcdc); return ret; } static __devexit int wm831x_boostp_remove(struct platform_device *pdev) { struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); regulator_unregister(dcdc->regulator); kfree(dcdc); return 0; } static struct platform_driver wm831x_boostp_driver = { .probe = wm831x_boostp_probe, .remove = __devexit_p(wm831x_boostp_remove), .driver = { .name = "wm831x-boostp", .owner = THIS_MODULE, }, }; /* * External Power Enable * * These aren't actually DCDCs but look like them in hardware so share * code. */ #define WM831X_EPE_BASE 6 static struct regulator_ops wm831x_epe_ops = { .is_enabled = wm831x_dcdc_is_enabled, .enable = wm831x_dcdc_enable, .disable = wm831x_dcdc_disable, .get_status = wm831x_dcdc_get_status, }; static __devinit int wm831x_epe_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *pdata = wm831x->dev->platform_data; int id = pdev->id % ARRAY_SIZE(pdata->epe); struct wm831x_dcdc *dcdc; int ret; dev_dbg(&pdev->dev, "Probing EPE%d\n", id + 1); if (pdata == NULL || pdata->epe[id] == NULL) return -ENODEV; dcdc = kzalloc(sizeof(struct wm831x_dcdc), GFP_KERNEL); if (dcdc == NULL) { dev_err(&pdev->dev, "Unable to allocate private data\n"); return -ENOMEM; } dcdc->wm831x = wm831x; /* For current parts this is correct; probably need to revisit * in future. 
*/ snprintf(dcdc->name, sizeof(dcdc->name), "EPE%d", id + 1); dcdc->desc.name = dcdc->name; dcdc->desc.id = id + WM831X_EPE_BASE; /* Offset in DCDC registers */ dcdc->desc.ops = &wm831x_epe_ops; dcdc->desc.type = REGULATOR_VOLTAGE; dcdc->desc.owner = THIS_MODULE; dcdc->regulator = regulator_register(&dcdc->desc, &pdev->dev, pdata->epe[id], dcdc, NULL); if (IS_ERR(dcdc->regulator)) { ret = PTR_ERR(dcdc->regulator); dev_err(wm831x->dev, "Failed to register EPE%d: %d\n", id + 1, ret); goto err; } platform_set_drvdata(pdev, dcdc); return 0; err: kfree(dcdc); return ret; } static __devexit int wm831x_epe_remove(struct platform_device *pdev) { struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); regulator_unregister(dcdc->regulator); kfree(dcdc); return 0; } static struct platform_driver wm831x_epe_driver = { .probe = wm831x_epe_probe, .remove = __devexit_p(wm831x_epe_remove), .driver = { .name = "wm831x-epe", .owner = THIS_MODULE, }, }; static int __init wm831x_dcdc_init(void) { int ret; ret = platform_driver_register(&wm831x_buckv_driver); if (ret != 0) pr_err("Failed to register WM831x BUCKV driver: %d\n", ret); ret = platform_driver_register(&wm831x_buckp_driver); if (ret != 0) pr_err("Failed to register WM831x BUCKP driver: %d\n", ret); ret = platform_driver_register(&wm831x_boostp_driver); if (ret != 0) pr_err("Failed to register WM831x BOOST driver: %d\n", ret); ret = platform_driver_register(&wm831x_epe_driver); if (ret != 0) pr_err("Failed to register WM831x EPE driver: %d\n", ret); return 0; } subsys_initcall(wm831x_dcdc_init); static void __exit wm831x_dcdc_exit(void) { platform_driver_unregister(&wm831x_epe_driver); platform_driver_unregister(&wm831x_boostp_driver); platform_driver_unregister(&wm831x_buckp_driver); platform_driver_unregister(&wm831x_buckv_driver); } module_exit(wm831x_dcdc_exit); /* Module information */ MODULE_AUTHOR("Mark Brown"); MODULE_DESCRIPTION("WM831x DC-DC convertor driver"); MODULE_LICENSE("GPL"); 
MODULE_ALIAS("platform:wm831x-buckv"); MODULE_ALIAS("platform:wm831x-buckp"); MODULE_ALIAS("platform:wm831x-epe");
gpl-2.0
gerbert/linux-tas5715
drivers/ide/ide-pm.c
3788
6681
#include <linux/kernel.h> #include <linux/gfp.h> #include <linux/ide.h> int generic_ide_suspend(struct device *dev, pm_message_t mesg) { ide_drive_t *drive = to_ide_device(dev); ide_drive_t *pair = ide_get_pair_dev(drive); ide_hwif_t *hwif = drive->hwif; struct request *rq; struct request_pm_state rqpm; int ret; if (ide_port_acpi(hwif)) { /* call ACPI _GTM only once */ if ((drive->dn & 1) == 0 || pair == NULL) ide_acpi_get_timing(hwif); } memset(&rqpm, 0, sizeof(rqpm)); rq = blk_get_request(drive->queue, READ, __GFP_WAIT); rq->cmd_type = REQ_TYPE_PM_SUSPEND; rq->special = &rqpm; rqpm.pm_step = IDE_PM_START_SUSPEND; if (mesg.event == PM_EVENT_PRETHAW) mesg.event = PM_EVENT_FREEZE; rqpm.pm_state = mesg.event; ret = blk_execute_rq(drive->queue, NULL, rq, 0); blk_put_request(rq); if (ret == 0 && ide_port_acpi(hwif)) { /* call ACPI _PS3 only after both devices are suspended */ if ((drive->dn & 1) || pair == NULL) ide_acpi_set_state(hwif, 0); } return ret; } int generic_ide_resume(struct device *dev) { ide_drive_t *drive = to_ide_device(dev); ide_drive_t *pair = ide_get_pair_dev(drive); ide_hwif_t *hwif = drive->hwif; struct request *rq; struct request_pm_state rqpm; int err; if (ide_port_acpi(hwif)) { /* call ACPI _PS0 / _STM only once */ if ((drive->dn & 1) == 0 || pair == NULL) { ide_acpi_set_state(hwif, 1); ide_acpi_push_timing(hwif); } ide_acpi_exec_tfs(drive); } memset(&rqpm, 0, sizeof(rqpm)); rq = blk_get_request(drive->queue, READ, __GFP_WAIT); rq->cmd_type = REQ_TYPE_PM_RESUME; rq->cmd_flags |= REQ_PREEMPT; rq->special = &rqpm; rqpm.pm_step = IDE_PM_START_RESUME; rqpm.pm_state = PM_EVENT_ON; err = blk_execute_rq(drive->queue, NULL, rq, 1); blk_put_request(rq); if (err == 0 && dev->driver) { struct ide_driver *drv = to_ide_driver(dev->driver); if (drv->resume) drv->resume(drive); } return err; } void ide_complete_power_step(ide_drive_t *drive, struct request *rq) { struct request_pm_state *pm = rq->special; #ifdef DEBUG_PM printk(KERN_INFO "%s: 
complete_power_step(step: %d)\n", drive->name, pm->pm_step); #endif if (drive->media != ide_disk) return; switch (pm->pm_step) { case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */ if (pm->pm_state == PM_EVENT_FREEZE) pm->pm_step = IDE_PM_COMPLETED; else pm->pm_step = IDE_PM_STANDBY; break; case IDE_PM_STANDBY: /* Suspend step 2 (standby) */ pm->pm_step = IDE_PM_COMPLETED; break; case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */ pm->pm_step = IDE_PM_IDLE; break; case IDE_PM_IDLE: /* Resume step 2 (idle)*/ pm->pm_step = IDE_PM_RESTORE_DMA; break; } } ide_startstop_t ide_start_power_step(ide_drive_t *drive, struct request *rq) { struct request_pm_state *pm = rq->special; struct ide_cmd cmd = { }; switch (pm->pm_step) { case IDE_PM_FLUSH_CACHE: /* Suspend step 1 (flush cache) */ if (drive->media != ide_disk) break; /* Not supported? Switch to next step now. */ if (ata_id_flush_enabled(drive->id) == 0 || (drive->dev_flags & IDE_DFLAG_WCACHE) == 0) { ide_complete_power_step(drive, rq); return ide_stopped; } if (ata_id_flush_ext_enabled(drive->id)) cmd.tf.command = ATA_CMD_FLUSH_EXT; else cmd.tf.command = ATA_CMD_FLUSH; goto out_do_tf; case IDE_PM_STANDBY: /* Suspend step 2 (standby) */ cmd.tf.command = ATA_CMD_STANDBYNOW1; goto out_do_tf; case IDE_PM_RESTORE_PIO: /* Resume step 1 (restore PIO) */ ide_set_max_pio(drive); /* * skip IDE_PM_IDLE for ATAPI devices */ if (drive->media != ide_disk) pm->pm_step = IDE_PM_RESTORE_DMA; else ide_complete_power_step(drive, rq); return ide_stopped; case IDE_PM_IDLE: /* Resume step 2 (idle) */ cmd.tf.command = ATA_CMD_IDLEIMMEDIATE; goto out_do_tf; case IDE_PM_RESTORE_DMA: /* Resume step 3 (restore DMA) */ /* * Right now, all we do is call ide_set_dma(drive), * we could be smarter and check for current xfer_speed * in struct drive etc... 
*/ if (drive->hwif->dma_ops == NULL) break; /* * TODO: respect IDE_DFLAG_USING_DMA */ ide_set_dma(drive); break; } pm->pm_step = IDE_PM_COMPLETED; return ide_stopped; out_do_tf: cmd.valid.out.tf = IDE_VALID_OUT_TF | IDE_VALID_DEVICE; cmd.valid.in.tf = IDE_VALID_IN_TF | IDE_VALID_DEVICE; cmd.protocol = ATA_PROT_NODATA; return do_rw_taskfile(drive, &cmd); } /** * ide_complete_pm_rq - end the current Power Management request * @drive: target drive * @rq: request * * This function cleans up the current PM request and stops the queue * if necessary. */ void ide_complete_pm_rq(ide_drive_t *drive, struct request *rq) { struct request_queue *q = drive->queue; struct request_pm_state *pm = rq->special; unsigned long flags; ide_complete_power_step(drive, rq); if (pm->pm_step != IDE_PM_COMPLETED) return; #ifdef DEBUG_PM printk("%s: completing PM request, %s\n", drive->name, (rq->cmd_type == REQ_TYPE_PM_SUSPEND) ? "suspend" : "resume"); #endif spin_lock_irqsave(q->queue_lock, flags); if (rq->cmd_type == REQ_TYPE_PM_SUSPEND) blk_stop_queue(q); else drive->dev_flags &= ~IDE_DFLAG_BLOCKED; spin_unlock_irqrestore(q->queue_lock, flags); drive->hwif->rq = NULL; if (blk_end_request(rq, 0, 0)) BUG(); } void ide_check_pm_state(ide_drive_t *drive, struct request *rq) { struct request_pm_state *pm = rq->special; if (rq->cmd_type == REQ_TYPE_PM_SUSPEND && pm->pm_step == IDE_PM_START_SUSPEND) /* Mark drive blocked when starting the suspend sequence. */ drive->dev_flags |= IDE_DFLAG_BLOCKED; else if (rq->cmd_type == REQ_TYPE_PM_RESUME && pm->pm_step == IDE_PM_START_RESUME) { /* * The first thing we do on wakeup is to wait for BSY bit to * go away (with a looong timeout) as a drive on this hwif may * just be POSTing itself. * We do that before even selecting as the "other" device on * the bus may be broken enough to walk on our toes at this * point. 
*/ ide_hwif_t *hwif = drive->hwif; const struct ide_tp_ops *tp_ops = hwif->tp_ops; struct request_queue *q = drive->queue; unsigned long flags; int rc; #ifdef DEBUG_PM printk("%s: Wakeup request inited, waiting for !BSY...\n", drive->name); #endif rc = ide_wait_not_busy(hwif, 35000); if (rc) printk(KERN_WARNING "%s: bus not ready on wakeup\n", drive->name); tp_ops->dev_select(drive); tp_ops->write_devctl(hwif, ATA_DEVCTL_OBS); rc = ide_wait_not_busy(hwif, 100000); if (rc) printk(KERN_WARNING "%s: drive not ready on wakeup\n", drive->name); spin_lock_irqsave(q->queue_lock, flags); blk_start_queue(q); spin_unlock_irqrestore(q->queue_lock, flags); } }
gpl-2.0
InsomniaAOSP/android_kernel_samsung_d2
arch/um/os-Linux/start_up.c
3788
12610
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stdio.h> #include <stdlib.h> #include <stdarg.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <sched.h> #include <signal.h> #include <string.h> #include <sys/mman.h> #include <sys/stat.h> #include <sys/wait.h> #include <asm/unistd.h> #include "init.h" #include "os.h" #include "mem_user.h" #include "ptrace_user.h" #include "registers.h" #include "skas.h" #include "skas_ptrace.h" static void ptrace_child(void) { int ret; /* Calling os_getpid because some libcs cached getpid incorrectly */ int pid = os_getpid(), ppid = getppid(); int sc_result; if (change_sig(SIGWINCH, 0) < 0 || ptrace(PTRACE_TRACEME, 0, 0, 0) < 0) { perror("ptrace"); kill(pid, SIGKILL); } kill(pid, SIGSTOP); /* * This syscall will be intercepted by the parent. Don't call more than * once, please. */ sc_result = os_getpid(); if (sc_result == pid) /* Nothing modified by the parent, we are running normally. */ ret = 1; else if (sc_result == ppid) /* * Expected in check_ptrace and check_sysemu when they succeed * in modifying the stack frame */ ret = 0; else /* Serious trouble! This could be caused by a bug in host 2.6 * SKAS3/2.6 patch before release -V6, together with a bug in * the UML code itself. */ ret = 2; exit(ret); } static void fatal_perror(const char *str) { perror(str); exit(1); } static void fatal(char *fmt, ...) { va_list list; va_start(list, fmt); vfprintf(stderr, fmt, list); va_end(list); exit(1); } static void non_fatal(char *fmt, ...) 
{ va_list list; va_start(list, fmt); vfprintf(stderr, fmt, list); va_end(list); } static int start_ptraced_child(void) { int pid, n, status; pid = fork(); if (pid == 0) ptrace_child(); else if (pid < 0) fatal_perror("start_ptraced_child : fork failed"); CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); if (n < 0) fatal_perror("check_ptrace : waitpid failed"); if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGSTOP)) fatal("check_ptrace : expected SIGSTOP, got status = %d", status); return pid; } /* When testing for SYSEMU support, if it is one of the broken versions, we * must just avoid using sysemu, not panic, but only if SYSEMU features are * broken. * So only for SYSEMU features we test mustpanic, while normal host features * must work anyway! */ static int stop_ptraced_child(int pid, int exitcode, int mustexit) { int status, n, ret = 0; if (ptrace(PTRACE_CONT, pid, 0, 0) < 0) { perror("stop_ptraced_child : ptrace failed"); return -1; } CATCH_EINTR(n = waitpid(pid, &status, 0)); if (!WIFEXITED(status) || (WEXITSTATUS(status) != exitcode)) { int exit_with = WEXITSTATUS(status); if (exit_with == 2) non_fatal("check_ptrace : child exited with status 2. " "\nDisabling SYSEMU support.\n"); non_fatal("check_ptrace : child exited with exitcode %d, while " "expecting %d; status 0x%x\n", exit_with, exitcode, status); if (mustexit) exit(1); ret = -1; } return ret; } /* Changed only during early boot */ int ptrace_faultinfo; static int disable_ptrace_faultinfo; int ptrace_ldt; static int disable_ptrace_ldt; int proc_mm; static int disable_proc_mm; int have_switch_mm; static int disable_switch_mm; int skas_needs_stub; static int __init skas0_cmd_param(char *str, int* add) { disable_ptrace_faultinfo = 1; disable_ptrace_ldt = 1; disable_proc_mm = 1; disable_switch_mm = 1; return 0; } /* The two __uml_setup would conflict, without this stupid alias. 
*/ static int __init mode_skas0_cmd_param(char *str, int* add) __attribute__((alias("skas0_cmd_param"))); __uml_setup("skas0", skas0_cmd_param, "skas0\n" " Disables SKAS3 and SKAS4 usage, so that SKAS0 is used\n\n"); __uml_setup("mode=skas0", mode_skas0_cmd_param, "mode=skas0\n" " Disables SKAS3 and SKAS4 usage, so that SKAS0 is used.\n\n"); /* Changed only during early boot */ static int force_sysemu_disabled = 0; static int __init nosysemu_cmd_param(char *str, int* add) { force_sysemu_disabled = 1; return 0; } __uml_setup("nosysemu", nosysemu_cmd_param, "nosysemu\n" " Turns off syscall emulation patch for ptrace (SYSEMU) on.\n" " SYSEMU is a performance-patch introduced by Laurent Vivier. It changes\n" " behaviour of ptrace() and helps reducing host context switch rate.\n" " To make it working, you need a kernel patch for your host, too.\n" " See http://perso.wanadoo.fr/laurent.vivier/UML/ for further \n" " information.\n\n"); static void __init check_sysemu(void) { unsigned long regs[MAX_REG_NR]; int pid, n, status, count=0; non_fatal("Checking syscall emulation patch for ptrace..."); sysemu_supported = 0; pid = start_ptraced_child(); if (ptrace(PTRACE_SYSEMU, pid, 0, 0) < 0) goto fail; CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); if (n < 0) fatal_perror("check_sysemu : wait failed"); if (!WIFSTOPPED(status) || (WSTOPSIG(status) != SIGTRAP)) fatal("check_sysemu : expected SIGTRAP, got status = %d\n", status); if (ptrace(PTRACE_GETREGS, pid, 0, regs) < 0) fatal_perror("check_sysemu : PTRACE_GETREGS failed"); if (PT_SYSCALL_NR(regs) != __NR_getpid) { non_fatal("check_sysemu got system call number %d, " "expected %d...", PT_SYSCALL_NR(regs), __NR_getpid); goto fail; } n = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_RET_OFFSET, os_getpid()); if (n < 0) { non_fatal("check_sysemu : failed to modify system call " "return"); goto fail; } if (stop_ptraced_child(pid, 0, 0) < 0) goto fail_stopped; sysemu_supported = 1; non_fatal("OK\n"); 
set_using_sysemu(!force_sysemu_disabled); non_fatal("Checking advanced syscall emulation patch for ptrace..."); pid = start_ptraced_child(); if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *) PTRACE_O_TRACESYSGOOD) < 0)) fatal_perror("check_sysemu: PTRACE_OLDSETOPTIONS failed"); while (1) { count++; if (ptrace(PTRACE_SYSEMU_SINGLESTEP, pid, 0, 0) < 0) goto fail; CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); if (n < 0) fatal_perror("check_sysemu: wait failed"); if (WIFSTOPPED(status) && (WSTOPSIG(status) == (SIGTRAP|0x80))) { if (!count) { non_fatal("check_sysemu: SYSEMU_SINGLESTEP " "doesn't singlestep"); goto fail; } n = ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_RET_OFFSET, os_getpid()); if (n < 0) fatal_perror("check_sysemu : failed to modify " "system call return"); break; } else if (WIFSTOPPED(status) && (WSTOPSIG(status) == SIGTRAP)) count++; else { non_fatal("check_sysemu: expected SIGTRAP or " "(SIGTRAP | 0x80), got status = %d\n", status); goto fail; } } if (stop_ptraced_child(pid, 0, 0) < 0) goto fail_stopped; sysemu_supported = 2; non_fatal("OK\n"); if (!force_sysemu_disabled) set_using_sysemu(sysemu_supported); return; fail: stop_ptraced_child(pid, 1, 0); fail_stopped: non_fatal("missing\n"); } static void __init check_ptrace(void) { int pid, syscall, n, status; non_fatal("Checking that ptrace can change system call numbers..."); pid = start_ptraced_child(); if ((ptrace(PTRACE_OLDSETOPTIONS, pid, 0, (void *) PTRACE_O_TRACESYSGOOD) < 0)) fatal_perror("check_ptrace: PTRACE_OLDSETOPTIONS failed"); while (1) { if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0) fatal_perror("check_ptrace : ptrace failed"); CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED)); if (n < 0) fatal_perror("check_ptrace : wait failed"); if (!WIFSTOPPED(status) || (WSTOPSIG(status) != (SIGTRAP | 0x80))) fatal("check_ptrace : expected (SIGTRAP|0x80), " "got status = %d", status); syscall = ptrace(PTRACE_PEEKUSER, pid, PT_SYSCALL_NR_OFFSET, 0); if (syscall == __NR_getpid) { n = 
ptrace(PTRACE_POKEUSER, pid, PT_SYSCALL_NR_OFFSET, __NR_getppid); if (n < 0) fatal_perror("check_ptrace : failed to modify " "system call"); break; } } stop_ptraced_child(pid, 0, 1); non_fatal("OK\n"); check_sysemu(); } extern void check_tmpexec(void); static void __init check_coredump_limit(void) { struct rlimit lim; int err = getrlimit(RLIMIT_CORE, &lim); if (err) { perror("Getting core dump limit"); return; } printf("Core dump limits :\n\tsoft - "); if (lim.rlim_cur == RLIM_INFINITY) printf("NONE\n"); else printf("%lu\n", lim.rlim_cur); printf("\thard - "); if (lim.rlim_max == RLIM_INFINITY) printf("NONE\n"); else printf("%lu\n", lim.rlim_max); } void __init os_early_checks(void) { int pid; /* Print out the core dump limits early */ check_coredump_limit(); check_ptrace(); /* Need to check this early because mmapping happens before the * kernel is running. */ check_tmpexec(); pid = start_ptraced_child(); if (init_registers(pid)) fatal("Failed to initialize default registers"); stop_ptraced_child(pid, 1, 1); } static int __init noprocmm_cmd_param(char *str, int* add) { disable_proc_mm = 1; return 0; } __uml_setup("noprocmm", noprocmm_cmd_param, "noprocmm\n" " Turns off usage of /proc/mm, even if host supports it.\n" " To support /proc/mm, the host needs to be patched using\n" " the current skas3 patch.\n\n"); static int __init noptracefaultinfo_cmd_param(char *str, int* add) { disable_ptrace_faultinfo = 1; return 0; } __uml_setup("noptracefaultinfo", noptracefaultinfo_cmd_param, "noptracefaultinfo\n" " Turns off usage of PTRACE_FAULTINFO, even if host supports\n" " it. 
To support PTRACE_FAULTINFO, the host needs to be patched\n" " using the current skas3 patch.\n\n"); static int __init noptraceldt_cmd_param(char *str, int* add) { disable_ptrace_ldt = 1; return 0; } __uml_setup("noptraceldt", noptraceldt_cmd_param, "noptraceldt\n" " Turns off usage of PTRACE_LDT, even if host supports it.\n" " To support PTRACE_LDT, the host needs to be patched using\n" " the current skas3 patch.\n\n"); static inline void check_skas3_ptrace_faultinfo(void) { struct ptrace_faultinfo fi; int pid, n; non_fatal(" - PTRACE_FAULTINFO..."); pid = start_ptraced_child(); n = ptrace(PTRACE_FAULTINFO, pid, 0, &fi); if (n < 0) { if (errno == EIO) non_fatal("not found\n"); else perror("not found"); } else if (disable_ptrace_faultinfo) non_fatal("found but disabled on command line\n"); else { ptrace_faultinfo = 1; non_fatal("found\n"); } stop_ptraced_child(pid, 1, 1); } static inline void check_skas3_ptrace_ldt(void) { #ifdef PTRACE_LDT int pid, n; unsigned char ldtbuf[40]; struct ptrace_ldt ldt_op = (struct ptrace_ldt) { .func = 2, /* read default ldt */ .ptr = ldtbuf, .bytecount = sizeof(ldtbuf)}; non_fatal(" - PTRACE_LDT..."); pid = start_ptraced_child(); n = ptrace(PTRACE_LDT, pid, 0, (unsigned long) &ldt_op); if (n < 0) { if (errno == EIO) non_fatal("not found\n"); else perror("not found"); } else if (disable_ptrace_ldt) non_fatal("found, but use is disabled\n"); else { ptrace_ldt = 1; non_fatal("found\n"); } stop_ptraced_child(pid, 1, 1); #endif } static inline void check_skas3_proc_mm(void) { non_fatal(" - /proc/mm..."); if (access("/proc/mm", W_OK) < 0) perror("not found"); else if (disable_proc_mm) non_fatal("found but disabled on command line\n"); else { proc_mm = 1; non_fatal("found\n"); } } void can_do_skas(void) { non_fatal("Checking for the skas3 patch in the host:\n"); check_skas3_proc_mm(); check_skas3_ptrace_faultinfo(); check_skas3_ptrace_ldt(); if (!proc_mm || !ptrace_faultinfo || !ptrace_ldt) skas_needs_stub = 1; } int __init 
parse_iomem(char *str, int *add) { struct iomem_region *new; struct stat64 buf; char *file, *driver; int fd, size; driver = str; file = strchr(str,','); if (file == NULL) { fprintf(stderr, "parse_iomem : failed to parse iomem\n"); goto out; } *file = '\0'; file++; fd = open(file, O_RDWR, 0); if (fd < 0) { perror("parse_iomem - Couldn't open io file"); goto out; } if (fstat64(fd, &buf) < 0) { perror("parse_iomem - cannot stat_fd file"); goto out_close; } new = malloc(sizeof(*new)); if (new == NULL) { perror("Couldn't allocate iomem_region struct"); goto out_close; } size = (buf.st_size + UM_KERN_PAGE_SIZE) & ~(UM_KERN_PAGE_SIZE - 1); *new = ((struct iomem_region) { .next = iomem_regions, .driver = driver, .fd = fd, .size = size, .phys = 0, .virt = 0 }); iomem_regions = new; iomem_size += new->size + UM_KERN_PAGE_SIZE; return 0; out_close: close(fd); out: return 1; }
gpl-2.0
Colonel-Corn/kernel_htc_msm8974
drivers/s390/char/con3215.c
4300
30233
/* * 3215 line mode terminal driver. * * Copyright IBM Corp. 1999, 2009 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> * * Updated: * Aug-2000: Added tab support * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kdev_t.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/vt_kern.h> #include <linux/init.h> #include <linux/console.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/reboot.h> #include <linux/slab.h> #include <asm/ccwdev.h> #include <asm/cio.h> #include <asm/io.h> #include <asm/ebcdic.h> #include <asm/uaccess.h> #include <asm/delay.h> #include <asm/cpcmd.h> #include <asm/setup.h> #include "ctrlchar.h" #define NR_3215 1 #define NR_3215_REQ (4*NR_3215) #define RAW3215_BUFFER_SIZE 65536 /* output buffer size */ #define RAW3215_INBUF_SIZE 256 /* input buffer size */ #define RAW3215_MIN_SPACE 128 /* minimum free space for wakeup */ #define RAW3215_MIN_WRITE 1024 /* min. length for immediate output */ #define RAW3215_MAX_BYTES 3968 /* max. bytes to write with one ssch */ #define RAW3215_MAX_NEWLINE 50 /* max. 
lines to write with one ssch */ #define RAW3215_NR_CCWS 3 #define RAW3215_TIMEOUT HZ/10 /* time for delayed output */ #define RAW3215_FIXED 1 /* 3215 console device is not be freed */ #define RAW3215_ACTIVE 2 /* set if the device is in use */ #define RAW3215_WORKING 4 /* set if a request is being worked on */ #define RAW3215_THROTTLED 8 /* set if reading is disabled */ #define RAW3215_STOPPED 16 /* set if writing is disabled */ #define RAW3215_CLOSING 32 /* set while in close process */ #define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */ #define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */ #define RAW3215_FROZEN 256 /* set if 3215 is frozen for suspend */ #define TAB_STOP_SIZE 8 /* tab stop size */ /* * Request types for a 3215 device */ enum raw3215_type { RAW3215_FREE, RAW3215_READ, RAW3215_WRITE }; /* * Request structure for a 3215 device */ struct raw3215_req { enum raw3215_type type; /* type of the request */ int start, len; /* start index & len in output buffer */ int delayable; /* indication to wait for more data */ int residual; /* residual count for read request */ struct ccw1 ccws[RAW3215_NR_CCWS]; /* space for the channel program */ struct raw3215_info *info; /* pointer to main structure */ struct raw3215_req *next; /* pointer to next request */ } __attribute__ ((aligned(8))); struct raw3215_info { struct ccw_device *cdev; /* device for tty driver */ spinlock_t *lock; /* pointer to irq lock */ int flags; /* state flags */ char *buffer; /* pointer to output buffer */ char *inbuf; /* pointer to input buffer */ int head; /* first free byte in output buffer */ int count; /* number of bytes in output buffer */ int written; /* number of bytes in write requests */ struct tty_struct *tty; /* pointer to tty structure if present */ struct raw3215_req *queued_read; /* pointer to queued read requests */ struct raw3215_req *queued_write;/* pointer to queued write requests */ struct tasklet_struct tlet; /* tasklet to invoke tty_wakeup 
*/ wait_queue_head_t empty_wait; /* wait queue for flushing */ struct timer_list timer; /* timer for delayed output */ int line_pos; /* position on the line (for tabs) */ char ubuffer[80]; /* copy_from_user buffer */ }; /* array of 3215 devices structures */ static struct raw3215_info *raw3215[NR_3215]; /* spinlock to protect the raw3215 array */ static DEFINE_SPINLOCK(raw3215_device_lock); /* list of free request structures */ static struct raw3215_req *raw3215_freelist; /* spinlock to protect free list */ static spinlock_t raw3215_freelist_lock; static struct tty_driver *tty3215_driver; /* * Get a request structure from the free list */ static inline struct raw3215_req *raw3215_alloc_req(void) { struct raw3215_req *req; unsigned long flags; spin_lock_irqsave(&raw3215_freelist_lock, flags); req = raw3215_freelist; raw3215_freelist = req->next; spin_unlock_irqrestore(&raw3215_freelist_lock, flags); return req; } /* * Put a request structure back to the free list */ static inline void raw3215_free_req(struct raw3215_req *req) { unsigned long flags; if (req->type == RAW3215_FREE) return; /* don't free a free request */ req->type = RAW3215_FREE; spin_lock_irqsave(&raw3215_freelist_lock, flags); req->next = raw3215_freelist; raw3215_freelist = req; spin_unlock_irqrestore(&raw3215_freelist_lock, flags); } /* * Set up a read request that reads up to 160 byte from the 3215 device. * If there is a queued read request it is used, but that shouldn't happen * because a 3215 terminal won't accept a new read before the old one is * completed. 
*/ static void raw3215_mk_read_req(struct raw3215_info *raw) { struct raw3215_req *req; struct ccw1 *ccw; /* there can only be ONE read request at a time */ req = raw->queued_read; if (req == NULL) { /* no queued read request, use new req structure */ req = raw3215_alloc_req(); req->type = RAW3215_READ; req->info = raw; raw->queued_read = req; } ccw = req->ccws; ccw->cmd_code = 0x0A; /* read inquiry */ ccw->flags = 0x20; /* ignore incorrect length */ ccw->count = 160; ccw->cda = (__u32) __pa(raw->inbuf); } /* * Set up a write request with the information from the main structure. * A ccw chain is created that writes as much as possible from the output * buffer to the 3215 device. If a queued write exists it is replaced by * the new, probably lengthened request. */ static void raw3215_mk_write_req(struct raw3215_info *raw) { struct raw3215_req *req; struct ccw1 *ccw; int len, count, ix, lines; if (raw->count <= raw->written) return; /* check if there is a queued write request */ req = raw->queued_write; if (req == NULL) { /* no queued write request, use new req structure */ req = raw3215_alloc_req(); req->type = RAW3215_WRITE; req->info = raw; raw->queued_write = req; } else { raw->written -= req->len; } ccw = req->ccws; req->start = (raw->head - raw->count + raw->written) & (RAW3215_BUFFER_SIZE - 1); /* * now we have to count newlines. 
We can at max accept * RAW3215_MAX_NEWLINE newlines in a single ssch due to * a restriction in VM */ lines = 0; ix = req->start; while (lines < RAW3215_MAX_NEWLINE && ix != raw->head) { if (raw->buffer[ix] == 0x15) lines++; ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1); } len = ((ix - 1 - req->start) & (RAW3215_BUFFER_SIZE - 1)) + 1; if (len > RAW3215_MAX_BYTES) len = RAW3215_MAX_BYTES; req->len = len; raw->written += len; /* set the indication if we should try to enlarge this request */ req->delayable = (ix == raw->head) && (len < RAW3215_MIN_WRITE); ix = req->start; while (len > 0) { if (ccw > req->ccws) ccw[-1].flags |= 0x40; /* use command chaining */ ccw->cmd_code = 0x01; /* write, auto carrier return */ ccw->flags = 0x20; /* ignore incorrect length ind. */ ccw->cda = (__u32) __pa(raw->buffer + ix); count = len; if (ix + count > RAW3215_BUFFER_SIZE) count = RAW3215_BUFFER_SIZE - ix; ccw->count = count; len -= count; ix = (ix + count) & (RAW3215_BUFFER_SIZE - 1); ccw++; } /* * Add a NOP to the channel program. 3215 devices are purely * emulated and its much better to avoid the channel end * interrupt in this case. 
*/ if (ccw > req->ccws) ccw[-1].flags |= 0x40; /* use command chaining */ ccw->cmd_code = 0x03; /* NOP */ ccw->flags = 0; ccw->cda = 0; ccw->count = 1; } /* * Start a read or a write request */ static void raw3215_start_io(struct raw3215_info *raw) { struct raw3215_req *req; int res; req = raw->queued_read; if (req != NULL && !(raw->flags & (RAW3215_WORKING | RAW3215_THROTTLED))) { /* dequeue request */ raw->queued_read = NULL; res = ccw_device_start(raw->cdev, req->ccws, (unsigned long) req, 0, 0); if (res != 0) { /* do_IO failed, put request back to queue */ raw->queued_read = req; } else { raw->flags |= RAW3215_WORKING; } } req = raw->queued_write; if (req != NULL && !(raw->flags & (RAW3215_WORKING | RAW3215_STOPPED))) { /* dequeue request */ raw->queued_write = NULL; res = ccw_device_start(raw->cdev, req->ccws, (unsigned long) req, 0, 0); if (res != 0) { /* do_IO failed, put request back to queue */ raw->queued_write = req; } else { raw->flags |= RAW3215_WORKING; } } } /* * Function to start a delayed output after RAW3215_TIMEOUT seconds */ static void raw3215_timeout(unsigned long __data) { struct raw3215_info *raw = (struct raw3215_info *) __data; unsigned long flags; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); if (raw->flags & RAW3215_TIMER_RUNS) { del_timer(&raw->timer); raw->flags &= ~RAW3215_TIMER_RUNS; if (!(raw->flags & RAW3215_FROZEN)) { raw3215_mk_write_req(raw); raw3215_start_io(raw); } } spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } /* * Function to conditionally start an IO. A read is started immediately, * a write is only started immediately if the flush flag is on or the * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not * done immediately a timer is started with a delay of RAW3215_TIMEOUT. 
*/ static inline void raw3215_try_io(struct raw3215_info *raw) { if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FROZEN)) return; if (raw->queued_read != NULL) raw3215_start_io(raw); else if (raw->queued_write != NULL) { if ((raw->queued_write->delayable == 0) || (raw->flags & RAW3215_FLUSHING)) { /* execute write requests bigger than minimum size */ raw3215_start_io(raw); if (raw->flags & RAW3215_TIMER_RUNS) { del_timer(&raw->timer); raw->flags &= ~RAW3215_TIMER_RUNS; } } else if (!(raw->flags & RAW3215_TIMER_RUNS)) { /* delay small writes */ init_timer(&raw->timer); raw->timer.expires = RAW3215_TIMEOUT + jiffies; raw->timer.data = (unsigned long) raw; raw->timer.function = raw3215_timeout; add_timer(&raw->timer); raw->flags |= RAW3215_TIMER_RUNS; } } } /* * Call tty_wakeup from tasklet context */ static void raw3215_wakeup(unsigned long data) { struct raw3215_info *raw = (struct raw3215_info *) data; tty_wakeup(raw->tty); } /* * Try to start the next IO and wake up processes waiting on the tty. */ static void raw3215_next_io(struct raw3215_info *raw) { raw3215_mk_write_req(raw); raw3215_try_io(raw); if (raw->tty && RAW3215_BUFFER_SIZE - raw->count >= RAW3215_MIN_SPACE) tasklet_schedule(&raw->tlet); } /* * Interrupt routine, called from common io layer */ static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) { struct raw3215_info *raw; struct raw3215_req *req; struct tty_struct *tty; int cstat, dstat; int count; raw = dev_get_drvdata(&cdev->dev); req = (struct raw3215_req *) intparm; cstat = irb->scsw.cmd.cstat; dstat = irb->scsw.cmd.dstat; if (cstat != 0) raw3215_next_io(raw); if (dstat & 0x01) { /* we got a unit exception */ dstat &= ~0x01; /* we can ignore it */ } switch (dstat) { case 0x80: if (cstat != 0) break; /* Attention interrupt, someone hit the enter key */ raw3215_mk_read_req(raw); raw3215_next_io(raw); break; case 0x08: case 0x0C: /* Channel end interrupt. 
*/ if ((raw = req->info) == NULL) return; /* That shouldn't happen ... */ if (req->type == RAW3215_READ) { /* store residual count, then wait for device end */ req->residual = irb->scsw.cmd.count; } if (dstat == 0x08) break; case 0x04: /* Device end interrupt. */ if ((raw = req->info) == NULL) return; /* That shouldn't happen ... */ if (req->type == RAW3215_READ && raw->tty != NULL) { unsigned int cchar; tty = raw->tty; count = 160 - req->residual; EBCASC(raw->inbuf, count); cchar = ctrlchar_handle(raw->inbuf, count, tty); switch (cchar & CTRLCHAR_MASK) { case CTRLCHAR_SYSRQ: break; case CTRLCHAR_CTRL: tty_insert_flip_char(tty, cchar, TTY_NORMAL); tty_flip_buffer_push(raw->tty); break; case CTRLCHAR_NONE: if (count < 2 || (strncmp(raw->inbuf+count-2, "\252n", 2) && strncmp(raw->inbuf+count-2, "^n", 2)) ) { /* add the auto \n */ raw->inbuf[count] = '\n'; count++; } else count -= 2; tty_insert_flip_string(tty, raw->inbuf, count); tty_flip_buffer_push(raw->tty); break; } } else if (req->type == RAW3215_WRITE) { raw->count -= req->len; raw->written -= req->len; } raw->flags &= ~RAW3215_WORKING; raw3215_free_req(req); /* check for empty wait */ if (waitqueue_active(&raw->empty_wait) && raw->queued_write == NULL && raw->queued_read == NULL) { wake_up_interruptible(&raw->empty_wait); } raw3215_next_io(raw); break; default: /* Strange interrupt, I'll do my best to clean up */ if (req != NULL && req->type != RAW3215_FREE) { if (req->type == RAW3215_WRITE) { raw->count -= req->len; raw->written -= req->len; } raw->flags &= ~RAW3215_WORKING; raw3215_free_req(req); } raw3215_next_io(raw); } return; } /* * Drop the oldest line from the output buffer. 
*/ static void raw3215_drop_line(struct raw3215_info *raw) { int ix; char ch; BUG_ON(raw->written != 0); ix = (raw->head - raw->count) & (RAW3215_BUFFER_SIZE - 1); while (raw->count > 0) { ch = raw->buffer[ix]; ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1); raw->count--; if (ch == 0x15) break; } raw->head = ix; } /* * Wait until length bytes are available int the output buffer. * Has to be called with the s390irq lock held. Can be called * disabled. */ static void raw3215_make_room(struct raw3215_info *raw, unsigned int length) { while (RAW3215_BUFFER_SIZE - raw->count < length) { /* While console is frozen for suspend we have no other * choice but to drop message from the buffer to make * room for even more messages. */ if (raw->flags & RAW3215_FROZEN) { raw3215_drop_line(raw); continue; } /* there might be a request pending */ raw->flags |= RAW3215_FLUSHING; raw3215_mk_write_req(raw); raw3215_try_io(raw); raw->flags &= ~RAW3215_FLUSHING; #ifdef CONFIG_TN3215_CONSOLE wait_cons_dev(); #endif /* Enough room freed up ? */ if (RAW3215_BUFFER_SIZE - raw->count >= length) break; /* there might be another cpu waiting for the lock */ spin_unlock(get_ccwdev_lock(raw->cdev)); udelay(100); spin_lock(get_ccwdev_lock(raw->cdev)); } } /* * String write routine for 3215 devices */ static void raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length) { unsigned long flags; int c, count; while (length > 0) { spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); count = (length > RAW3215_BUFFER_SIZE) ? 
RAW3215_BUFFER_SIZE : length; length -= count; raw3215_make_room(raw, count); /* copy string to output buffer and convert it to EBCDIC */ while (1) { c = min_t(int, count, min(RAW3215_BUFFER_SIZE - raw->count, RAW3215_BUFFER_SIZE - raw->head)); if (c <= 0) break; memcpy(raw->buffer + raw->head, str, c); ASCEBC(raw->buffer + raw->head, c); raw->head = (raw->head + c) & (RAW3215_BUFFER_SIZE - 1); raw->count += c; raw->line_pos += c; str += c; count -= c; } if (!(raw->flags & RAW3215_WORKING)) { raw3215_mk_write_req(raw); /* start or queue request */ raw3215_try_io(raw); } spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } } /* * Put character routine for 3215 devices */ static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch) { unsigned long flags; unsigned int length, i; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); if (ch == '\t') { length = TAB_STOP_SIZE - (raw->line_pos%TAB_STOP_SIZE); raw->line_pos += length; ch = ' '; } else if (ch == '\n') { length = 1; raw->line_pos = 0; } else { length = 1; raw->line_pos++; } raw3215_make_room(raw, length); for (i = 0; i < length; i++) { raw->buffer[raw->head] = (char) _ascebc[(int) ch]; raw->head = (raw->head + 1) & (RAW3215_BUFFER_SIZE - 1); raw->count++; } if (!(raw->flags & RAW3215_WORKING)) { raw3215_mk_write_req(raw); /* start or queue request */ raw3215_try_io(raw); } spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } /* * Flush routine, it simply sets the flush flag and tries to start * pending IO. */ static void raw3215_flush_buffer(struct raw3215_info *raw) { unsigned long flags; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); if (raw->count > 0) { raw->flags |= RAW3215_FLUSHING; raw3215_try_io(raw); raw->flags &= ~RAW3215_FLUSHING; } spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } /* * Fire up a 3215 device. 
*/ static int raw3215_startup(struct raw3215_info *raw) { unsigned long flags; if (raw->flags & RAW3215_ACTIVE) return 0; raw->line_pos = 0; raw->flags |= RAW3215_ACTIVE; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw3215_try_io(raw); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); return 0; } /* * Shutdown a 3215 device. */ static void raw3215_shutdown(struct raw3215_info *raw) { DECLARE_WAITQUEUE(wait, current); unsigned long flags; if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FIXED)) return; /* Wait for outstanding requests, then free irq */ spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); if ((raw->flags & RAW3215_WORKING) || raw->queued_write != NULL || raw->queued_read != NULL) { raw->flags |= RAW3215_CLOSING; add_wait_queue(&raw->empty_wait, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); schedule(); spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); remove_wait_queue(&raw->empty_wait, &wait); set_current_state(TASK_RUNNING); raw->flags &= ~(RAW3215_ACTIVE | RAW3215_CLOSING); } spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } static int raw3215_probe (struct ccw_device *cdev) { struct raw3215_info *raw; int line; /* Console is special. 
*/ if (raw3215[0] && (raw3215[0] == dev_get_drvdata(&cdev->dev))) return 0; raw = kmalloc(sizeof(struct raw3215_info) + RAW3215_INBUF_SIZE, GFP_KERNEL|GFP_DMA); if (raw == NULL) return -ENOMEM; spin_lock(&raw3215_device_lock); for (line = 0; line < NR_3215; line++) { if (!raw3215[line]) { raw3215[line] = raw; break; } } spin_unlock(&raw3215_device_lock); if (line == NR_3215) { kfree(raw); return -ENODEV; } raw->cdev = cdev; raw->inbuf = (char *) raw + sizeof(struct raw3215_info); memset(raw, 0, sizeof(struct raw3215_info)); raw->buffer = kmalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL|GFP_DMA); if (raw->buffer == NULL) { spin_lock(&raw3215_device_lock); raw3215[line] = NULL; spin_unlock(&raw3215_device_lock); kfree(raw); return -ENOMEM; } init_waitqueue_head(&raw->empty_wait); tasklet_init(&raw->tlet, raw3215_wakeup, (unsigned long) raw); dev_set_drvdata(&cdev->dev, raw); cdev->handler = raw3215_irq; return 0; } static void raw3215_remove (struct ccw_device *cdev) { struct raw3215_info *raw; ccw_device_set_offline(cdev); raw = dev_get_drvdata(&cdev->dev); if (raw) { dev_set_drvdata(&cdev->dev, NULL); kfree(raw->buffer); kfree(raw); } } static int raw3215_set_online (struct ccw_device *cdev) { struct raw3215_info *raw; raw = dev_get_drvdata(&cdev->dev); if (!raw) return -ENODEV; return raw3215_startup(raw); } static int raw3215_set_offline (struct ccw_device *cdev) { struct raw3215_info *raw; raw = dev_get_drvdata(&cdev->dev); if (!raw) return -ENODEV; raw3215_shutdown(raw); return 0; } static int raw3215_pm_stop(struct ccw_device *cdev) { struct raw3215_info *raw; unsigned long flags; /* Empty the output buffer, then prevent new I/O. 
*/ raw = dev_get_drvdata(&cdev->dev); spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw3215_make_room(raw, RAW3215_BUFFER_SIZE); raw->flags |= RAW3215_FROZEN; spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); return 0; } static int raw3215_pm_start(struct ccw_device *cdev) { struct raw3215_info *raw; unsigned long flags; /* Allow I/O again and flush output buffer. */ raw = dev_get_drvdata(&cdev->dev); spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw->flags &= ~RAW3215_FROZEN; raw->flags |= RAW3215_FLUSHING; raw3215_try_io(raw); raw->flags &= ~RAW3215_FLUSHING; spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); return 0; } static struct ccw_device_id raw3215_id[] = { { CCW_DEVICE(0x3215, 0) }, { /* end of list */ }, }; static struct ccw_driver raw3215_ccw_driver = { .driver = { .name = "3215", .owner = THIS_MODULE, }, .ids = raw3215_id, .probe = &raw3215_probe, .remove = &raw3215_remove, .set_online = &raw3215_set_online, .set_offline = &raw3215_set_offline, .freeze = &raw3215_pm_stop, .thaw = &raw3215_pm_start, .restore = &raw3215_pm_start, .int_class = IOINT_C15, }; #ifdef CONFIG_TN3215_CONSOLE /* * Write a string to the 3215 console */ static void con3215_write(struct console *co, const char *str, unsigned int count) { struct raw3215_info *raw; int i; if (count <= 0) return; raw = raw3215[0]; /* console 3215 is the first one */ while (count > 0) { for (i = 0; i < count; i++) if (str[i] == '\t' || str[i] == '\n') break; raw3215_write(raw, str, i); count -= i; str += i; if (count > 0) { raw3215_putchar(raw, *str); count--; str++; } } } static struct tty_driver *con3215_device(struct console *c, int *index) { *index = c->index; return tty3215_driver; } /* * panic() calls con3215_flush through a panic_notifier * before the system enters a disabled, endless loop. 
*/ static void con3215_flush(void) { struct raw3215_info *raw; unsigned long flags; raw = raw3215[0]; /* console 3215 is the first one */ if (raw->flags & RAW3215_FROZEN) /* The console is still frozen for suspend. */ if (ccw_device_force_console()) /* Forcing didn't work, no panic message .. */ return; spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw3215_make_room(raw, RAW3215_BUFFER_SIZE); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } static int con3215_notify(struct notifier_block *self, unsigned long event, void *data) { con3215_flush(); return NOTIFY_OK; } static struct notifier_block on_panic_nb = { .notifier_call = con3215_notify, .priority = 0, }; static struct notifier_block on_reboot_nb = { .notifier_call = con3215_notify, .priority = 0, }; /* * The console structure for the 3215 console */ static struct console con3215 = { .name = "ttyS", .write = con3215_write, .device = con3215_device, .flags = CON_PRINTBUFFER, }; /* * 3215 console initialization code called from console_init(). 
*/ static int __init con3215_init(void) { struct ccw_device *cdev; struct raw3215_info *raw; struct raw3215_req *req; int i; /* Check if 3215 is to be the console */ if (!CONSOLE_IS_3215) return -ENODEV; /* Set the console mode for VM */ if (MACHINE_IS_VM) { cpcmd("TERM CONMODE 3215", NULL, 0, NULL); cpcmd("TERM AUTOCR OFF", NULL, 0, NULL); } /* allocate 3215 request structures */ raw3215_freelist = NULL; spin_lock_init(&raw3215_freelist_lock); for (i = 0; i < NR_3215_REQ; i++) { req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA); req->next = raw3215_freelist; raw3215_freelist = req; } cdev = ccw_device_probe_console(); if (IS_ERR(cdev)) return -ENODEV; raw3215[0] = raw = (struct raw3215_info *) kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA); raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA); raw->cdev = cdev; dev_set_drvdata(&cdev->dev, raw); cdev->handler = raw3215_irq; raw->flags |= RAW3215_FIXED; init_waitqueue_head(&raw->empty_wait); tasklet_init(&raw->tlet, raw3215_wakeup, (unsigned long) raw); /* Request the console irq */ if (raw3215_startup(raw) != 0) { kfree(raw->inbuf); kfree(raw->buffer); kfree(raw); raw3215[0] = NULL; return -ENODEV; } atomic_notifier_chain_register(&panic_notifier_list, &on_panic_nb); register_reboot_notifier(&on_reboot_nb); register_console(&con3215); return 0; } console_initcall(con3215_init); #endif /* * tty3215_open * * This routine is called whenever a 3215 tty is opened. */ static int tty3215_open(struct tty_struct *tty, struct file * filp) { struct raw3215_info *raw; int retval; raw = raw3215[tty->index]; if (raw == NULL) return -ENODEV; tty->driver_data = raw; raw->tty = tty; tty->low_latency = 0; /* don't use bottom half for pushing chars */ /* * Start up 3215 device */ retval = raw3215_startup(raw); if (retval) return retval; return 0; } /* * tty3215_close() * * This routine is called when the 3215 tty is closed. 
We wait * for the remaining request to be completed. Then we clean up. */ static void tty3215_close(struct tty_struct *tty, struct file * filp) { struct raw3215_info *raw; raw = (struct raw3215_info *) tty->driver_data; if (raw == NULL || tty->count > 1) return; tty->closing = 1; /* Shutdown the terminal */ raw3215_shutdown(raw); tasklet_kill(&raw->tlet); tty->closing = 0; raw->tty = NULL; } /* * Returns the amount of free space in the output buffer. */ static int tty3215_write_room(struct tty_struct *tty) { struct raw3215_info *raw; raw = (struct raw3215_info *) tty->driver_data; /* Subtract TAB_STOP_SIZE to allow for a tab, 8 <<< 64K */ if ((RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE) >= 0) return RAW3215_BUFFER_SIZE - raw->count - TAB_STOP_SIZE; else return 0; } /* * String write routine for 3215 ttys */ static int tty3215_write(struct tty_struct * tty, const unsigned char *buf, int count) { struct raw3215_info *raw; if (!tty) return 0; raw = (struct raw3215_info *) tty->driver_data; raw3215_write(raw, buf, count); return count; } /* * Put character routine for 3215 ttys */ static int tty3215_put_char(struct tty_struct *tty, unsigned char ch) { struct raw3215_info *raw; if (!tty) return 0; raw = (struct raw3215_info *) tty->driver_data; raw3215_putchar(raw, ch); return 1; } static void tty3215_flush_chars(struct tty_struct *tty) { } /* * Returns the number of characters in the output buffer */ static int tty3215_chars_in_buffer(struct tty_struct *tty) { struct raw3215_info *raw; raw = (struct raw3215_info *) tty->driver_data; return raw->count; } static void tty3215_flush_buffer(struct tty_struct *tty) { struct raw3215_info *raw; raw = (struct raw3215_info *) tty->driver_data; raw3215_flush_buffer(raw); tty_wakeup(tty); } /* * Disable reading from a 3215 tty */ static void tty3215_throttle(struct tty_struct * tty) { struct raw3215_info *raw; raw = (struct raw3215_info *) tty->driver_data; raw->flags |= RAW3215_THROTTLED; } /* * Enable reading from a 3215 
tty */ static void tty3215_unthrottle(struct tty_struct * tty) { struct raw3215_info *raw; unsigned long flags; raw = (struct raw3215_info *) tty->driver_data; if (raw->flags & RAW3215_THROTTLED) { spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw->flags &= ~RAW3215_THROTTLED; raw3215_try_io(raw); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } } /* * Disable writing to a 3215 tty */ static void tty3215_stop(struct tty_struct *tty) { struct raw3215_info *raw; raw = (struct raw3215_info *) tty->driver_data; raw->flags |= RAW3215_STOPPED; } /* * Enable writing to a 3215 tty */ static void tty3215_start(struct tty_struct *tty) { struct raw3215_info *raw; unsigned long flags; raw = (struct raw3215_info *) tty->driver_data; if (raw->flags & RAW3215_STOPPED) { spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); raw->flags &= ~RAW3215_STOPPED; raw3215_try_io(raw); spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); } } static const struct tty_operations tty3215_ops = { .open = tty3215_open, .close = tty3215_close, .write = tty3215_write, .put_char = tty3215_put_char, .flush_chars = tty3215_flush_chars, .write_room = tty3215_write_room, .chars_in_buffer = tty3215_chars_in_buffer, .flush_buffer = tty3215_flush_buffer, .throttle = tty3215_throttle, .unthrottle = tty3215_unthrottle, .stop = tty3215_stop, .start = tty3215_start, }; /* * 3215 tty registration code called from tty_init(). * Most kernel services (incl. kmalloc) are available at this poimt. 
*/ static int __init tty3215_init(void) { struct tty_driver *driver; int ret; if (!CONSOLE_IS_3215) return 0; driver = alloc_tty_driver(NR_3215); if (!driver) return -ENOMEM; ret = ccw_driver_register(&raw3215_ccw_driver); if (ret) { put_tty_driver(driver); return ret; } /* * Initialize the tty_driver structure * Entries in tty3215_driver that are NOT initialized: * proc_entry, set_termios, flush_buffer, set_ldisc, write_proc */ driver->driver_name = "tty3215"; driver->name = "ttyS"; driver->major = TTY_MAJOR; driver->minor_start = 64; driver->type = TTY_DRIVER_TYPE_SYSTEM; driver->subtype = SYSTEM_TYPE_TTY; driver->init_termios = tty_std_termios; driver->init_termios.c_iflag = IGNBRK | IGNPAR; driver->init_termios.c_oflag = ONLCR | XTABS; driver->init_termios.c_lflag = ISIG; driver->flags = TTY_DRIVER_REAL_RAW; tty_set_operations(driver, &tty3215_ops); ret = tty_register_driver(driver); if (ret) { put_tty_driver(driver); return ret; } tty3215_driver = driver; return 0; } static void __exit tty3215_exit(void) { tty_unregister_driver(tty3215_driver); put_tty_driver(tty3215_driver); ccw_driver_unregister(&raw3215_ccw_driver); } module_init(tty3215_init); module_exit(tty3215_exit);
gpl-2.0
zeroblade1984/LG_MSM8974
arch/arm/mach-omap2/timer.c
4556
13221
/* * linux/arch/arm/mach-omap2/timer.c * * OMAP2 GP timer support. * * Copyright (C) 2009 Nokia Corporation * * Update to use new clocksource/clockevent layers * Author: Kevin Hilman, MontaVista Software, Inc. <source@mvista.com> * Copyright (C) 2007 MontaVista Software, Inc. * * Original driver: * Copyright (C) 2005 Nokia Corporation * Author: Paul Mundt <paul.mundt@nokia.com> * Juha Yrjölä <juha.yrjola@nokia.com> * OMAP Dual-mode timer framework support by Timo Teras * * Some parts based off of TI's 24xx code: * * Copyright (C) 2004-2009 Texas Instruments, Inc. * * Roughly modelled after the OMAP1 MPU timer code. * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/time.h> #include <linux/interrupt.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/slab.h> #include <asm/mach/time.h> #include <plat/dmtimer.h> #include <asm/smp_twd.h> #include <asm/sched_clock.h> #include "common.h" #include <plat/omap_hwmod.h> #include <plat/omap_device.h> #include <plat/omap-pm.h> #include "powerdomain.h" /* Parent clocks, eventually these will come from the clock framework */ #define OMAP2_MPU_SOURCE "sys_ck" #define OMAP3_MPU_SOURCE OMAP2_MPU_SOURCE #define OMAP4_MPU_SOURCE "sys_clkin_ck" #define OMAP2_32K_SOURCE "func_32k_ck" #define OMAP3_32K_SOURCE "omap_32k_fck" #define OMAP4_32K_SOURCE "sys_32k_ck" #ifdef CONFIG_OMAP_32K_TIMER #define OMAP2_CLKEV_SOURCE OMAP2_32K_SOURCE #define OMAP3_CLKEV_SOURCE OMAP3_32K_SOURCE #define OMAP4_CLKEV_SOURCE OMAP4_32K_SOURCE #define OMAP3_SECURE_TIMER 12 #else #define OMAP2_CLKEV_SOURCE OMAP2_MPU_SOURCE #define OMAP3_CLKEV_SOURCE OMAP3_MPU_SOURCE #define OMAP4_CLKEV_SOURCE OMAP4_MPU_SOURCE 
#define OMAP3_SECURE_TIMER 1 #endif /* MAX_GPTIMER_ID: number of GPTIMERs on the chip */ #define MAX_GPTIMER_ID 12 static u32 sys_timer_reserved; /* Clockevent code */ static struct omap_dm_timer clkev; static struct clock_event_device clockevent_gpt; static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id) { struct clock_event_device *evt = &clockevent_gpt; __omap_dm_timer_write_status(&clkev, OMAP_TIMER_INT_OVERFLOW); evt->event_handler(evt); return IRQ_HANDLED; } static struct irqaction omap2_gp_timer_irq = { .name = "gp timer", .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, .handler = omap2_gp_timer_interrupt, }; static int omap2_gp_timer_set_next_event(unsigned long cycles, struct clock_event_device *evt) { __omap_dm_timer_load_start(&clkev, OMAP_TIMER_CTRL_ST, 0xffffffff - cycles, 1); return 0; } static void omap2_gp_timer_set_mode(enum clock_event_mode mode, struct clock_event_device *evt) { u32 period; __omap_dm_timer_stop(&clkev, 1, clkev.rate); switch (mode) { case CLOCK_EVT_MODE_PERIODIC: period = clkev.rate / HZ; period -= 1; /* Looks like we need to first set the load value separately */ __omap_dm_timer_write(&clkev, OMAP_TIMER_LOAD_REG, 0xffffffff - period, 1); __omap_dm_timer_load_start(&clkev, OMAP_TIMER_CTRL_AR | OMAP_TIMER_CTRL_ST, 0xffffffff - period, 1); break; case CLOCK_EVT_MODE_ONESHOT: break; case CLOCK_EVT_MODE_UNUSED: case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_RESUME: break; } } static struct clock_event_device clockevent_gpt = { .name = "gp timer", .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, .shift = 32, .set_next_event = omap2_gp_timer_set_next_event, .set_mode = omap2_gp_timer_set_mode, }; static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer, int gptimer_id, const char *fck_source) { char name[10]; /* 10 = sizeof("gptXX_Xck0") */ struct omap_hwmod *oh; size_t size; int res = 0; sprintf(name, "timer%d", gptimer_id); omap_hwmod_setup_one(name); oh = omap_hwmod_lookup(name); if (!oh) 
return -ENODEV; timer->irq = oh->mpu_irqs[0].irq; timer->phys_base = oh->slaves[0]->addr->pa_start; size = oh->slaves[0]->addr->pa_end - timer->phys_base; /* Static mapping, never released */ timer->io_base = ioremap(timer->phys_base, size); if (!timer->io_base) return -ENXIO; /* After the dmtimer is using hwmod these clocks won't be needed */ sprintf(name, "gpt%d_fck", gptimer_id); timer->fclk = clk_get(NULL, name); if (IS_ERR(timer->fclk)) return -ENODEV; sprintf(name, "gpt%d_ick", gptimer_id); timer->iclk = clk_get(NULL, name); if (IS_ERR(timer->iclk)) { clk_put(timer->fclk); return -ENODEV; } omap_hwmod_enable(oh); sys_timer_reserved |= (1 << (gptimer_id - 1)); if (gptimer_id != 12) { struct clk *src; src = clk_get(NULL, fck_source); if (IS_ERR(src)) { res = -EINVAL; } else { res = __omap_dm_timer_set_source(timer->fclk, src); if (IS_ERR_VALUE(res)) pr_warning("%s: timer%i cannot set source\n", __func__, gptimer_id); clk_put(src); } } __omap_dm_timer_init_regs(timer); __omap_dm_timer_reset(timer, 1, 1); timer->posted = 1; timer->rate = clk_get_rate(timer->fclk); timer->reserved = 1; return res; } static void __init omap2_gp_clockevent_init(int gptimer_id, const char *fck_source) { int res; res = omap_dm_timer_init_one(&clkev, gptimer_id, fck_source); BUG_ON(res); omap2_gp_timer_irq.dev_id = (void *)&clkev; setup_irq(clkev.irq, &omap2_gp_timer_irq); __omap_dm_timer_int_enable(&clkev, OMAP_TIMER_INT_OVERFLOW); clockevent_gpt.mult = div_sc(clkev.rate, NSEC_PER_SEC, clockevent_gpt.shift); clockevent_gpt.max_delta_ns = clockevent_delta2ns(0xffffffff, &clockevent_gpt); clockevent_gpt.min_delta_ns = clockevent_delta2ns(3, &clockevent_gpt); /* Timer internal resynch latency. 
*/ clockevent_gpt.cpumask = cpumask_of(0); clockevents_register_device(&clockevent_gpt); pr_info("OMAP clockevent source: GPTIMER%d at %lu Hz\n", gptimer_id, clkev.rate); } /* Clocksource code */ #ifdef CONFIG_OMAP_32K_TIMER /* * When 32k-timer is enabled, don't use GPTimer for clocksource * instead, just leave default clocksource which uses the 32k * sync counter. See clocksource setup in plat-omap/counter_32k.c */ static void __init omap2_gp_clocksource_init(int unused, const char *dummy) { omap_init_clocksource_32k(); } #else static struct omap_dm_timer clksrc; /* * clocksource */ static cycle_t clocksource_read_cycles(struct clocksource *cs) { return (cycle_t)__omap_dm_timer_read_counter(&clksrc, 1); } static struct clocksource clocksource_gpt = { .name = "gp timer", .rating = 300, .read = clocksource_read_cycles, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; static u32 notrace dmtimer_read_sched_clock(void) { if (clksrc.reserved) return __omap_dm_timer_read_counter(&clksrc, 1); return 0; } /* Setup free-running counter for clocksource */ static void __init omap2_gp_clocksource_init(int gptimer_id, const char *fck_source) { int res; res = omap_dm_timer_init_one(&clksrc, gptimer_id, fck_source); BUG_ON(res); pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n", gptimer_id, clksrc.rate); __omap_dm_timer_load_start(&clksrc, OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1); setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate); if (clocksource_register_hz(&clocksource_gpt, clksrc.rate)) pr_err("Could not register clocksource %s\n", clocksource_gpt.name); } #endif #define OMAP_SYS_TIMER_INIT(name, clkev_nr, clkev_src, \ clksrc_nr, clksrc_src) \ static void __init omap##name##_timer_init(void) \ { \ omap2_gp_clockevent_init((clkev_nr), clkev_src); \ omap2_gp_clocksource_init((clksrc_nr), clksrc_src); \ } #define OMAP_SYS_TIMER(name) \ struct sys_timer omap##name##_timer = { \ .init = omap##name##_timer_init, \ }; #ifdef CONFIG_ARCH_OMAP2 
OMAP_SYS_TIMER_INIT(2, 1, OMAP2_CLKEV_SOURCE, 2, OMAP2_MPU_SOURCE) OMAP_SYS_TIMER(2) #endif #ifdef CONFIG_ARCH_OMAP3 OMAP_SYS_TIMER_INIT(3, 1, OMAP3_CLKEV_SOURCE, 2, OMAP3_MPU_SOURCE) OMAP_SYS_TIMER(3) OMAP_SYS_TIMER_INIT(3_secure, OMAP3_SECURE_TIMER, OMAP3_CLKEV_SOURCE, 2, OMAP3_MPU_SOURCE) OMAP_SYS_TIMER(3_secure) #endif #ifdef CONFIG_ARCH_OMAP4 #ifdef CONFIG_LOCAL_TIMERS static DEFINE_TWD_LOCAL_TIMER(twd_local_timer, OMAP44XX_LOCAL_TWD_BASE, OMAP44XX_IRQ_LOCALTIMER); #endif static void __init omap4_timer_init(void) { omap2_gp_clockevent_init(1, OMAP4_CLKEV_SOURCE); omap2_gp_clocksource_init(2, OMAP4_MPU_SOURCE); #ifdef CONFIG_LOCAL_TIMERS /* Local timers are not supprted on OMAP4430 ES1.0 */ if (omap_rev() != OMAP4430_REV_ES1_0) { int err; err = twd_local_timer_register(&twd_local_timer); if (err) pr_err("twd_local_timer_register failed %d\n", err); } #endif } OMAP_SYS_TIMER(4) #endif /** * omap2_dm_timer_set_src - change the timer input clock source * @pdev: timer platform device pointer * @source: array index of parent clock source */ static int omap2_dm_timer_set_src(struct platform_device *pdev, int source) { int ret; struct dmtimer_platform_data *pdata = pdev->dev.platform_data; struct clk *fclk, *parent; char *parent_name = NULL; fclk = clk_get(&pdev->dev, "fck"); if (IS_ERR_OR_NULL(fclk)) { dev_err(&pdev->dev, "%s: %d: clk_get() FAILED\n", __func__, __LINE__); return -EINVAL; } switch (source) { case OMAP_TIMER_SRC_SYS_CLK: parent_name = "sys_ck"; break; case OMAP_TIMER_SRC_32_KHZ: parent_name = "32k_ck"; break; case OMAP_TIMER_SRC_EXT_CLK: if (pdata->timer_ip_version == OMAP_TIMER_IP_VERSION_1) { parent_name = "alt_ck"; break; } dev_err(&pdev->dev, "%s: %d: invalid clk src.\n", __func__, __LINE__); clk_put(fclk); return -EINVAL; } parent = clk_get(&pdev->dev, parent_name); if (IS_ERR_OR_NULL(parent)) { dev_err(&pdev->dev, "%s: %d: clk_get() %s FAILED\n", __func__, __LINE__, parent_name); clk_put(fclk); return -EINVAL; } ret = clk_set_parent(fclk, 
parent); if (IS_ERR_VALUE(ret)) { dev_err(&pdev->dev, "%s: clk_set_parent() to %s FAILED\n", __func__, parent_name); ret = -EINVAL; } clk_put(parent); clk_put(fclk); return ret; } /** * omap_timer_init - build and register timer device with an * associated timer hwmod * @oh: timer hwmod pointer to be used to build timer device * @user: parameter that can be passed from calling hwmod API * * Called by omap_hwmod_for_each_by_class to register each of the timer * devices present in the system. The number of timer devices is known * by parsing through the hwmod database for a given class name. At the * end of function call memory is allocated for timer device and it is * registered to the framework ready to be proved by the driver. */ static int __init omap_timer_init(struct omap_hwmod *oh, void *unused) { int id; int ret = 0; char *name = "omap_timer"; struct dmtimer_platform_data *pdata; struct platform_device *pdev; struct omap_timer_capability_dev_attr *timer_dev_attr; struct powerdomain *pwrdm; pr_debug("%s: %s\n", __func__, oh->name); /* on secure device, do not register secure timer */ timer_dev_attr = oh->dev_attr; if (omap_type() != OMAP2_DEVICE_TYPE_GP && timer_dev_attr) if (timer_dev_attr->timer_capability == OMAP_TIMER_SECURE) return ret; pdata = kzalloc(sizeof(*pdata), GFP_KERNEL); if (!pdata) { pr_err("%s: No memory for [%s]\n", __func__, oh->name); return -ENOMEM; } /* * Extract the IDs from name field in hwmod database * and use the same for constructing ids' for the * timer devices. In a way, we are avoiding usage of * static variable witin the function to do the same. * CAUTION: We have to be careful and make sure the * name in hwmod database does not change in which case * we might either make corresponding change here or * switch back static variable mechanism. 
*/ sscanf(oh->name, "timer%2d", &id); pdata->set_timer_src = omap2_dm_timer_set_src; pdata->timer_ip_version = oh->class->rev; /* Mark clocksource and clockevent timers as reserved */ if ((sys_timer_reserved >> (id - 1)) & 0x1) pdata->reserved = 1; pwrdm = omap_hwmod_get_pwrdm(oh); pdata->loses_context = pwrdm_can_ever_lose_context(pwrdm); #ifdef CONFIG_PM pdata->get_context_loss_count = omap_pm_get_dev_context_loss_count; #endif pdev = omap_device_build(name, id, oh, pdata, sizeof(*pdata), NULL, 0, 0); if (IS_ERR(pdev)) { pr_err("%s: Can't build omap_device for %s: %s.\n", __func__, name, oh->name); ret = -EINVAL; } kfree(pdata); return ret; } /** * omap2_dm_timer_init - top level regular device initialization * * Uses dedicated hwmod api to parse through hwmod database for * given class name and then build and register the timer device. */ static int __init omap2_dm_timer_init(void) { int ret; ret = omap_hwmod_for_each_by_class("timer", omap_timer_init, NULL); if (unlikely(ret)) { pr_err("%s: device registration failed.\n", __func__); return -EINVAL; } return 0; } arch_initcall(omap2_dm_timer_init);
gpl-2.0
garwynn/SMN900P_MI3_Kernel
sound/soc/codecs/wm9081.c
4812
36643
/* * wm9081.c -- WM9081 ALSA SoC Audio driver * * Author: Mark Brown * * Copyright 2009 Wolfson Microelectronics plc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/regmap.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include <sound/wm9081.h> #include "wm9081.h" static struct reg_default wm9081_reg[] = { { 2, 0x00B9 }, /* R2 - Analogue Lineout */ { 3, 0x00B9 }, /* R3 - Analogue Speaker PGA */ { 4, 0x0001 }, /* R4 - VMID Control */ { 5, 0x0068 }, /* R5 - Bias Control 1 */ { 7, 0x0000 }, /* R7 - Analogue Mixer */ { 8, 0x0000 }, /* R8 - Anti Pop Control */ { 9, 0x01DB }, /* R9 - Analogue Speaker 1 */ { 10, 0x0018 }, /* R10 - Analogue Speaker 2 */ { 11, 0x0180 }, /* R11 - Power Management */ { 12, 0x0000 }, /* R12 - Clock Control 1 */ { 13, 0x0038 }, /* R13 - Clock Control 2 */ { 14, 0x4000 }, /* R14 - Clock Control 3 */ { 16, 0x0000 }, /* R16 - FLL Control 1 */ { 17, 0x0200 }, /* R17 - FLL Control 2 */ { 18, 0x0000 }, /* R18 - FLL Control 3 */ { 19, 0x0204 }, /* R19 - FLL Control 4 */ { 20, 0x0000 }, /* R20 - FLL Control 5 */ { 22, 0x0000 }, /* R22 - Audio Interface 1 */ { 23, 0x0002 }, /* R23 - Audio Interface 2 */ { 24, 0x0008 }, /* R24 - Audio Interface 3 */ { 25, 0x0022 }, /* R25 - Audio Interface 4 */ { 27, 0x0006 }, /* R27 - Interrupt Status Mask */ { 28, 0x0000 }, /* R28 - Interrupt Polarity */ { 29, 0x0000 }, /* R29 - Interrupt Control */ { 30, 0x00C0 }, /* R30 - DAC Digital 1 */ { 31, 0x0008 }, /* R31 - DAC Digital 2 */ { 32, 0x09AF }, /* R32 - DRC 1 */ { 33, 0x4201 }, /* R33 - DRC 2 */ { 34, 0x0000 
}, /* R34 - DRC 3 */ { 35, 0x0000 }, /* R35 - DRC 4 */ { 38, 0x0000 }, /* R38 - Write Sequencer 1 */ { 39, 0x0000 }, /* R39 - Write Sequencer 2 */ { 40, 0x0002 }, /* R40 - MW Slave 1 */ { 42, 0x0000 }, /* R42 - EQ 1 */ { 43, 0x0000 }, /* R43 - EQ 2 */ { 44, 0x0FCA }, /* R44 - EQ 3 */ { 45, 0x0400 }, /* R45 - EQ 4 */ { 46, 0x00B8 }, /* R46 - EQ 5 */ { 47, 0x1EB5 }, /* R47 - EQ 6 */ { 48, 0xF145 }, /* R48 - EQ 7 */ { 49, 0x0B75 }, /* R49 - EQ 8 */ { 50, 0x01C5 }, /* R50 - EQ 9 */ { 51, 0x169E }, /* R51 - EQ 10 */ { 52, 0xF829 }, /* R52 - EQ 11 */ { 53, 0x07AD }, /* R53 - EQ 12 */ { 54, 0x1103 }, /* R54 - EQ 13 */ { 55, 0x1C58 }, /* R55 - EQ 14 */ { 56, 0xF373 }, /* R56 - EQ 15 */ { 57, 0x0A54 }, /* R57 - EQ 16 */ { 58, 0x0558 }, /* R58 - EQ 17 */ { 59, 0x0564 }, /* R59 - EQ 18 */ { 60, 0x0559 }, /* R60 - EQ 19 */ { 61, 0x4000 }, /* R61 - EQ 20 */ }; static struct { int ratio; int clk_sys_rate; } clk_sys_rates[] = { { 64, 0 }, { 128, 1 }, { 192, 2 }, { 256, 3 }, { 384, 4 }, { 512, 5 }, { 768, 6 }, { 1024, 7 }, { 1408, 8 }, { 1536, 9 }, }; static struct { int rate; int sample_rate; } sample_rates[] = { { 8000, 0 }, { 11025, 1 }, { 12000, 2 }, { 16000, 3 }, { 22050, 4 }, { 24000, 5 }, { 32000, 6 }, { 44100, 7 }, { 48000, 8 }, { 88200, 9 }, { 96000, 10 }, }; static struct { int div; /* *10 due to .5s */ int bclk_div; } bclk_divs[] = { { 10, 0 }, { 15, 1 }, { 20, 2 }, { 30, 3 }, { 40, 4 }, { 50, 5 }, { 55, 6 }, { 60, 7 }, { 80, 8 }, { 100, 9 }, { 110, 10 }, { 120, 11 }, { 160, 12 }, { 200, 13 }, { 220, 14 }, { 240, 15 }, { 250, 16 }, { 300, 17 }, { 320, 18 }, { 440, 19 }, { 480, 20 }, }; struct wm9081_priv { struct regmap *regmap; int sysclk_source; int mclk_rate; int sysclk_rate; int fs; int bclk; int master; int fll_fref; int fll_fout; int tdm_width; struct wm9081_pdata pdata; }; static bool wm9081_volatile_register(struct device *dev, unsigned int reg) { switch (reg) { case WM9081_SOFTWARE_RESET: case WM9081_INTERRUPT_STATUS: return true; default: return false; } } 
static bool wm9081_readable_register(struct device *dev, unsigned int reg) { switch (reg) { case WM9081_SOFTWARE_RESET: case WM9081_ANALOGUE_LINEOUT: case WM9081_ANALOGUE_SPEAKER_PGA: case WM9081_VMID_CONTROL: case WM9081_BIAS_CONTROL_1: case WM9081_ANALOGUE_MIXER: case WM9081_ANTI_POP_CONTROL: case WM9081_ANALOGUE_SPEAKER_1: case WM9081_ANALOGUE_SPEAKER_2: case WM9081_POWER_MANAGEMENT: case WM9081_CLOCK_CONTROL_1: case WM9081_CLOCK_CONTROL_2: case WM9081_CLOCK_CONTROL_3: case WM9081_FLL_CONTROL_1: case WM9081_FLL_CONTROL_2: case WM9081_FLL_CONTROL_3: case WM9081_FLL_CONTROL_4: case WM9081_FLL_CONTROL_5: case WM9081_AUDIO_INTERFACE_1: case WM9081_AUDIO_INTERFACE_2: case WM9081_AUDIO_INTERFACE_3: case WM9081_AUDIO_INTERFACE_4: case WM9081_INTERRUPT_STATUS: case WM9081_INTERRUPT_STATUS_MASK: case WM9081_INTERRUPT_POLARITY: case WM9081_INTERRUPT_CONTROL: case WM9081_DAC_DIGITAL_1: case WM9081_DAC_DIGITAL_2: case WM9081_DRC_1: case WM9081_DRC_2: case WM9081_DRC_3: case WM9081_DRC_4: case WM9081_WRITE_SEQUENCER_1: case WM9081_WRITE_SEQUENCER_2: case WM9081_MW_SLAVE_1: case WM9081_EQ_1: case WM9081_EQ_2: case WM9081_EQ_3: case WM9081_EQ_4: case WM9081_EQ_5: case WM9081_EQ_6: case WM9081_EQ_7: case WM9081_EQ_8: case WM9081_EQ_9: case WM9081_EQ_10: case WM9081_EQ_11: case WM9081_EQ_12: case WM9081_EQ_13: case WM9081_EQ_14: case WM9081_EQ_15: case WM9081_EQ_16: case WM9081_EQ_17: case WM9081_EQ_18: case WM9081_EQ_19: case WM9081_EQ_20: return true; default: return false; } } static int wm9081_reset(struct regmap *map) { return regmap_write(map, WM9081_SOFTWARE_RESET, 0x9081); } static const DECLARE_TLV_DB_SCALE(drc_in_tlv, -4500, 75, 0); static const DECLARE_TLV_DB_SCALE(drc_out_tlv, -2250, 75, 0); static const DECLARE_TLV_DB_SCALE(drc_min_tlv, -1800, 600, 0); static unsigned int drc_max_tlv[] = { TLV_DB_RANGE_HEAD(4), 0, 0, TLV_DB_SCALE_ITEM(1200, 0, 0), 1, 1, TLV_DB_SCALE_ITEM(1800, 0, 0), 2, 2, TLV_DB_SCALE_ITEM(2400, 0, 0), 3, 3, TLV_DB_SCALE_ITEM(3600, 0, 0), }; static 
const DECLARE_TLV_DB_SCALE(drc_qr_tlv, 1200, 600, 0); static const DECLARE_TLV_DB_SCALE(drc_startup_tlv, -300, 50, 0); static const DECLARE_TLV_DB_SCALE(eq_tlv, -1200, 100, 0); static const DECLARE_TLV_DB_SCALE(in_tlv, -600, 600, 0); static const DECLARE_TLV_DB_SCALE(dac_tlv, -7200, 75, 1); static const DECLARE_TLV_DB_SCALE(out_tlv, -5700, 100, 0); static const char *drc_high_text[] = { "1", "1/2", "1/4", "1/8", "1/16", "0", }; static const struct soc_enum drc_high = SOC_ENUM_SINGLE(WM9081_DRC_3, 3, 6, drc_high_text); static const char *drc_low_text[] = { "1", "1/2", "1/4", "1/8", "0", }; static const struct soc_enum drc_low = SOC_ENUM_SINGLE(WM9081_DRC_3, 0, 5, drc_low_text); static const char *drc_atk_text[] = { "181us", "181us", "363us", "726us", "1.45ms", "2.9ms", "5.8ms", "11.6ms", "23.2ms", "46.4ms", "92.8ms", "185.6ms", }; static const struct soc_enum drc_atk = SOC_ENUM_SINGLE(WM9081_DRC_2, 12, 12, drc_atk_text); static const char *drc_dcy_text[] = { "186ms", "372ms", "743ms", "1.49s", "2.97s", "5.94s", "11.89s", "23.78s", "47.56s", }; static const struct soc_enum drc_dcy = SOC_ENUM_SINGLE(WM9081_DRC_2, 8, 9, drc_dcy_text); static const char *drc_qr_dcy_text[] = { "0.725ms", "1.45ms", "5.8ms", }; static const struct soc_enum drc_qr_dcy = SOC_ENUM_SINGLE(WM9081_DRC_2, 4, 3, drc_qr_dcy_text); static const char *dac_deemph_text[] = { "None", "32kHz", "44.1kHz", "48kHz", }; static const struct soc_enum dac_deemph = SOC_ENUM_SINGLE(WM9081_DAC_DIGITAL_2, 1, 4, dac_deemph_text); static const char *speaker_mode_text[] = { "Class D", "Class AB", }; static const struct soc_enum speaker_mode = SOC_ENUM_SINGLE(WM9081_ANALOGUE_SPEAKER_2, 6, 2, speaker_mode_text); static int speaker_mode_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int reg; reg = snd_soc_read(codec, WM9081_ANALOGUE_SPEAKER_2); if (reg & WM9081_SPK_MODE) ucontrol->value.integer.value[0] = 1; else 
ucontrol->value.integer.value[0] = 0; return 0; } /* * Stop any attempts to change speaker mode while the speaker is enabled. * * We also have some special anti-pop controls dependent on speaker * mode which must be changed along with the mode. */ static int speaker_mode_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); unsigned int reg_pwr = snd_soc_read(codec, WM9081_POWER_MANAGEMENT); unsigned int reg2 = snd_soc_read(codec, WM9081_ANALOGUE_SPEAKER_2); /* Are we changing anything? */ if (ucontrol->value.integer.value[0] == ((reg2 & WM9081_SPK_MODE) != 0)) return 0; /* Don't try to change modes while enabled */ if (reg_pwr & WM9081_SPK_ENA) return -EINVAL; if (ucontrol->value.integer.value[0]) { /* Class AB */ reg2 &= ~(WM9081_SPK_INV_MUTE | WM9081_OUT_SPK_CTRL); reg2 |= WM9081_SPK_MODE; } else { /* Class D */ reg2 |= WM9081_SPK_INV_MUTE | WM9081_OUT_SPK_CTRL; reg2 &= ~WM9081_SPK_MODE; } snd_soc_write(codec, WM9081_ANALOGUE_SPEAKER_2, reg2); return 0; } static const struct snd_kcontrol_new wm9081_snd_controls[] = { SOC_SINGLE_TLV("IN1 Volume", WM9081_ANALOGUE_MIXER, 1, 1, 1, in_tlv), SOC_SINGLE_TLV("IN2 Volume", WM9081_ANALOGUE_MIXER, 3, 1, 1, in_tlv), SOC_SINGLE_TLV("Playback Volume", WM9081_DAC_DIGITAL_1, 1, 96, 0, dac_tlv), SOC_SINGLE("LINEOUT Switch", WM9081_ANALOGUE_LINEOUT, 7, 1, 1), SOC_SINGLE("LINEOUT ZC Switch", WM9081_ANALOGUE_LINEOUT, 6, 1, 0), SOC_SINGLE_TLV("LINEOUT Volume", WM9081_ANALOGUE_LINEOUT, 0, 63, 0, out_tlv), SOC_SINGLE("DRC Switch", WM9081_DRC_1, 15, 1, 0), SOC_ENUM("DRC High Slope", drc_high), SOC_ENUM("DRC Low Slope", drc_low), SOC_SINGLE_TLV("DRC Input Volume", WM9081_DRC_4, 5, 60, 1, drc_in_tlv), SOC_SINGLE_TLV("DRC Output Volume", WM9081_DRC_4, 0, 30, 1, drc_out_tlv), SOC_SINGLE_TLV("DRC Minimum Volume", WM9081_DRC_2, 2, 3, 1, drc_min_tlv), SOC_SINGLE_TLV("DRC Maximum Volume", WM9081_DRC_2, 0, 3, 0, drc_max_tlv), SOC_ENUM("DRC Attack", drc_atk), 
SOC_ENUM("DRC Decay", drc_dcy), SOC_SINGLE("DRC Quick Release Switch", WM9081_DRC_1, 2, 1, 0), SOC_SINGLE_TLV("DRC Quick Release Volume", WM9081_DRC_2, 6, 3, 0, drc_qr_tlv), SOC_ENUM("DRC Quick Release Decay", drc_qr_dcy), SOC_SINGLE_TLV("DRC Startup Volume", WM9081_DRC_1, 6, 18, 0, drc_startup_tlv), SOC_SINGLE("EQ Switch", WM9081_EQ_1, 0, 1, 0), SOC_SINGLE("Speaker DC Volume", WM9081_ANALOGUE_SPEAKER_1, 3, 5, 0), SOC_SINGLE("Speaker AC Volume", WM9081_ANALOGUE_SPEAKER_1, 0, 5, 0), SOC_SINGLE("Speaker Switch", WM9081_ANALOGUE_SPEAKER_PGA, 7, 1, 1), SOC_SINGLE("Speaker ZC Switch", WM9081_ANALOGUE_SPEAKER_PGA, 6, 1, 0), SOC_SINGLE_TLV("Speaker Volume", WM9081_ANALOGUE_SPEAKER_PGA, 0, 63, 0, out_tlv), SOC_ENUM("DAC Deemphasis", dac_deemph), SOC_ENUM_EXT("Speaker Mode", speaker_mode, speaker_mode_get, speaker_mode_put), }; static const struct snd_kcontrol_new wm9081_eq_controls[] = { SOC_SINGLE_TLV("EQ1 Volume", WM9081_EQ_1, 11, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ2 Volume", WM9081_EQ_1, 6, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ3 Volume", WM9081_EQ_1, 1, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ4 Volume", WM9081_EQ_2, 11, 24, 0, eq_tlv), SOC_SINGLE_TLV("EQ5 Volume", WM9081_EQ_2, 6, 24, 0, eq_tlv), }; static const struct snd_kcontrol_new mixer[] = { SOC_DAPM_SINGLE("IN1 Switch", WM9081_ANALOGUE_MIXER, 0, 1, 0), SOC_DAPM_SINGLE("IN2 Switch", WM9081_ANALOGUE_MIXER, 2, 1, 0), SOC_DAPM_SINGLE("Playback Switch", WM9081_ANALOGUE_MIXER, 4, 1, 0), }; struct _fll_div { u16 fll_fratio; u16 fll_outdiv; u16 fll_clk_ref_div; u16 n; u16 k; }; /* The size in bits of the FLL divide multiplied by 10 * to allow rounding later */ #define FIXED_FLL_SIZE ((1 << 16) * 10) static struct { unsigned int min; unsigned int max; u16 fll_fratio; int ratio; } fll_fratios[] = { { 0, 64000, 4, 16 }, { 64000, 128000, 3, 8 }, { 128000, 256000, 2, 4 }, { 256000, 1000000, 1, 2 }, { 1000000, 13500000, 0, 1 }, }; static int fll_factors(struct _fll_div *fll_div, unsigned int Fref, unsigned int Fout) { u64 Kpart; unsigned 
int K, Ndiv, Nmod, target; unsigned int div; int i; /* Fref must be <=13.5MHz */ div = 1; while ((Fref / div) > 13500000) { div *= 2; if (div > 8) { pr_err("Can't scale %dMHz input down to <=13.5MHz\n", Fref); return -EINVAL; } } fll_div->fll_clk_ref_div = div / 2; pr_debug("Fref=%u Fout=%u\n", Fref, Fout); /* Apply the division for our remaining calculations */ Fref /= div; /* Fvco should be 90-100MHz; don't check the upper bound */ div = 0; target = Fout * 2; while (target < 90000000) { div++; target *= 2; if (div > 7) { pr_err("Unable to find FLL_OUTDIV for Fout=%uHz\n", Fout); return -EINVAL; } } fll_div->fll_outdiv = div; pr_debug("Fvco=%dHz\n", target); /* Find an appropriate FLL_FRATIO and factor it out of the target */ for (i = 0; i < ARRAY_SIZE(fll_fratios); i++) { if (fll_fratios[i].min <= Fref && Fref <= fll_fratios[i].max) { fll_div->fll_fratio = fll_fratios[i].fll_fratio; target /= fll_fratios[i].ratio; break; } } if (i == ARRAY_SIZE(fll_fratios)) { pr_err("Unable to find FLL_FRATIO for Fref=%uHz\n", Fref); return -EINVAL; } /* Now, calculate N.K */ Ndiv = target / Fref; fll_div->n = Ndiv; Nmod = target % Fref; pr_debug("Nmod=%d\n", Nmod); /* Calculate fractional part - scale up so we can round. */ Kpart = FIXED_FLL_SIZE * (long long)Nmod; do_div(Kpart, Fref); K = Kpart & 0xFFFFFFFF; if ((K % 10) >= 5) K += 5; /* Move down to proper range now rounding is done */ fll_div->k = K / 10; pr_debug("N=%x K=%x FLL_FRATIO=%x FLL_OUTDIV=%x FLL_CLK_REF_DIV=%x\n", fll_div->n, fll_div->k, fll_div->fll_fratio, fll_div->fll_outdiv, fll_div->fll_clk_ref_div); return 0; } static int wm9081_set_fll(struct snd_soc_codec *codec, int fll_id, unsigned int Fref, unsigned int Fout) { struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec); u16 reg1, reg4, reg5; struct _fll_div fll_div; int ret; int clk_sys_reg; /* Any change? 
*/ if (Fref == wm9081->fll_fref && Fout == wm9081->fll_fout) return 0; /* Disable the FLL */ if (Fout == 0) { dev_dbg(codec->dev, "FLL disabled\n"); wm9081->fll_fref = 0; wm9081->fll_fout = 0; return 0; } ret = fll_factors(&fll_div, Fref, Fout); if (ret != 0) return ret; reg5 = snd_soc_read(codec, WM9081_FLL_CONTROL_5); reg5 &= ~WM9081_FLL_CLK_SRC_MASK; switch (fll_id) { case WM9081_SYSCLK_FLL_MCLK: reg5 |= 0x1; break; default: dev_err(codec->dev, "Unknown FLL ID %d\n", fll_id); return -EINVAL; } /* Disable CLK_SYS while we reconfigure */ clk_sys_reg = snd_soc_read(codec, WM9081_CLOCK_CONTROL_3); if (clk_sys_reg & WM9081_CLK_SYS_ENA) snd_soc_write(codec, WM9081_CLOCK_CONTROL_3, clk_sys_reg & ~WM9081_CLK_SYS_ENA); /* Any FLL configuration change requires that the FLL be * disabled first. */ reg1 = snd_soc_read(codec, WM9081_FLL_CONTROL_1); reg1 &= ~WM9081_FLL_ENA; snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1); /* Apply the configuration */ if (fll_div.k) reg1 |= WM9081_FLL_FRAC_MASK; else reg1 &= ~WM9081_FLL_FRAC_MASK; snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1); snd_soc_write(codec, WM9081_FLL_CONTROL_2, (fll_div.fll_outdiv << WM9081_FLL_OUTDIV_SHIFT) | (fll_div.fll_fratio << WM9081_FLL_FRATIO_SHIFT)); snd_soc_write(codec, WM9081_FLL_CONTROL_3, fll_div.k); reg4 = snd_soc_read(codec, WM9081_FLL_CONTROL_4); reg4 &= ~WM9081_FLL_N_MASK; reg4 |= fll_div.n << WM9081_FLL_N_SHIFT; snd_soc_write(codec, WM9081_FLL_CONTROL_4, reg4); reg5 &= ~WM9081_FLL_CLK_REF_DIV_MASK; reg5 |= fll_div.fll_clk_ref_div << WM9081_FLL_CLK_REF_DIV_SHIFT; snd_soc_write(codec, WM9081_FLL_CONTROL_5, reg5); /* Set gain to the recommended value */ snd_soc_update_bits(codec, WM9081_FLL_CONTROL_4, WM9081_FLL_GAIN_MASK, 0); /* Enable the FLL */ snd_soc_write(codec, WM9081_FLL_CONTROL_1, reg1 | WM9081_FLL_ENA); /* Then bring CLK_SYS up again if it was disabled */ if (clk_sys_reg & WM9081_CLK_SYS_ENA) snd_soc_write(codec, WM9081_CLOCK_CONTROL_3, clk_sys_reg); dev_dbg(codec->dev, "FLL enabled at 
%dHz->%dHz\n", Fref, Fout); wm9081->fll_fref = Fref; wm9081->fll_fout = Fout; return 0; } static int configure_clock(struct snd_soc_codec *codec) { struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec); int new_sysclk, i, target; unsigned int reg; int ret = 0; int mclkdiv = 0; int fll = 0; switch (wm9081->sysclk_source) { case WM9081_SYSCLK_MCLK: if (wm9081->mclk_rate > 12225000) { mclkdiv = 1; wm9081->sysclk_rate = wm9081->mclk_rate / 2; } else { wm9081->sysclk_rate = wm9081->mclk_rate; } wm9081_set_fll(codec, WM9081_SYSCLK_FLL_MCLK, 0, 0); break; case WM9081_SYSCLK_FLL_MCLK: /* If we have a sample rate calculate a CLK_SYS that * gives us a suitable DAC configuration, plus BCLK. * Ideally we would check to see if we can clock * directly from MCLK and only use the FLL if this is * not the case, though care must be taken with free * running mode. */ if (wm9081->master && wm9081->bclk) { /* Make sure we can generate CLK_SYS and BCLK * and that we've got 3MHz for optimal * performance. 
*/ for (i = 0; i < ARRAY_SIZE(clk_sys_rates); i++) { target = wm9081->fs * clk_sys_rates[i].ratio; new_sysclk = target; if (target >= wm9081->bclk && target > 3000000) break; } if (i == ARRAY_SIZE(clk_sys_rates)) return -EINVAL; } else if (wm9081->fs) { for (i = 0; i < ARRAY_SIZE(clk_sys_rates); i++) { new_sysclk = clk_sys_rates[i].ratio * wm9081->fs; if (new_sysclk > 3000000) break; } if (i == ARRAY_SIZE(clk_sys_rates)) return -EINVAL; } else { new_sysclk = 12288000; } ret = wm9081_set_fll(codec, WM9081_SYSCLK_FLL_MCLK, wm9081->mclk_rate, new_sysclk); if (ret == 0) { wm9081->sysclk_rate = new_sysclk; /* Switch SYSCLK over to FLL */ fll = 1; } else { wm9081->sysclk_rate = wm9081->mclk_rate; } break; default: return -EINVAL; } reg = snd_soc_read(codec, WM9081_CLOCK_CONTROL_1); if (mclkdiv) reg |= WM9081_MCLKDIV2; else reg &= ~WM9081_MCLKDIV2; snd_soc_write(codec, WM9081_CLOCK_CONTROL_1, reg); reg = snd_soc_read(codec, WM9081_CLOCK_CONTROL_3); if (fll) reg |= WM9081_CLK_SRC_SEL; else reg &= ~WM9081_CLK_SRC_SEL; snd_soc_write(codec, WM9081_CLOCK_CONTROL_3, reg); dev_dbg(codec->dev, "CLK_SYS is %dHz\n", wm9081->sysclk_rate); return ret; } static int clk_sys_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { struct snd_soc_codec *codec = w->codec; struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec); /* This should be done on init() for bypass paths */ switch (wm9081->sysclk_source) { case WM9081_SYSCLK_MCLK: dev_dbg(codec->dev, "Using %dHz MCLK\n", wm9081->mclk_rate); break; case WM9081_SYSCLK_FLL_MCLK: dev_dbg(codec->dev, "Using %dHz MCLK with FLL\n", wm9081->mclk_rate); break; default: dev_err(codec->dev, "System clock not configured\n"); return -EINVAL; } switch (event) { case SND_SOC_DAPM_PRE_PMU: configure_clock(codec); break; case SND_SOC_DAPM_POST_PMD: /* Disable the FLL if it's running */ wm9081_set_fll(codec, 0, 0, 0); break; } return 0; } static const struct snd_soc_dapm_widget wm9081_dapm_widgets[] = { 
SND_SOC_DAPM_INPUT("IN1"), SND_SOC_DAPM_INPUT("IN2"), SND_SOC_DAPM_DAC("DAC", "HiFi Playback", WM9081_POWER_MANAGEMENT, 0, 0), SND_SOC_DAPM_MIXER_NAMED_CTL("Mixer", SND_SOC_NOPM, 0, 0, mixer, ARRAY_SIZE(mixer)), SND_SOC_DAPM_PGA("LINEOUT PGA", WM9081_POWER_MANAGEMENT, 4, 0, NULL, 0), SND_SOC_DAPM_PGA("Speaker PGA", WM9081_POWER_MANAGEMENT, 2, 0, NULL, 0), SND_SOC_DAPM_OUT_DRV("Speaker", WM9081_POWER_MANAGEMENT, 1, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("LINEOUT"), SND_SOC_DAPM_OUTPUT("SPKN"), SND_SOC_DAPM_OUTPUT("SPKP"), SND_SOC_DAPM_SUPPLY("CLK_SYS", WM9081_CLOCK_CONTROL_3, 0, 0, clk_sys_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD), SND_SOC_DAPM_SUPPLY("CLK_DSP", WM9081_CLOCK_CONTROL_3, 1, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("TOCLK", WM9081_CLOCK_CONTROL_3, 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("TSENSE", WM9081_POWER_MANAGEMENT, 7, 0, NULL, 0), }; static const struct snd_soc_dapm_route wm9081_audio_paths[] = { { "DAC", NULL, "CLK_SYS" }, { "DAC", NULL, "CLK_DSP" }, { "Mixer", "IN1 Switch", "IN1" }, { "Mixer", "IN2 Switch", "IN2" }, { "Mixer", "Playback Switch", "DAC" }, { "LINEOUT PGA", NULL, "Mixer" }, { "LINEOUT PGA", NULL, "TOCLK" }, { "LINEOUT PGA", NULL, "CLK_SYS" }, { "LINEOUT", NULL, "LINEOUT PGA" }, { "Speaker PGA", NULL, "Mixer" }, { "Speaker PGA", NULL, "TOCLK" }, { "Speaker PGA", NULL, "CLK_SYS" }, { "Speaker", NULL, "Speaker PGA" }, { "Speaker", NULL, "TSENSE" }, { "SPKN", NULL, "Speaker" }, { "SPKP", NULL, "Speaker" }, }; static int wm9081_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec); switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: /* VMID=2*40k */ snd_soc_update_bits(codec, WM9081_VMID_CONTROL, WM9081_VMID_SEL_MASK, 0x2); /* Normal bias current */ snd_soc_update_bits(codec, WM9081_BIAS_CONTROL_1, WM9081_STBY_BIAS_ENA, 0); break; case SND_SOC_BIAS_STANDBY: /* Initial cold start */ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { 
regcache_cache_only(wm9081->regmap, false); regcache_sync(wm9081->regmap); /* Disable LINEOUT discharge */ snd_soc_update_bits(codec, WM9081_ANTI_POP_CONTROL, WM9081_LINEOUT_DISCH, 0); /* Select startup bias source */ snd_soc_update_bits(codec, WM9081_BIAS_CONTROL_1, WM9081_BIAS_SRC | WM9081_BIAS_ENA, WM9081_BIAS_SRC | WM9081_BIAS_ENA); /* VMID 2*4k; Soft VMID ramp enable */ snd_soc_update_bits(codec, WM9081_VMID_CONTROL, WM9081_VMID_RAMP | WM9081_VMID_SEL_MASK, WM9081_VMID_RAMP | 0x6); mdelay(100); /* Normal bias enable & soft start off */ snd_soc_update_bits(codec, WM9081_VMID_CONTROL, WM9081_VMID_RAMP, 0); /* Standard bias source */ snd_soc_update_bits(codec, WM9081_BIAS_CONTROL_1, WM9081_BIAS_SRC, 0); } /* VMID 2*240k */ snd_soc_update_bits(codec, WM9081_VMID_CONTROL, WM9081_VMID_SEL_MASK, 0x04); /* Standby bias current on */ snd_soc_update_bits(codec, WM9081_BIAS_CONTROL_1, WM9081_STBY_BIAS_ENA, WM9081_STBY_BIAS_ENA); break; case SND_SOC_BIAS_OFF: /* Startup bias source and disable bias */ snd_soc_update_bits(codec, WM9081_BIAS_CONTROL_1, WM9081_BIAS_SRC | WM9081_BIAS_ENA, WM9081_BIAS_SRC); /* Disable VMID with soft ramping */ snd_soc_update_bits(codec, WM9081_VMID_CONTROL, WM9081_VMID_RAMP | WM9081_VMID_SEL_MASK, WM9081_VMID_RAMP); /* Actively discharge LINEOUT */ snd_soc_update_bits(codec, WM9081_ANTI_POP_CONTROL, WM9081_LINEOUT_DISCH, WM9081_LINEOUT_DISCH); regcache_cache_only(wm9081->regmap, true); break; } codec->dapm.bias_level = level; return 0; } static int wm9081_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt) { struct snd_soc_codec *codec = dai->codec; struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec); unsigned int aif2 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_2); aif2 &= ~(WM9081_AIF_BCLK_INV | WM9081_AIF_LRCLK_INV | WM9081_BCLK_DIR | WM9081_LRCLK_DIR | WM9081_AIF_FMT_MASK); switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: wm9081->master = 0; break; case SND_SOC_DAIFMT_CBS_CFM: aif2 |= WM9081_LRCLK_DIR; 
wm9081->master = 1; break; case SND_SOC_DAIFMT_CBM_CFS: aif2 |= WM9081_BCLK_DIR; wm9081->master = 1; break; case SND_SOC_DAIFMT_CBM_CFM: aif2 |= WM9081_LRCLK_DIR | WM9081_BCLK_DIR; wm9081->master = 1; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_B: aif2 |= WM9081_AIF_LRCLK_INV; case SND_SOC_DAIFMT_DSP_A: aif2 |= 0x3; break; case SND_SOC_DAIFMT_I2S: aif2 |= 0x2; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: aif2 |= 0x1; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_DSP_A: case SND_SOC_DAIFMT_DSP_B: /* frame inversion not valid for DSP modes */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_NF: aif2 |= WM9081_AIF_BCLK_INV; break; default: return -EINVAL; } break; case SND_SOC_DAIFMT_I2S: case SND_SOC_DAIFMT_RIGHT_J: case SND_SOC_DAIFMT_LEFT_J: switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: aif2 |= WM9081_AIF_BCLK_INV | WM9081_AIF_LRCLK_INV; break; case SND_SOC_DAIFMT_IB_NF: aif2 |= WM9081_AIF_BCLK_INV; break; case SND_SOC_DAIFMT_NB_IF: aif2 |= WM9081_AIF_LRCLK_INV; break; default: return -EINVAL; } break; default: return -EINVAL; } snd_soc_write(codec, WM9081_AUDIO_INTERFACE_2, aif2); return 0; } static int wm9081_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec); int ret, i, best, best_val, cur_val; unsigned int clk_ctrl2, aif1, aif2, aif3, aif4; clk_ctrl2 = snd_soc_read(codec, WM9081_CLOCK_CONTROL_2); clk_ctrl2 &= ~(WM9081_CLK_SYS_RATE_MASK | WM9081_SAMPLE_RATE_MASK); aif1 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_1); aif2 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_2); aif2 &= ~WM9081_AIF_WL_MASK; aif3 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_3); aif3 &= 
~WM9081_BCLK_DIV_MASK; aif4 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_4); aif4 &= ~WM9081_LRCLK_RATE_MASK; wm9081->fs = params_rate(params); if (wm9081->tdm_width) { /* If TDM is set up then that fixes our BCLK. */ int slots = ((aif1 & WM9081_AIFDAC_TDM_MODE_MASK) >> WM9081_AIFDAC_TDM_MODE_SHIFT) + 1; wm9081->bclk = wm9081->fs * wm9081->tdm_width * slots; } else { /* Otherwise work out a BCLK from the sample size */ wm9081->bclk = 2 * wm9081->fs; switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: wm9081->bclk *= 16; break; case SNDRV_PCM_FORMAT_S20_3LE: wm9081->bclk *= 20; aif2 |= 0x4; break; case SNDRV_PCM_FORMAT_S24_LE: wm9081->bclk *= 24; aif2 |= 0x8; break; case SNDRV_PCM_FORMAT_S32_LE: wm9081->bclk *= 32; aif2 |= 0xc; break; default: return -EINVAL; } } dev_dbg(codec->dev, "Target BCLK is %dHz\n", wm9081->bclk); ret = configure_clock(codec); if (ret != 0) return ret; /* Select nearest CLK_SYS_RATE */ best = 0; best_val = abs((wm9081->sysclk_rate / clk_sys_rates[0].ratio) - wm9081->fs); for (i = 1; i < ARRAY_SIZE(clk_sys_rates); i++) { cur_val = abs((wm9081->sysclk_rate / clk_sys_rates[i].ratio) - wm9081->fs); if (cur_val < best_val) { best = i; best_val = cur_val; } } dev_dbg(codec->dev, "Selected CLK_SYS_RATIO of %d\n", clk_sys_rates[best].ratio); clk_ctrl2 |= (clk_sys_rates[best].clk_sys_rate << WM9081_CLK_SYS_RATE_SHIFT); /* SAMPLE_RATE */ best = 0; best_val = abs(wm9081->fs - sample_rates[0].rate); for (i = 1; i < ARRAY_SIZE(sample_rates); i++) { /* Closest match */ cur_val = abs(wm9081->fs - sample_rates[i].rate); if (cur_val < best_val) { best = i; best_val = cur_val; } } dev_dbg(codec->dev, "Selected SAMPLE_RATE of %dHz\n", sample_rates[best].rate); clk_ctrl2 |= (sample_rates[best].sample_rate << WM9081_SAMPLE_RATE_SHIFT); /* BCLK_DIV */ best = 0; best_val = INT_MAX; for (i = 0; i < ARRAY_SIZE(bclk_divs); i++) { cur_val = ((wm9081->sysclk_rate * 10) / bclk_divs[i].div) - wm9081->bclk; if (cur_val < 0) /* Table is sorted */ break; if 
(cur_val < best_val) { best = i; best_val = cur_val; } } wm9081->bclk = (wm9081->sysclk_rate * 10) / bclk_divs[best].div; dev_dbg(codec->dev, "Selected BCLK_DIV of %d for %dHz BCLK\n", bclk_divs[best].div, wm9081->bclk); aif3 |= bclk_divs[best].bclk_div; /* LRCLK is a simple fraction of BCLK */ dev_dbg(codec->dev, "LRCLK_RATE is %d\n", wm9081->bclk / wm9081->fs); aif4 |= wm9081->bclk / wm9081->fs; /* Apply a ReTune Mobile configuration if it's in use */ if (wm9081->pdata.num_retune_configs) { struct wm9081_pdata *pdata = &wm9081->pdata; struct wm9081_retune_mobile_setting *s; int eq1; best = 0; best_val = abs(pdata->retune_configs[0].rate - wm9081->fs); for (i = 0; i < pdata->num_retune_configs; i++) { cur_val = abs(pdata->retune_configs[i].rate - wm9081->fs); if (cur_val < best_val) { best_val = cur_val; best = i; } } s = &pdata->retune_configs[best]; dev_dbg(codec->dev, "ReTune Mobile %s tuned for %dHz\n", s->name, s->rate); /* If the EQ is enabled then disable it while we write out */ eq1 = snd_soc_read(codec, WM9081_EQ_1) & WM9081_EQ_ENA; if (eq1 & WM9081_EQ_ENA) snd_soc_write(codec, WM9081_EQ_1, 0); /* Write out the other values */ for (i = 1; i < ARRAY_SIZE(s->config); i++) snd_soc_write(codec, WM9081_EQ_1 + i, s->config[i]); eq1 |= (s->config[0] & ~WM9081_EQ_ENA); snd_soc_write(codec, WM9081_EQ_1, eq1); } snd_soc_write(codec, WM9081_CLOCK_CONTROL_2, clk_ctrl2); snd_soc_write(codec, WM9081_AUDIO_INTERFACE_2, aif2); snd_soc_write(codec, WM9081_AUDIO_INTERFACE_3, aif3); snd_soc_write(codec, WM9081_AUDIO_INTERFACE_4, aif4); return 0; } static int wm9081_digital_mute(struct snd_soc_dai *codec_dai, int mute) { struct snd_soc_codec *codec = codec_dai->codec; unsigned int reg; reg = snd_soc_read(codec, WM9081_DAC_DIGITAL_2); if (mute) reg |= WM9081_DAC_MUTE; else reg &= ~WM9081_DAC_MUTE; snd_soc_write(codec, WM9081_DAC_DIGITAL_2, reg); return 0; } static int wm9081_set_sysclk(struct snd_soc_codec *codec, int clk_id, int source, unsigned int freq, int dir) { struct 
wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec); switch (clk_id) { case WM9081_SYSCLK_MCLK: case WM9081_SYSCLK_FLL_MCLK: wm9081->sysclk_source = clk_id; wm9081->mclk_rate = freq; break; default: return -EINVAL; } return 0; } static int wm9081_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int slot_width) { struct snd_soc_codec *codec = dai->codec; struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec); unsigned int aif1 = snd_soc_read(codec, WM9081_AUDIO_INTERFACE_1); aif1 &= ~(WM9081_AIFDAC_TDM_SLOT_MASK | WM9081_AIFDAC_TDM_MODE_MASK); if (slots < 0 || slots > 4) return -EINVAL; wm9081->tdm_width = slot_width; if (slots == 0) slots = 1; aif1 |= (slots - 1) << WM9081_AIFDAC_TDM_MODE_SHIFT; switch (rx_mask) { case 1: break; case 2: aif1 |= 0x10; break; case 4: aif1 |= 0x20; break; case 8: aif1 |= 0x30; break; default: return -EINVAL; } snd_soc_write(codec, WM9081_AUDIO_INTERFACE_1, aif1); return 0; } #define WM9081_RATES SNDRV_PCM_RATE_8000_96000 #define WM9081_FORMATS \ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \ SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE) static const struct snd_soc_dai_ops wm9081_dai_ops = { .hw_params = wm9081_hw_params, .set_fmt = wm9081_set_dai_fmt, .digital_mute = wm9081_digital_mute, .set_tdm_slot = wm9081_set_tdm_slot, }; /* We report two channels because the CODEC processes a stereo signal, even * though it is only capable of handling a mono output. 
*/ static struct snd_soc_dai_driver wm9081_dai = { .name = "wm9081-hifi", .playback = { .stream_name = "HiFi Playback", .channels_min = 1, .channels_max = 2, .rates = WM9081_RATES, .formats = WM9081_FORMATS, }, .ops = &wm9081_dai_ops, }; static int wm9081_probe(struct snd_soc_codec *codec) { struct wm9081_priv *wm9081 = snd_soc_codec_get_drvdata(codec); int ret; codec->control_data = wm9081->regmap; ret = snd_soc_codec_set_cache_io(codec, 8, 16, SND_SOC_REGMAP); if (ret != 0) { dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret); return ret; } /* Enable zero cross by default */ snd_soc_update_bits(codec, WM9081_ANALOGUE_LINEOUT, WM9081_LINEOUTZC, WM9081_LINEOUTZC); snd_soc_update_bits(codec, WM9081_ANALOGUE_SPEAKER_PGA, WM9081_SPKPGAZC, WM9081_SPKPGAZC); if (!wm9081->pdata.num_retune_configs) { dev_dbg(codec->dev, "No ReTune Mobile data, using normal EQ\n"); snd_soc_add_codec_controls(codec, wm9081_eq_controls, ARRAY_SIZE(wm9081_eq_controls)); } return ret; } static int wm9081_remove(struct snd_soc_codec *codec) { wm9081_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm9081 = { .probe = wm9081_probe, .remove = wm9081_remove, .set_sysclk = wm9081_set_sysclk, .set_bias_level = wm9081_set_bias_level, .idle_bias_off = true, .controls = wm9081_snd_controls, .num_controls = ARRAY_SIZE(wm9081_snd_controls), .dapm_widgets = wm9081_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm9081_dapm_widgets), .dapm_routes = wm9081_audio_paths, .num_dapm_routes = ARRAY_SIZE(wm9081_audio_paths), }; static const struct regmap_config wm9081_regmap = { .reg_bits = 8, .val_bits = 16, .max_register = WM9081_MAX_REGISTER, .reg_defaults = wm9081_reg, .num_reg_defaults = ARRAY_SIZE(wm9081_reg), .volatile_reg = wm9081_volatile_register, .readable_reg = wm9081_readable_register, .cache_type = REGCACHE_RBTREE, }; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static __devinit int wm9081_i2c_probe(struct i2c_client *i2c, const 
struct i2c_device_id *id) { struct wm9081_priv *wm9081; unsigned int reg; int ret; wm9081 = devm_kzalloc(&i2c->dev, sizeof(struct wm9081_priv), GFP_KERNEL); if (wm9081 == NULL) return -ENOMEM; i2c_set_clientdata(i2c, wm9081); wm9081->regmap = regmap_init_i2c(i2c, &wm9081_regmap); if (IS_ERR(wm9081->regmap)) { ret = PTR_ERR(wm9081->regmap); dev_err(&i2c->dev, "regmap_init() failed: %d\n", ret); goto err; } ret = regmap_read(wm9081->regmap, WM9081_SOFTWARE_RESET, &reg); if (ret != 0) { dev_err(&i2c->dev, "Failed to read chip ID: %d\n", ret); goto err_regmap; } if (reg != 0x9081) { dev_err(&i2c->dev, "Device is not a WM9081: ID=0x%x\n", reg); ret = -EINVAL; goto err_regmap; } ret = wm9081_reset(wm9081->regmap); if (ret < 0) { dev_err(&i2c->dev, "Failed to issue reset\n"); goto err_regmap; } if (dev_get_platdata(&i2c->dev)) memcpy(&wm9081->pdata, dev_get_platdata(&i2c->dev), sizeof(wm9081->pdata)); reg = 0; if (wm9081->pdata.irq_high) reg |= WM9081_IRQ_POL; if (!wm9081->pdata.irq_cmos) reg |= WM9081_IRQ_OP_CTRL; regmap_update_bits(wm9081->regmap, WM9081_INTERRUPT_CONTROL, WM9081_IRQ_POL | WM9081_IRQ_OP_CTRL, reg); regcache_cache_only(wm9081->regmap, true); ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm9081, &wm9081_dai, 1); if (ret < 0) goto err_regmap; return 0; err_regmap: regmap_exit(wm9081->regmap); err: return ret; } static __devexit int wm9081_i2c_remove(struct i2c_client *client) { struct wm9081_priv *wm9081 = i2c_get_clientdata(client); snd_soc_unregister_codec(&client->dev); regmap_exit(wm9081->regmap); return 0; } static const struct i2c_device_id wm9081_i2c_id[] = { { "wm9081", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm9081_i2c_id); static struct i2c_driver wm9081_i2c_driver = { .driver = { .name = "wm9081", .owner = THIS_MODULE, }, .probe = wm9081_i2c_probe, .remove = __devexit_p(wm9081_i2c_remove), .id_table = wm9081_i2c_id, }; #endif module_i2c_driver(wm9081_i2c_driver); MODULE_DESCRIPTION("ASoC WM9081 driver"); MODULE_AUTHOR("Mark Brown 
<broonie@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL");
gpl-2.0
Tommy-Geenexus/android_kernel_sony_apq8064_yuga_5.x
fs/xfs/xfs_qm_bhv.c
4812
4577
/* * Copyright (c) 2000-2006 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" #include "xfs_itable.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_qm.h" STATIC void xfs_fill_statvfs_from_dquot( struct kstatfs *statp, struct xfs_dquot *dqp) { __uint64_t limit; limit = dqp->q_core.d_blk_softlimit ? be64_to_cpu(dqp->q_core.d_blk_softlimit) : be64_to_cpu(dqp->q_core.d_blk_hardlimit); if (limit && statp->f_blocks > limit) { statp->f_blocks = limit; statp->f_bfree = statp->f_bavail = (statp->f_blocks > dqp->q_res_bcount) ? (statp->f_blocks - dqp->q_res_bcount) : 0; } limit = dqp->q_core.d_ino_softlimit ? be64_to_cpu(dqp->q_core.d_ino_softlimit) : be64_to_cpu(dqp->q_core.d_ino_hardlimit); if (limit && statp->f_files > limit) { statp->f_files = limit; statp->f_ffree = (statp->f_files > dqp->q_res_icount) ? (statp->f_ffree - dqp->q_res_icount) : 0; } } /* * Directory tree accounting is implemented using project quotas, where * the project identifier is inherited from parent directories. 
* A statvfs (df, etc.) of a directory that is using project quota should * return a statvfs of the project, not the entire filesystem. * This makes such trees appear as if they are filesystems in themselves. */ void xfs_qm_statvfs( xfs_inode_t *ip, struct kstatfs *statp) { xfs_mount_t *mp = ip->i_mount; xfs_dquot_t *dqp; if (!xfs_qm_dqget(mp, NULL, xfs_get_projid(ip), XFS_DQ_PROJ, 0, &dqp)) { xfs_fill_statvfs_from_dquot(statp, dqp); xfs_qm_dqput(dqp); } } int xfs_qm_newmount( xfs_mount_t *mp, uint *needquotamount, uint *quotaflags) { uint quotaondisk; uint uquotaondisk = 0, gquotaondisk = 0, pquotaondisk = 0; quotaondisk = xfs_sb_version_hasquota(&mp->m_sb) && (mp->m_sb.sb_qflags & XFS_ALL_QUOTA_ACCT); if (quotaondisk) { uquotaondisk = mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT; pquotaondisk = mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT; gquotaondisk = mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT; } /* * If the device itself is read-only, we can't allow * the user to change the state of quota on the mount - * this would generate a transaction on the ro device, * which would lead to an I/O error and shutdown */ if (((uquotaondisk && !XFS_IS_UQUOTA_ON(mp)) || (!uquotaondisk && XFS_IS_UQUOTA_ON(mp)) || (pquotaondisk && !XFS_IS_PQUOTA_ON(mp)) || (!pquotaondisk && XFS_IS_PQUOTA_ON(mp)) || (gquotaondisk && !XFS_IS_GQUOTA_ON(mp)) || (!gquotaondisk && XFS_IS_OQUOTA_ON(mp))) && xfs_dev_is_read_only(mp, "changing quota state")) { xfs_warn(mp, "please mount with%s%s%s%s.", (!quotaondisk ? "out quota" : ""), (uquotaondisk ? " usrquota" : ""), (pquotaondisk ? " prjquota" : ""), (gquotaondisk ? " grpquota" : "")); return XFS_ERROR(EPERM); } if (XFS_IS_QUOTA_ON(mp) || quotaondisk) { /* * Call mount_quotas at this point only if we won't have to do * a quotacheck. */ if (quotaondisk && !XFS_QM_NEED_QUOTACHECK(mp)) { /* * If an error occurred, qm_mount_quotas code * has already disabled quotas. So, just finish * mounting, and get on with the boring life * without disk quotas. 
*/ xfs_qm_mount_quotas(mp); } else { /* * Clear the quota flags, but remember them. This * is so that the quota code doesn't get invoked * before we're ready. This can happen when an * inode goes inactive and wants to free blocks, * or via xfs_log_mount_finish. */ *needquotamount = B_TRUE; *quotaflags = mp->m_qflags; mp->m_qflags = 0; } } return 0; }
gpl-2.0
JmzTaylor/android_kernel_htc_msm8960-1
fs/xfs/xfs_trans_dquot.c
4812
22233
/* * Copyright (c) 2000-2002 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" #include "xfs_itable.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_trans_priv.h" #include "xfs_qm.h" STATIC void xfs_trans_alloc_dqinfo(xfs_trans_t *); /* * Add the locked dquot to the transaction. * The dquot must be locked, and it cannot be associated with any * transaction. */ void xfs_trans_dqjoin( xfs_trans_t *tp, xfs_dquot_t *dqp) { ASSERT(dqp->q_transp != tp); ASSERT(XFS_DQ_IS_LOCKED(dqp)); ASSERT(dqp->q_logitem.qli_dquot == dqp); /* * Get a log_item_desc to point at the new item. */ xfs_trans_add_item(tp, &dqp->q_logitem.qli_item); /* * Initialize d_transp so we can later determine if this dquot is * associated with this transaction. */ dqp->q_transp = tp; } /* * This is called to mark the dquot as needing * to be logged when the transaction is committed. The dquot must * already be associated with the given transaction. * Note that it marks the entire transaction as dirty. 
In the ordinary * case, this gets called via xfs_trans_commit, after the transaction * is already dirty. However, there's nothing stop this from getting * called directly, as done by xfs_qm_scall_setqlim. Hence, the TRANS_DIRTY * flag. */ void xfs_trans_log_dquot( xfs_trans_t *tp, xfs_dquot_t *dqp) { ASSERT(dqp->q_transp == tp); ASSERT(XFS_DQ_IS_LOCKED(dqp)); tp->t_flags |= XFS_TRANS_DIRTY; dqp->q_logitem.qli_item.li_desc->lid_flags |= XFS_LID_DIRTY; } /* * Carry forward whatever is left of the quota blk reservation to * the spanky new transaction */ void xfs_trans_dup_dqinfo( xfs_trans_t *otp, xfs_trans_t *ntp) { xfs_dqtrx_t *oq, *nq; int i,j; xfs_dqtrx_t *oqa, *nqa; if (!otp->t_dqinfo) return; xfs_trans_alloc_dqinfo(ntp); oqa = otp->t_dqinfo->dqa_usrdquots; nqa = ntp->t_dqinfo->dqa_usrdquots; /* * Because the quota blk reservation is carried forward, * it is also necessary to carry forward the DQ_DIRTY flag. */ if(otp->t_flags & XFS_TRANS_DQ_DIRTY) ntp->t_flags |= XFS_TRANS_DQ_DIRTY; for (j = 0; j < 2; j++) { for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { if (oqa[i].qt_dquot == NULL) break; oq = &oqa[i]; nq = &nqa[i]; nq->qt_dquot = oq->qt_dquot; nq->qt_bcount_delta = nq->qt_icount_delta = 0; nq->qt_rtbcount_delta = 0; /* * Transfer whatever is left of the reservations. */ nq->qt_blk_res = oq->qt_blk_res - oq->qt_blk_res_used; oq->qt_blk_res = oq->qt_blk_res_used; nq->qt_rtblk_res = oq->qt_rtblk_res - oq->qt_rtblk_res_used; oq->qt_rtblk_res = oq->qt_rtblk_res_used; nq->qt_ino_res = oq->qt_ino_res - oq->qt_ino_res_used; oq->qt_ino_res = oq->qt_ino_res_used; } oqa = otp->t_dqinfo->dqa_grpdquots; nqa = ntp->t_dqinfo->dqa_grpdquots; } } /* * Wrap around mod_dquot to account for both user and group quotas. 
*/ void xfs_trans_mod_dquot_byino( xfs_trans_t *tp, xfs_inode_t *ip, uint field, long delta) { xfs_mount_t *mp = tp->t_mountp; if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp) || ip->i_ino == mp->m_sb.sb_uquotino || ip->i_ino == mp->m_sb.sb_gquotino) return; if (tp->t_dqinfo == NULL) xfs_trans_alloc_dqinfo(tp); if (XFS_IS_UQUOTA_ON(mp) && ip->i_udquot) (void) xfs_trans_mod_dquot(tp, ip->i_udquot, field, delta); if (XFS_IS_OQUOTA_ON(mp) && ip->i_gdquot) (void) xfs_trans_mod_dquot(tp, ip->i_gdquot, field, delta); } STATIC xfs_dqtrx_t * xfs_trans_get_dqtrx( xfs_trans_t *tp, xfs_dquot_t *dqp) { int i; xfs_dqtrx_t *qa; qa = XFS_QM_ISUDQ(dqp) ? tp->t_dqinfo->dqa_usrdquots : tp->t_dqinfo->dqa_grpdquots; for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { if (qa[i].qt_dquot == NULL || qa[i].qt_dquot == dqp) return &qa[i]; } return NULL; } /* * Make the changes in the transaction structure. * The moral equivalent to xfs_trans_mod_sb(). * We don't touch any fields in the dquot, so we don't care * if it's locked or not (most of the time it won't be). */ void xfs_trans_mod_dquot( xfs_trans_t *tp, xfs_dquot_t *dqp, uint field, long delta) { xfs_dqtrx_t *qtrx; ASSERT(tp); ASSERT(XFS_IS_QUOTA_RUNNING(tp->t_mountp)); qtrx = NULL; if (tp->t_dqinfo == NULL) xfs_trans_alloc_dqinfo(tp); /* * Find either the first free slot or the slot that belongs * to this dquot. */ qtrx = xfs_trans_get_dqtrx(tp, dqp); ASSERT(qtrx); if (qtrx->qt_dquot == NULL) qtrx->qt_dquot = dqp; switch (field) { /* * regular disk blk reservation */ case XFS_TRANS_DQ_RES_BLKS: qtrx->qt_blk_res += (ulong)delta; break; /* * inode reservation */ case XFS_TRANS_DQ_RES_INOS: qtrx->qt_ino_res += (ulong)delta; break; /* * disk blocks used. 
*/ case XFS_TRANS_DQ_BCOUNT: if (qtrx->qt_blk_res && delta > 0) { qtrx->qt_blk_res_used += (ulong)delta; ASSERT(qtrx->qt_blk_res >= qtrx->qt_blk_res_used); } qtrx->qt_bcount_delta += delta; break; case XFS_TRANS_DQ_DELBCOUNT: qtrx->qt_delbcnt_delta += delta; break; /* * Inode Count */ case XFS_TRANS_DQ_ICOUNT: if (qtrx->qt_ino_res && delta > 0) { qtrx->qt_ino_res_used += (ulong)delta; ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used); } qtrx->qt_icount_delta += delta; break; /* * rtblk reservation */ case XFS_TRANS_DQ_RES_RTBLKS: qtrx->qt_rtblk_res += (ulong)delta; break; /* * rtblk count */ case XFS_TRANS_DQ_RTBCOUNT: if (qtrx->qt_rtblk_res && delta > 0) { qtrx->qt_rtblk_res_used += (ulong)delta; ASSERT(qtrx->qt_rtblk_res >= qtrx->qt_rtblk_res_used); } qtrx->qt_rtbcount_delta += delta; break; case XFS_TRANS_DQ_DELRTBCOUNT: qtrx->qt_delrtb_delta += delta; break; default: ASSERT(0); } tp->t_flags |= XFS_TRANS_DQ_DIRTY; } /* * Given an array of dqtrx structures, lock all the dquots associated * and join them to the transaction, provided they have been modified. * We know that the highest number of dquots (of one type - usr OR grp), * involved in a transaction is 2 and that both usr and grp combined - 3. * So, we don't attempt to make this very generic. */ STATIC void xfs_trans_dqlockedjoin( xfs_trans_t *tp, xfs_dqtrx_t *q) { ASSERT(q[0].qt_dquot != NULL); if (q[1].qt_dquot == NULL) { xfs_dqlock(q[0].qt_dquot); xfs_trans_dqjoin(tp, q[0].qt_dquot); } else { ASSERT(XFS_QM_TRANS_MAXDQS == 2); xfs_dqlock2(q[0].qt_dquot, q[1].qt_dquot); xfs_trans_dqjoin(tp, q[0].qt_dquot); xfs_trans_dqjoin(tp, q[1].qt_dquot); } } /* * Called by xfs_trans_commit() and similar in spirit to * xfs_trans_apply_sb_deltas(). * Go thru all the dquots belonging to this transaction and modify the * INCORE dquot to reflect the actual usages. * Unreserve just the reservations done by this transaction. * dquot is still left locked at exit. 
*/ void xfs_trans_apply_dquot_deltas( xfs_trans_t *tp) { int i, j; xfs_dquot_t *dqp; xfs_dqtrx_t *qtrx, *qa; xfs_disk_dquot_t *d; long totalbdelta; long totalrtbdelta; if (!(tp->t_flags & XFS_TRANS_DQ_DIRTY)) return; ASSERT(tp->t_dqinfo); qa = tp->t_dqinfo->dqa_usrdquots; for (j = 0; j < 2; j++) { if (qa[0].qt_dquot == NULL) { qa = tp->t_dqinfo->dqa_grpdquots; continue; } /* * Lock all of the dquots and join them to the transaction. */ xfs_trans_dqlockedjoin(tp, qa); for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { qtrx = &qa[i]; /* * The array of dquots is filled * sequentially, not sparsely. */ if ((dqp = qtrx->qt_dquot) == NULL) break; ASSERT(XFS_DQ_IS_LOCKED(dqp)); ASSERT(dqp->q_transp == tp); /* * adjust the actual number of blocks used */ d = &dqp->q_core; /* * The issue here is - sometimes we don't make a blkquota * reservation intentionally to be fair to users * (when the amount is small). On the other hand, * delayed allocs do make reservations, but that's * outside of a transaction, so we have no * idea how much was really reserved. * So, here we've accumulated delayed allocation blks and * non-delay blks. The assumption is that the * delayed ones are always reserved (outside of a * transaction), and the others may or may not have * quota reservations. */ totalbdelta = qtrx->qt_bcount_delta + qtrx->qt_delbcnt_delta; totalrtbdelta = qtrx->qt_rtbcount_delta + qtrx->qt_delrtb_delta; #ifdef DEBUG if (totalbdelta < 0) ASSERT(be64_to_cpu(d->d_bcount) >= -totalbdelta); if (totalrtbdelta < 0) ASSERT(be64_to_cpu(d->d_rtbcount) >= -totalrtbdelta); if (qtrx->qt_icount_delta < 0) ASSERT(be64_to_cpu(d->d_icount) >= -qtrx->qt_icount_delta); #endif if (totalbdelta) be64_add_cpu(&d->d_bcount, (xfs_qcnt_t)totalbdelta); if (qtrx->qt_icount_delta) be64_add_cpu(&d->d_icount, (xfs_qcnt_t)qtrx->qt_icount_delta); if (totalrtbdelta) be64_add_cpu(&d->d_rtbcount, (xfs_qcnt_t)totalrtbdelta); /* * Get any default limits in use. * Start/reset the timer(s) if needed. 
*/ if (d->d_id) { xfs_qm_adjust_dqlimits(tp->t_mountp, d); xfs_qm_adjust_dqtimers(tp->t_mountp, d); } dqp->dq_flags |= XFS_DQ_DIRTY; /* * add this to the list of items to get logged */ xfs_trans_log_dquot(tp, dqp); /* * Take off what's left of the original reservation. * In case of delayed allocations, there's no * reservation that a transaction structure knows of. */ if (qtrx->qt_blk_res != 0) { if (qtrx->qt_blk_res != qtrx->qt_blk_res_used) { if (qtrx->qt_blk_res > qtrx->qt_blk_res_used) dqp->q_res_bcount -= (xfs_qcnt_t) (qtrx->qt_blk_res - qtrx->qt_blk_res_used); else dqp->q_res_bcount -= (xfs_qcnt_t) (qtrx->qt_blk_res_used - qtrx->qt_blk_res); } } else { /* * These blks were never reserved, either inside * a transaction or outside one (in a delayed * allocation). Also, this isn't always a * negative number since we sometimes * deliberately skip quota reservations. */ if (qtrx->qt_bcount_delta) { dqp->q_res_bcount += (xfs_qcnt_t)qtrx->qt_bcount_delta; } } /* * Adjust the RT reservation. */ if (qtrx->qt_rtblk_res != 0) { if (qtrx->qt_rtblk_res != qtrx->qt_rtblk_res_used) { if (qtrx->qt_rtblk_res > qtrx->qt_rtblk_res_used) dqp->q_res_rtbcount -= (xfs_qcnt_t) (qtrx->qt_rtblk_res - qtrx->qt_rtblk_res_used); else dqp->q_res_rtbcount -= (xfs_qcnt_t) (qtrx->qt_rtblk_res_used - qtrx->qt_rtblk_res); } } else { if (qtrx->qt_rtbcount_delta) dqp->q_res_rtbcount += (xfs_qcnt_t)qtrx->qt_rtbcount_delta; } /* * Adjust the inode reservation. 
*/ if (qtrx->qt_ino_res != 0) { ASSERT(qtrx->qt_ino_res >= qtrx->qt_ino_res_used); if (qtrx->qt_ino_res > qtrx->qt_ino_res_used) dqp->q_res_icount -= (xfs_qcnt_t) (qtrx->qt_ino_res - qtrx->qt_ino_res_used); } else { if (qtrx->qt_icount_delta) dqp->q_res_icount += (xfs_qcnt_t)qtrx->qt_icount_delta; } ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount)); ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount)); ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount)); } /* * Do the group quotas next */ qa = tp->t_dqinfo->dqa_grpdquots; } } /* * Release the reservations, and adjust the dquots accordingly. * This is called only when the transaction is being aborted. If by * any chance we have done dquot modifications incore (ie. deltas) already, * we simply throw those away, since that's the expected behavior * when a transaction is curtailed without a commit. */ void xfs_trans_unreserve_and_mod_dquots( xfs_trans_t *tp) { int i, j; xfs_dquot_t *dqp; xfs_dqtrx_t *qtrx, *qa; boolean_t locked; if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY)) return; qa = tp->t_dqinfo->dqa_usrdquots; for (j = 0; j < 2; j++) { for (i = 0; i < XFS_QM_TRANS_MAXDQS; i++) { qtrx = &qa[i]; /* * We assume that the array of dquots is filled * sequentially, not sparsely. */ if ((dqp = qtrx->qt_dquot) == NULL) break; /* * Unreserve the original reservation. We don't care * about the number of blocks used field, or deltas. * Also we don't bother to zero the fields. 
*/ locked = B_FALSE; if (qtrx->qt_blk_res) { xfs_dqlock(dqp); locked = B_TRUE; dqp->q_res_bcount -= (xfs_qcnt_t)qtrx->qt_blk_res; } if (qtrx->qt_ino_res) { if (!locked) { xfs_dqlock(dqp); locked = B_TRUE; } dqp->q_res_icount -= (xfs_qcnt_t)qtrx->qt_ino_res; } if (qtrx->qt_rtblk_res) { if (!locked) { xfs_dqlock(dqp); locked = B_TRUE; } dqp->q_res_rtbcount -= (xfs_qcnt_t)qtrx->qt_rtblk_res; } if (locked) xfs_dqunlock(dqp); } qa = tp->t_dqinfo->dqa_grpdquots; } } STATIC void xfs_quota_warn( struct xfs_mount *mp, struct xfs_dquot *dqp, int type) { /* no warnings for project quotas - we just return ENOSPC later */ if (dqp->dq_flags & XFS_DQ_PROJ) return; quota_send_warning((dqp->dq_flags & XFS_DQ_USER) ? USRQUOTA : GRPQUOTA, be32_to_cpu(dqp->q_core.d_id), mp->m_super->s_dev, type); } /* * This reserves disk blocks and inodes against a dquot. * Flags indicate if the dquot is to be locked here and also * if the blk reservation is for RT or regular blocks. * Sending in XFS_QMOPT_FORCE_RES flag skips the quota check. 
*/ STATIC int xfs_trans_dqresv( xfs_trans_t *tp, xfs_mount_t *mp, xfs_dquot_t *dqp, long nblks, long ninos, uint flags) { xfs_qcnt_t hardlimit; xfs_qcnt_t softlimit; time_t timer; xfs_qwarncnt_t warns; xfs_qwarncnt_t warnlimit; xfs_qcnt_t total_count; xfs_qcnt_t *resbcountp; xfs_quotainfo_t *q = mp->m_quotainfo; xfs_dqlock(dqp); if (flags & XFS_TRANS_DQ_RES_BLKS) { hardlimit = be64_to_cpu(dqp->q_core.d_blk_hardlimit); if (!hardlimit) hardlimit = q->qi_bhardlimit; softlimit = be64_to_cpu(dqp->q_core.d_blk_softlimit); if (!softlimit) softlimit = q->qi_bsoftlimit; timer = be32_to_cpu(dqp->q_core.d_btimer); warns = be16_to_cpu(dqp->q_core.d_bwarns); warnlimit = dqp->q_mount->m_quotainfo->qi_bwarnlimit; resbcountp = &dqp->q_res_bcount; } else { ASSERT(flags & XFS_TRANS_DQ_RES_RTBLKS); hardlimit = be64_to_cpu(dqp->q_core.d_rtb_hardlimit); if (!hardlimit) hardlimit = q->qi_rtbhardlimit; softlimit = be64_to_cpu(dqp->q_core.d_rtb_softlimit); if (!softlimit) softlimit = q->qi_rtbsoftlimit; timer = be32_to_cpu(dqp->q_core.d_rtbtimer); warns = be16_to_cpu(dqp->q_core.d_rtbwarns); warnlimit = dqp->q_mount->m_quotainfo->qi_rtbwarnlimit; resbcountp = &dqp->q_res_rtbcount; } if ((flags & XFS_QMOPT_FORCE_RES) == 0 && dqp->q_core.d_id && ((XFS_IS_UQUOTA_ENFORCED(dqp->q_mount) && XFS_QM_ISUDQ(dqp)) || (XFS_IS_OQUOTA_ENFORCED(dqp->q_mount) && (XFS_QM_ISPDQ(dqp) || XFS_QM_ISGDQ(dqp))))) { if (nblks > 0) { /* * dquot is locked already. See if we'd go over the * hardlimit or exceed the timelimit if we allocate * nblks. 
*/ total_count = *resbcountp + nblks; if (hardlimit && total_count > hardlimit) { xfs_quota_warn(mp, dqp, QUOTA_NL_BHARDWARN); goto error_return; } if (softlimit && total_count > softlimit) { if ((timer != 0 && get_seconds() > timer) || (warns != 0 && warns >= warnlimit)) { xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTLONGWARN); goto error_return; } xfs_quota_warn(mp, dqp, QUOTA_NL_BSOFTWARN); } } if (ninos > 0) { total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos; timer = be32_to_cpu(dqp->q_core.d_itimer); warns = be16_to_cpu(dqp->q_core.d_iwarns); warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit; hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); if (!hardlimit) hardlimit = q->qi_ihardlimit; softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); if (!softlimit) softlimit = q->qi_isoftlimit; if (hardlimit && total_count > hardlimit) { xfs_quota_warn(mp, dqp, QUOTA_NL_IHARDWARN); goto error_return; } if (softlimit && total_count > softlimit) { if ((timer != 0 && get_seconds() > timer) || (warns != 0 && warns >= warnlimit)) { xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTLONGWARN); goto error_return; } xfs_quota_warn(mp, dqp, QUOTA_NL_ISOFTWARN); } } } /* * Change the reservation, but not the actual usage. * Note that q_res_bcount = q_core.d_bcount + resv */ (*resbcountp) += (xfs_qcnt_t)nblks; if (ninos != 0) dqp->q_res_icount += (xfs_qcnt_t)ninos; /* * note the reservation amt in the trans struct too, * so that the transaction knows how much was reserved by * it against this particular dquot. * We don't do this when we are reserving for a delayed allocation, * because we don't have the luxury of a transaction envelope then. 
*/ if (tp) { ASSERT(tp->t_dqinfo); ASSERT(flags & XFS_QMOPT_RESBLK_MASK); if (nblks != 0) xfs_trans_mod_dquot(tp, dqp, flags & XFS_QMOPT_RESBLK_MASK, nblks); if (ninos != 0) xfs_trans_mod_dquot(tp, dqp, XFS_TRANS_DQ_RES_INOS, ninos); } ASSERT(dqp->q_res_bcount >= be64_to_cpu(dqp->q_core.d_bcount)); ASSERT(dqp->q_res_rtbcount >= be64_to_cpu(dqp->q_core.d_rtbcount)); ASSERT(dqp->q_res_icount >= be64_to_cpu(dqp->q_core.d_icount)); xfs_dqunlock(dqp); return 0; error_return: xfs_dqunlock(dqp); if (flags & XFS_QMOPT_ENOSPC) return ENOSPC; return EDQUOT; } /* * Given dquot(s), make disk block and/or inode reservations against them. * The fact that this does the reservation against both the usr and * grp/prj quotas is important, because this follows a both-or-nothing * approach. * * flags = XFS_QMOPT_FORCE_RES evades limit enforcement. Used by chown. * XFS_QMOPT_ENOSPC returns ENOSPC not EDQUOT. Used by pquota. * XFS_TRANS_DQ_RES_BLKS reserves regular disk blocks * XFS_TRANS_DQ_RES_RTBLKS reserves realtime disk blocks * dquots are unlocked on return, if they were not locked by caller. */ int xfs_trans_reserve_quota_bydquots( xfs_trans_t *tp, xfs_mount_t *mp, xfs_dquot_t *udqp, xfs_dquot_t *gdqp, long nblks, long ninos, uint flags) { int resvd = 0, error; if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) return 0; if (tp && tp->t_dqinfo == NULL) xfs_trans_alloc_dqinfo(tp); ASSERT(flags & XFS_QMOPT_RESBLK_MASK); if (udqp) { error = xfs_trans_dqresv(tp, mp, udqp, nblks, ninos, (flags & ~XFS_QMOPT_ENOSPC)); if (error) return error; resvd = 1; } if (gdqp) { error = xfs_trans_dqresv(tp, mp, gdqp, nblks, ninos, flags); if (error) { /* * can't do it, so backout previous reservation */ if (resvd) { flags |= XFS_QMOPT_FORCE_RES; xfs_trans_dqresv(tp, mp, udqp, -nblks, -ninos, flags); } return error; } } /* * Didn't change anything critical, so, no need to log */ return 0; } /* * Lock the dquot and change the reservation if we can. 
* This doesn't change the actual usage, just the reservation. * The inode sent in is locked. */ int xfs_trans_reserve_quota_nblks( struct xfs_trans *tp, struct xfs_inode *ip, long nblks, long ninos, uint flags) { struct xfs_mount *mp = ip->i_mount; if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp)) return 0; if (XFS_IS_PQUOTA_ON(mp)) flags |= XFS_QMOPT_ENOSPC; ASSERT(ip->i_ino != mp->m_sb.sb_uquotino); ASSERT(ip->i_ino != mp->m_sb.sb_gquotino); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == XFS_TRANS_DQ_RES_RTBLKS || (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) == XFS_TRANS_DQ_RES_BLKS); /* * Reserve nblks against these dquots, with trans as the mediator. */ return xfs_trans_reserve_quota_bydquots(tp, mp, ip->i_udquot, ip->i_gdquot, nblks, ninos, flags); } /* * This routine is called to allocate a quotaoff log item. */ xfs_qoff_logitem_t * xfs_trans_get_qoff_item( xfs_trans_t *tp, xfs_qoff_logitem_t *startqoff, uint flags) { xfs_qoff_logitem_t *q; ASSERT(tp != NULL); q = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags); ASSERT(q != NULL); /* * Get a log_item_desc to point at the new item. */ xfs_trans_add_item(tp, &q->qql_item); return q; } /* * This is called to mark the quotaoff logitem as needing * to be logged when the transaction is committed. The logitem must * already be associated with the given transaction. */ void xfs_trans_log_quotaoff_item( xfs_trans_t *tp, xfs_qoff_logitem_t *qlp) { tp->t_flags |= XFS_TRANS_DIRTY; qlp->qql_item.li_desc->lid_flags |= XFS_LID_DIRTY; } STATIC void xfs_trans_alloc_dqinfo( xfs_trans_t *tp) { tp->t_dqinfo = kmem_zone_zalloc(xfs_qm_dqtrxzone, KM_SLEEP); } void xfs_trans_free_dqinfo( xfs_trans_t *tp) { if (!tp->t_dqinfo) return; kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo); tp->t_dqinfo = NULL; }
gpl-2.0
santod/KK_sense_kernel_htc_m8vzw
drivers/media/video/tda9840.c
4812
5608
/* tda9840 - i2c-driver for the tda9840 by SGS Thomson Copyright (C) 1998-2003 Michael Hunold <michael@mihu.de> Copyright (C) 2008 Hans Verkuil <hverkuil@xs4all.nl> The tda9840 is a stereo/dual sound processor with digital identification. It can be found at address 0x84 on the i2c-bus. For detailed informations download the specifications directly from SGS Thomson at http://www.st.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/module.h> #include <linux/ioctl.h> #include <linux/slab.h> #include <linux/i2c.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> MODULE_AUTHOR("Michael Hunold <michael@mihu.de>"); MODULE_DESCRIPTION("tda9840 driver"); MODULE_LICENSE("GPL"); static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug level (0-1)"); #define SWITCH 0x00 #define LEVEL_ADJUST 0x02 #define STEREO_ADJUST 0x03 #define TEST 0x04 #define TDA9840_SET_MUTE 0x00 #define TDA9840_SET_MONO 0x10 #define TDA9840_SET_STEREO 0x2a #define TDA9840_SET_LANG1 0x12 #define TDA9840_SET_LANG2 0x1e #define TDA9840_SET_BOTH 0x1a #define TDA9840_SET_BOTH_R 0x16 #define TDA9840_SET_EXTERNAL 0x7a static void tda9840_write(struct v4l2_subdev *sd, u8 reg, u8 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); if (i2c_smbus_write_byte_data(client, reg, val)) v4l2_dbg(1, debug, sd, "error writing %02x to %02x\n", val, reg); } static int tda9840_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t) { int byte; if (t->index) return -EINVAL; switch (t->audmode) { case V4L2_TUNER_MODE_STEREO: byte = TDA9840_SET_STEREO; break; case V4L2_TUNER_MODE_LANG1_LANG2: byte = TDA9840_SET_BOTH; break; case V4L2_TUNER_MODE_LANG1: byte = TDA9840_SET_LANG1; break; case V4L2_TUNER_MODE_LANG2: byte = TDA9840_SET_LANG2; break; default: byte = TDA9840_SET_MONO; break; } v4l2_dbg(1, debug, sd, "TDA9840_SWITCH: 0x%02x\n", byte); tda9840_write(sd, SWITCH, byte); return 0; } static int tda9840_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *t) { struct i2c_client *client = v4l2_get_subdevdata(sd); u8 byte; t->rxsubchans = V4L2_TUNER_SUB_MONO; if (1 != i2c_master_recv(client, &byte, 1)) { v4l2_dbg(1, debug, sd, "i2c_master_recv() failed\n"); return -EIO; } if (byte & 0x80) { v4l2_dbg(1, debug, sd, "TDA9840_DETECT: register contents invalid\n"); return -EINVAL; } v4l2_dbg(1, debug, sd, "TDA9840_DETECT: byte: 0x%02x\n", byte); switch (byte & 0x60) { case 0x00: t->rxsubchans = 
V4L2_TUNER_SUB_MONO; break; case 0x20: t->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; break; case 0x40: t->rxsubchans = V4L2_TUNER_SUB_STEREO | V4L2_TUNER_SUB_MONO; break; default: /* Incorrect detect */ t->rxsubchans = V4L2_TUNER_MODE_MONO; break; } return 0; } static int tda9840_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TDA9840, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops tda9840_core_ops = { .g_chip_ident = tda9840_g_chip_ident, }; static const struct v4l2_subdev_tuner_ops tda9840_tuner_ops = { .s_tuner = tda9840_s_tuner, .g_tuner = tda9840_g_tuner, }; static const struct v4l2_subdev_ops tda9840_ops = { .core = &tda9840_core_ops, .tuner = &tda9840_tuner_ops, }; /* ----------------------------------------------------------------------- */ static int tda9840_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct v4l2_subdev *sd; /* let's see whether this adapter can support what we need */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_READ_BYTE_DATA | I2C_FUNC_SMBUS_WRITE_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); sd = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); if (sd == NULL) return -ENOMEM; v4l2_i2c_subdev_init(sd, client, &tda9840_ops); /* set initial values for level & stereo - adjustment, mode */ tda9840_write(sd, LEVEL_ADJUST, 0); tda9840_write(sd, STEREO_ADJUST, 0); tda9840_write(sd, SWITCH, TDA9840_SET_STEREO); return 0; } static int tda9840_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(sd); return 0; } static const struct i2c_device_id tda9840_id[] = { { "tda9840", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tda9840_id); static struct 
i2c_driver tda9840_driver = { .driver = { .owner = THIS_MODULE, .name = "tda9840", }, .probe = tda9840_probe, .remove = tda9840_remove, .id_table = tda9840_id, }; module_i2c_driver(tda9840_driver);
gpl-2.0
MoKee/android_kernel_samsung_klte
fs/xfs/xfs_qm_syscalls.c
4812
24670
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/capability.h> #include "xfs.h" #include "xfs_fs.h" #include "xfs_bit.h" #include "xfs_log.h" #include "xfs_inum.h" #include "xfs_trans.h" #include "xfs_sb.h" #include "xfs_ag.h" #include "xfs_alloc.h" #include "xfs_quota.h" #include "xfs_mount.h" #include "xfs_bmap_btree.h" #include "xfs_inode.h" #include "xfs_inode_item.h" #include "xfs_itable.h" #include "xfs_bmap.h" #include "xfs_rtalloc.h" #include "xfs_error.h" #include "xfs_attr.h" #include "xfs_buf_item.h" #include "xfs_utils.h" #include "xfs_qm.h" #include "xfs_trace.h" STATIC int xfs_qm_log_quotaoff(xfs_mount_t *, xfs_qoff_logitem_t **, uint); STATIC int xfs_qm_log_quotaoff_end(xfs_mount_t *, xfs_qoff_logitem_t *, uint); STATIC uint xfs_qm_export_flags(uint); STATIC uint xfs_qm_export_qtype_flags(uint); /* * Turn off quota accounting and/or enforcement for all udquots and/or * gdquots. Called only at unmount time. * * This assumes that there are no dquots of this file system cached * incore, and modifies the ondisk dquot directly. Therefore, for example, * it is an error to call this twice, without purging the cache. 
*/ int xfs_qm_scall_quotaoff( xfs_mount_t *mp, uint flags) { struct xfs_quotainfo *q = mp->m_quotainfo; uint dqtype; int error; uint inactivate_flags; xfs_qoff_logitem_t *qoffstart; /* * No file system can have quotas enabled on disk but not in core. * Note that quota utilities (like quotaoff) _expect_ * errno == EEXIST here. */ if ((mp->m_qflags & flags) == 0) return XFS_ERROR(EEXIST); error = 0; flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); /* * We don't want to deal with two quotaoffs messing up each other, * so we're going to serialize it. quotaoff isn't exactly a performance * critical thing. * If quotaoff, then we must be dealing with the root filesystem. */ ASSERT(q); mutex_lock(&q->qi_quotaofflock); /* * If we're just turning off quota enforcement, change mp and go. */ if ((flags & XFS_ALL_QUOTA_ACCT) == 0) { mp->m_qflags &= ~(flags); spin_lock(&mp->m_sb_lock); mp->m_sb.sb_qflags = mp->m_qflags; spin_unlock(&mp->m_sb_lock); mutex_unlock(&q->qi_quotaofflock); /* XXX what to do if error ? Revert back to old vals incore ? */ error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS); return (error); } dqtype = 0; inactivate_flags = 0; /* * If accounting is off, we must turn enforcement off, clear the * quota 'CHKD' certificate to make it known that we have to * do a quotacheck the next time this quota is turned on. */ if (flags & XFS_UQUOTA_ACCT) { dqtype |= XFS_QMOPT_UQUOTA; flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD); inactivate_flags |= XFS_UQUOTA_ACTIVE; } if (flags & XFS_GQUOTA_ACCT) { dqtype |= XFS_QMOPT_GQUOTA; flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); inactivate_flags |= XFS_GQUOTA_ACTIVE; } else if (flags & XFS_PQUOTA_ACCT) { dqtype |= XFS_QMOPT_PQUOTA; flags |= (XFS_OQUOTA_CHKD | XFS_OQUOTA_ENFD); inactivate_flags |= XFS_PQUOTA_ACTIVE; } /* * Nothing to do? Don't complain. This happens when we're just * turning off quota enforcement. 
*/ if ((mp->m_qflags & flags) == 0) goto out_unlock; /* * Write the LI_QUOTAOFF log record, and do SB changes atomically, * and synchronously. If we fail to write, we should abort the * operation as it cannot be recovered safely if we crash. */ error = xfs_qm_log_quotaoff(mp, &qoffstart, flags); if (error) goto out_unlock; /* * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct * to take care of the race between dqget and quotaoff. We don't take * any special locks to reset these bits. All processes need to check * these bits *after* taking inode lock(s) to see if the particular * quota type is in the process of being turned off. If *ACTIVE, it is * guaranteed that all dquot structures and all quotainode ptrs will all * stay valid as long as that inode is kept locked. * * There is no turning back after this. */ mp->m_qflags &= ~inactivate_flags; /* * Give back all the dquot reference(s) held by inodes. * Here we go thru every single incore inode in this file system, and * do a dqrele on the i_udquot/i_gdquot that it may have. * Essentially, as long as somebody has an inode locked, this guarantees * that quotas will not be turned off. This is handy because in a * transaction once we lock the inode(s) and check for quotaon, we can * depend on the quota inodes (and other things) being valid as long as * we keep the lock(s). */ xfs_qm_dqrele_all_inodes(mp, flags); /* * Next we make the changes in the quota flag in the mount struct. * This isn't protected by a particular lock directly, because we * don't want to take a mrlock every time we depend on quotas being on. */ mp->m_qflags &= ~flags; /* * Go through all the dquots of this file system and purge them, * according to what was turned off. */ xfs_qm_dqpurge_all(mp, dqtype); /* * Transactions that had started before ACTIVE state bit was cleared * could have logged many dquots, so they'd have higher LSNs than * the first QUOTAOFF log record does. 
If we happen to crash when * the tail of the log has gone past the QUOTAOFF record, but * before the last dquot modification, those dquots __will__ * recover, and that's not good. * * So, we have QUOTAOFF start and end logitems; the start * logitem won't get overwritten until the end logitem appears... */ error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags); if (error) { /* We're screwed now. Shutdown is the only option. */ xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); goto out_unlock; } /* * If quotas is completely disabled, close shop. */ if (((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET1) || ((flags & XFS_MOUNT_QUOTA_ALL) == XFS_MOUNT_QUOTA_SET2)) { mutex_unlock(&q->qi_quotaofflock); xfs_qm_destroy_quotainfo(mp); return (0); } /* * Release our quotainode references if we don't need them anymore. */ if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) { IRELE(q->qi_uquotaip); q->qi_uquotaip = NULL; } if ((dqtype & (XFS_QMOPT_GQUOTA|XFS_QMOPT_PQUOTA)) && q->qi_gquotaip) { IRELE(q->qi_gquotaip); q->qi_gquotaip = NULL; } out_unlock: mutex_unlock(&q->qi_quotaofflock); return error; } STATIC int xfs_qm_scall_trunc_qfile( struct xfs_mount *mp, xfs_ino_t ino) { struct xfs_inode *ip; struct xfs_trans *tp; int error; if (ino == NULLFSINO) return 0; error = xfs_iget(mp, NULL, ino, 0, 0, &ip); if (error) return error; xfs_ilock(ip, XFS_IOLOCK_EXCL); tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE); error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0, XFS_TRANS_PERM_LOG_RES, XFS_ITRUNCATE_LOG_COUNT); if (error) { xfs_trans_cancel(tp, 0); xfs_iunlock(ip, XFS_IOLOCK_EXCL); goto out_put; } xfs_ilock(ip, XFS_ILOCK_EXCL); xfs_trans_ijoin(tp, ip, 0); ip->i_d.di_size = 0; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0); if (error) { xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT); goto out_unlock; } ASSERT(ip->i_d.di_nextents == 0); xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | 
XFS_ICHGTIME_CHG); error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES); out_unlock: xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL); out_put: IRELE(ip); return error; } int xfs_qm_scall_trunc_qfiles( xfs_mount_t *mp, uint flags) { int error = 0, error2 = 0; if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) { xfs_debug(mp, "%s: flags=%x m_qflags=%x\n", __func__, flags, mp->m_qflags); return XFS_ERROR(EINVAL); } if (flags & XFS_DQ_USER) error = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_uquotino); if (flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) error2 = xfs_qm_scall_trunc_qfile(mp, mp->m_sb.sb_gquotino); return error ? error : error2; } /* * Switch on (a given) quota enforcement for a filesystem. This takes * effect immediately. * (Switching on quota accounting must be done at mount time.) */ int xfs_qm_scall_quotaon( xfs_mount_t *mp, uint flags) { int error; uint qf; __int64_t sbflags; flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD); /* * Switching on quota accounting must be done at mount time. */ flags &= ~(XFS_ALL_QUOTA_ACCT); sbflags = 0; if (flags == 0) { xfs_debug(mp, "%s: zero flags, m_qflags=%x\n", __func__, mp->m_qflags); return XFS_ERROR(EINVAL); } /* No fs can turn on quotas with a delayed effect */ ASSERT((flags & XFS_ALL_QUOTA_ACCT) == 0); /* * Can't enforce without accounting. We check the superblock * qflags here instead of m_qflags because rootfs can have * quota acct on ondisk without m_qflags' knowing. */ if (((flags & XFS_UQUOTA_ACCT) == 0 && (mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) == 0 && (flags & XFS_UQUOTA_ENFD)) || ((flags & XFS_PQUOTA_ACCT) == 0 && (mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) == 0 && (flags & XFS_GQUOTA_ACCT) == 0 && (mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) == 0 && (flags & XFS_OQUOTA_ENFD))) { xfs_debug(mp, "%s: Can't enforce without acct, flags=%x sbflags=%x\n", __func__, flags, mp->m_sb.sb_qflags); return XFS_ERROR(EINVAL); } /* * If everything's up to-date incore, then don't waste time. 
*/ if ((mp->m_qflags & flags) == flags) return XFS_ERROR(EEXIST); /* * Change sb_qflags on disk but not incore mp->qflags * if this is the root filesystem. */ spin_lock(&mp->m_sb_lock); qf = mp->m_sb.sb_qflags; mp->m_sb.sb_qflags = qf | flags; spin_unlock(&mp->m_sb_lock); /* * There's nothing to change if it's the same. */ if ((qf & flags) == flags && sbflags == 0) return XFS_ERROR(EEXIST); sbflags |= XFS_SB_QFLAGS; if ((error = xfs_qm_write_sb_changes(mp, sbflags))) return (error); /* * If we aren't trying to switch on quota enforcement, we are done. */ if (((mp->m_sb.sb_qflags & XFS_UQUOTA_ACCT) != (mp->m_qflags & XFS_UQUOTA_ACCT)) || ((mp->m_sb.sb_qflags & XFS_PQUOTA_ACCT) != (mp->m_qflags & XFS_PQUOTA_ACCT)) || ((mp->m_sb.sb_qflags & XFS_GQUOTA_ACCT) != (mp->m_qflags & XFS_GQUOTA_ACCT)) || (flags & XFS_ALL_QUOTA_ENFD) == 0) return (0); if (! XFS_IS_QUOTA_RUNNING(mp)) return XFS_ERROR(ESRCH); /* * Switch on quota enforcement in core. */ mutex_lock(&mp->m_quotainfo->qi_quotaofflock); mp->m_qflags |= (flags & XFS_ALL_QUOTA_ENFD); mutex_unlock(&mp->m_quotainfo->qi_quotaofflock); return (0); } /* * Return quota status information, such as uquota-off, enforcements, etc. 
*/ int xfs_qm_scall_getqstat( struct xfs_mount *mp, struct fs_quota_stat *out) { struct xfs_quotainfo *q = mp->m_quotainfo; struct xfs_inode *uip, *gip; boolean_t tempuqip, tempgqip; uip = gip = NULL; tempuqip = tempgqip = B_FALSE; memset(out, 0, sizeof(fs_quota_stat_t)); out->qs_version = FS_QSTAT_VERSION; if (!xfs_sb_version_hasquota(&mp->m_sb)) { out->qs_uquota.qfs_ino = NULLFSINO; out->qs_gquota.qfs_ino = NULLFSINO; return (0); } out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags & (XFS_ALL_QUOTA_ACCT| XFS_ALL_QUOTA_ENFD)); out->qs_pad = 0; out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino; out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino; if (q) { uip = q->qi_uquotaip; gip = q->qi_gquotaip; } if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &uip) == 0) tempuqip = B_TRUE; } if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) { if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &gip) == 0) tempgqip = B_TRUE; } if (uip) { out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks; out->qs_uquota.qfs_nextents = uip->i_d.di_nextents; if (tempuqip) IRELE(uip); } if (gip) { out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks; out->qs_gquota.qfs_nextents = gip->i_d.di_nextents; if (tempgqip) IRELE(gip); } if (q) { out->qs_incoredqs = q->qi_dquots; out->qs_btimelimit = q->qi_btimelimit; out->qs_itimelimit = q->qi_itimelimit; out->qs_rtbtimelimit = q->qi_rtbtimelimit; out->qs_bwarnlimit = q->qi_bwarnlimit; out->qs_iwarnlimit = q->qi_iwarnlimit; } return 0; } #define XFS_DQ_MASK \ (FS_DQ_LIMIT_MASK | FS_DQ_TIMER_MASK | FS_DQ_WARNS_MASK) /* * Adjust quota limits, and start/stop timers accordingly. 
*/ int xfs_qm_scall_setqlim( xfs_mount_t *mp, xfs_dqid_t id, uint type, fs_disk_quota_t *newlim) { struct xfs_quotainfo *q = mp->m_quotainfo; xfs_disk_dquot_t *ddq; xfs_dquot_t *dqp; xfs_trans_t *tp; int error; xfs_qcnt_t hard, soft; if (newlim->d_fieldmask & ~XFS_DQ_MASK) return EINVAL; if ((newlim->d_fieldmask & XFS_DQ_MASK) == 0) return 0; tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM); if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128, 0, 0, XFS_DEFAULT_LOG_COUNT))) { xfs_trans_cancel(tp, 0); return (error); } /* * We don't want to race with a quotaoff so take the quotaoff lock. * (We don't hold an inode lock, so there's nothing else to stop * a quotaoff from happening). (XXXThis doesn't currently happen * because we take the vfslock before calling xfs_qm_sysent). */ mutex_lock(&q->qi_quotaofflock); /* * Get the dquot (locked), and join it to the transaction. * Allocate the dquot if this doesn't exist. */ if ((error = xfs_qm_dqget(mp, NULL, id, type, XFS_QMOPT_DQALLOC, &dqp))) { xfs_trans_cancel(tp, XFS_TRANS_ABORT); ASSERT(error != ENOENT); goto out_unlock; } xfs_trans_dqjoin(tp, dqp); ddq = &dqp->q_core; /* * Make sure that hardlimits are >= soft limits before changing. */ hard = (newlim->d_fieldmask & FS_DQ_BHARD) ? (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_hardlimit) : be64_to_cpu(ddq->d_blk_hardlimit); soft = (newlim->d_fieldmask & FS_DQ_BSOFT) ? (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_blk_softlimit) : be64_to_cpu(ddq->d_blk_softlimit); if (hard == 0 || hard >= soft) { ddq->d_blk_hardlimit = cpu_to_be64(hard); ddq->d_blk_softlimit = cpu_to_be64(soft); if (id == 0) { q->qi_bhardlimit = hard; q->qi_bsoftlimit = soft; } } else { xfs_debug(mp, "blkhard %Ld < blksoft %Ld\n", hard, soft); } hard = (newlim->d_fieldmask & FS_DQ_RTBHARD) ? (xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_hardlimit) : be64_to_cpu(ddq->d_rtb_hardlimit); soft = (newlim->d_fieldmask & FS_DQ_RTBSOFT) ? 
(xfs_qcnt_t) XFS_BB_TO_FSB(mp, newlim->d_rtb_softlimit) : be64_to_cpu(ddq->d_rtb_softlimit); if (hard == 0 || hard >= soft) { ddq->d_rtb_hardlimit = cpu_to_be64(hard); ddq->d_rtb_softlimit = cpu_to_be64(soft); if (id == 0) { q->qi_rtbhardlimit = hard; q->qi_rtbsoftlimit = soft; } } else { xfs_debug(mp, "rtbhard %Ld < rtbsoft %Ld\n", hard, soft); } hard = (newlim->d_fieldmask & FS_DQ_IHARD) ? (xfs_qcnt_t) newlim->d_ino_hardlimit : be64_to_cpu(ddq->d_ino_hardlimit); soft = (newlim->d_fieldmask & FS_DQ_ISOFT) ? (xfs_qcnt_t) newlim->d_ino_softlimit : be64_to_cpu(ddq->d_ino_softlimit); if (hard == 0 || hard >= soft) { ddq->d_ino_hardlimit = cpu_to_be64(hard); ddq->d_ino_softlimit = cpu_to_be64(soft); if (id == 0) { q->qi_ihardlimit = hard; q->qi_isoftlimit = soft; } } else { xfs_debug(mp, "ihard %Ld < isoft %Ld\n", hard, soft); } /* * Update warnings counter(s) if requested */ if (newlim->d_fieldmask & FS_DQ_BWARNS) ddq->d_bwarns = cpu_to_be16(newlim->d_bwarns); if (newlim->d_fieldmask & FS_DQ_IWARNS) ddq->d_iwarns = cpu_to_be16(newlim->d_iwarns); if (newlim->d_fieldmask & FS_DQ_RTBWARNS) ddq->d_rtbwarns = cpu_to_be16(newlim->d_rtbwarns); if (id == 0) { /* * Timelimits for the super user set the relative time * the other users can be over quota for this file system. * If it is zero a default is used. Ditto for the default * soft and hard limit values (already done, above), and * for warnings. 
*/ if (newlim->d_fieldmask & FS_DQ_BTIMER) { q->qi_btimelimit = newlim->d_btimer; ddq->d_btimer = cpu_to_be32(newlim->d_btimer); } if (newlim->d_fieldmask & FS_DQ_ITIMER) { q->qi_itimelimit = newlim->d_itimer; ddq->d_itimer = cpu_to_be32(newlim->d_itimer); } if (newlim->d_fieldmask & FS_DQ_RTBTIMER) { q->qi_rtbtimelimit = newlim->d_rtbtimer; ddq->d_rtbtimer = cpu_to_be32(newlim->d_rtbtimer); } if (newlim->d_fieldmask & FS_DQ_BWARNS) q->qi_bwarnlimit = newlim->d_bwarns; if (newlim->d_fieldmask & FS_DQ_IWARNS) q->qi_iwarnlimit = newlim->d_iwarns; if (newlim->d_fieldmask & FS_DQ_RTBWARNS) q->qi_rtbwarnlimit = newlim->d_rtbwarns; } else { /* * If the user is now over quota, start the timelimit. * The user will not be 'warned'. * Note that we keep the timers ticking, whether enforcement * is on or off. We don't really want to bother with iterating * over all ondisk dquots and turning the timers on/off. */ xfs_qm_adjust_dqtimers(mp, ddq); } dqp->dq_flags |= XFS_DQ_DIRTY; xfs_trans_log_dquot(tp, dqp); error = xfs_trans_commit(tp, 0); xfs_qm_dqrele(dqp); out_unlock: mutex_unlock(&q->qi_quotaofflock); return error; } STATIC int xfs_qm_log_quotaoff_end( xfs_mount_t *mp, xfs_qoff_logitem_t *startqoff, uint flags) { xfs_trans_t *tp; int error; xfs_qoff_logitem_t *qoffi; tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END); if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2, 0, 0, XFS_DEFAULT_LOG_COUNT))) { xfs_trans_cancel(tp, 0); return (error); } qoffi = xfs_trans_get_qoff_item(tp, startqoff, flags & XFS_ALL_QUOTA_ACCT); xfs_trans_log_quotaoff_item(tp, qoffi); /* * We have to make sure that the transaction is secure on disk before we * return and actually stop quota accounting. So, make it synchronous. * We don't care about quotoff's performance. 
*/ xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); return (error); } STATIC int xfs_qm_log_quotaoff( xfs_mount_t *mp, xfs_qoff_logitem_t **qoffstartp, uint flags) { xfs_trans_t *tp; int error; xfs_qoff_logitem_t *qoffi=NULL; uint oldsbqflag=0; tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF); if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2 + mp->m_sb.sb_sectsize + 128, 0, 0, XFS_DEFAULT_LOG_COUNT))) { goto error0; } qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT); xfs_trans_log_quotaoff_item(tp, qoffi); spin_lock(&mp->m_sb_lock); oldsbqflag = mp->m_sb.sb_qflags; mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; spin_unlock(&mp->m_sb_lock); xfs_mod_sb(tp, XFS_SB_QFLAGS); /* * We have to make sure that the transaction is secure on disk before we * return and actually stop quota accounting. So, make it synchronous. * We don't care about quotoff's performance. */ xfs_trans_set_sync(tp); error = xfs_trans_commit(tp, 0); error0: if (error) { xfs_trans_cancel(tp, 0); /* * No one else is modifying sb_qflags, so this is OK. * We still hold the quotaofflock. */ spin_lock(&mp->m_sb_lock); mp->m_sb.sb_qflags = oldsbqflag; spin_unlock(&mp->m_sb_lock); } *qoffstartp = qoffi; return (error); } int xfs_qm_scall_getquota( struct xfs_mount *mp, xfs_dqid_t id, uint type, struct fs_disk_quota *dst) { struct xfs_dquot *dqp; int error; /* * Try to get the dquot. We don't want it allocated on disk, so * we aren't passing the XFS_QMOPT_DOALLOC flag. If it doesn't * exist, we'll get ENOENT back. */ error = xfs_qm_dqget(mp, NULL, id, type, 0, &dqp); if (error) return error; /* * If everything's NULL, this dquot doesn't quite exist as far as * our utility programs are concerned. 
*/ if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) { error = XFS_ERROR(ENOENT); goto out_put; } memset(dst, 0, sizeof(*dst)); dst->d_version = FS_DQUOT_VERSION; dst->d_flags = xfs_qm_export_qtype_flags(dqp->q_core.d_flags); dst->d_id = be32_to_cpu(dqp->q_core.d_id); dst->d_blk_hardlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_hardlimit)); dst->d_blk_softlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_blk_softlimit)); dst->d_ino_hardlimit = be64_to_cpu(dqp->q_core.d_ino_hardlimit); dst->d_ino_softlimit = be64_to_cpu(dqp->q_core.d_ino_softlimit); dst->d_bcount = XFS_FSB_TO_BB(mp, dqp->q_res_bcount); dst->d_icount = dqp->q_res_icount; dst->d_btimer = be32_to_cpu(dqp->q_core.d_btimer); dst->d_itimer = be32_to_cpu(dqp->q_core.d_itimer); dst->d_iwarns = be16_to_cpu(dqp->q_core.d_iwarns); dst->d_bwarns = be16_to_cpu(dqp->q_core.d_bwarns); dst->d_rtb_hardlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_hardlimit)); dst->d_rtb_softlimit = XFS_FSB_TO_BB(mp, be64_to_cpu(dqp->q_core.d_rtb_softlimit)); dst->d_rtbcount = XFS_FSB_TO_BB(mp, dqp->q_res_rtbcount); dst->d_rtbtimer = be32_to_cpu(dqp->q_core.d_rtbtimer); dst->d_rtbwarns = be16_to_cpu(dqp->q_core.d_rtbwarns); /* * Internally, we don't reset all the timers when quota enforcement * gets turned off. No need to confuse the user level code, * so return zeroes in that case. 
*/ if ((!XFS_IS_UQUOTA_ENFORCED(mp) && dqp->q_core.d_flags == XFS_DQ_USER) || (!XFS_IS_OQUOTA_ENFORCED(mp) && (dqp->q_core.d_flags & (XFS_DQ_PROJ | XFS_DQ_GROUP)))) { dst->d_btimer = 0; dst->d_itimer = 0; dst->d_rtbtimer = 0; } #ifdef DEBUG if (((XFS_IS_UQUOTA_ENFORCED(mp) && dst->d_flags == FS_USER_QUOTA) || (XFS_IS_OQUOTA_ENFORCED(mp) && (dst->d_flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)))) && dst->d_id != 0) { if (((int) dst->d_bcount > (int) dst->d_blk_softlimit) && (dst->d_blk_softlimit > 0)) { ASSERT(dst->d_btimer != 0); } if (((int) dst->d_icount > (int) dst->d_ino_softlimit) && (dst->d_ino_softlimit > 0)) { ASSERT(dst->d_itimer != 0); } } #endif out_put: xfs_qm_dqput(dqp); return error; } STATIC uint xfs_qm_export_qtype_flags( uint flags) { /* * Can't be more than one, or none. */ ASSERT((flags & (FS_PROJ_QUOTA | FS_USER_QUOTA)) != (FS_PROJ_QUOTA | FS_USER_QUOTA)); ASSERT((flags & (FS_PROJ_QUOTA | FS_GROUP_QUOTA)) != (FS_PROJ_QUOTA | FS_GROUP_QUOTA)); ASSERT((flags & (FS_USER_QUOTA | FS_GROUP_QUOTA)) != (FS_USER_QUOTA | FS_GROUP_QUOTA)); ASSERT((flags & (FS_PROJ_QUOTA|FS_USER_QUOTA|FS_GROUP_QUOTA)) != 0); return (flags & XFS_DQ_USER) ? FS_USER_QUOTA : (flags & XFS_DQ_PROJ) ? FS_PROJ_QUOTA : FS_GROUP_QUOTA; } STATIC uint xfs_qm_export_flags( uint flags) { uint uflags; uflags = 0; if (flags & XFS_UQUOTA_ACCT) uflags |= FS_QUOTA_UDQ_ACCT; if (flags & XFS_PQUOTA_ACCT) uflags |= FS_QUOTA_PDQ_ACCT; if (flags & XFS_GQUOTA_ACCT) uflags |= FS_QUOTA_GDQ_ACCT; if (flags & XFS_UQUOTA_ENFD) uflags |= FS_QUOTA_UDQ_ENFD; if (flags & (XFS_OQUOTA_ENFD)) { uflags |= (flags & XFS_GQUOTA_ACCT) ? 
FS_QUOTA_GDQ_ENFD : FS_QUOTA_PDQ_ENFD; } return (uflags); } STATIC int xfs_dqrele_inode( struct xfs_inode *ip, struct xfs_perag *pag, int flags) { /* skip quota inodes */ if (ip == ip->i_mount->m_quotainfo->qi_uquotaip || ip == ip->i_mount->m_quotainfo->qi_gquotaip) { ASSERT(ip->i_udquot == NULL); ASSERT(ip->i_gdquot == NULL); return 0; } xfs_ilock(ip, XFS_ILOCK_EXCL); if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) { xfs_qm_dqrele(ip->i_udquot); ip->i_udquot = NULL; } if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) { xfs_qm_dqrele(ip->i_gdquot); ip->i_gdquot = NULL; } xfs_iunlock(ip, XFS_ILOCK_EXCL); return 0; } /* * Go thru all the inodes in the file system, releasing their dquots. * * Note that the mount structure gets modified to indicate that quotas are off * AFTER this, in the case of quotaoff. */ void xfs_qm_dqrele_all_inodes( struct xfs_mount *mp, uint flags) { ASSERT(mp->m_quotainfo); xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags); }
gpl-2.0
CyanideL/android_kernel_lge_hammerhead
drivers/infiniband/hw/cxgb4/ev.c
5068
5854
/* * Copyright (c) 2009-2010 Chelsio, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/slab.h> #include <linux/mman.h> #include <net/sock.h> #include "iw_cxgb4.h" static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp, struct c4iw_qp *qhp, struct t4_cqe *err_cqe, enum ib_event_type ib_event) { struct ib_event event; struct c4iw_qp_attributes attrs; unsigned long flag; if ((qhp->attr.state == C4IW_QP_STATE_ERROR) || (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) { PDBG("%s AE received after RTS - " "qp state %d qpid 0x%x status 0x%x\n", __func__, qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe)); return; } printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x " "type %d wrid.hi 0x%x wrid.lo 0x%x\n", CQE_QPID(err_cqe), CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe)); if (qhp->attr.state == C4IW_QP_STATE_RTS) { attrs.next_state = C4IW_QP_STATE_TERMINATE; c4iw_modify_qp(qhp->rhp, qhp, C4IW_QP_ATTR_NEXT_STATE, &attrs, 0); } event.event = ib_event; event.device = chp->ibcq.device; if (ib_event == IB_EVENT_CQ_ERR) event.element.cq = &chp->ibcq; else event.element.qp = &qhp->ibqp; if (qhp->ibqp.event_handler) (*qhp->ibqp.event_handler)(&event, qhp->ibqp.qp_context); spin_lock_irqsave(&chp->comp_handler_lock, flag); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); spin_unlock_irqrestore(&chp->comp_handler_lock, flag); } void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) { struct c4iw_cq *chp; struct c4iw_qp *qhp; u32 cqid; spin_lock(&dev->lock); qhp = get_qhp(dev, CQE_QPID(err_cqe)); if (!qhp) { printk(KERN_ERR MOD "BAD AE qpid 0x%x opcode %d " "status 0x%x type %d wrid.hi 0x%x wrid.lo 0x%x\n", CQE_QPID(err_cqe), CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe)); spin_unlock(&dev->lock); goto out; } if (SQ_TYPE(err_cqe)) cqid = qhp->attr.scq; else cqid = qhp->attr.rcq; chp = get_chp(dev, cqid); if (!chp) { printk(KERN_ERR MOD "BAD AE cqid 0x%x qpid 0x%x opcode %d " "status 0x%x type 
%d wrid.hi 0x%x wrid.lo 0x%x\n", cqid, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe), CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe)); spin_unlock(&dev->lock); goto out; } c4iw_qp_add_ref(&qhp->ibqp); atomic_inc(&chp->refcnt); spin_unlock(&dev->lock); /* Bad incoming write */ if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE)) { post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_REQ_ERR); goto done; } switch (CQE_STATUS(err_cqe)) { /* Completion Events */ case T4_ERR_SUCCESS: printk(KERN_ERR MOD "AE with status 0!\n"); break; case T4_ERR_STAG: case T4_ERR_PDID: case T4_ERR_QPID: case T4_ERR_ACCESS: case T4_ERR_WRAP: case T4_ERR_BOUND: case T4_ERR_INVALIDATE_SHARED_MR: case T4_ERR_INVALIDATE_MR_WITH_MW_BOUND: post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_ACCESS_ERR); break; /* Device Fatal Errors */ case T4_ERR_ECC: case T4_ERR_ECC_PSTAG: case T4_ERR_INTERNAL_ERR: post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_DEVICE_FATAL); break; /* QP Fatal Errors */ case T4_ERR_OUT_OF_RQE: case T4_ERR_PBL_ADDR_BOUND: case T4_ERR_CRC: case T4_ERR_MARKER: case T4_ERR_PDU_LEN_ERR: case T4_ERR_DDP_VERSION: case T4_ERR_RDMA_VERSION: case T4_ERR_OPCODE: case T4_ERR_DDP_QUEUE_NUM: case T4_ERR_MSN: case T4_ERR_TBIT: case T4_ERR_MO: case T4_ERR_MSN_GAP: case T4_ERR_MSN_RANGE: case T4_ERR_RQE_ADDR_BOUND: case T4_ERR_IRD_OVERFLOW: post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL); break; default: printk(KERN_ERR MOD "Unknown T4 status 0x%x QPID 0x%x\n", CQE_STATUS(err_cqe), qhp->wq.sq.qid); post_qp_event(dev, chp, qhp, err_cqe, IB_EVENT_QP_FATAL); break; } done: if (atomic_dec_and_test(&chp->refcnt)) wake_up(&chp->wait); c4iw_qp_rem_ref(&qhp->ibqp); out: return; } int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) { struct c4iw_cq *chp; unsigned long flag; chp = get_chp(dev, qid); if (chp) { spin_lock_irqsave(&chp->comp_handler_lock, flag); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); 
spin_unlock_irqrestore(&chp->comp_handler_lock, flag); } else PDBG("%s unknown cqid 0x%x\n", __func__, qid); return 0; }
gpl-2.0
GetOcean/ocean-linux
drivers/usb/host/ohci-ps3.c
5068
6458
/* * PS3 OHCI Host Controller driver * * Copyright (C) 2006 Sony Computer Entertainment Inc. * Copyright 2006 Sony Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <asm/firmware.h> #include <asm/ps3.h> static int ps3_ohci_hc_reset(struct usb_hcd *hcd) { struct ohci_hcd *ohci = hcd_to_ohci(hcd); ohci->flags |= OHCI_QUIRK_BE_MMIO; ohci_hcd_init(ohci); return ohci_init(ohci); } static int __devinit ps3_ohci_hc_start(struct usb_hcd *hcd) { int result; struct ohci_hcd *ohci = hcd_to_ohci(hcd); /* Handle root hub init quirk in spider south bridge. */ /* Also set PwrOn2PwrGood to 0x7f (254ms). 
*/ ohci_writel(ohci, 0x7f000000 | RH_A_PSM | RH_A_OCPM, &ohci->regs->roothub.a); ohci_writel(ohci, 0x00060000, &ohci->regs->roothub.b); result = ohci_run(ohci); if (result < 0) { err("can't start %s", hcd->self.bus_name); ohci_stop(hcd); } return result; } static const struct hc_driver ps3_ohci_hc_driver = { .description = hcd_name, .product_desc = "PS3 OHCI Host Controller", .hcd_priv_size = sizeof(struct ohci_hcd), .irq = ohci_irq, .flags = HCD_MEMORY | HCD_USB11, .reset = ps3_ohci_hc_reset, .start = ps3_ohci_hc_start, .stop = ohci_stop, .shutdown = ohci_shutdown, .urb_enqueue = ohci_urb_enqueue, .urb_dequeue = ohci_urb_dequeue, .endpoint_disable = ohci_endpoint_disable, .get_frame_number = ohci_get_frame, .hub_status_data = ohci_hub_status_data, .hub_control = ohci_hub_control, .start_port_reset = ohci_start_port_reset, #if defined(CONFIG_PM) .bus_suspend = ohci_bus_suspend, .bus_resume = ohci_bus_resume, #endif }; static int __devinit ps3_ohci_probe(struct ps3_system_bus_device *dev) { int result; struct usb_hcd *hcd; unsigned int virq; static u64 dummy_mask = DMA_BIT_MASK(32); if (usb_disabled()) { result = -ENODEV; goto fail_start; } result = ps3_open_hv_device(dev); if (result) { dev_dbg(&dev->core, "%s:%d: ps3_open_hv_device failed: %s\n", __func__, __LINE__, ps3_result(result)); result = -EPERM; goto fail_open; } result = ps3_dma_region_create(dev->d_region); if (result) { dev_dbg(&dev->core, "%s:%d: ps3_dma_region_create failed: " "(%d)\n", __func__, __LINE__, result); BUG_ON("check region type"); goto fail_dma_region; } result = ps3_mmio_region_create(dev->m_region); if (result) { dev_dbg(&dev->core, "%s:%d: ps3_map_mmio_region failed\n", __func__, __LINE__); result = -EPERM; goto fail_mmio_region; } dev_dbg(&dev->core, "%s:%d: mmio mapped_addr %lxh\n", __func__, __LINE__, dev->m_region->lpar_addr); result = ps3_io_irq_setup(PS3_BINDING_CPU_ANY, dev->interrupt_id, &virq); if (result) { dev_dbg(&dev->core, "%s:%d: ps3_construct_io_irq(%d) failed.\n", 
__func__, __LINE__, virq); result = -EPERM; goto fail_irq; } dev->core.dma_mask = &dummy_mask; /* FIXME: for improper usb code */ hcd = usb_create_hcd(&ps3_ohci_hc_driver, &dev->core, dev_name(&dev->core)); if (!hcd) { dev_dbg(&dev->core, "%s:%d: usb_create_hcd failed\n", __func__, __LINE__); result = -ENOMEM; goto fail_create_hcd; } hcd->rsrc_start = dev->m_region->lpar_addr; hcd->rsrc_len = dev->m_region->len; if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, hcd_name)) dev_dbg(&dev->core, "%s:%d: request_mem_region failed\n", __func__, __LINE__); hcd->regs = ioremap(dev->m_region->lpar_addr, dev->m_region->len); if (!hcd->regs) { dev_dbg(&dev->core, "%s:%d: ioremap failed\n", __func__, __LINE__); result = -EPERM; goto fail_ioremap; } dev_dbg(&dev->core, "%s:%d: hcd->rsrc_start %lxh\n", __func__, __LINE__, (unsigned long)hcd->rsrc_start); dev_dbg(&dev->core, "%s:%d: hcd->rsrc_len %lxh\n", __func__, __LINE__, (unsigned long)hcd->rsrc_len); dev_dbg(&dev->core, "%s:%d: hcd->regs %lxh\n", __func__, __LINE__, (unsigned long)hcd->regs); dev_dbg(&dev->core, "%s:%d: virq %lu\n", __func__, __LINE__, (unsigned long)virq); ps3_system_bus_set_drvdata(dev, hcd); result = usb_add_hcd(hcd, virq, 0); if (result) { dev_dbg(&dev->core, "%s:%d: usb_add_hcd failed (%d)\n", __func__, __LINE__, result); goto fail_add_hcd; } return result; fail_add_hcd: iounmap(hcd->regs); fail_ioremap: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); fail_create_hcd: ps3_io_irq_destroy(virq); fail_irq: ps3_free_mmio_region(dev->m_region); fail_mmio_region: ps3_dma_region_free(dev->d_region); fail_dma_region: ps3_close_hv_device(dev); fail_open: fail_start: return result; } static int ps3_ohci_remove(struct ps3_system_bus_device *dev) { unsigned int tmp; struct usb_hcd *hcd = ps3_system_bus_get_drvdata(dev); BUG_ON(!hcd); dev_dbg(&dev->core, "%s:%d: regs %p\n", __func__, __LINE__, hcd->regs); dev_dbg(&dev->core, "%s:%d: irq %u\n", __func__, __LINE__, hcd->irq); tmp = 
hcd->irq; ohci_shutdown(hcd); usb_remove_hcd(hcd); ps3_system_bus_set_drvdata(dev, NULL); BUG_ON(!hcd->regs); iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); usb_put_hcd(hcd); ps3_io_irq_destroy(tmp); ps3_free_mmio_region(dev->m_region); ps3_dma_region_free(dev->d_region); ps3_close_hv_device(dev); return 0; } static int __init ps3_ohci_driver_register(struct ps3_system_bus_driver *drv) { return firmware_has_feature(FW_FEATURE_PS3_LV1) ? ps3_system_bus_driver_register(drv) : 0; } static void ps3_ohci_driver_unregister(struct ps3_system_bus_driver *drv) { if (firmware_has_feature(FW_FEATURE_PS3_LV1)) ps3_system_bus_driver_unregister(drv); } MODULE_ALIAS(PS3_MODULE_ALIAS_OHCI); static struct ps3_system_bus_driver ps3_ohci_driver = { .core.name = "ps3-ohci-driver", .core.owner = THIS_MODULE, .match_id = PS3_MATCH_ID_OHCI, .probe = ps3_ohci_probe, .remove = ps3_ohci_remove, .shutdown = ps3_ohci_remove, };
gpl-2.0
flar2/flo-ElementalX
drivers/power/wm831x_backup.c
5068
5643
/* * Backup battery driver for Wolfson Microelectronics wm831x PMICs * * Copyright 2009 Wolfson Microelectronics PLC. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/slab.h> #include <linux/mfd/wm831x/core.h> #include <linux/mfd/wm831x/auxadc.h> #include <linux/mfd/wm831x/pmu.h> #include <linux/mfd/wm831x/pdata.h> struct wm831x_backup { struct wm831x *wm831x; struct power_supply backup; char name[20]; }; static int wm831x_backup_read_voltage(struct wm831x *wm831x, enum wm831x_auxadc src, union power_supply_propval *val) { int ret; ret = wm831x_auxadc_read_uv(wm831x, src); if (ret >= 0) val->intval = ret; return ret; } /********************************************************************* * Backup supply properties *********************************************************************/ static void wm831x_config_backup(struct wm831x *wm831x) { struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data; struct wm831x_backup_pdata *pdata; int ret, reg; if (!wm831x_pdata || !wm831x_pdata->backup) { dev_warn(wm831x->dev, "No backup battery charger configuration\n"); return; } pdata = wm831x_pdata->backup; reg = 0; if (pdata->charger_enable) reg |= WM831X_BKUP_CHG_ENA | WM831X_BKUP_BATT_DET_ENA; if (pdata->no_constant_voltage) reg |= WM831X_BKUP_CHG_MODE; switch (pdata->vlim) { case 2500: break; case 3100: reg |= WM831X_BKUP_CHG_VLIM; break; default: dev_err(wm831x->dev, "Invalid backup voltage limit %dmV\n", pdata->vlim); } switch (pdata->ilim) { case 100: break; case 200: reg |= 1; break; case 300: reg |= 2; break; case 400: reg |= 3; break; default: dev_err(wm831x->dev, "Invalid backup current limit %duA\n", pdata->ilim); } ret = wm831x_reg_unlock(wm831x); if (ret != 0) { dev_err(wm831x->dev, 
"Failed to unlock registers: %d\n", ret); return; } ret = wm831x_set_bits(wm831x, WM831X_BACKUP_CHARGER_CONTROL, WM831X_BKUP_CHG_ENA_MASK | WM831X_BKUP_CHG_MODE_MASK | WM831X_BKUP_BATT_DET_ENA_MASK | WM831X_BKUP_CHG_VLIM_MASK | WM831X_BKUP_CHG_ILIM_MASK, reg); if (ret != 0) dev_err(wm831x->dev, "Failed to set backup charger config: %d\n", ret); wm831x_reg_lock(wm831x); } static int wm831x_backup_get_prop(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct wm831x_backup *devdata = dev_get_drvdata(psy->dev->parent); struct wm831x *wm831x = devdata->wm831x; int ret = 0; ret = wm831x_reg_read(wm831x, WM831X_BACKUP_CHARGER_CONTROL); if (ret < 0) return ret; switch (psp) { case POWER_SUPPLY_PROP_STATUS: if (ret & WM831X_BKUP_CHG_STS) val->intval = POWER_SUPPLY_STATUS_CHARGING; else val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = wm831x_backup_read_voltage(wm831x, WM831X_AUX_BKUP_BATT, val); break; case POWER_SUPPLY_PROP_PRESENT: if (ret & WM831X_BKUP_CHG_STS) val->intval = 1; else val->intval = 0; break; default: ret = -EINVAL; break; } return ret; } static enum power_supply_property wm831x_backup_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_PRESENT, }; /********************************************************************* * Initialisation *********************************************************************/ static __devinit int wm831x_backup_probe(struct platform_device *pdev) { struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); struct wm831x_pdata *wm831x_pdata = wm831x->dev->platform_data; struct wm831x_backup *devdata; struct power_supply *backup; int ret; devdata = kzalloc(sizeof(struct wm831x_backup), GFP_KERNEL); if (devdata == NULL) return -ENOMEM; devdata->wm831x = wm831x; platform_set_drvdata(pdev, devdata); backup = &devdata->backup; /* We ignore configuration failures since we can still read * back the status 
without enabling the charger (which may * already be enabled anyway). */ wm831x_config_backup(wm831x); if (wm831x_pdata && wm831x_pdata->wm831x_num) snprintf(devdata->name, sizeof(devdata->name), "wm831x-backup.%d", wm831x_pdata->wm831x_num); else snprintf(devdata->name, sizeof(devdata->name), "wm831x-backup"); backup->name = devdata->name; backup->type = POWER_SUPPLY_TYPE_BATTERY; backup->properties = wm831x_backup_props; backup->num_properties = ARRAY_SIZE(wm831x_backup_props); backup->get_property = wm831x_backup_get_prop; ret = power_supply_register(&pdev->dev, backup); if (ret) goto err_kmalloc; return ret; err_kmalloc: kfree(devdata); return ret; } static __devexit int wm831x_backup_remove(struct platform_device *pdev) { struct wm831x_backup *devdata = platform_get_drvdata(pdev); power_supply_unregister(&devdata->backup); kfree(devdata->backup.name); kfree(devdata); return 0; } static struct platform_driver wm831x_backup_driver = { .probe = wm831x_backup_probe, .remove = __devexit_p(wm831x_backup_remove), .driver = { .name = "wm831x-backup", }, }; module_platform_driver(wm831x_backup_driver); MODULE_DESCRIPTION("Backup battery charger driver for WM831x PMICs"); MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm831x-backup");
gpl-2.0
jollaman999/jolla-kernel_G_Gen3-Stock
drivers/mmc/host/cb710-mmc.c
5068
22010
/* * cb710/mmc.c * * Copyright by Michał Mirosław, 2008-2009 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/delay.h> #include "cb710-mmc.h" static const u8 cb710_clock_divider_log2[8] = { /* 1, 2, 4, 8, 16, 32, 128, 512 */ 0, 1, 2, 3, 4, 5, 7, 9 }; #define CB710_MAX_DIVIDER_IDX \ (ARRAY_SIZE(cb710_clock_divider_log2) - 1) static const u8 cb710_src_freq_mhz[16] = { 33, 10, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85 }; static void cb710_mmc_select_clock_divider(struct mmc_host *mmc, int hz) { struct cb710_slot *slot = cb710_mmc_to_slot(mmc); struct pci_dev *pdev = cb710_slot_to_chip(slot)->pdev; u32 src_freq_idx; u32 divider_idx; int src_hz; /* on CB710 in HP nx9500: * src_freq_idx == 0 * indexes 1-7 work as written in the table * indexes 0,8-15 give no clock output */ pci_read_config_dword(pdev, 0x48, &src_freq_idx); src_freq_idx = (src_freq_idx >> 16) & 0xF; src_hz = cb710_src_freq_mhz[src_freq_idx] * 1000000; for (divider_idx = 0; divider_idx < CB710_MAX_DIVIDER_IDX; ++divider_idx) { if (hz >= src_hz >> cb710_clock_divider_log2[divider_idx]) break; } if (src_freq_idx) divider_idx |= 0x8; else if (divider_idx == 0) divider_idx = 1; cb710_pci_update_config_reg(pdev, 0x40, ~0xF0000000, divider_idx << 28); dev_dbg(cb710_slot_dev(slot), "clock set to %d Hz, wanted %d Hz; src_freq_idx = %d, divider_idx = %d|%d\n", src_hz >> cb710_clock_divider_log2[divider_idx & 7], hz, src_freq_idx, divider_idx & 7, divider_idx & 8); } static void __cb710_mmc_enable_irq(struct cb710_slot *slot, unsigned short enable, unsigned short mask) { /* clear global IE * - it gets set later if any interrupt sources are enabled */ mask |= CB710_MMC_IE_IRQ_ENABLE; /* look like interrupt is fired whenever * WORD[0x0C] & WORD[0x10] != 0; * -> bit 15 port 0x0C 
seems to be global interrupt enable */ enable = (cb710_read_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT) & ~mask) | enable; if (enable) enable |= CB710_MMC_IE_IRQ_ENABLE; cb710_write_port_16(slot, CB710_MMC_IRQ_ENABLE_PORT, enable); } static void cb710_mmc_enable_irq(struct cb710_slot *slot, unsigned short enable, unsigned short mask) { struct cb710_mmc_reader *reader = mmc_priv(cb710_slot_to_mmc(slot)); unsigned long flags; spin_lock_irqsave(&reader->irq_lock, flags); /* this is the only thing irq_lock protects */ __cb710_mmc_enable_irq(slot, enable, mask); spin_unlock_irqrestore(&reader->irq_lock, flags); } static void cb710_mmc_reset_events(struct cb710_slot *slot) { cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, 0xFF); cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, 0xFF); cb710_write_port_8(slot, CB710_MMC_STATUS2_PORT, 0xFF); } static void cb710_mmc_enable_4bit_data(struct cb710_slot *slot, int enable) { if (enable) cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, CB710_MMC_C1_4BIT_DATA_BUS, 0); else cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, CB710_MMC_C1_4BIT_DATA_BUS); } static int cb710_check_event(struct cb710_slot *slot, u8 what) { u16 status; status = cb710_read_port_16(slot, CB710_MMC_STATUS_PORT); if (status & CB710_MMC_S0_FIFO_UNDERFLOW) { /* it is just a guess, so log it */ dev_dbg(cb710_slot_dev(slot), "CHECK : ignoring bit 6 in status %04X\n", status); cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, CB710_MMC_S0_FIFO_UNDERFLOW); status &= ~CB710_MMC_S0_FIFO_UNDERFLOW; } if (status & CB710_MMC_STATUS_ERROR_EVENTS) { dev_dbg(cb710_slot_dev(slot), "CHECK : returning EIO on status %04X\n", status); cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, status & 0xFF); cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, CB710_MMC_S1_RESET); return -EIO; } /* 'what' is a bit in MMC_STATUS1 */ if ((status >> 8) & what) { cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, what); return 1; } return 0; } static int cb710_wait_for_event(struct cb710_slot 
*slot, u8 what) { int err = 0; unsigned limit = 2000000; /* FIXME: real timeout */ #ifdef CONFIG_CB710_DEBUG u32 e, x; e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT); #endif while (!(err = cb710_check_event(slot, what))) { if (!--limit) { cb710_dump_regs(cb710_slot_to_chip(slot), CB710_DUMP_REGS_MMC); err = -ETIMEDOUT; break; } udelay(1); } #ifdef CONFIG_CB710_DEBUG x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT); limit = 2000000 - limit; if (limit > 100) dev_dbg(cb710_slot_dev(slot), "WAIT10: waited %d loops, what %d, entry val %08X, exit val %08X\n", limit, what, e, x); #endif return err < 0 ? err : 0; } static int cb710_wait_while_busy(struct cb710_slot *slot, uint8_t mask) { unsigned limit = 500000; /* FIXME: real timeout */ int err = 0; #ifdef CONFIG_CB710_DEBUG u32 e, x; e = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT); #endif while (cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & mask) { if (!--limit) { cb710_dump_regs(cb710_slot_to_chip(slot), CB710_DUMP_REGS_MMC); err = -ETIMEDOUT; break; } udelay(1); } #ifdef CONFIG_CB710_DEBUG x = cb710_read_port_32(slot, CB710_MMC_STATUS_PORT); limit = 500000 - limit; if (limit > 100) dev_dbg(cb710_slot_dev(slot), "WAIT12: waited %d loops, mask %02X, entry val %08X, exit val %08X\n", limit, mask, e, x); #endif return err; } static void cb710_mmc_set_transfer_size(struct cb710_slot *slot, size_t count, size_t blocksize) { cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); cb710_write_port_32(slot, CB710_MMC_TRANSFER_SIZE_PORT, ((count - 1) << 16)|(blocksize - 1)); dev_vdbg(cb710_slot_dev(slot), "set up for %zu block%s of %zu bytes\n", count, count == 1 ? 
"" : "s", blocksize); } static void cb710_mmc_fifo_hack(struct cb710_slot *slot) { /* without this, received data is prepended with 8-bytes of zeroes */ u32 r1, r2; int ok = 0; r1 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT); r2 = cb710_read_port_32(slot, CB710_MMC_DATA_PORT); if (cb710_read_port_8(slot, CB710_MMC_STATUS0_PORT) & CB710_MMC_S0_FIFO_UNDERFLOW) { cb710_write_port_8(slot, CB710_MMC_STATUS0_PORT, CB710_MMC_S0_FIFO_UNDERFLOW); ok = 1; } dev_dbg(cb710_slot_dev(slot), "FIFO-read-hack: expected STATUS0 bit was %s\n", ok ? "set." : "NOT SET!"); dev_dbg(cb710_slot_dev(slot), "FIFO-read-hack: dwords ignored: %08X %08X - %s\n", r1, r2, (r1|r2) ? "BAD (NOT ZERO)!" : "ok"); } static int cb710_mmc_receive_pio(struct cb710_slot *slot, struct sg_mapping_iter *miter, size_t dw_count) { if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & CB710_MMC_S2_FIFO_READY)) { int err = cb710_wait_for_event(slot, CB710_MMC_S1_PIO_TRANSFER_DONE); if (err) return err; } cb710_sg_dwiter_write_from_io(miter, slot->iobase + CB710_MMC_DATA_PORT, dw_count); return 0; } static bool cb710_is_transfer_size_supported(struct mmc_data *data) { return !(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8)); } static int cb710_mmc_receive(struct cb710_slot *slot, struct mmc_data *data) { struct sg_mapping_iter miter; size_t len, blocks = data->blocks; int err = 0; /* TODO: I don't know how/if the hardware handles non-16B-boundary blocks * except single 8B block */ if (unlikely(data->blksz & 15 && (data->blocks != 1 || data->blksz != 8))) return -EINVAL; sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_TO_SG); cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 15, CB710_MMC_C2_READ_PIO_SIZE_MASK); cb710_mmc_fifo_hack(slot); while (blocks-- > 0) { len = data->blksz; while (len >= 16) { err = cb710_mmc_receive_pio(slot, &miter, 4); if (err) goto out; len -= 16; } if (!len) continue; cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, len - 1, CB710_MMC_C2_READ_PIO_SIZE_MASK); len 
= (len >= 8) ? 4 : 2; err = cb710_mmc_receive_pio(slot, &miter, len); if (err) goto out; } out: sg_miter_stop(&miter); return err; } static int cb710_mmc_send(struct cb710_slot *slot, struct mmc_data *data) { struct sg_mapping_iter miter; size_t len, blocks = data->blocks; int err = 0; /* TODO: I don't know how/if the hardware handles multiple * non-16B-boundary blocks */ if (unlikely(data->blocks > 1 && data->blksz & 15)) return -EINVAL; sg_miter_start(&miter, data->sg, data->sg_len, SG_MITER_FROM_SG); cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 0, CB710_MMC_C2_READ_PIO_SIZE_MASK); while (blocks-- > 0) { len = (data->blksz + 15) >> 4; do { if (!(cb710_read_port_8(slot, CB710_MMC_STATUS2_PORT) & CB710_MMC_S2_FIFO_EMPTY)) { err = cb710_wait_for_event(slot, CB710_MMC_S1_PIO_TRANSFER_DONE); if (err) goto out; } cb710_sg_dwiter_read_to_io(&miter, slot->iobase + CB710_MMC_DATA_PORT, 4); } while (--len); } out: sg_miter_stop(&miter); return err; } static u16 cb710_encode_cmd_flags(struct cb710_mmc_reader *reader, struct mmc_command *cmd) { unsigned int flags = cmd->flags; u16 cb_flags = 0; /* Windows driver returned 0 for commands for which no response * is expected. It happened that there were only two such commands * used: MMC_GO_IDLE_STATE and MMC_GO_INACTIVE_STATE so it might * as well be a bug in that driver. * * Original driver set bit 14 for MMC/SD application * commands. There's no difference 'on the wire' and * it apparently works without it anyway. 
*/ switch (flags & MMC_CMD_MASK) { case MMC_CMD_AC: cb_flags = CB710_MMC_CMD_AC; break; case MMC_CMD_ADTC: cb_flags = CB710_MMC_CMD_ADTC; break; case MMC_CMD_BC: cb_flags = CB710_MMC_CMD_BC; break; case MMC_CMD_BCR: cb_flags = CB710_MMC_CMD_BCR; break; } if (flags & MMC_RSP_BUSY) cb_flags |= CB710_MMC_RSP_BUSY; cb_flags |= cmd->opcode << CB710_MMC_CMD_CODE_SHIFT; if (cmd->data && (cmd->data->flags & MMC_DATA_READ)) cb_flags |= CB710_MMC_DATA_READ; if (flags & MMC_RSP_PRESENT) { /* Windows driver set 01 at bits 4,3 except for * MMC_SET_BLOCKLEN where it set 10. Maybe the * hardware can do something special about this * command? The original driver looks buggy/incomplete * anyway so we ignore this for now. * * I assume that 00 here means no response is expected. */ cb_flags |= CB710_MMC_RSP_PRESENT; if (flags & MMC_RSP_136) cb_flags |= CB710_MMC_RSP_136; if (!(flags & MMC_RSP_CRC)) cb_flags |= CB710_MMC_RSP_NO_CRC; } return cb_flags; } static void cb710_receive_response(struct cb710_slot *slot, struct mmc_command *cmd) { unsigned rsp_opcode, wanted_opcode; /* Looks like final byte with CRC is always stripped (same as SDHCI) */ if (cmd->flags & MMC_RSP_136) { u32 resp[4]; resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE3_PORT); resp[1] = cb710_read_port_32(slot, CB710_MMC_RESPONSE2_PORT); resp[2] = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT); resp[3] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT); rsp_opcode = resp[0] >> 24; cmd->resp[0] = (resp[0] << 8)|(resp[1] >> 24); cmd->resp[1] = (resp[1] << 8)|(resp[2] >> 24); cmd->resp[2] = (resp[2] << 8)|(resp[3] >> 24); cmd->resp[3] = (resp[3] << 8); } else { rsp_opcode = cb710_read_port_32(slot, CB710_MMC_RESPONSE1_PORT) & 0x3F; cmd->resp[0] = cb710_read_port_32(slot, CB710_MMC_RESPONSE0_PORT); } wanted_opcode = (cmd->flags & MMC_RSP_OPCODE) ? 
cmd->opcode : 0x3F; if (rsp_opcode != wanted_opcode) cmd->error = -EILSEQ; } static int cb710_mmc_transfer_data(struct cb710_slot *slot, struct mmc_data *data) { int error, to; if (data->flags & MMC_DATA_READ) error = cb710_mmc_receive(slot, data); else error = cb710_mmc_send(slot, data); to = cb710_wait_for_event(slot, CB710_MMC_S1_DATA_TRANSFER_DONE); if (!error) error = to; if (!error) data->bytes_xfered = data->blksz * data->blocks; return error; } static int cb710_mmc_command(struct mmc_host *mmc, struct mmc_command *cmd) { struct cb710_slot *slot = cb710_mmc_to_slot(mmc); struct cb710_mmc_reader *reader = mmc_priv(mmc); struct mmc_data *data = cmd->data; u16 cb_cmd = cb710_encode_cmd_flags(reader, cmd); dev_dbg(cb710_slot_dev(slot), "cmd request: 0x%04X\n", cb_cmd); if (data) { if (!cb710_is_transfer_size_supported(data)) { data->error = -EINVAL; return -1; } cb710_mmc_set_transfer_size(slot, data->blocks, data->blksz); } cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20|CB710_MMC_S2_BUSY_10); cb710_write_port_16(slot, CB710_MMC_CMD_TYPE_PORT, cb_cmd); cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); cb710_write_port_32(slot, CB710_MMC_CMD_PARAM_PORT, cmd->arg); cb710_mmc_reset_events(slot); cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x01, 0); cmd->error = cb710_wait_for_event(slot, CB710_MMC_S1_COMMAND_SENT); if (cmd->error) return -1; if (cmd->flags & MMC_RSP_PRESENT) { cb710_receive_response(slot, cmd); if (cmd->error) return -1; } if (data) data->error = cb710_mmc_transfer_data(slot, data); return 0; } static void cb710_mmc_request(struct mmc_host *mmc, struct mmc_request *mrq) { struct cb710_slot *slot = cb710_mmc_to_slot(mmc); struct cb710_mmc_reader *reader = mmc_priv(mmc); WARN_ON(reader->mrq != NULL); reader->mrq = mrq; cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0); if (!cb710_mmc_command(mmc, mrq->cmd) && mrq->stop) cb710_mmc_command(mmc, mrq->stop); 
tasklet_schedule(&reader->finish_req_tasklet); } static int cb710_mmc_powerup(struct cb710_slot *slot) { #ifdef CONFIG_CB710_DEBUG struct cb710_chip *chip = cb710_slot_to_chip(slot); #endif int err; /* a lot of magic for now */ dev_dbg(cb710_slot_dev(slot), "bus powerup\n"); cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); if (unlikely(err)) return err; cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x80, 0); cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x80, 0); cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); mdelay(1); dev_dbg(cb710_slot_dev(slot), "after delay 1\n"); cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); if (unlikely(err)) return err; cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x09, 0); cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); mdelay(1); dev_dbg(cb710_slot_dev(slot), "after delay 2\n"); cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); if (unlikely(err)) return err; cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x08); cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); mdelay(2); dev_dbg(cb710_slot_dev(slot), "after delay 3\n"); cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0); cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0x70, 0); cb710_modify_port_8(slot, CB710_MMC_CONFIG2_PORT, 0x80, 0); cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0x03, 0); cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); err = cb710_wait_while_busy(slot, CB710_MMC_S2_BUSY_20); if (unlikely(err)) return err; /* This port behaves weird: quick byte reads of 0x08,0x09 return * 0xFF,0x00 after writing 0xFFFF to 0x08; it works correctly when * read/written from userspace... What am I missing here? 
* (it doesn't depend on write-to-read delay) */ cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0xFFFF); cb710_modify_port_8(slot, CB710_MMC_CONFIG0_PORT, 0x06, 0); cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); dev_dbg(cb710_slot_dev(slot), "bus powerup finished\n"); return cb710_check_event(slot, 0); } static void cb710_mmc_powerdown(struct cb710_slot *slot) { cb710_modify_port_8(slot, CB710_MMC_CONFIG1_PORT, 0, 0x81); cb710_modify_port_8(slot, CB710_MMC_CONFIG3_PORT, 0, 0x80); } static void cb710_mmc_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct cb710_slot *slot = cb710_mmc_to_slot(mmc); struct cb710_mmc_reader *reader = mmc_priv(mmc); int err; cb710_mmc_select_clock_divider(mmc, ios->clock); if (ios->power_mode != reader->last_power_mode) switch (ios->power_mode) { case MMC_POWER_ON: err = cb710_mmc_powerup(slot); if (err) { dev_warn(cb710_slot_dev(slot), "powerup failed (%d)- retrying\n", err); cb710_mmc_powerdown(slot); udelay(1); err = cb710_mmc_powerup(slot); if (err) dev_warn(cb710_slot_dev(slot), "powerup retry failed (%d) - expect errors\n", err); } reader->last_power_mode = MMC_POWER_ON; break; case MMC_POWER_OFF: cb710_mmc_powerdown(slot); reader->last_power_mode = MMC_POWER_OFF; break; case MMC_POWER_UP: default: /* ignore */; } cb710_mmc_enable_4bit_data(slot, ios->bus_width != MMC_BUS_WIDTH_1); cb710_mmc_enable_irq(slot, CB710_MMC_IE_TEST_MASK, 0); } static int cb710_mmc_get_ro(struct mmc_host *mmc) { struct cb710_slot *slot = cb710_mmc_to_slot(mmc); return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT) & CB710_MMC_S3_WRITE_PROTECTED; } static int cb710_mmc_get_cd(struct mmc_host *mmc) { struct cb710_slot *slot = cb710_mmc_to_slot(mmc); return cb710_read_port_8(slot, CB710_MMC_STATUS3_PORT) & CB710_MMC_S3_CARD_DETECTED; } static int cb710_mmc_irq_handler(struct cb710_slot *slot) { struct mmc_host *mmc = cb710_slot_to_mmc(slot); struct cb710_mmc_reader *reader = mmc_priv(mmc); u32 status, config1, config2, irqen; status = 
cb710_read_port_32(slot, CB710_MMC_STATUS_PORT); irqen = cb710_read_port_32(slot, CB710_MMC_IRQ_ENABLE_PORT); config2 = cb710_read_port_32(slot, CB710_MMC_CONFIGB_PORT); config1 = cb710_read_port_32(slot, CB710_MMC_CONFIG_PORT); dev_dbg(cb710_slot_dev(slot), "interrupt; status: %08X, " "ie: %08X, c2: %08X, c1: %08X\n", status, irqen, config2, config1); if (status & (CB710_MMC_S1_CARD_CHANGED << 8)) { /* ack the event */ cb710_write_port_8(slot, CB710_MMC_STATUS1_PORT, CB710_MMC_S1_CARD_CHANGED); if ((irqen & CB710_MMC_IE_CISTATUS_MASK) == CB710_MMC_IE_CISTATUS_MASK) mmc_detect_change(mmc, HZ/5); } else { dev_dbg(cb710_slot_dev(slot), "unknown interrupt (test)\n"); spin_lock(&reader->irq_lock); __cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_TEST_MASK); spin_unlock(&reader->irq_lock); } return 1; } static void cb710_mmc_finish_request_tasklet(unsigned long data) { struct mmc_host *mmc = (void *)data; struct cb710_mmc_reader *reader = mmc_priv(mmc); struct mmc_request *mrq = reader->mrq; reader->mrq = NULL; mmc_request_done(mmc, mrq); } static const struct mmc_host_ops cb710_mmc_host = { .request = cb710_mmc_request, .set_ios = cb710_mmc_set_ios, .get_ro = cb710_mmc_get_ro, .get_cd = cb710_mmc_get_cd, }; #ifdef CONFIG_PM static int cb710_mmc_suspend(struct platform_device *pdev, pm_message_t state) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); struct mmc_host *mmc = cb710_slot_to_mmc(slot); int err; err = mmc_suspend_host(mmc); if (err) return err; cb710_mmc_enable_irq(slot, 0, ~0); return 0; } static int cb710_mmc_resume(struct platform_device *pdev) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); struct mmc_host *mmc = cb710_slot_to_mmc(slot); cb710_mmc_enable_irq(slot, 0, ~0); return mmc_resume_host(mmc); } #endif /* CONFIG_PM */ static int __devinit cb710_mmc_init(struct platform_device *pdev) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); struct cb710_chip *chip = cb710_slot_to_chip(slot); struct mmc_host *mmc; struct cb710_mmc_reader *reader; 
int err; u32 val; mmc = mmc_alloc_host(sizeof(*reader), cb710_slot_dev(slot)); if (!mmc) return -ENOMEM; dev_set_drvdata(&pdev->dev, mmc); /* harmless (maybe) magic */ pci_read_config_dword(chip->pdev, 0x48, &val); val = cb710_src_freq_mhz[(val >> 16) & 0xF]; dev_dbg(cb710_slot_dev(slot), "source frequency: %dMHz\n", val); val *= 1000000; mmc->ops = &cb710_mmc_host; mmc->f_max = val; mmc->f_min = val >> cb710_clock_divider_log2[CB710_MAX_DIVIDER_IDX]; mmc->ocr_avail = MMC_VDD_32_33|MMC_VDD_33_34; mmc->caps = MMC_CAP_4_BIT_DATA; reader = mmc_priv(mmc); tasklet_init(&reader->finish_req_tasklet, cb710_mmc_finish_request_tasklet, (unsigned long)mmc); spin_lock_init(&reader->irq_lock); cb710_dump_regs(chip, CB710_DUMP_REGS_MMC); cb710_mmc_enable_irq(slot, 0, ~0); cb710_set_irq_handler(slot, cb710_mmc_irq_handler); err = mmc_add_host(mmc); if (unlikely(err)) goto err_free_mmc; dev_dbg(cb710_slot_dev(slot), "mmc_hostname is %s\n", mmc_hostname(mmc)); cb710_mmc_enable_irq(slot, CB710_MMC_IE_CARD_INSERTION_STATUS, 0); return 0; err_free_mmc: dev_dbg(cb710_slot_dev(slot), "mmc_add_host() failed: %d\n", err); cb710_set_irq_handler(slot, NULL); mmc_free_host(mmc); return err; } static int __devexit cb710_mmc_exit(struct platform_device *pdev) { struct cb710_slot *slot = cb710_pdev_to_slot(pdev); struct mmc_host *mmc = cb710_slot_to_mmc(slot); struct cb710_mmc_reader *reader = mmc_priv(mmc); cb710_mmc_enable_irq(slot, 0, CB710_MMC_IE_CARD_INSERTION_STATUS); mmc_remove_host(mmc); /* IRQs should be disabled now, but let's stay on the safe side */ cb710_mmc_enable_irq(slot, 0, ~0); cb710_set_irq_handler(slot, NULL); /* clear config ports - just in case */ cb710_write_port_32(slot, CB710_MMC_CONFIG_PORT, 0); cb710_write_port_16(slot, CB710_MMC_CONFIGB_PORT, 0); tasklet_kill(&reader->finish_req_tasklet); mmc_free_host(mmc); return 0; } static struct platform_driver cb710_mmc_driver = { .driver.name = "cb710-mmc", .probe = cb710_mmc_init, .remove = __devexit_p(cb710_mmc_exit), #ifdef 
CONFIG_PM .suspend = cb710_mmc_suspend, .resume = cb710_mmc_resume, #endif }; module_platform_driver(cb710_mmc_driver); MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>"); MODULE_DESCRIPTION("ENE CB710 memory card reader driver - MMC/SD part"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:cb710-mmc");
gpl-2.0
caoxin1988/linux-3.0.86
sound/usb/usx2y/usX2Yhwdep.c
9164
7517
/* * Driver for Tascam US-X2Y USB soundcards * * FPGA Loader + ALSA Startup * * Copyright (c) 2003 by Karsten Wiese <annabellesgarden@yahoo.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/usb.h> #include <sound/core.h> #include <sound/memalloc.h> #include <sound/pcm.h> #include <sound/hwdep.h> #include "usx2y.h" #include "usbusx2y.h" #include "usX2Yhwdep.h" static int snd_us428ctls_vm_fault(struct vm_area_struct *area, struct vm_fault *vmf) { unsigned long offset; struct page * page; void *vaddr; snd_printdd("ENTER, start %lXh, pgoff %ld\n", area->vm_start, vmf->pgoff); offset = vmf->pgoff << PAGE_SHIFT; vaddr = (char*)((struct usX2Ydev *)area->vm_private_data)->us428ctls_sharedmem + offset; page = virt_to_page(vaddr); get_page(page); vmf->page = page; snd_printdd("vaddr=%p made us428ctls_vm_fault() page %p\n", vaddr, page); return 0; } static const struct vm_operations_struct us428ctls_vm_ops = { .fault = snd_us428ctls_vm_fault, }; static int snd_us428ctls_mmap(struct snd_hwdep * hw, struct file *filp, struct vm_area_struct *area) { unsigned long size = (unsigned long)(area->vm_end - area->vm_start); struct usX2Ydev *us428 = hw->private_data; // FIXME this hwdep interface is used twice: fpga download and mmap for controlling Lights etc. 
Maybe better using 2 hwdep devs? // so as long as the device isn't fully initialised yet we return -EBUSY here. if (!(us428->chip_status & USX2Y_STAT_CHIP_INIT)) return -EBUSY; /* if userspace tries to mmap beyond end of our buffer, fail */ if (size > PAGE_ALIGN(sizeof(struct us428ctls_sharedmem))) { snd_printd( "%lu > %lu\n", size, (unsigned long)sizeof(struct us428ctls_sharedmem)); return -EINVAL; } if (!us428->us428ctls_sharedmem) { init_waitqueue_head(&us428->us428ctls_wait_queue_head); if(!(us428->us428ctls_sharedmem = snd_malloc_pages(sizeof(struct us428ctls_sharedmem), GFP_KERNEL))) return -ENOMEM; memset(us428->us428ctls_sharedmem, -1, sizeof(struct us428ctls_sharedmem)); us428->us428ctls_sharedmem->CtlSnapShotLast = -2; } area->vm_ops = &us428ctls_vm_ops; area->vm_flags |= VM_RESERVED | VM_DONTEXPAND; area->vm_private_data = hw->private_data; return 0; } static unsigned int snd_us428ctls_poll(struct snd_hwdep *hw, struct file *file, poll_table *wait) { unsigned int mask = 0; struct usX2Ydev *us428 = hw->private_data; struct us428ctls_sharedmem *shm = us428->us428ctls_sharedmem; if (us428->chip_status & USX2Y_STAT_CHIP_HUP) return POLLHUP; poll_wait(file, &us428->us428ctls_wait_queue_head, wait); if (shm != NULL && shm->CtlSnapShotLast != shm->CtlSnapShotRed) mask |= POLLIN; return mask; } static int snd_usX2Y_hwdep_dsp_status(struct snd_hwdep *hw, struct snd_hwdep_dsp_status *info) { static char *type_ids[USX2Y_TYPE_NUMS] = { [USX2Y_TYPE_122] = "us122", [USX2Y_TYPE_224] = "us224", [USX2Y_TYPE_428] = "us428", }; struct usX2Ydev *us428 = hw->private_data; int id = -1; switch (le16_to_cpu(us428->dev->descriptor.idProduct)) { case USB_ID_US122: id = USX2Y_TYPE_122; break; case USB_ID_US224: id = USX2Y_TYPE_224; break; case USB_ID_US428: id = USX2Y_TYPE_428; break; } if (0 > id) return -ENODEV; strcpy(info->id, type_ids[id]); info->num_dsps = 2; // 0: Prepad Data, 1: FPGA Code if (us428->chip_status & USX2Y_STAT_CHIP_INIT) info->chip_ready = 1; info->version = 
USX2Y_DRIVER_VERSION; return 0; } static int usX2Y_create_usbmidi(struct snd_card *card) { static struct snd_usb_midi_endpoint_info quirk_data_1 = { .out_ep = 0x06, .in_ep = 0x06, .out_cables = 0x001, .in_cables = 0x001 }; static struct snd_usb_audio_quirk quirk_1 = { .vendor_name = "TASCAM", .product_name = NAME_ALLCAPS, .ifnum = 0, .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = &quirk_data_1 }; static struct snd_usb_midi_endpoint_info quirk_data_2 = { .out_ep = 0x06, .in_ep = 0x06, .out_cables = 0x003, .in_cables = 0x003 }; static struct snd_usb_audio_quirk quirk_2 = { .vendor_name = "TASCAM", .product_name = "US428", .ifnum = 0, .type = QUIRK_MIDI_FIXED_ENDPOINT, .data = &quirk_data_2 }; struct usb_device *dev = usX2Y(card)->dev; struct usb_interface *iface = usb_ifnum_to_if(dev, 0); struct snd_usb_audio_quirk *quirk = le16_to_cpu(dev->descriptor.idProduct) == USB_ID_US428 ? &quirk_2 : &quirk_1; snd_printdd("usX2Y_create_usbmidi \n"); return snd_usbmidi_create(card, iface, &usX2Y(card)->midi_list, quirk); } static int usX2Y_create_alsa_devices(struct snd_card *card) { int err; do { if ((err = usX2Y_create_usbmidi(card)) < 0) { snd_printk(KERN_ERR "usX2Y_create_alsa_devices: usX2Y_create_usbmidi error %i \n", err); break; } if ((err = usX2Y_audio_create(card)) < 0) break; if ((err = usX2Y_hwdep_pcm_new(card)) < 0) break; if ((err = snd_card_register(card)) < 0) break; } while (0); return err; } static int snd_usX2Y_hwdep_dsp_load(struct snd_hwdep *hw, struct snd_hwdep_dsp_image *dsp) { struct usX2Ydev *priv = hw->private_data; int lret, err = -EINVAL; snd_printdd( "dsp_load %s\n", dsp->name); if (access_ok(VERIFY_READ, dsp->image, dsp->length)) { struct usb_device* dev = priv->dev; char *buf; buf = memdup_user(dsp->image, dsp->length); if (IS_ERR(buf)) return PTR_ERR(buf); err = usb_set_interface(dev, 0, 1); if (err) snd_printk(KERN_ERR "usb_set_interface error \n"); else err = usb_bulk_msg(dev, usb_sndbulkpipe(dev, 2), buf, dsp->length, &lret, 6000); kfree(buf); } 
if (err) return err; if (dsp->index == 1) { msleep(250); // give the device some time err = usX2Y_AsyncSeq04_init(priv); if (err) { snd_printk(KERN_ERR "usX2Y_AsyncSeq04_init error \n"); return err; } err = usX2Y_In04_init(priv); if (err) { snd_printk(KERN_ERR "usX2Y_In04_init error \n"); return err; } err = usX2Y_create_alsa_devices(hw->card); if (err) { snd_printk(KERN_ERR "usX2Y_create_alsa_devices error %i \n", err); snd_card_free(hw->card); return err; } priv->chip_status |= USX2Y_STAT_CHIP_INIT; snd_printdd("%s: alsa all started\n", hw->name); } return err; } int usX2Y_hwdep_new(struct snd_card *card, struct usb_device* device) { int err; struct snd_hwdep *hw; if ((err = snd_hwdep_new(card, SND_USX2Y_LOADER_ID, 0, &hw)) < 0) return err; hw->iface = SNDRV_HWDEP_IFACE_USX2Y; hw->private_data = usX2Y(card); hw->ops.dsp_status = snd_usX2Y_hwdep_dsp_status; hw->ops.dsp_load = snd_usX2Y_hwdep_dsp_load; hw->ops.mmap = snd_us428ctls_mmap; hw->ops.poll = snd_us428ctls_poll; hw->exclusive = 1; sprintf(hw->name, "/proc/bus/usb/%03d/%03d", device->bus->busnum, device->devnum); return 0; }
gpl-2.0
jgcaap/boeffla
arch/x86/kernel/acpi/realmode/wakemain.c
9676
1537
#include "wakeup.h" #include "boot.h" static void udelay(int loops) { while (loops--) io_delay(); /* Approximately 1 us */ } static void beep(unsigned int hz) { u8 enable; if (!hz) { enable = 0x00; /* Turn off speaker */ } else { u16 div = 1193181/hz; outb(0xb6, 0x43); /* Ctr 2, squarewave, load, binary */ io_delay(); outb(div, 0x42); /* LSB of counter */ io_delay(); outb(div >> 8, 0x42); /* MSB of counter */ io_delay(); enable = 0x03; /* Turn on speaker */ } inb(0x61); /* Dummy read of System Control Port B */ io_delay(); outb(enable, 0x61); /* Enable timer 2 output to speaker */ io_delay(); } #define DOT_HZ 880 #define DASH_HZ 587 #define US_PER_DOT 125000 /* Okay, this is totally silly, but it's kind of fun. */ static void send_morse(const char *pattern) { char s; while ((s = *pattern++)) { switch (s) { case '.': beep(DOT_HZ); udelay(US_PER_DOT); beep(0); udelay(US_PER_DOT); break; case '-': beep(DASH_HZ); udelay(US_PER_DOT * 3); beep(0); udelay(US_PER_DOT); break; default: /* Assume it's a space */ udelay(US_PER_DOT * 3); break; } } } void main(void) { /* Kill machine if structures are wrong */ if (wakeup_header.real_magic != 0x12345678) while (1); if (wakeup_header.realmode_flags & 4) send_morse("...-"); if (wakeup_header.realmode_flags & 1) asm volatile("lcallw $0xc000,$3"); if (wakeup_header.realmode_flags & 2) { /* Need to call BIOS */ probe_cards(0); set_mode(wakeup_header.video_mode); } }
gpl-2.0
latlontude/linux
arch/ia64/kernel/signal.c
461
16571
/* * Architecture-specific signal handling support. * * Copyright (C) 1999-2004 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * * Derived from i386 and Alpha versions. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/ptrace.h> #include <linux/tracehook.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/unistd.h> #include <linux/wait.h> #include <asm/intrinsics.h> #include <asm/uaccess.h> #include <asm/rse.h> #include <asm/sigcontext.h> #include "sigframe.h" #define DEBUG_SIG 0 #define STACK_ALIGN 16 /* minimal alignment for stack pointer */ #if _NSIG_WORDS > 1 # define PUT_SIGSET(k,u) __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t)) # define GET_SIGSET(k,u) __copy_from_user((k)->sig, (u)->sig, sizeof(sigset_t)) #else # define PUT_SIGSET(k,u) __put_user((k)->sig[0], &(u)->sig[0]) # define GET_SIGSET(k,u) __get_user((k)->sig[0], &(u)->sig[0]) #endif static long restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr) { unsigned long ip, flags, nat, um, cfm, rsc; long err; /* Always make any pending restarted system calls return -EINTR */ current_thread_info()->restart_block.fn = do_no_restart_syscall; /* restore scratch that always needs gets updated during signal delivery: */ err = __get_user(flags, &sc->sc_flags); err |= __get_user(nat, &sc->sc_nat); err |= __get_user(ip, &sc->sc_ip); /* instruction pointer */ err |= __get_user(cfm, &sc->sc_cfm); err |= __get_user(um, &sc->sc_um); /* user mask */ err |= __get_user(rsc, &sc->sc_ar_rsc); err |= __get_user(scr->pt.ar_unat, &sc->sc_ar_unat); err |= __get_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); err |= __get_user(scr->pt.ar_pfs, &sc->sc_ar_pfs); err |= __get_user(scr->pt.pr, &sc->sc_pr); /* predicates */ err |= __get_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */ err |= __get_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */ err |= 
__copy_from_user(&scr->pt.r1, &sc->sc_gr[1], 8); /* r1 */ err |= __copy_from_user(&scr->pt.r8, &sc->sc_gr[8], 4*8); /* r8-r11 */ err |= __copy_from_user(&scr->pt.r12, &sc->sc_gr[12], 2*8); /* r12-r13 */ err |= __copy_from_user(&scr->pt.r15, &sc->sc_gr[15], 8); /* r15 */ scr->pt.cr_ifs = cfm | (1UL << 63); scr->pt.ar_rsc = rsc | (3 << 2); /* force PL3 */ /* establish new instruction pointer: */ scr->pt.cr_iip = ip & ~0x3UL; ia64_psr(&scr->pt)->ri = ip & 0x3; scr->pt.cr_ipsr = (scr->pt.cr_ipsr & ~IA64_PSR_UM) | (um & IA64_PSR_UM); scr->scratch_unat = ia64_put_scratch_nat_bits(&scr->pt, nat); if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) { /* Restore most scratch-state only when not in syscall. */ err |= __get_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ err |= __get_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ err |= __get_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */ err |= __copy_from_user(&scr->pt.ar_csd, &sc->sc_ar25, 2*8); /* ar.csd & ar.ssd */ err |= __copy_from_user(&scr->pt.r2, &sc->sc_gr[2], 2*8); /* r2-r3 */ err |= __copy_from_user(&scr->pt.r16, &sc->sc_gr[16], 16*8); /* r16-r31 */ } if ((flags & IA64_SC_FLAG_FPH_VALID) != 0) { struct ia64_psr *psr = ia64_psr(&scr->pt); err |= __copy_from_user(current->thread.fph, &sc->sc_fr[32], 96*16); psr->mfh = 0; /* drop signal handler's fph contents... */ preempt_disable(); if (psr->dfh) ia64_drop_fpu(current); else { /* We already own the local fph, otherwise psr->dfh wouldn't be 0. */ __ia64_load_fpu(current->thread.fph); ia64_set_local_fpu_owner(current); } preempt_enable(); } return err; } int copy_siginfo_to_user (siginfo_t __user *to, const siginfo_t *from) { if (!access_ok(VERIFY_WRITE, to, sizeof(siginfo_t))) return -EFAULT; if (from->si_code < 0) { if (__copy_to_user(to, from, sizeof(siginfo_t))) return -EFAULT; return 0; } else { int err; /* * If you change siginfo_t structure, please be sure this code is fixed * accordingly. 
It should never copy any pad contained in the structure * to avoid security leaks, but must copy the generic 3 ints plus the * relevant union member. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); err |= __put_user((short)from->si_code, &to->si_code); switch (from->si_code >> 16) { case __SI_FAULT >> 16: err |= __put_user(from->si_flags, &to->si_flags); err |= __put_user(from->si_isr, &to->si_isr); case __SI_POLL >> 16: err |= __put_user(from->si_addr, &to->si_addr); err |= __put_user(from->si_imm, &to->si_imm); break; case __SI_TIMER >> 16: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); err |= __put_user(from->si_ptr, &to->si_ptr); break; case __SI_RT >> 16: /* Not generated by the kernel as of now. */ case __SI_MESGQ >> 16: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_ptr, &to->si_ptr); break; case __SI_CHLD >> 16: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); default: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); break; } return err; } } long ia64_rt_sigreturn (struct sigscratch *scr) { extern char ia64_strace_leave_kernel, ia64_leave_kernel; struct sigcontext __user *sc; struct siginfo si; sigset_t set; long retval; sc = &((struct sigframe __user *) (scr->pt.r12 + 16))->sc; /* * When we return to the previously executing context, r8 and r10 have already * been setup the way we want them. Indeed, if the signal wasn't delivered while * in a system call, we must not touch r8 or r10 as otherwise user-level state * could be corrupted. 
*/ retval = (long) &ia64_leave_kernel; if (test_thread_flag(TIF_SYSCALL_TRACE) || test_thread_flag(TIF_SYSCALL_AUDIT)) /* * strace expects to be notified after sigreturn returns even though the * context to which we return may not be in the middle of a syscall. * Thus, the return-value that strace displays for sigreturn is * meaningless. */ retval = (long) &ia64_strace_leave_kernel; if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) goto give_sigsegv; if (GET_SIGSET(&set, &sc->sc_mask)) goto give_sigsegv; set_current_blocked(&set); if (restore_sigcontext(sc, scr)) goto give_sigsegv; #if DEBUG_SIG printk("SIG return (%s:%d): sp=%lx ip=%lx\n", current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip); #endif if (restore_altstack(&sc->sc_stack)) goto give_sigsegv; return retval; give_sigsegv: si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SI_KERNEL; si.si_pid = task_pid_vnr(current); si.si_uid = from_kuid_munged(current_user_ns(), current_uid()); si.si_addr = sc; force_sig_info(SIGSEGV, &si, current); return retval; } /* * This does just the minimum required setup of sigcontext. * Specifically, it only installs data that is either not knowable at * the user-level or that gets modified before execution in the * trampoline starts. Everything else is done at the user-level. 
*/ static long setup_sigcontext (struct sigcontext __user *sc, sigset_t *mask, struct sigscratch *scr) { unsigned long flags = 0, ifs, cfm, nat; long err = 0; ifs = scr->pt.cr_ifs; if (on_sig_stack((unsigned long) sc)) flags |= IA64_SC_FLAG_ONSTACK; if ((ifs & (1UL << 63)) == 0) /* if cr_ifs doesn't have the valid bit set, we got here through a syscall */ flags |= IA64_SC_FLAG_IN_SYSCALL; cfm = ifs & ((1UL << 38) - 1); ia64_flush_fph(current); if ((current->thread.flags & IA64_THREAD_FPH_VALID)) { flags |= IA64_SC_FLAG_FPH_VALID; err = __copy_to_user(&sc->sc_fr[32], current->thread.fph, 96*16); } nat = ia64_get_scratch_nat_bits(&scr->pt, scr->scratch_unat); err |= __put_user(flags, &sc->sc_flags); err |= __put_user(nat, &sc->sc_nat); err |= PUT_SIGSET(mask, &sc->sc_mask); err |= __put_user(cfm, &sc->sc_cfm); err |= __put_user(scr->pt.cr_ipsr & IA64_PSR_UM, &sc->sc_um); err |= __put_user(scr->pt.ar_rsc, &sc->sc_ar_rsc); err |= __put_user(scr->pt.ar_unat, &sc->sc_ar_unat); /* ar.unat */ err |= __put_user(scr->pt.ar_fpsr, &sc->sc_ar_fpsr); /* ar.fpsr */ err |= __put_user(scr->pt.ar_pfs, &sc->sc_ar_pfs); err |= __put_user(scr->pt.pr, &sc->sc_pr); /* predicates */ err |= __put_user(scr->pt.b0, &sc->sc_br[0]); /* b0 (rp) */ err |= __put_user(scr->pt.b6, &sc->sc_br[6]); /* b6 */ err |= __copy_to_user(&sc->sc_gr[1], &scr->pt.r1, 8); /* r1 */ err |= __copy_to_user(&sc->sc_gr[8], &scr->pt.r8, 4*8); /* r8-r11 */ err |= __copy_to_user(&sc->sc_gr[12], &scr->pt.r12, 2*8); /* r12-r13 */ err |= __copy_to_user(&sc->sc_gr[15], &scr->pt.r15, 8); /* r15 */ err |= __put_user(scr->pt.cr_iip + ia64_psr(&scr->pt)->ri, &sc->sc_ip); if (!(flags & IA64_SC_FLAG_IN_SYSCALL)) { /* Copy scratch regs to sigcontext if the signal didn't interrupt a syscall. 
*/ err |= __put_user(scr->pt.ar_ccv, &sc->sc_ar_ccv); /* ar.ccv */ err |= __put_user(scr->pt.b7, &sc->sc_br[7]); /* b7 */ err |= __put_user(scr->pt.r14, &sc->sc_gr[14]); /* r14 */ err |= __copy_to_user(&sc->sc_ar25, &scr->pt.ar_csd, 2*8); /* ar.csd & ar.ssd */ err |= __copy_to_user(&sc->sc_gr[2], &scr->pt.r2, 2*8); /* r2-r3 */ err |= __copy_to_user(&sc->sc_gr[16], &scr->pt.r16, 16*8); /* r16-r31 */ } return err; } /* * Check whether the register-backing store is already on the signal stack. */ static inline int rbs_on_sig_stack (unsigned long bsp) { return (bsp - current->sas_ss_sp < current->sas_ss_size); } static long force_sigsegv_info (int sig, void __user *addr) { unsigned long flags; struct siginfo si; if (sig == SIGSEGV) { /* * Acquiring siglock around the sa_handler-update is almost * certainly overkill, but this isn't a * performance-critical path and I'd rather play it safe * here than having to debug a nasty race if and when * something changes in kernel/signal.c that would make it * no longer safe to modify sa_handler without holding the * lock. 
*/ spin_lock_irqsave(&current->sighand->siglock, flags); current->sighand->action[sig - 1].sa.sa_handler = SIG_DFL; spin_unlock_irqrestore(&current->sighand->siglock, flags); } si.si_signo = SIGSEGV; si.si_errno = 0; si.si_code = SI_KERNEL; si.si_pid = task_pid_vnr(current); si.si_uid = from_kuid_munged(current_user_ns(), current_uid()); si.si_addr = addr; force_sig_info(SIGSEGV, &si, current); return 1; } static long setup_frame(struct ksignal *ksig, sigset_t *set, struct sigscratch *scr) { extern char __kernel_sigtramp[]; unsigned long tramp_addr, new_rbs = 0, new_sp; struct sigframe __user *frame; long err; new_sp = scr->pt.r12; tramp_addr = (unsigned long) __kernel_sigtramp; if (ksig->ka.sa.sa_flags & SA_ONSTACK) { int onstack = sas_ss_flags(new_sp); if (onstack == 0) { new_sp = current->sas_ss_sp + current->sas_ss_size; /* * We need to check for the register stack being on the * signal stack separately, because it's switched * separately (memory stack is switched in the kernel, * register stack is switched in the signal trampoline). */ if (!rbs_on_sig_stack(scr->pt.ar_bspstore)) new_rbs = ALIGN(current->sas_ss_sp, sizeof(long)); } else if (onstack == SS_ONSTACK) { unsigned long check_sp; /* * If we are on the alternate signal stack and would * overflow it, don't. Return an always-bogus address * instead so we will die with SIGSEGV. 
*/ check_sp = (new_sp - sizeof(*frame)) & -STACK_ALIGN; if (!likely(on_sig_stack(check_sp))) return force_sigsegv_info(ksig->sig, (void __user *) check_sp); } } frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return force_sigsegv_info(ksig->sig, frame); err = __put_user(ksig->sig, &frame->arg0); err |= __put_user(&frame->info, &frame->arg1); err |= __put_user(&frame->sc, &frame->arg2); err |= __put_user(new_rbs, &frame->sc.sc_rbs_base); err |= __put_user(0, &frame->sc.sc_loadrs); /* initialize to zero */ err |= __put_user(ksig->ka.sa.sa_handler, &frame->handler); err |= copy_siginfo_to_user(&frame->info, &ksig->info); err |= __save_altstack(&frame->sc.sc_stack, scr->pt.r12); err |= setup_sigcontext(&frame->sc, set, scr); if (unlikely(err)) return force_sigsegv_info(ksig->sig, frame); scr->pt.r12 = (unsigned long) frame - 16; /* new stack pointer */ scr->pt.ar_fpsr = FPSR_DEFAULT; /* reset fpsr for signal handler */ scr->pt.cr_iip = tramp_addr; ia64_psr(&scr->pt)->ri = 0; /* start executing in first slot */ ia64_psr(&scr->pt)->be = 0; /* force little-endian byte-order */ /* * Force the interruption function mask to zero. This has no effect when a * system-call got interrupted by a signal (since, in that case, scr->pt_cr_ifs is * ignored), but it has the desirable effect of making it possible to deliver a * signal with an incomplete register frame (which happens when a mandatory RSE * load faults). Furthermore, it has no negative effect on the getting the user's * dirty partition preserved, because that's governed by scr->pt.loadrs. */ scr->pt.cr_ifs = (1UL << 63); /* * Note: this affects only the NaT bits of the scratch regs (the ones saved in * pt_regs), which is exactly what we want. 
*/ scr->scratch_unat = 0; /* ensure NaT bits of r12 is clear */ #if DEBUG_SIG printk("SIG deliver (%s:%d): sig=%d sp=%lx ip=%lx handler=%p\n", current->comm, current->pid, ksig->sig, scr->pt.r12, frame->sc.sc_ip, frame->handler); #endif return 0; } static long handle_signal (struct ksignal *ksig, struct sigscratch *scr) { int ret = setup_frame(ksig, sigmask_to_save(), scr); if (!ret) signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP)); return ret; } /* * Note that `init' is a special process: it doesn't get signals it doesn't want to * handle. Thus you cannot kill init even with a SIGKILL even by mistake. */ void ia64_do_signal (struct sigscratch *scr, long in_syscall) { long restart = in_syscall; long errno = scr->pt.r8; struct ksignal ksig; /* * This only loops in the rare cases of handle_signal() failing, in which case we * need to push through a forced SIGSEGV. */ while (1) { get_signal(&ksig); /* * get_signal_to_deliver() may have run a debugger (via notify_parent()) * and the debugger may have modified the state (e.g., to arrange for an * inferior call), thus it's important to check for restarting _after_ * get_signal_to_deliver(). */ if ((long) scr->pt.r10 != -1) /* * A system calls has to be restarted only if one of the error codes * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 * isn't -1 then r8 doesn't hold an error code and we don't need to * restart the syscall, so we can clear the "restart" flag here. */ restart = 0; if (ksig.sig <= 0) break; if (unlikely(restart)) { switch (errno) { case ERESTART_RESTARTBLOCK: case ERESTARTNOHAND: scr->pt.r8 = EINTR; /* note: scr->pt.r10 is already -1 */ break; case ERESTARTSYS: if ((ksig.ka.sa.sa_flags & SA_RESTART) == 0) { scr->pt.r8 = EINTR; /* note: scr->pt.r10 is already -1 */ break; } case ERESTARTNOINTR: ia64_decrement_ip(&scr->pt); restart = 0; /* don't restart twice if handle_signal() fails... */ } } /* * Whee! Actually deliver the signal. 
If the delivery failed, we need to * continue to iterate in this loop so we can deliver the SIGSEGV... */ if (handle_signal(&ksig, scr)) return; } /* Did we come from a system call? */ if (restart) { /* Restart the system call - no handlers present */ if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR || errno == ERESTART_RESTARTBLOCK) { /* * Note: the syscall number is in r15 which is saved in * pt_regs so all we need to do here is adjust ip so that * the "break" instruction gets re-executed. */ ia64_decrement_ip(&scr->pt); if (errno == ERESTART_RESTARTBLOCK) scr->pt.r15 = __NR_restart_syscall; } } /* if there's no signal to deliver, we just put the saved sigmask * back */ restore_saved_sigmask(); }
gpl-2.0
bju2000/lge-kernel-gproj
drivers/media/video/msm/s5k4e1.c
717
26348
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/delay.h> #include <linux/debugfs.h> #include <linux/types.h> #include <linux/i2c.h> #include <linux/uaccess.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/gpio.h> #include <linux/bitops.h> #include <mach/camera.h> #include <media/msm_camera.h> #include "s5k4e1.h" /* 16bit address - 8 bit context register structure */ #define Q8 0x00000100 #define Q10 0x00000400 /* MCLK */ #define S5K4E1_MASTER_CLK_RATE 24000000 /* AF Total steps parameters */ #define S5K4E1_TOTAL_STEPS_NEAR_TO_FAR 32 #define S5K4E1_REG_PREV_FRAME_LEN_1 31 #define S5K4E1_REG_PREV_FRAME_LEN_2 32 #define S5K4E1_REG_PREV_LINE_LEN_1 33 #define S5K4E1_REG_PREV_LINE_LEN_2 34 #define S5K4E1_REG_SNAP_FRAME_LEN_1 15 #define S5K4E1_REG_SNAP_FRAME_LEN_2 16 #define S5K4E1_REG_SNAP_LINE_LEN_1 17 #define S5K4E1_REG_SNAP_LINE_LEN_2 18 #define MSB 1 #define LSB 0 struct s5k4e1_work_t { struct work_struct work; }; static struct s5k4e1_work_t *s5k4e1_sensorw; static struct s5k4e1_work_t *s5k4e1_af_sensorw; static struct i2c_client *s5k4e1_af_client; static struct i2c_client *s5k4e1_client; struct s5k4e1_ctrl_t { const struct msm_camera_sensor_info *sensordata; uint32_t sensormode; uint32_t fps_divider;/* init to 1 * 0x00000400 */ uint32_t pict_fps_divider;/* init to 1 * 0x00000400 */ uint16_t fps; uint16_t curr_lens_pos; uint16_t curr_step_pos; uint16_t my_reg_gain; uint32_t my_reg_line_count; uint16_t total_lines_per_frame; enum s5k4e1_resolution_t prev_res; enum 
s5k4e1_resolution_t pict_res; enum s5k4e1_resolution_t curr_res; enum s5k4e1_test_mode_t set_test; }; static bool CSI_CONFIG; static struct s5k4e1_ctrl_t *s5k4e1_ctrl; static DECLARE_WAIT_QUEUE_HEAD(s5k4e1_wait_queue); static DECLARE_WAIT_QUEUE_HEAD(s5k4e1_af_wait_queue); DEFINE_MUTEX(s5k4e1_mut); static uint16_t prev_line_length_pck; static uint16_t prev_frame_length_lines; static uint16_t snap_line_length_pck; static uint16_t snap_frame_length_lines; static int s5k4e1_i2c_rxdata(unsigned short saddr, unsigned char *rxdata, int length) { struct i2c_msg msgs[] = { { .addr = saddr, .flags = 0, .len = 1, .buf = rxdata, }, { .addr = saddr, .flags = I2C_M_RD, .len = 1, .buf = rxdata, }, }; if (i2c_transfer(s5k4e1_client->adapter, msgs, 2) < 0) { CDBG("s5k4e1_i2c_rxdata faild 0x%x\n", saddr); return -EIO; } return 0; } static int32_t s5k4e1_i2c_txdata(unsigned short saddr, unsigned char *txdata, int length) { struct i2c_msg msg[] = { { .addr = saddr, .flags = 0, .len = length, .buf = txdata, }, }; if (i2c_transfer(s5k4e1_client->adapter, msg, 1) < 0) { CDBG("s5k4e1_i2c_txdata faild 0x%x\n", saddr); return -EIO; } return 0; } static int32_t s5k4e1_i2c_read(unsigned short raddr, unsigned short *rdata, int rlen) { int32_t rc = 0; unsigned char buf[2]; if (!rdata) return -EIO; memset(buf, 0, sizeof(buf)); buf[0] = (raddr & 0xFF00) >> 8; buf[1] = (raddr & 0x00FF); rc = s5k4e1_i2c_rxdata(s5k4e1_client->addr, buf, rlen); if (rc < 0) { CDBG("s5k4e1_i2c_read 0x%x failed!\n", raddr); return rc; } *rdata = (rlen == 2 ? 
buf[0] << 8 | buf[1] : buf[0]); CDBG("s5k4e1_i2c_read 0x%x val = 0x%x!\n", raddr, *rdata); return rc; } static int32_t s5k4e1_i2c_write_b_sensor(unsigned short waddr, uint8_t bdata) { int32_t rc = -EFAULT; unsigned char buf[3]; memset(buf, 0, sizeof(buf)); buf[0] = (waddr & 0xFF00) >> 8; buf[1] = (waddr & 0x00FF); buf[2] = bdata; CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata); rc = s5k4e1_i2c_txdata(s5k4e1_client->addr, buf, 3); if (rc < 0) { CDBG("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n", waddr, bdata); } return rc; } static int32_t s5k4e1_i2c_write_b_table(struct s5k4e1_i2c_reg_conf const *reg_conf_tbl, int num) { int i; int32_t rc = -EIO; for (i = 0; i < num; i++) { rc = s5k4e1_i2c_write_b_sensor(reg_conf_tbl->waddr, reg_conf_tbl->wdata); if (rc < 0) break; reg_conf_tbl++; } return rc; } static int32_t s5k4e1_af_i2c_txdata(unsigned short saddr, unsigned char *txdata, int length) { struct i2c_msg msg[] = { { .addr = saddr, .flags = 0, .len = length, .buf = txdata, }, }; if (i2c_transfer(s5k4e1_af_client->adapter, msg, 1) < 0) { pr_err("s5k4e1_af_i2c_txdata faild 0x%x\n", saddr); return -EIO; } return 0; } static int32_t s5k4e1_af_i2c_write_b_sensor(uint8_t waddr, uint8_t bdata) { int32_t rc = -EFAULT; unsigned char buf[2]; memset(buf, 0, sizeof(buf)); buf[0] = waddr; buf[1] = bdata; CDBG("i2c_write_b addr = 0x%x, val = 0x%x\n", waddr, bdata); rc = s5k4e1_af_i2c_txdata(s5k4e1_af_client->addr << 1, buf, 2); if (rc < 0) { pr_err("i2c_write_b failed, addr = 0x%x, val = 0x%x!\n", waddr, bdata); } return rc; } static void s5k4e1_start_stream(void) { s5k4e1_i2c_write_b_sensor(0x0100, 0x01);/* streaming on */ } static void s5k4e1_stop_stream(void) { s5k4e1_i2c_write_b_sensor(0x0100, 0x00);/* streaming off */ } static void s5k4e1_group_hold_on(void) { s5k4e1_i2c_write_b_sensor(0x0104, 0x01); } static void s5k4e1_group_hold_off(void) { s5k4e1_i2c_write_b_sensor(0x0104, 0x0); } static void s5k4e1_get_pict_fps(uint16_t fps, uint16_t *pfps) { /* input fps 
is preview fps in Q8 format */ uint32_t divider, d1, d2; d1 = (prev_frame_length_lines * 0x00000400) / snap_frame_length_lines; d2 = (prev_line_length_pck * 0x00000400) / snap_line_length_pck; divider = (d1 * d2) / 0x400; /*Verify PCLK settings and frame sizes.*/ *pfps = (uint16_t) (fps * divider / 0x400); } static uint16_t s5k4e1_get_prev_lines_pf(void) { if (s5k4e1_ctrl->prev_res == QTR_SIZE) return prev_frame_length_lines; else return snap_frame_length_lines; } static uint16_t s5k4e1_get_prev_pixels_pl(void) { if (s5k4e1_ctrl->prev_res == QTR_SIZE) return prev_line_length_pck; else return snap_line_length_pck; } static uint16_t s5k4e1_get_pict_lines_pf(void) { if (s5k4e1_ctrl->pict_res == QTR_SIZE) return prev_frame_length_lines; else return snap_frame_length_lines; } static uint16_t s5k4e1_get_pict_pixels_pl(void) { if (s5k4e1_ctrl->pict_res == QTR_SIZE) return prev_line_length_pck; else return snap_line_length_pck; } static uint32_t s5k4e1_get_pict_max_exp_lc(void) { return snap_frame_length_lines * 24; } static int32_t s5k4e1_set_fps(struct fps_cfg *fps) { uint16_t total_lines_per_frame; int32_t rc = 0; s5k4e1_ctrl->fps_divider = fps->fps_div; s5k4e1_ctrl->pict_fps_divider = fps->pict_fps_div; if (s5k4e1_ctrl->sensormode == SENSOR_PREVIEW_MODE) { total_lines_per_frame = (uint16_t) ((prev_frame_length_lines * s5k4e1_ctrl->fps_divider) / 0x400); } else { total_lines_per_frame = (uint16_t) ((snap_frame_length_lines * s5k4e1_ctrl->fps_divider) / 0x400); } s5k4e1_group_hold_on(); rc = s5k4e1_i2c_write_b_sensor(0x0340, ((total_lines_per_frame & 0xFF00) >> 8)); rc = s5k4e1_i2c_write_b_sensor(0x0341, (total_lines_per_frame & 0x00FF)); s5k4e1_group_hold_off(); return rc; } static inline uint8_t s5k4e1_byte(uint16_t word, uint8_t offset) { return word >> (offset * BITS_PER_BYTE); } static int32_t s5k4e1_write_exp_gain(uint16_t gain, uint32_t line) { uint16_t max_legal_gain = 0x0200; int32_t rc = 0; static uint32_t fl_lines; if (gain > max_legal_gain) { pr_debug("Max 
legal gain Line:%d\n", __LINE__); gain = max_legal_gain; } /* Analogue Gain */ s5k4e1_i2c_write_b_sensor(0x0204, s5k4e1_byte(gain, MSB)); s5k4e1_i2c_write_b_sensor(0x0205, s5k4e1_byte(gain, LSB)); if (line > (prev_frame_length_lines - 4)) { fl_lines = line+4; s5k4e1_group_hold_on(); s5k4e1_i2c_write_b_sensor(0x0340, s5k4e1_byte(fl_lines, MSB)); s5k4e1_i2c_write_b_sensor(0x0341, s5k4e1_byte(fl_lines, LSB)); /* Coarse Integration Time */ s5k4e1_i2c_write_b_sensor(0x0202, s5k4e1_byte(line, MSB)); s5k4e1_i2c_write_b_sensor(0x0203, s5k4e1_byte(line, LSB)); s5k4e1_group_hold_off(); } else if (line < (fl_lines - 4)) { fl_lines = line+4; if (fl_lines < prev_frame_length_lines) fl_lines = prev_frame_length_lines; s5k4e1_group_hold_on(); /* Coarse Integration Time */ s5k4e1_i2c_write_b_sensor(0x0202, s5k4e1_byte(line, MSB)); s5k4e1_i2c_write_b_sensor(0x0203, s5k4e1_byte(line, LSB)); s5k4e1_i2c_write_b_sensor(0x0340, s5k4e1_byte(fl_lines, MSB)); s5k4e1_i2c_write_b_sensor(0x0341, s5k4e1_byte(fl_lines, LSB)); s5k4e1_group_hold_off(); } else { fl_lines = line+4; s5k4e1_group_hold_on(); /* Coarse Integration Time */ s5k4e1_i2c_write_b_sensor(0x0202, s5k4e1_byte(line, MSB)); s5k4e1_i2c_write_b_sensor(0x0203, s5k4e1_byte(line, LSB)); s5k4e1_group_hold_off(); } return rc; } static int32_t s5k4e1_set_pict_exp_gain(uint16_t gain, uint32_t line) { uint16_t max_legal_gain = 0x0200; uint16_t min_ll_pck = 0x0AB2; uint32_t ll_pck, fl_lines; uint32_t ll_ratio; int32_t rc = 0; uint8_t gain_msb, gain_lsb; uint8_t intg_time_msb, intg_time_lsb; uint8_t ll_pck_msb, ll_pck_lsb; if (gain > max_legal_gain) { pr_debug("Max legal gain Line:%d\n", __LINE__); gain = max_legal_gain; } pr_debug("s5k4e1_write_exp_gain : gain = %d line = %d\n", gain, line); line = (uint32_t) (line * s5k4e1_ctrl->pict_fps_divider); fl_lines = snap_frame_length_lines; ll_pck = snap_line_length_pck; if (fl_lines < (line / 0x400)) ll_ratio = (line / (fl_lines - 4)); else ll_ratio = 0x400; ll_pck = ll_pck * ll_ratio / 0x400; 
line = line / ll_ratio; if (ll_pck < min_ll_pck) ll_pck = min_ll_pck; gain_msb = (uint8_t) ((gain & 0xFF00) >> 8); gain_lsb = (uint8_t) (gain & 0x00FF); intg_time_msb = (uint8_t) ((line & 0xFF00) >> 8); intg_time_lsb = (uint8_t) (line & 0x00FF); ll_pck_msb = (uint8_t) ((ll_pck & 0xFF00) >> 8); ll_pck_lsb = (uint8_t) (ll_pck & 0x00FF); s5k4e1_group_hold_on(); s5k4e1_i2c_write_b_sensor(0x0204, gain_msb); /* Analogue Gain */ s5k4e1_i2c_write_b_sensor(0x0205, gain_lsb); s5k4e1_i2c_write_b_sensor(0x0342, ll_pck_msb); s5k4e1_i2c_write_b_sensor(0x0343, ll_pck_lsb); /* Coarse Integration Time */ s5k4e1_i2c_write_b_sensor(0x0202, intg_time_msb); s5k4e1_i2c_write_b_sensor(0x0203, intg_time_lsb); s5k4e1_group_hold_off(); return rc; } static int32_t s5k4e1_move_focus(int direction, int32_t num_steps) { int16_t step_direction, actual_step, next_position; uint8_t code_val_msb, code_val_lsb; if (direction == MOVE_NEAR) step_direction = 16; else step_direction = -16; actual_step = (int16_t) (step_direction * num_steps); next_position = (int16_t) (s5k4e1_ctrl->curr_lens_pos + actual_step); if (next_position > 1023) next_position = 1023; else if (next_position < 0) next_position = 0; code_val_msb = next_position >> 4; code_val_lsb = (next_position & 0x000F) << 4; if (s5k4e1_af_i2c_write_b_sensor(code_val_msb, code_val_lsb) < 0) { pr_err("move_focus failed at line %d ...\n", __LINE__); return -EBUSY; } s5k4e1_ctrl->curr_lens_pos = next_position; return 0; } static int32_t s5k4e1_set_default_focus(uint8_t af_step) { int32_t rc = 0; if (s5k4e1_ctrl->curr_step_pos != 0) { rc = s5k4e1_move_focus(MOVE_FAR, s5k4e1_ctrl->curr_step_pos); } else { s5k4e1_af_i2c_write_b_sensor(0x00, 0x00); } s5k4e1_ctrl->curr_lens_pos = 0; s5k4e1_ctrl->curr_step_pos = 0; return rc; } static int32_t s5k4e1_test(enum s5k4e1_test_mode_t mo) { int32_t rc = 0; if (mo != TEST_OFF) rc = s5k4e1_i2c_write_b_sensor(0x0601, (uint8_t) mo); return rc; } static void s5k4e1_reset_sensor(void) { 
s5k4e1_i2c_write_b_sensor(0x103, 0x1); } static int32_t s5k4e1_sensor_setting(int update_type, int rt) { int32_t rc = 0; struct msm_camera_csi_params s5k4e1_csi_params; s5k4e1_stop_stream(); msleep(30); if (update_type == REG_INIT) { s5k4e1_reset_sensor(); s5k4e1_i2c_write_b_table(s5k4e1_regs.reg_mipi, s5k4e1_regs.reg_mipi_size); s5k4e1_i2c_write_b_table(s5k4e1_regs.rec_settings, s5k4e1_regs.rec_size); s5k4e1_i2c_write_b_table(s5k4e1_regs.reg_pll_p, s5k4e1_regs.reg_pll_p_size); CSI_CONFIG = 0; } else if (update_type == UPDATE_PERIODIC) { if (rt == RES_PREVIEW) s5k4e1_i2c_write_b_table(s5k4e1_regs.reg_prev, s5k4e1_regs.reg_prev_size); else s5k4e1_i2c_write_b_table(s5k4e1_regs.reg_snap, s5k4e1_regs.reg_snap_size); msleep(20); if (!CSI_CONFIG) { msm_camio_vfe_clk_rate_set(192000000); s5k4e1_csi_params.data_format = CSI_10BIT; s5k4e1_csi_params.lane_cnt = 1; s5k4e1_csi_params.lane_assign = 0xe4; s5k4e1_csi_params.dpcm_scheme = 0; s5k4e1_csi_params.settle_cnt = 24; rc = msm_camio_csi_config(&s5k4e1_csi_params); msleep(20); CSI_CONFIG = 1; } s5k4e1_start_stream(); msleep(30); } return rc; } static int32_t s5k4e1_video_config(int mode) { int32_t rc = 0; int rt; CDBG("video config\n"); /* change sensor resolution if needed */ if (s5k4e1_ctrl->prev_res == QTR_SIZE) rt = RES_PREVIEW; else rt = RES_CAPTURE; if (s5k4e1_sensor_setting(UPDATE_PERIODIC, rt) < 0) return rc; if (s5k4e1_ctrl->set_test) { if (s5k4e1_test(s5k4e1_ctrl->set_test) < 0) return rc; } s5k4e1_ctrl->curr_res = s5k4e1_ctrl->prev_res; s5k4e1_ctrl->sensormode = mode; return rc; } static int32_t s5k4e1_snapshot_config(int mode) { int32_t rc = 0; int rt; /*change sensor resolution if needed */ if (s5k4e1_ctrl->curr_res != s5k4e1_ctrl->pict_res) { if (s5k4e1_ctrl->pict_res == QTR_SIZE) rt = RES_PREVIEW; else rt = RES_CAPTURE; if (s5k4e1_sensor_setting(UPDATE_PERIODIC, rt) < 0) return rc; } s5k4e1_ctrl->curr_res = s5k4e1_ctrl->pict_res; s5k4e1_ctrl->sensormode = mode; return rc; } static int32_t 
s5k4e1_raw_snapshot_config(int mode) { int32_t rc = 0; int rt; /* change sensor resolution if needed */ if (s5k4e1_ctrl->curr_res != s5k4e1_ctrl->pict_res) { if (s5k4e1_ctrl->pict_res == QTR_SIZE) rt = RES_PREVIEW; else rt = RES_CAPTURE; if (s5k4e1_sensor_setting(UPDATE_PERIODIC, rt) < 0) return rc; } s5k4e1_ctrl->curr_res = s5k4e1_ctrl->pict_res; s5k4e1_ctrl->sensormode = mode; return rc; } static int32_t s5k4e1_set_sensor_mode(int mode, int res) { int32_t rc = 0; switch (mode) { case SENSOR_PREVIEW_MODE: rc = s5k4e1_video_config(mode); break; case SENSOR_SNAPSHOT_MODE: rc = s5k4e1_snapshot_config(mode); break; case SENSOR_RAW_SNAPSHOT_MODE: rc = s5k4e1_raw_snapshot_config(mode); break; default: rc = -EINVAL; break; } return rc; } static int32_t s5k4e1_power_down(void) { s5k4e1_stop_stream(); return 0; } static int s5k4e1_probe_init_done(const struct msm_camera_sensor_info *data) { CDBG("probe done\n"); gpio_free(data->sensor_reset); return 0; } static int s5k4e1_probe_init_sensor(const struct msm_camera_sensor_info *data) { int32_t rc = 0; uint16_t regaddress1 = 0x0000; uint16_t regaddress2 = 0x0001; uint16_t chipid1 = 0; uint16_t chipid2 = 0; CDBG("%s: %d\n", __func__, __LINE__); CDBG(" s5k4e1_probe_init_sensor is called\n"); rc = gpio_request(data->sensor_reset, "s5k4e1"); CDBG(" s5k4e1_probe_init_sensor\n"); if (!rc) { CDBG("sensor_reset = %d\n", rc); gpio_direction_output(data->sensor_reset, 0); msleep(50); gpio_set_value_cansleep(data->sensor_reset, 1); msleep(20); } else goto gpio_req_fail; msleep(20); s5k4e1_i2c_read(regaddress1, &chipid1, 1); if (chipid1 != 0x4E) { rc = -ENODEV; CDBG("s5k4e1_probe_init_sensor fail chip id doesnot match\n"); goto init_probe_fail; } s5k4e1_i2c_read(regaddress2, &chipid2 , 1); if (chipid2 != 0x10) { rc = -ENODEV; CDBG("s5k4e1_probe_init_sensor fail chip id doesnot match\n"); goto init_probe_fail; } CDBG("ID: %d\n", chipid1); CDBG("ID: %d\n", chipid1); return rc; init_probe_fail: CDBG(" s5k4e1_probe_init_sensor fails\n"); 
gpio_set_value_cansleep(data->sensor_reset, 0); s5k4e1_probe_init_done(data); if (data->vcm_enable) { int ret = gpio_request(data->vcm_pwd, "s5k4e1_af"); if (!ret) { gpio_direction_output(data->vcm_pwd, 0); msleep(20); gpio_free(data->vcm_pwd); } } gpio_req_fail: return rc; } int s5k4e1_sensor_open_init(const struct msm_camera_sensor_info *data) { int32_t rc = 0; CDBG("%s: %d\n", __func__, __LINE__); CDBG("Calling s5k4e1_sensor_open_init\n"); s5k4e1_ctrl = kzalloc(sizeof(struct s5k4e1_ctrl_t), GFP_KERNEL); if (!s5k4e1_ctrl) { CDBG("s5k4e1_init failed!\n"); rc = -ENOMEM; goto init_done; } s5k4e1_ctrl->fps_divider = 1 * 0x00000400; s5k4e1_ctrl->pict_fps_divider = 1 * 0x00000400; s5k4e1_ctrl->set_test = TEST_OFF; s5k4e1_ctrl->prev_res = QTR_SIZE; s5k4e1_ctrl->pict_res = FULL_SIZE; if (data) s5k4e1_ctrl->sensordata = data; prev_frame_length_lines = ((s5k4e1_regs.reg_prev[S5K4E1_REG_PREV_FRAME_LEN_1].wdata << 8) | s5k4e1_regs.reg_prev[S5K4E1_REG_PREV_FRAME_LEN_2].wdata); prev_line_length_pck = (s5k4e1_regs.reg_prev[S5K4E1_REG_PREV_LINE_LEN_1].wdata << 8) | s5k4e1_regs.reg_prev[S5K4E1_REG_PREV_LINE_LEN_2].wdata; snap_frame_length_lines = (s5k4e1_regs.reg_snap[S5K4E1_REG_SNAP_FRAME_LEN_1].wdata << 8) | s5k4e1_regs.reg_snap[S5K4E1_REG_SNAP_FRAME_LEN_2].wdata; snap_line_length_pck = (s5k4e1_regs.reg_snap[S5K4E1_REG_SNAP_LINE_LEN_1].wdata << 8) | s5k4e1_regs.reg_snap[S5K4E1_REG_SNAP_LINE_LEN_1].wdata; /* enable mclk first */ msm_camio_clk_rate_set(S5K4E1_MASTER_CLK_RATE); rc = s5k4e1_probe_init_sensor(data); if (rc < 0) goto init_fail; CDBG("init settings\n"); if (s5k4e1_ctrl->prev_res == QTR_SIZE) rc = s5k4e1_sensor_setting(REG_INIT, RES_PREVIEW); else rc = s5k4e1_sensor_setting(REG_INIT, RES_CAPTURE); s5k4e1_ctrl->fps = 30 * Q8; /* enable AF actuator */ if (s5k4e1_ctrl->sensordata->vcm_enable) { CDBG("enable AF actuator, gpio = %d\n", s5k4e1_ctrl->sensordata->vcm_pwd); rc = gpio_request(s5k4e1_ctrl->sensordata->vcm_pwd, "s5k4e1_af"); if (!rc) gpio_direction_output( 
s5k4e1_ctrl->sensordata->vcm_pwd, 1); else { pr_err("s5k4e1_ctrl gpio request failed!\n"); goto init_fail; } msleep(20); rc = s5k4e1_set_default_focus(0); if (rc < 0) { gpio_direction_output(s5k4e1_ctrl->sensordata->vcm_pwd, 0); gpio_free(s5k4e1_ctrl->sensordata->vcm_pwd); } } if (rc < 0) goto init_fail; else goto init_done; init_fail: CDBG("init_fail\n"); s5k4e1_probe_init_done(data); init_done: CDBG("init_done\n"); return rc; } static int s5k4e1_init_client(struct i2c_client *client) { /* Initialize the MSM_CAMI2C Chip */ init_waitqueue_head(&s5k4e1_wait_queue); return 0; } static int s5k4e1_af_init_client(struct i2c_client *client) { /* Initialize the MSM_CAMI2C Chip */ init_waitqueue_head(&s5k4e1_af_wait_queue); return 0; } static const struct i2c_device_id s5k4e1_af_i2c_id[] = { {"s5k4e1_af", 0}, { } }; static int s5k4e1_af_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; CDBG("s5k4e1_af_probe called!\n"); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { CDBG("i2c_check_functionality failed\n"); goto probe_failure; } s5k4e1_af_sensorw = kzalloc(sizeof(struct s5k4e1_work_t), GFP_KERNEL); if (!s5k4e1_af_sensorw) { CDBG("kzalloc failed.\n"); rc = -ENOMEM; goto probe_failure; } i2c_set_clientdata(client, s5k4e1_af_sensorw); s5k4e1_af_init_client(client); s5k4e1_af_client = client; msleep(50); CDBG("s5k4e1_af_probe successed! rc = %d\n", rc); return 0; probe_failure: CDBG("s5k4e1_af_probe failed! 
rc = %d\n", rc); return rc; } static const struct i2c_device_id s5k4e1_i2c_id[] = { {"s5k4e1", 0}, { } }; static int s5k4e1_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { int rc = 0; CDBG("s5k4e1_probe called!\n"); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { CDBG("i2c_check_functionality failed\n"); goto probe_failure; } s5k4e1_sensorw = kzalloc(sizeof(struct s5k4e1_work_t), GFP_KERNEL); if (!s5k4e1_sensorw) { CDBG("kzalloc failed.\n"); rc = -ENOMEM; goto probe_failure; } i2c_set_clientdata(client, s5k4e1_sensorw); s5k4e1_init_client(client); s5k4e1_client = client; msleep(50); CDBG("s5k4e1_probe successed! rc = %d\n", rc); return 0; probe_failure: CDBG("s5k4e1_probe failed! rc = %d\n", rc); return rc; } static int __devexit s5k4e1_remove(struct i2c_client *client) { struct s5k4e1_work_t *sensorw = i2c_get_clientdata(client); free_irq(client->irq, sensorw); s5k4e1_client = NULL; kfree(sensorw); return 0; } static int __devexit s5k4e1_af_remove(struct i2c_client *client) { struct s5k4e1_work_t *s5k4e1_af = i2c_get_clientdata(client); free_irq(client->irq, s5k4e1_af); s5k4e1_af_client = NULL; kfree(s5k4e1_af); return 0; } static struct i2c_driver s5k4e1_i2c_driver = { .id_table = s5k4e1_i2c_id, .probe = s5k4e1_i2c_probe, .remove = __exit_p(s5k4e1_i2c_remove), .driver = { .name = "s5k4e1", }, }; static struct i2c_driver s5k4e1_af_i2c_driver = { .id_table = s5k4e1_af_i2c_id, .probe = s5k4e1_af_i2c_probe, .remove = __exit_p(s5k4e1_af_i2c_remove), .driver = { .name = "s5k4e1_af", }, }; int s5k4e1_sensor_config(void __user *argp) { struct sensor_cfg_data cdata; long rc = 0; if (copy_from_user(&cdata, (void *)argp, sizeof(struct sensor_cfg_data))) return -EFAULT; mutex_lock(&s5k4e1_mut); CDBG("s5k4e1_sensor_config: cfgtype = %d\n", cdata.cfgtype); switch (cdata.cfgtype) { case CFG_GET_PICT_FPS: s5k4e1_get_pict_fps( cdata.cfg.gfps.prevfps, &(cdata.cfg.gfps.pictfps)); if (copy_to_user((void *)argp, &cdata, sizeof(struct 
sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PREV_L_PF: cdata.cfg.prevl_pf = s5k4e1_get_prev_lines_pf(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PREV_P_PL: cdata.cfg.prevp_pl = s5k4e1_get_prev_pixels_pl(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PICT_L_PF: cdata.cfg.pictl_pf = s5k4e1_get_pict_lines_pf(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PICT_P_PL: cdata.cfg.pictp_pl = s5k4e1_get_pict_pixels_pl(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_GET_PICT_MAX_EXP_LC: cdata.cfg.pict_max_exp_lc = s5k4e1_get_pict_max_exp_lc(); if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_SET_FPS: case CFG_SET_PICT_FPS: rc = s5k4e1_set_fps(&(cdata.cfg.fps)); break; case CFG_SET_EXP_GAIN: rc = s5k4e1_write_exp_gain(cdata.cfg.exp_gain.gain, cdata.cfg.exp_gain.line); break; case CFG_SET_PICT_EXP_GAIN: rc = s5k4e1_set_pict_exp_gain(cdata.cfg.exp_gain.gain, cdata.cfg.exp_gain.line); break; case CFG_SET_MODE: rc = s5k4e1_set_sensor_mode(cdata.mode, cdata.rs); break; case CFG_PWR_DOWN: rc = s5k4e1_power_down(); break; case CFG_MOVE_FOCUS: rc = s5k4e1_move_focus(cdata.cfg.focus.dir, cdata.cfg.focus.steps); break; case CFG_SET_DEFAULT_FOCUS: rc = s5k4e1_set_default_focus(cdata.cfg.focus.steps); break; case CFG_GET_AF_MAX_STEPS: cdata.max_steps = S5K4E1_TOTAL_STEPS_NEAR_TO_FAR; if (copy_to_user((void *)argp, &cdata, sizeof(struct sensor_cfg_data))) rc = -EFAULT; break; case CFG_SET_EFFECT: rc = s5k4e1_set_default_focus(cdata.cfg.effect); break; default: rc = -EFAULT; break; } mutex_unlock(&s5k4e1_mut); return rc; } static int s5k4e1_sensor_release(void) { int rc = -EBADF; mutex_lock(&s5k4e1_mut); s5k4e1_power_down(); msleep(20); 
gpio_set_value_cansleep(s5k4e1_ctrl->sensordata->sensor_reset, 0); usleep_range(5000, 5100); gpio_free(s5k4e1_ctrl->sensordata->sensor_reset); if (s5k4e1_ctrl->sensordata->vcm_enable) { gpio_set_value_cansleep(s5k4e1_ctrl->sensordata->vcm_pwd, 0); gpio_free(s5k4e1_ctrl->sensordata->vcm_pwd); } kfree(s5k4e1_ctrl); s5k4e1_ctrl = NULL; CDBG("s5k4e1_release completed\n"); mutex_unlock(&s5k4e1_mut); return rc; } static int s5k4e1_sensor_probe(const struct msm_camera_sensor_info *info, struct msm_sensor_ctrl *s) { int rc = 0; rc = i2c_add_driver(&s5k4e1_i2c_driver); if (rc < 0 || s5k4e1_client == NULL) { rc = -ENOTSUPP; CDBG("I2C add driver failed"); goto probe_fail_1; } rc = i2c_add_driver(&s5k4e1_af_i2c_driver); if (rc < 0 || s5k4e1_af_client == NULL) { rc = -ENOTSUPP; CDBG("I2C add driver failed"); goto probe_fail_2; } msm_camio_clk_rate_set(S5K4E1_MASTER_CLK_RATE); rc = s5k4e1_probe_init_sensor(info); if (rc < 0) goto probe_fail_3; s->s_init = s5k4e1_sensor_open_init; s->s_release = s5k4e1_sensor_release; s->s_config = s5k4e1_sensor_config; s->s_mount_angle = info->sensor_platform_info->mount_angle; gpio_set_value_cansleep(info->sensor_reset, 0); s5k4e1_probe_init_done(info); /* Keep vcm_pwd to OUT Low */ if (info->vcm_enable) { rc = gpio_request(info->vcm_pwd, "s5k4e1_af"); if (!rc) { gpio_direction_output(info->vcm_pwd, 0); msleep(20); gpio_free(info->vcm_pwd); } else return rc; } return rc; probe_fail_3: i2c_del_driver(&s5k4e1_af_i2c_driver); probe_fail_2: i2c_del_driver(&s5k4e1_i2c_driver); probe_fail_1: CDBG("s5k4e1_sensor_probe: SENSOR PROBE FAILS!\n"); return rc; } static int __devinit s5k4e1_probe(struct platform_device *pdev) { return msm_camera_drv_start(pdev, s5k4e1_sensor_probe); } static struct platform_driver msm_camera_driver = { .probe = s5k4e1_probe, .driver = { .name = "msm_camera_s5k4e1", .owner = THIS_MODULE, }, }; static int __init s5k4e1_init(void) { return platform_driver_register(&msm_camera_driver); } module_init(s5k4e1_init); 
/* Module metadata for the Samsung S5K4E1 camera sensor driver. */
MODULE_DESCRIPTION("Samsung 5 MP Bayer sensor driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
TeamWin/android_kernel_htc_a32e
drivers/usb/phy/phy-ab8500-usb.c
1997
23723
/* * drivers/usb/otg/ab8500_usb.c * * USB transceiver driver for AB8500 chip * * Copyright (C) 2010 ST-Ericsson AB * Mian Yousaf Kaukab <mian.yousaf.kaukab@stericsson.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/usb/otg.h> #include <linux/slab.h> #include <linux/notifier.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500.h> #include <linux/usb/musb-ux500.h> #include <linux/regulator/consumer.h> #include <linux/pinctrl/consumer.h> /* Bank AB8500_SYS_CTRL2_BLOCK */ #define AB8500_MAIN_WD_CTRL_REG 0x01 /* Bank AB8500_USB */ #define AB8500_USB_LINE_STAT_REG 0x80 #define AB8505_USB_LINE_STAT_REG 0x94 #define AB8500_USB_PHY_CTRL_REG 0x8A /* Bank AB8500_DEVELOPMENT */ #define AB8500_BANK12_ACCESS 0x00 /* Bank AB8500_DEBUG */ #define AB8500_USB_PHY_TUNE1 0x05 #define AB8500_USB_PHY_TUNE2 0x06 #define AB8500_USB_PHY_TUNE3 0x07 #define AB8500_BIT_OTG_STAT_ID (1 << 0) #define AB8500_BIT_PHY_CTRL_HOST_EN (1 << 0) #define AB8500_BIT_PHY_CTRL_DEVICE_EN (1 << 1) #define AB8500_BIT_WD_CTRL_ENABLE (1 << 0) #define AB8500_BIT_WD_CTRL_KICK (1 << 1) #define AB8500_WD_KICK_DELAY_US 100 /* usec */ #define AB8500_WD_V11_DISABLE_DELAY_US 100 /* usec */ #define AB8500_V20_31952_DISABLE_DELAY_US 100 /* usec 
*/ /* Usb line status register */ enum ab8500_usb_link_status { USB_LINK_NOT_CONFIGURED_8500 = 0, USB_LINK_STD_HOST_NC_8500, USB_LINK_STD_HOST_C_NS_8500, USB_LINK_STD_HOST_C_S_8500, USB_LINK_HOST_CHG_NM_8500, USB_LINK_HOST_CHG_HS_8500, USB_LINK_HOST_CHG_HS_CHIRP_8500, USB_LINK_DEDICATED_CHG_8500, USB_LINK_ACA_RID_A_8500, USB_LINK_ACA_RID_B_8500, USB_LINK_ACA_RID_C_NM_8500, USB_LINK_ACA_RID_C_HS_8500, USB_LINK_ACA_RID_C_HS_CHIRP_8500, USB_LINK_HM_IDGND_8500, USB_LINK_RESERVED_8500, USB_LINK_NOT_VALID_LINK_8500, }; enum ab8505_usb_link_status { USB_LINK_NOT_CONFIGURED_8505 = 0, USB_LINK_STD_HOST_NC_8505, USB_LINK_STD_HOST_C_NS_8505, USB_LINK_STD_HOST_C_S_8505, USB_LINK_CDP_8505, USB_LINK_RESERVED0_8505, USB_LINK_RESERVED1_8505, USB_LINK_DEDICATED_CHG_8505, USB_LINK_ACA_RID_A_8505, USB_LINK_ACA_RID_B_8505, USB_LINK_ACA_RID_C_NM_8505, USB_LINK_RESERVED2_8505, USB_LINK_RESERVED3_8505, USB_LINK_HM_IDGND_8505, USB_LINK_CHARGERPORT_NOT_OK_8505, USB_LINK_CHARGER_DM_HIGH_8505, USB_LINK_PHYEN_NO_VBUS_NO_IDGND_8505, USB_LINK_STD_UPSTREAM_NO_IDGNG_NO_VBUS_8505, USB_LINK_STD_UPSTREAM_8505, USB_LINK_CHARGER_SE1_8505, USB_LINK_CARKIT_CHGR_1_8505, USB_LINK_CARKIT_CHGR_2_8505, USB_LINK_ACA_DOCK_CHGR_8505, USB_LINK_SAMSUNG_BOOT_CBL_PHY_EN_8505, USB_LINK_SAMSUNG_BOOT_CBL_PHY_DISB_8505, USB_LINK_SAMSUNG_UART_CBL_PHY_EN_8505, USB_LINK_SAMSUNG_UART_CBL_PHY_DISB_8505, USB_LINK_MOTOROLA_FACTORY_CBL_PHY_EN_8505, }; enum ab8500_usb_mode { USB_IDLE = 0, USB_PERIPHERAL, USB_HOST, USB_DEDICATED_CHG }; struct ab8500_usb { struct usb_phy phy; struct device *dev; struct ab8500 *ab8500; unsigned vbus_draw; struct work_struct phy_dis_work; enum ab8500_usb_mode mode; struct regulator *v_ape; struct regulator *v_musb; struct regulator *v_ulpi; int saved_v_ulpi; int previous_link_status_state; struct pinctrl *pinctrl; struct pinctrl_state *pins_sleep; }; static inline struct ab8500_usb *phy_to_ab(struct usb_phy *x) { return container_of(x, struct ab8500_usb, phy); } static void 
ab8500_usb_wd_workaround(struct ab8500_usb *ab) { abx500_set_register_interruptible(ab->dev, AB8500_SYS_CTRL2_BLOCK, AB8500_MAIN_WD_CTRL_REG, AB8500_BIT_WD_CTRL_ENABLE); udelay(AB8500_WD_KICK_DELAY_US); abx500_set_register_interruptible(ab->dev, AB8500_SYS_CTRL2_BLOCK, AB8500_MAIN_WD_CTRL_REG, (AB8500_BIT_WD_CTRL_ENABLE | AB8500_BIT_WD_CTRL_KICK)); udelay(AB8500_WD_V11_DISABLE_DELAY_US); abx500_set_register_interruptible(ab->dev, AB8500_SYS_CTRL2_BLOCK, AB8500_MAIN_WD_CTRL_REG, 0); } static void ab8500_usb_regulator_enable(struct ab8500_usb *ab) { int ret, volt; ret = regulator_enable(ab->v_ape); if (ret) dev_err(ab->dev, "Failed to enable v-ape\n"); if (!is_ab8500_2p0_or_earlier(ab->ab8500)) { ab->saved_v_ulpi = regulator_get_voltage(ab->v_ulpi); if (ab->saved_v_ulpi < 0) dev_err(ab->dev, "Failed to get v_ulpi voltage\n"); ret = regulator_set_voltage(ab->v_ulpi, 1300000, 1350000); if (ret < 0) dev_err(ab->dev, "Failed to set the Vintcore to 1.3V, ret=%d\n", ret); ret = regulator_set_optimum_mode(ab->v_ulpi, 28000); if (ret < 0) dev_err(ab->dev, "Failed to set optimum mode (ret=%d)\n", ret); } ret = regulator_enable(ab->v_ulpi); if (ret) dev_err(ab->dev, "Failed to enable vddulpivio18\n"); if (!is_ab8500_2p0_or_earlier(ab->ab8500)) { volt = regulator_get_voltage(ab->v_ulpi); if ((volt != 1300000) && (volt != 1350000)) dev_err(ab->dev, "Vintcore is not set to 1.3V volt=%d\n", volt); } ret = regulator_enable(ab->v_musb); if (ret) dev_err(ab->dev, "Failed to enable musb_1v8\n"); } static void ab8500_usb_regulator_disable(struct ab8500_usb *ab) { int ret; regulator_disable(ab->v_musb); regulator_disable(ab->v_ulpi); /* USB is not the only consumer of Vintcore, restore old settings */ if (!is_ab8500_2p0_or_earlier(ab->ab8500)) { if (ab->saved_v_ulpi > 0) { ret = regulator_set_voltage(ab->v_ulpi, ab->saved_v_ulpi, ab->saved_v_ulpi); if (ret < 0) dev_err(ab->dev, "Failed to set the Vintcore to %duV, ret=%d\n", ab->saved_v_ulpi, ret); } ret = 
regulator_set_optimum_mode(ab->v_ulpi, 0);
		if (ret < 0)
			dev_err(ab->dev, "Failed to set optimum mode (ret=%d)\n",
					ret);
	}

	regulator_disable(ab->v_ape);
}

/* Workaround for v2.0 bug # 31952: briefly assert the PHY-enable bit
 * before disabling it so the link status settles. */
static void ab8500_usb_wd_linkstatus(struct ab8500_usb *ab, u8 bit)
{
	if (is_ab8500_2p0(ab->ab8500)) {
		abx500_mask_and_set_register_interruptible(ab->dev,
				AB8500_USB, AB8500_USB_PHY_CTRL_REG,
				bit, bit);
		udelay(AB8500_V20_31952_DISABLE_DELAY_US);
	}
}

/* Power up the PHY in host or device role: select the DEFAULT pin state,
 * enable the rails, then set the matching PHY-enable bit. */
static void ab8500_usb_phy_enable(struct ab8500_usb *ab, bool sel_host)
{
	u8 bit;

	bit = sel_host ? AB8500_BIT_PHY_CTRL_HOST_EN :
		AB8500_BIT_PHY_CTRL_DEVICE_EN;

	/* mux and configure USB pins to DEFAULT state */
	ab->pinctrl = pinctrl_get_select(ab->dev, PINCTRL_STATE_DEFAULT);
	if (IS_ERR(ab->pinctrl))
		dev_err(ab->dev, "could not get/set default pinstate\n");

	ab8500_usb_regulator_enable(ab);

	abx500_mask_and_set_register_interruptible(ab->dev,
			AB8500_USB, AB8500_USB_PHY_CTRL_REG,
			bit, bit);
}

/* Power down the PHY for the given role and release the shared pins. */
static void ab8500_usb_phy_disable(struct ab8500_usb *ab, bool sel_host)
{
	u8 bit;

	bit = sel_host ?
AB8500_BIT_PHY_CTRL_HOST_EN : AB8500_BIT_PHY_CTRL_DEVICE_EN; ab8500_usb_wd_linkstatus(ab, bit); abx500_mask_and_set_register_interruptible(ab->dev, AB8500_USB, AB8500_USB_PHY_CTRL_REG, bit, 0); /* Needed to disable the phy.*/ ab8500_usb_wd_workaround(ab); ab8500_usb_regulator_disable(ab); if (!IS_ERR(ab->pinctrl)) { /* configure USB pins to SLEEP state */ ab->pins_sleep = pinctrl_lookup_state(ab->pinctrl, PINCTRL_STATE_SLEEP); if (IS_ERR(ab->pins_sleep)) dev_dbg(ab->dev, "could not get sleep pinstate\n"); else if (pinctrl_select_state(ab->pinctrl, ab->pins_sleep)) dev_err(ab->dev, "could not set pins to sleep state\n"); /* as USB pins are shared with idddet, release them to allow * iddet to request them */ pinctrl_put(ab->pinctrl); } } #define ab8500_usb_host_phy_en(ab) ab8500_usb_phy_enable(ab, true) #define ab8500_usb_host_phy_dis(ab) ab8500_usb_phy_disable(ab, true) #define ab8500_usb_peri_phy_en(ab) ab8500_usb_phy_enable(ab, false) #define ab8500_usb_peri_phy_dis(ab) ab8500_usb_phy_disable(ab, false) static int ab8505_usb_link_status_update(struct ab8500_usb *ab, enum ab8505_usb_link_status lsts) { enum ux500_musb_vbus_id_status event = 0; dev_dbg(ab->dev, "ab8505_usb_link_status_update %d\n", lsts); /* * Spurious link_status interrupts are seen at the time of * disconnection of a device in RIDA state */ if (ab->previous_link_status_state == USB_LINK_ACA_RID_A_8505 && (lsts == USB_LINK_STD_HOST_NC_8505)) return 0; ab->previous_link_status_state = lsts; switch (lsts) { case USB_LINK_ACA_RID_B_8505: event = UX500_MUSB_RIDB; case USB_LINK_NOT_CONFIGURED_8505: case USB_LINK_RESERVED0_8505: case USB_LINK_RESERVED1_8505: case USB_LINK_RESERVED2_8505: case USB_LINK_RESERVED3_8505: ab->mode = USB_IDLE; ab->phy.otg->default_a = false; ab->vbus_draw = 0; if (event != UX500_MUSB_RIDB) event = UX500_MUSB_NONE; /* * Fallback to default B_IDLE as nothing * is connected */ ab->phy.state = OTG_STATE_B_IDLE; break; case USB_LINK_ACA_RID_C_NM_8505: event = UX500_MUSB_RIDC; case 
USB_LINK_STD_HOST_NC_8505: case USB_LINK_STD_HOST_C_NS_8505: case USB_LINK_STD_HOST_C_S_8505: case USB_LINK_CDP_8505: if (ab->mode == USB_IDLE) { ab->mode = USB_PERIPHERAL; ab8500_usb_peri_phy_en(ab); atomic_notifier_call_chain(&ab->phy.notifier, UX500_MUSB_PREPARE, &ab->vbus_draw); } if (event != UX500_MUSB_RIDC) event = UX500_MUSB_VBUS; break; case USB_LINK_ACA_RID_A_8505: case USB_LINK_ACA_DOCK_CHGR_8505: event = UX500_MUSB_RIDA; case USB_LINK_HM_IDGND_8505: if (ab->mode == USB_IDLE) { ab->mode = USB_HOST; ab8500_usb_host_phy_en(ab); atomic_notifier_call_chain(&ab->phy.notifier, UX500_MUSB_PREPARE, &ab->vbus_draw); } ab->phy.otg->default_a = true; if (event != UX500_MUSB_RIDA) event = UX500_MUSB_ID; atomic_notifier_call_chain(&ab->phy.notifier, event, &ab->vbus_draw); break; case USB_LINK_DEDICATED_CHG_8505: ab->mode = USB_DEDICATED_CHG; event = UX500_MUSB_CHARGER; atomic_notifier_call_chain(&ab->phy.notifier, event, &ab->vbus_draw); break; default: break; } return 0; } static int ab8500_usb_link_status_update(struct ab8500_usb *ab, enum ab8500_usb_link_status lsts) { enum ux500_musb_vbus_id_status event = 0; dev_dbg(ab->dev, "ab8500_usb_link_status_update %d\n", lsts); /* * Spurious link_status interrupts are seen in case of a * disconnection of a device in IDGND and RIDA stage */ if (ab->previous_link_status_state == USB_LINK_HM_IDGND_8500 && (lsts == USB_LINK_STD_HOST_C_NS_8500 || lsts == USB_LINK_STD_HOST_NC_8500)) return 0; if (ab->previous_link_status_state == USB_LINK_ACA_RID_A_8500 && lsts == USB_LINK_STD_HOST_NC_8500) return 0; ab->previous_link_status_state = lsts; switch (lsts) { case USB_LINK_ACA_RID_B_8500: event = UX500_MUSB_RIDB; case USB_LINK_NOT_CONFIGURED_8500: case USB_LINK_NOT_VALID_LINK_8500: ab->mode = USB_IDLE; ab->phy.otg->default_a = false; ab->vbus_draw = 0; if (event != UX500_MUSB_RIDB) event = UX500_MUSB_NONE; /* Fallback to default B_IDLE as nothing is connected */ ab->phy.state = OTG_STATE_B_IDLE; break; case 
USB_LINK_ACA_RID_C_NM_8500: case USB_LINK_ACA_RID_C_HS_8500: case USB_LINK_ACA_RID_C_HS_CHIRP_8500: event = UX500_MUSB_RIDC; case USB_LINK_STD_HOST_NC_8500: case USB_LINK_STD_HOST_C_NS_8500: case USB_LINK_STD_HOST_C_S_8500: case USB_LINK_HOST_CHG_NM_8500: case USB_LINK_HOST_CHG_HS_8500: case USB_LINK_HOST_CHG_HS_CHIRP_8500: if (ab->mode == USB_IDLE) { ab->mode = USB_PERIPHERAL; ab8500_usb_peri_phy_en(ab); atomic_notifier_call_chain(&ab->phy.notifier, UX500_MUSB_PREPARE, &ab->vbus_draw); } if (event != UX500_MUSB_RIDC) event = UX500_MUSB_VBUS; break; case USB_LINK_ACA_RID_A_8500: event = UX500_MUSB_RIDA; case USB_LINK_HM_IDGND_8500: if (ab->mode == USB_IDLE) { ab->mode = USB_HOST; ab8500_usb_host_phy_en(ab); atomic_notifier_call_chain(&ab->phy.notifier, UX500_MUSB_PREPARE, &ab->vbus_draw); } ab->phy.otg->default_a = true; if (event != UX500_MUSB_RIDA) event = UX500_MUSB_ID; atomic_notifier_call_chain(&ab->phy.notifier, event, &ab->vbus_draw); break; case USB_LINK_DEDICATED_CHG_8500: ab->mode = USB_DEDICATED_CHG; event = UX500_MUSB_CHARGER; atomic_notifier_call_chain(&ab->phy.notifier, event, &ab->vbus_draw); break; case USB_LINK_RESERVED_8500: break; } return 0; } /* * Connection Sequence: * 1. Link Status Interrupt * 2. Enable AB clock * 3. Enable AB regulators * 4. Enable USB phy * 5. Reset the musb controller * 6. Switch the ULPI GPIO pins to fucntion mode * 7. Enable the musb Peripheral5 clock * 8. 
Restore MUSB context */ static int abx500_usb_link_status_update(struct ab8500_usb *ab) { u8 reg; int ret = 0; if (is_ab8500(ab->ab8500)) { enum ab8500_usb_link_status lsts; abx500_get_register_interruptible(ab->dev, AB8500_USB, AB8500_USB_LINE_STAT_REG, &reg); lsts = (reg >> 3) & 0x0F; ret = ab8500_usb_link_status_update(ab, lsts); } else if (is_ab8505(ab->ab8500)) { enum ab8505_usb_link_status lsts; abx500_get_register_interruptible(ab->dev, AB8500_USB, AB8505_USB_LINE_STAT_REG, &reg); lsts = (reg >> 3) & 0x1F; ret = ab8505_usb_link_status_update(ab, lsts); } return ret; } /* * Disconnection Sequence: * 1. Disconect Interrupt * 2. Disable regulators * 3. Disable AB clock * 4. Disable the Phy * 5. Link Status Interrupt * 6. Disable Musb Clock */ static irqreturn_t ab8500_usb_disconnect_irq(int irq, void *data) { struct ab8500_usb *ab = (struct ab8500_usb *) data; enum usb_phy_events event = UX500_MUSB_NONE; /* Link status will not be updated till phy is disabled. */ if (ab->mode == USB_HOST) { ab->phy.otg->default_a = false; ab->vbus_draw = 0; atomic_notifier_call_chain(&ab->phy.notifier, event, &ab->vbus_draw); ab8500_usb_host_phy_dis(ab); ab->mode = USB_IDLE; } if (ab->mode == USB_PERIPHERAL) { atomic_notifier_call_chain(&ab->phy.notifier, event, &ab->vbus_draw); ab8500_usb_peri_phy_dis(ab); atomic_notifier_call_chain(&ab->phy.notifier, UX500_MUSB_CLEAN, &ab->vbus_draw); ab->mode = USB_IDLE; ab->phy.otg->default_a = false; ab->vbus_draw = 0; } if (is_ab8500_2p0(ab->ab8500)) { if (ab->mode == USB_DEDICATED_CHG) { ab8500_usb_wd_linkstatus(ab, AB8500_BIT_PHY_CTRL_DEVICE_EN); abx500_mask_and_set_register_interruptible(ab->dev, AB8500_USB, AB8500_USB_PHY_CTRL_REG, AB8500_BIT_PHY_CTRL_DEVICE_EN, 0); } } return IRQ_HANDLED; } static irqreturn_t ab8500_usb_link_status_irq(int irq, void *data) { struct ab8500_usb *ab = (struct ab8500_usb *) data; abx500_usb_link_status_update(ab); return IRQ_HANDLED; } static void ab8500_usb_phy_disable_work(struct work_struct *work) { 
struct ab8500_usb *ab = container_of(work, struct ab8500_usb, phy_dis_work); if (!ab->phy.otg->host) ab8500_usb_host_phy_dis(ab); if (!ab->phy.otg->gadget) ab8500_usb_peri_phy_dis(ab); } static unsigned ab8500_eyediagram_workaroud(struct ab8500_usb *ab, unsigned mA) { /* * AB8500 V2 has eye diagram issues when drawing more than 100mA from * VBUS. Set charging current to 100mA in case of standard host */ if (is_ab8500_2p0_or_earlier(ab->ab8500)) if (mA > 100) mA = 100; return mA; } static int ab8500_usb_set_power(struct usb_phy *phy, unsigned mA) { struct ab8500_usb *ab; if (!phy) return -ENODEV; ab = phy_to_ab(phy); mA = ab8500_eyediagram_workaroud(ab, mA); ab->vbus_draw = mA; atomic_notifier_call_chain(&ab->phy.notifier, UX500_MUSB_VBUS, &ab->vbus_draw); return 0; } static int ab8500_usb_set_suspend(struct usb_phy *x, int suspend) { /* TODO */ return 0; } static int ab8500_usb_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget) { struct ab8500_usb *ab; if (!otg) return -ENODEV; ab = phy_to_ab(otg->phy); ab->phy.otg->gadget = gadget; /* Some drivers call this function in atomic context. * Do not update ab8500 registers directly till this * is fixed. */ if ((ab->mode != USB_IDLE) && (!gadget)) { ab->mode = USB_IDLE; schedule_work(&ab->phy_dis_work); } return 0; } static int ab8500_usb_set_host(struct usb_otg *otg, struct usb_bus *host) { struct ab8500_usb *ab; if (!otg) return -ENODEV; ab = phy_to_ab(otg->phy); ab->phy.otg->host = host; /* Some drivers call this function in atomic context. * Do not update ab8500 registers directly till this * is fixed. 
*/ if ((ab->mode != USB_IDLE) && (!host)) { ab->mode = USB_IDLE; schedule_work(&ab->phy_dis_work); } return 0; } static int ab8500_usb_regulator_get(struct ab8500_usb *ab) { int err; ab->v_ape = devm_regulator_get(ab->dev, "v-ape"); if (IS_ERR(ab->v_ape)) { dev_err(ab->dev, "Could not get v-ape supply\n"); err = PTR_ERR(ab->v_ape); return err; } ab->v_ulpi = devm_regulator_get(ab->dev, "vddulpivio18"); if (IS_ERR(ab->v_ulpi)) { dev_err(ab->dev, "Could not get vddulpivio18 supply\n"); err = PTR_ERR(ab->v_ulpi); return err; } ab->v_musb = devm_regulator_get(ab->dev, "musb_1v8"); if (IS_ERR(ab->v_musb)) { dev_err(ab->dev, "Could not get musb_1v8 supply\n"); err = PTR_ERR(ab->v_musb); return err; } return 0; } static int ab8500_usb_irq_setup(struct platform_device *pdev, struct ab8500_usb *ab) { int err; int irq; irq = platform_get_irq_byname(pdev, "USB_LINK_STATUS"); if (irq < 0) { dev_err(&pdev->dev, "Link status irq not found\n"); return irq; } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_link_status_irq, IRQF_NO_SUSPEND | IRQF_SHARED, "usb-link-status", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for link status irq\n"); return err; } irq = platform_get_irq_byname(pdev, "ID_WAKEUP_F"); if (irq < 0) { dev_err(&pdev->dev, "ID fall irq not found\n"); return irq; } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_disconnect_irq, IRQF_NO_SUSPEND | IRQF_SHARED, "usb-id-fall", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for ID fall irq\n"); return err; } irq = platform_get_irq_byname(pdev, "VBUS_DET_F"); if (irq < 0) { dev_err(&pdev->dev, "VBUS fall irq not found\n"); return irq; } err = devm_request_threaded_irq(&pdev->dev, irq, NULL, ab8500_usb_disconnect_irq, IRQF_NO_SUSPEND | IRQF_SHARED, "usb-vbus-fall", ab); if (err < 0) { dev_err(ab->dev, "request_irq failed for Vbus fall irq\n"); return err; } return 0; } static int ab8500_usb_probe(struct platform_device *pdev) { struct ab8500_usb *ab; struct ab8500 
*ab8500; struct usb_otg *otg; int err; int rev; ab8500 = dev_get_drvdata(pdev->dev.parent); rev = abx500_get_chip_id(&pdev->dev); if (is_ab8500_1p1_or_earlier(ab8500)) { dev_err(&pdev->dev, "Unsupported AB8500 chip rev=%d\n", rev); return -ENODEV; } ab = devm_kzalloc(&pdev->dev, sizeof(*ab), GFP_KERNEL); if (!ab) return -ENOMEM; otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL); if (!otg) return -ENOMEM; ab->dev = &pdev->dev; ab->ab8500 = ab8500; ab->phy.dev = ab->dev; ab->phy.otg = otg; ab->phy.label = "ab8500"; ab->phy.set_suspend = ab8500_usb_set_suspend; ab->phy.set_power = ab8500_usb_set_power; ab->phy.state = OTG_STATE_UNDEFINED; otg->phy = &ab->phy; otg->set_host = ab8500_usb_set_host; otg->set_peripheral = ab8500_usb_set_peripheral; platform_set_drvdata(pdev, ab); ATOMIC_INIT_NOTIFIER_HEAD(&ab->phy.notifier); /* all: Disable phy when called from set_host and set_peripheral */ INIT_WORK(&ab->phy_dis_work, ab8500_usb_phy_disable_work); err = ab8500_usb_regulator_get(ab); if (err) return err; err = ab8500_usb_irq_setup(pdev, ab); if (err < 0) return err; err = usb_add_phy(&ab->phy, USB_PHY_TYPE_USB2); if (err) { dev_err(&pdev->dev, "Can't register transceiver\n"); return err; } /* Phy tuning values for AB8500 */ if (!is_ab8500_2p0_or_earlier(ab->ab8500)) { /* Enable the PBT/Bank 0x12 access */ err = abx500_set_register_interruptible(ab->dev, AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x01); if (err < 0) dev_err(ab->dev, "Failed to enable bank12 access err=%d\n", err); err = abx500_set_register_interruptible(ab->dev, AB8500_DEBUG, AB8500_USB_PHY_TUNE1, 0xC8); if (err < 0) dev_err(ab->dev, "Failed to set PHY_TUNE1 register err=%d\n", err); err = abx500_set_register_interruptible(ab->dev, AB8500_DEBUG, AB8500_USB_PHY_TUNE2, 0x00); if (err < 0) dev_err(ab->dev, "Failed to set PHY_TUNE2 register err=%d\n", err); err = abx500_set_register_interruptible(ab->dev, AB8500_DEBUG, AB8500_USB_PHY_TUNE3, 0x78); if (err < 0) dev_err(ab->dev, "Failed to set PHY_TUNE3 
regester err=%d\n", err); /* Switch to normal mode/disable Bank 0x12 access */ err = abx500_set_register_interruptible(ab->dev, AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x00); if (err < 0) dev_err(ab->dev, "Failed to switch bank12 access err=%d\n", err); } /* Phy tuning values for AB8505 */ if (is_ab8505(ab->ab8500)) { /* Enable the PBT/Bank 0x12 access */ err = abx500_mask_and_set_register_interruptible(ab->dev, AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x01, 0x01); if (err < 0) dev_err(ab->dev, "Failed to enable bank12 access err=%d\n", err); err = abx500_mask_and_set_register_interruptible(ab->dev, AB8500_DEBUG, AB8500_USB_PHY_TUNE1, 0xC8, 0xC8); if (err < 0) dev_err(ab->dev, "Failed to set PHY_TUNE1 register err=%d\n", err); err = abx500_mask_and_set_register_interruptible(ab->dev, AB8500_DEBUG, AB8500_USB_PHY_TUNE2, 0x60, 0x60); if (err < 0) dev_err(ab->dev, "Failed to set PHY_TUNE2 register err=%d\n", err); err = abx500_mask_and_set_register_interruptible(ab->dev, AB8500_DEBUG, AB8500_USB_PHY_TUNE3, 0xFC, 0x80); if (err < 0) dev_err(ab->dev, "Failed to set PHY_TUNE3 regester err=%d\n", err); /* Switch to normal mode/disable Bank 0x12 access */ err = abx500_mask_and_set_register_interruptible(ab->dev, AB8500_DEVELOPMENT, AB8500_BANK12_ACCESS, 0x00, 0x00); if (err < 0) dev_err(ab->dev, "Failed to switch bank12 access err=%d\n", err); } /* Needed to enable ID detection. 
*/ ab8500_usb_wd_workaround(ab); abx500_usb_link_status_update(ab); dev_info(&pdev->dev, "revision 0x%2x driver initialized\n", rev); return 0; } static int ab8500_usb_remove(struct platform_device *pdev) { struct ab8500_usb *ab = platform_get_drvdata(pdev); cancel_work_sync(&ab->phy_dis_work); usb_remove_phy(&ab->phy); if (ab->mode == USB_HOST) ab8500_usb_host_phy_dis(ab); else if (ab->mode == USB_PERIPHERAL) ab8500_usb_peri_phy_dis(ab); return 0; } static struct platform_driver ab8500_usb_driver = { .probe = ab8500_usb_probe, .remove = ab8500_usb_remove, .driver = { .name = "ab8500-usb", .owner = THIS_MODULE, }, }; static int __init ab8500_usb_init(void) { return platform_driver_register(&ab8500_usb_driver); } subsys_initcall(ab8500_usb_init); static void __exit ab8500_usb_exit(void) { platform_driver_unregister(&ab8500_usb_driver); } module_exit(ab8500_usb_exit); MODULE_ALIAS("platform:ab8500_usb"); MODULE_AUTHOR("ST-Ericsson AB"); MODULE_DESCRIPTION("AB8500 usb transceiver driver"); MODULE_LICENSE("GPL");
gpl-2.0
TeamWin/android_kernel_samsung_zerofltespr
fs/cifs/smbencrypt.c
2765
6112
/* Unix SMB/Netbios implementation. Version 1.9. SMB parameters and setup Copyright (C) Andrew Tridgell 1992-2000 Copyright (C) Luke Kenneth Casson Leighton 1996-2000 Modified by Jeremy Allison 1995. Copyright (C) Andrew Bartlett <abartlet@samba.org> 2002-2003 Modified by Steve French (sfrench@us.ibm.com) 2002-2003 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/random.h> #include "cifs_unicode.h" #include "cifspdu.h" #include "cifsglob.h" #include "cifs_debug.h" #include "cifsproto.h" #ifndef false #define false 0 #endif #ifndef true #define true 1 #endif /* following came from the other byteorder.h to avoid include conflicts */ #define CVAL(buf,pos) (((unsigned char *)(buf))[pos]) #define SSVALX(buf,pos,val) (CVAL(buf,pos)=(val)&0xFF,CVAL(buf,pos+1)=(val)>>8) #define SSVAL(buf,pos,val) SSVALX((buf),(pos),((__u16)(val))) static void str_to_key(unsigned char *str, unsigned char *key) { int i; key[0] = str[0] >> 1; key[1] = ((str[0] & 0x01) << 6) | (str[1] >> 2); key[2] = ((str[1] & 0x03) << 5) | (str[2] >> 3); key[3] = ((str[2] & 0x07) << 4) | (str[3] >> 4); key[4] = ((str[3] & 0x0F) << 3) | (str[4] >> 5); key[5] = ((str[4] & 0x1F) << 2) | (str[5] >> 6); key[6] = ((str[5] & 0x3F) << 1) | (str[6] >> 7); key[7] = str[6] & 0x7F; 
for (i = 0; i < 8; i++) key[i] = (key[i] << 1); } static int smbhash(unsigned char *out, const unsigned char *in, unsigned char *key) { int rc; unsigned char key2[8]; struct crypto_blkcipher *tfm_des; struct scatterlist sgin, sgout; struct blkcipher_desc desc; str_to_key(key, key2); tfm_des = crypto_alloc_blkcipher("ecb(des)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm_des)) { rc = PTR_ERR(tfm_des); cifs_dbg(VFS, "could not allocate des crypto API\n"); goto smbhash_err; } desc.tfm = tfm_des; crypto_blkcipher_setkey(tfm_des, key2, 8); sg_init_one(&sgin, in, 8); sg_init_one(&sgout, out, 8); rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, 8); if (rc) cifs_dbg(VFS, "could not encrypt crypt key rc: %d\n", rc); crypto_free_blkcipher(tfm_des); smbhash_err: return rc; } static int E_P16(unsigned char *p14, unsigned char *p16) { int rc; unsigned char sp8[8] = { 0x4b, 0x47, 0x53, 0x21, 0x40, 0x23, 0x24, 0x25 }; rc = smbhash(p16, sp8, p14); if (rc) return rc; rc = smbhash(p16 + 8, sp8, p14 + 7); return rc; } static int E_P24(unsigned char *p21, const unsigned char *c8, unsigned char *p24) { int rc; rc = smbhash(p24, c8, p21); if (rc) return rc; rc = smbhash(p24 + 8, c8, p21 + 7); if (rc) return rc; rc = smbhash(p24 + 16, c8, p21 + 14); return rc; } /* produce a md4 message digest from data of length n bytes */ int mdfour(unsigned char *md4_hash, unsigned char *link_str, int link_len) { int rc; unsigned int size; struct crypto_shash *md4; struct sdesc *sdescmd4; md4 = crypto_alloc_shash("md4", 0, 0); if (IS_ERR(md4)) { rc = PTR_ERR(md4); cifs_dbg(VFS, "%s: Crypto md4 allocation error %d\n", __func__, rc); return rc; } size = sizeof(struct shash_desc) + crypto_shash_descsize(md4); sdescmd4 = kmalloc(size, GFP_KERNEL); if (!sdescmd4) { rc = -ENOMEM; goto mdfour_err; } sdescmd4->shash.tfm = md4; sdescmd4->shash.flags = 0x0; rc = crypto_shash_init(&sdescmd4->shash); if (rc) { cifs_dbg(VFS, "%s: Could not init md4 shash\n", __func__); goto mdfour_err; } rc = 
crypto_shash_update(&sdescmd4->shash, link_str, link_len); if (rc) { cifs_dbg(VFS, "%s: Could not update with link_str\n", __func__); goto mdfour_err; } rc = crypto_shash_final(&sdescmd4->shash, md4_hash); if (rc) cifs_dbg(VFS, "%s: Could not generate md4 hash\n", __func__); mdfour_err: crypto_free_shash(md4); kfree(sdescmd4); return rc; } /* This implements the X/Open SMB password encryption It takes a password, a 8 byte "crypt key" and puts 24 bytes of encrypted password into p24 */ /* Note that password must be uppercased and null terminated */ int SMBencrypt(unsigned char *passwd, const unsigned char *c8, unsigned char *p24) { int rc; unsigned char p14[14], p16[16], p21[21]; memset(p14, '\0', 14); memset(p16, '\0', 16); memset(p21, '\0', 21); memcpy(p14, passwd, 14); rc = E_P16(p14, p16); if (rc) return rc; memcpy(p21, p16, 16); rc = E_P24(p21, c8, p24); return rc; } /* * Creates the MD4 Hash of the users password in NT UNICODE. */ int E_md4hash(const unsigned char *passwd, unsigned char *p16, const struct nls_table *codepage) { int rc; int len; __le16 wpwd[129]; /* Password cannot be longer than 128 characters */ if (passwd) /* Password must be converted to NT unicode */ len = cifs_strtoUTF16(wpwd, passwd, 128, codepage); else { len = 0; *wpwd = 0; /* Ensure string is null terminated */ } rc = mdfour(p16, (unsigned char *) wpwd, len * sizeof(__le16)); memset(wpwd, 0, 129 * sizeof(__le16)); return rc; } /* Does the NT MD4 hash then des encryption. */ int SMBNTencrypt(unsigned char *passwd, unsigned char *c8, unsigned char *p24, const struct nls_table *codepage) { int rc; unsigned char p16[16], p21[21]; memset(p16, '\0', 16); memset(p21, '\0', 21); rc = E_md4hash(passwd, p16, codepage); if (rc) { cifs_dbg(FYI, "%s Can't generate NT hash, error: %d\n", __func__, rc); return rc; } memcpy(p21, p16, 16); rc = E_P24(p21, c8, p24); return rc; }
gpl-2.0