repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
aopp/android_kernel_google_msm | drivers/net/fddi/skfp/pcmplc.c | 12681 | 49275 | /******************************************************************************
*
* (C)Copyright 1998,1999 SysKonnect,
* a business unit of Schneider & Koch & Co. Datensysteme GmbH.
*
* See the file "skfddi.c" for further information.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* The information in this file is provided "AS IS" without warranty.
*
******************************************************************************/
/*
PCM
Physical Connection Management
*/
/*
 * Hardware independent state machine implementation
* The following external SMT functions are referenced :
*
* queue_event()
* smt_timer_start()
* smt_timer_stop()
*
* The following external HW dependent functions are referenced :
* sm_pm_control()
* sm_ph_linestate()
* sm_pm_ls_latch()
*
* The following HW dependent events are required :
* PC_QLS
* PC_ILS
* PC_HLS
* PC_MLS
* PC_NSE
* PC_LEM
*
*/
#include "h/types.h"
#include "h/fddi.h"
#include "h/smc.h"
#include "h/supern_2.h"
#define KERNEL
#include "h/smtstate.h"
#ifndef lint
static const char ID_sccs[] = "@(#)pcmplc.c 2.55 99/08/05 (C) SK " ;
#endif
#ifdef FDDI_MIB
extern int snmp_fddi_trap(
#ifdef ANSIC
struct s_smc * smc, int type, int index
#endif
);
#endif
#ifdef CONCENTRATOR
extern int plc_is_installed(
#ifdef ANSIC
struct s_smc *smc ,
int p
#endif
) ;
#endif
/*
* FSM Macros
*/
#define AFLAG (0x20)
#define GO_STATE(x) (mib->fddiPORTPCMState = (x)|AFLAG)
#define ACTIONS_DONE() (mib->fddiPORTPCMState &= ~AFLAG)
#define ACTIONS(x) (x|AFLAG)
/*
* PCM states
*/
#define PC0_OFF 0
#define PC1_BREAK 1
#define PC2_TRACE 2
#define PC3_CONNECT 3
#define PC4_NEXT 4
#define PC5_SIGNAL 5
#define PC6_JOIN 6
#define PC7_VERIFY 7
#define PC8_ACTIVE 8
#define PC9_MAINT 9
#ifdef DEBUG
/*
* symbolic state names
*/
static const char * const pcm_states[] = {
"PC0_OFF","PC1_BREAK","PC2_TRACE","PC3_CONNECT","PC4_NEXT",
"PC5_SIGNAL","PC6_JOIN","PC7_VERIFY","PC8_ACTIVE","PC9_MAINT"
} ;
/*
* symbolic event names
*/
static const char * const pcm_events[] = {
"NONE","PC_START","PC_STOP","PC_LOOP","PC_JOIN","PC_SIGNAL",
"PC_REJECT","PC_MAINT","PC_TRACE","PC_PDR",
"PC_ENABLE","PC_DISABLE",
"PC_QLS","PC_ILS","PC_MLS","PC_HLS","PC_LS_PDR","PC_LS_NONE",
"PC_TIMEOUT_TB_MAX","PC_TIMEOUT_TB_MIN",
"PC_TIMEOUT_C_MIN","PC_TIMEOUT_T_OUT",
"PC_TIMEOUT_TL_MIN","PC_TIMEOUT_T_NEXT","PC_TIMEOUT_LCT",
"PC_NSE","PC_LEM"
} ;
#endif
#ifdef MOT_ELM
/*
* PCL-S control register
* this register in the PLC-S controls the scrambling parameters
*/
#define PLCS_CONTROL_C_U 0
#define PLCS_CONTROL_C_S (PL_C_SDOFF_ENABLE | PL_C_SDON_ENABLE | \
PL_C_CIPHER_ENABLE)
#define PLCS_FASSERT_U 0
#define PLCS_FASSERT_S 0xFd76 /* 52.0 us */
#define PLCS_FDEASSERT_U 0
#define PLCS_FDEASSERT_S 0
#else /* nMOT_ELM */
/*
* PCL-S control register
* this register in the PLC-S controls the scrambling parameters
* can be patched for ANSI compliance if standard changes
*/
static const u_char plcs_control_c_u[17] = "PLC_CNTRL_C_U=\0\0" ;
static const u_char plcs_control_c_s[17] = "PLC_CNTRL_C_S=\01\02" ;
#define PLCS_CONTROL_C_U (plcs_control_c_u[14] | (plcs_control_c_u[15]<<8))
#define PLCS_CONTROL_C_S (plcs_control_c_s[14] | (plcs_control_c_s[15]<<8))
#endif /* nMOT_ELM */
/*
* external vars
*/
/* struct definition see 'cmtdef.h' (also used by CFM) */
#define PS_OFF 0
#define PS_BIT3 1
#define PS_BIT4 2
#define PS_BIT7 3
#define PS_LCT 4
#define PS_BIT8 5
#define PS_JOIN 6
#define PS_ACTIVE 7
#define LCT_LEM_MAX 255
/*
* PLC timing parameter
*/
#define PLC_MS(m) ((int)((0x10000L-(m*100000L/2048))))
#define SLOW_TL_MIN PLC_MS(6)
#define SLOW_C_MIN PLC_MS(10)
static const struct plt {
int timer ; /* relative plc timer address */
int para ; /* default timing parameters */
} pltm[] = {
{ PL_C_MIN, SLOW_C_MIN }, /* min t. to remain Connect State */
{ PL_TL_MIN, SLOW_TL_MIN }, /* min t. to transmit a Line State */
{ PL_TB_MIN, TP_TB_MIN }, /* min break time */
{ PL_T_OUT, TP_T_OUT }, /* Signaling timeout */
{ PL_LC_LENGTH, TP_LC_LENGTH }, /* Link Confidence Test Time */
{ PL_T_SCRUB, TP_T_SCRUB }, /* Scrub Time == MAC TVX time ! */
{ PL_NS_MAX, TP_NS_MAX }, /* max t. that noise is tolerated */
{ 0,0 }
} ;
/*
* interrupt mask
*/
#ifdef SUPERNET_3
/*
* Do we need the EBUF error during signaling, too, to detect SUPERNET_3
* PLL bug?
*/
static const int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;
#else /* SUPERNET_3 */
/*
* We do NOT need the elasticity buffer error during signaling.
*/
/* const for consistency with the SUPERNET_3 variant and plc_imsk_act;
 * the mask is only ever read (written to PL_INTR_MASK). */
static const int plc_imsk_na = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
			PL_PCM_ENABLED | PL_SELF_TEST ;
#endif /* SUPERNET_3 */
static const int plc_imsk_act = PL_PCM_CODE | PL_TRACE_PROP | PL_PCM_BREAK |
PL_PCM_ENABLED | PL_SELF_TEST | PL_EBUF_ERR;
/* internal functions */
static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd);
static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy);
static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy);
static void reset_lem_struct(struct s_phy *phy);
static void plc_init(struct s_smc *smc, int p);
static void sm_ph_lem_start(struct s_smc *smc, int np, int threshold);
static void sm_ph_lem_stop(struct s_smc *smc, int np);
static void sm_ph_linestate(struct s_smc *smc, int phy, int ls);
static void real_init_plc(struct s_smc *smc);
/*
 * SMT timer interface:
 * arm PCM timer 0 for this PHY; 'event' is delivered to the PCM of
 * PHY phy->np when the timer expires.
 */
static void start_pcm_timer0(struct s_smc *smc, u_long value, int event,
			     struct s_phy *phy)
{
	u_long token = EV_TOKEN(EVENT_PCM + phy->np, event);

	phy->timer0_exp = FALSE;	/* reset the expiry flag first */
	smt_timer_start(smc, &phy->pcm_timer0, value, token);
}
/*
 * SMT timer interface:
 * cancel PCM timer 0 if it is currently armed; no-op otherwise.
 */
static void stop_pcm_timer0(struct s_smc *smc, struct s_phy *phy)
{
	if (!phy->pcm_timer0.tm_active)
		return;		/* nothing to stop */
	smt_timer_stop(smc, &phy->pcm_timer0);
}
/*
	init PCM state machine (called by driver)
	clear all PCM vars and flags
	Sets per-PHY port type, PMD class, requested-path table and all
	PCM/LEM bookkeeping to their startup values, then initializes the
	PLC hardware via real_init_plc().
*/
void pcm_init(struct s_smc *smc)
{
	int		i ;
	int		np ;
	struct s_phy	*phy ;
	struct fddi_mib_p *mib ;

	for (np = 0,phy = smc->y ; np < NUMPHYS ; np++,phy++) {
		/* Indicates the type of PHY being used */
		mib = phy->mib ;
		mib->fddiPORTPCMState = ACTIONS(PC0_OFF) ;
		phy->np = np ;
		/* derive the port type (A/B/S/M) from the station type */
		switch (smc->s.sas) {
#ifdef	CONCENTRATOR
		case SMT_SAS :
			mib->fddiPORTMy_Type = (np == PS) ? TS : TM ;
			break ;
		case SMT_DAS :
			mib->fddiPORTMy_Type = (np == PA) ? TA :
					(np == PB) ? TB : TM ;
			break ;
		case SMT_NAC :
			mib->fddiPORTMy_Type = TM ;
			break;
#else
		case SMT_SAS :
			mib->fddiPORTMy_Type = (np == PS) ? TS : TNONE ;
			mib->fddiPORTHardwarePresent = (np == PS) ? TRUE :
					FALSE ;
			/* the unused second PHY is parked in OFF */
#ifndef	SUPERNET_3
			smc->y[PA].mib->fddiPORTPCMState = PC0_OFF ;
#else
			smc->y[PB].mib->fddiPORTPCMState = PC0_OFF ;
#endif
			break ;
		case SMT_DAS :
			mib->fddiPORTMy_Type = (np == PB) ? TB : TA ;
			break ;
#endif
		}
		/*
		 * set PMD-type from the EEPROM-provided PMD code
		 */
		phy->pmd_scramble = 0 ;
		switch (phy->pmd_type[PMD_SK_PMD]) {
		case 'P' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_MULTI ;
			break ;
		case 'L' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_LCF ;
			break ;
		case 'D' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
			break ;
		case 'S' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
			phy->pmd_scramble = TRUE ;
			break ;
		case 'U' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
			phy->pmd_scramble = TRUE ;
			break ;
		case '1' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE1 ;
			break ;
		case '2' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE2 ;
			break ;
		case '3' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE2 ;
			break ;
		case '4' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_SINGLE1 ;
			break ;
		case 'H' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_UNKNOWN ;
			break ;
		case 'I' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
			break ;
		case 'G' :
			mib->fddiPORTPMDClass = MIB_PMDCLASS_TP ;
			break ;
		default:
			mib->fddiPORTPMDClass = MIB_PMDCLASS_UNKNOWN ;
			break ;
		}
		/*
		 * A and B port can be on primary and secondary path:
		 * fill the requested-path preference table per port type
		 */
		switch (mib->fddiPORTMy_Type) {
		case TA :
			mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
			mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
			mib->fddiPORTRequestedPaths[2] =
				MIB_P_PATH_LOCAL |
				MIB_P_PATH_CON_ALTER |
				MIB_P_PATH_SEC_PREFER ;
			mib->fddiPORTRequestedPaths[3] =
				MIB_P_PATH_LOCAL |
				MIB_P_PATH_CON_ALTER |
				MIB_P_PATH_SEC_PREFER |
				MIB_P_PATH_THRU ;
			break ;
		case TB :
			mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
			mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
			mib->fddiPORTRequestedPaths[2] =
				MIB_P_PATH_LOCAL |
				MIB_P_PATH_PRIM_PREFER ;
			mib->fddiPORTRequestedPaths[3] =
				MIB_P_PATH_LOCAL |
				MIB_P_PATH_PRIM_PREFER |
				MIB_P_PATH_CON_PREFER |
				MIB_P_PATH_THRU ;
			break ;
		case TS :
			mib->fddiPORTAvailablePaths |= MIB_PATH_S ;
			mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
			mib->fddiPORTRequestedPaths[2] =
				MIB_P_PATH_LOCAL |
				MIB_P_PATH_CON_ALTER |
				MIB_P_PATH_PRIM_PREFER ;
			mib->fddiPORTRequestedPaths[3] =
				MIB_P_PATH_LOCAL |
				MIB_P_PATH_CON_ALTER |
				MIB_P_PATH_PRIM_PREFER ;
			break ;
		case TM :
			mib->fddiPORTRequestedPaths[1] = MIB_P_PATH_LOCAL ;
			mib->fddiPORTRequestedPaths[2] =
				MIB_P_PATH_LOCAL |
				MIB_P_PATH_SEC_ALTER |
				MIB_P_PATH_PRIM_ALTER ;
			mib->fddiPORTRequestedPaths[3] = 0 ;
			break ;
		}

		/* reset all PCM/LEM bookkeeping for this PHY */
		phy->pc_lem_fail = FALSE ;
		mib->fddiPORTPCMStateX = mib->fddiPORTPCMState ;
		mib->fddiPORTLCTFail_Ct = 0 ;
		mib->fddiPORTBS_Flag = 0 ;
		mib->fddiPORTCurrentPath = MIB_PATH_ISOLATED ;
		mib->fddiPORTNeighborType = TNONE ;
		phy->ls_flag = 0 ;
		phy->rc_flag = 0 ;
		phy->tc_flag = 0 ;
		phy->td_flag = 0 ;
		/* display name: 'A'/'B'/'S' for non-M ports, '0'.. for M ports */
		if (np >= PM)
			phy->phy_name = '0' + np - PM ;
		else
			phy->phy_name = 'A' + np ;
		phy->wc_flag = FALSE ;		/* set by SMT */
		memset((char *)&phy->lem,0,sizeof(struct lem_counter)) ;
		reset_lem_struct(phy) ;
		memset((char *)&phy->plc,0,sizeof(struct s_plc)) ;
		phy->plc.p_state = PS_OFF ;
		for (i = 0 ; i < NUMBITS ; i++) {
			phy->t_next[i] = 0 ;
		}
	}
	real_init_plc(smc) ;
}
/*
 * Obsolete public entry point, kept only because various drivers still
 * call it for compatibility.  The actual PLC initialization is done in
 * real_init_plc(), which is invoked from pcm_init().
 */
void init_plc(struct s_smc *smc)
{
	SK_UNUSED(smc) ;
}
/* Initialize the PLC hardware of every PHY in turn. */
static void real_init_plc(struct s_smc *smc)
{
	int port;

	for (port = 0; port < NUMPHYS; port++)
		plc_init(smc, port);
}
/*
 * Bring one PLC into a known state: stop its PCM, program the PLC-S
 * scrambler control register (if the chip is a PLC-S), load the default
 * timing parameters, clear pending interrupt events and enable the
 * non-active interrupt mask.
 */
static void plc_init(struct s_smc *smc, int p)
{
	int	i ;
#ifndef	MOT_ELM
	int	rev ;	/* Revision of PLC-x */
#endif	/* MOT_ELM */

	/* transit PCM state machine to MAINT state */
	outpw(PLC(p,PL_CNTRL_B),0) ;
	outpw(PLC(p,PL_CNTRL_B),PL_PCM_STOP) ;
	outpw(PLC(p,PL_CNTRL_A),0) ;

	/*
	 * if PLC-S then set control register C
	 * (revision != PLC_REVISION_A identifies a PLC-S)
	 */
#ifndef	MOT_ELM
	rev = inpw(PLC(p,PL_STATUS_A)) & PLC_REV_MASK ;
	if (rev != PLC_REVISION_A)
#endif	/* MOT_ELM */
	{
		if (smc->y[p].pmd_scramble) {
			/* scrambling PMD: enable the cipher */
			outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_S) ;
#ifdef	MOT_ELM
			outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_S) ;
			outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_S) ;
#endif	/* MOT_ELM */
		}
		else {
			outpw(PLC(p,PL_CNTRL_C),PLCS_CONTROL_C_U) ;
#ifdef	MOT_ELM
			outpw(PLC(p,PL_T_FOT_ASS),PLCS_FASSERT_U) ;
			outpw(PLC(p,PL_T_FOT_DEASS),PLCS_FDEASSERT_U) ;
#endif	/* MOT_ELM */
		}
	}

	/*
	 * set timer register from the pltm[] default table
	 */
	for ( i = 0 ; pltm[i].timer; i++)	/* set timer parameter reg */
		outpw(PLC(p,pltm[i].timer),pltm[i].para) ;

	(void)inpw(PLC(p,PL_INTR_EVENT)) ;	/* clear interrupt event reg */
	plc_clear_irq(smc,p) ;
	outpw(PLC(p,PL_INTR_MASK),plc_imsk_na); /* enable non active irq's */

	/*
	 * if PCM is configured for class s, it will NOT go to the
	 * REMOVE state if offline (page 3-36;)
	 * in the concentrator, all inactive PHYS always must be in
	 * the remove state
	 * there's no real need to use this feature at all ..
	 */
#ifndef	CONCENTRATOR
	if ((smc->s.sas == SMT_SAS) && (p == PS)) {
		outpw(PLC(p,PL_CNTRL_B),PL_CLASS_S) ;
	}
#endif
}
/*
 * Command the hardware PCM state machine:
 * clear the current PCM-control/MAINT bits in control register B,
 * then write them back with the requested state bits set.
 */
static void plc_go_state(struct s_smc *smc, int p, int state)
{
	HW_PTR ctrl_b;
	int cur;

	SK_UNUSED(smc);

	ctrl_b = (HW_PTR) (PLC(p, PL_CNTRL_B));
	cur = inpw(ctrl_b) & ~(PL_PCM_CNTRL | PL_MAINT);
	outpw(ctrl_b, cur);		/* first clear the command field */
	outpw(ctrl_b, cur | state);	/* then issue the new command */
}
/*
 * Read the current received line state of a PHY (called by ECM & PCM)
 * and translate the raw PLC encoding into a PC_* line state code.
 */
int sm_pm_get_ls(struct s_smc *smc, int phy)
{
	int raw;

#ifdef	CONCENTRATOR
	/* a PHY that is not populated always reports quiet */
	if (!plc_is_installed(smc, phy))
		return PC_QLS;
#endif
	raw = inpw(PLC(phy, PL_STATUS_A)) & PL_LINE_ST;

	switch (raw) {
	case PL_L_QLS:
		return PC_QLS;
	case PL_L_MLS:
		return PC_MLS;
	case PL_L_HLS:
		return PC_HLS;
	case PL_L_ILS4:
	case PL_L_ILS16:
		return PC_ILS;
	case PL_L_ALS:
		return PC_LS_PDR;
	default:
		return PC_LS_NONE;
	}
}
/*
 * Load the next 'len' PCM signaling bits (t_val[bitn .. bitn+len-1])
 * into the PLC transmit vector registers.
 * Returns 1 if the PLC is still busy signaling the previous vector
 * (registers must not be overwritten then), 0 on success.
 */
static int plc_send_bits(struct s_smc *smc, struct s_phy *phy, int len)
{
	int np = phy->np ;		/* PHY index */
	int	n ;
	int	i ;

	SK_UNUSED(smc) ;

	/* create bit vector (LSB first on the wire) */
	for (i = len-1,n = 0 ; i >= 0 ; i--) {
		n = (n<<1) | phy->t_val[phy->bitn+i] ;
	}
	if (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL) {
#if	0
		printf("PL_PCM_SIGNAL is set\n") ;
#endif
		return 1;
	}
	/* write bit[n] & length = 1 to regs */
	outpw(PLC(np,PL_VECTOR_LEN),len-1) ;		/* len=nr-1 */
	outpw(PLC(np,PL_XMIT_VECTOR),n) ;
#ifdef	DEBUG
#if	1
#ifdef	DEBUG_BRD
	if (smc->debug.d_plc & 0x80)
#else
	if (debug.d_plc & 0x80)
#endif
		printf("SIGNALING bit %d .. %d\n",phy->bitn,phy->bitn+len-1) ;
#endif
#endif
	return 0;
}
/*
 * Configure the PLC data-path muxes for a DAS station.
 * MUX_WRAPB routes port A into remote loop / config-control mode;
 * any other mux setting clears that routing.  Port B is always taken
 * out of remote loop.  No-op unless the station is a DAS.
 */
void plc_config_mux(struct s_smc *smc, int mux)
{
	if (smc->s.sas != SMT_DAS)
		return;

	if (mux != MUX_WRAPB) {
		CLEAR(PLC(PA, PL_CNTRL_B), PL_CONFIG_CNTRL);
		CLEAR(PLC(PA, PL_CNTRL_A), PL_SC_REM_LOOP);
	} else {
		SETMASK(PLC(PA, PL_CNTRL_B), PL_CONFIG_CNTRL, PL_CONFIG_CNTRL);
		SETMASK(PLC(PA, PL_CNTRL_A), PL_SC_REM_LOOP, PL_SC_REM_LOOP);
	}
	CLEAR(PLC(PB, PL_CNTRL_B), PL_CONFIG_CNTRL);
	CLEAR(PLC(PB, PL_CNTRL_A), PL_SC_REM_LOOP);
}
/*
	PCM state machine
	called by dispatcher & fddi_init() (driver)
	do
		display state change
		process event
	until SM is stable
	Afterwards mirror the stable state into the MIB (faking CONNECT
	while the PLC signals for us) and raise SRF / SNMP notifications
	on real state transitions.
*/
void pcm(struct s_smc *smc, const int np, int event)
{
	int	state ;
	int	oldstate ;
	struct s_phy	*phy ;
	struct fddi_mib_p	*mib ;

#ifndef	CONCENTRATOR
	/*
	 * ignore 2nd PHY if SAS
	 */
	if ((np != PS) && (smc->s.sas == SMT_SAS))
		return ;
#endif
	phy = &smc->y[np] ;
	mib = phy->mib ;
	oldstate = mib->fddiPORTPCMState ;
	do {
		DB_PCM("PCM %c: state %s",
			phy->phy_name,
			(mib->fddiPORTPCMState & AFLAG) ? "ACTIONS " : "") ;
		DB_PCM("%s, event %s\n",
			pcm_states[mib->fddiPORTPCMState & ~AFLAG],
			pcm_events[event]) ;
		state = mib->fddiPORTPCMState ;
		pcm_fsm(smc,phy,event) ;
		event = 0 ;	/* deliver the event only on the first pass */
	} while (state != mib->fddiPORTPCMState) ;

	/*
	 * because the PLC does the bit signaling for us,
	 * we're always in SIGNAL state
	 * the MIB wants to see CONNECT
	 * we therefore fake an entry in the MIB
	 */
	if (state == PC5_SIGNAL)
		mib->fddiPORTPCMStateX = PC3_CONNECT ;
	else
		mib->fddiPORTPCMStateX = state ;

#ifndef	SLIM_SMT
	/*
	 * path change: entering or leaving ACTIVE is reported via SRF
	 */
	if (	mib->fddiPORTPCMState != oldstate &&
		((oldstate == PC8_ACTIVE) || (mib->fddiPORTPCMState == PC8_ACTIVE))) {
		smt_srf_event(smc,SMT_EVENT_PORT_PATH_CHANGE,
			(int) (INDEX_PORT+ phy->np),0) ;
	}
#endif

#ifdef	FDDI_MIB
	/* check whether a snmp-trap has to be sent */

	if ( mib->fddiPORTPCMState != oldstate ) {
		/* a real state change took place */
		DB_SNMP ("PCM from %d to %d\n", oldstate, mib->fddiPORTPCMState);
		if ( mib->fddiPORTPCMState == PC0_OFF ) {
			/* send first trap */
			snmp_fddi_trap (smc, 1, (int) mib->fddiPORTIndex );
		} else if ( oldstate == PC0_OFF ) {
			/* send second trap */
			snmp_fddi_trap (smc, 2, (int) mib->fddiPORTIndex );
		} else if ( mib->fddiPORTPCMState != PC2_TRACE &&
			oldstate == PC8_ACTIVE ) {
			/* send third trap */
			snmp_fddi_trap (smc, 3, (int) mib->fddiPORTIndex );
		} else if ( mib->fddiPORTPCMState == PC8_ACTIVE ) {
			/* send fourth trap */
			snmp_fddi_trap (smc, 4, (int) mib->fddiPORTIndex );
		}
	}
#endif

	pcm_state_change(smc,np,state) ;
}
/*
 * PCM state machine
 * One transition step: handles the state-independent commands first
 * (STOP/START/DISABLE/LCT timeout), then dispatches on the current
 * state.  ACTIONS(x) states perform the entry actions for state x;
 * plain states react to events.  The PLC hardware performs the actual
 * bit signaling; this FSM only sequences it (see plc.p_state).
 */
static void pcm_fsm(struct s_smc *smc, struct s_phy *phy, int cmd)
{
	int	i ;
	int	np = phy->np ;		/* PHY index */
	struct s_plc	*plc ;
	struct fddi_mib_p	*mib ;
#ifndef	MOT_ELM
	u_short	plc_rev ;		/* Revision of the plc */
#endif	/* nMOT_ELM */

	plc = &phy->plc ;
	mib = phy->mib ;

	/*
	 * general transitions independent of state
	 */
	switch (cmd) {
	case PC_STOP :
		/*PC00-PC80*/
		if (mib->fddiPORTPCMState != PC9_MAINT) {
			GO_STATE(PC0_OFF) ;
			AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
				FDDI_PORT_EVENT, (u_long) FDDI_PORT_STOP,
				smt_get_port_event_word(smc));
		}
		return ;
	case PC_START :
		/*PC01-PC81*/
		if (mib->fddiPORTPCMState != PC9_MAINT)
			GO_STATE(PC1_BREAK) ;
		return ;
	case PC_DISABLE :
		/* PC09-PC99 */
		GO_STATE(PC9_MAINT) ;
		AIX_EVENT(smc, (u_long) FDDI_RING_STATUS, (u_long)
			FDDI_PORT_EVENT, (u_long) FDDI_PORT_DISABLED,
			smt_get_port_event_word(smc));
		return ;
	case PC_TIMEOUT_LCT :
		/* if long or extended LCT */
		stop_pcm_timer0(smc,phy) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
		/* end of LCT is indicate by PCM_CODE (initiate PCM event) */
		return ;
	}

	switch(mib->fddiPORTPCMState) {
	case ACTIONS(PC0_OFF) :
		/* entry actions for OFF: stop everything, leave the ring */
		stop_pcm_timer0(smc,phy) ;
		outpw(PLC(np,PL_CNTRL_A),0) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
		sm_ph_lem_stop(smc,np) ;		/* disable LEM */
		phy->cf_loop = FALSE ;
		phy->cf_join = FALSE ;
		queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
		plc_go_state(smc,np,PL_PCM_STOP) ;
		mib->fddiPORTConnectState = PCM_DISABLED ;
		ACTIONS_DONE() ;
		break ;
	case PC0_OFF:
		/*PC09*/
		if (cmd == PC_MAINT) {
			GO_STATE(PC9_MAINT) ;
			break ;
		}
		break ;
	case ACTIONS(PC1_BREAK) :
		/* Stop the LCT timer if we came from Signal state */
		stop_pcm_timer0(smc,phy) ;
		ACTIONS_DONE() ;
		plc_go_state(smc,np,0) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
		sm_ph_lem_stop(smc,np) ;		/* disable LEM */
		/*
		 * if vector is already loaded, go to OFF to clear PCM_SIGNAL
		 */
#if	0
		if (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL) {
			plc_go_state(smc,np,PL_PCM_STOP) ;
			/* TB_MIN ? */
		}
#endif
		/*
		 * Go to OFF state in any case.
		 */
		plc_go_state(smc,np,PL_PCM_STOP) ;

		if (mib->fddiPORTPC_Withhold == PC_WH_NONE)
			mib->fddiPORTConnectState = PCM_CONNECTING ;

		phy->cf_loop = FALSE ;
		phy->cf_join = FALSE ;
		queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
		phy->ls_flag = FALSE ;
		phy->pc_mode = PM_NONE ;	/* needed by CFM */
		phy->bitn = 0 ;			/* bit signaling start bit */
		/* pre-compute the first three transmit bits */
		for (i = 0 ; i < 3 ; i++)
			pc_tcode_actions(smc,i,phy) ;

		/* Set the non-active interrupt mask register */
		outpw(PLC(np,PL_INTR_MASK),plc_imsk_na) ;

		/*
		 * If the LCT was stopped. There might be a
		 * PCM_CODE interrupt event present.
		 * This must be cleared.
		 */
		(void)inpw(PLC(np,PL_INTR_EVENT)) ;
#ifndef	MOT_ELM
		/* Get the plc revision for revision dependent code */
		plc_rev = inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK ;

		if (plc_rev != PLC_REV_SN3)
#endif	/* MOT_ELM */
		{
			/*
			 * No supernet III PLC, so set Xmit vector and
			 * length BEFORE starting the state machine.
			 */
			if (plc_send_bits(smc,phy,3)) {
				return ;
			}
		}

		/*
		 * Now give the Start command.
		 * - The start command shall be done before setting the bits
		 *   to be signaled. (In PLC-S description and PLCS in SN3.
		 * - The start command shall be issued AFTER setting the
		 *   XMIT vector and the XMIT length register.
		 *
		 * We do it exactly according this specs for the old PLC and
		 * the new PLCS inside the SN3.
		 * For the usual PLCS we try it the way it is done for the
		 * old PLC and set the XMIT registers again, if the PLC is
		 * not in SIGNAL state. This is done according to an PLCS
		 * errata workaround.
		 */

		plc_go_state(smc,np,PL_PCM_START) ;

		/*
		 * workaround for PLC-S eng. sample errata
		 */
#ifdef	MOT_ELM
		if (!(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL))
#else	/* nMOT_ELM */
		if (((inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK) !=
			PLC_REVISION_A) &&
			!(inpw(PLC(np,PL_STATUS_B)) & PL_PCM_SIGNAL))
#endif	/* nMOT_ELM */
		{
			/*
			 * Set register again (PLCS errata) or the first time
			 * (new SN3 PLCS).
			 */
			(void) plc_send_bits(smc,phy,3) ;
		}
		/*
		 * end of workaround
		 */

		GO_STATE(PC5_SIGNAL) ;
		plc->p_state = PS_BIT3 ;
		plc->p_bits = 3 ;
		plc->p_start = 0 ;

		break ;
	case PC1_BREAK :
		break ;
	case ACTIONS(PC2_TRACE) :
		plc_go_state(smc,np,PL_PCM_TRACE) ;
		ACTIONS_DONE() ;
		break ;
	case PC2_TRACE :
		break ;
	case PC3_CONNECT :	/* these states are done by hardware */
	case PC4_NEXT :
		break ;
	case ACTIONS(PC5_SIGNAL) :
		ACTIONS_DONE() ;
		/* fall thru */
	case PC5_SIGNAL :
		if ((cmd != PC_SIGNAL) && (cmd != PC_TIMEOUT_LCT))
			break ;
		/* advance the bit-signaling sub-state machine */
		switch (plc->p_state) {
		case PS_BIT3 :
			/* bits 0..2 received; evaluate them, send bit 3 */
			for (i = 0 ; i <= 2 ; i++)
				pc_rcode_actions(smc,i,phy) ;
			pc_tcode_actions(smc,3,phy) ;
			plc->p_state = PS_BIT4 ;
			plc->p_bits = 1 ;
			plc->p_start = 3 ;
			phy->bitn = 3 ;
			if (plc_send_bits(smc,phy,1)) {
				return ;
			}
			break ;
		case PS_BIT4 :
			/* bit 3 received; send bits 4..6 */
			pc_rcode_actions(smc,3,phy) ;
			for (i = 4 ; i <= 6 ; i++)
				pc_tcode_actions(smc,i,phy) ;
			plc->p_state = PS_BIT7 ;
			plc->p_bits = 3 ;
			plc->p_start = 4 ;
			phy->bitn = 4 ;
			if (plc_send_bits(smc,phy,3)) {
				return ;
			}
			break ;
		case PS_BIT7 :
			/* bits 3..6 received; run the link confidence test */
			for (i = 3 ; i <= 6 ; i++)
				pc_rcode_actions(smc,i,phy) ;
			plc->p_state = PS_LCT ;
			plc->p_bits = 0 ;
			plc->p_start = 7 ;
			phy->bitn = 7 ;
			sm_ph_lem_start(smc,np,(int)smc->s.lct_short) ; /* enable LEM */
			/* start LCT */
			i = inpw(PLC(np,PL_CNTRL_B)) & ~PL_PC_LOOP ;
			outpw(PLC(np,PL_CNTRL_B),i) ;	/* must be cleared */
			outpw(PLC(np,PL_CNTRL_B),i | PL_RLBP) ;
			break ;
		case PS_LCT :
			/* check for local LCT failure */
			pc_tcode_actions(smc,7,phy) ;
			/*
			 * set tval[7]
			 */
			plc->p_state = PS_BIT8 ;
			plc->p_bits = 1 ;
			plc->p_start = 7 ;
			phy->bitn = 7 ;
			if (plc_send_bits(smc,phy,1)) {
				return ;
			}
			break ;
		case PS_BIT8 :
			/* check for remote LCT failure */
			pc_rcode_actions(smc,7,phy) ;
			if (phy->t_val[7] || phy->r_val[7]) {
				/* either side failed LCT: break the connection */
				plc_go_state(smc,np,PL_PCM_STOP) ;
				GO_STATE(PC1_BREAK) ;
				break ;
			}
			for (i = 8 ; i <= 9 ; i++)
				pc_tcode_actions(smc,i,phy) ;
			plc->p_state = PS_JOIN ;
			plc->p_bits = 2 ;
			plc->p_start = 8 ;
			phy->bitn = 8 ;
			if (plc_send_bits(smc,phy,2)) {
				return ;
			}
			break ;
		case PS_JOIN :
			/* bits 8..9 received; signaling complete */
			for (i = 8 ; i <= 9 ; i++)
				pc_rcode_actions(smc,i,phy) ;
			plc->p_state = PS_ACTIVE ;
			GO_STATE(PC6_JOIN) ;
			break ;
		}
		break ;

	case ACTIONS(PC6_JOIN) :
		/*
		 * prevent mux error when going from WRAP_A to WRAP_B
		 */
		if (smc->s.sas == SMT_DAS && np == PB &&
			(smc->y[PA].pc_mode == PM_TREE ||
			 smc->y[PB].pc_mode == PM_TREE)) {
			SETMASK(PLC(np,PL_CNTRL_A),
				PL_SC_REM_LOOP,PL_SC_REM_LOOP) ;
			SETMASK(PLC(np,PL_CNTRL_B),
				PL_CONFIG_CNTRL,PL_CONFIG_CNTRL) ;
		}
		/* NOTE(review): PL_PC_JOIN is set twice back-to-back below;
		 * presumably a deliberate double hardware write — confirm
		 * against the PLC errata before removing. */
		SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
		SETMASK(PLC(np,PL_CNTRL_B),PL_PC_JOIN,PL_PC_JOIN) ;
		ACTIONS_DONE() ;
		cmd = 0 ;
		/* fall thru */
	case PC6_JOIN :
		switch (plc->p_state) {
		case PS_ACTIVE:
			/*PC88b*/
			if (!phy->cf_join) {
				phy->cf_join = TRUE ;
				queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
			}
			if (cmd == PC_JOIN)
				GO_STATE(PC8_ACTIVE) ;
			/*PC82*/
			if (cmd == PC_TRACE) {
				GO_STATE(PC2_TRACE) ;
				break ;
			}
			break ;
		}
		break ;

	case PC7_VERIFY :
		break ;

	case ACTIONS(PC8_ACTIVE) :
		/*
		 * start LEM for SMT
		 */
		sm_ph_lem_start(smc,(int)phy->np,LCT_LEM_MAX) ;

		phy->tr_flag = FALSE ;
		mib->fddiPORTConnectState = PCM_ACTIVE ;

		/* Set the active interrupt mask register */
		outpw(PLC(np,PL_INTR_MASK),plc_imsk_act) ;

		ACTIONS_DONE() ;
		break ;
	case PC8_ACTIVE :
		/*PC81 is done by PL_TNE_EXPIRED irq */
		/*PC82*/
		if (cmd == PC_TRACE) {
			GO_STATE(PC2_TRACE) ;
			break ;
		}
		/*PC88c: is done by TRACE_PROP irq */

		break ;
	case ACTIONS(PC9_MAINT) :
		/* entry actions for MAINT: stop PCM, force the line state */
		stop_pcm_timer0(smc,phy) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_PC_JOIN) ;
		CLEAR(PLC(np,PL_CNTRL_B),PL_LONG) ;
		CLEAR(PLC(np,PL_INTR_MASK),PL_LE_CTR) ;	/* disable LEM int. */
		sm_ph_lem_stop(smc,np) ;		/* disable LEM */
		phy->cf_loop = FALSE ;
		phy->cf_join = FALSE ;
		queue_event(smc,EVENT_CFM,CF_JOIN+np) ;
		plc_go_state(smc,np,PL_PCM_STOP) ;
		mib->fddiPORTConnectState = PCM_DISABLED ;
		SETMASK(PLC(np,PL_CNTRL_B),PL_MAINT,PL_MAINT) ;
		sm_ph_linestate(smc,np,(int) MIB2LS(mib->fddiPORTMaint_LS)) ;
		outpw(PLC(np,PL_CNTRL_A),PL_SC_BYPASS) ;
		ACTIONS_DONE() ;
		break ;
	case PC9_MAINT :
		DB_PCMN(1,"PCM %c : MAINT\n",phy->phy_name,0) ;
		/*PC90*/
		if (cmd == PC_ENABLE) {
			GO_STATE(PC0_OFF) ;
			break ;
		}
		break ;

	default:
		SMT_PANIC(smc,SMT_E0118, SMT_E0118_MSG) ;
		break ;
	}
}
/*
 * force line state on a PHY output (only in MAINT state)
 * Any unrecognized line state code falls back to idle, exactly like
 * an explicit PC_ILS request.
 */
static void sm_ph_linestate(struct s_smc *smc, int phy, int ls)
{
	int cntrl;

	SK_UNUSED(smc);

	cntrl = (inpw(PLC(phy, PL_CNTRL_B)) & ~PL_MAINT_LS) |
		PL_PCM_STOP | PL_MAINT;

	switch (ls) {
	case PC_QLS:			/* Force Quiet */
		cntrl |= PL_M_QUI0;
		break;
	case PC_MLS:			/* Force Master */
		cntrl |= PL_M_MASTR;
		break;
	case PC_HLS:			/* Force Halt */
		cntrl |= PL_M_HALT;
		break;
	case PC_LS_PDR:			/* Enable repeat filter */
		cntrl |= PL_M_TPDR;
		break;
	case PC_ILS:			/* Force Idle (also the default) */
	default:
		cntrl |= PL_M_IDLE;
		break;
	}
	outpw(PLC(phy, PL_CNTRL_B), cntrl);
}
/* Reset the link error estimate to the best value: BER of 10^-15
 * (the float estimate is kept scaled by 100 internally). */
static void reset_lem_struct(struct s_phy *phy)
{
	phy->mib->fddiPORTLer_Estimate = 15;
	phy->lem.lem_float_ber = 15 * 100;
}
/*
 * link error monitor
 * Periodic evaluation (every 8 seconds, see comment below): fetch the
 * hardware error counter, update the exponentially weighted BER
 * estimate in the MIB, raise the LER condition via SRF when the alarm
 * threshold is crossed, and restart the connection (or take the port
 * off) when the cutoff threshold is reached.
 */
static void lem_evaluate(struct s_smc *smc, struct s_phy *phy)
{
	int ber ;
	u_long errors ;
	struct lem_counter *lem = &phy->lem ;
	struct fddi_mib_p	*mib ;
	int			cond ;

	mib = phy->mib ;

	if (!lem->lem_on)
		return ;

	/* reading the counter also clears it in hardware */
	errors = inpw(PLC(((int) phy->np),PL_LINK_ERR_CTR)) ;
	lem->lem_errors += errors ;
	mib->fddiPORTLem_Ct += errors ;

	errors = lem->lem_errors ;
	/*
	 * calculation is called on a intervall of 8 seconds
	 *	-> this means, that one error in 8 sec. is one of 8*125*10E6
	 *	the same as BER = 10E-9
	 * Please note:
	 *	-> 9 errors in 8 seconds mean:
	 *	   BER = 9 * 10E-9  and this is
	 *	    < 10E-8, so the limit of 10E-8 is not reached!
	 */

	if (!errors)		ber = 15 ;
	else if (errors <= 9)	ber = 9 ;
	else if (errors <= 99)	ber = 8 ;
	else if (errors <= 999)	ber = 7 ;
	else if (errors <= 9999)	ber = 6 ;
	else if (errors <= 99999)	ber = 5 ;
	else if (errors <= 999999)	ber = 4 ;
	else if (errors <= 9999999)	ber = 3 ;
	else if (errors <= 99999999)	ber = 2 ;
	else if (errors <= 999999999)	ber = 1 ;
	else			ber = 0 ;

	/*
	 * weighted average (70% old estimate, 30% new sample)
	 */
	ber *= 100 ;
	lem->lem_float_ber = lem->lem_float_ber * 7 + ber * 3 ;
	lem->lem_float_ber /= 10 ;
	mib->fddiPORTLer_Estimate = lem->lem_float_ber / 100 ;
	if (mib->fddiPORTLer_Estimate < 4) {
		mib->fddiPORTLer_Estimate = 4 ;
	}

	if (lem->lem_errors) {
		DB_PCMN(1,"LEM %c :\n",phy->np == PB? 'B' : 'A',0) ;
		DB_PCMN(1,"errors      : %ld\n",lem->lem_errors,0) ;
		DB_PCMN(1,"sum_errors  : %ld\n",mib->fddiPORTLem_Ct,0) ;
		DB_PCMN(1,"current BER : 10E-%d\n",ber/100,0) ;
		DB_PCMN(1,"float BER   : 10E-(%d/100)\n",lem->lem_float_ber,0) ;
		DB_PCMN(1,"avg. BER    : 10E-%d\n",
			mib->fddiPORTLer_Estimate,0) ;
	}

	lem->lem_errors = 0L ;

#ifndef	SLIM_SMT
	/* report the LER condition when the alarm threshold is reached */
	cond = (mib->fddiPORTLer_Estimate <= mib->fddiPORTLer_Alarm) ?
		TRUE : FALSE ;
#ifdef	SMT_EXT_CUTOFF
	smt_ler_alarm_check(smc,phy,cond) ;
#endif	/* nSMT_EXT_CUTOFF */
	if (cond != mib->fddiPORTLerFlag) {
		smt_srf_event(smc,SMT_COND_PORT_LER,
			(int) (INDEX_PORT+ phy->np) ,cond) ;
	}
#endif

	if (	mib->fddiPORTLer_Estimate <= mib->fddiPORTLer_Cutoff) {
		phy->pc_lem_fail = TRUE ;		/* flag */
		mib->fddiPORTLem_Reject_Ct++ ;
		/*
		 * "forgive 10e-2" if we cutoff so we can come
		 * up again ..
		 */
		lem->lem_float_ber += 2*100 ;

		/*PC81b*/
#ifdef	CONCENTRATOR
		DB_PCMN(1,"PCM: LER cutoff on port %d cutoff %d\n",
			phy->np, mib->fddiPORTLer_Cutoff) ;
#endif
#ifdef	SMT_EXT_CUTOFF
		smt_port_off_event(smc,phy->np);
#else	/* nSMT_EXT_CUTOFF */
		queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ;
#endif	/* nSMT_EXT_CUTOFF */
	}
}
/*
 * called by SMT to calculate LEM bit error rate
 * Runs the link error monitor evaluation over every PHY.
 */
void sm_lem_evaluate(struct s_smc *smc)
{
	int port;

	for (port = 0; port < NUMPHYS; port++)
		lem_evaluate(smc, &smc->y[port]);
}
/*
 * Evaluate the link confidence test for one PHY: read the hardware
 * error counter and flag pc_lem_fail when the error count reaches the
 * threshold configured for the chosen test length.  Failures bump the
 * LCT-fail and reject counters; a pass resets the fail counter.
 */
static void lem_check_lct(struct s_smc *smc, struct s_phy *phy)
{
	struct lem_counter *lem = &phy->lem;
	struct fddi_mib_p *mib = phy->mib;
	int errors;

	phy->pc_lem_fail = FALSE;	/* assume the test passed */

	/* fetch (and thereby clear) the PLC link error counter */
	errors = inpw(PLC(((int) phy->np), PL_LINK_ERR_CTR));
	lem->lem_errors += errors;
	mib->fddiPORTLem_Ct += errors;

	if (lem->lem_errors) {
		switch (phy->lc_test) {
		case LC_SHORT:
			if (lem->lem_errors >= smc->s.lct_short)
				phy->pc_lem_fail = TRUE;
			break;
		case LC_MEDIUM:
			if (lem->lem_errors >= smc->s.lct_medium)
				phy->pc_lem_fail = TRUE;
			break;
		case LC_LONG:
			if (lem->lem_errors >= smc->s.lct_long)
				phy->pc_lem_fail = TRUE;
			break;
		case LC_EXTENDED:
			if (lem->lem_errors >= smc->s.lct_extended)
				phy->pc_lem_fail = TRUE;
			break;
		}
		DB_PCMN(1," >>errors : %d\n",lem->lem_errors,0) ;
	}

	if (phy->pc_lem_fail) {
		mib->fddiPORTLCTFail_Ct++;
		mib->fddiPORTLem_Reject_Ct++;
	} else {
		mib->fddiPORTLCTFail_Ct = 0;
	}
}
/*
 * LEM functions
 * Start the hardware link error monitor on PHY 'np': reset the soft
 * counter, program the interrupt threshold, clear the hardware error
 * counter and enable the LE counter interrupt.
 */
static void sm_ph_lem_start(struct s_smc *smc, int np, int threshold)
{
	struct lem_counter *lem = &smc->y[np].lem ;

	lem->lem_on = 1 ;
	lem->lem_errors = 0L ;

	/* Do NOT reset mib->fddiPORTLer_Estimate here. It is called too
	 * often.
	 */

	outpw(PLC(np,PL_LE_THRESHOLD),threshold) ;
	(void)inpw(PLC(np,PL_LINK_ERR_CTR)) ;	/* clear error counter */

	/* enable LE INT */
	SETMASK(PLC(np,PL_INTR_MASK),PL_LE_CTR,PL_LE_CTR) ;
}
/* Stop the link error monitor: mark it inactive and mask its interrupt. */
static void sm_ph_lem_stop(struct s_smc *smc, int np)
{
	smc->y[np].lem.lem_on = 0;
	CLEAR(PLC(np, PL_INTR_MASK), PL_LE_CTR);
}
/* ARGSUSED */
/*
 * en-/disable identification of latched line states.
 * No-op in this implementation; use the file's SK_UNUSED() idiom
 * instead of self-assignment (which trips -Wself-assign) to mark the
 * parameters as intentionally unused.
 */
void sm_pm_ls_latch(struct s_smc *smc, int phy, int on_off)
/* int on_off;	en- or disable ident. ls */
{
	SK_UNUSED(smc) ;
	SK_UNUSED(phy) ;
	SK_UNUSED(on_off) ;
}
/*
 * PCM pseudo code
 * receive actions are called AFTER the bit n is received,
 * i.e. if pc_rcode_actions(5) is called, bit 6 is the next bit to be received
 */

/*
 * PCM pseudo code 5.1 .. 6.1
 * Evaluate the received signaling bit 'bit' (together with our own
 * transmitted t_val bits): derive neighbor type, withhold condition,
 * LCT duration, loopback request and the neighbor's MAC indication.
 */
static void pc_rcode_actions(struct s_smc *smc, int bit, struct s_phy *phy)
{
	struct fddi_mib_p	*mib ;

	mib = phy->mib ;

	DB_PCMN(1,"SIG rec %x %x:\n", bit,phy->r_val[bit] ) ;
	bit++ ;

	switch(bit) {
	case 0:
	case 1:
	case 2:
		break ;
	case 3 :
		/* bits 1+2 encode the neighbor's port type */
		if (phy->r_val[1] == 0 && phy->r_val[2] == 0)
			mib->fddiPORTNeighborType = TA ;
		else if (phy->r_val[1] == 0 && phy->r_val[2] == 1)
			mib->fddiPORTNeighborType = TB ;
		else if (phy->r_val[1] == 1 && phy->r_val[2] == 0)
			mib->fddiPORTNeighborType = TS ;
		else if (phy->r_val[1] == 1 && phy->r_val[2] == 1)
			mib->fddiPORTNeighborType = TM ;
		break ;
	case 4:
		/* evaluate connection acceptance / withhold */
		if (mib->fddiPORTMy_Type == TM &&
			mib->fddiPORTNeighborType == TM) {
			DB_PCMN(1,"PCM %c : E100 withhold M-M\n",
				phy->phy_name,0) ;
			mib->fddiPORTPC_Withhold = PC_WH_M_M ;
			RS_SET(smc,RS_EVENT) ;
		}
		else if (phy->t_val[3] || phy->r_val[3]) {
			mib->fddiPORTPC_Withhold = PC_WH_NONE ;
			if (mib->fddiPORTMy_Type == TM ||
			    mib->fddiPORTNeighborType == TM)
				phy->pc_mode = PM_TREE ;
			else
				phy->pc_mode = PM_PEER ;

			/* reevaluate the selection criteria (wc_flag) */
			all_selection_criteria (smc);

			if (phy->wc_flag) {
				mib->fddiPORTPC_Withhold = PC_WH_PATH ;
			}
		}
		else {
			mib->fddiPORTPC_Withhold = PC_WH_OTHER ;
			RS_SET(smc,RS_EVENT) ;
			DB_PCMN(1,"PCM %c : E101 withhold other\n",
				phy->phy_name,0) ;
		}
		/* twisted ring: both ends claim the same A/B port type */
		phy->twisted = ((mib->fddiPORTMy_Type != TS) &&
				(mib->fddiPORTMy_Type != TM) &&
				(mib->fddiPORTNeighborType ==
				mib->fddiPORTMy_Type)) ;
		if (phy->twisted) {
			DB_PCMN(1,"PCM %c : E102 !!! TWISTED !!!\n",
				phy->phy_name,0) ;
		}
		break ;
	case 5 :
		break ;
	case 6:
		/* bits 4+5 select the link confidence test duration */
		if (phy->t_val[4] || phy->r_val[4]) {
			if ((phy->t_val[4] && phy->t_val[5]) ||
			    (phy->r_val[4] && phy->r_val[5]) )
				phy->lc_test = LC_EXTENDED ;
			else
				phy->lc_test = LC_LONG ;
		}
		else if (phy->t_val[5] || phy->r_val[5])
			phy->lc_test = LC_MEDIUM ;
		else
			phy->lc_test = LC_SHORT ;
		switch (phy->lc_test) {
		case LC_SHORT :				/* 50ms */
			outpw(PLC((int)phy->np,PL_LC_LENGTH), TP_LC_LENGTH ) ;
			phy->t_next[7] = smc->s.pcm_lc_short ;
			break ;
		case LC_MEDIUM :			/* 500ms */
			outpw(PLC((int)phy->np,PL_LC_LENGTH), TP_LC_LONGLN ) ;
			phy->t_next[7] = smc->s.pcm_lc_medium ;
			break ;
		case LC_LONG :
			SETMASK(PLC((int)phy->np,PL_CNTRL_B),PL_LONG,PL_LONG) ;
			phy->t_next[7] = smc->s.pcm_lc_long ;
			break ;
		case LC_EXTENDED :
			SETMASK(PLC((int)phy->np,PL_CNTRL_B),PL_LONG,PL_LONG) ;
			phy->t_next[7] = smc->s.pcm_lc_extended ;
			break ;
		}
		/* long/extended tests are bounded by a software timer */
		if (phy->t_next[7] > smc->s.pcm_lc_medium) {
			start_pcm_timer0(smc,phy->t_next[7],PC_TIMEOUT_LCT,phy);
		}
		DB_PCMN(1,"LCT timer = %ld us\n", phy->t_next[7], 0) ;
		phy->t_next[9] = smc->s.pcm_t_next_9 ;
		break ;
	case 7:
		/* bit 6: neighbor requests a MAC loopback */
		if (phy->t_val[6]) {
			phy->cf_loop = TRUE ;
		}
		phy->td_flag = TRUE ;
		break ;
	case 8:
		/* bit 7: LCT result from either side */
		if (phy->t_val[7] || phy->r_val[7]) {
			DB_PCMN(1,"PCM %c : E103 LCT fail %s\n",
				phy->phy_name,phy->t_val[7]? "local":"remote") ;
			queue_event(smc,(int)(EVENT_PCM+phy->np),PC_START) ;
		}
		break ;
	case 9:
		if (phy->t_val[8] || phy->r_val[8]) {
			if (phy->t_val[8])
				phy->cf_loop = TRUE ;
			phy->td_flag = TRUE ;
		}
		break ;
	case 10:
		if (phy->r_val[9]) {
			/* neighbor intends to have MAC on output */ ;
			mib->fddiPORTMacIndicated.R_val = TRUE ;
		}
		else {
			/* neighbor does not intend to have MAC on output */ ;
			mib->fddiPORTMacIndicated.R_val = FALSE ;
		}
		break ;
	}
}
/*
* PCM pseudo code 5.1 .. 6.1
*/
/*
 * Compute the value of one transmitted PCM signaling bit (t_val[bit])
 * according to the SMT PCM pseudo code.  Called once per signaling bit
 * as the PCM state machine walks through the bit exchange.
 *
 * smc - SMT context
 * bit - index of the signaling bit to compute (0..9)
 * phy - PHY whose t_val[] is being filled in
 *
 * Side effects beyond t_val[]: may update fddiPORTConnectState,
 * fddiPORTMacIndicated.T_val, cf_loop and pc_lem_fail, and may queue a
 * PC_START event (bit 9) to restart the connection.
 */
static void pc_tcode_actions(struct s_smc *smc, const int bit, struct s_phy *phy)
{
	int	np = phy->np ;
	struct fddi_mib_p *mib ;

	mib = phy->mib ;

	switch(bit) {
	case 0:
		phy->t_val[0] = 0 ;		/* no escape used */
		break ;
	case 1:
		/* bit 1: we are an S- or M-type port */
		if (mib->fddiPORTMy_Type == TS || mib->fddiPORTMy_Type == TM)
			phy->t_val[1] = 1 ;
		else
			phy->t_val[1] = 0 ;
		break ;
	case 2 :
		/* bit 2: we are a B- or M-type port */
		if (mib->fddiPORTMy_Type == TB || mib->fddiPORTMy_Type == TM)
			phy->t_val[2] = 1 ;
		else
			phy->t_val[2] = 0 ;
		break ;
	case 3:
	{
		int	type,ne ;
		int	policy ;

		type = mib->fddiPORTMy_Type ;
		ne = mib->fddiPORTNeighborType ;
		policy = smc->mib.fddiSMTConnectionPolicy ;

		/*
		 * bit 3: accept/reject this topology combination based on
		 * the configured SMT connection policy bits.
		 */
		phy->t_val[3] = 1 ;	/* Accept connection */
		switch (type) {
		case TA :
			if (
				((policy & POLICY_AA) && ne == TA) ||
				((policy & POLICY_AB) && ne == TB) ||
				((policy & POLICY_AS) && ne == TS) ||
				((policy & POLICY_AM) && ne == TM) )
				phy->t_val[3] = 0 ;	/* Reject */
			break ;
		case TB :
			if (
				((policy & POLICY_BA) && ne == TA) ||
				((policy & POLICY_BB) && ne == TB) ||
				((policy & POLICY_BS) && ne == TS) ||
				((policy & POLICY_BM) && ne == TM) )
				phy->t_val[3] = 0 ;	/* Reject */
			break ;
		case TS :
			if (
				((policy & POLICY_SA) && ne == TA) ||
				((policy & POLICY_SB) && ne == TB) ||
				((policy & POLICY_SS) && ne == TS) ||
				((policy & POLICY_SM) && ne == TM) )
				phy->t_val[3] = 0 ;	/* Reject */
			break ;
		case TM :
			/* M-M connections are always rejected */
			if ( ne == TM ||
				((policy & POLICY_MA) && ne == TA) ||
				((policy & POLICY_MB) && ne == TB) ||
				((policy & POLICY_MS) && ne == TS) ||
				((policy & POLICY_MM) && ne == TM) )
				phy->t_val[3] = 0 ;	/* Reject */
			break ;
		}
#ifndef	SLIM_SMT
		/*
		 * detect undesirable connection attempt event
		 */
		if (	(type == TA && ne == TA ) ||
			(type == TA && ne == TS ) ||
			(type == TB && ne == TB ) ||
			(type == TB && ne == TS ) ||
			(type == TS && ne == TA ) ||
			(type == TS && ne == TB ) ) {
			smt_srf_event(smc,SMT_EVENT_PORT_CONNECTION,
				(int) (INDEX_PORT+ phy->np) ,0) ;
		}
#endif
	}
		break ;
	case 4:
		/*
		 * bits 4/5 together select the link confidence test
		 * duration: 00 short, 01 medium, 10 long, 11 extended.
		 */
		if (mib->fddiPORTPC_Withhold == PC_WH_NONE) {
			if (phy->pc_lem_fail) {
				phy->t_val[4] = 1 ;	/* long */
				phy->t_val[5] = 0 ;
			}
			else {
				phy->t_val[4] = 0 ;
				if (mib->fddiPORTLCTFail_Ct > 0)
					phy->t_val[5] = 1 ;	/* medium */
				else
					phy->t_val[5] = 0 ;	/* short */

				/*
				 * Implementers choice: use medium
				 * instead of short when undesired
				 * connection attempt is made.
				 */
				if (phy->wc_flag)
					phy->t_val[5] = 1 ;	/* medium */
			}
			mib->fddiPORTConnectState = PCM_CONNECTING ;
		}
		else {
			mib->fddiPORTConnectState = PCM_STANDBY ;
			phy->t_val[4] = 1 ;	/* extended */
			phy->t_val[5] = 1 ;
		}
		break ;
	case 5:
		/* bit 5 was already set together with bit 4 */
		break ;
	case 6:
		/* we do NOT have a MAC for LCT */
		phy->t_val[6] = 0 ;
		break ;
	case 7:
		/* bit 7: LCT result - evaluate the link error monitor */
		phy->cf_loop = FALSE ;
		lem_check_lct(smc,phy) ;
		if (phy->pc_lem_fail) {
			DB_PCMN(1,"PCM %c : E104 LCT failed\n",
				phy->phy_name,0) ;
			phy->t_val[7] = 1 ;
		}
		else
			phy->t_val[7] = 0 ;
		break ;
	case 8:
		phy->t_val[8] = 0 ;	/* Don't request MAC loopback */
		break ;
	case 9:
		/* bit 9: MAC on output - restart if connection is withheld */
		phy->cf_loop = 0 ;
		if ((mib->fddiPORTPC_Withhold != PC_WH_NONE) ||
		     ((smc->s.sas == SMT_DAS) && (phy->wc_flag))) {
			queue_event(smc,EVENT_PCM+np,PC_START) ;
			break ;
		}
		phy->t_val[9] = FALSE ;
		switch (smc->s.sas) {
		case SMT_DAS :
			/*
			 * MAC intended on output
			 */
			if (phy->pc_mode == PM_TREE) {
				if ((np == PB) || ((np == PA) &&
				(smc->y[PB].mib->fddiPORTConnectState !=
					PCM_ACTIVE)))
					phy->t_val[9] = TRUE ;
			}
			else {
				if (np == PB)
					phy->t_val[9] = TRUE ;
			}
			break ;
		case SMT_SAS :
			if (np == PS)
				phy->t_val[9] = TRUE ;
			break ;
#ifdef	CONCENTRATOR
		case SMT_NAC :
			/*
			 * MAC intended on output
			 */
			if (np == PB)
				phy->t_val[9] = TRUE ;
			break ;
#endif
		}
		mib->fddiPORTMacIndicated.T_val = phy->t_val[9] ;
		break ;
	}
	DB_PCMN(1,"SIG snd %x %x:\n", bit,phy->t_val[bit] ) ;
}
/*
* return status twisted (called by SMT)
*/
int pcm_status_twisted(struct s_smc *smc)
{
int twist = 0 ;
if (smc->s.sas != SMT_DAS)
return 0;
if (smc->y[PA].twisted && (smc->y[PA].mib->fddiPORTPCMState == PC8_ACTIVE))
twist |= 1 ;
if (smc->y[PB].twisted && (smc->y[PB].mib->fddiPORTPCMState == PC8_ACTIVE))
twist |= 2 ;
return twist;
}
/*
* return status (called by SMT)
* type
* state
* remote phy type
* remote mac yes/no
*/
/*
 * Return the connection status of one port to SMT.
 *
 * np     - port index
 * type   - our own PHY type
 * state  - current connect state
 * remote - neighbor's PHY type
 * mac    - neighbor's MAC indication; reported only while the PCM
 *          state machine is in the ACTIVE state, otherwise 0
 */
void pcm_status_state(struct s_smc *smc, int np, int *type, int *state,
		      int *remote, int *mac)
{
	struct s_phy *phy = &smc->y[np] ;
	struct fddi_mib_p *mib = phy->mib ;

	*type = mib->fddiPORTMy_Type ;		/* our PHY type */
	*state = mib->fddiPORTConnectState ;
	*remote = mib->fddiPORTNeighborType ;

	/* remote MAC indication - set only if active */
	if (mib->fddiPORTPCMState == PC8_ACTIVE)
		*mac = mib->fddiPORTMacIndicated.R_val ;
	else
		*mac = 0 ;
}
/*
* return rooted station status (called by SMT)
*/
int pcm_rooted_station(struct s_smc *smc)
{
int n ;
for (n = 0 ; n < NUMPHYS ; n++) {
if (smc->y[n].mib->fddiPORTPCMState == PC8_ACTIVE &&
smc->y[n].mib->fddiPORTNeighborType == TM)
return 0;
}
return 1;
}
/*
* Interrupt actions for PLC & PCM events
*/
/*
 * Interrupt service for PLC and PCM events of one PHY.
 *
 * smc - SMT context
 * np  - PHY index
 * cmd - pending interrupt status bits (PL_* flags)
 *
 * Updates the per-PHY soft error counters, feeds received signaling
 * bits to the PCM state machine, and queues PCM/ECM events for the
 * conditions it detects.  NOTE: several PLC registers read here
 * (e.g. PL_LINK_ERR_CTR) are clear-on-read, so statement order matters.
 */
void plc_irq(struct s_smc *smc, int np, unsigned int cmd)
/* int np;	PHY index */
{
	struct s_phy *phy = &smc->y[np] ;
	struct s_plc *plc = &phy->plc ;
	int		n ;
#ifdef	SUPERNET_3
	int		corr_mask ;
#endif	/* SUPERNET_3 */
	int		i ;

	/* guard against interrupts for PHYs we do not manage */
	if (np >= smc->s.numphys) {
		plc->soft_err++ ;
		return ;
	}

	if (cmd & PL_EBUF_ERR) {	/* elastic buff. det. over-|underflow*/
		/*
		 * Check whether the SRF Condition occurred.
		 */
		if (!plc->ebuf_cont && phy->mib->fddiPORTPCMState == PC8_ACTIVE){
			/*
			 * This is the real Elasticity Error.
			 * More than one in a row are treated as a
			 * single one.
			 * Only count this in the active state.
			 */
			phy->mib->fddiPORTEBError_Ct ++ ;
		}

		plc->ebuf_err++ ;
		if (plc->ebuf_cont <= 1000) {
			/*
			 * Prevent counter from being wrapped after
			 * hanging years in that interrupt.
			 */
			plc->ebuf_cont++ ;	/* Ebuf continuous error */
		}

#ifdef	SUPERNET_3
		if (plc->ebuf_cont == 1000 &&
			((inpw(PLC(np,PL_STATUS_A)) & PLC_REV_MASK) ==
			PLC_REV_SN3)) {
			/*
			 * This interrupt remeained high for at least
			 * 1000 consecutive interrupt calls.
			 *
			 * This is caused by a hardware error of the
			 * ORION part of the Supernet III chipset.
			 *
			 * Disable this bit from the mask.
			 */
			corr_mask = (plc_imsk_na & ~PL_EBUF_ERR) ;
			outpw(PLC(np,PL_INTR_MASK),corr_mask);

			/*
			 * Disconnect from the ring.
			 * Call the driver with the reset indication.
			 */
			queue_event(smc,EVENT_ECM,EC_DISCONNECT) ;

			/*
			 * Make an error log entry.
			 */
			SMT_ERR_LOG(smc,SMT_E0136, SMT_E0136_MSG) ;

			/*
			 * Indicate the Reset.
			 */
			drv_reset_indication(smc) ;
		}
#endif	/* SUPERNET_3 */
	} else {
		/* Reset the continuous error variable */
		plc->ebuf_cont = 0 ;	/* reset Ebuf continuous error */
	}
	if (cmd & PL_PHYINV) {		/* physical layer invalid signal */
		plc->phyinv++ ;
	}
	if (cmd & PL_VSYM_CTR) {	/* violation symbol counter has incr.*/
		plc->vsym_ctr++ ;
	}
	if (cmd & PL_MINI_CTR) {	/* dep. on PLC_CNTRL_A's MINI_CTR_INT*/
		plc->mini_ctr++ ;
	}
	if (cmd & PL_LE_CTR) {		/* link error event counter */
		int	j ;

		/*
		 * note: PL_LINK_ERR_CTR MUST be read to clear it
		 */
		j = inpw(PLC(np,PL_LE_THRESHOLD)) ;
		i = inpw(PLC(np,PL_LINK_ERR_CTR)) ;

		if (i < j) {
			/* wrapped around */
			i += 256 ;
		}

		if (phy->lem.lem_on) {
			/* Note: Lem errors shall only be counted when
			 * link is ACTIVE or LCT is active.
			 */
			phy->lem.lem_errors += i ;
			phy->mib->fddiPORTLem_Ct += i ;
		}
	}
	if (cmd & PL_TPC_EXPIRED) {	/* TPC timer reached zero */
		if (plc->p_state == PS_LCT) {
			/*
			 * end of LCT
			 */
			;
		}
		plc->tpc_exp++ ;
	}
	if (cmd & PL_LS_MATCH) {	/* LS == LS in PLC_CNTRL_B's MATCH_LS*/
		switch (inpw(PLC(np,PL_CNTRL_B)) & PL_MATCH_LS) {
		case PL_I_IDLE :	phy->curr_ls = PC_ILS ; break ;
		case PL_I_HALT :	phy->curr_ls = PC_HLS ; break ;
		case PL_I_MASTR :	phy->curr_ls = PC_MLS ; break ;
		case PL_I_QUIET :	phy->curr_ls = PC_QLS ; break ;
		}
	}
	if (cmd & PL_PCM_BREAK) {	/* PCM has entered the BREAK state */
		int	reason;

		reason = inpw(PLC(np,PL_STATUS_B)) & PL_BREAK_REASON ;

		switch (reason) {
		case PL_B_PCS :		plc->b_pcs++ ; break ;
		case PL_B_TPC :		plc->b_tpc++ ; break ;
		case PL_B_TNE :		plc->b_tne++ ; break ;
		case PL_B_QLS :		plc->b_qls++ ; break ;
		case PL_B_ILS :		plc->b_ils++ ; break ;
		case PL_B_HLS :		plc->b_hls++ ; break ;
		}

		/*jd 05-Aug-1999 changed: Bug #10419 */
		DB_PCMN(1,"PLC %d: MDcF = %x\n", np, smc->e.DisconnectFlag);
		if (smc->e.DisconnectFlag == FALSE) {
			DB_PCMN(1,"PLC %d: restart (reason %x)\n", np, reason);
			queue_event(smc,EVENT_PCM+np,PC_START) ;
		}
		else {
			DB_PCMN(1,"PLC %d: NO!! restart (reason %x)\n", np, reason);
		}
		return ;
	}
	/*
	 * If both CODE & ENABLE are set ignore enable
	 */
	if (cmd & PL_PCM_CODE) { /* receive last sign.-bit | LCT complete */
		queue_event(smc,EVENT_PCM+np,PC_SIGNAL) ;
		/* latch the received signaling bits into r_val[] */
		n = inpw(PLC(np,PL_RCV_VECTOR)) ;
		for (i = 0 ; i < plc->p_bits ; i++) {
			phy->r_val[plc->p_start+i] = n & 1 ;
			n >>= 1 ;
		}
	}
	else if (cmd & PL_PCM_ENABLED) { /* asserted SC_JOIN, scrub.completed*/
		queue_event(smc,EVENT_PCM+np,PC_JOIN) ;
	}
	if (cmd & PL_TRACE_PROP) {	/* MLS while PC8_ACTIV || PC2_TRACE */
		/*PC22b*/
		if (!phy->tr_flag) {
			DB_PCMN(1,"PCM : irq TRACE_PROP %d %d\n",
				np,smc->mib.fddiSMTECMState) ;
			phy->tr_flag = TRUE ;
			smc->e.trace_prop |= ENTITY_BIT(ENTITY_PHY(np)) ;
			queue_event(smc,EVENT_ECM,EC_TRACE_PROP) ;
		}
	}
	/*
	 * filter PLC glitch ???
	 * QLS || HLS only while in PC2_TRACE state
	 */
	if ((cmd & PL_SELF_TEST) && (phy->mib->fddiPORTPCMState == PC2_TRACE)) {
		/*PC22a*/
		if (smc->e.path_test == PT_PASSED) {
			DB_PCMN(1,"PCM : state = %s %d\n", get_pcmstate(smc,np),
				phy->mib->fddiPORTPCMState) ;

			smc->e.path_test = PT_PENDING ;
			queue_event(smc,EVENT_ECM,EC_PATH_TEST) ;
		}
	}
	if (cmd & PL_TNE_EXPIRED) {	/* TNE: length of noise events */
		/* break_required (TNE > NS_Max) */
		if (phy->mib->fddiPORTPCMState == PC8_ACTIVE) {
			if (!phy->tr_flag) {
				DB_PCMN(1,"PCM %c : PC81 %s\n",phy->phy_name,"NSE");
				queue_event(smc,EVENT_PCM+np,PC_START) ;
				return ;
			}
		}
	}
#if	0
	if (cmd & PL_NP_ERR) {		/* NP has requested to r/w an inv reg*/
		/*
		 * It's a bug by AMD
		 */
		plc->np_err++ ;
	}
	/* pin inactiv (GND) */
	if (cmd & PL_PARITY_ERR) {	/* p. error dedected on TX9-0 inp */
		plc->parity_err++ ;
	}
	if (cmd & PL_LSDO) {		/* carrier detected */
		;
	}
#endif
}
#ifdef DEBUG
/*
* fill state struct
*/
/*
 * Fill the PCM part of a state snapshot (debug support).
 *
 * For each PHY the current MIB values are copied into the given
 * smt_state, and the exchanged signaling bits r_val[]/t_val[] are
 * packed into two shorts (bit i of the word <=> signaling bit i).
 */
void pcm_get_state(struct s_smc *smc, struct smt_state *state)
{
	struct fddi_mib_p *mib ;
	struct pcm_state *pcs ;
	struct s_phy *phy ;
	short rbits ;
	short tbits ;
	int bit ;
	int i ;

	phy = smc->y ;
	pcs = state->pcm_state ;
	for (i = 0 ; i < NUMPHYS ; i++, phy++, pcs++) {
		mib = phy->mib ;
		pcs->pcm_type = (u_char) mib->fddiPORTMy_Type ;
		pcs->pcm_state = (u_char) mib->fddiPORTPCMState ;
		pcs->pcm_mode = phy->pc_mode ;
		pcs->pcm_neighbor = (u_char) mib->fddiPORTNeighborType ;
		pcs->pcm_bsf = mib->fddiPORTBS_Flag ;
		pcs->pcm_lsf = phy->ls_flag ;
		pcs->pcm_lct_fail = (u_char) mib->fddiPORTLCTFail_Ct ;
		pcs->pcm_ls_rx = LS2MIB(sm_pm_get_ls(smc,i)) ;

		/* pack the signaling vectors: word bit i == r/t_val[i] */
		rbits = 0 ;
		tbits = 0 ;
		for (bit = 0 ; bit < NUMBITS ; bit++) {
			if (phy->r_val[bit])
				rbits |= 1 << bit ;
			if (phy->t_val[bit])
				tbits |= 1 << bit ;
		}
		pcs->pcm_r_val = rbits ;
		pcs->pcm_t_val = tbits ;
	}
}
/*
 * Read the hardware PCM state of a PHY from the PLC status register
 * and map it to the corresponding software PC_* event/state code.
 */
int get_pcm_state(struct s_smc *smc, int np)
{
	SK_UNUSED(smc) ;

	switch (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_STATE) {
	case PL_PC0 :	return PC_STOP ;
	case PL_PC1 :	return PC_START ;
	case PL_PC2 :	return PC_TRACE ;
	case PL_PC3 :	return PC_SIGNAL ;
	case PL_PC4 :	return PC_SIGNAL ;
	case PL_PC5 :	return PC_SIGNAL ;
	case PL_PC6 :	return PC_JOIN ;
	case PL_PC7 :	return PC_JOIN ;
	case PL_PC8 :	return PC_ENABLE ;
	case PL_PC9 :	return PC_MAINT ;
	default :	return PC_DISABLE ;
	}
}
/*
 * Return a printable name for the current PLC line state
 * (debug support).  Unknown values yield an empty string
 * (or "unknown" under lint).
 */
char *get_linestate(struct s_smc *smc, int np)
{
	SK_UNUSED(smc) ;

	switch (inpw(PLC(np,PL_STATUS_A)) & PL_LINE_ST) {
	case PL_L_NLS :	return "NOISE" ;
	case PL_L_ALS :	return "ACTIV" ;
	case PL_L_UND :	return "UNDEF" ;
	case PL_L_ILS4:	return "ILS 4" ;
	case PL_L_QLS :	return "QLS" ;
	case PL_L_MLS :	return "MLS" ;
	case PL_L_HLS :	return "HLS" ;
	case PL_L_ILS16: return "ILS16" ;
#ifdef	lint
	default:	return "unknown" ;
#endif
	}
	return "" ;
}
/*
 * Return a printable name for the current hardware PCM state
 * (debug support).
 */
char *get_pcmstate(struct s_smc *smc, int np)
{
	SK_UNUSED(smc) ;

	switch (inpw(PLC(np,PL_STATUS_B)) & PL_PCM_STATE) {
	case PL_PC0 :	return "OFF" ;
	case PL_PC1 :	return "BREAK" ;
	case PL_PC2 :	return "TRACE" ;
	case PL_PC3 :	return "CONNECT" ;
	case PL_PC4 :	return "NEXT" ;
	case PL_PC5 :	return "SIGNAL" ;
	case PL_PC6 :	return "JOIN" ;
	case PL_PC7 :	return "VERIFY" ;
	case PL_PC8 :	return "ACTIV" ;
	case PL_PC9 :	return "MAINT" ;
	default :	return "UNKNOWN" ;
	}
}
/*
 * Dump the PLC error counters, break reasons and current line/PCM
 * state of every PHY to the console (debug support).
 */
void list_phy(struct s_smc *smc)
{
	struct s_plc *plc ;
	int np ;

	for (np = 0 ; np < NUMPHYS ; np++) {
		plc  = &smc->y[np].plc ;
		printf("PHY %d:\tERRORS\t\t\tBREAK_REASONS\t\tSTATES:\n",np) ;
		printf("\tsoft_error: %ld \t\tPC_Start : %ld\n",
			plc->soft_err,plc->b_pcs);
		printf("\tparity_err: %ld \t\tTPC exp. : %ld\t\tLine: %s\n",
			plc->parity_err,plc->b_tpc,get_linestate(smc,np)) ;
		printf("\tebuf_error: %ld \t\tTNE exp. : %ld\n",
			plc->ebuf_err,plc->b_tne) ;
		printf("\tphyinvalid: %ld \t\tQLS det. : %ld\t\tPCM : %s\n",
			plc->phyinv,plc->b_qls,get_pcmstate(smc,np)) ;
		printf("\tviosym_ctr: %ld \t\tILS det. : %ld\n",
			plc->vsym_ctr,plc->b_ils)  ;
		printf("\tmingap_ctr: %ld \t\tHLS det. : %ld\n",
			plc->mini_ctr,plc->b_hls) ;
		printf("\tnodepr_err: %ld\n",plc->np_err) ;
		printf("\tTPC_exp : %ld\n",plc->tpc_exp) ;
		printf("\tLEM_err : %ld\n",smc->y[np].lem.lem_errors) ;
	}
}
#ifdef CONCENTRATOR
/*
 * Dump the link error monitor counters and bit error rate estimate
 * of every installed PHY (concentrator debug support).
 */
void pcm_lem_dump(struct s_smc *smc)
{
	int		i ;
	struct s_phy	*phy ;
	struct fddi_mib_p	*mib ;
	/* old-style forward declaration of the entity-name formatter */
	char		*entostring() ;

	printf("PHY	errors	BER\n") ;
	printf("----------------------\n") ;
	for (i = 0,phy = smc->y ; i < NUMPHYS ; i++,phy++) {
		if (!plc_is_installed(smc,i))
			continue ;
		mib = phy->mib ;
		printf("%s\t%ld\t10E-%d\n",
			entostring(smc,ENTITY_PHY(i)),
			mib->fddiPORTLem_Ct,
			mib->fddiPORTLer_Estimate) ;
	}
}
#endif
#endif
| gpl-2.0 |
Kuzma30/kernel32nooktablet | arch/arm/mach-s3c64xx/mach-crag6410-module.c | 138 | 4763 | /* Speyside modules for Cragganmore - board data probing
*
* Copyright 2011 Wolfson Microelectronics plc
* Mark Brown <broonie@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/mfd/wm831x/irq.h>
#include <linux/mfd/wm831x/gpio.h>
#include <sound/wm8996.h>
#include <sound/wm8962.h>
#include <sound/wm9081.h>
#include <mach/crag6410.h>
/*
 * ReTune Mobile EQ coefficient sets for the WM8996 at 48kHz.
 * NOTE(review): register values presumably come from Wolfson's
 * coefficient generation tools - do not edit by hand.
 */
static struct wm8996_retune_mobile_config wm8996_retune[] = {
	{
		.name = "Sub LPF",
		.rate = 48000,
		.regs = {
			0x6318, 0x6300, 0x1000, 0x0000, 0x0004, 0x2000, 0xF000,
			0x0000, 0x0004, 0x2000, 0xF000, 0x0000, 0x0004, 0x2000,
			0xF000, 0x0000, 0x0004, 0x1000, 0x0800, 0x4000
		},
	},
	{
		.name = "Sub HPF",
		.rate = 48000,
		.regs = {
			0x000A, 0x6300, 0x1000, 0x0000, 0x0004, 0x2000, 0xF000,
			0x0000, 0x0004, 0x2000, 0xF000, 0x0000, 0x0004, 0x2000,
			0xF000, 0x0000, 0x0004, 0x1000, 0x0800, 0x4000
		},
	},
};
/* Board-specific platform data for the WM8996 CODEC on Cragganmore. */
static struct wm8996_pdata wm8996_pdata __initdata = {
	.ldo_ena = S3C64XX_GPN(7),	/* SoC GPIO driving the LDO enable */
	.gpio_base = CODEC_GPIO_BASE,
	.micdet_def = 1,
	.inl_mode = WM8996_DIFFERRENTIAL_1,
	.inr_mode = WM8996_DIFFERRENTIAL_1,

	.irq_flags = IRQF_TRIGGER_RISING,

	.gpio_default = {
		0x8001, /* GPIO1 == ADCLRCLK1 */
		0x8001, /* GPIO2 == ADCLRCLK2, input due to CPU */
		0x0141, /* GPIO3 == HP_SEL */
		0x0002, /* GPIO4 == IRQ */
		0x020e, /* GPIO5 == CLKOUT */
	},

	.retune_mobile_cfgs = wm8996_retune,
	.num_retune_mobile_cfgs = ARRAY_SIZE(wm8996_retune),
};
/* Board-specific platform data for the WM8962 CODEC (Tobermory). */
static struct wm8962_pdata wm8962_pdata __initdata = {
	.gpio_init = {
		0,
		WM8962_GPIO_FN_OPCLK,
		WM8962_GPIO_FN_DMICCLK,
		0,
		0x8000 | WM8962_GPIO_FN_DMICDAT,
		WM8962_GPIO_FN_IRQ,    /* Open drain mode */
	},
	.irq_active_low = true,
};
/* Board-specific platform data for the WM9081 speaker amplifier. */
static struct wm9081_pdata wm9081_pdata __initdata = {
	.irq_high = false,	/* IRQ line is active low */
	.irq_cmos = false,
};
/*
 * Per-module I2C device tables: the devices registered when the
 * corresponding audio module is detected by wlf_gf_module_probe().
 * All CODECs share the PMIC GPIO 2 as their interrupt line.
 */
static const struct i2c_board_info wm1254_devs[] = {
	{ I2C_BOARD_INFO("wm8996", 0x1a),
	  .platform_data = &wm8996_pdata,
	  .irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2,
	},
	{ I2C_BOARD_INFO("wm9081", 0x6c),
	  .platform_data = &wm9081_pdata, },
};

static const struct i2c_board_info wm1255_devs[] = {
	{ I2C_BOARD_INFO("wm5100", 0x1a),
	  .irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2,
	},
	{ I2C_BOARD_INFO("wm9081", 0x6c),
	  .platform_data = &wm9081_pdata, },
};

static const struct i2c_board_info wm1259_devs[] = {
	{ I2C_BOARD_INFO("wm8962", 0x1a),
	  .platform_data = &wm8962_pdata,
	  .irq = GLENFARCLAS_PMIC_IRQ_BASE + WM831X_IRQ_GPIO_2,
	},
};
/*
 * Table of known Glenfarclas interface modules, keyed by the ID read
 * from the module's identification register.  Entries without an
 * i2c_devs table are recognised but need no extra device registration.
 */
static __devinitdata const struct {
	u8 id;				/* module ID (register bits 7:2) */
	const char *name;		/* human readable module name */
	const struct i2c_board_info *i2c_devs;	/* devices to register */
	int num_i2c_devs;		/* number of entries in i2c_devs */
} gf_mods[] = {
	{ .id = 0x01, .name = "1250-EV1 Springbank" },
	{ .id = 0x02, .name = "1251-EV1 Jura" },
	{ .id = 0x03, .name = "1252-EV1 Glenlivet" },
	{ .id = 0x11, .name = "6249-EV2 Glenfarclas", },
	{ .id = 0x21, .name = "1275-EV1 Mortlach" },
	{ .id = 0x25, .name = "1274-EV1 Glencadam" },
	{ .id = 0x31, .name = "1253-EV1 Tomatin", },
	{ .id = 0x39, .name = "1254-EV1 Dallas Dhu",
	  .i2c_devs = wm1254_devs, .num_i2c_devs = ARRAY_SIZE(wm1254_devs) },
	{ .id = 0x3a, .name = "1259-EV1 Tobermory",
	  .i2c_devs = wm1259_devs, .num_i2c_devs = ARRAY_SIZE(wm1259_devs) },
	{ .id = 0x3b, .name = "1255-EV1 Kilchoman",
	  .i2c_devs = wm1255_devs, .num_i2c_devs = ARRAY_SIZE(wm1255_devs) },
	{ .id = 0x3c, .name = "1273-EV1 Longmorn" },
};
/*
 * Identify the plugged-in Wolfson interface module and register its
 * I2C devices.
 *
 * Register 0 of the module encodes the module ID in bits 7:2 and the
 * hardware revision in bits 1:0.  Unknown IDs are only warned about.
 *
 * Returns 0 on success (also for unknown modules) or a negative errno
 * if the identification register cannot be read.
 */
static __devinit int wlf_gf_module_probe(struct i2c_client *i2c,
					 const struct i2c_device_id *i2c_id)
{
	int ret, i, j, id, rev;

	ret = i2c_smbus_read_byte_data(i2c, 0);
	if (ret < 0) {
		dev_err(&i2c->dev, "Failed to read ID: %d\n", ret);
		return ret;
	}

	/* bits 7:2 are the module ID, bits 1:0 the revision */
	id = (ret & 0xfc) >> 2;
	rev = ret & 0x3;
	for (i = 0; i < ARRAY_SIZE(gf_mods); i++)
		if (id == gf_mods[i].id)
			break;

	if (i < ARRAY_SIZE(gf_mods)) {
		dev_info(&i2c->dev, "%s revision %d\n",
			 gf_mods[i].name, rev + 1);
		for (j = 0; j < gf_mods[i].num_i2c_devs; j++) {
			/*
			 * i2c_new_device() returns NULL on failure and
			 * provides no error code, so identify the device
			 * by name/address rather than logging a stale ret.
			 */
			if (!i2c_new_device(i2c->adapter,
					    &(gf_mods[i].i2c_devs[j])))
				dev_err(&i2c->dev,
					"Failed to register dev %s at 0x%x\n",
					gf_mods[i].i2c_devs[j].type,
					gf_mods[i].i2c_devs[j].addr);
		}
	} else {
		dev_warn(&i2c->dev, "Unknown module ID %d revision %d\n",
			 id, rev);
	}

	return 0;
}
/* I2C device ID the module-identification driver binds to. */
static const struct i2c_device_id wlf_gf_module_id[] = {
	{ "wlf-gf-module", 0 },
	{ }
};

/* Probe-only driver: identifies the module and registers its devices. */
static struct i2c_driver wlf_gf_module_driver = {
	.driver = {
		.name = "wlf-gf-module",
		.owner = THIS_MODULE,
	},
	.probe = wlf_gf_module_probe,
	.id_table = wlf_gf_module_id,
};
/* Register the module-identification driver with the I2C core. */
static int __init wlf_gf_module_register(void)
{
	int ret;

	ret = i2c_add_driver(&wlf_gf_module_driver);
	return ret;
}
module_init(wlf_gf_module_register);
| gpl-2.0 |
mjmccall/Kernel | net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c | 138 | 12481 |
/* (C) 1999-2001 Paul `Rusty' Russell
* (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/sysctl.h>
#include <net/route.h>
#include <net/ip.h>
#include <linux/netfilter_ipv4.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
int (*nf_nat_seq_adjust_hook)(struct sk_buff *skb,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo);
EXPORT_SYMBOL_GPL(nf_nat_seq_adjust_hook);
/*
 * Extract the IPv4 source/destination addresses from the packet into
 * the conntrack tuple.  Returns false if the header bytes cannot be
 * read from the skb.
 */
static bool ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
			      struct nf_conntrack_tuple *tuple)
{
	__be32 _addrs[2];
	const __be32 *ap;

	/* saddr and daddr are adjacent in the IP header: fetch both at once */
	ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
				sizeof(u_int32_t) * 2, _addrs);
	if (!ap)
		return false;

	tuple->src.u3.ip = ap[0];
	tuple->dst.u3.ip = ap[1];
	return true;
}
/*
 * Build the reply-direction tuple by swapping the endpoints of the
 * original tuple.  Always succeeds for IPv4.
 */
static bool ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
			      const struct nf_conntrack_tuple *orig)
{
	tuple->dst.u3.ip = orig->src.u3.ip;
	tuple->src.u3.ip = orig->dst.u3.ip;
	return true;
}
/* Print the IPv4 part of a tuple ("src=a.b.c.d dst=a.b.c.d ") to /proc. */
static int ipv4_print_tuple(struct seq_file *s,
			    const struct nf_conntrack_tuple *tuple)
{
	return seq_printf(s, "src=%pI4 dst=%pI4 ",
			  &tuple->src.u3.ip, &tuple->dst.u3.ip);
}
/*
 * Locate the layer-4 header inside an IPv4 packet.
 *
 * On success *dataoff is set to the offset of the transport header and
 * *protonum to the IP protocol number, and NF_ACCEPT is returned.
 * Truncated headers and non-initial fragments yield -NF_DROP.
 */
static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff,
			    unsigned int *dataoff, u_int8_t *protonum)
{
	struct iphdr _iph;
	const struct iphdr *iph;

	iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
	if (!iph)
		return -NF_DROP;

	/* Conntrack defragments packets, we might still see fragments
	 * inside ICMP packets though. */
	if (iph->frag_off & htons(IP_OFFSET))
		return -NF_DROP;

	*protonum = iph->protocol;
	*dataoff = nhoff + (iph->ihl << 2);
	return NF_ACCEPT;
}
/*
 * POST_ROUTING/LOCAL_IN hook: run the connection's helper (if any) on
 * the outgoing packet, apply pending NAT sequence-number adjustments,
 * and finally confirm the conntrack entry.
 */
static unsigned int ipv4_confirm(unsigned int hooknum,
				 struct sk_buff *skb,
				 const struct net_device *in,
				 const struct net_device *out,
				 int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;
	enum ip_conntrack_info ctinfo;
	const struct nf_conn_help *help;
	const struct nf_conntrack_helper *helper;
	unsigned int ret;

	/* This is where we call the helper: as the packet goes out. */
	ct = nf_ct_get(skb, &ctinfo);
	/* related replies (e.g. ICMP errors) never get helper treatment */
	if (!ct || ctinfo == IP_CT_RELATED + IP_CT_IS_REPLY)
		goto out;

	help = nfct_help(ct);
	if (!help)
		goto out;

	/* rcu_read_lock()ed by nf_hook_slow */
	helper = rcu_dereference(help->helper);
	if (!helper)
		goto out;

	ret = helper->help(skb, skb_network_offset(skb) + ip_hdrlen(skb),
			   ct, ctinfo);
	if (ret != NF_ACCEPT)
		return ret;

	/* NAT helper marked this connection for seq adjustment */
	if (test_bit(IPS_SEQ_ADJUST_BIT, &ct->status)) {
		typeof(nf_nat_seq_adjust_hook) seq_adjust;

		seq_adjust = rcu_dereference(nf_nat_seq_adjust_hook);
		if (!seq_adjust || !seq_adjust(skb, ct, ctinfo))
			return NF_DROP;
	}
out:
	/* We've seen it coming out the other side: confirm it */
	return nf_conntrack_confirm(skb);
}
/* PRE_ROUTING hook: feed incoming IPv4 packets into conntrack. */
static unsigned int ipv4_conntrack_in(unsigned int hooknum,
				      struct sk_buff *skb,
				      const struct net_device *in,
				      const struct net_device *out,
				      int (*okfn)(struct sk_buff *))
{
	struct net *net = dev_net(in);

	return nf_conntrack_in(net, PF_INET, hooknum, skb);
}
/*
 * LOCAL_OUT hook: feed locally generated IPv4 packets into conntrack.
 * Malformed headers (raw sockets) are passed through untracked.
 */
static unsigned int ipv4_conntrack_local(unsigned int hooknum,
					 struct sk_buff *skb,
					 const struct net_device *in,
					 const struct net_device *out,
					 int (*okfn)(struct sk_buff *))
{
	/* root is playing with raw sockets. */
	if (skb->len < sizeof(struct iphdr))
		return NF_ACCEPT;
	if (ip_hdrlen(skb) < sizeof(struct iphdr))
		return NF_ACCEPT;

	return nf_conntrack_in(dev_net(out), PF_INET, hooknum, skb);
}
/* Connection tracking may drop packets, but never alters them, so
make it the first hook. */
/* Connection tracking may drop packets, but never alters them, so
   make it the first hook. */
static struct nf_hook_ops ipv4_conntrack_ops[] __read_mostly = {
	{
		/* track packets arriving from the network */
		.hook		= ipv4_conntrack_in,
		.owner		= THIS_MODULE,
		.pf		= PF_INET,
		.hooknum	= NF_INET_PRE_ROUTING,
		.priority	= NF_IP_PRI_CONNTRACK,
	},
	{
		/* track locally generated packets */
		.hook		= ipv4_conntrack_local,
		.owner		= THIS_MODULE,
		.pf		= PF_INET,
		.hooknum	= NF_INET_LOCAL_OUT,
		.priority	= NF_IP_PRI_CONNTRACK,
	},
	{
		/* confirm forwarded/outgoing connections late */
		.hook		= ipv4_confirm,
		.owner		= THIS_MODULE,
		.pf		= PF_INET,
		.hooknum	= NF_INET_POST_ROUTING,
		.priority	= NF_IP_PRI_CONNTRACK_CONFIRM,
	},
	{
		/* confirm connections terminating locally */
		.hook		= ipv4_confirm,
		.owner		= THIS_MODULE,
		.pf		= PF_INET,
		.hooknum	= NF_INET_LOCAL_IN,
		.priority	= NF_IP_PRI_CONNTRACK_CONFIRM,
	},
};
#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
/* bounds for the ip_conntrack_log_invalid protocol number sysctl */
static int log_invalid_proto_min = 0;
static int log_invalid_proto_max = 255;

/* legacy /proc/sys/net/ipv4/netfilter compatibility sysctls */
static ctl_table ip_ct_sysctl_table[] = {
	{
		.ctl_name	= NET_IPV4_NF_CONNTRACK_MAX,
		.procname	= "ip_conntrack_max",
		.data		= &nf_conntrack_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* read-only: current number of tracked connections */
		.ctl_name	= NET_IPV4_NF_CONNTRACK_COUNT,
		.procname	= "ip_conntrack_count",
		.data		= &init_net.ct.count,
		.maxlen		= sizeof(int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		/* read-only: hash table size */
		.ctl_name	= NET_IPV4_NF_CONNTRACK_BUCKETS,
		.procname	= "ip_conntrack_buckets",
		.data		= &nf_conntrack_htable_size,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0444,
		.proc_handler	= proc_dointvec,
	},
	{
		.ctl_name	= NET_IPV4_NF_CONNTRACK_CHECKSUM,
		.procname	= "ip_conntrack_checksum",
		.data		= &init_net.ct.sysctl_checksum,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* clamped to a valid IP protocol number (0..255) */
		.ctl_name	= NET_IPV4_NF_CONNTRACK_LOG_INVALID,
		.procname	= "ip_conntrack_log_invalid",
		.data		= &init_net.ct.sysctl_log_invalid,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.strategy	= sysctl_intvec,
		.extra1		= &log_invalid_proto_min,
		.extra2		= &log_invalid_proto_max,
	},
	{
		.ctl_name	= 0
	}
};
#endif /* CONFIG_SYSCTL && CONFIG_NF_CONNTRACK_PROC_COMPAT */
/* Fast function for those who don't want to parse /proc (and I don't
blame them). */
/* Reversing the socket's dst/src point of view gives us the reply
mapping. */
/*
 * SO_ORIGINAL_DST getsockopt handler: look up the conntrack entry for
 * a TCP socket and return the pre-NAT (original) destination address
 * and port as a sockaddr_in.
 *
 * Returns 0 on success, -ENOPROTOOPT for non-TCP sockets, -EINVAL for
 * a too-small buffer, -ENOENT if no conntrack entry exists, -EFAULT on
 * a failed copy to userspace.
 */
static int
getorigdst(struct sock *sk, int optval, void __user *user, int *len)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct nf_conntrack_tuple_hash *h;
	struct nf_conntrack_tuple tuple;

	/* build the reply-direction tuple from the socket's addresses */
	memset(&tuple, 0, sizeof(tuple));
	tuple.src.u3.ip = inet->rcv_saddr;
	tuple.src.u.tcp.port = inet->sport;
	tuple.dst.u3.ip = inet->daddr;
	tuple.dst.u.tcp.port = inet->dport;
	tuple.src.l3num = PF_INET;
	tuple.dst.protonum = IPPROTO_TCP;

	/* We only do TCP at the moment: is there a better way? */
	if (strcmp(sk->sk_prot->name, "TCP")) {
		pr_debug("SO_ORIGINAL_DST: Not a TCP socket\n");
		return -ENOPROTOOPT;
	}

	if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
		pr_debug("SO_ORIGINAL_DST: len %d not %Zu\n",
			 *len, sizeof(struct sockaddr_in));
		return -EINVAL;
	}

	h = nf_conntrack_find_get(sock_net(sk), &tuple);
	if (h) {
		struct sockaddr_in sin;
		struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);

		/* report the ORIGINAL direction's destination (pre-NAT) */
		sin.sin_family = AF_INET;
		sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL]
			.tuple.dst.u.tcp.port;
		sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
			.tuple.dst.u3.ip;
		/* avoid leaking kernel stack bytes to userspace */
		memset(sin.sin_zero, 0, sizeof(sin.sin_zero));

		pr_debug("SO_ORIGINAL_DST: %pI4 %u\n",
			 &sin.sin_addr.s_addr, ntohs(sin.sin_port));
		nf_ct_put(ct);
		if (copy_to_user(user, &sin, sizeof(sin)) != 0)
			return -EFAULT;
		else
			return 0;
	}
	pr_debug("SO_ORIGINAL_DST: Can't find %pI4/%u-%pI4/%u.\n",
		 &tuple.src.u3.ip, ntohs(tuple.src.u.tcp.port),
		 &tuple.dst.u3.ip, ntohs(tuple.dst.u.tcp.port));
	return -ENOENT;
}
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>
/*
 * Serialize the IPv4 addresses of a tuple as netlink attributes.
 * NOTE: NLA_PUT_BE32 jumps to nla_put_failure when the skb is full.
 */
static int ipv4_tuple_to_nlattr(struct sk_buff *skb,
				const struct nf_conntrack_tuple *tuple)
{
	NLA_PUT_BE32(skb, CTA_IP_V4_SRC, tuple->src.u3.ip);
	NLA_PUT_BE32(skb, CTA_IP_V4_DST, tuple->dst.u3.ip);
	return 0;

nla_put_failure:
	return -1;
}
/* netlink attribute validation policy for the IPv4 tuple attributes */
static const struct nla_policy ipv4_nla_policy[CTA_IP_MAX+1] = {
	[CTA_IP_V4_SRC]	= { .type = NLA_U32 },
	[CTA_IP_V4_DST]	= { .type = NLA_U32 },
};
/*
 * Parse the IPv4 addresses of a tuple from netlink attributes.
 * Both source and destination attributes must be present.
 */
static int ipv4_nlattr_to_tuple(struct nlattr *tb[],
				struct nf_conntrack_tuple *t)
{
	if (!tb[CTA_IP_V4_SRC])
		return -EINVAL;
	if (!tb[CTA_IP_V4_DST])
		return -EINVAL;

	t->src.u3.ip = nla_get_be32(tb[CTA_IP_V4_SRC]);
	t->dst.u3.ip = nla_get_be32(tb[CTA_IP_V4_DST]);
	return 0;
}
#endif
/* socket option registration for SO_ORIGINAL_DST */
static struct nf_sockopt_ops so_getorigdst = {
	.pf		= PF_INET,
	.get_optmin	= SO_ORIGINAL_DST,
	.get_optmax	= SO_ORIGINAL_DST+1,
	.get		= &getorigdst,
	.owner		= THIS_MODULE,
};
/* layer-3 protocol operations exported to the conntrack core */
struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
	.l3proto	 = PF_INET,
	.name		 = "ipv4",
	.pkt_to_tuple	 = ipv4_pkt_to_tuple,
	.invert_tuple	 = ipv4_invert_tuple,
	.print_tuple	 = ipv4_print_tuple,
	.get_l4proto	 = ipv4_get_l4proto,
#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
	.tuple_to_nlattr = ipv4_tuple_to_nlattr,
	.nlattr_to_tuple = ipv4_nlattr_to_tuple,
	.nla_policy	 = ipv4_nla_policy,
#endif
#if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
	.ctl_table_path  = nf_net_ipv4_netfilter_sysctl_path,
	.ctl_table	 = ip_ct_sysctl_table,
#endif
	.me		 = THIS_MODULE,
};
module_param_call(hashsize, nf_conntrack_set_hashsize, param_get_uint,
&nf_conntrack_htable_size, 0600);
MODULE_ALIAS("nf_conntrack-" __stringify(AF_INET));
MODULE_ALIAS("ip_conntrack");
MODULE_LICENSE("GPL");
/*
 * Module init: register the SO_ORIGINAL_DST sockopt, the TCP/UDP/ICMP
 * layer-4 trackers, the IPv4 layer-3 tracker and the netfilter hooks.
 * On any failure the already-registered pieces are unwound in reverse
 * order via the goto chain below.
 */
static int __init nf_conntrack_l3proto_ipv4_init(void)
{
	int ret = 0;

	need_conntrack();
	nf_defrag_ipv4_enable();

	ret = nf_register_sockopt(&so_getorigdst);
	if (ret < 0) {
		printk(KERN_ERR "Unable to register netfilter socket option\n");
		return ret;
	}

	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_tcp4);
	if (ret < 0) {
		printk("nf_conntrack_ipv4: can't register tcp.\n");
		goto cleanup_sockopt;
	}

	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_udp4);
	if (ret < 0) {
		printk("nf_conntrack_ipv4: can't register udp.\n");
		goto cleanup_tcp;
	}

	ret = nf_conntrack_l4proto_register(&nf_conntrack_l4proto_icmp);
	if (ret < 0) {
		printk("nf_conntrack_ipv4: can't register icmp.\n");
		goto cleanup_udp;
	}

	ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4);
	if (ret < 0) {
		printk("nf_conntrack_ipv4: can't register ipv4\n");
		goto cleanup_icmp;
	}

	ret = nf_register_hooks(ipv4_conntrack_ops,
				ARRAY_SIZE(ipv4_conntrack_ops));
	if (ret < 0) {
		printk("nf_conntrack_ipv4: can't register hooks.\n");
		goto cleanup_ipv4;
	}
#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
	ret = nf_conntrack_ipv4_compat_init();
	if (ret < 0)
		goto cleanup_hooks;
#endif
	return ret;
#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
 cleanup_hooks:
	nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
#endif
 cleanup_ipv4:
	nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
 cleanup_icmp:
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
 cleanup_udp:
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
 cleanup_tcp:
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
 cleanup_sockopt:
	nf_unregister_sockopt(&so_getorigdst);
	return ret;
}
/*
 * Module exit: tear everything down in the reverse order of
 * registration in nf_conntrack_l3proto_ipv4_init().
 */
static void __exit nf_conntrack_l3proto_ipv4_fini(void)
{
	/* wait for in-flight packets to leave the hooks */
	synchronize_net();
#if defined(CONFIG_PROC_FS) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
	nf_conntrack_ipv4_compat_fini();
#endif
	nf_unregister_hooks(ipv4_conntrack_ops, ARRAY_SIZE(ipv4_conntrack_ops));
	nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_icmp);
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_udp4);
	nf_conntrack_l4proto_unregister(&nf_conntrack_l4proto_tcp4);
	nf_unregister_sockopt(&so_getorigdst);
}
module_init(nf_conntrack_l3proto_ipv4_init);
module_exit(nf_conntrack_l3proto_ipv4_fini);
/*
 * Empty exported symbol: other modules reference it to force this
 * module to be loaded (creates a module dependency).
 */
void need_ipv4_conntrack(void)
{
}
| gpl-2.0 |
oceanfly/linux | drivers/media/dvb-frontends/tda10071.c | 394 | 28571 | /*
* NXP TDA10071 + Conexant CX24118A DVB-S/S2 demodulator + tuner driver
*
* Copyright (C) 2011 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include "tda10071_priv.h"
static struct dvb_frontend_ops tda10071_ops;
/*
* XXX: regmap_update_bits() does not fit our needs as it does not support
* partially volatile registers. Also it performs register read even mask is as
* wide as register value.
*/
/* write single register with mask */
/*
 * Write a register, changing only the bits selected by 'mask'.
 * A full mask (0xff) skips the read-modify-write cycle.
 * Returns 0 on success or a negative regmap error code.
 */
static int tda10071_wr_reg_mask(struct tda10071_dev *dev,
				u8 reg, u8 val, u8 mask)
{
	u8 orig;
	int ret;

	/* a full-width mask overwrites the register - no read needed */
	if (mask != 0xff) {
		ret = regmap_bulk_read(dev->regmap, reg, &orig, 1);
		if (ret)
			return ret;

		/* keep bits outside the mask, take new bits inside it */
		val = (orig & ~mask) | (val & mask);
	}

	return regmap_bulk_write(dev->regmap, reg, &val, 1);
}
/* execute firmware command */
static int tda10071_cmd_execute(struct tda10071_dev *dev,
struct tda10071_cmd *cmd)
{
struct i2c_client *client = dev->client;
int ret, i;
unsigned int uitmp;
if (!dev->warm) {
ret = -EFAULT;
goto error;
}
mutex_lock(&dev->cmd_execute_mutex);
/* write cmd and args for firmware */
ret = regmap_bulk_write(dev->regmap, 0x00, cmd->args, cmd->len);
if (ret)
goto error_mutex_unlock;
/* start cmd execution */
ret = regmap_write(dev->regmap, 0x1f, 1);
if (ret)
goto error_mutex_unlock;
/* wait cmd execution terminate */
for (i = 1000, uitmp = 1; i && uitmp; i--) {
ret = regmap_read(dev->regmap, 0x1f, &uitmp);
if (ret)
goto error_mutex_unlock;
usleep_range(200, 5000);
}
mutex_unlock(&dev->cmd_execute_mutex);
dev_dbg(&client->dev, "loop=%d\n", i);
if (i == 0) {
ret = -ETIMEDOUT;
goto error;
}
return ret;
error_mutex_unlock:
mutex_unlock(&dev->cmd_execute_mutex);
error:
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
/* Turn the 22 kHz continuous tone on or off via CMD_LNB_PCB_CONFIG. */
static int tda10071_set_tone(struct dvb_frontend *fe,
	enum fe_sec_tone_mode fe_sec_tone_mode)
{
	struct tda10071_dev *dev = fe->demodulator_priv;
	struct i2c_client *client = dev->client;
	struct tda10071_cmd cmd;
	u8 tone_on;
	int ret;

	if (!dev->warm) {
		ret = -EFAULT;
		goto error;
	}

	dev_dbg(&client->dev, "tone_mode=%d\n", fe_sec_tone_mode);

	if (fe_sec_tone_mode == SEC_TONE_ON) {
		tone_on = 1;
	} else if (fe_sec_tone_mode == SEC_TONE_OFF) {
		tone_on = 0;
	} else {
		dev_dbg(&client->dev, "invalid fe_sec_tone_mode\n");
		ret = -EINVAL;
		goto error;
	}

	cmd.args[0] = CMD_LNB_PCB_CONFIG;
	cmd.args[1] = 0;
	cmd.args[2] = 0x00;
	cmd.args[3] = 0x00;
	cmd.args[4] = tone_on;
	cmd.len = 5;
	ret = tda10071_cmd_execute(dev, &cmd);
	if (ret)
		goto error;

	return ret;
error:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
/* Select LNB supply level via CMD_LNB_SET_DC_LEVEL (0 = 13V, 1 = 18V). */
static int tda10071_set_voltage(struct dvb_frontend *fe,
	enum fe_sec_voltage fe_sec_voltage)
{
	struct tda10071_dev *dev = fe->demodulator_priv;
	struct i2c_client *client = dev->client;
	struct tda10071_cmd cmd;
	u8 level;
	int ret;

	if (!dev->warm) {
		ret = -EFAULT;
		goto error;
	}

	dev_dbg(&client->dev, "voltage=%d\n", fe_sec_voltage);

	if (fe_sec_voltage == SEC_VOLTAGE_18) {
		level = 1;
	} else if (fe_sec_voltage == SEC_VOLTAGE_13 ||
		   fe_sec_voltage == SEC_VOLTAGE_OFF) {
		/* SEC_VOLTAGE_OFF uses the same DC level as 13V */
		level = 0;
	} else {
		dev_dbg(&client->dev, "invalid fe_sec_voltage\n");
		ret = -EINVAL;
		goto error;
	}

	cmd.args[0] = CMD_LNB_SET_DC_LEVEL;
	cmd.args[1] = 0;
	cmd.args[2] = level;
	cmd.len = 3;
	ret = tda10071_cmd_execute(dev, &cmd);
	if (ret)
		goto error;

	return ret;
error:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
/*
 * Send a DiSEqC master command (3-6 message bytes) through the LNB
 * firmware: wait until register 0x47 bit 0 signals the previous TX is
 * done, clear that bit, then issue CMD_LNB_SEND_DISEQC with the message
 * appended to the command arguments.
 */
static int tda10071_diseqc_send_master_cmd(struct dvb_frontend *fe,
	struct dvb_diseqc_master_cmd *diseqc_cmd)
{
	struct tda10071_dev *dev = fe->demodulator_priv;
	struct i2c_client *client = dev->client;
	struct tda10071_cmd cmd;
	int ret, i;
	unsigned int uitmp;

	if (!dev->warm) {
		ret = -EFAULT;
		goto error;
	}

	dev_dbg(&client->dev, "msg_len=%d\n", diseqc_cmd->msg_len);

	/* DiSEqC messages are 3 to 6 bytes long */
	if (diseqc_cmd->msg_len < 3 || diseqc_cmd->msg_len > 6) {
		ret = -EINVAL;
		goto error;
	}

	/* wait LNB TX (up to 500 * 10-20ms) */
	for (i = 500, uitmp = 0; i && !uitmp; i--) {
		ret = regmap_read(dev->regmap, 0x47, &uitmp);
		if (ret)
			goto error;
		uitmp = (uitmp >> 0) & 1;
		usleep_range(10000, 20000);
	}
	dev_dbg(&client->dev, "loop=%d\n", i);

	if (i == 0) {
		ret = -ETIMEDOUT;
		goto error;
	}

	/* clear the TX flag before queuing the new message */
	ret = regmap_update_bits(dev->regmap, 0x47, 0x01, 0x00);
	if (ret)
		goto error;

	cmd.args[0] = CMD_LNB_SEND_DISEQC;
	cmd.args[1] = 0;
	cmd.args[2] = 0;
	cmd.args[3] = 0;
	cmd.args[4] = 2;
	cmd.args[5] = 0;
	cmd.args[6] = diseqc_cmd->msg_len;
	memcpy(&cmd.args[7], diseqc_cmd->msg, diseqc_cmd->msg_len);
	cmd.len = 7 + diseqc_cmd->msg_len;
	ret = tda10071_cmd_execute(dev, &cmd);
	if (ret)
		goto error;

	return ret;
error:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
/*
 * Read a DiSEqC slave reply: wait for register 0x47 bit 1 (LNB RX),
 * read the reply length from 0x46, ask the firmware to stage the reply
 * (CMD_LNB_UPDATE_REPLY), then bulk-read the bytes. The length is
 * clamped to sizeof(reply->msg) so the copy cannot overflow the
 * DVB API buffer.
 */
static int tda10071_diseqc_recv_slave_reply(struct dvb_frontend *fe,
	struct dvb_diseqc_slave_reply *reply)
{
	struct tda10071_dev *dev = fe->demodulator_priv;
	struct i2c_client *client = dev->client;
	struct tda10071_cmd cmd;
	int ret, i;
	unsigned int uitmp;

	if (!dev->warm) {
		ret = -EFAULT;
		goto error;
	}

	dev_dbg(&client->dev, "\n");

	/* wait LNB RX (up to 500 * 10-20ms) */
	for (i = 500, uitmp = 0; i && !uitmp; i--) {
		ret = regmap_read(dev->regmap, 0x47, &uitmp);
		if (ret)
			goto error;
		uitmp = (uitmp >> 1) & 1;
		usleep_range(10000, 20000);
	}
	dev_dbg(&client->dev, "loop=%d\n", i);

	if (i == 0) {
		ret = -ETIMEDOUT;
		goto error;
	}

	/* reply len */
	ret = regmap_read(dev->regmap, 0x46, &uitmp);
	if (ret)
		goto error;

	reply->msg_len = uitmp & 0x1f; /* [4:0] */
	if (reply->msg_len > sizeof(reply->msg))
		reply->msg_len = sizeof(reply->msg); /* truncate API max */

	/* read reply */
	cmd.args[0] = CMD_LNB_UPDATE_REPLY;
	cmd.args[1] = 0;
	cmd.len = 2;
	ret = tda10071_cmd_execute(dev, &cmd);
	if (ret)
		goto error;

	/* reply bytes start right after the command bytes (reg cmd.len) */
	ret = regmap_bulk_read(dev->regmap, cmd.len, reply->msg,
			reply->msg_len);
	if (ret)
		goto error;

	return ret;
error:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
/*
 * Send a DiSEqC mini (tone-burst) command: wait for the LNB transmitter,
 * clear the TX flag, then issue CMD_LNB_SEND_TONEBURST.
 */
static int tda10071_diseqc_send_burst(struct dvb_frontend *fe,
	enum fe_sec_mini_cmd fe_sec_mini_cmd)
{
	struct tda10071_dev *dev = fe->demodulator_priv;
	struct i2c_client *client = dev->client;
	struct tda10071_cmd cmd;
	unsigned int flag;
	int ret, tries;
	u8 burst_sel;

	if (!dev->warm) {
		ret = -EFAULT;
		goto error;
	}

	dev_dbg(&client->dev, "fe_sec_mini_cmd=%d\n", fe_sec_mini_cmd);

	if (fe_sec_mini_cmd == SEC_MINI_A) {
		burst_sel = 0;
	} else if (fe_sec_mini_cmd == SEC_MINI_B) {
		burst_sel = 1;
	} else {
		dev_dbg(&client->dev, "invalid fe_sec_mini_cmd\n");
		ret = -EINVAL;
		goto error;
	}

	/* wait LNB TX (reg 0x47 bit 0), up to 500 * 10-20ms */
	for (tries = 500, flag = 0; tries && !flag; tries--) {
		ret = regmap_read(dev->regmap, 0x47, &flag);
		if (ret)
			goto error;
		flag = (flag >> 0) & 1;
		usleep_range(10000, 20000);
	}
	dev_dbg(&client->dev, "loop=%d\n", tries);

	if (tries == 0) {
		ret = -ETIMEDOUT;
		goto error;
	}

	/* clear the TX flag before queuing the burst */
	ret = regmap_update_bits(dev->regmap, 0x47, 0x01, 0x00);
	if (ret)
		goto error;

	cmd.args[0] = CMD_LNB_SEND_TONEBURST;
	cmd.args[1] = 0;
	cmd.args[2] = burst_sel;
	cmd.len = 3;
	ret = tda10071_cmd_execute(dev, &cmd);
	if (ret)
		goto error;

	return ret;
error:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
/*
 * Read lock status from register 0x39 and refresh the DVBv5 statistics
 * (signal strength, CNR, post-BER, block errors) in the property cache.
 * Also caches a DVBv3-style BER in dev->dvbv3_ber for read_ber().
 * Returns 0 even when the device is cold (status stays 0).
 */
static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
	struct tda10071_dev *dev = fe->demodulator_priv;
	struct i2c_client *client = dev->client;
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	struct tda10071_cmd cmd;
	int ret;
	unsigned int uitmp;
	u8 buf[8];

	*status = 0;

	if (!dev->warm) {
		ret = 0;
		goto error;
	}

	ret = regmap_read(dev->regmap, 0x39, &uitmp);
	if (ret)
		goto error;

	/* 0x39[0] tuner PLL */
	if (uitmp & 0x02) /* demod PLL */
		*status |= FE_HAS_SIGNAL | FE_HAS_CARRIER;
	if (uitmp & 0x04) /* viterbi or LDPC*/
		*status |= FE_HAS_VITERBI;
	if (uitmp & 0x08) /* RS or BCH */
		*status |= FE_HAS_SYNC | FE_HAS_LOCK;

	dev->fe_status = *status;

	/* signal strength */
	if (dev->fe_status & FE_HAS_SIGNAL) {
		cmd.args[0] = CMD_GET_AGCACC;
		cmd.args[1] = 0;
		cmd.len = 2;
		ret = tda10071_cmd_execute(dev, &cmd);
		if (ret)
			goto error;

		/* input power estimate dBm */
		ret = regmap_read(dev->regmap, 0x50, &uitmp);
		if (ret)
			goto error;

		/* register value is dBm + 256; scale to millidecibels */
		c->strength.stat[0].scale = FE_SCALE_DECIBEL;
		c->strength.stat[0].svalue = (int) (uitmp - 256) * 1000;
	} else {
		c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	}

	/* CNR */
	if (dev->fe_status & FE_HAS_VITERBI) {
		/* Es/No */
		ret = regmap_bulk_read(dev->regmap, 0x3a, buf, 2);
		if (ret)
			goto error;

		c->cnr.stat[0].scale = FE_SCALE_DECIBEL;
		c->cnr.stat[0].svalue = (buf[0] << 8 | buf[1] << 0) * 100;
	} else {
		c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	}

	/* UCB/PER/BER */
	if (dev->fe_status & FE_HAS_LOCK) {
		/* TODO: report total bits/packets */
		u8 delivery_system, reg, len;

		/* counter register/layout differs between DVB-S and S2 */
		switch (dev->delivery_system) {
		case SYS_DVBS:
			reg = 0x4c;
			len = 8;
			delivery_system = 1;
			break;
		case SYS_DVBS2:
			reg = 0x4d;
			len = 4;
			delivery_system = 0;
			break;
		default:
			ret = -EINVAL;
			goto error;
		}

		ret = regmap_read(dev->regmap, reg, &uitmp);
		if (ret)
			goto error;

		/* skip update when the measurement counter has not advanced */
		if (dev->meas_count == uitmp) {
			dev_dbg(&client->dev, "meas not ready=%02x\n", uitmp);
			ret = 0;
			goto error;
		} else {
			dev->meas_count = uitmp;
		}

		cmd.args[0] = CMD_BER_UPDATE_COUNTERS;
		cmd.args[1] = 0;
		cmd.args[2] = delivery_system;
		cmd.len = 3;
		ret = tda10071_cmd_execute(dev, &cmd);
		if (ret)
			goto error;

		ret = regmap_bulk_read(dev->regmap, cmd.len, buf, len);
		if (ret)
			goto error;

		if (dev->delivery_system == SYS_DVBS) {
			/* DVB-S: 32-bit bit-error count + 16-bit block errors */
			dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 |
					 buf[2] << 8 | buf[3] << 0;
			dev->post_bit_error += buf[0] << 24 | buf[1] << 16 |
					buf[2] << 8 | buf[3] << 0;
			c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
			c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
			dev->block_error += buf[4] << 8 | buf[5] << 0;
			c->block_error.stat[0].scale = FE_SCALE_COUNTER;
			c->block_error.stat[0].uvalue = dev->block_error;
		} else {
			/* DVB-S2: 16-bit bit-error count, no block counter */
			dev->dvbv3_ber = buf[0] << 8 | buf[1] << 0;
			dev->post_bit_error += buf[0] << 8 | buf[1] << 0;
			c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
			c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
			c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
		}
	} else {
		c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
		c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	}

	return ret;
error:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
/* DVBv3 SNR: derive from the cached DVBv5 CNR statistic (0.1 dB units). */
static int tda10071_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	u16 val = 0;

	if (c->cnr.stat[0].scale == FE_SCALE_DECIBEL)
		val = div_s64(c->cnr.stat[0].svalue, 100);

	*snr = val;
	return 0;
}
/* DVBv3 signal strength: map the cached decibel statistic to 0x0000-0xffff. */
static int tda10071_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	unsigned int level;

	if (c->strength.stat[0].scale != FE_SCALE_DECIBEL) {
		*strength = 0;
		return 0;
	}

	/* back to the raw register scale (dBm + 256) */
	level = div_s64(c->strength.stat[0].svalue, 1000) + 256;
	level = clamp(level, 181U, 236U); /* -75dBm - -20dBm */
	/* map 181..236 linearly onto the full 16-bit range */
	*strength = (level - 181) * 0xffff / (236 - 181);
	return 0;
}
/* DVBv3 BER: return the value cached by tda10071_read_status(). */
static int tda10071_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	struct tda10071_dev *dev = fe->demodulator_priv;

	*ber = dev->dvbv3_ber;

	return 0;
}
/* DVBv3 uncorrected blocks: taken from the cached DVBv5 block-error counter. */
static int tda10071_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	u32 count = 0;

	if (c->block_error.stat[0].scale == FE_SCALE_COUNTER)
		count = c->block_error.stat[0].uvalue;

	*ucblocks = count;
	return 0;
}
/*
 * Tune: translate the DVBv5 property cache (delivery system, modulation,
 * FEC, rolloff, pilot, inversion) into firmware encodings, look up the
 * MODCOD value, program the symbol-rate divider and issue
 * CMD_CHANGE_CHANNEL. On success dev->delivery_system is updated so the
 * statistics code knows which counter layout to use.
 */
static int tda10071_set_frontend(struct dvb_frontend *fe)
{
	struct tda10071_dev *dev = fe->demodulator_priv;
	struct i2c_client *client = dev->client;
	struct tda10071_cmd cmd;
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	int ret, i;
	u8 mode, rolloff, pilot, inversion, div;
	enum fe_modulation modulation;

	dev_dbg(&client->dev,
		"delivery_system=%d modulation=%d frequency=%u symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
		c->delivery_system, c->modulation, c->frequency, c->symbol_rate,
		c->inversion, c->pilot, c->rolloff);

	/* invalidate until the new tune succeeds */
	dev->delivery_system = SYS_UNDEFINED;

	if (!dev->warm) {
		ret = -EFAULT;
		goto error;
	}

	switch (c->inversion) {
	case INVERSION_OFF:
		inversion = 1;
		break;
	case INVERSION_ON:
		inversion = 0;
		break;
	case INVERSION_AUTO:
		/* 2 = auto; try first on then off
		 * 3 = auto; try first off then on */
		inversion = 3;
		break;
	default:
		dev_dbg(&client->dev, "invalid inversion\n");
		ret = -EINVAL;
		goto error;
	}

	switch (c->delivery_system) {
	case SYS_DVBS:
		/* DVB-S is always QPSK; rolloff 0.35, pilot auto */
		modulation = QPSK;
		rolloff = 0;
		pilot = 2;
		break;
	case SYS_DVBS2:
		modulation = c->modulation;

		switch (c->rolloff) {
		case ROLLOFF_20:
			rolloff = 2;
			break;
		case ROLLOFF_25:
			rolloff = 1;
			break;
		case ROLLOFF_35:
			rolloff = 0;
			break;
		case ROLLOFF_AUTO:
		default:
			dev_dbg(&client->dev, "invalid rolloff\n");
			ret = -EINVAL;
			goto error;
		}

		switch (c->pilot) {
		case PILOT_OFF:
			pilot = 0;
			break;
		case PILOT_ON:
			pilot = 1;
			break;
		case PILOT_AUTO:
			pilot = 2;
			break;
		default:
			dev_dbg(&client->dev, "invalid pilot\n");
			ret = -EINVAL;
			goto error;
		}
		break;
	default:
		dev_dbg(&client->dev, "invalid delivery_system\n");
		ret = -EINVAL;
		goto error;
	}

	/* look up the firmware MODCOD byte for this system/modulation/FEC */
	for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
		if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
			modulation == TDA10071_MODCOD[i].modulation &&
			c->fec_inner == TDA10071_MODCOD[i].fec) {
			mode = TDA10071_MODCOD[i].val;
			dev_dbg(&client->dev, "mode found=%02x\n", mode);
			break;
		}
	}

	if (mode == 0xff) {
		dev_dbg(&client->dev, "invalid parameter combination\n");
		ret = -EINVAL;
		goto error;
	}

	/* larger divider for low symbol rates */
	if (c->symbol_rate <= 5000000)
		div = 14;
	else
		div = 4;

	ret = regmap_write(dev->regmap, 0x81, div);
	if (ret)
		goto error;

	ret = regmap_write(dev->regmap, 0xe3, div);
	if (ret)
		goto error;

	cmd.args[0] = CMD_CHANGE_CHANNEL;
	cmd.args[1] = 0;
	cmd.args[2] = mode;
	cmd.args[3] = (c->frequency >> 16) & 0xff;
	cmd.args[4] = (c->frequency >> 8) & 0xff;
	cmd.args[5] = (c->frequency >> 0) & 0xff;
	cmd.args[6] = ((c->symbol_rate / 1000) >> 8) & 0xff;
	cmd.args[7] = ((c->symbol_rate / 1000) >> 0) & 0xff;
	cmd.args[8] = (tda10071_ops.info.frequency_tolerance >> 8) & 0xff;
	cmd.args[9] = (tda10071_ops.info.frequency_tolerance >> 0) & 0xff;
	cmd.args[10] = rolloff;
	cmd.args[11] = inversion;
	cmd.args[12] = pilot;
	cmd.args[13] = 0x00;
	cmd.args[14] = 0x00;
	cmd.len = 15;
	ret = tda10071_cmd_execute(dev, &cmd);
	if (ret)
		goto error;

	dev->delivery_system = c->delivery_system;

	return ret;
error:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
/*
 * Read back the tuned parameters from the demod registers into the
 * DVBv5 property cache. Only meaningful when warm and locked; otherwise
 * returns 0 without touching the cache.
 */
static int tda10071_get_frontend(struct dvb_frontend *fe)
{
	struct tda10071_dev *dev = fe->demodulator_priv;
	struct i2c_client *client = dev->client;
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	int ret, i;
	u8 buf[5], tmp;

	if (!dev->warm || !(dev->fe_status & FE_HAS_LOCK)) {
		ret = 0;
		goto error;
	}

	ret = regmap_bulk_read(dev->regmap, 0x30, buf, 5);
	if (ret)
		goto error;

	/* MODCOD lookup; note: no break, so the last matching entry wins */
	tmp = buf[0] & 0x3f;
	for (i = 0; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
		if (tmp == TDA10071_MODCOD[i].val) {
			c->modulation = TDA10071_MODCOD[i].modulation;
			c->fec_inner = TDA10071_MODCOD[i].fec;
			c->delivery_system = TDA10071_MODCOD[i].delivery_system;
		}
	}

	/* buf[1] bit 0: spectral inversion */
	switch ((buf[1] >> 0) & 0x01) {
	case 0:
		c->inversion = INVERSION_ON;
		break;
	case 1:
		c->inversion = INVERSION_OFF;
		break;
	}

	/* buf[1] bit 7: pilot */
	switch ((buf[1] >> 7) & 0x01) {
	case 0:
		c->pilot = PILOT_OFF;
		break;
	case 1:
		c->pilot = PILOT_ON;
		break;
	}

	c->frequency = (buf[2] << 16) | (buf[3] << 8) | (buf[4] << 0);

	ret = regmap_bulk_read(dev->regmap, 0x52, buf, 3);
	if (ret)
		goto error;

	c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;

	return ret;
error:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
/*
 * Frontend init.
 * Warm path: wake the device from sleep with a register table, then
 * CMD_SET_SLEEP_MODE(0).
 * Cold path: download TDA10071_FIRMWARE in (i2c_wr_max - 1)-byte chunks,
 * start it, verify it runs (reg 0x51), then configure demod, tuner,
 * MPEG TS output, LNB and BER measurement via firmware commands.
 * Finally resets the DVBv5 statistic slots so applications can see
 * which statistics this driver supports.
 */
static int tda10071_init(struct dvb_frontend *fe)
{
	struct tda10071_dev *dev = fe->demodulator_priv;
	struct i2c_client *client = dev->client;
	struct dtv_frontend_properties *c = &fe->dtv_property_cache;
	struct tda10071_cmd cmd;
	int ret, i, len, remaining, fw_size;
	unsigned int uitmp;
	const struct firmware *fw;
	u8 *fw_file = TDA10071_FIRMWARE;
	u8 tmp, buf[4];
	/* wake-up register writes (warm path) */
	struct tda10071_reg_val_mask tab[] = {
		{ 0xcd, 0x00, 0x07 },
		{ 0x80, 0x00, 0x02 },
		{ 0xcd, 0x00, 0xc0 },
		{ 0xce, 0x00, 0x1b },
		{ 0x9d, 0x00, 0x01 },
		{ 0x9d, 0x00, 0x02 },
		{ 0x9e, 0x00, 0x01 },
		{ 0x87, 0x00, 0x80 },
		{ 0xce, 0x00, 0x08 },
		{ 0xce, 0x00, 0x10 },
	};
	/* pre-firmware-download init writes (cold path) */
	struct tda10071_reg_val_mask tab2[] = {
		{ 0xf1, 0x70, 0xff },
		{ 0x88, dev->pll_multiplier, 0x3f },
		{ 0x89, 0x00, 0x10 },
		{ 0x89, 0x10, 0x10 },
		{ 0xc0, 0x01, 0x01 },
		{ 0xc0, 0x00, 0x01 },
		{ 0xe0, 0xff, 0xff },
		{ 0xe0, 0x00, 0xff },
		{ 0x96, 0x1e, 0x7e },
		{ 0x8b, 0x08, 0x08 },
		{ 0x8b, 0x00, 0x08 },
		{ 0x8f, 0x1a, 0x7e },
		{ 0x8c, 0x68, 0xff },
		{ 0x8d, 0x08, 0xff },
		{ 0x8e, 0x4c, 0xff },
		{ 0x8f, 0x01, 0x01 },
		{ 0x8b, 0x04, 0x04 },
		{ 0x8b, 0x00, 0x04 },
		{ 0x87, 0x05, 0x07 },
		{ 0x80, 0x00, 0x20 },
		{ 0xc8, 0x01, 0xff },
		{ 0xb4, 0x47, 0xff },
		{ 0xb5, 0x9c, 0xff },
		{ 0xb6, 0x7d, 0xff },
		{ 0xba, 0x00, 0x03 },
		{ 0xb7, 0x47, 0xff },
		{ 0xb8, 0x9c, 0xff },
		{ 0xb9, 0x7d, 0xff },
		{ 0xba, 0x00, 0x0c },
		{ 0xc8, 0x00, 0xff },
		{ 0xcd, 0x00, 0x04 },
		{ 0xcd, 0x00, 0x20 },
		{ 0xe8, 0x02, 0xff },
		{ 0xcf, 0x20, 0xff },
		{ 0x9b, 0xd7, 0xff },
		{ 0x9a, 0x01, 0x03 },
		{ 0xa8, 0x05, 0x0f },
		{ 0xa8, 0x65, 0xf0 },
		{ 0xa6, 0xa0, 0xf0 },
		{ 0x9d, 0x50, 0xfc },
		{ 0x9e, 0x20, 0xe0 },
		{ 0xa3, 0x1c, 0x7c },
		{ 0xd5, 0x03, 0x03 },
	};

	if (dev->warm) {
		/* warm state - wake up device from sleep */
		for (i = 0; i < ARRAY_SIZE(tab); i++) {
			ret = tda10071_wr_reg_mask(dev, tab[i].reg,
				tab[i].val, tab[i].mask);
			if (ret)
				goto error;
		}

		cmd.args[0] = CMD_SET_SLEEP_MODE;
		cmd.args[1] = 0;
		cmd.args[2] = 0;
		cmd.len = 3;
		ret = tda10071_cmd_execute(dev, &cmd);
		if (ret)
			goto error;
	} else {
		/* cold state - try to download firmware */

		/* request the firmware, this will block and timeout */
		ret = request_firmware(&fw, fw_file, &client->dev);
		if (ret) {
			dev_err(&client->dev,
				"did not find the firmware file. (%s) Please see linux/Documentation/dvb/ for more details on firmware-problems. (%d)\n",
				fw_file, ret);
			goto error;
		}

		/* init */
		for (i = 0; i < ARRAY_SIZE(tab2); i++) {
			ret = tda10071_wr_reg_mask(dev, tab2[i].reg,
				tab2[i].val, tab2[i].mask);
			if (ret)
				goto error_release_firmware;
		}

		/* download firmware */
		ret = regmap_write(dev->regmap, 0xe0, 0x7f);
		if (ret)
			goto error_release_firmware;

		ret = regmap_write(dev->regmap, 0xf7, 0x81);
		if (ret)
			goto error_release_firmware;

		ret = regmap_write(dev->regmap, 0xf8, 0x00);
		if (ret)
			goto error_release_firmware;

		ret = regmap_write(dev->regmap, 0xf9, 0x00);
		if (ret)
			goto error_release_firmware;

		dev_info(&client->dev,
			 "found a '%s' in cold state, will try to load a firmware\n",
			 tda10071_ops.info.name);
		dev_info(&client->dev, "downloading firmware from file '%s'\n",
			 fw_file);

		/* do not download last byte */
		fw_size = fw->size - 1;
		/* stream the image through register 0xfa, one I2C-sized
		 * chunk at a time */
		for (remaining = fw_size; remaining > 0;
			remaining -= (dev->i2c_wr_max - 1)) {
			len = remaining;
			if (len > (dev->i2c_wr_max - 1))
				len = (dev->i2c_wr_max - 1);

			ret = regmap_bulk_write(dev->regmap, 0xfa,
				(u8 *) &fw->data[fw_size - remaining], len);
			if (ret) {
				dev_err(&client->dev,
					"firmware download failed=%d\n", ret);
				goto error_release_firmware;
			}
		}
		release_firmware(fw);

		ret = regmap_write(dev->regmap, 0xf7, 0x0c);
		if (ret)
			goto error;

		ret = regmap_write(dev->regmap, 0xe0, 0x00);
		if (ret)
			goto error;

		/* wait firmware start */
		msleep(250);

		/* firmware status: 0x51 must read back zero */
		ret = regmap_read(dev->regmap, 0x51, &uitmp);
		if (ret)
			goto error;

		if (uitmp) {
			dev_info(&client->dev, "firmware did not run\n");
			ret = -EFAULT;
			goto error;
		} else {
			dev->warm = true;
		}

		cmd.args[0] = CMD_GET_FW_VERSION;
		cmd.len = 1;
		ret = tda10071_cmd_execute(dev, &cmd);
		if (ret)
			goto error;

		ret = regmap_bulk_read(dev->regmap, cmd.len, buf, 4);
		if (ret)
			goto error;

		dev_info(&client->dev, "firmware version %d.%d.%d.%d\n",
			 buf[0], buf[1], buf[2], buf[3]);
		dev_info(&client->dev, "found a '%s' in warm state\n",
			 tda10071_ops.info.name);

		ret = regmap_bulk_read(dev->regmap, 0x81, buf, 2);
		if (ret)
			goto error;

		cmd.args[0] = CMD_DEMOD_INIT;
		cmd.args[1] = ((dev->clk / 1000) >> 8) & 0xff;
		cmd.args[2] = ((dev->clk / 1000) >> 0) & 0xff;
		cmd.args[3] = buf[0];
		cmd.args[4] = buf[1];
		cmd.args[5] = dev->pll_multiplier;
		cmd.args[6] = dev->spec_inv;
		cmd.args[7] = 0x00;
		cmd.len = 8;
		ret = tda10071_cmd_execute(dev, &cmd);
		if (ret)
			goto error;

		/* platform may override the default tuner I2C address 0x14 */
		if (dev->tuner_i2c_addr)
			tmp = dev->tuner_i2c_addr;
		else
			tmp = 0x14;

		cmd.args[0] = CMD_TUNER_INIT;
		cmd.args[1] = 0x00;
		cmd.args[2] = 0x00;
		cmd.args[3] = 0x00;
		cmd.args[4] = 0x00;
		cmd.args[5] = tmp;
		cmd.args[6] = 0x00;
		cmd.args[7] = 0x03;
		cmd.args[8] = 0x02;
		cmd.args[9] = 0x02;
		cmd.args[10] = 0x00;
		cmd.args[11] = 0x00;
		cmd.args[12] = 0x00;
		cmd.args[13] = 0x00;
		cmd.args[14] = 0x00;
		cmd.len = 15;
		ret = tda10071_cmd_execute(dev, &cmd);
		if (ret)
			goto error;

		cmd.args[0] = CMD_MPEG_CONFIG;
		cmd.args[1] = 0;
		cmd.args[2] = dev->ts_mode;
		cmd.args[3] = 0x00;
		cmd.args[4] = 0x04;
		cmd.args[5] = 0x00;
		cmd.len = 6;
		ret = tda10071_cmd_execute(dev, &cmd);
		if (ret)
			goto error;

		ret = regmap_update_bits(dev->regmap, 0xf0, 0x01, 0x01);
		if (ret)
			goto error;

		cmd.args[0] = CMD_LNB_CONFIG;
		cmd.args[1] = 0;
		cmd.args[2] = 150;
		cmd.args[3] = 3;
		cmd.args[4] = 22;
		cmd.args[5] = 1;
		cmd.args[6] = 1;
		cmd.args[7] = 30;
		cmd.args[8] = 30;
		cmd.args[9] = 30;
		cmd.args[10] = 30;
		cmd.len = 11;
		ret = tda10071_cmd_execute(dev, &cmd);
		if (ret)
			goto error;

		cmd.args[0] = CMD_BER_CONTROL;
		cmd.args[1] = 0;
		cmd.args[2] = 14;
		cmd.args[3] = 14;
		cmd.len = 4;
		ret = tda10071_cmd_execute(dev, &cmd);
		if (ret)
			goto error;
	}

	/* init stats here in order signal app which stats are supported */
	c->strength.len = 1;
	c->strength.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	c->cnr.len = 1;
	c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	c->post_bit_error.len = 1;
	c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;
	c->block_error.len = 1;
	c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE;

	return ret;
error_release_firmware:
	release_firmware(fw);
error:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
/*
 * Put the device to sleep: CMD_SET_SLEEP_MODE(1) followed by a register
 * table that is the inverse of the wake-up table used in tda10071_init().
 */
static int tda10071_sleep(struct dvb_frontend *fe)
{
	struct tda10071_dev *dev = fe->demodulator_priv;
	struct i2c_client *client = dev->client;
	struct tda10071_cmd cmd;
	int ret, i;
	struct tda10071_reg_val_mask tab[] = {
		{ 0xcd, 0x07, 0x07 },
		{ 0x80, 0x02, 0x02 },
		{ 0xcd, 0xc0, 0xc0 },
		{ 0xce, 0x1b, 0x1b },
		{ 0x9d, 0x01, 0x01 },
		{ 0x9d, 0x02, 0x02 },
		{ 0x9e, 0x01, 0x01 },
		{ 0x87, 0x80, 0x80 },
		{ 0xce, 0x08, 0x08 },
		{ 0xce, 0x10, 0x10 },
	};

	if (!dev->warm) {
		ret = -EFAULT;
		goto error;
	}

	cmd.args[0] = CMD_SET_SLEEP_MODE;
	cmd.args[1] = 0;
	cmd.args[2] = 1;
	cmd.len = 3;
	ret = tda10071_cmd_execute(dev, &cmd);
	if (ret)
		goto error;

	for (i = 0; i < ARRAY_SIZE(tab); i++) {
		ret = tda10071_wr_reg_mask(dev, tab[i].reg, tab[i].val,
			tab[i].mask);
		if (ret)
			goto error;
	}

	return ret;
error:
	dev_dbg(&client->dev, "failed=%d\n", ret);
	return ret;
}
/* Tuning strategy hints for the DVB core: long settle time, no zig-zag. */
static int tda10071_get_tune_settings(struct dvb_frontend *fe,
	struct dvb_frontend_tune_settings *s)
{
	s->step_size = 0;
	s->max_drift = 0;
	s->min_delay_ms = 8000;

	return 0;
}
/* DVB frontend operations for the TDA10071 (DVB-S and DVB-S2). */
static struct dvb_frontend_ops tda10071_ops = {
	.delsys = { SYS_DVBS, SYS_DVBS2 },
	.info = {
		.name = "NXP TDA10071",
		.frequency_min = 950000,
		.frequency_max = 2150000,
		/* also fed to the firmware in CMD_CHANGE_CHANNEL */
		.frequency_tolerance = 5000,
		.symbol_rate_min = 1000000,
		.symbol_rate_max = 45000000,
		.caps = FE_CAN_INVERSION_AUTO |
			FE_CAN_FEC_1_2 |
			FE_CAN_FEC_2_3 |
			FE_CAN_FEC_3_4 |
			FE_CAN_FEC_4_5 |
			FE_CAN_FEC_5_6 |
			FE_CAN_FEC_6_7 |
			FE_CAN_FEC_7_8 |
			FE_CAN_FEC_8_9 |
			FE_CAN_FEC_AUTO |
			FE_CAN_QPSK |
			FE_CAN_RECOVER |
			FE_CAN_2G_MODULATION
	},

	.get_tune_settings = tda10071_get_tune_settings,

	.init = tda10071_init,
	.sleep = tda10071_sleep,

	.set_frontend = tda10071_set_frontend,
	.get_frontend = tda10071_get_frontend,

	.read_status = tda10071_read_status,
	.read_snr = tda10071_read_snr,
	.read_signal_strength = tda10071_read_signal_strength,
	.read_ber = tda10071_read_ber,
	.read_ucblocks = tda10071_read_ucblocks,

	.diseqc_send_master_cmd = tda10071_diseqc_send_master_cmd,
	.diseqc_recv_slave_reply = tda10071_diseqc_recv_slave_reply,
	.diseqc_send_burst = tda10071_diseqc_send_burst,

	.set_tone = tda10071_set_tone,
	.set_voltage = tda10071_set_voltage,
};
/* Platform-data callback: expose the embedded dvb_frontend to the bridge. */
static struct dvb_frontend *tda10071_get_dvb_frontend(struct i2c_client *client)
{
	struct tda10071_dev *dev = i2c_get_clientdata(client);

	dev_dbg(&client->dev, "\n");

	return &dev->fe;
}
static int tda10071_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct tda10071_dev *dev;
struct tda10071_platform_data *pdata = client->dev.platform_data;
int ret;
unsigned int uitmp;
static const struct regmap_config regmap_config = {
.reg_bits = 8,
.val_bits = 8,
};
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev) {
ret = -ENOMEM;
goto err;
}
dev->client = client;
mutex_init(&dev->cmd_execute_mutex);
dev->clk = pdata->clk;
dev->i2c_wr_max = pdata->i2c_wr_max;
dev->ts_mode = pdata->ts_mode;
dev->spec_inv = pdata->spec_inv;
dev->pll_multiplier = pdata->pll_multiplier;
dev->tuner_i2c_addr = pdata->tuner_i2c_addr;
dev->regmap = devm_regmap_init_i2c(client, ®map_config);
if (IS_ERR(dev->regmap)) {
ret = PTR_ERR(dev->regmap);
goto err_kfree;
}
/* chip ID */
ret = regmap_read(dev->regmap, 0xff, &uitmp);
if (ret)
goto err_kfree;
if (uitmp != 0x0f) {
ret = -ENODEV;
goto err_kfree;
}
/* chip type */
ret = regmap_read(dev->regmap, 0xdd, &uitmp);
if (ret)
goto err_kfree;
if (uitmp != 0x00) {
ret = -ENODEV;
goto err_kfree;
}
/* chip version */
ret = regmap_read(dev->regmap, 0xfe, &uitmp);
if (ret)
goto err_kfree;
if (uitmp != 0x01) {
ret = -ENODEV;
goto err_kfree;
}
/* create dvb_frontend */
memcpy(&dev->fe.ops, &tda10071_ops, sizeof(struct dvb_frontend_ops));
dev->fe.demodulator_priv = dev;
i2c_set_clientdata(client, dev);
/* setup callbacks */
pdata->get_dvb_frontend = tda10071_get_dvb_frontend;
dev_info(&client->dev, "NXP TDA10071 successfully identified\n");
return 0;
err_kfree:
kfree(dev);
err:
dev_dbg(&client->dev, "failed=%d\n", ret);
return ret;
}
/* I2C remove: free driver state (regmap is device-managed). */
static int tda10071_remove(struct i2c_client *client)
{
	struct tda10071_dev *dev = i2c_get_clientdata(client);

	dev_dbg(&client->dev, "\n");

	kfree(dev);
	return 0;
}
/* I2C device IDs this driver binds to. */
static const struct i2c_device_id tda10071_id_table[] = {
	{"tda10071_cx24118", 0},
	{}
};
MODULE_DEVICE_TABLE(i2c, tda10071_id_table);
static struct i2c_driver tda10071_driver = {
	.driver = {
		.name = "tda10071",
		/* device is instantiated by bridge/platform code only;
		 * disallow manual bind/unbind via sysfs */
		.suppress_bind_attrs = true,
	},
	.probe		= tda10071_probe,
	.remove		= tda10071_remove,
	.id_table	= tda10071_id_table,
};

module_i2c_driver(tda10071_driver);

MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_DESCRIPTION("NXP TDA10071 DVB-S/S2 demodulator driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(TDA10071_FIRMWARE);
| gpl-2.0 |
TEAM-Gummy/Gummy_kernel_grouper | drivers/net/mlx4/main.c | 394 | 41629 | /*
* Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
* Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/io-mapping.h>
#include <linux/mlx4/device.h>
#include <linux/mlx4/doorbell.h>
#include "mlx4.h"
#include "fw.h"
#include "icm.h"
MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA low-level driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);
struct workqueue_struct *mlx4_wq;
#ifdef CONFIG_MLX4_DEBUG
int mlx4_debug_level = 0;
module_param_named(debug_level, mlx4_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif /* CONFIG_MLX4_DEBUG */
#ifdef CONFIG_PCI_MSI
static int msi_x = 1;
module_param(msi_x, int, 0444);
MODULE_PARM_DESC(msi_x, "attempt to use MSI-X if nonzero");
#else /* CONFIG_PCI_MSI */
#define msi_x (0)
#endif /* CONFIG_PCI_MSI */
static char mlx4_version[] __devinitdata =
DRV_NAME ": Mellanox ConnectX core driver v"
DRV_VERSION " (" DRV_RELDATE ")\n";
static struct mlx4_profile default_profile = {
.num_qp = 1 << 17,
.num_srq = 1 << 16,
.rdmarc_per_qp = 1 << 4,
.num_cq = 1 << 16,
.num_mcg = 1 << 13,
.num_mpt = 1 << 17,
.num_mtt = 1 << 20,
};
static int log_num_mac = 2;
module_param_named(log_num_mac, log_num_mac, int, 0444);
MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
static int log_num_vlan;
module_param_named(log_num_vlan, log_num_vlan, int, 0444);
MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
static int use_prio;
module_param_named(use_prio, use_prio, bool, 0444);
MODULE_PARM_DESC(use_prio, "Enable steering by VLAN priority on ETH ports "
"(0/1, default 0)");
static int log_mtts_per_seg = ilog2(MLX4_MTT_ENTRY_PER_SEG);
module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);
MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-7)");
/*
 * Validate a requested per-port type configuration.
 * Without DPDP support all ports must share one type; even with it, an
 * ETH port may not be immediately followed by an IB port. Every
 * requested type must also be in the HCA's supported set for that port.
 */
int mlx4_check_port_params(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_type)
{
	int i;

	for (i = 0; i < dev->caps.num_ports - 1; i++) {
		if (port_type[i] == port_type[i + 1])
			continue;

		if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
			mlx4_err(dev, "Only same port types supported "
				 "on this HCA, aborting.\n");
			return -EINVAL;
		}
		if (port_type[i] == MLX4_PORT_TYPE_ETH &&
		    port_type[i + 1] == MLX4_PORT_TYPE_IB)
			return -EINVAL;
	}

	for (i = 0; i < dev->caps.num_ports; i++) {
		if (!(port_type[i] & dev->caps.supported_type[i + 1])) {
			mlx4_err(dev, "Requested port type for port %d is not "
				 "supported on this HCA\n", i + 1);
			return -EINVAL;
		}
	}

	return 0;
}
/* Rebuild caps.port_mask: bit (port - 1) is set for each IB port. */
static void mlx4_set_port_mask(struct mlx4_dev *dev)
{
	int port;

	dev->caps.port_mask = 0;
	for (port = 1; port <= dev->caps.num_ports; ++port) {
		if (dev->caps.port_type[port] == MLX4_PORT_TYPE_IB)
			dev->caps.port_mask |= 1 << (port - 1);
	}
}
/*
 * Query device capabilities via QUERY_DEV_CAP and populate dev->caps,
 * sanity-checking page size, port count and UAR BAR size, and clamping
 * the log_num_mac/log_num_vlan module parameters to per-port HW limits.
 */
static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
{
	int err;
	int i;

	err = mlx4_QUERY_DEV_CAP(dev, dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		return err;
	}

	if (dev_cap->min_page_sz > PAGE_SIZE) {
		mlx4_err(dev, "HCA minimum page size of %d bigger than "
			 "kernel PAGE_SIZE of %ld, aborting.\n",
			 dev_cap->min_page_sz, PAGE_SIZE);
		return -ENODEV;
	}
	if (dev_cap->num_ports > MLX4_MAX_PORTS) {
		mlx4_err(dev, "HCA has %d ports, but we only support %d, "
			 "aborting.\n",
			 dev_cap->num_ports, MLX4_MAX_PORTS);
		return -ENODEV;
	}

	if (dev_cap->uar_size > pci_resource_len(dev->pdev, 2)) {
		mlx4_err(dev, "HCA reported UAR size of 0x%x bigger than "
			 "PCI resource 2 size of 0x%llx, aborting.\n",
			 dev_cap->uar_size,
			 (unsigned long long) pci_resource_len(dev->pdev, 2));
		return -ENODEV;
	}

	dev->caps.num_ports	     = dev_cap->num_ports;
	/* copy per-port capabilities (ports are 1-based) */
	for (i = 1; i <= dev->caps.num_ports; ++i) {
		dev->caps.vl_cap[i]	    = dev_cap->max_vl[i];
		dev->caps.ib_mtu_cap[i]	    = dev_cap->ib_mtu[i];
		dev->caps.gid_table_len[i]  = dev_cap->max_gids[i];
		dev->caps.pkey_table_len[i] = dev_cap->max_pkeys[i];
		dev->caps.port_width_cap[i] = dev_cap->max_port_width[i];
		dev->caps.eth_mtu_cap[i]    = dev_cap->eth_mtu[i];
		dev->caps.def_mac[i]	    = dev_cap->def_mac[i];
		dev->caps.supported_type[i] = dev_cap->supported_port_types[i];
		dev->caps.trans_type[i]	    = dev_cap->trans_type[i];
		dev->caps.vendor_oui[i]     = dev_cap->vendor_oui[i];
		dev->caps.wavelength[i]     = dev_cap->wavelength[i];
		dev->caps.trans_code[i]     = dev_cap->trans_code[i];
	}

	dev->caps.num_uars	     = dev_cap->uar_size / PAGE_SIZE;
	dev->caps.local_ca_ack_delay = dev_cap->local_ca_ack_delay;
	dev->caps.bf_reg_size	     = dev_cap->bf_reg_size;
	dev->caps.bf_regs_per_page   = dev_cap->bf_regs_per_page;
	dev->caps.max_sq_sg	     = dev_cap->max_sq_sg;
	dev->caps.max_rq_sg	     = dev_cap->max_rq_sg;
	dev->caps.max_wqes	     = dev_cap->max_qp_sz;
	dev->caps.max_qp_init_rdma   = dev_cap->max_requester_per_qp;
	dev->caps.max_srq_wqes	     = dev_cap->max_srq_sz;
	dev->caps.max_srq_sge	     = dev_cap->max_rq_sg - 1;
	dev->caps.reserved_srqs	     = dev_cap->reserved_srqs;
	dev->caps.max_sq_desc_sz     = dev_cap->max_sq_desc_sz;
	dev->caps.max_rq_desc_sz     = dev_cap->max_rq_desc_sz;
	dev->caps.num_qp_per_mgm     = MLX4_QP_PER_MGM;
	/*
	 * Subtract 1 from the limit because we need to allocate a
	 * spare CQE so the HCA HW can tell the difference between an
	 * empty CQ and a full CQ.
	 */
	dev->caps.max_cqes	     = dev_cap->max_cq_sz - 1;
	dev->caps.reserved_cqs	     = dev_cap->reserved_cqs;
	dev->caps.reserved_eqs	     = dev_cap->reserved_eqs;
	dev->caps.mtts_per_seg	     = 1 << log_mtts_per_seg;
	dev->caps.reserved_mtts	     = DIV_ROUND_UP(dev_cap->reserved_mtts,
						    dev->caps.mtts_per_seg);
	dev->caps.reserved_mrws	     = dev_cap->reserved_mrws;
	dev->caps.reserved_uars	     = dev_cap->reserved_uars;
	dev->caps.reserved_pds	     = dev_cap->reserved_pds;
	dev->caps.mtt_entry_sz	     = dev->caps.mtts_per_seg * dev_cap->mtt_entry_sz;
	dev->caps.max_msg_sz	     = dev_cap->max_msg_sz;
	dev->caps.page_size_cap	     = ~(u32) (dev_cap->min_page_sz - 1);
	dev->caps.flags		     = dev_cap->flags;
	dev->caps.bmme_flags	     = dev_cap->bmme_flags;
	dev->caps.reserved_lkey	     = dev_cap->reserved_lkey;
	dev->caps.stat_rate_support  = dev_cap->stat_rate_support;
	dev->caps.max_gso_sz	     = dev_cap->max_gso_sz;

	dev->caps.log_num_macs  = log_num_mac;
	dev->caps.log_num_vlans = log_num_vlan;
	dev->caps.log_num_prios = use_prio ? 3 : 0;

	for (i = 1; i <= dev->caps.num_ports; ++i) {
		/* default port type: IB unless only ETH is supported */
		if (dev->caps.supported_type[i] != MLX4_PORT_TYPE_ETH)
			dev->caps.port_type[i] = MLX4_PORT_TYPE_IB;
		else
			dev->caps.port_type[i] = MLX4_PORT_TYPE_ETH;
		dev->caps.possible_type[i] = dev->caps.port_type[i];
		mlx4_priv(dev)->sense.sense_allowed[i] =
			dev->caps.supported_type[i] == MLX4_PORT_TYPE_AUTO;

		/* clamp module parameters to per-port HW limits */
		if (dev->caps.log_num_macs > dev_cap->log_max_macs[i]) {
			dev->caps.log_num_macs = dev_cap->log_max_macs[i];
			mlx4_warn(dev, "Requested number of MACs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_macs);
		}
		if (dev->caps.log_num_vlans > dev_cap->log_max_vlans[i]) {
			dev->caps.log_num_vlans = dev_cap->log_max_vlans[i];
			mlx4_warn(dev, "Requested number of VLANs is too much "
				  "for port %d, reducing to %d.\n",
				  i, 1 << dev->caps.log_num_vlans);
		}
	}

	mlx4_set_port_mask(dev);

	dev->caps.max_counters = 1 << ilog2(dev_cap->max_counters);

	/* compute the reserved-QP regions */
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] = dev_cap->reserved_qps;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] =
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] =
		(1 << dev->caps.log_num_macs) *
		(1 << dev->caps.log_num_vlans) *
		(1 << dev->caps.log_num_prios) *
		dev->caps.num_ports;
	dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH] = MLX4_NUM_FEXCH;

	dev->caps.reserved_qps = dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_ETH_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_ADDR] +
		dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FC_EXCH];

	return 0;
}
/*
 * Change the port configuration of the device.
 * Every user of this function must hold the port mutex.
 */
int mlx4_change_port_types(struct mlx4_dev *dev,
			   enum mlx4_port_type *port_types)
{
	int changed = 0;
	int err = 0;
	int port;

	/* Record the requested types, noting whether anything differs
	 * from the currently configured per-port type. */
	for (port = 0; port < dev->caps.num_ports; port++) {
		if (port_types[port] == dev->caps.port_type[port + 1])
			continue;
		dev->caps.port_type[port + 1] = port_types[port];
		changed = 1;
	}

	if (!changed)
		return 0;

	/* Re-plumb every port: drop the registered device, close and
	 * reconfigure each port, then re-register. */
	mlx4_unregister_device(dev);
	for (port = 1; port <= dev->caps.num_ports; port++) {
		mlx4_CLOSE_PORT(dev, port);
		err = mlx4_SET_PORT(dev, port);
		if (err) {
			mlx4_err(dev, "Failed to set port %d, "
				      "aborting\n", port);
			return err;
		}
	}
	mlx4_set_port_mask(dev);
	return mlx4_register_device(dev);
}
/* sysfs "show" handler: report the port's link type, marking ports that
 * are configured for auto-sensing as "auto (<current type>)". */
static ssize_t show_port_type(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	const char *type;

	type = (mdev->caps.port_type[info->port] == MLX4_PORT_TYPE_IB) ?
		"ib" : "eth";

	if (mdev->caps.possible_type[info->port] == MLX4_PORT_TYPE_AUTO)
		sprintf(buf, "auto (%s)\n", type);
	else
		sprintf(buf, "%s\n", type);

	return strlen(buf);
}
/*
 * sysfs "store" handler: request a new link type ("ib", "eth" or "auto")
 * for one port, validate the resulting combination across all ports, and
 * apply it via mlx4_change_port_types().
 *
 * Returns @count on success, a negative errno on failure.
 */
static ssize_t set_port_type(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t count)
{
	struct mlx4_port_info *info = container_of(attr, struct mlx4_port_info,
						   port_attr);
	struct mlx4_dev *mdev = info->dev;
	struct mlx4_priv *priv = mlx4_priv(mdev);
	enum mlx4_port_type types[MLX4_MAX_PORTS];
	enum mlx4_port_type new_types[MLX4_MAX_PORTS];
	int i;
	int err = 0;

	/* Parse the user string; anything else is rejected. */
	if (!strcmp(buf, "ib\n"))
		info->tmp_type = MLX4_PORT_TYPE_IB;
	else if (!strcmp(buf, "eth\n"))
		info->tmp_type = MLX4_PORT_TYPE_ETH;
	else if (!strcmp(buf, "auto\n"))
		info->tmp_type = MLX4_PORT_TYPE_AUTO;
	else {
		mlx4_err(mdev, "%s is not supported port type\n", buf);
		return -EINVAL;
	}

	/* Quiesce the sensing machinery while we reconfigure. */
	mlx4_stop_sense(mdev);
	mutex_lock(&priv->port_mutex);
	/* Possible type is always the one that was delivered */
	mdev->caps.possible_type[info->port] = info->tmp_type;

	/* Build the candidate per-port type vector: prefer any pending
	 * (tmp) request, fall back to the possible type, and resolve
	 * AUTO to the currently active type. */
	for (i = 0; i < mdev->caps.num_ports; i++) {
		types[i] = priv->port[i+1].tmp_type ? priv->port[i+1].tmp_type :
					mdev->caps.possible_type[i+1];
		if (types[i] == MLX4_PORT_TYPE_AUTO)
			types[i] = mdev->caps.port_type[i+1];
	}

	/* Without the DPDP capability, "auto" is not supported: revert
	 * any AUTO request and fail. */
	if (!(mdev->caps.flags & MLX4_DEV_CAP_FLAG_DPDP)) {
		for (i = 1; i <= mdev->caps.num_ports; i++) {
			if (mdev->caps.possible_type[i] == MLX4_PORT_TYPE_AUTO) {
				mdev->caps.possible_type[i] = mdev->caps.port_type[i];
				err = -EINVAL;
			}
		}
	}
	if (err) {
		mlx4_err(mdev, "Auto sensing is not supported on this HCA. "
			       "Set only 'eth' or 'ib' for both ports "
			       "(should be the same)\n");
		goto out;
	}

	mlx4_do_sense_ports(mdev, new_types, types);

	err = mlx4_check_port_params(mdev, new_types);
	if (err)
		goto out;

	/* We are about to apply the changes after the configuration
	 * was verified, no need to remember the temporary types
	 * any more */
	for (i = 0; i < mdev->caps.num_ports; i++)
		priv->port[i + 1].tmp_type = 0;

	err = mlx4_change_port_types(mdev, new_types);

out:
	mlx4_start_sense(mdev);
	mutex_unlock(&priv->port_mutex);
	return err ? err : count;
}
/*
 * Allocate host memory for the firmware image, map it to the device
 * (MAP_FA) and start the firmware (RUN_FW).  On any failure everything
 * acquired so far is released before returning the error.
 */
static int mlx4_load_fw(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	priv->fw.fw_icm = mlx4_alloc_icm(dev, priv->fw.fw_pages,
					 GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.fw_icm) {
		mlx4_err(dev, "Couldn't allocate FW area, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_FA(dev, priv->fw.fw_icm);
	if (err) {
		mlx4_err(dev, "MAP_FA command failed, aborting.\n");
		goto free_icm;
	}

	err = mlx4_RUN_FW(dev);
	if (!err)
		return 0;

	mlx4_err(dev, "RUN_FW command failed, aborting.\n");
	mlx4_UNMAP_FA(dev);
free_icm:
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);
	return err;
}
/*
 * Set up the four cMPT (QP/SRQ/CQ/EQ) ICM sub-tables, each carved out
 * of @cmpt_base at an offset determined by its MLX4_CMPT_TYPE_* index.
 * On failure, already-initialized sub-tables are torn down in reverse
 * order via the goto chain.
 */
static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
				int cmpt_entry_sz)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;

	err = mlx4_init_icm_table(dev, &priv->qp_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_QP *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err)
		goto err;

	err = mlx4_init_icm_table(dev, &priv->srq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_SRQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err)
		goto err_qp;

	err = mlx4_init_icm_table(dev, &priv->cq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_CQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz, dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err)
		goto err_srq;

	/* All EQ entries are reserved (num == reserved below). */
	err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
				  cmpt_base +
				  ((u64) (MLX4_CMPT_TYPE_EQ *
					  cmpt_entry_sz) << MLX4_CMPT_SHIFT),
				  cmpt_entry_sz,
				  dev->caps.num_eqs, dev->caps.num_eqs, 0, 0);
	if (err)
		goto err_cq;

	return 0;

err_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);

err_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);

err_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err:
	return err;
}
/*
 * Size and map all InfiniHost Context Memory (ICM) tables: the aux area,
 * the cMPT tables, then one ICM table per object type (EQ, MTT, dMPT,
 * QP contexts, CQ, SRQ, MCG).  On failure, everything already mapped is
 * unwound in reverse order through the goto chain.
 */
static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
			 struct mlx4_init_hca_param *init_hca, u64 icm_size)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	u64 aux_pages;
	int err;

	/* Ask the firmware how much aux ICM the requested size needs. */
	err = mlx4_SET_ICM_SIZE(dev, icm_size, &aux_pages);
	if (err) {
		mlx4_err(dev, "SET_ICM_SIZE command failed, aborting.\n");
		return err;
	}

	mlx4_dbg(dev, "%lld KB of HCA context requires %lld KB aux memory.\n",
		 (unsigned long long) icm_size >> 10,
		 (unsigned long long) aux_pages << 2);

	priv->fw.aux_icm = mlx4_alloc_icm(dev, aux_pages,
					  GFP_HIGHUSER | __GFP_NOWARN, 0);
	if (!priv->fw.aux_icm) {
		mlx4_err(dev, "Couldn't allocate aux memory, aborting.\n");
		return -ENOMEM;
	}

	err = mlx4_MAP_ICM_AUX(dev, priv->fw.aux_icm);
	if (err) {
		mlx4_err(dev, "MAP_ICM_AUX command failed, aborting.\n");
		goto err_free_aux;
	}

	err = mlx4_init_cmpt_table(dev, init_hca->cmpt_base, dev_cap->cmpt_entry_sz);
	if (err) {
		mlx4_err(dev, "Failed to map cMPT context memory, aborting.\n");
		goto err_unmap_aux;
	}

	err = mlx4_init_icm_table(dev, &priv->eq_table.table,
				  init_hca->eqc_base, dev_cap->eqc_entry_sz,
				  dev->caps.num_eqs, dev->caps.num_eqs,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
		goto err_unmap_cmpt;
	}

	/*
	 * Reserved MTT entries must be aligned up to a cacheline
	 * boundary, since the FW will write to them, while the driver
	 * writes to all other MTT entries. (The variable
	 * dev->caps.mtt_entry_sz below is really the MTT segment
	 * size, not the raw entry size)
	 */
	dev->caps.reserved_mtts =
		ALIGN(dev->caps.reserved_mtts * dev->caps.mtt_entry_sz,
		      dma_get_cache_alignment()) / dev->caps.mtt_entry_sz;

	err = mlx4_init_icm_table(dev, &priv->mr_table.mtt_table,
				  init_hca->mtt_base,
				  dev->caps.mtt_entry_sz,
				  dev->caps.num_mtt_segs,
				  dev->caps.reserved_mtts, 1, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MTT context memory, aborting.\n");
		goto err_unmap_eq;
	}

	err = mlx4_init_icm_table(dev, &priv->mr_table.dmpt_table,
				  init_hca->dmpt_base,
				  dev_cap->dmpt_entry_sz,
				  dev->caps.num_mpts,
				  dev->caps.reserved_mrws, 1, 1);
	if (err) {
		mlx4_err(dev, "Failed to map dMPT context memory, aborting.\n");
		goto err_unmap_mtt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.qp_table,
				  init_hca->qpc_base,
				  dev_cap->qpc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map QP context memory, aborting.\n");
		goto err_unmap_dmpt;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.auxc_table,
				  init_hca->auxc_base,
				  dev_cap->aux_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map AUXC context memory, aborting.\n");
		goto err_unmap_qp;
	}

	err = mlx4_init_icm_table(dev, &priv->qp_table.altc_table,
				  init_hca->altc_base,
				  dev_cap->altc_entry_sz,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map ALTC context memory, aborting.\n");
		goto err_unmap_auxc;
	}

	/* RDMARC table entry size scales with the configured depth. */
	err = mlx4_init_icm_table(dev, &priv->qp_table.rdmarc_table,
				  init_hca->rdmarc_base,
				  dev_cap->rdmarc_entry_sz << priv->qp_table.rdmarc_shift,
				  dev->caps.num_qps,
				  dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW],
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map RDMARC context memory, aborting\n");
		goto err_unmap_altc;
	}

	err = mlx4_init_icm_table(dev, &priv->cq_table.table,
				  init_hca->cqc_base,
				  dev_cap->cqc_entry_sz,
				  dev->caps.num_cqs,
				  dev->caps.reserved_cqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map CQ context memory, aborting.\n");
		goto err_unmap_rdmarc;
	}

	err = mlx4_init_icm_table(dev, &priv->srq_table.table,
				  init_hca->srqc_base,
				  dev_cap->srq_entry_sz,
				  dev->caps.num_srqs,
				  dev->caps.reserved_srqs, 0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map SRQ context memory, aborting.\n");
		goto err_unmap_cq;
	}

	/*
	 * It's not strictly required, but for simplicity just map the
	 * whole multicast group table now.  The table isn't very big
	 * and it's a lot easier than trying to track ref counts.
	 */
	err = mlx4_init_icm_table(dev, &priv->mcg_table.table,
				  init_hca->mc_base, MLX4_MGM_ENTRY_SIZE,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  dev->caps.num_mgms + dev->caps.num_amgms,
				  0, 0);
	if (err) {
		mlx4_err(dev, "Failed to map MCG context memory, aborting.\n");
		goto err_unmap_srq;
	}

	return 0;

err_unmap_srq:
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);

err_unmap_cq:
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);

err_unmap_rdmarc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);

err_unmap_altc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);

err_unmap_auxc:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);

err_unmap_qp:
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);

err_unmap_dmpt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);

err_unmap_mtt:
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);

err_unmap_eq:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);

err_unmap_cmpt:
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

err_unmap_aux:
	mlx4_UNMAP_ICM_AUX(dev);

err_free_aux:
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);

	return err;
}
/*
 * Release every ICM table mapped by mlx4_init_icm(), in exact reverse
 * order of initialization, then unmap and free the aux area.
 */
static void mlx4_free_icms(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_cleanup_icm_table(dev, &priv->mcg_table.table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.rdmarc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.altc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.auxc_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
	mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
	/* The four cMPT sub-tables set up by mlx4_init_cmpt_table(). */
	mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
	mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);

	mlx4_UNMAP_ICM_AUX(dev);
	mlx4_free_icm(dev, priv->fw.aux_icm, 0);
}
/*
 * Map the blue-flame region: the part of BAR 2 that lies above the
 * UAR pages, mapped write-combining.  Returns -ENOMEM on failure.
 */
static int map_bf_area(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	resource_size_t start, len;

	start = pci_resource_start(dev->pdev, 2) +
		(dev->caps.num_uars << PAGE_SHIFT);
	len = pci_resource_len(dev->pdev, 2) -
		(dev->caps.num_uars << PAGE_SHIFT);

	priv->bf_mapping = io_mapping_create_wc(start, len);
	return priv->bf_mapping ? 0 : -ENOMEM;
}
static void unmap_bf_area(struct mlx4_dev *dev)
{
if (mlx4_priv(dev)->bf_mapping)
io_mapping_free(mlx4_priv(dev)->bf_mapping);
}
/*
 * Full teardown of a running HCA: unmap blue flame, issue CLOSE_HCA,
 * release all ICM tables, then unmap and free the firmware area.
 */
static void mlx4_close_hca(struct mlx4_dev *dev)
{
	unmap_bf_area(dev);
	mlx4_CLOSE_HCA(dev, 0);
	mlx4_free_icms(dev);
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, mlx4_priv(dev)->fw.fw_icm, 0);
}
/*
 * Bring the HCA up: query and load firmware, read device capabilities,
 * build the resource profile, map ICM, and issue INIT_HCA followed by
 * QUERY_ADAPTER.  On failure the goto chain reverses whatever already
 * succeeded.
 */
static int mlx4_init_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv	  *priv = mlx4_priv(dev);
	struct mlx4_adapter	   adapter;
	struct mlx4_dev_cap	   dev_cap;
	struct mlx4_mod_stat_cfg   mlx4_cfg;
	struct mlx4_profile	   profile;
	struct mlx4_init_hca_param init_hca;
	u64 icm_size;
	int err;

	err = mlx4_QUERY_FW(dev);
	if (err) {
		/* -EACCES means another function owns the device. */
		if (err == -EACCES)
			mlx4_info(dev, "non-primary physical function, skipping.\n");
		else
			mlx4_err(dev, "QUERY_FW command failed, aborting.\n");
		return err;
	}

	err = mlx4_load_fw(dev);
	if (err) {
		mlx4_err(dev, "Failed to start FW, aborting.\n");
		return err;
	}

	/* Override the firmware log page size; failure is non-fatal. */
	mlx4_cfg.log_pg_sz_m = 1;
	mlx4_cfg.log_pg_sz = 0;
	err = mlx4_MOD_STAT_CFG(dev, &mlx4_cfg);
	if (err)
		mlx4_warn(dev, "Failed to override log_pg_sz parameter\n");

	err = mlx4_dev_cap(dev, &dev_cap);
	if (err) {
		mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
		goto err_stop_fw;
	}

	profile = default_profile;

	/* A negative value encoded in the u64 signals a profile error. */
	icm_size = mlx4_make_profile(dev, &profile, &dev_cap, &init_hca);
	if ((long long) icm_size < 0) {
		err = icm_size;
		goto err_stop_fw;
	}

	/* Blue flame is optional; continue without it on failure. */
	if (map_bf_area(dev))
		mlx4_dbg(dev, "Failed to map blue flame area\n");

	init_hca.log_uar_sz = ilog2(dev->caps.num_uars);

	err = mlx4_init_icm(dev, &dev_cap, &init_hca, icm_size);
	if (err)
		goto err_stop_fw;

	err = mlx4_INIT_HCA(dev, &init_hca);
	if (err) {
		mlx4_err(dev, "INIT_HCA command failed, aborting.\n");
		goto err_free_icm;
	}

	err = mlx4_QUERY_ADAPTER(dev, &adapter);
	if (err) {
		mlx4_err(dev, "QUERY_ADAPTER command failed, aborting.\n");
		goto err_close;
	}

	priv->eq_table.inta_pin = adapter.inta_pin;
	memcpy(dev->board_id, adapter.board_id, sizeof dev->board_id);

	return 0;

err_close:
	mlx4_CLOSE_HCA(dev, 0);

err_free_icm:
	mlx4_free_icms(dev);

err_stop_fw:
	unmap_bf_area(dev);
	mlx4_UNMAP_FA(dev);
	mlx4_free_icm(dev, priv->fw.fw_icm, 0);

	return err;
}
/*
 * Set up the flow-counter allocation bitmap.  Returns -ENOENT when the
 * device lacks counter support so callers can treat that case as
 * non-fatal.
 */
static int mlx4_init_counters_table(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int nent = dev->caps.max_counters;

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	return mlx4_bitmap_init(&priv->counters_bitmap, nent, nent - 1, 0, 0);
}
/* Release the flow-counter allocation bitmap. */
static void mlx4_cleanup_counters_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->counters_bitmap);
}
/*
 * Allocate a flow counter index into *@idx.  Returns -ENOENT if the
 * device has no counter support, -ENOMEM when the pool is exhausted.
 */
int mlx4_counter_alloc(struct mlx4_dev *dev, u32 *idx)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	if (!(dev->caps.flags & MLX4_DEV_CAP_FLAG_COUNTERS))
		return -ENOENT;

	/* mlx4_bitmap_alloc() hands back (u32)-1 on exhaustion. */
	*idx = mlx4_bitmap_alloc(&priv->counters_bitmap);
	return (*idx == -1) ? -ENOMEM : 0;
}
EXPORT_SYMBOL_GPL(mlx4_counter_alloc);
/*
 * Return a flow counter index to the pool.
 *
 * @idx: index previously obtained from mlx4_counter_alloc().
 *
 * (The original carried a redundant bare "return;" as the last
 * statement of this void function; it has been dropped.)
 */
void mlx4_counter_free(struct mlx4_dev *dev, u32 idx)
{
	mlx4_bitmap_free(&mlx4_priv(dev)->counters_bitmap, idx);
}
EXPORT_SYMBOL_GPL(mlx4_counter_free);
/*
 * Initialize all software resource tables (UAR, PD, MR, EQ, CQ, SRQ, QP,
 * MCG, counters) and configure each port.  Teardown on failure runs in
 * exact reverse order of initialization via the goto chain.
 *
 * Fix vs. the previous revision: the counters table is initialized AFTER
 * the MCG table, but the old error labels cleaned up the counters table
 * *before* the MCG table and a counters-init failure jumped past the MCG
 * cleanup entirely, leaking the MCG table (and wrongly cleaning the
 * counters bitmap that had just failed to initialize).  The label order
 * and the two goto targets below are now consistent with init order.
 */
static int mlx4_setup_hca(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int err;
	int port;
	__be32 ib_port_default_caps;

	err = mlx4_init_uar_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "user access region table, aborting.\n");
		return err;
	}

	err = mlx4_uar_alloc(dev, &priv->driver_uar);
	if (err) {
		mlx4_err(dev, "Failed to allocate driver access region, "
			 "aborting.\n");
		goto err_uar_table_free;
	}

	/* Map the driver's own UAR page for kernel doorbells. */
	priv->kar = ioremap((phys_addr_t) priv->driver_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!priv->kar) {
		mlx4_err(dev, "Couldn't map kernel access region, "
			 "aborting.\n");
		err = -ENOMEM;
		goto err_uar_free;
	}

	err = mlx4_init_pd_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "protection domain table, aborting.\n");
		goto err_kar_unmap;
	}

	err = mlx4_init_mr_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "memory region table, aborting.\n");
		goto err_pd_table_free;
	}

	err = mlx4_init_eq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "event queue table, aborting.\n");
		goto err_mr_table_free;
	}

	err = mlx4_cmd_use_events(dev);
	if (err) {
		mlx4_err(dev, "Failed to switch to event-driven "
			 "firmware commands, aborting.\n");
		goto err_eq_table_free;
	}

	/* NOP generates an interrupt; verifies IRQ routing actually works. */
	err = mlx4_NOP(dev);
	if (err) {
		if (dev->flags & MLX4_FLAG_MSI_X) {
			mlx4_warn(dev, "NOP command failed to generate MSI-X "
				  "interrupt IRQ %d).\n",
				  priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_warn(dev, "Trying again without MSI-X.\n");
		} else {
			mlx4_err(dev, "NOP command failed to generate interrupt "
				 "(IRQ %d), aborting.\n",
				 priv->eq_table.eq[dev->caps.num_comp_vectors].irq);
			mlx4_err(dev, "BIOS or ACPI interrupt routing problem?\n");
		}

		goto err_cmd_poll;
	}

	mlx4_dbg(dev, "NOP command IRQ test passed\n");

	err = mlx4_init_cq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "completion queue table, aborting.\n");
		goto err_cmd_poll;
	}

	err = mlx4_init_srq_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "shared receive queue table, aborting.\n");
		goto err_cq_table_free;
	}

	err = mlx4_init_qp_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "queue pair table, aborting.\n");
		goto err_srq_table_free;
	}

	err = mlx4_init_mcg_table(dev);
	if (err) {
		mlx4_err(dev, "Failed to initialize "
			 "multicast group table, aborting.\n");
		goto err_qp_table_free;
	}

	/* -ENOENT just means no counter support; keep going. */
	err = mlx4_init_counters_table(dev);
	if (err && err != -ENOENT) {
		mlx4_err(dev, "Failed to initialize counters table, aborting.\n");
		goto err_mcg_table_free;
	}

	for (port = 1; port <= dev->caps.num_ports; port++) {
		enum mlx4_port_type port_type = 0;
		mlx4_SENSE_PORT(dev, port, &port_type);
		if (port_type)
			dev->caps.port_type[port] = port_type;
		ib_port_default_caps = 0;
		err = mlx4_get_port_ib_caps(dev, port, &ib_port_default_caps);
		if (err)
			mlx4_warn(dev, "failed to get port %d default "
				  "ib capabilities (%d). Continuing with "
				  "caps = 0\n", port, err);
		dev->caps.ib_port_def_cap[port] = ib_port_default_caps;
		err = mlx4_SET_PORT(dev, port);
		if (err) {
			mlx4_err(dev, "Failed to set port %d, aborting\n",
				port);
			goto err_counters_table_free;
		}
	}
	mlx4_set_port_mask(dev);

	return 0;

err_counters_table_free:
	mlx4_cleanup_counters_table(dev);

err_mcg_table_free:
	mlx4_cleanup_mcg_table(dev);

err_qp_table_free:
	mlx4_cleanup_qp_table(dev);

err_srq_table_free:
	mlx4_cleanup_srq_table(dev);

err_cq_table_free:
	mlx4_cleanup_cq_table(dev);

err_cmd_poll:
	mlx4_cmd_use_polling(dev);

err_eq_table_free:
	mlx4_cleanup_eq_table(dev);

err_mr_table_free:
	mlx4_cleanup_mr_table(dev);

err_pd_table_free:
	mlx4_cleanup_pd_table(dev);

err_kar_unmap:
	iounmap(priv->kar);

err_uar_free:
	mlx4_uar_free(dev, &priv->driver_uar);

err_uar_table_free:
	mlx4_cleanup_uar_table(dev);
	return err;
}
/*
 * Try to enable MSI-X with one vector per online CPU per port (capped by
 * MAX_MSIX_P_PORT and the device's EQ budget), retrying with whatever
 * the PCI core says is available.  Falls back to legacy INTx on any
 * failure, in which case num_comp_vectors is 1 and comp_pool is 0.
 */
static void mlx4_enable_msi_x(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct msix_entry *entries;
	int nreq = min_t(int, dev->caps.num_ports *
			 min_t(int, num_online_cpus() + 1, MAX_MSIX_P_PORT)
				+ MSIX_LEGACY_SZ, MAX_MSIX);
	int err;
	int i;

	if (msi_x) {
		/* Never request more vectors than non-reserved EQs. */
		nreq = min_t(int, dev->caps.num_eqs - dev->caps.reserved_eqs,
			     nreq);

		entries = kcalloc(nreq, sizeof *entries, GFP_KERNEL);
		if (!entries)
			goto no_msi;

		for (i = 0; i < nreq; ++i)
			entries[i].entry = i;

	retry:
		err = pci_enable_msix(dev->pdev, entries, nreq);
		if (err) {
			/* Try again if at least 2 vectors are available */
			if (err > 1) {
				/* pci_enable_msix() returns the number of
				 * vectors it could grant when err > 0. */
				mlx4_info(dev, "Requested %d vectors, "
					  "but only %d MSI-X vectors available, "
					  "trying again\n", nreq, err);
				nreq = err;
				goto retry;
			}
			kfree(entries);
			goto no_msi;
		}

		if (nreq <
		    MSIX_LEGACY_SZ + dev->caps.num_ports * MIN_MSIX_P_PORT) {
			/*Working in legacy mode , all EQ's shared*/
			dev->caps.comp_pool           = 0;
			dev->caps.num_comp_vectors = nreq - 1;
		} else {
			dev->caps.comp_pool           = nreq - MSIX_LEGACY_SZ;
			dev->caps.num_comp_vectors = MSIX_LEGACY_SZ - 1;
		}
		for (i = 0; i < nreq; ++i)
			priv->eq_table.eq[i].irq = entries[i].vector;

		dev->flags |= MLX4_FLAG_MSI_X;

		kfree(entries);
		return;
	}

no_msi:
	dev->caps.num_comp_vectors = 1;
	dev->caps.comp_pool	   = 0;

	/* Legacy INTx: both EQ slots share the single PCI IRQ. */
	for (i = 0; i < 2; ++i)
		priv->eq_table.eq[i].irq = dev->pdev->irq;
}
/*
 * Per-port software init: MAC/VLAN tables, base QP number for Ethernet
 * addressing, and the mlx4_portN sysfs attribute (show/set port type).
 * On sysfs failure info->port is set to -1 so that
 * mlx4_cleanup_port_info() knows there is no file to remove.
 */
static int mlx4_init_port_info(struct mlx4_dev *dev, int port)
{
	struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
	int err = 0;

	info->dev = dev;
	info->port = port;
	mlx4_init_mac_table(dev, &info->mac_table);
	mlx4_init_vlan_table(dev, &info->vlan_table);
	/* Each port owns a contiguous slice of the ETH_ADDR QP region. */
	info->base_qpn = dev->caps.reserved_qps_base[MLX4_QP_REGION_ETH_ADDR] +
			(port - 1) * (1 << log_num_mac);

	sprintf(info->dev_name, "mlx4_port%d", port);
	info->port_attr.attr.name = info->dev_name;
	info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
	info->port_attr.show      = show_port_type;
	info->port_attr.store     = set_port_type;
	sysfs_attr_init(&info->port_attr.attr);

	err = device_create_file(&dev->pdev->dev, &info->port_attr);
	if (err) {
		mlx4_err(dev, "Failed to create file for port %d\n", port);
		info->port = -1;
	}

	return err;
}
/* Remove the per-port sysfs attribute.  info->port is -1 when
 * mlx4_init_port_info() failed to create the file. */
static void mlx4_cleanup_port_info(struct mlx4_port_info *info)
{
	if (info->port >= 0)
		device_remove_file(&info->dev->pdev->dev, &info->port_attr);
}
/*
 * Allocate and initialize the per-port steering state: empty lists for
 * promiscuous QPs, steering entries (one pair per steer type) and
 * high-priority entries.
 */
static int mlx4_init_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int num_entries = dev->caps.num_ports;
	int i, j;

	/* kcalloc is the idiomatic (overflow-checked) form of the
	 * previous open-coded kzalloc(n * size). */
	priv->steer = kcalloc(num_entries, sizeof(struct mlx4_steer),
			      GFP_KERNEL);
	if (!priv->steer)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			INIT_LIST_HEAD(&priv->steer[i].promisc_qps[j]);
			INIT_LIST_HEAD(&priv->steer[i].steer_entries[j]);
		}
		INIT_LIST_HEAD(&priv->steer[i].high_prios);
	}
	return 0;
}
/*
 * Free all steering state allocated by mlx4_init_steering() plus any
 * entries added at runtime: for each port and steer type, drop the
 * promiscuous QP list, then each steering entry along with its
 * duplicates list.  Uses the _safe list iterators since nodes are
 * freed while walking.
 */
static void mlx4_clear_steering(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_steer_index *entry, *tmp_entry;
	struct mlx4_promisc_qp *pqp, *tmp_pqp;
	int num_entries = dev->caps.num_ports;
	int i, j;

	for (i = 0; i < num_entries; i++) {
		for (j = 0; j < MLX4_NUM_STEERS; j++) {
			list_for_each_entry_safe(pqp, tmp_pqp,
						 &priv->steer[i].promisc_qps[j],
						 list) {
				list_del(&pqp->list);
				kfree(pqp);
			}
			list_for_each_entry_safe(entry, tmp_entry,
						 &priv->steer[i].steer_entries[j],
						 list) {
				list_del(&entry->list);
				list_for_each_entry_safe(pqp, tmp_pqp,
							 &entry->duplicates,
							 list) {
					list_del(&pqp->list);
					kfree(pqp);
				}
				kfree(entry);
			}
		}
	}
	kfree(priv->steer);
}
/*
 * Core probe routine: validate BARs, set up DMA masks, allocate the
 * device structure, reset and initialize the HCA, configure interrupts,
 * steering and ports, and finally register the device.
 *
 * Fix vs. the previous revision: mlx4_cmd_init() failure jumped to the
 * cleanup path WITHOUT assigning its error to 'err', which was still 0
 * from the successful mlx4_reset() — so the probe freed everything yet
 * reported success to the PCI core.  'err' is now set from
 * mlx4_cmd_init()'s return value.
 */
static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct mlx4_priv *priv;
	struct mlx4_dev *dev;
	int err;
	int port;

	pr_info(DRV_NAME ": Initializing %s\n", pci_name(pdev));

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable PCI device, "
			"aborting.\n");
		return err;
	}

	/*
	 * Check for BARs.  We expect 0: 1MB
	 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(pdev, 0) != 1 << 20) {
		dev_err(&pdev->dev, "Missing DCS, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}
	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&pdev->dev, "Missing UAR, aborting.\n");
		err = -ENODEV;
		goto err_disable_pdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
		goto err_disable_pdev;
	}

	pci_set_master(pdev);

	/* Prefer 64-bit DMA, fall back to 32-bit. */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit PCI DMA mask.\n");
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
			goto err_release_regions;
		}
	}
	err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		dev_warn(&pdev->dev, "Warning: couldn't set 64-bit "
			 "consistent PCI DMA mask.\n");
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
				"aborting.\n");
			goto err_release_regions;
		}
	}

	/* Allow large DMA segments, up to the firmware limit of 1 GB */
	dma_set_max_seg_size(&pdev->dev, 1024 * 1024 * 1024);

	priv = kzalloc(sizeof *priv, GFP_KERNEL);
	if (!priv) {
		dev_err(&pdev->dev, "Device struct alloc failed, "
			"aborting.\n");
		err = -ENOMEM;
		goto err_release_regions;
	}

	dev       = &priv->dev;
	dev->pdev = pdev;
	INIT_LIST_HEAD(&priv->ctx_list);
	spin_lock_init(&priv->ctx_lock);

	mutex_init(&priv->port_mutex);

	INIT_LIST_HEAD(&priv->pgdir_list);
	mutex_init(&priv->pgdir_mutex);

	INIT_LIST_HEAD(&priv->bf_list);
	mutex_init(&priv->bf_mutex);

	dev->rev_id = pdev->revision;

	/*
	 * Now reset the HCA before we touch the PCI capabilities or
	 * attempt a firmware command, since a boot ROM may have left
	 * the HCA in an undefined state.
	 */
	err = mlx4_reset(dev);
	if (err) {
		mlx4_err(dev, "Failed to reset HCA, aborting.\n");
		goto err_free_dev;
	}

	err = mlx4_cmd_init(dev);
	if (err) {
		mlx4_err(dev, "Failed to init command interface, aborting.\n");
		goto err_free_dev;
	}

	err = mlx4_init_hca(dev);
	if (err)
		goto err_cmd;

	err = mlx4_alloc_eq_table(dev);
	if (err)
		goto err_close;

	priv->msix_ctl.pool_bm = 0;
	spin_lock_init(&priv->msix_ctl.pool_lock);

	mlx4_enable_msi_x(dev);

	err = mlx4_init_steering(dev);
	if (err)
		goto err_free_eq;

	err = mlx4_setup_hca(dev);
	if (err == -EBUSY && (dev->flags & MLX4_FLAG_MSI_X)) {
		/* Retry once in INTx mode if MSI-X setup got -EBUSY. */
		dev->flags &= ~MLX4_FLAG_MSI_X;
		pci_disable_msix(pdev);
		err = mlx4_setup_hca(dev);
	}

	if (err)
		goto err_steer;

	for (port = 1; port <= dev->caps.num_ports; port++) {
		err = mlx4_init_port_info(dev, port);
		if (err)
			goto err_port;
	}

	err = mlx4_register_device(dev);
	if (err)
		goto err_port;

	mlx4_sense_init(dev);
	mlx4_start_sense(dev);

	pci_set_drvdata(pdev, dev);

	return 0;

err_port:
	for (--port; port >= 1; --port)
		mlx4_cleanup_port_info(&priv->port[port]);

	mlx4_cleanup_counters_table(dev);
	mlx4_cleanup_mcg_table(dev);
	mlx4_cleanup_qp_table(dev);
	mlx4_cleanup_srq_table(dev);
	mlx4_cleanup_cq_table(dev);
	mlx4_cmd_use_polling(dev);
	mlx4_cleanup_eq_table(dev);
	mlx4_cleanup_mr_table(dev);
	mlx4_cleanup_pd_table(dev);
	mlx4_cleanup_uar_table(dev);

err_steer:
	mlx4_clear_steering(dev);

err_free_eq:
	mlx4_free_eq_table(dev);

err_close:
	if (dev->flags & MLX4_FLAG_MSI_X)
		pci_disable_msix(pdev);

	mlx4_close_hca(dev);

err_cmd:
	mlx4_cmd_cleanup(dev);

err_free_dev:
	kfree(priv);

err_release_regions:
	pci_release_regions(pdev);

err_disable_pdev:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}
/* PCI probe entry point: print the driver banner once, then delegate to
 * __mlx4_init_one() (also used by mlx4_restart_one()). */
static int __devinit mlx4_init_one(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	printk_once(KERN_INFO "%s", mlx4_version);

	return __mlx4_init_one(pdev, id);
}
/*
 * PCI remove: tear everything down in reverse order of __mlx4_init_one()
 * — stop sensing, unregister, close ports, free all resource tables,
 * close the HCA and release PCI resources.
 */
static void mlx4_remove_one(struct pci_dev *pdev)
{
	struct mlx4_dev  *dev  = pci_get_drvdata(pdev);
	struct mlx4_priv *priv = mlx4_priv(dev);
	int p;

	/* drvdata may be NULL if probe never completed. */
	if (dev) {
		mlx4_stop_sense(dev);
		mlx4_unregister_device(dev);

		for (p = 1; p <= dev->caps.num_ports; p++) {
			mlx4_cleanup_port_info(&priv->port[p]);
			mlx4_CLOSE_PORT(dev, p);
		}

		mlx4_cleanup_counters_table(dev);
		mlx4_cleanup_mcg_table(dev);
		mlx4_cleanup_qp_table(dev);
		mlx4_cleanup_srq_table(dev);
		mlx4_cleanup_cq_table(dev);
		mlx4_cmd_use_polling(dev);
		mlx4_cleanup_eq_table(dev);
		mlx4_cleanup_mr_table(dev);
		mlx4_cleanup_pd_table(dev);

		iounmap(priv->kar);
		mlx4_uar_free(dev, &priv->driver_uar);
		mlx4_cleanup_uar_table(dev);
		mlx4_clear_steering(dev);
		mlx4_free_eq_table(dev);
		mlx4_close_hca(dev);
		mlx4_cmd_cleanup(dev);

		if (dev->flags & MLX4_FLAG_MSI_X)
			pci_disable_msix(pdev);

		kfree(priv);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
	}
}
/* Fully tear the device down and probe it again from scratch. */
int mlx4_restart_one(struct pci_dev *pdev)
{
	mlx4_remove_one(pdev);
	return __mlx4_init_one(pdev, NULL);
}
/* PCI IDs of the ConnectX family devices this driver binds to. */
static DEFINE_PCI_DEVICE_TABLE(mlx4_pci_table) = {
	{ PCI_VDEVICE(MELLANOX, 0x6340) }, /* MT25408 "Hermon" SDR */
	{ PCI_VDEVICE(MELLANOX, 0x634a) }, /* MT25408 "Hermon" DDR */
	{ PCI_VDEVICE(MELLANOX, 0x6354) }, /* MT25408 "Hermon" QDR */
	{ PCI_VDEVICE(MELLANOX, 0x6732) }, /* MT25408 "Hermon" DDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x673c) }, /* MT25408 "Hermon" QDR PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6368) }, /* MT25408 "Hermon" EN 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6750) }, /* MT25408 "Hermon" EN 10GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x6372) }, /* MT25458 ConnectX EN 10GBASE-T 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x675a) }, /* MT25458 ConnectX EN 10GBASE-T+Gen2 10GigE */
	{ PCI_VDEVICE(MELLANOX, 0x6764) }, /* MT26468 ConnectX EN 10GigE PCIe gen2*/
	{ PCI_VDEVICE(MELLANOX, 0x6746) }, /* MT26438 ConnectX EN 40GigE PCIe gen2 5GT/s */
	{ PCI_VDEVICE(MELLANOX, 0x676e) }, /* MT26478 ConnectX2 40GigE PCIe gen2 */
	{ PCI_VDEVICE(MELLANOX, 0x1002) }, /* MT25400 Family [ConnectX-2 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1003) }, /* MT27500 Family [ConnectX-3] */
	{ PCI_VDEVICE(MELLANOX, 0x1004) }, /* MT27500 Family [ConnectX-3 Virtual Function] */
	{ PCI_VDEVICE(MELLANOX, 0x1005) }, /* MT27510 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1006) }, /* MT27511 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1007) }, /* MT27520 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1008) }, /* MT27521 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1009) }, /* MT27530 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100a) }, /* MT27531 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100b) }, /* MT27540 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100c) }, /* MT27541 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100d) }, /* MT27550 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100e) }, /* MT27551 Family */
	{ PCI_VDEVICE(MELLANOX, 0x100f) }, /* MT27560 Family */
	{ PCI_VDEVICE(MELLANOX, 0x1010) }, /* MT27561 Family */
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, mlx4_pci_table);
/* PCI driver glue; registered from mlx4_init(). */
static struct pci_driver mlx4_driver = {
	.name		= DRV_NAME,
	.id_table	= mlx4_pci_table,
	.probe		= mlx4_init_one,
	.remove		= __devexit_p(mlx4_remove_one)
};
/*
 * Range-check the module parameters; warn about the first offending one
 * and return -1, or 0 when all are valid.
 */
static int __init mlx4_verify_params(void)
{
	if (log_num_mac < 0 || log_num_mac > 7)
		pr_warning("mlx4_core: bad num_mac: %d\n", log_num_mac);
	else if (log_num_vlan < 0 || log_num_vlan > 7)
		pr_warning("mlx4_core: bad num_vlan: %d\n", log_num_vlan);
	else if (log_mtts_per_seg < 1 || log_mtts_per_seg > 7)
		pr_warning("mlx4_core: bad log_mtts_per_seg: %d\n", log_mtts_per_seg);
	else
		return 0;

	return -1;
}
/*
 * Module init: validate parameters, set up catastrophic-error handling
 * and the driver workqueue, then register with the PCI core.
 */
static int __init mlx4_init(void)
{
	int ret;

	if (mlx4_verify_params())
		return -EINVAL;

	mlx4_catas_init();

	mlx4_wq = create_singlethread_workqueue("mlx4");
	if (!mlx4_wq)
		return -ENOMEM;

	ret = pci_register_driver(&mlx4_driver);
	if (ret < 0)
		return ret;
	return 0;
}
/* Module exit: unregister the PCI driver, then retire the workqueue. */
static void __exit mlx4_cleanup(void)
{
	pci_unregister_driver(&mlx4_driver);
	destroy_workqueue(mlx4_wq);
}
module_init(mlx4_init);
module_exit(mlx4_cleanup);
| gpl-2.0 |
padovan/bluetooth-next | arch/arm/mach-omap2/clockdomain44xx.c | 394 | 3737 | /*
* OMAP4 clockdomain control
*
* Copyright (C) 2008-2010 Texas Instruments, Inc.
* Copyright (C) 2008-2010 Nokia Corporation
*
* Derived from mach-omap2/clockdomain.c written by Paul Walmsley
* Rajendra Nayak <rnayak@ti.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include "clockdomain.h"
#include "cminst44xx.h"
#include "cm44xx.h"
/*
 * Set clkdm2's static dependency bit in clkdm1's STATICDEP register.
 * On OMAP4 one register covers both wakeup and sleep dependencies, so
 * this helper is wired into both ops slots below.
 */
static int omap4_clkdm_add_wkup_sleep_dep(struct clockdomain *clkdm1,
					struct clockdomain *clkdm2)
{
	omap4_cminst_set_inst_reg_bits((1 << clkdm2->dep_bit),
					clkdm1->prcm_partition,
					clkdm1->cm_inst, clkdm1->clkdm_offs +
					OMAP4_CM_STATICDEP);
	return 0;
}
/* Clear clkdm2's static dependency bit in clkdm1's STATICDEP register
 * (shared wakeup/sleep dependency register on OMAP4). */
static int omap4_clkdm_del_wkup_sleep_dep(struct clockdomain *clkdm1,
					struct clockdomain *clkdm2)
{
	omap4_cminst_clear_inst_reg_bits((1 << clkdm2->dep_bit),
					clkdm1->prcm_partition,
					clkdm1->cm_inst, clkdm1->clkdm_offs +
					OMAP4_CM_STATICDEP);
	return 0;
}
/* Read clkdm2's static dependency bit from clkdm1's STATICDEP register;
 * nonzero means the dependency is currently set. */
static int omap4_clkdm_read_wkup_sleep_dep(struct clockdomain *clkdm1,
					struct clockdomain *clkdm2)
{
	return omap4_cminst_read_inst_reg_bits(clkdm1->prcm_partition,
					clkdm1->cm_inst, clkdm1->clkdm_offs +
					OMAP4_CM_STATICDEP,
					(1 << clkdm2->dep_bit));
}
/*
 * Clear every wakeup-dependency source bit of @clkdm in one STATICDEP
 * write, resetting each source's usecount to zero along the way.
 */
static int omap4_clkdm_clear_all_wkup_sleep_deps(struct clockdomain *clkdm)
{
	struct clkdm_dep *cd;
	u32 mask = 0;

	for (cd = clkdm->wkdep_srcs; cd && cd->clkdm_name; cd++) {
		/* Skip entries not valid for this chip variant. */
		if (!omap_chip_is(cd->omap_chip))
			continue;
		if (!cd->clkdm)
			continue; /* only happens if data is erroneous */

		mask |= 1 << cd->clkdm->dep_bit;
		atomic_set(&cd->wkdep_usecount, 0);
	}

	omap4_cminst_clear_inst_reg_bits(mask, clkdm->prcm_partition,
					clkdm->cm_inst, clkdm->clkdm_offs +
					OMAP4_CM_STATICDEP);
	return 0;
}
/* Force a software-supervised sleep transition of @clkdm. */
static int omap4_clkdm_sleep(struct clockdomain *clkdm)
{
	omap4_cminst_clkdm_force_sleep(clkdm->prcm_partition,
					clkdm->cm_inst, clkdm->clkdm_offs);
	return 0;
}
/* Force a software-supervised wakeup transition of @clkdm. */
static int omap4_clkdm_wakeup(struct clockdomain *clkdm)
{
	omap4_cminst_clkdm_force_wakeup(clkdm->prcm_partition,
					clkdm->cm_inst, clkdm->clkdm_offs);
	return 0;
}
/* Put @clkdm under hardware-supervised idle transitions. */
static void omap4_clkdm_allow_idle(struct clockdomain *clkdm)
{
	omap4_cminst_clkdm_enable_hwsup(clkdm->prcm_partition,
					clkdm->cm_inst, clkdm->clkdm_offs);
}
/* Take @clkdm out of hardware-supervised idle transitions. */
static void omap4_clkdm_deny_idle(struct clockdomain *clkdm)
{
	omap4_cminst_clkdm_disable_hwsup(clkdm->prcm_partition,
					clkdm->cm_inst, clkdm->clkdm_offs);
}
/*
 * Clock-enable hook: force the domain awake if it supports forced
 * wakeup; otherwise nothing to do.
 */
static int omap4_clkdm_clk_enable(struct clockdomain *clkdm)
{
	if (!(clkdm->flags & CLKDM_CAN_FORCE_WAKEUP))
		return 0;

	return omap4_clkdm_wakeup(clkdm);
}
/*
 * Clock-disable hook: force-sleep the domain, but only when it is not
 * already under hardware-supervised idle and supports forced sleep.
 */
static int omap4_clkdm_clk_disable(struct clockdomain *clkdm)
{
	bool hwsup = omap4_cminst_is_clkdm_in_hwsup(clkdm->prcm_partition,
						clkdm->cm_inst,
						clkdm->clkdm_offs);

	if (!hwsup && (clkdm->flags & CLKDM_CAN_FORCE_SLEEP))
		omap4_clkdm_sleep(clkdm);

	return 0;
}
/*
 * OMAP4 clockdomain operations.  Wakeup and sleep dependencies are both
 * encoded in the single CM_STATICDEP register on OMAP4, so the wkdep and
 * sleepdep hooks intentionally share the same implementations.
 */
struct clkdm_ops omap4_clkdm_operations = {
	.clkdm_add_wkdep	= omap4_clkdm_add_wkup_sleep_dep,
	.clkdm_del_wkdep	= omap4_clkdm_del_wkup_sleep_dep,
	.clkdm_read_wkdep	= omap4_clkdm_read_wkup_sleep_dep,
	.clkdm_clear_all_wkdeps	= omap4_clkdm_clear_all_wkup_sleep_deps,
	.clkdm_add_sleepdep	= omap4_clkdm_add_wkup_sleep_dep,
	.clkdm_del_sleepdep	= omap4_clkdm_del_wkup_sleep_dep,
	.clkdm_read_sleepdep	= omap4_clkdm_read_wkup_sleep_dep,
	.clkdm_clear_all_sleepdeps = omap4_clkdm_clear_all_wkup_sleep_deps,
	.clkdm_sleep		= omap4_clkdm_sleep,
	.clkdm_wakeup		= omap4_clkdm_wakeup,
	.clkdm_allow_idle	= omap4_clkdm_allow_idle,
	.clkdm_deny_idle	= omap4_clkdm_deny_idle,
	.clkdm_clk_enable	= omap4_clkdm_clk_enable,
	.clkdm_clk_disable	= omap4_clkdm_clk_disable,
};
| gpl-2.0 |
ravikirancg/android_kernel_gionee_msm8974 | drivers/net/ethernet/msm/msm_rmnet_bam.c | 650 | 26405 | /* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/*
* RMNET BAM Module.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/wakelock.h>
#include <linux/if_arp.h>
#include <linux/msm_rmnet.h>
#include <linux/platform_device.h>
#include <net/pkt_sched.h>
#ifdef CONFIG_HAS_EARLYSUSPEND
#include <linux/earlysuspend.h>
#endif
#include <mach/bam_dmux.h>
/* Debug message support */
static int msm_rmnet_bam_debug_mask;
module_param_named(debug_enable, msm_rmnet_bam_debug_mask,
		   int, S_IRUGO | S_IWUSR | S_IWGRP);
/* Counter exported read-only: packets that needed headroom reallocation. */
static unsigned long int msm_rmnet_bam_headroom_check_failure;
module_param(msm_rmnet_bam_headroom_check_failure, ulong, S_IRUGO);
MODULE_PARM_DESC(msm_rmnet_bam_headroom_check_failure,
		 "Number of packets with insufficient headroom");
/* Per-level enable bits for the DBG*() macros below. */
#define DEBUG_MASK_LVL0 (1U << 0)
#define DEBUG_MASK_LVL1 (1U << 1)
#define DEBUG_MASK_LVL2 (1U << 2)
#define DBG(m, x...) do { \
	if (msm_rmnet_bam_debug_mask & m) \
		pr_info(x); \
} while (0)
#define DBG0(x...) DBG(DEBUG_MASK_LVL0, x)
#define DBG1(x...) DBG(DEBUG_MASK_LVL1, x)
#define DBG2(x...) DBG(DEBUG_MASK_LVL2, x)
/* Configure device instances */
#define RMNET_DEVICE_COUNT 9
/* allow larger frames */
#define RMNET_DATA_LEN 2000
/* Values for struct rmnet_private.device_up (channel lifecycle). */
#define DEVICE_ID_INVALID -1
#define DEVICE_INACTIVE 2
#define DEVICE_ACTIVE 1
#define DEVICE_UNINITIALIZED 0
#define HEADROOM_FOR_BAM 8 /* for mux header */
#define HEADROOM_FOR_QOS 8
#define TAILROOM 8 /* for padding by mux layer */
/* Per-netdevice private state for one RMNET-over-BAM-DMUX channel. */
struct rmnet_private {
	struct net_device_stats stats;
	uint32_t ch_id;			/* BAM DMUX logical channel id */
#ifdef CONFIG_MSM_RMNET_DEBUG
	ktime_t last_packet;		/* timestamp of last rx/tx packet */
	unsigned long wakeups_xmit;	/* tx packets after an idle gap */
	unsigned long wakeups_rcv;	/* rx packets after an idle gap */
	unsigned long timeout_us;	/* idle gap threshold; 0 = disabled */
#endif
	struct sk_buff *waiting_for_ul_skb;	/* tx deferred until UL wakeup */
	spinlock_t lock;		/* guards operation_mode, waiting_for_ul_skb */
	spinlock_t tx_queue_lock;	/* guards queue stop/wake decisions */
	struct tasklet_struct tsklt;
	u32 operation_mode; /* IOCTL specified mode (protocol, QoS header) */
	uint8_t device_up;		/* DEVICE_* lifecycle state */
	uint8_t in_reset;		/* set while modem SSR is in progress */
	struct platform_driver *bam_pdev;
};
#ifdef CONFIG_MSM_RMNET_DEBUG
static unsigned long timeout_us;
#ifdef CONFIG_HAS_EARLYSUSPEND
/*
* If early suspend is enabled then we specify two timeout values,
* screen on (default), and screen is off.
*/
static unsigned long timeout_suspend_us;
static struct device *rmnet0;
/* Set timeout in us when the screen is off. */
/*
 * sysfs store: set the screen-off wakeup-accounting timeout (us).
 * NOTE(review): strict_strtoul() classically takes (cp, base, &result)
 * and returns an error code, so using its return value as the parsed
 * number looks suspect — confirm against this tree's definition
 * (simple_strtoul may have been intended).
 */
static ssize_t timeout_suspend_store(struct device *d,
				     struct device_attribute *attr,
				     const char *buf, size_t n)
{
	timeout_suspend_us = strict_strtoul(buf, NULL, 10);
	return n;
}
/* sysfs show: report the screen-off timeout in microseconds. */
static ssize_t timeout_suspend_show(struct device *d,
				    struct device_attribute *attr,
				    char *buf)
{
	return sprintf(buf, "%lu\n", timeout_suspend_us);
}
static DEVICE_ATTR(timeout_suspend, 0664, timeout_suspend_show,
timeout_suspend_store);
/* Screen turned off: switch rmnet0's accounting timeout to the suspend value. */
static void rmnet_early_suspend(struct early_suspend *handler)
{
	struct rmnet_private *priv;

	if (!rmnet0)
		return;

	priv = netdev_priv(to_net_dev(rmnet0));
	priv->timeout_us = timeout_suspend_us;
}
/* Screen turned on: restore rmnet0's normal accounting timeout. */
static void rmnet_late_resume(struct early_suspend *handler)
{
	struct rmnet_private *priv;

	if (!rmnet0)
		return;

	priv = netdev_priv(to_net_dev(rmnet0));
	priv->timeout_us = timeout_us;
}
static struct early_suspend rmnet_power_suspend = {
.suspend = rmnet_early_suspend,
.resume = rmnet_late_resume,
};
/* Register the early-suspend hooks once the kernel is fully up. */
static int __init rmnet_late_init(void)
{
	register_early_suspend(&rmnet_power_suspend);
	return 0;
}
late_initcall(rmnet_late_init);
#endif
/*
 * Returns 1 if this packet arrived after more than timeout_us of
 * inactivity (i.e. it "woke" the link), 0 otherwise.  Also refreshes
 * the last-packet timestamp.
 */
static int rmnet_cause_wakeup(struct rmnet_private *p)
{
	ktime_t now;
	int woke = 0;

	/* A timeout of zero disables wakeup accounting entirely. */
	if (p->timeout_us == 0)
		return 0;

	/* Use real (wall) time. */
	now = ktime_get_real();
	if (ktime_us_delta(now, p->last_packet) > p->timeout_us)
		woke = 1;
	p->last_packet = now;
	return woke;
}
/* sysfs show: number of TX packets that followed an idle period. */
static ssize_t wakeups_xmit_show(struct device *d,
				 struct device_attribute *attr,
				 char *buf)
{
	struct rmnet_private *priv = netdev_priv(to_net_dev(d));

	return sprintf(buf, "%lu\n", priv->wakeups_xmit);
}
DEVICE_ATTR(wakeups_xmit, 0444, wakeups_xmit_show, NULL);
/* sysfs show: number of RX packets that followed an idle period. */
static ssize_t wakeups_rcv_show(struct device *d, struct device_attribute *attr,
				char *buf)
{
	struct rmnet_private *priv = netdev_priv(to_net_dev(d));

	return sprintf(buf, "%lu\n", priv->wakeups_rcv);
}
DEVICE_ATTR(wakeups_rcv, 0444, wakeups_rcv_show, NULL);
/* Set timeout in us. */
/*
 * With early-suspend enabled only the module-wide timeout_us is updated
 * (the per-device value is swapped in by the suspend/resume hooks);
 * otherwise the device's live timeout is written too.
 * NOTE(review): strict_strtoul() classically takes (cp, base, &result)
 * and returns an error code — using its return value as the parsed
 * number looks suspect; confirm against this tree's definition.
 */
static ssize_t timeout_store(struct device *d, struct device_attribute *attr,
			     const char *buf, size_t n)
{
#ifndef CONFIG_HAS_EARLYSUSPEND
	struct rmnet_private *p = netdev_priv(to_net_dev(d));
	p->timeout_us = timeout_us = strict_strtoul(buf, NULL, 10);
#else
/* If using early suspend/resume hooks do not write the value on store. */
	timeout_us = strict_strtoul(buf, NULL, 10);
#endif
	return n;
}
/*
 * sysfs show: report the module-wide wakeup-accounting timeout (us).
 * The original body fetched netdev_priv() into a local twice and never
 * used it; the dead local and duplicate assignment are removed — only
 * the global timeout_us is reported.
 */
static ssize_t timeout_show(struct device *d, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "%lu\n", timeout_us);
}
DEVICE_ATTR(timeout, 0664, timeout_show, timeout_store);
#endif
/* Forward declaration */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
/*
 * Derive the L3 protocol of a raw-IP frame from the IP version nibble
 * in its first byte.  Returns 0 for unrecognized versions; such frames
 * are dropped later by the stack.
 */
static __be16 rmnet_ip_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	__be16 protocol;

	skb->dev = dev;

	switch (skb->data[0] & 0xf0) {
	case 0x40:
		protocol = htons(ETH_P_IP);
		break;
	case 0x60:
		protocol = htons(ETH_P_IPV6);
		break;
	default:
		protocol = 0;
		pr_err("[%s] rmnet_recv() L3 protocol decode error: 0x%02x",
		       dev->name, skb->data[0] & 0xf0);
		/* skb will be dropped in upper layer for unknown protocol */
		break;
	}
	return protocol;
}
/* Return 0 for Ethernet ARP frames (excluded from stats), 1 otherwise. */
static int count_this_packet(void *_hdr, int len)
{
	struct ethhdr *eth = _hdr;

	return (len >= ETH_HLEN && eth->h_proto == htons(ETH_P_ARP)) ? 0 : 1;
}
/*
 * Rx callback, called in workqueue context by the BAM DMUX layer.
 * Sets the skb's protocol according to the current link mode, updates
 * RX statistics, and delivers the frame to the network stack.
 */
static void bam_recv_notify(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	u32 opmode;
	if (skb) {
		skb->dev = dev;
		/* Snapshot the operation mode under p->lock, then use the copy. */
		spin_lock_irqsave(&p->lock, flags);
		opmode = p->operation_mode;
		spin_unlock_irqrestore(&p->lock, flags);
		if (RMNET_IS_MODE_IP(opmode)) {
			/* Driver in IP mode */
			skb->protocol = rmnet_ip_type_trans(skb, dev);
		} else {
			/* Driver in Ethernet mode */
			skb->protocol = eth_type_trans(skb, dev);
		}
		/* In Ethernet mode ARP frames are excluded from the stats. */
		if (RMNET_IS_MODE_IP(opmode) ||
		    count_this_packet(skb->data, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
			p->wakeups_rcv += rmnet_cause_wakeup(p);
#endif
			p->stats.rx_packets++;
			p->stats.rx_bytes += skb->len;
		}
		DBG1("[%s] Rx packet #%lu len=%d\n",
		     ((struct net_device *)dev)->name,
		     p->stats.rx_packets, skb->len);
		/* Deliver to network stack */
		netif_rx(skb);
	} else
		pr_err("[%s] %s: No skb received",
		       ((struct net_device *)dev)->name, __func__);
}
/*
 * Ensure *skb has at least dev->needed_headroom bytes of headroom,
 * reallocating (and freeing the original) when it does not.  Returns
 * the usable skb, or NULL if the reallocation failed (*skb is then
 * NULL as well).
 */
static struct sk_buff *_rmnet_add_headroom(struct sk_buff **skb,
					   struct net_device *dev)
{
	struct sk_buff *grown;

	if (skb_headroom(*skb) >= dev->needed_headroom)
		return *skb;

	msm_rmnet_bam_headroom_check_failure++;
	grown = skb_realloc_headroom(*skb, dev->needed_headroom);
	kfree_skb(*skb);
	*skb = grown;
	return grown;
}
/*
 * Push one skb down the BAM DMUX channel, prepending a QMI QoS header
 * first when QoS mode is enabled.
 *
 * Returns 0 on success (the DMUX layer then owns the skb until the
 * BAM_DMUX_WRITE_DONE callback), -EAGAIN when the channel high
 * watermark was hit, -EFAULT during SSR, or -EPERM for any other write
 * failure (caller still owns the skb in the error cases).
 */
static int _rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	int bam_ret;
	struct QMI_QOS_HDR_S *qmih;
	u32 opmode;
	unsigned long flags;
	/* Drop the frame if headroom for the mux header cannot be obtained. */
	if (unlikely(!_rmnet_add_headroom(&skb, dev))) {
		dev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}
	/* For QoS mode, prepend QMI header and assign flow ID from skb->mark */
	spin_lock_irqsave(&p->lock, flags);
	opmode = p->operation_mode;
	spin_unlock_irqrestore(&p->lock, flags);
	if (RMNET_IS_MODE_QOS(opmode)) {
		qmih = (struct QMI_QOS_HDR_S *)
			skb_push(skb, sizeof(struct QMI_QOS_HDR_S));
		qmih->version = 1;
		qmih->flags = 0;
		qmih->flow_id = skb->mark;
	}
	dev->trans_start = jiffies;
	/* if write() succeeds, skb access is unsafe in this process */
	bam_ret = msm_bam_dmux_write(p->ch_id, skb);
	if (bam_ret != 0 && bam_ret != -EAGAIN && bam_ret != -EFAULT) {
		pr_err("[%s] %s: write returned error %d",
		       dev->name, __func__, bam_ret);
		return -EPERM;
	}
	return bam_ret;
}
/*
 * TX-complete callback from the BAM DMUX layer.  Updates TX stats,
 * frees the skb, and restarts the netif queue once the channel drops
 * below its low watermark.
 * NOTE(review): operation_mode is read here without taking p->lock,
 * unlike the rx/tx paths — presumably a benign race; confirm.
 */
static void bam_write_done(void *dev, struct sk_buff *skb)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 opmode = p->operation_mode;
	unsigned long flags;
	DBG1("%s: write complete\n", __func__);
	/* ARP frames are excluded from stats in Ethernet mode. */
	if (RMNET_IS_MODE_IP(opmode) ||
	    count_this_packet(skb->data, skb->len)) {
		p->stats.tx_packets++;
		p->stats.tx_bytes += skb->len;
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->wakeups_xmit += rmnet_cause_wakeup(p);
#endif
	}
	DBG1("[%s] Tx packet #%lu len=%d mark=0x%x\n",
	     ((struct net_device *)(dev))->name, p->stats.tx_packets,
	     skb->len, skb->mark);
	dev_kfree_skb_any(skb);
	/* Wake the queue if it was stopped and the channel has drained. */
	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (netif_queue_stopped(dev) &&
	    msm_bam_dmux_is_ch_low(p->ch_id)) {
		DBG0("%s: Low WM hit, waking queue=%p\n",
		     __func__, skb);
		netif_wake_queue(dev);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);
}
/*
 * Event dispatcher registered with the BAM DMUX layer.  RECEIVE and
 * WRITE_DONE carry an skb in @data; UL_CONNECTED flushes any skb that
 * was parked in waiting_for_ul_skb while the uplink was waking up.
 */
static void bam_notify(void *dev, int event, unsigned long data)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	switch (event) {
	case BAM_DMUX_RECEIVE:
		bam_recv_notify(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_WRITE_DONE:
		bam_write_done(dev, (struct sk_buff *)(data));
		break;
	case BAM_DMUX_UL_CONNECTED:
		spin_lock_irqsave(&p->lock, flags);
		if (p->waiting_for_ul_skb != NULL) {
			struct sk_buff *skb;
			int ret;
			/* Claim the parked skb, then drop the lock before
			 * transmitting (_rmnet_xmit takes p->lock itself). */
			skb = p->waiting_for_ul_skb;
			p->waiting_for_ul_skb = NULL;
			spin_unlock_irqrestore(&p->lock, flags);
			ret = _rmnet_xmit(skb, dev);
			if (ret) {
				pr_err("%s: error %d dropping delayed TX SKB %p\n",
				       __func__, ret, skb);
				dev_kfree_skb_any(skb);
			}
			netif_wake_queue(dev);
		} else {
			spin_unlock_irqrestore(&p->lock, flags);
		}
		break;
	case BAM_DMUX_UL_DISCONNECTED:
		break;
	}
}
/*
 * Bring the BAM channel up on first open (open the DMUX channel and
 * register the SSR platform driver); subsequent opens only flip the
 * state back to DEVICE_ACTIVE.  Returns 0 or a negative errno.
 */
static int __rmnet_open(struct net_device *dev)
{
	int r;
	struct rmnet_private *p = netdev_priv(dev);
	DBG0("[%s] __rmnet_open()\n", dev->name);
	if (p->device_up == DEVICE_UNINITIALIZED) {
		r = msm_bam_dmux_open(p->ch_id, dev, bam_notify);
		if (r < 0) {
			DBG0("%s: ch=%d failed with rc %d\n",
			     __func__, p->ch_id, r);
			return -ENODEV;
		}
		/* Undo the channel open if pdev registration fails. */
		r = platform_driver_register(p->bam_pdev);
		if (r) {
			pr_err("%s: bam pdev registration failed n=%d rc=%d\n",
			       __func__, p->ch_id, r);
			msm_bam_dmux_close(p->ch_id);
			return r;
		}
	}
	p->device_up = DEVICE_ACTIVE;
	return 0;
}
/* ndo_open: bring the channel up and start the TX queue on success. */
static int rmnet_open(struct net_device *dev)
{
	int rc;

	DBG0("[%s] rmnet_open()\n", dev->name);

	rc = __rmnet_open(dev);
	if (!rc)
		netif_start_queue(dev);

	return rc;
}
/*
 * Mark the device inactive.  The BAM channel itself is deliberately
 * left open: closing it makes the remote side hang on a later re-open.
 * Returns -EBADF if the device was not active.
 */
static int __rmnet_close(struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);

	if (p->device_up != DEVICE_ACTIVE)
		return -EBADF;

	p->device_up = DEVICE_INACTIVE;
	return 0;
}
/* ndo_stop: deactivate the device and halt the TX queue. */
static int rmnet_stop(struct net_device *dev)
{
	DBG0("[%s] rmnet_stop()\n", dev->name);

	__rmnet_close(dev);
	netif_stop_queue(dev);

	return 0;
}
/* ndo_change_mtu: accept any MTU in [0, RMNET_DATA_LEN]. */
static int rmnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < 0 || new_mtu > RMNET_DATA_LEN)
		return -EINVAL;

	DBG0("[%s] MTU change: old=%d new=%d\n", dev->name, dev->mtu, new_mtu);
	dev->mtu = new_mtu;

	return 0;
}
/*
 * ndo_start_xmit: transmit one skb.  Votes for uplink power first; if
 * the uplink is still waking, the skb is parked in waiting_for_ul_skb
 * and sent later from bam_notify(BAM_DMUX_UL_CONNECTED).  Translates
 * _rmnet_xmit()'s error codes into netdev return values.
 */
static int rmnet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct rmnet_private *p = netdev_priv(dev);
	unsigned long flags;
	int awake;
	int ret = 0;
	if (netif_queue_stopped(dev)) {
		pr_err("[%s]fatal: rmnet_xmit called when "
		       "netif_queue is stopped", dev->name);
		return 0;
	}
	/* The vote and the park decision must be atomic wrt UL_CONNECTED. */
	spin_lock_irqsave(&p->lock, flags);
	awake = msm_bam_dmux_ul_power_vote();
	if (!awake) {
		/* send SKB once wakeup is complete */
		netif_stop_queue(dev);
		p->waiting_for_ul_skb = skb;
		spin_unlock_irqrestore(&p->lock, flags);
		ret = 0;
		goto exit;
	}
	spin_unlock_irqrestore(&p->lock, flags);
	ret = _rmnet_xmit(skb, dev);
	if (ret == -EPERM) {
		ret = NETDEV_TX_BUSY;
		goto exit;
	}
	/*
	 * detected SSR a bit early. shut some things down now, and leave
	 * the rest to the main ssr handling code when that happens later
	 */
	if (ret == -EFAULT) {
		netif_carrier_off(dev);
		dev_kfree_skb_any(skb);
		ret = 0;
		goto exit;
	}
	if (ret == -EAGAIN) {
		/*
		 * This should not happen
		 * EAGAIN means we attempted to overflow the high watermark
		 * Clearly the queue is not stopped like it should be, so
		 * stop it and return BUSY to the TCP/IP framework.  It will
		 * retry this packet with the queue is restarted which happens
		 * in the write_done callback when the low watermark is hit.
		 */
		netif_stop_queue(dev);
		ret = NETDEV_TX_BUSY;
		goto exit;
	}
	/* Successful write: throttle the queue at the high watermark. */
	spin_lock_irqsave(&p->tx_queue_lock, flags);
	if (msm_bam_dmux_is_ch_full(p->ch_id)) {
		netif_stop_queue(dev);
		DBG0("%s: High WM hit, stopping queue=%p\n", __func__, skb);
	}
	spin_unlock_irqrestore(&p->tx_queue_lock, flags);
exit:
	msm_bam_dmux_ul_power_unvote();
	return ret;
}
/* ndo_get_stats: hand back the driver-maintained statistics block. */
static struct net_device_stats *rmnet_get_stats(struct net_device *dev)
{
	struct rmnet_private *priv = netdev_priv(dev);

	return &priv->stats;
}
/* ndo_tx_timeout: log only; recovery is driven by the DMUX callbacks. */
static void rmnet_tx_timeout(struct net_device *dev)
{
	pr_warning("[%s] rmnet_tx_timeout()\n", dev->name);
}
/* netdev ops while the link protocol is Ethernet (the default mode). */
static const struct net_device_ops rmnet_ops_ether = {
	.ndo_open = rmnet_open,
	.ndo_stop = rmnet_stop,
	.ndo_start_xmit = rmnet_xmit,
	.ndo_get_stats = rmnet_get_stats,
	.ndo_tx_timeout = rmnet_tx_timeout,
	.ndo_do_ioctl = rmnet_ioctl,
	.ndo_change_mtu = rmnet_change_mtu,
	.ndo_set_mac_address = eth_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
};
static const struct net_device_ops rmnet_ops_ip = {
.ndo_open = rmnet_open,
.ndo_stop = rmnet_stop,
.ndo_start_xmit = rmnet_xmit,
.ndo_get_stats = rmnet_get_stats,
.ndo_tx_timeout = rmnet_tx_timeout,
.ndo_do_ioctl = rmnet_ioctl,
.ndo_change_mtu = rmnet_change_mtu,
.ndo_set_mac_address = 0,
.ndo_validate_addr = 0,
};
/*
 * ndo_do_ioctl: RMNET control interface.  Switches the link protocol
 * between Ethernet and raw IP, toggles the QMI QoS header, controls
 * per-flow qdisc state, and opens/closes the transport port.  Results
 * for the GET commands are returned through ifr->ifr_ifru.ifru_data.
 */
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct rmnet_private *p = netdev_priv(dev);
	u32 old_opmode = p->operation_mode;
	unsigned long flags;
	int prev_mtu = dev->mtu;
	int rc = 0;
	/* Process IOCTL command */
	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_ETHERNET:	/* Set Ethernet protocol */
		/* Perform Ethernet config only if in IP mode currently*/
		if (p->operation_mode & RMNET_MODE_LLP_IP) {
			ether_setup(dev);
			random_ether_addr(dev->dev_addr);
			/* ether_setup() resets the MTU; restore it. */
			dev->mtu = prev_mtu;
			dev->netdev_ops = &rmnet_ops_ether;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_IP;
			p->operation_mode |= RMNET_MODE_LLP_ETH;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): "
			     "set Ethernet protocol mode\n",
			     dev->name);
		}
		break;
	case RMNET_IOCTL_SET_LLP_IP:		/* Set RAWIP protocol */
		/* Perform IP config only if in Ethernet mode currently*/
		if (p->operation_mode & RMNET_MODE_LLP_ETH) {
			/* Undo config done in ether_setup() */
			dev->header_ops = 0;  /* No header */
			dev->type = ARPHRD_RAWIP;
			dev->hard_header_len = 0;
			dev->mtu = prev_mtu;
			dev->addr_len = 0;
			dev->flags &= ~(IFF_BROADCAST|
					IFF_MULTICAST);
			/* Reserve room for the mux and QoS headers. */
			dev->needed_headroom = HEADROOM_FOR_BAM +
			  HEADROOM_FOR_QOS;
			dev->needed_tailroom = TAILROOM;
			dev->netdev_ops = &rmnet_ops_ip;
			spin_lock_irqsave(&p->lock, flags);
			p->operation_mode &= ~RMNET_MODE_LLP_ETH;
			p->operation_mode |= RMNET_MODE_LLP_IP;
			spin_unlock_irqrestore(&p->lock, flags);
			DBG0("[%s] rmnet_ioctl(): "
			     "set IP protocol mode\n",
			     dev->name);
		}
		break;
	case RMNET_IOCTL_GET_LLP:	/* Get link protocol state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode &
				 (RMNET_MODE_LLP_ETH|RMNET_MODE_LLP_IP));
		break;
	case RMNET_IOCTL_SET_QOS_ENABLE:	/* Set QoS header enabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode |= RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
		     dev->name);
		break;
	case RMNET_IOCTL_SET_QOS_DISABLE:	/* Set QoS header disabled */
		spin_lock_irqsave(&p->lock, flags);
		p->operation_mode &= ~RMNET_MODE_QOS;
		spin_unlock_irqrestore(&p->lock, flags);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
		     dev->name);
		break;
	case RMNET_IOCTL_FLOW_ENABLE:
		tc_qdisc_flow_control(dev, (u32)ifr->ifr_data, 1);
		DBG0("[%s] rmnet_ioctl(): enabled flow", dev->name);
		break;
	case RMNET_IOCTL_FLOW_DISABLE:
		tc_qdisc_flow_control(dev, (u32)ifr->ifr_data, 0);
		DBG0("[%s] rmnet_ioctl(): disabled flow", dev->name);
		break;
	case RMNET_IOCTL_GET_QOS:	/* Get QoS header state */
		ifr->ifr_ifru.ifru_data =
			(void *)(p->operation_mode & RMNET_MODE_QOS);
		break;
	case RMNET_IOCTL_GET_OPMODE:	/* Get operation mode */
		ifr->ifr_ifru.ifru_data = (void *)p->operation_mode;
		break;
	case RMNET_IOCTL_OPEN:		/* Open transport port */
		rc = __rmnet_open(dev);
		DBG0("[%s] rmnet_ioctl(): open transport port\n",
		     dev->name);
		break;
	case RMNET_IOCTL_CLOSE:		/* Close transport port */
		rc = __rmnet_close(dev);
		DBG0("[%s] rmnet_ioctl(): close transport port\n",
		     dev->name);
		break;
	default:
		pr_err("[%s] error: rmnet_ioct called for unsupported cmd[%d]",
		       dev->name, cmd);
		return -EINVAL;
	}
	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08x\n",
	     dev->name, __func__, cmd, old_opmode, p->operation_mode);
	return rc;
}
/*
 * alloc_netdev() setup callback: configure a new rmnet device in
 * Ethernet mode with a random MAC.  Note the ordering: ether_setup()
 * overwrites the MTU, so the larger RMNET_DATA_LEN is applied after it.
 */
static void __init rmnet_setup(struct net_device *dev)
{
	/* Using Ethernet mode by default */
	dev->netdev_ops = &rmnet_ops_ether;
	ether_setup(dev);
	/* set this after calling ether_setup */
	dev->mtu = RMNET_DATA_LEN;
	/* Reserve room for the BAM mux header and the QoS header. */
	dev->needed_headroom = HEADROOM_FOR_BAM + HEADROOM_FOR_QOS ;
	dev->needed_tailroom = TAILROOM;
	random_ether_addr(dev->dev_addr);
	dev->watchdog_timeo = 1000; /* 10 seconds? */
}
static struct net_device *netdevs[RMNET_DEVICE_COUNT];
static struct platform_driver bam_rmnet_drivers[RMNET_DEVICE_COUNT];
/*
 * Platform probe: invoked when the matching bam_dmux_ch_%d device
 * appears.  On a post-SSR re-probe (in_reset set) the DMUX channel is
 * reopened and the netif queue restarted.
 */
static int bam_rmnet_probe(struct platform_device *pdev)
{
	int i;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	struct rmnet_private *priv;

	/* Map the platform device name back to our device index. */
	for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
		if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
			break;
	}
	if (i >= RMNET_DEVICE_COUNT) {
		pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
		return -ENODEV;
	}

	priv = netdev_priv(netdevs[i]);
	if (priv->in_reset) {
		priv->in_reset = 0;
		msm_bam_dmux_open(priv->ch_id, netdevs[i], bam_notify);
		netif_carrier_on(netdevs[i]);
		netif_start_queue(netdevs[i]);
	}
	return 0;
}
/*
 * Platform remove: called at the start of modem SSR.  Marks the device
 * as in-reset, drops any parked uplink skb, closes the DMUX channel and
 * stops the queue.
 */
static int bam_rmnet_remove(struct platform_device *pdev)
{
	int i;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	struct rmnet_private *p;
	for (i = 0; i < RMNET_DEVICE_COUNT; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d", i);
		if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
			break;
	}
	/*
	 * Bounds check was missing: an unmatched pdev name left
	 * i == RMNET_DEVICE_COUNT and indexed one past the end of
	 * netdevs[].  bam_rmnet_rev_remove() already guards this way.
	 */
	if (i >= RMNET_DEVICE_COUNT) {
		pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
		return 0;
	}
	p = netdev_priv(netdevs[i]);
	p->in_reset = 1;
	if (p->waiting_for_ul_skb != NULL) {
		dev_kfree_skb_any(p->waiting_for_ul_skb);
		p->waiting_for_ul_skb = NULL;
	}
	msm_bam_dmux_close(p->ch_id);
	netif_carrier_off(netdevs[i]);
	netif_stop_queue(netdevs[i]);
	return 0;
}
/* support for 9 new rmnet ports */
#define RMNET_REV_DEVICE_COUNT (9)
static struct net_device *netdevs_rev[RMNET_REV_DEVICE_COUNT];
static struct platform_driver bam_rmnet_rev_drivers[RMNET_REV_DEVICE_COUNT];
/*
 * Platform probe for the reverse-rmnet ports (channel ids offset by
 * BAM_DMUX_DATA_REV_RMNET_0).  Re-opens the channel after SSR.
 */
static int bam_rmnet_rev_probe(struct platform_device *pdev)
{
	int i;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	struct rmnet_private *priv;

	/* Map the platform device name back to our reverse-port index. */
	for (i = 0; i < RMNET_REV_DEVICE_COUNT; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
			  (i+BAM_DMUX_DATA_REV_RMNET_0));
		if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
			break;
	}
	if (i >= RMNET_REV_DEVICE_COUNT) {
		pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
		return -ENODEV;
	}

	priv = netdev_priv(netdevs_rev[i]);
	if (priv->in_reset) {
		priv->in_reset = 0;
		msm_bam_dmux_open(priv->ch_id, netdevs_rev[i], bam_notify);
		netif_carrier_on(netdevs_rev[i]);
		netif_start_queue(netdevs_rev[i]);
	}
	return 0;
}
/*
 * Platform remove for the reverse-rmnet ports: mark in-reset, discard
 * any parked uplink skb, close the channel and quiesce the netdev.
 */
static int bam_rmnet_rev_remove(struct platform_device *pdev)
{
	int i;
	char name[BAM_DMUX_CH_NAME_MAX_LEN];
	struct rmnet_private *priv;

	for (i = 0; i < RMNET_REV_DEVICE_COUNT; ++i) {
		scnprintf(name, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
			  (i+BAM_DMUX_DATA_REV_RMNET_0));
		if (!strncmp(pdev->name, name, BAM_DMUX_CH_NAME_MAX_LEN))
			break;
	}
	if (i >= RMNET_REV_DEVICE_COUNT) {
		pr_err("%s: wrong netdev %s\n", __func__, pdev->name);
		return 0;
	}

	priv = netdev_priv(netdevs_rev[i]);
	priv->in_reset = 1;
	if (priv->waiting_for_ul_skb != NULL) {
		dev_kfree_skb_any(priv->waiting_for_ul_skb);
		priv->waiting_for_ul_skb = NULL;
	}
	msm_bam_dmux_close(priv->ch_id);
	netif_carrier_off(netdevs_rev[i]);
	netif_stop_queue(netdevs_rev[i]);
	return 0;
}
#ifdef CONFIG_MSM_RMNET_DEBUG
#ifdef CONFIG_HAS_EARLYSUSPEND
/* Expose the screen-off timeout attribute when early-suspend exists. */
static int rmnet_debug_init_timeout_suspend(struct net_device *dev)
{
	struct device *d;
	d = &(dev->dev);
	return device_create_file(d, &dev_attr_timeout_suspend);
}
#else
/* No early-suspend support: nothing extra to expose. */
static int rmnet_debug_init_timeout_suspend(struct net_device *dev)
{
	return 0;
}
#endif
/*
 * Create the debug sysfs attributes (timeout, wakeups_xmit, wakeups_rcv
 * and, with early-suspend, timeout_suspend) and zero the debug
 * counters.  Returns 0 or the first device_create_file() error.
 */
static int rmnet_debug_init(struct net_device *dev)
{
	struct device *d = &(dev->dev);
	struct rmnet_private *priv = netdev_priv(dev);
	int err;

	priv->timeout_us = 0;
	priv->wakeups_xmit = 0;
	priv->wakeups_rcv = 0;

	err = device_create_file(d, &dev_attr_timeout);
	if (!err)
		err = device_create_file(d, &dev_attr_wakeups_xmit);
	if (!err)
		err = device_create_file(d, &dev_attr_wakeups_rcv);
	if (!err)
		err = rmnet_debug_init_timeout_suspend(dev);
	return err;
}
#else
/* Debug support compiled out: nothing to initialize. */
static int rmnet_debug_init(struct net_device *dev)
{
	return 0;
}
#endif
/*
 * Module init: allocate and register the forward rmnet devices, then
 * the reverse ("rev_rmnet%d") devices, wire each one to its
 * bam_dmux_ch_%d platform driver, and create the debug sysfs files.
 *
 * NOTE(review): the error handling here is uneven — early returns after
 * register_netdev() failures leave previously registered devices in
 * place, and the shared "error:" label unregisters/frees only the
 * current dev; kept as-is pending a wider cleanup.
 */
static int __init rmnet_init(void)
{
	int ret;
	struct device *d;
	struct net_device *dev;
	struct rmnet_private *p;
	unsigned n;
	char *tempname;
	pr_info("%s: BAM devices[%d]\n", __func__, RMNET_DEVICE_COUNT);
#ifdef CONFIG_MSM_RMNET_DEBUG
	timeout_us = 0;
#ifdef CONFIG_HAS_EARLYSUSPEND
	timeout_suspend_us = 0;
#endif
#endif
	for (n = 0; n < RMNET_DEVICE_COUNT; n++) {
		/* The USB tethering channel gets a distinct name. */
		const char *dev_name = "rmnet%d";
		if (n == BAM_DMUX_USB_RMNET_0)
			dev_name = "rmnet_usb%d";
		dev = alloc_netdev(sizeof(struct rmnet_private),
				   dev_name, rmnet_setup);
		if (!dev) {
			pr_err("%s: no memory for netdev %d\n", __func__, n);
			return -ENOMEM;
		}
		netdevs[n] = dev;
		d = &(dev->dev);
		p = netdev_priv(dev);
		/* Initial config uses Ethernet */
		p->operation_mode = RMNET_MODE_LLP_ETH;
		p->ch_id = n;
		p->waiting_for_ul_skb = NULL;
		p->in_reset = 0;
		p->device_up = DEVICE_UNINITIALIZED;
		spin_lock_init(&p->lock);
		spin_lock_init(&p->tx_queue_lock);
#ifdef CONFIG_MSM_RMNET_DEBUG
		p->timeout_us = timeout_us;
		p->wakeups_xmit = p->wakeups_rcv = 0;
#endif
		ret = register_netdev(dev);
		if (ret) {
			pr_err("%s: unable to register netdev"
			       " %d rc=%d\n", __func__, n, ret);
			netdevs[n] = NULL;
			free_netdev(dev);
			return ret;
		}
		/* sysfs failures skip only the remaining per-dev setup. */
#ifdef CONFIG_MSM_RMNET_DEBUG
		if (device_create_file(d, &dev_attr_timeout))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_xmit))
			continue;
		if (device_create_file(d, &dev_attr_wakeups_rcv))
			continue;
#ifdef CONFIG_HAS_EARLYSUSPEND
		if (device_create_file(d, &dev_attr_timeout_suspend))
			continue;
		/* Only care about rmnet0 for suspend/resume tiemout hooks. */
		if (n == 0)
			rmnet0 = d;
#endif
#endif
		/* Per-channel platform driver used for SSR notification. */
		bam_rmnet_drivers[n].probe = bam_rmnet_probe;
		bam_rmnet_drivers[n].remove = bam_rmnet_remove;
		tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
		if (tempname == NULL) {
			netdevs[n] = NULL;
			ret = -ENOMEM;
			goto error;
		}
		scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
			  n);
		bam_rmnet_drivers[n].driver.name = tempname;
		bam_rmnet_drivers[n].driver.owner = THIS_MODULE;
		p->bam_pdev = &bam_rmnet_drivers[n];
	}
	/*Support for new rmnet ports */
	for (n = 0; n < RMNET_REV_DEVICE_COUNT; n++) {
		dev = alloc_netdev(sizeof(struct rmnet_private),
				   "rev_rmnet%d", rmnet_setup);
		if (!dev) {
			pr_err("%s: no memory for rev netdev %d\n",
			       __func__, n);
			return -ENOMEM;
		}
		netdevs_rev[n] = dev;
		d = &(dev->dev);
		p = netdev_priv(dev);
		/* Initial config uses Ethernet */
		p->operation_mode = RMNET_MODE_LLP_ETH;
		p->ch_id = n+BAM_DMUX_DATA_REV_RMNET_0;
		p->waiting_for_ul_skb = NULL;
		p->in_reset = 0;
		p->device_up = DEVICE_UNINITIALIZED;
		spin_lock_init(&p->lock);
		spin_lock_init(&p->tx_queue_lock);
		ret = register_netdev(dev);
		if (ret) {
			pr_err("%s: unable to register rev netdev %d rc=%d\n",
			       __func__, n, ret);
			netdevs_rev[n] = NULL;
			free_netdev(dev);
			return ret;
		}
		if (rmnet_debug_init(dev))
			continue;
		bam_rmnet_rev_drivers[n].probe = bam_rmnet_rev_probe;
		bam_rmnet_rev_drivers[n].remove = bam_rmnet_rev_remove;
		tempname = kmalloc(BAM_DMUX_CH_NAME_MAX_LEN, GFP_KERNEL);
		if (tempname == NULL) {
			netdevs_rev[n] = NULL;
			ret = -ENOMEM;
			goto error;
		}
		scnprintf(tempname, BAM_DMUX_CH_NAME_MAX_LEN, "bam_dmux_ch_%d",
			  (n+BAM_DMUX_DATA_REV_RMNET_0));
		bam_rmnet_rev_drivers[n].driver.name = tempname;
		bam_rmnet_rev_drivers[n].driver.owner = THIS_MODULE;
		p->bam_pdev = &bam_rmnet_rev_drivers[n];
	}
	return 0;
error:
	unregister_netdev(dev);
	free_netdev(dev);
	return ret;
}
module_init(rmnet_init);
MODULE_DESCRIPTION("MSM RMNET BAM TRANSPORT");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
vdsirotkin/vds_kernel_cm10.1 | drivers/video/msm/vidc/720p/ddl/vcd_ddl_metadata.c | 906 | 15924 | /* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <media/msm/vidc_type.h>
#include "vcd_ddl_utils.h"
#include "vcd_ddl_metadata.h"
/*
 * Return a pointer to the three-word header entry (version, port, type)
 * for @meta_data inside this client's metadata input buffer.  Decoder
 * and encoder buffers have different preamble sizes; unknown types fall
 * back to the first entry (offset 0), matching the original defaults.
 */
static u32 *ddl_metadata_hdr_entry(struct ddl_client_context *ddl,
	u32 meta_data)
{
	u32 *buffer;
	u32 entry_offset;

	if (ddl->decoding) {
		buffer = (u32 *)
			ddl->codec_data.decoder.meta_data_input.
				align_virtual_addr;
		/* Decoder layout: 32+1 preamble words, then 3-word entries. */
		buffer += 32 + 1;
		switch (meta_data) {
		case VCD_METADATA_QPARRAY:
			entry_offset = 3;
			break;
		case VCD_METADATA_CONCEALMB:
			entry_offset = 6;
			break;
		case VCD_METADATA_VC1:
			entry_offset = 9;
			break;
		case VCD_METADATA_SEI:
			entry_offset = 12;
			break;
		case VCD_METADATA_VUI:
			entry_offset = 15;
			break;
		case VCD_METADATA_PASSTHROUGH:
			entry_offset = 18;
			break;
		case VCD_METADATA_QCOMFILLER:
			entry_offset = 21;
			break;
		case VCD_METADATA_DATANONE:
		default:
			entry_offset = 0;
			break;
		}
	} else {
		buffer = (u32 *)
			ddl->codec_data.encoder.meta_data_input.
				align_virtual_addr;
		/* Encoder layout: 2 preamble words, then 3-word entries. */
		buffer += 2;
		switch (meta_data) {
		case VCD_METADATA_ENC_SLICE:
			entry_offset = 3;
			break;
		case VCD_METADATA_QCOMFILLER:
			entry_offset = 6;
			break;
		case VCD_METADATA_DATANONE:
		default:
			entry_offset = 0;
			break;
		}
	}
	return buffer + entry_offset;
}
/* Fill in one three-word metadata header entry (version, port, type). */
static void ddl_metadata_write_hdr(struct ddl_client_context *ddl,
	u32 meta_data)
{
	u32 *hdr_entry = ddl_metadata_hdr_entry(ddl, meta_data);

	hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] = 0x00000101;
	hdr_entry[DDL_METADATA_HDR_PORT_INDEX] = 1;
	hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] = meta_data;
}

/*
 * Bind the client's metadata input buffer to its slice of the shared
 * metadata region and pre-populate the header entry for every metadata
 * type this client (decoder or encoder) can emit.  The original body
 * repeated the same three assignments nine times; that boilerplate is
 * factored into ddl_metadata_write_hdr() above.
 */
void ddl_set_default_meta_data_hdr(struct ddl_client_context *ddl)
{
	struct ddl_buf_addr *main_buffer =
		&ddl->ddl_context->metadata_shared_input;
	struct ddl_buf_addr *client_buffer;

	client_buffer = ddl->decoding ?
		&(ddl->codec_data.decoder.meta_data_input) :
		&(ddl->codec_data.encoder.meta_data_input);
	DDL_METADATA_CLIENT_INPUTBUF(main_buffer, client_buffer,
		ddl->channel_id);

	/* Entries common to both directions. */
	ddl_metadata_write_hdr(ddl, VCD_METADATA_QCOMFILLER);
	ddl_metadata_write_hdr(ddl, VCD_METADATA_DATANONE);
	if (ddl->decoding) {
		ddl_metadata_write_hdr(ddl, VCD_METADATA_QPARRAY);
		ddl_metadata_write_hdr(ddl, VCD_METADATA_CONCEALMB);
		ddl_metadata_write_hdr(ddl, VCD_METADATA_SEI);
		ddl_metadata_write_hdr(ddl, VCD_METADATA_VUI);
		ddl_metadata_write_hdr(ddl, VCD_METADATA_VC1);
		ddl_metadata_write_hdr(ddl, VCD_METADATA_PASSTHROUGH);
	} else {
		ddl_metadata_write_hdr(ddl, VCD_METADATA_ENC_SLICE);
	}
}
/*
 * Compute the set of metadata types supported for this client, based on
 * direction (decode/encode) and, for decoders, the active codec.
 */
static u32 ddl_supported_metadata_flag(struct ddl_client_context *ddl)
{
	enum vcd_codec codec;
	u32 flag;

	if (!ddl->decoding)
		return VCD_METADATA_ENC_SLICE;

	codec = ddl->codec_data.decoder.codec.codec;
	flag = VCD_METADATA_CONCEALMB | VCD_METADATA_PASSTHROUGH |
		VCD_METADATA_QPARRAY;
	if (codec == VCD_CODEC_H264)
		flag |= VCD_METADATA_SEI | VCD_METADATA_VUI;
	else if (codec == VCD_CODEC_VC1 || codec == VCD_CODEC_VC1_RCV)
		flag |= VCD_METADATA_VC1;

	return flag;
}
/* Disable all metadata reporting for this client (decoder or encoder). */
void ddl_set_default_metadata_flag(struct ddl_client_context *ddl)
{
	if (ddl->decoding)
		ddl->codec_data.decoder.meta_data_enable_flag = 0;
	else
		ddl->codec_data.encoder.meta_data_enable_flag = 0;
}
/*
 * Size the per-frame metadata suffix appended to each decoder output
 * buffer and grow output_buf_req->sz accordingly.  Each enabled
 * metadata type contributes a header plus its payload, each aligned
 * via DDL_METADATA_ALIGNSIZE (which updates its argument in place).
 */
void ddl_set_default_decoder_metadata_buffer_size(
	struct ddl_decoder_data *decoder,
	struct vcd_property_frame_size *frame_size,
	struct vcd_buffer_requirement *output_buf_req)
{
	u32 flag = decoder->meta_data_enable_flag;
	u32 suffix = 0;
	size_t sz = 0;
	/* No metadata enabled: no suffix. */
	if (!flag) {
		decoder->suffix = 0;
		return;
	}
	if (flag & VCD_METADATA_QPARRAY) {
		/* width*height >> 8 == number of 16x16 macroblocks. */
		u32 num_of_mb =
			((frame_size->width * frame_size->height) >> 8);
		sz = DDL_METADATA_HDR_SIZE;
		sz += num_of_mb;
		DDL_METADATA_ALIGNSIZE(sz);
		suffix += sz;
	}
	if (flag & VCD_METADATA_CONCEALMB) {
		/* One bit per macroblock, hence the >> 3 to bytes. */
		u32 num_of_mb =
			((frame_size->width * frame_size->height) >> 8);
		sz = DDL_METADATA_HDR_SIZE + (num_of_mb >> 3);
		DDL_METADATA_ALIGNSIZE(sz);
		suffix += sz;
	}
	if (flag & VCD_METADATA_VC1) {
		sz = DDL_METADATA_HDR_SIZE;
		sz += DDL_METADATA_VC1_PAYLOAD_SIZE;
		DDL_METADATA_ALIGNSIZE(sz);
		suffix += sz;
	}
	if (flag & VCD_METADATA_SEI) {
		/* SEI can repeat: reserve space for the maximum count. */
		sz = DDL_METADATA_HDR_SIZE;
		sz += DDL_METADATA_SEI_PAYLOAD_SIZE;
		DDL_METADATA_ALIGNSIZE(sz);
		suffix += (sz * DDL_METADATA_SEI_MAX);
	}
	if (flag & VCD_METADATA_VUI) {
		sz = DDL_METADATA_HDR_SIZE;
		sz += DDL_METADATA_VUI_PAYLOAD_SIZE;
		DDL_METADATA_ALIGNSIZE(sz);
		suffix += (sz);
	}
	if (flag & VCD_METADATA_PASSTHROUGH) {
		sz = DDL_METADATA_HDR_SIZE;
		sz += DDL_METADATA_PASSTHROUGH_PAYLOAD_SIZE;
		DDL_METADATA_ALIGNSIZE(sz);
		suffix += (sz);
	}
	/* Terminating DATANONE entry plus padding. */
	sz = DDL_METADATA_EXTRADATANONE_SIZE;
	DDL_METADATA_ALIGNSIZE(sz);
	suffix += (sz);
	suffix += DDL_METADATA_EXTRAPAD_SIZE;
	DDL_METADATA_ALIGNSIZE(suffix);
	decoder->suffix = suffix;
	output_buf_req->sz += suffix;
	return;
}
/*
 * Compute the metadata suffix appended to each encoder output buffer
 * and grow the encoder's output buffer requirement by that amount.
 */
void ddl_set_default_encoder_metadata_buffer_size(struct ddl_encoder_data
    *encoder)
{
    u32 metadata_flags = encoder->meta_data_enable_flag;
    u32 total = 0;
    size_t chunk = 0;

    if (!metadata_flags) {
        encoder->suffix = 0;
        return;
    }
    if (metadata_flags & VCD_METADATA_ENC_SLICE) {
        /* one macroblock is 16x16 pixels */
        u32 mb_count = (encoder->frame_size.width *
                encoder->frame_size.height) >> 8;

        chunk = DDL_METADATA_HDR_SIZE + 4 + (8 * mb_count);
        DDL_METADATA_ALIGNSIZE(chunk);
        total += chunk;
    }
    /* terminating record plus guard padding, then align the suffix */
    chunk = DDL_METADATA_EXTRADATANONE_SIZE;
    DDL_METADATA_ALIGNSIZE(chunk);
    total += chunk;
    total += DDL_METADATA_EXTRAPAD_SIZE;
    DDL_METADATA_ALIGNSIZE(total);
    encoder->suffix = total;
    encoder->output_buf_req.sz += total;
}
/*
 * Set-property handler for metadata properties.
 *
 * VCD_I_METADATA_ENABLE: intersect the requested enable mask with what the
 * current codec supports, force the mandatory bits on, and recompute the
 * client's default buffer requirements when the mask changes.
 * VCD_I_METADATA_HEADER: store a caller-supplied header (version, port,
 * type) for exactly one metadata type.
 *
 * Returns VCD_S_SUCCESS, or VCD_ERR_ILLEGAL_PARM on bad id/size/state.
 */
u32 ddl_set_metadata_params(struct ddl_client_context *ddl,
    struct vcd_property_hdr *property_hdr,
    void *property_value)
{
    u32 vcd_status = VCD_ERR_ILLEGAL_PARM;

    if (property_hdr->prop_id == VCD_I_METADATA_ENABLE) {
        struct vcd_property_meta_data_enable *meta_data_enable =
            (struct vcd_property_meta_data_enable *)
            property_value;
        u32 *meta_data_enable_flag;
        enum vcd_codec codec;

        if (ddl->decoding) {
            meta_data_enable_flag =
                &(ddl->codec_data.decoder.
                meta_data_enable_flag);
            codec = ddl->codec_data.decoder.codec.codec;
        } else {
            meta_data_enable_flag =
                &(ddl->codec_data.encoder.
                meta_data_enable_flag);
            codec = ddl->codec_data.encoder.codec.codec;
        }
        /* only legal while the client is still OPEN and a codec is set */
        if (sizeof(struct vcd_property_meta_data_enable) ==
            property_hdr->sz &&
            DDLCLIENT_STATE_IS(ddl, DDL_CLIENT_OPEN) &&
            codec) {
            u32 flag = ddl_supported_metadata_flag(ddl);

            flag &= (meta_data_enable->meta_data_enable_flag);
            if (flag)
                flag |= DDL_METADATA_MANDATORY;
            if (flag != *meta_data_enable_flag) {
                *meta_data_enable_flag = flag;
                /* metadata changes the per-buffer suffix, so
                 * the default requirements must be redone */
                if (ddl->decoding) {
                    ddl_set_default_decoder_buffer_req
                        (&ddl->codec_data.decoder,
                        true);
                } else {
                    ddl_set_default_encoder_buffer_req
                        (&ddl->codec_data.encoder);
                }
            }
            vcd_status = VCD_S_SUCCESS;
        }
    } else if (property_hdr->prop_id == VCD_I_METADATA_HEADER) {
        struct vcd_property_metadata_hdr *hdr =
            (struct vcd_property_metadata_hdr *)property_value;

        if (sizeof(struct vcd_property_metadata_hdr) ==
            property_hdr->sz) {
            u32 flag = ddl_supported_metadata_flag(ddl);

            flag |= DDL_METADATA_MANDATORY;
            flag &= hdr->meta_data_id;
            /* accept only a single id bit (power of two);
             * NOTE(review): flag == 0 also passes this test --
             * confirm ddl_metadata_hdr_entry() tolerates 0 */
            if (!(flag & (flag - 1))) {
                u32 *hdr_entry =
                    ddl_metadata_hdr_entry(ddl, flag);
                hdr_entry[DDL_METADATA_HDR_VERSION_INDEX] =
                    hdr->version;
                hdr_entry[DDL_METADATA_HDR_PORT_INDEX] =
                    hdr->port_index;
                hdr_entry[DDL_METADATA_HDR_TYPE_INDEX] =
                    hdr->type;
                vcd_status = VCD_S_SUCCESS;
            }
        }
    }
    return vcd_status;
}
/*
 * Get-property handler, mirror of ddl_set_metadata_params():
 * VCD_I_METADATA_ENABLE returns the active enable mask of the client's
 * codec side; VCD_I_METADATA_HEADER returns the stored header (version,
 * port, type) for exactly one metadata type.
 * Returns VCD_S_SUCCESS, or VCD_ERR_ILLEGAL_PARM on bad id/size.
 */
u32 ddl_get_metadata_params(struct ddl_client_context *ddl,
    struct vcd_property_hdr *property_hdr,
    void *property_value)
{
    u32 vcd_status = VCD_ERR_ILLEGAL_PARM;

    if (property_hdr->prop_id == VCD_I_METADATA_ENABLE &&
        sizeof(struct vcd_property_meta_data_enable)
        == property_hdr->sz) {
        struct vcd_property_meta_data_enable *meta_data_enable =
            (struct vcd_property_meta_data_enable *)
            property_value;

        meta_data_enable->meta_data_enable_flag =
            ((ddl->decoding) ?
            (ddl->codec_data.decoder.meta_data_enable_flag)
            : (ddl->codec_data.encoder.meta_data_enable_flag));
        vcd_status = VCD_S_SUCCESS;
    } else if (property_hdr->prop_id == VCD_I_METADATA_HEADER &&
        sizeof(struct vcd_property_metadata_hdr) ==
        property_hdr->sz) {
        struct vcd_property_metadata_hdr *hdr =
            (struct vcd_property_metadata_hdr *)
            property_value;
        u32 flag = ddl_supported_metadata_flag(ddl);

        flag |= DDL_METADATA_MANDATORY;
        flag &= hdr->meta_data_id;
        /* accept only a single id bit (power of two);
         * NOTE(review): flag == 0 also passes -- see setter */
        if (!(flag & (flag - 1))) {
            u32 *hdr_entry = ddl_metadata_hdr_entry(ddl,
                flag);
            hdr->version =
                hdr_entry[DDL_METADATA_HDR_VERSION_INDEX];
            hdr->port_index =
                hdr_entry[DDL_METADATA_HDR_PORT_INDEX];
            hdr->type =
                hdr_entry[DDL_METADATA_HDR_TYPE_INDEX];
            vcd_status = VCD_S_SUCCESS;
        }
    }
    return vcd_status;
}
/*
 * Translate the client's VCD metadata enable bits into the 720p hardware
 * flag word and program the hardware with it plus the metadata input
 * buffer address (cleared when no metadata is enabled).
 */
void ddl_metadata_enable(struct ddl_client_context *ddl)
{
    static const struct {
        u32 vcd_bit;
        u32 hw_bit;
    } bit_map[] = {
        { VCD_METADATA_QPARRAY, VIDC_720P_METADATA_ENABLE_QP },
        { VCD_METADATA_CONCEALMB, VIDC_720P_METADATA_ENABLE_CONCEALMB },
        { VCD_METADATA_VC1, VIDC_720P_METADATA_ENABLE_VC1 },
        { VCD_METADATA_SEI, VIDC_720P_METADATA_ENABLE_SEI },
        { VCD_METADATA_VUI, VIDC_720P_METADATA_ENABLE_VUI },
        { VCD_METADATA_ENC_SLICE, VIDC_720P_METADATA_ENABLE_ENCSLICE },
        { VCD_METADATA_PASSTHROUGH,
            VIDC_720P_METADATA_ENABLE_PASSTHROUGH },
    };
    u32 enabled, hw_flags = 0;
    u32 *input_addr;
    unsigned int i;

    if (ddl->decoding) {
        enabled = ddl->codec_data.decoder.meta_data_enable_flag;
        input_addr = ddl->codec_data.decoder.meta_data_input.
            align_physical_addr;
    } else {
        enabled = ddl->codec_data.encoder.meta_data_enable_flag;
        input_addr = ddl->codec_data.encoder.meta_data_input.
            align_physical_addr;
    }

    if (enabled) {
        for (i = 0; i < sizeof(bit_map) / sizeof(bit_map[0]); ++i)
            if (enabled & bit_map[i].vcd_bit)
                hw_flags |= bit_map[i].hw_bit;
    } else {
        input_addr = 0;
    }
    vidc_720p_metadata_enable(hw_flags, input_addr);
}
/*
 * Carve the metadata region out of the tail of the encoder output buffer.
 * Returns the (aligned) end address usable for encoded stream data; when
 * metadata is enabled, the last 'suffix' bytes are reserved for hardware
 * metadata and the suffix/start address pair is written into the metadata
 * input buffer (presumably consumed by firmware -- TODO confirm).
 */
u32 ddl_encode_set_metadata_output_buf(struct ddl_client_context *ddl)
{
    struct ddl_encoder_data *encoder = &ddl->codec_data.encoder;
    u32 *buffer;
    struct vcd_frame_data *stream = &(ddl->output_frame.vcd_frm);
    u32 ext_buffer_end, hw_metadata_start;

    ext_buffer_end = (u32) stream->physical + stream->alloc_len;
    if (!encoder->meta_data_enable_flag) {
        /* no metadata: whole buffer (aligned down) is stream data */
        ext_buffer_end &= ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
        return ext_buffer_end;
    }
    /* metadata occupies the aligned tail of the buffer */
    hw_metadata_start = (ext_buffer_end - encoder->suffix) &
        ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
    ext_buffer_end = (hw_metadata_start - 1) &
        ~(DDL_STREAMBUF_ALIGN_GUARD_BYTES);
    buffer = encoder->meta_data_input.align_virtual_addr;
    *buffer++ = encoder->suffix;
    *buffer = hw_metadata_start;
    encoder->meta_data_offset =
        hw_metadata_start - (u32) stream->physical;
    return ext_buffer_end;
}
/*
 * Program the per-picture metadata destination addresses for the decoder:
 * metadata lives immediately after the YUV data of each decode picture
 * buffer.  Writes the suffix size followed by one physical address per
 * picture buffer into the metadata input buffer.
 */
void ddl_decode_set_metadata_output(struct ddl_decoder_data *decoder)
{
    u32 *buffer;
    u32 loopc;

    if (!decoder->meta_data_enable_flag) {
        decoder->meta_data_offset = 0;
        return;
    }
    /* metadata starts right after the (possibly interlaced) YUV data */
    decoder->meta_data_offset = ddl_get_yuv_buffer_size(
        &decoder->client_frame_size, &decoder->buf_format,
        (!decoder->progressive_only), decoder->codec.codec);
    buffer = decoder->meta_data_input.align_virtual_addr;
    *buffer++ = decoder->suffix;
    for (loopc = 0; loopc < decoder->dp_buf.no_of_dec_pic_buf;
        ++loopc) {
        *buffer++ = (u32) (decoder->meta_data_offset + (u8 *)
            decoder->dp_buf.
            dec_pic_buffers[loopc].vcd_frm.
            physical);
    }
}
/*
 * Post-process an encoded frame: if metadata was produced, flag the output
 * frame as carrying extradata and write a QCOM "filler" record that pads
 * the gap between the end of the bitstream data and the metadata region.
 */
void ddl_process_encoder_metadata(struct ddl_client_context *ddl)
{
    struct ddl_encoder_data *encoder = &(ddl->codec_data.encoder);
    struct vcd_frame_data *out_frame =
        &(ddl->output_frame.vcd_frm);
    u32 *qfiller_hdr, *qfiller, start_addr;
    u32 qfiller_size;

    if (!encoder->meta_data_enable_flag) {
        out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
        return;
    }
    if (!encoder->enc_frame_info.metadata_exists) {
        out_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
        return;
    }
    out_frame->flags |= VCD_FRAME_FLAG_EXTRADATA;
    /* filler starts at the 4-byte-aligned end of the bitstream data */
    start_addr = (u32) ((u8 *) out_frame->virtual +
        out_frame->offset);
    qfiller = (u32 *) ((out_frame->data_len + start_addr + 3) & ~3);
    qfiller_size = (u32) ((encoder->meta_data_offset +
        (u8 *) out_frame->virtual) -
        (u8 *) qfiller);
    qfiller_hdr = ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER);
    /* record layout: total size, version, port, type, payload size */
    *qfiller++ = qfiller_size;
    *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_VERSION_INDEX];
    *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_PORT_INDEX];
    *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_TYPE_INDEX];
    *qfiller = (u32) (qfiller_size - DDL_METADATA_HDR_SIZE);
}
/*
 * Post-process a decoded frame: if metadata was produced, flag the output
 * frame as carrying extradata and, when the YUV data does not reach the
 * metadata offset exactly, write a QCOM "filler" record padding the gap.
 */
void ddl_process_decoder_metadata(struct ddl_client_context *ddl)
{
    struct ddl_decoder_data *decoder = &(ddl->codec_data.decoder);
    struct vcd_frame_data *output_frame =
        &(ddl->output_frame.vcd_frm);
    u32 *qfiller_hdr, *qfiller;
    u32 qfiller_size;

    if (!decoder->meta_data_enable_flag) {
        output_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
        return;
    }
    if (!decoder->dec_disp_info.metadata_exists) {
        output_frame->flags &= ~(VCD_FRAME_FLAG_EXTRADATA);
        return;
    }
    output_frame->flags |= VCD_FRAME_FLAG_EXTRADATA;
    if (output_frame->data_len != decoder->meta_data_offset) {
        /* filler starts at the 4-byte-aligned end of the YUV data */
        qfiller = (u32 *) ((u32) ((output_frame->data_len +
            output_frame->offset +
            (u8 *) output_frame->virtual) +
            3) & ~3);
        qfiller_size = (u32) ((decoder->meta_data_offset +
            (u8 *) output_frame->virtual) -
            (u8 *) qfiller);
        qfiller_hdr =
            ddl_metadata_hdr_entry(ddl, VCD_METADATA_QCOMFILLER);
        /* record layout: total size, version, port, type, payload */
        *qfiller++ = qfiller_size;
        *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_VERSION_INDEX];
        *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_PORT_INDEX];
        *qfiller++ = qfiller_hdr[DDL_METADATA_HDR_TYPE_INDEX];
        *qfiller = (u32) (qfiller_size - DDL_METADATA_HDR_SIZE);
    }
}
| gpl-2.0 |
Kali-/lge-kernel-msm7x30 | sound/pci/lx6464es/lx6464es.c | 906 | 28185 | /* -*- linux-c -*- *
*
* ALSA driver for the digigram lx6464es interface
*
* Copyright (c) 2008, 2009 Tim Blechmann <tim@klingt.org>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 02111-1307, USA.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <sound/initval.h>
#include <sound/control.h>
#include <sound/info.h>
#include "lx6464es.h"
MODULE_AUTHOR("Tim Blechmann");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("digigram lx6464es");
MODULE_SUPPORTED_DEVICE("{digigram lx6464es{}}");
/* standard ALSA per-card module options: index/id/enable triplet */
static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR;
static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP;

module_param_array(index, int, NULL, 0444);
MODULE_PARM_DESC(index, "Index value for Digigram LX6464ES interface.");
module_param_array(id, charp, NULL, 0444);
MODULE_PARM_DESC(id, "ID string for Digigram LX6464ES interface.");
/* NOTE(review): 'enable' is declared int but exposed as bool here --
 * confirm this matches the module_param_array rules of the target kernel */
module_param_array(enable, bool, NULL, 0444);
MODULE_PARM_DESC(enable, "Enable/disable specific Digigram LX6464ES soundcards.");

static const char card_name[] = "LX6464ES";

/* the board is a Digigram-branded PLX 9056 PCI bridge */
#define PCI_DEVICE_ID_PLX_LX6464ES PCI_DEVICE_ID_PLX_9056

/* match on PLX vendor/device plus the Digigram subsystem ids */
static DEFINE_PCI_DEVICE_TABLE(snd_lx6464es_ids) = {
    { PCI_DEVICE(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_LX6464ES),
      .subvendor = PCI_VENDOR_ID_DIGIGRAM,
      .subdevice = PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_SERIAL_SUBSYSTEM
    },            /* LX6464ES */
    { PCI_DEVICE(PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_LX6464ES),
      .subvendor = PCI_VENDOR_ID_DIGIGRAM,
      .subdevice = PCI_SUBDEVICE_ID_DIGIGRAM_LX6464ES_CAE_SERIAL_SUBSYSTEM
    },            /* LX6464ES-CAE */
    { 0, },
};

MODULE_DEVICE_TABLE(pci, snd_lx6464es_ids);

/* PGO for USERo in register pci_0x06/loc_0xEC */
#define CHIPSC_RESET_XILINX (1L<<16)
/* alsa callbacks */
/* capabilities advertised to ALSA: interleaved 16/24-bit PCM, 2-64
 * channels, 8-192 kHz (the actual rate is later constrained to the
 * board clock in lx_pcm_open()) */
static struct snd_pcm_hardware lx_caps = {
    .info = (SNDRV_PCM_INFO_MMAP |
        SNDRV_PCM_INFO_INTERLEAVED |
        SNDRV_PCM_INFO_MMAP_VALID |
        SNDRV_PCM_INFO_SYNC_START),
    .formats = (SNDRV_PCM_FMTBIT_S16_LE |
        SNDRV_PCM_FMTBIT_S16_BE |
        SNDRV_PCM_FMTBIT_S24_3LE |
        SNDRV_PCM_FMTBIT_S24_3BE),
    .rates = (SNDRV_PCM_RATE_CONTINUOUS |
        SNDRV_PCM_RATE_8000_192000),
    .rate_min = 8000,
    .rate_max = 192000,
    .channels_min = 2,
    .channels_max = 64,
    .buffer_bytes_max = 64*2*3*MICROBLAZE_IBL_MAX*MAX_STREAM_BUFFER,
    .period_bytes_min = (2*2*MICROBLAZE_IBL_MIN*2),
    .period_bytes_max = (4*64*MICROBLAZE_IBL_MAX*MAX_STREAM_BUFFER),
    .periods_min = 2,
    .periods_max = MAX_STREAM_BUFFER,
};

/* forward declaration; defined further below */
static int lx_set_granularity(struct lx6464es *chip, u32 gran);
/* Allocate the device pipe for this substream and program the period
 * granularity.  Returns 0 on success or a negative error code. */
static int lx_hardware_open(struct lx6464es *chip,
                struct snd_pcm_substream *substream)
{
    struct snd_pcm_runtime *rt = substream->runtime;
    const int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
    int rc;

    snd_printd(LXP "allocating pipe for %d channels\n", rt->channels);
    rc = lx_pipe_allocate(chip, 0, capture, rt->channels);
    if (rc < 0) {
        snd_printk(KERN_ERR LXP "allocating pipe failed\n");
        return rc;
    }

    rc = lx_set_granularity(chip, rt->period_size);
    if (rc < 0) {
        snd_printk(KERN_ERR LXP "setting granularity to %ld failed\n",
               rt->period_size);
        return rc;
    }

    return 0;
}
/* Program the stream format, start the pipe, and wait until it is
 * actually running.  Returns 0 on success or a negative error code. */
static int lx_hardware_start(struct lx6464es *chip,
                 struct snd_pcm_substream *substream)
{
    struct snd_pcm_runtime *rt = substream->runtime;
    const int capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
    int rc;

    snd_printd(LXP "setting stream format\n");
    rc = lx_stream_set_format(chip, rt, 0, capture);
    if (rc < 0) {
        snd_printk(KERN_ERR LXP "setting stream format failed\n");
        return rc;
    }

    snd_printd(LXP "starting pipe\n");
    rc = lx_pipe_start(chip, 0, capture);
    if (rc < 0) {
        snd_printk(KERN_ERR LXP "starting pipe failed\n");
        return rc;
    }

    snd_printd(LXP "waiting for pipe to start\n");
    rc = lx_pipe_wait_for_start(chip, 0, capture);
    if (rc < 0)
        snd_printk(KERN_ERR LXP "waiting for pipe failed\n");

    return rc;
}
/* Pause the pipe, wait for it to drain to idle, then stop it.
 * Returns 0 on success or a negative error code. */
static int lx_hardware_stop(struct lx6464es *chip,
                struct snd_pcm_substream *substream)
{
    int err = 0;
    int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);

    snd_printd(LXP "pausing pipe\n");
    err = lx_pipe_pause(chip, 0, is_capture);
    if (err < 0) {
        snd_printk(KERN_ERR LXP "pausing pipe failed\n");
        return err;
    }

    snd_printd(LXP "waiting for pipe to become idle\n");
    err = lx_pipe_wait_for_idle(chip, 0, is_capture);
    if (err < 0) {
        snd_printk(KERN_ERR LXP "waiting for pipe failed\n");
        return err;
    }

    snd_printd(LXP "stopping pipe\n");
    err = lx_pipe_stop(chip, 0, is_capture);
    if (err < 0) {
        /* fix: error print was missing its KERN_ERR level,
         * unlike every sibling error message in this file */
        snd_printk(KERN_ERR LXP "stopping pipe failed\n");
        return err;
    }

    return err;
}
static int lx_hardware_close(struct lx6464es *chip,
struct snd_pcm_substream *substream)
{
int err = 0;
int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
snd_printd(LXP "releasing pipe\n");
err = lx_pipe_release(chip, 0, is_capture);
if (err < 0) {
snd_printk(LXP "releasing pipe failed\n");
return err;
}
return err;
}
/*
 * PCM open callback: install the hardware caps and constrain the runtime
 * to the board's fixed clock rate and the MicroBlaze period-size range.
 * Returns 0 on success or a negative error code.
 */
static int lx_pcm_open(struct snd_pcm_substream *substream)
{
    struct lx6464es *chip = snd_pcm_substream_chip(substream);
    struct snd_pcm_runtime *runtime = substream->runtime;
    int err = 0;
    int board_rate;

    snd_printdd("->lx_pcm_open\n");
    mutex_lock(&chip->setup_mutex);

    /* copy the struct snd_pcm_hardware struct */
    runtime->hw = lx_caps;

#if 0
    /* buffer-size should better be multiple of period-size */
    err = snd_pcm_hw_constraint_integer(runtime,
                        SNDRV_PCM_HW_PARAM_PERIODS);
    if (err < 0) {
        snd_printk(KERN_WARNING LXP "could not constrain periods\n");
        goto exit;
    }
#endif

    /* the clock rate cannot be changed: pin the rate to the board's
     * current sample rate */
    board_rate = chip->board_sample_rate;
    err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_RATE,
                       board_rate, board_rate);
    if (err < 0) {
        snd_printk(KERN_WARNING LXP "could not constrain periods\n");
        goto exit;
    }

    /* constrain period size to what the MicroBlaze firmware supports */
    err = snd_pcm_hw_constraint_minmax(runtime,
                       SNDRV_PCM_HW_PARAM_PERIOD_SIZE,
                       MICROBLAZE_IBL_MIN,
                       MICROBLAZE_IBL_MAX);
    if (err < 0) {
        snd_printk(KERN_WARNING LXP
               "could not constrain period size\n");
        goto exit;
    }

    snd_pcm_hw_constraint_step(runtime, 0,
                   SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 32);

    snd_pcm_set_sync(substream);
    err = 0;

exit:
    /* note: private_data is set even on the error paths above */
    runtime->private_data = chip;

    mutex_unlock(&chip->setup_mutex);
    snd_printdd("<-lx_pcm_open, %d\n", err);
    return err;
}
/* PCM close callback: nothing to release here -- hardware teardown is
 * done in hw_free/prepare. */
static int lx_pcm_close(struct snd_pcm_substream *substream)
{
    snd_printdd("->lx_pcm_close\n");
    return 0;
}
/* Report the current hardware position in frames, derived from the
 * per-stream period counter maintained by the interrupt path. */
static snd_pcm_uframes_t lx_pcm_stream_pointer(struct snd_pcm_substream
                           *substream)
{
    struct lx6464es *chip = snd_pcm_substream_chip(substream);
    const int capture =
        (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
    struct lx_stream *stream = capture ? &chip->capture_stream :
        &chip->playback_stream;
    snd_pcm_uframes_t position;
    unsigned long irq_flags;

    snd_printdd("->lx_pcm_stream_pointer\n");

    spin_lock_irqsave(&chip->lock, irq_flags);
    position = stream->frame_pos * substream->runtime->period_size;
    spin_unlock_irqrestore(&chip->lock, irq_flags);

    snd_printdd(LXP "stream_pointer at %ld\n", position);
    return position;
}
/*
 * PCM prepare callback: tear down any previously running hardware stream
 * for this direction, then (re)open and start the pipe; finally record
 * the runtime rate as the new board sample rate.
 * Returns 0 on success or a negative error code.
 */
static int lx_pcm_prepare(struct snd_pcm_substream *substream)
{
    struct lx6464es *chip = snd_pcm_substream_chip(substream);
    int err = 0;
    const int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);

    snd_printdd("->lx_pcm_prepare\n");

    mutex_lock(&chip->setup_mutex);

    /* prepare may be called repeatedly: stop/close a stream that is
     * still running from a previous prepare */
    if (chip->hardware_running[is_capture]) {
        err = lx_hardware_stop(chip, substream);
        if (err < 0) {
            snd_printk(KERN_ERR LXP "failed to stop hardware. "
                   "Error code %d\n", err);
            goto exit;
        }

        err = lx_hardware_close(chip, substream);
        if (err < 0) {
            snd_printk(KERN_ERR LXP "failed to close hardware. "
                   "Error code %d\n", err);
            goto exit;
        }
    }

    snd_printd(LXP "opening hardware\n");
    err = lx_hardware_open(chip, substream);
    if (err < 0) {
        snd_printk(KERN_ERR LXP "failed to open hardware. "
               "Error code %d\n", err);
        goto exit;
    }

    err = lx_hardware_start(chip, substream);
    if (err < 0) {
        snd_printk(KERN_ERR LXP "failed to start hardware. "
               "Error code %d\n", err);
        goto exit;
    }

    chip->hardware_running[is_capture] = 1;

    /* err is necessarily 0 at this point, so the inner test always
     * takes the assignment -- NOTE(review): confirm intent */
    if (chip->board_sample_rate != substream->runtime->rate) {
        if (!err)
            chip->board_sample_rate = substream->runtime->rate;
    }

exit:
    mutex_unlock(&chip->setup_mutex);
    return err;
}
/*
 * Common hw_params implementation for both directions: allocate the DMA
 * buffer and remember which substream owns this direction.
 * Returns the result of the page allocation.
 */
static int lx_pcm_hw_params(struct snd_pcm_substream *substream,
                struct snd_pcm_hw_params *hw_params, int is_capture)
{
    struct lx6464es *chip = snd_pcm_substream_chip(substream);
    int err = 0;

    snd_printdd("->lx_pcm_hw_params\n");

    mutex_lock(&chip->setup_mutex);

    /* set dma buffer */
    err = snd_pcm_lib_malloc_pages(substream,
                       params_buffer_bytes(hw_params));

    /* NOTE(review): the substream pointer is recorded even when the
     * allocation above failed -- confirm this is intentional */
    if (is_capture)
        chip->capture_stream.stream = substream;
    else
        chip->playback_stream.stream = substream;

    mutex_unlock(&chip->setup_mutex);
    return err;
}
/* Playback hw_params callback: thin wrapper over lx_pcm_hw_params(). */
static int lx_pcm_hw_params_playback(struct snd_pcm_substream *substream,
                     struct snd_pcm_hw_params *hw_params)
{
    return lx_pcm_hw_params(substream, hw_params, 0);
}

/* Capture hw_params callback: thin wrapper over lx_pcm_hw_params(). */
static int lx_pcm_hw_params_capture(struct snd_pcm_substream *substream,
                    struct snd_pcm_hw_params *hw_params)
{
    return lx_pcm_hw_params(substream, hw_params, 1);
}
static int lx_pcm_hw_free(struct snd_pcm_substream *substream)
{
struct lx6464es *chip = snd_pcm_substream_chip(substream);
int err = 0;
int is_capture = (substream->stream == SNDRV_PCM_STREAM_CAPTURE);
snd_printdd("->lx_pcm_hw_free\n");
mutex_lock(&chip->setup_mutex);
if (chip->hardware_running[is_capture]) {
err = lx_hardware_stop(chip, substream);
if (err < 0) {
snd_printk(KERN_ERR LXP "failed to stop hardware. "
"Error code %d\n", err);
goto exit;
}
err = lx_hardware_close(chip, substream);
if (err < 0) {
snd_printk(KERN_ERR LXP "failed to close hardware. "
"Error code %d\n", err);
goto exit;
}
chip->hardware_running[is_capture] = 0;
}
err = snd_pcm_lib_free_pages(substream);
if (is_capture)
chip->capture_stream.stream = 0;
else
chip->playback_stream.stream = 0;
exit:
mutex_unlock(&chip->setup_mutex);
return err;
}
/*
 * Queue every period buffer of the substream to the hardware and start
 * the stream (runs from the trigger tasklet, under chip->lock).
 * On success the stream status becomes RUNNING and the period counter
 * is reset.
 */
static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
{
    struct snd_pcm_substream *substream = lx_stream->stream;
    const int is_capture = lx_stream->is_capture;
    int err;

    const u32 channels = substream->runtime->channels;
    const u32 bytes_per_frame = channels * 3;       /* 24-bit samples */
    const u32 period_size = substream->runtime->period_size;
    const u32 periods = substream->runtime->periods;
    const u32 period_bytes = period_size * bytes_per_frame;

    dma_addr_t buf = substream->dma_buffer.addr;
    int i;

    u32 needed, freed;
    u32 size_array[5];

    /* hand one buffer per period to the hardware.
     * NOTE(review): the return values of lx_buffer_ask/lx_buffer_give
     * are only logged, never acted upon -- confirm this is acceptable */
    for (i = 0; i != periods; ++i) {
        u32 buffer_index = 0;

        err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed,
                    size_array);
        snd_printdd(LXP "starting: needed %d, freed %d\n",
                needed, freed);

        err = lx_buffer_give(chip, 0, is_capture, period_bytes,
                     lower_32_bits(buf), upper_32_bits(buf),
                     &buffer_index);

        snd_printdd(LXP "starting: buffer index %x on %p (%d bytes)\n",
                buffer_index, (void *)buf, period_bytes);
        buf += period_bytes;
    }

    err = lx_buffer_ask(chip, 0, is_capture, &needed, &freed, size_array);
    snd_printdd(LXP "starting: needed %d, freed %d\n", needed, freed);

    snd_printd(LXP "starting: starting stream\n");
    err = lx_stream_start(chip, 0, is_capture);
    if (err < 0)
        snd_printk(KERN_ERR LXP "couldn't start stream\n");
    else
        lx_stream->status = LX_STREAM_STATUS_RUNNING;

    lx_stream->frame_pos = 0;
}
/* Stop the hardware stream; on success mark the stream FREE (runs from
 * the trigger tasklet, under chip->lock). */
static void lx_trigger_stop(struct lx6464es *chip, struct lx_stream *lx_stream)
{
    int rc;

    snd_printd(LXP "stopping: stopping stream\n");
    rc = lx_stream_stop(chip, 0, lx_stream->is_capture);
    if (rc < 0) {
        snd_printk(KERN_ERR LXP "couldn't stop stream\n");
        return;
    }
    lx_stream->status = LX_STREAM_STATUS_FREE;
}
/* Execute a pending trigger request (scheduled run or stop) for one
 * stream; any other status is left untouched. */
static void lx_trigger_tasklet_dispatch_stream(struct lx6464es *chip,
                           struct lx_stream *lx_stream)
{
    if (lx_stream->status == LX_STREAM_STATUS_SCHEDULE_RUN)
        lx_trigger_start(chip, lx_stream);
    else if (lx_stream->status == LX_STREAM_STATUS_SCHEDULE_STOP)
        lx_trigger_stop(chip, lx_stream);
}
/* Trigger tasklet: apply pending start/stop requests for both streams
 * while holding the chip spinlock. */
static void lx_trigger_tasklet(unsigned long data)
{
    struct lx6464es *card = (struct lx6464es *)data;
    unsigned long irq_flags;

    snd_printdd("->lx_trigger_tasklet\n");

    spin_lock_irqsave(&card->lock, irq_flags);
    lx_trigger_tasklet_dispatch_stream(card, &card->capture_stream);
    lx_trigger_tasklet_dispatch_stream(card, &card->playback_stream);
    spin_unlock_irqrestore(&card->lock, irq_flags);
}
/* Record the requested trigger as the stream's scheduled status and let
 * the tasklet perform the actual hardware work.  Returns 0 or -EINVAL
 * for unsupported trigger commands. */
static int lx_pcm_trigger_dispatch(struct lx6464es *chip,
                   struct lx_stream *lx_stream, int cmd)
{
    switch (cmd) {
    case SNDRV_PCM_TRIGGER_START:
        lx_stream->status = LX_STREAM_STATUS_SCHEDULE_RUN;
        break;

    case SNDRV_PCM_TRIGGER_STOP:
        lx_stream->status = LX_STREAM_STATUS_SCHEDULE_STOP;
        break;

    default:
        return -EINVAL;
    }

    tasklet_schedule(&chip->trigger_tasklet);
    return 0;
}
/* PCM trigger callback: route the command to the stream for this
 * direction. */
static int lx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
    struct lx6464es *chip = snd_pcm_substream_chip(substream);
    struct lx_stream *stream;

    snd_printdd("->lx_pcm_trigger\n");

    if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
        stream = &chip->capture_stream;
    else
        stream = &chip->playback_stream;

    return lx_pcm_trigger_dispatch(chip, stream, cmd);
}
/*
 * Release all chip resources in reverse order of acquisition: IRQ,
 * MMIO/IO mappings, PCI regions/device, then the chip struct itself.
 * Always returns 0.
 */
static int snd_lx6464es_free(struct lx6464es *chip)
{
    snd_printdd("->snd_lx6464es_free\n");

    lx_irq_disable(chip);

    if (chip->irq >= 0)
        free_irq(chip->irq, chip);

    iounmap(chip->port_dsp_bar);
    ioport_unmap(chip->port_plx_remapped);

    pci_release_regions(chip->pci);
    pci_disable_device(chip->pci);

    kfree(chip);

    return 0;
}
/* snd_device dev_free hook: forward to snd_lx6464es_free(). */
static int snd_lx6464es_dev_free(struct snd_device *device)
{
    return snd_lx6464es_free(device->device_data);
}
/* reset the dsp during initialization */
/*
 * Pulse the Xilinx reset line through the PLX CHIPSC register, poll
 * MBOX3 until the FPGA reports it is up, then clear the CSM register.
 * Always returns 0 (the poll timeout is not treated as an error).
 */
static int __devinit lx_init_xilinx_reset(struct lx6464es *chip)
{
    int i;
    u32 plx_reg = lx_plx_reg_read(chip, ePLX_CHIPSC);

    snd_printdd("->lx_init_xilinx_reset\n");

    /* activate reset of xilinx */
    plx_reg &= ~CHIPSC_RESET_XILINX;
    lx_plx_reg_write(chip, ePLX_CHIPSC, plx_reg);
    msleep(1);

    lx_plx_reg_write(chip, ePLX_MBOX3, 0);
    msleep(1);

    plx_reg |= CHIPSC_RESET_XILINX;
    lx_plx_reg_write(chip, ePLX_CHIPSC, plx_reg);

    /* deactivate reset of xilinx: poll MBOX3 for a non-zero value
     * signalling the FPGA has come up */
    for (i = 0; i != 100; ++i) {
        u32 reg_mbox3;
        msleep(10);
        reg_mbox3 = lx_plx_reg_read(chip, ePLX_MBOX3);
        if (reg_mbox3) {
            snd_printd(LXP "xilinx reset done\n");
            snd_printdd(LXP "xilinx took %d loops\n", i);
            break;
        }
    }

    /* todo: add some error handling? */

    /* clear mr */
    lx_dsp_reg_write(chip, eReg_CSM, 0);

    /* the ES xilinx may not be ready yet; wait for it */
    msleep(600);

    return 0;
}
/*
 * Verify register access to the Xilinx/MicroBlaze: Reg_CSM must read
 * back 0 after being cleared; on failure retry once after re-enabling
 * the PLX remap window.  Returns 0 on success, -EAGAIN otherwise.
 */
static int __devinit lx_init_xilinx_test(struct lx6464es *chip)
{
    u32 reg;

    snd_printdd("->lx_init_xilinx_test\n");

    /* TEST if we have access to Xilinx/MicroBlaze */
    lx_dsp_reg_write(chip, eReg_CSM, 0);

    reg = lx_dsp_reg_read(chip, eReg_CSM);

    if (reg) {
        snd_printk(KERN_ERR LXP "Problem: Reg_CSM %x.\n", reg);

        /* PCI9056_SPACE0_REMAP */
        lx_plx_reg_write(chip, ePLX_PCICR, 1);

        reg = lx_dsp_reg_read(chip, eReg_CSM);
        if (reg) {
            snd_printk(KERN_ERR LXP "Error: Reg_CSM %x.\n", reg);
            return -EAGAIN; /* seems to be appropriate */
        }
    }

    snd_printd(LXP "Xilinx/MicroBlaze access test successful\n");

    return 0;
}
/* initialize ethersound */
/*
 * Configure the EtherSound interface for 64 inputs/outputs in single
 * frequency-ratio mode and wait (up to ~1s) for the ES part to signal
 * readiness in Reg_CSES.  Returns 0 on success, -ETIMEDOUT otherwise.
 */
static int __devinit lx_init_ethersound_config(struct lx6464es *chip)
{
    int i;
    u32 orig_conf_es = lx_dsp_reg_read(chip, eReg_CONFES);

    /* configure 64 io channels */
    u32 conf_es = (orig_conf_es & CONFES_READ_PART_MASK) |
        (64 << IOCR_INPUTS_OFFSET) |
        (64 << IOCR_OUTPUTS_OFFSET) |
        (FREQ_RATIO_SINGLE_MODE << FREQ_RATIO_OFFSET);

    snd_printdd("->lx_init_ethersound\n");

    chip->freq_ratio = FREQ_RATIO_SINGLE_MODE;

    /*
     * write it to the card !
     * this actually kicks the ES xilinx, the first time since poweron.
     * the MAC address in the Reg_ADMACESMSB  Reg_ADMACESLSB registers
     * is not ready before this is done, and the bit 2 in Reg_CSES is set.
     * */
    lx_dsp_reg_write(chip, eReg_CONFES, conf_es);

    /* poll bit 2 of Reg_CSES for "ethersound ready" */
    for (i = 0; i != 1000; ++i) {
        if (lx_dsp_reg_read(chip, eReg_CSES) & 4) {
            snd_printd(LXP "ethersound initialized after %dms\n",
                   i);
            goto ethersound_initialized;
        }
        msleep(1);
    }
    snd_printk(KERN_WARNING LXP
           "ethersound could not be initialized after %dms\n", i);
    return -ETIMEDOUT;

 ethersound_initialized:
    snd_printd(LXP "ethersound initialized\n");
    return 0;
}
/*
 * Query the DSP firmware version and the EtherSound clock frequency;
 * record the frequency as the board sample rate.
 * Returns 0 on success, -EAGAIN when the DSP does not answer.
 *
 * Fix: 'freq' was printed even when lx_dsp_get_clock_frequency() failed,
 * reading an uninitialized variable (undefined behavior).  The print now
 * happens only on success.
 */
static int __devinit lx_init_get_version_features(struct lx6464es *chip)
{
    u32 dsp_version;
    int err;

    snd_printdd("->lx_init_get_version_features\n");

    err = lx_dsp_get_version(chip, &dsp_version);

    if (err == 0) {
        u32 freq;

        snd_printk(LXP "DSP version: V%02d.%02d #%d\n",
               (dsp_version>>16) & 0xff, (dsp_version>>8) & 0xff,
               dsp_version & 0xff);

        /* later: what firmware version do we expect? */

        /* retrieve Play/Rec features */
        /* done here because we may have to handle alternate
         * DSP files. */
        /* later */

        /* init the EtherSound sample rate */
        err = lx_dsp_get_clock_frequency(chip, &freq);
        if (err == 0) {
            chip->board_sample_rate = freq;
            snd_printd(LXP "actual clock frequency %d\n", freq);
        }
    } else {
        snd_printk(KERN_ERR LXP "DSP corrupted \n");
        err = -EAGAIN;
    }

    return err;
}
/*
 * Program the board's period granularity: snap the requested value up to
 * the next power of two within [MICROBLAZE_IBL_MIN, MICROBLAZE_IBL_MAX].
 * No-op when the granularity is already set.  Returns 0 on success,
 * -EAGAIN when the DSP rejects the value.
 */
static int lx_set_granularity(struct lx6464es *chip, u32 gran)
{
    int err = 0;
    u32 snapped_gran = MICROBLAZE_IBL_MIN;

    snd_printdd("->lx_set_granularity\n");

    /* blocksize is a power of 2 */
    while ((snapped_gran < gran) &&
           (snapped_gran < MICROBLAZE_IBL_MAX)) {
        snapped_gran *= 2;
    }

    if (snapped_gran == chip->pcm_granularity)
        return 0;

    err = lx_dsp_set_granularity(chip, snapped_gran);
    if (err < 0) {
        snd_printk(KERN_WARNING LXP "could not set granularity\n");
        err = -EAGAIN;
    }

    if (snapped_gran != gran)
        snd_printk(LXP "snapped blocksize to %d\n", snapped_gran);

    snd_printd(LXP "set blocksize on board %d\n", snapped_gran);
    /* note: cached even when the DSP call failed above */
    chip->pcm_granularity = snapped_gran;

    return err;
}
/* initialize and test the xilinx dsp chip */
/*
 * Full DSP bring-up sequence: reset and test the Xilinx, configure
 * EtherSound, enable interrupts, wait for the MAC address to become
 * valid, read version/clock info, and set the default granularity.
 * Returns 0 on success or a negative error code.
 */
static int __devinit lx_init_dsp(struct lx6464es *chip)
{
    int err;
    u8 mac_address[6];
    int i;

    snd_printdd("->lx_init_dsp\n");

    snd_printd(LXP "initialize board\n");
    err = lx_init_xilinx_reset(chip);
    if (err)
        return err;

    snd_printd(LXP "testing board\n");
    err = lx_init_xilinx_test(chip);
    if (err)
        return err;

    snd_printd(LXP "initialize ethersound configuration\n");
    err = lx_init_ethersound_config(chip);
    if (err)
        return err;

    lx_irq_enable(chip);

    /** \todo the mac address should be ready by now, but it isn't,
     *  so we wait for it */
    for (i = 0; i != 1000; ++i) {
        err = lx_dsp_get_mac(chip, mac_address);
        if (err)
            return err;
        if (mac_address[0] || mac_address[1] || mac_address[2] ||
            mac_address[3] || mac_address[4] || mac_address[5])
            goto mac_ready;
        msleep(1);
    }
    return -ETIMEDOUT;

mac_ready:
    snd_printd(LXP "mac address ready read after: %dms\n", i);
    snd_printk(LXP "mac address: %02X.%02X.%02X.%02X.%02X.%02X\n",
           mac_address[0], mac_address[1], mac_address[2],
           mac_address[3], mac_address[4], mac_address[5]);

    err = lx_init_get_version_features(chip);
    if (err)
        return err;

    /* NOTE(review): return value deliberately ignored? confirm */
    lx_set_granularity(chip, MICROBLAZE_IBL_DEFAULT);

    chip->playback_mute = 0;

    return err;
}
/* PCM operation tables: playback and capture share every callback except
 * hw_params, which records the direction. */
static struct snd_pcm_ops lx_ops_playback = {
    .open      = lx_pcm_open,
    .close     = lx_pcm_close,
    .ioctl     = snd_pcm_lib_ioctl,
    .prepare   = lx_pcm_prepare,
    .hw_params = lx_pcm_hw_params_playback,
    .hw_free   = lx_pcm_hw_free,
    .trigger   = lx_pcm_trigger,
    .pointer   = lx_pcm_stream_pointer,
};

static struct snd_pcm_ops lx_ops_capture = {
    .open      = lx_pcm_open,
    .close     = lx_pcm_close,
    .ioctl     = snd_pcm_lib_ioctl,
    .prepare   = lx_pcm_prepare,
    .hw_params = lx_pcm_hw_params_capture,
    .hw_free   = lx_pcm_hw_free,
    .trigger   = lx_pcm_trigger,
    .pointer   = lx_pcm_stream_pointer,
};
/*
 * Create the PCM device (one playback and one capture substream) and
 * preallocate the DMA buffers.  Returns 0 on success or a negative
 * error code.
 *
 * Fix: the return value of snd_pcm_new() was not checked before 'pcm'
 * was dereferenced, so an allocation failure dereferenced an invalid
 * pointer.
 */
static int __devinit lx_pcm_create(struct lx6464es *chip)
{
    int err;
    struct snd_pcm *pcm;

    u32 size = 64 *              /* channels */
        3 *                  /* 24 bit samples */
        MAX_STREAM_BUFFER *  /* periods */
        MICROBLAZE_IBL_MAX * /* frames per period */
        2;                   /* duplex */

    size = PAGE_ALIGN(size);

    /* hardcoded device name & channel count */
    err = snd_pcm_new(chip->card, (char *)card_name, 0,
              1, 1, &pcm);
    if (err < 0)
        return err;

    pcm->private_data = chip;

    snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &lx_ops_playback);
    snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &lx_ops_capture);

    pcm->info_flags = 0;
    strcpy(pcm->name, card_name);

    err = snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV,
                            snd_dma_pci_data(chip->pci),
                            size, size);
    if (err < 0)
        return err;

    chip->pcm = pcm;
    chip->capture_stream.is_capture = 1;

    return 0;
}
/* Mixer info callback: describe the mute control as a single boolean. */
static int lx_control_playback_info(struct snd_kcontrol *kctl,
                    struct snd_ctl_elem_info *info)
{
    info->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
    info->count = 1;
    info->value.integer.min = 0;
    info->value.integer.max = 1;
    return 0;
}
/* Mixer get callback: report the cached playback mute state. */
static int lx_control_playback_get(struct snd_kcontrol *kctl,
                   struct snd_ctl_elem_value *value)
{
    struct lx6464es *card = snd_kcontrol_chip(kctl);

    value->value.integer.value[0] = card->playback_mute;
    return 0;
}
/*
 * Mixer put callback: when the requested value differs from the cached
 * state, toggle the hardware mute and the cache.  Returns 1 when the
 * control changed, 0 otherwise.
 * NOTE(review): the new state is the *toggled* cached value, not the
 * user-supplied value directly -- equivalent only while both stay 0/1;
 * confirm.
 */
static int lx_control_playback_put(struct snd_kcontrol *kcontrol,
                   struct snd_ctl_elem_value *ucontrol)
{
    struct lx6464es *chip = snd_kcontrol_chip(kcontrol);
    int changed = 0;
    int current_value = chip->playback_mute;

    if (current_value != ucontrol->value.integer.value[0]) {
        lx_level_unmute(chip, 0, !current_value);
        chip->playback_mute = !current_value;
        changed = 1;
    }
    return changed;
}
/* Mixer control definition for the PCM playback mute switch. */
static struct snd_kcontrol_new lx_control_playback_switch __devinitdata = {
    .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
    .name = "PCM Playback Switch",
    .index = 0,
    .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
    .private_value = 0,
    .info = lx_control_playback_info,
    .get = lx_control_playback_get,
    .put = lx_control_playback_put
};
/* Print an 8x8 grid of 32-bit peak levels into the proc buffer. */
static void lx_proc_dump_levels(struct snd_info_buffer *buffer,
                const u32 *levels)
{
    int row, col;

    for (row = 0; row < 8; ++row) {
        for (col = 0; col < 8; ++col)
            snd_iprintf(buffer, "%08x ", levels[row * 8 + col]);
        snd_iprintf(buffer, "\n");
    }
}

/* proc read handler: dump the capture and playback peak levels. */
static void lx_proc_levels_read(struct snd_info_entry *entry,
                struct snd_info_buffer *buffer)
{
    u32 levels[64];
    struct lx6464es *chip = entry->private_data;

    snd_iprintf(buffer, "capture levels:\n");
    if (lx_level_peaks(chip, 1, 64, levels) < 0)
        return;
    lx_proc_dump_levels(buffer, levels);

    snd_iprintf(buffer, "\nplayback levels:\n");
    if (lx_level_peaks(chip, 0, 64, levels) < 0)
        return;
    lx_proc_dump_levels(buffer, levels);

    snd_iprintf(buffer, "\n");
}
/* Register the "levels" proc entry for this card.  Returns 0 on success
 * or a negative error code. */
static int __devinit lx_proc_create(struct snd_card *card, struct lx6464es *chip)
{
    struct snd_info_entry *entry;
    int rc;

    rc = snd_card_proc_new(card, "levels", &entry);
    if (rc < 0)
        return rc;

    snd_info_set_text_ops(entry, chip, lx_proc_levels_read);
    return 0;
}
/*
 * Allocate and initialize the lx6464es chip instance for @card.
 *
 * Enables the PCI device, restricts busmaster DMA to 32 bits, maps the
 * PLX (BAR1) and DSP (BAR2) regions, grabs the shared interrupt, boots
 * the DSP and registers PCM, proc and mixer interfaces.  On success
 * *rchip points at the new chip.  Returns 0 or a negative errno.
 */
static int __devinit snd_lx6464es_create(struct snd_card *card,
					 struct pci_dev *pci,
					 struct lx6464es **rchip)
{
	struct lx6464es *chip;
	int err;

	static struct snd_device_ops ops = {
		.dev_free = snd_lx6464es_dev_free,
	};

	snd_printdd("->snd_lx6464es_create\n");

	*rchip = NULL;

	/* enable PCI device */
	err = pci_enable_device(pci);
	if (err < 0)
		return err;
	pci_set_master(pci);

	/* check if we can restrict PCI DMA transfers to 32 bits */
	err = pci_set_dma_mask(pci, DMA_BIT_MASK(32));
	if (err < 0) {
		snd_printk(KERN_ERR "architecture does not support "
			   "32bit PCI busmaster DMA\n");
		pci_disable_device(pci);
		return -ENXIO;
	}

	chip = kzalloc(sizeof(*chip), GFP_KERNEL);
	if (chip == NULL) {
		err = -ENOMEM;
		goto alloc_failed;
	}

	chip->card = card;
	chip->pci = pci;
	chip->irq = -1;		/* no IRQ grabbed yet */

	/* initialize synchronization structs */
	spin_lock_init(&chip->lock);
	spin_lock_init(&chip->msg_lock);
	mutex_init(&chip->setup_mutex);
	tasklet_init(&chip->trigger_tasklet, lx_trigger_tasklet,
		     (unsigned long)chip);
	tasklet_init(&chip->tasklet_capture, lx_tasklet_capture,
		     (unsigned long)chip);
	tasklet_init(&chip->tasklet_playback, lx_tasklet_playback,
		     (unsigned long)chip);

	/* request resources */
	err = pci_request_regions(pci, card_name);
	if (err < 0)
		goto request_regions_failed;

	/* plx port */
	chip->port_plx = pci_resource_start(pci, 1);
	chip->port_plx_remapped = ioport_map(chip->port_plx,
					     pci_resource_len(pci, 1));

	/* dsp port */
	chip->port_dsp_bar = pci_ioremap_bar(pci, 2);

	err = request_irq(pci->irq, lx_interrupt, IRQF_SHARED,
			  card_name, chip);
	if (err) {
		snd_printk(KERN_ERR LXP "unable to grab IRQ %d\n", pci->irq);
		goto request_irq_failed;
	}
	chip->irq = pci->irq;

	err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
	if (err < 0)
		goto device_new_failed;

	/*
	 * NOTE(review): from this point errors return directly instead of
	 * unwinding; presumably cleanup is left to the caller's
	 * snd_card_free() via the snd_device ->dev_free hook registered
	 * above -- confirm against snd_lx6464es_dev_free.
	 */
	err = lx_init_dsp(chip);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "error during DSP initialization\n");
		return err;
	}

	err = lx_pcm_create(chip);
	if (err < 0)
		return err;

	err = lx_proc_create(card, chip);
	if (err < 0)
		return err;

	err = snd_ctl_add(card, snd_ctl_new1(&lx_control_playback_switch,
					     chip));
	if (err < 0)
		return err;

	snd_card_set_dev(card, &pci->dev);

	*rchip = chip;
	return 0;

	/* error unwind ladder: each label undoes one acquisition above */
device_new_failed:
	free_irq(pci->irq, chip);

request_irq_failed:
	pci_release_regions(pci);

request_regions_failed:
	kfree(chip);

alloc_failed:
	pci_disable_device(pci);

	return err;
}
/*
 * PCI probe: create an ALSA card for the next free slot, initialize the
 * chip via snd_lx6464es_create(), fill in the card identity strings and
 * register the card.  On any failure the card (and, through its
 * snd_device, the chip) is released with snd_card_free().
 */
static int __devinit snd_lx6464es_probe(struct pci_dev *pci,
					const struct pci_device_id *pci_id)
{
	static int dev;		/* next card slot; persists across probes */
	struct snd_card *card;
	struct lx6464es *chip;
	int err;

	snd_printdd("->snd_lx6464es_probe\n");

	if (dev >= SNDRV_CARDS)
		return -ENODEV;
	if (!enable[dev]) {
		/* slot disabled via module parameter: skip it */
		dev++;
		return -ENOENT;
	}

	err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card);
	if (err < 0)
		return err;

	err = snd_lx6464es_create(card, pci, &chip);
	if (err < 0) {
		snd_printk(KERN_ERR LXP "error during snd_lx6464es_create\n");
		goto out_free;
	}

	strcpy(card->driver, "lx6464es");
	strcpy(card->shortname, "Digigram LX6464ES");
	sprintf(card->longname, "%s at 0x%lx, 0x%p, irq %i",
		card->shortname, chip->port_plx,
		chip->port_dsp_bar, chip->irq);

	err = snd_card_register(card);
	if (err < 0)
		goto out_free;

	snd_printdd(LXP "initialization successful\n");
	pci_set_drvdata(pci, card);
	dev++;
	return 0;

out_free:
	snd_card_free(card);
	return err;
}
/* PCI remove: release the card and clear the driver data pointer. */
static void __devexit snd_lx6464es_remove(struct pci_dev *pci)
{
	struct snd_card *card = pci_get_drvdata(pci);

	snd_card_free(card);
	pci_set_drvdata(pci, NULL);
}
/* PCI driver glue: id table plus probe/remove callbacks. */
static struct pci_driver driver = {
	.name = "Digigram LX6464ES",
	.id_table = snd_lx6464es_ids,
	.probe = snd_lx6464es_probe,
	.remove = __devexit_p(snd_lx6464es_remove),
};
/* module initialization */
static int __init mod_init(void)
{
	/* register the PCI driver; probe runs per matching device */
	return pci_register_driver(&driver);
}

static void __exit mod_exit(void)
{
	pci_unregister_driver(&driver);
}

module_init(mod_init);
module_exit(mod_exit);
| gpl-2.0 |
KaSt/Kappa34 | drivers/usb/dwc3/ep0.c | 1674 | 21271 | /**
* ep0.c - DesignWare USB3 DRD Controller Endpoint 0 Handling
*
* Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
*
* Authors: Felipe Balbi <balbi@ti.com>,
* Sebastian Andrzej Siewior <bigeasy@linutronix.de>
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The names of the above-listed copyright holders may not be used
* to endorse or promote products derived from this software without
* specific prior written permission.
*
* ALTERNATIVELY, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2, as published by the Free
* Software Foundation.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
* IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
* THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
* NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/composite.h>
#include "core.h"
#include "gadget.h"
#include "io.h"
static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum);
/* Human-readable name for an EP0 state, used in debug prints only. */
static const char *dwc3_ep0_state_string(enum dwc3_ep0_state state)
{
	if (state == EP0_UNCONNECTED)
		return "Unconnected";
	if (state == EP0_SETUP_PHASE)
		return "Setup Phase";
	if (state == EP0_DATA_PHASE)
		return "Data Phase";
	if (state == EP0_STATUS_PHASE)
		return "Status Phase";

	return "UNKNOWN";
}
/*
 * dwc3_ep0_start_trans - program the shared EP0 TRB and start a transfer
 * @dwc: controller context
 * @epnum: physical endpoint number (0 = ep0-out, 1 = ep0-in)
 * @buf_dma: DMA address of the payload buffer
 * @len: transfer length in bytes
 * @type: TRB control type (setup/data/status)
 *
 * Returns 0 on success (also when the endpoint is still busy, in which
 * case nothing is started) or a negative error code if the
 * START_TRANSFER command fails.
 *
 * BUGFIX: the source had "&params" mangled into the mis-encoded
 * character sequence "(pilcrow)ms" (HTML "&para;" entity damage) in the
 * memset() and command-submission calls; restored, the code now
 * compiles again.
 */
static int dwc3_ep0_start_trans(struct dwc3 *dwc, u8 epnum, dma_addr_t buf_dma,
		u32 len, u32 type)
{
	struct dwc3_gadget_ep_cmd_params params;
	struct dwc3_trb *trb;
	struct dwc3_ep *dep;
	int ret;

	dep = dwc->eps[epnum];
	if (dep->flags & DWC3_EP_BUSY) {
		dev_vdbg(dwc->dev, "%s: still busy\n", dep->name);
		return 0;
	}

	/* ep0-out and ep0-in share the single EP0 TRB */
	trb = dwc->ep0_trb;

	trb->bpl = lower_32_bits(buf_dma);
	trb->bph = upper_32_bits(buf_dma);
	trb->size = len;
	trb->ctrl = type;

	trb->ctrl |= (DWC3_TRB_CTRL_HWO
			| DWC3_TRB_CTRL_LST
			| DWC3_TRB_CTRL_IOC
			| DWC3_TRB_CTRL_ISP_IMI);

	memset(&params, 0, sizeof(params));
	params.param0 = upper_32_bits(dwc->ep0_trb_addr);
	params.param1 = lower_32_bits(dwc->ep0_trb_addr);

	ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_STARTTRANSFER, &params);
	if (ret < 0) {
		dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
		return ret;
	}

	dep->flags |= DWC3_EP_BUSY;
	dep->res_trans_idx = dwc3_gadget_ep_get_transfer_index(dwc,
			dep->number);

	/* next expected event is the completion of this transfer */
	dwc->ep0_next_event = DWC3_EP0_COMPLETE;

	return 0;
}
/*
 * Queue @req on EP0 (caller holds dwc->lock) and, when the hardware
 * event for the corresponding phase has already fired, kick the
 * transfer or the delayed status stage immediately.
 */
static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep,
		struct dwc3_request *req)
{
	struct dwc3 *dwc = dep->dwc;
	int ret = 0;

	req->request.actual = 0;
	req->request.status = -EINPROGRESS;
	req->epnum = dep->number;

	list_add_tail(&req->list, &dep->request_list);

	/*
	 * Gadget driver might not be quick enough to queue a request
	 * before we get a Transfer Not Ready event on this endpoint.
	 *
	 * In that case, we will set DWC3_EP_PENDING_REQUEST. When that
	 * flag is set, it's telling us that as soon as Gadget queues the
	 * required request, we should kick the transfer here because the
	 * IRQ we were waiting for is long gone.
	 */
	if (dep->flags & DWC3_EP_PENDING_REQUEST) {
		unsigned direction;

		/* direction latched from the missed XferNotReady event */
		direction = !!(dep->flags & DWC3_EP0_DIR_IN);

		if (dwc->ep0state != EP0_DATA_PHASE) {
			dev_WARN(dwc->dev, "Unexpected pending request\n");
			return 0;
		}

		ret = dwc3_ep0_start_trans(dwc, direction,
				req->request.dma, req->request.length,
				DWC3_TRBCTL_CONTROL_DATA);
		dep->flags &= ~(DWC3_EP_PENDING_REQUEST |
				DWC3_EP0_DIR_IN);
	} else if (dwc->delayed_status) {
		/* gadget finished its delayed-status work: run status now */
		dwc->delayed_status = false;

		if (dwc->ep0state == EP0_STATUS_PHASE)
			dwc3_ep0_do_control_status(dwc, 1);
		else
			dev_dbg(dwc->dev, "too early for delayed status\n");
	}

	return ret;
}
/*
 * usb_ep_queue() entry point for EP0.  Validates that the endpoint is
 * enabled and that no other request is pending (ep0-out/in share one
 * TRB), then hands off to __dwc3_gadget_ep0_queue() under dwc->lock.
 */
int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
		gfp_t gfp_flags)
{
	struct dwc3_request *req = to_dwc3_request(request);
	struct dwc3_ep *dep = to_dwc3_ep(ep);
	struct dwc3 *dwc = dep->dwc;

	unsigned long flags;

	int ret;

	spin_lock_irqsave(&dwc->lock, flags);
	if (!dep->desc) {
		/* endpoint disabled: reject the request */
		dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
				request, dep->name);
		ret = -ESHUTDOWN;
		goto out;
	}

	/* we share one TRB for ep0/1 */
	if (!list_empty(&dep->request_list)) {
		ret = -EBUSY;
		goto out;
	}

	dev_vdbg(dwc->dev, "queueing request %p to %s length %d, state '%s'\n",
			request, dep->name, request->length,
			dwc3_ep0_state_string(dwc->ep0state));

	ret = __dwc3_gadget_ep0_queue(dep, req);

out:
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}
/*
 * Protocol-stall EP0, give back any queued request with -ECONNRESET,
 * and re-arm the endpoint to receive the next Setup packet.
 */
static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
{
	struct dwc3_ep *dep = dwc->eps[0];

	/* a protocol stall is always issued on the control endpoint */
	__dwc3_gadget_ep_set_halt(dep, 1);
	dep->flags = DWC3_EP_ENABLED;
	dwc->delayed_status = false;

	if (!list_empty(&dep->request_list))
		dwc3_gadget_giveback(dep, next_request(&dep->request_list),
				-ECONNRESET);

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}
/* Arm EP0-OUT with an 8-byte Setup TRB to receive the next packet. */
void dwc3_ep0_out_start(struct dwc3 *dwc)
{
	int ret = dwc3_ep0_start_trans(dwc, 0, dwc->ctrl_req_addr, 8,
			DWC3_TRBCTL_CONTROL_SETUP);

	WARN_ON(ret < 0);
}
/*
 * Map a wire-format wIndex to the matching endpoint, or NULL when the
 * endpoint is not enabled.  Slots in dwc->eps[] alternate out/in, so
 * the endpoint number is doubled and IN endpoints take the odd slot.
 */
static struct dwc3_ep *dwc3_wIndex_to_dep(struct dwc3 *dwc, __le16 wIndex_le)
{
	u32 windex = le16_to_cpu(wIndex_le);
	u32 epnum = (windex & USB_ENDPOINT_NUMBER_MASK) << 1;
	struct dwc3_ep *dep;

	if ((windex & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN)
		epnum |= 1;

	dep = dwc->eps[epnum];
	return (dep->flags & DWC3_EP_ENABLED) ? dep : NULL;
}
/*
 * Completion callback for the internally-queued GET_STATUS response.
 * Intentionally empty: the response lives in dwc->setup_buf, so there
 * is nothing to free or propagate here.
 */
static void dwc3_ep0_status_cmpl(struct usb_ep *ep, struct usb_request *req)
{
}
/*
 * Handle a standard GET_STATUS request (USB spec ch 9.4.5): build the
 * two-byte status word in dwc->setup_buf and queue it on EP0 as the
 * data stage, using the driver's internal ep0_usb_req.
 *
 * Returns 0 on success, -EINVAL for an unknown recipient or a disabled
 * endpoint.  (Also dropped a stray ';' that followed the switch's
 * closing brace -- an empty statement flagged by -Wextra.)
 */
static int dwc3_ep0_handle_status(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl)
{
	struct dwc3_ep *dep;
	u32 recip;
	u16 usb_status = 0;
	__le16 *response_pkt;

	recip = ctrl->bRequestType & USB_RECIP_MASK;
	switch (recip) {
	case USB_RECIP_DEVICE:
		/*
		 * We are self-powered. U1/U2/LTM will be set later
		 * once we handle this states. RemoteWakeup is 0 on SS
		 */
		usb_status |= dwc->is_selfpowered << USB_DEVICE_SELF_POWERED;
		break;

	case USB_RECIP_INTERFACE:
		/*
		 * Function Remote Wake Capable D0
		 * Function Remote Wakeup D1
		 */
		break;

	case USB_RECIP_ENDPOINT:
		dep = dwc3_wIndex_to_dep(dwc, ctrl->wIndex);
		if (!dep)
			return -EINVAL;

		if (dep->flags & DWC3_EP_STALL)
			usb_status = 1 << USB_ENDPOINT_HALT;
		break;
	default:
		return -EINVAL;
	}

	response_pkt = (__le16 *) dwc->setup_buf;
	*response_pkt = cpu_to_le16(usb_status);

	/* queue the response through the shared internal ep0 request */
	dep = dwc->eps[0];
	dwc->ep0_usb_req.dep = dep;
	dwc->ep0_usb_req.request.length = sizeof(*response_pkt);
	dwc->ep0_usb_req.request.buf = dwc->setup_buf;
	dwc->ep0_usb_req.request.complete = dwc3_ep0_status_cmpl;

	return __dwc3_gadget_ep0_queue(dep, &dwc->ep0_usb_req);
}
/*
 * Handle standard SET_FEATURE / CLEAR_FEATURE requests (USB spec
 * ch 9.4.1 / 9.4.9).  @set is 1 for SET_FEATURE, 0 for CLEAR_FEATURE.
 *
 * Returns 0 on success, -EINVAL for any unsupported recipient or
 * feature selector.  (Also dropped the stray ';' that followed the
 * outer switch's closing brace -- an empty statement.)
 */
static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
		struct usb_ctrlrequest *ctrl, int set)
{
	struct dwc3_ep *dep;
	u32 recip;
	u32 wValue;
	u32 wIndex;
	int ret;

	wValue = le16_to_cpu(ctrl->wValue);
	wIndex = le16_to_cpu(ctrl->wIndex);
	recip = ctrl->bRequestType & USB_RECIP_MASK;

	switch (recip) {
	case USB_RECIP_DEVICE:

		/*
		 * 9.4.1 says only only for SS, in AddressState only for
		 * default control pipe
		 */
		switch (wValue) {
		case USB_DEVICE_U1_ENABLE:
		case USB_DEVICE_U2_ENABLE:
		case USB_DEVICE_LTM_ENABLE:
			/* link-power features need a configured SS device */
			if (dwc->dev_state != DWC3_CONFIGURED_STATE)
				return -EINVAL;
			if (dwc->speed != DWC3_DSTS_SUPERSPEED)
				return -EINVAL;
		}

		/* XXX add U[12] & LTM */
		switch (wValue) {
		case USB_DEVICE_REMOTE_WAKEUP:
			break;
		case USB_DEVICE_U1_ENABLE:
			break;
		case USB_DEVICE_U2_ENABLE:
			break;
		case USB_DEVICE_LTM_ENABLE:
			break;

		case USB_DEVICE_TEST_MODE:
			/* low byte of wIndex must be zero... */
			if ((wIndex & 0xff) != 0)
				return -EINVAL;

			/* ...and test mode can only be set, never cleared */
			if (!set)
				return -EINVAL;

			/* high byte selects the test mode; applied at the
			 * status stage (see dwc3_ep0_complete_req) */
			dwc->test_mode_nr = wIndex >> 8;
			dwc->test_mode = true;
			break;
		default:
			return -EINVAL;
		}
		break;

	case USB_RECIP_INTERFACE:
		switch (wValue) {
		case USB_INTRF_FUNC_SUSPEND:
			if (wIndex & USB_INTRF_FUNC_SUSPEND_LP)
				/* XXX enable Low power suspend */
				;
			if (wIndex & USB_INTRF_FUNC_SUSPEND_RW)
				/* XXX enable remote wakeup */
				;
			break;
		default:
			return -EINVAL;
		}
		break;

	case USB_RECIP_ENDPOINT:
		switch (wValue) {
		case USB_ENDPOINT_HALT:
			/*
			 * NOTE(review): wIndex here is already CPU-endian
			 * while dwc3_wIndex_to_dep() expects __le16; this
			 * only matches on little-endian hosts -- confirm.
			 */
			dep = dwc3_wIndex_to_dep(dwc, wIndex);
			if (!dep)
				return -EINVAL;
			ret = __dwc3_gadget_ep_set_halt(dep, set);
			if (ret)
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Handle SET_ADDRESS: validate the requested address, program it into
 * DCFG, and move the device between Default and Address states.
 * Returns 0 on success or -EINVAL for a bad address / wrong state.
 */
static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u32 addr = le16_to_cpu(ctrl->wValue);
	u32 reg;

	/* USB device addresses are 7 bits wide */
	if (addr > 127) {
		dev_dbg(dwc->dev, "invalid device address %d\n", addr);
		return -EINVAL;
	}

	/* SET_ADDRESS is illegal once configured */
	if (dwc->dev_state == DWC3_CONFIGURED_STATE) {
		dev_dbg(dwc->dev, "trying to set address when configured\n");
		return -EINVAL;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCFG);
	reg &= ~(DWC3_DCFG_DEVADDR_MASK);
	reg |= DWC3_DCFG_DEVADDR(addr);
	dwc3_writel(dwc->regs, DWC3_DCFG, reg);

	/* address 0 returns the device to the Default state */
	dwc->dev_state = addr ? DWC3_ADDRESS_STATE : DWC3_DEFAULT_STATE;

	return 0;
}
/*
 * Forward a control request to the gadget driver's ->setup() callback,
 * dropping dwc->lock across the call.
 */
static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int rc;

	spin_unlock(&dwc->lock);
	rc = dwc->gadget_driver->setup(&dwc->gadget, ctrl);
	spin_lock(&dwc->lock);

	return rc;
}
/*
 * Handle SET_CONFIGURATION: delegate to the gadget driver and track the
 * Address <-> Configured state transitions.  Entering the configured
 * state also schedules a TX FIFO resize, applied at the status stage.
 *
 * Removed an unreachable "break" that followed "return -EINVAL" in the
 * DWC3_DEFAULT_STATE arm (dead code).
 */
static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	u32 cfg;
	int ret;

	dwc->start_config_issued = false;
	cfg = le16_to_cpu(ctrl->wValue);

	switch (dwc->dev_state) {
	case DWC3_DEFAULT_STATE:
		/* not addressed yet: SET_CONFIGURATION is a protocol error */
		return -EINVAL;

	case DWC3_ADDRESS_STATE:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		/* if the cfg matches and the cfg is non zero */
		if (cfg && (!ret || (ret == USB_GADGET_DELAYED_STATUS))) {
			dwc->dev_state = DWC3_CONFIGURED_STATE;
			dwc->resize_fifos = true;
			dev_dbg(dwc->dev, "resize fifos flag SET\n");
		}
		break;

	case DWC3_CONFIGURED_STATE:
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		if (!cfg)
			dwc->dev_state = DWC3_ADDRESS_STATE;
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}
/*
 * Dispatch a standard control request to the matching handler; anything
 * not handled in the driver is forwarded to the gadget driver.
 * (Dropped the stray ';' after the switch's closing brace.)
 */
static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
{
	int ret;

	switch (ctrl->bRequest) {
	case USB_REQ_GET_STATUS:
		dev_vdbg(dwc->dev, "USB_REQ_GET_STATUS\n");
		ret = dwc3_ep0_handle_status(dwc, ctrl);
		break;
	case USB_REQ_CLEAR_FEATURE:
		dev_vdbg(dwc->dev, "USB_REQ_CLEAR_FEATURE\n");
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 0);
		break;
	case USB_REQ_SET_FEATURE:
		dev_vdbg(dwc->dev, "USB_REQ_SET_FEATURE\n");
		ret = dwc3_ep0_handle_feature(dwc, ctrl, 1);
		break;
	case USB_REQ_SET_ADDRESS:
		dev_vdbg(dwc->dev, "USB_REQ_SET_ADDRESS\n");
		ret = dwc3_ep0_set_address(dwc, ctrl);
		break;
	case USB_REQ_SET_CONFIGURATION:
		dev_vdbg(dwc->dev, "USB_REQ_SET_CONFIGURATION\n");
		ret = dwc3_ep0_set_config(dwc, ctrl);
		break;
	default:
		dev_vdbg(dwc->dev, "Forwarding to gadget driver\n");
		ret = dwc3_ep0_delegate_req(dwc, ctrl);
		break;
	}

	return ret;
}
/*
 * Parse the Setup packet received in dwc->ctrl_req: decide whether the
 * transfer is two- or three-stage, route it to the standard-request
 * handler or the gadget driver, and stall EP0 on any failure.
 */
static void dwc3_ep0_inspect_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct usb_ctrlrequest *ctrl = dwc->ctrl_req;
	int ret;
	u32 len;

	/* no gadget bound: nothing can answer, stall */
	if (!dwc->gadget_driver)
		goto err;

	len = le16_to_cpu(ctrl->wLength);
	if (!len) {
		/* no data stage: expect the status stage next */
		dwc->three_stage_setup = false;
		dwc->ep0_expect_in = false;
		dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
	} else {
		/* data stage follows, in the direction the host requested */
		dwc->three_stage_setup = true;
		dwc->ep0_expect_in = !!(ctrl->bRequestType & USB_DIR_IN);
		dwc->ep0_next_event = DWC3_EP0_NRDY_DATA;
	}

	if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD)
		ret = dwc3_ep0_std_request(dwc, ctrl);
	else
		ret = dwc3_ep0_delegate_req(dwc, ctrl);

	/* gadget will complete the status stage later via ep0_queue */
	if (ret == USB_GADGET_DELAYED_STATUS)
		dwc->delayed_status = true;

	if (ret >= 0)
		return;

err:
	dwc3_ep0_stall_and_restart(dwc);
}
/*
 * Finish the EP0 data stage: compute how many bytes were actually
 * transferred (unwinding the bounce-buffer rounding when one was used),
 * update the request, and either give it back or stall on a short IN
 * transfer.
 */
static void dwc3_ep0_complete_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r = NULL;
	struct usb_request *ur;
	struct dwc3_trb *trb;
	struct dwc3_ep *ep0;
	u32 transferred;
	u32 length;
	u8 epnum;

	epnum = event->endpoint_number;
	ep0 = dwc->eps[0];

	/* data done: the status stage is next */
	dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;

	/*
	 * NOTE(review): next_request() can presumably return NULL on an
	 * empty list, yet ur = &r->request is taken before the later
	 * "if (r)" check -- confirm the list can never be empty here.
	 */
	r = next_request(&ep0->request_list);
	ur = &r->request;

	trb = dwc->ep0_trb;
	/* TRB "size" counts bytes NOT transferred (remaining) */
	length = trb->size & DWC3_TRB_SIZE_MASK;

	if (dwc->ep0_bounced) {
		/* OUT went through the bounce buffer, rounded up to a
		 * whole max-packet; copy only the useful bytes back */
		unsigned transfer_size = ur->length;
		unsigned maxp = ep0->endpoint.maxpacket;

		transfer_size += (maxp - (transfer_size % maxp));
		transferred = min_t(u32, ur->length,
				transfer_size - length);
		memcpy(ur->buf, dwc->ep0_bounce, transferred);
		dwc->ep0_bounced = false;
	} else {
		transferred = ur->length - length;
	}

	ur->actual += transferred;

	if ((epnum & 1) && ur->actual < ur->length) {
		/* for some reason we did not get everything out */

		dwc3_ep0_stall_and_restart(dwc);
	} else {
		/*
		 * handle the case where we have to send a zero packet. This
		 * seems to be case when req.length > maxpacket. Could it be?
		 */
		if (r)
			dwc3_gadget_giveback(ep0, r, 0);
	}
}
/*
 * Finish the EP0 status stage: give back any outstanding request, apply
 * a pending USB test mode (armed by SET_FEATURE), and re-arm EP0 for
 * the next Setup packet.
 */
static void dwc3_ep0_complete_req(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_request *r;
	struct dwc3_ep *dep;

	dep = dwc->eps[0];

	if (!list_empty(&dep->request_list)) {
		r = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, r, 0);
	}

	if (dwc->test_mode) {
		int ret;

		/* test modes take effect only after the status stage */
		ret = dwc3_gadget_set_test_mode(dwc, dwc->test_mode_nr);
		if (ret < 0) {
			dev_dbg(dwc->dev, "Invalid Test #%d\n",
					dwc->test_mode_nr);
			dwc3_ep0_stall_and_restart(dwc);
		}
	}

	dwc->ep0state = EP0_SETUP_PHASE;
	dwc3_ep0_out_start(dwc);
}
/*
 * XferComplete handler for EP0: clear the busy state and dispatch to
 * the phase-specific completion routine based on dwc->ep0state.
 */
static void dwc3_ep0_xfer_complete(struct dwc3 *dwc,
			const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep = dwc->eps[event->endpoint_number];

	dep->flags &= ~DWC3_EP_BUSY;
	dep->res_trans_idx = 0;
	dwc->setup_packet_pending = false;

	switch (dwc->ep0state) {
	case EP0_SETUP_PHASE:
		dev_vdbg(dwc->dev, "Inspecting Setup Bytes\n");
		dwc3_ep0_inspect_setup(dwc, event);
		break;

	case EP0_DATA_PHASE:
		dev_vdbg(dwc->dev, "Data Phase\n");
		dwc3_ep0_complete_data(dwc, event);
		break;

	case EP0_STATUS_PHASE:
		dev_vdbg(dwc->dev, "Status Phase\n");
		dwc3_ep0_complete_req(dwc, event);
		break;
	default:
		WARN(true, "UNKNOWN ep0state %d\n", dwc->ep0state);
	}
}
/* XferNotReady(Setup): simply re-arm EP0-OUT for a Setup packet. */
static void dwc3_ep0_do_control_setup(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	dwc3_ep0_out_start(dwc);
}
/*
 * Start the EP0 data stage.  If the gadget has not queued a request
 * yet, remember that one is pending (see __dwc3_gadget_ep0_queue);
 * otherwise map the request and start the transfer, bouncing
 * non-max-packet-aligned OUT transfers through ep0_bounce.
 */
static void dwc3_ep0_do_control_data(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	struct dwc3_ep *dep;
	struct dwc3_request *req;
	int ret;

	dep = dwc->eps[0];

	if (list_empty(&dep->request_list)) {
		/* gadget is late; latch direction and wait for its queue */
		dev_vdbg(dwc->dev, "pending request for EP0 Data phase\n");
		dep->flags |= DWC3_EP_PENDING_REQUEST;

		if (event->endpoint_number)
			dep->flags |= DWC3_EP0_DIR_IN;
		return;
	}

	req = next_request(&dep->request_list);
	req->direction = !!event->endpoint_number;

	if (req->request.length == 0) {
		/* zero-length data stage: use the control-request buffer */
		ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
				dwc->ctrl_req_addr, 0,
				DWC3_TRBCTL_CONTROL_DATA);
	} else if ((req->request.length % dep->endpoint.maxpacket)
			&& (event->endpoint_number == 0)) {
		/* unaligned OUT: receive into the bounce buffer instead */
		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
				event->endpoint_number);
		if (ret) {
			dev_dbg(dwc->dev, "failed to map request\n");
			return;
		}

		WARN_ON(req->request.length > dep->endpoint.maxpacket);

		dwc->ep0_bounced = true;

		/*
		 * REVISIT in case request length is bigger than EP0
		 * wMaxPacketSize, we will need two chained TRBs to handle
		 * the transfer.
		 */
		ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
				dwc->ep0_bounce_addr, dep->endpoint.maxpacket,
				DWC3_TRBCTL_CONTROL_DATA);
	} else {
		ret = usb_gadget_map_request(&dwc->gadget, &req->request,
				event->endpoint_number);
		if (ret) {
			dev_dbg(dwc->dev, "failed to map request\n");
			return;
		}

		ret = dwc3_ep0_start_trans(dwc, event->endpoint_number,
				req->request.dma, req->request.length,
				DWC3_TRBCTL_CONTROL_DATA);
	}

	WARN_ON(ret < 0);
}
/*
 * Start the EP0 status stage on @dep: STATUS3 TRBs for three-stage
 * control transfers, STATUS2 for two-stage ones.
 */
static int dwc3_ep0_start_control_status(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 type;

	if (dwc->three_stage_setup)
		type = DWC3_TRBCTL_CONTROL_STATUS3;
	else
		type = DWC3_TRBCTL_CONTROL_STATUS2;

	return dwc3_ep0_start_trans(dwc, dep->number,
			dwc->ctrl_req_addr, 0, type);
}
/*
 * Kick the EP0 status stage, first applying any TX FIFO resize that was
 * scheduled when the device entered the configured state.
 */
static void dwc3_ep0_do_control_status(struct dwc3 *dwc, u32 epnum)
{
	struct dwc3_ep *dep = dwc->eps[epnum];

	if (dwc->resize_fifos) {
		dev_dbg(dwc->dev, "starting to resize fifos\n");
		dwc3_gadget_resize_tx_fifos(dwc);
		dwc->resize_fifos = 0;
	}

	WARN_ON(dwc3_ep0_start_control_status(dep));
}
/*
 * XferNotReady handler for EP0: advance the control state machine into
 * setup/data/status phase, stalling on any phase or direction mismatch
 * with what the last Setup packet led us to expect.
 */
static void dwc3_ep0_xfernotready(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	dwc->setup_packet_pending = true;

	/*
	 * This part is very tricky: If we has just handled
	 * XferNotReady(Setup) and we're now expecting a
	 * XferComplete but, instead, we receive another
	 * XferNotReady(Setup), we should STALL and restart
	 * the state machine.
	 *
	 * In all other cases, we just continue waiting
	 * for the XferComplete event.
	 *
	 * We are a little bit unsafe here because we're
	 * not trying to ensure that last event was, indeed,
	 * XferNotReady(Setup).
	 *
	 * Still, we don't expect any condition where that
	 * should happen and, even if it does, it would be
	 * another error condition.
	 */
	if (dwc->ep0_next_event == DWC3_EP0_COMPLETE) {
		switch (event->status) {
		case DEPEVT_STATUS_CONTROL_SETUP:
			dev_vdbg(dwc->dev, "Unexpected XferNotReady(Setup)\n");
			dwc3_ep0_stall_and_restart(dwc);
			break;
		case DEPEVT_STATUS_CONTROL_DATA:
			/* FALLTHROUGH */
		case DEPEVT_STATUS_CONTROL_STATUS:
			/* FALLTHROUGH */
		default:
			dev_vdbg(dwc->dev, "waiting for XferComplete\n");
		}

		return;
	}

	switch (event->status) {
	case DEPEVT_STATUS_CONTROL_SETUP:
		dev_vdbg(dwc->dev, "Control Setup\n");

		dwc->ep0state = EP0_SETUP_PHASE;

		dwc3_ep0_do_control_setup(dwc, event);
		break;

	case DEPEVT_STATUS_CONTROL_DATA:
		dev_vdbg(dwc->dev, "Control Data\n");

		dwc->ep0state = EP0_DATA_PHASE;

		/* host started a data stage we were not expecting: stall */
		if (dwc->ep0_next_event != DWC3_EP0_NRDY_DATA) {
			dev_vdbg(dwc->dev, "Expected %d got %d\n",
					dwc->ep0_next_event,
					DWC3_EP0_NRDY_DATA);

			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		/*
		 * One of the possible error cases is when Host _does_
		 * request for Data Phase, but it does so on the wrong
		 * direction.
		 *
		 * Here, we already know ep0_next_event is DATA (see above),
		 * so we only need to check for direction.
		 */
		if (dwc->ep0_expect_in != event->endpoint_number) {
			dev_vdbg(dwc->dev, "Wrong direction for Data phase\n");
			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		dwc3_ep0_do_control_data(dwc, event);
		break;

	case DEPEVT_STATUS_CONTROL_STATUS:
		dev_vdbg(dwc->dev, "Control Status\n");

		dwc->ep0state = EP0_STATUS_PHASE;

		if (dwc->ep0_next_event != DWC3_EP0_NRDY_STATUS) {
			dev_vdbg(dwc->dev, "Expected %d got %d\n",
					dwc->ep0_next_event,
					DWC3_EP0_NRDY_STATUS);

			dwc3_ep0_stall_and_restart(dwc);
			return;
		}

		/* gadget owes us the status; it will arrive via ep0_queue */
		if (dwc->delayed_status) {
			WARN_ON_ONCE(event->endpoint_number != 1);
			dev_vdbg(dwc->dev, "Mass Storage delayed status\n");
			return;
		}

		dwc3_ep0_do_control_status(dwc, event->endpoint_number);
	}
}
/*
 * Top-level EP0 event dispatcher: route XferComplete/XferNotReady to
 * their handlers; all other endpoint events are ignored on EP0.
 */
void dwc3_ep0_interrupt(struct dwc3 *dwc,
		const struct dwc3_event_depevt *event)
{
	u8 epnum = event->endpoint_number;

	dev_dbg(dwc->dev, "%s while ep%d%s in state '%s'\n",
			dwc3_ep_event_string(event->endpoint_event),
			epnum >> 1, (epnum & 1) ? "in" : "out",
			dwc3_ep0_state_string(dwc->ep0state));

	switch (event->endpoint_event) {
	case DWC3_DEPEVT_XFERCOMPLETE:
		dwc3_ep0_xfer_complete(dwc, event);
		break;

	case DWC3_DEPEVT_XFERNOTREADY:
		dwc3_ep0_xfernotready(dwc, event);
		break;

	/* these events carry no work for the control endpoint */
	case DWC3_DEPEVT_XFERINPROGRESS:
	case DWC3_DEPEVT_RXTXFIFOEVT:
	case DWC3_DEPEVT_STREAMEVT:
	case DWC3_DEPEVT_EPCMDCMPLT:
		break;
	}
}
| gpl-2.0 |
schqiushui/Kernel_Lollipop_GPE5.1_M8ACE | drivers/video/msm/mdp4_overlay_lcdc.c | 2186 | 23873 | /* Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/hrtimer.h>
#include <linux/delay.h>
#include <mach/hardware.h>
#include <linux/io.h>
#include <asm/system.h>
#include <asm/mach-types.h>
#include <linux/semaphore.h>
#include <linux/spinlock.h>
#include <linux/fb.h>
#include "mdp.h"
#include "msm_fb.h"
#include "mdp4.h"
#ifdef CONFIG_FB_MSM_MDP40
#define LCDC_BASE 0xC0000
#else
#define LCDC_BASE 0xE0000
#endif
/* active-area start offsets programmed into the LCDC timing generator */
int first_pixel_start_x;
int first_pixel_start_y;

static int lcdc_enabled;	/* set once the timing generator is started */

#define MAX_CONTROLLER	1

/* Per-controller vsync/commit bookkeeping for the LCDC (primary) path. */
static struct vsycn_ctrl {
	struct device *dev;
	int inited;
	int update_ndx;		/* which of the two vlist[] slots is filling */
	int ov_koff;		/* overlay kickoffs issued (see pipe_commit) */
	int ov_done;		/* overlay kickoffs completed -- presumably
				 * advanced in the overlay-done ISR (not
				 * visible in this chunk) */
	atomic_t suspend;
	atomic_t vsync_resume;
	int wait_vsync_cnt;	/* waiters blocked on vsync_comp */
	int blt_change;
	int blt_free;		/* countdown to freeing the writeback buf */
	int sysfs_created;
	struct mutex update_lock;	/* guards vlist/update_ndx/blt_free */
	struct completion ov_comp;
	struct completion dmap_comp;
	struct completion vsync_comp;
	spinlock_t spin_lock;
	struct msm_fb_data_type *mfd;
	struct mdp4_overlay_pipe *base_pipe;
	struct vsync_update vlist[2];	/* ping-pong pending-update lists */
	int vsync_irq_enabled;
	ktime_t vsync_time;
} vsync_ctrl_db[MAX_CONTROLLER];
/*******************************************************
to do:
1) move vsync_irq_enable/vsync_irq_disable to mdp.c to be shared
*******************************************************/
/* Unmask @intr in the MDP interrupt mask and take an IRQ reference. */
static void vsync_irq_enable(int intr, int term)
{
	unsigned long flags;

	spin_lock_irqsave(&mdp_spin_lock, flags);
	/* drop any stale latched status before unmasking */
	outp32(MDP_INTR_CLEAR, intr);
	mdp_intr_mask |= intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_enable_irq(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flags);
	pr_debug("%s: IRQ-en done, term=%x\n", __func__, term);
}
/* Mask @intr in the MDP interrupt mask and drop the IRQ reference. */
static void vsync_irq_disable(int intr, int term)
{
	unsigned long flags;

	spin_lock_irqsave(&mdp_spin_lock, flags);
	/* clear latched status so it does not fire after re-enable */
	outp32(MDP_INTR_CLEAR, intr);
	mdp_intr_mask &= ~intr;
	outp32(MDP_INTR_ENABLE, mdp_intr_mask);
	mdp_disable_irq_nosync(term);
	spin_unlock_irqrestore(&mdp_spin_lock, flags);
	pr_debug("%s: IRQ-dis done, term=%x\n", __func__, term);
}
/* Start the LCDC timing generator (idempotent). */
static void mdp4_overlay_lcdc_start(void)
{
	if (lcdc_enabled)
		return;

	/* enable DSI block */
	mdp4_iommu_attach();
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
	MDP_OUTP(MDP_BASE + LCDC_BASE, 1);
	lcdc_enabled = 1;
}
/*
* mdp4_lcdc_pipe_queue:
* called from thread context
*/
void mdp4_lcdc_pipe_queue(int cndx, struct mdp4_overlay_pipe *pipe)
{
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pp;
	int undx;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	/* nothing is queued while the controller is suspended */
	if (atomic_read(&vctrl->suspend) > 0)
		return;

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];

	pp = &vp->plist[pipe->pipe_ndx - 1];	/* ndx start form 1 */

	pr_debug("%s: vndx=%d pipe_ndx=%d pid=%d\n", __func__,
		undx, pipe->pipe_ndx, current->pid);

	/* snapshot the pipe into the pending-update list; the commit
	 * path (mdp4_lcdc_pipe_commit) consumes and clears it */
	*pp = *pipe;	/* clone it */
	vp->update_cnt++;
	mutex_unlock(&vctrl->update_lock);
	mdp4_stat.overlay_play[pipe->mixer_num]++;
}
static void mdp4_lcdc_blt_ov_update(struct mdp4_overlay_pipe *pipe);
static void mdp4_lcdc_wait4dmap(int cndx);
static void mdp4_lcdc_wait4ov(int cndx);
/*
 * Commit the pending per-pipe updates collected by
 * mdp4_lcdc_pipe_queue(): swap the ping-pong update list, push each
 * used pipe to the mixer, then kick either the overlay engine (BLT
 * mode) or a DMA-P pass.  With @wait non-zero the call blocks until
 * the corresponding done interrupt completes.  Returns the number of
 * pipes committed (0 if nothing was pending or a frame was dropped).
 */
int mdp4_lcdc_pipe_commit(int cndx, int wait)
{
	int i, undx;
	int mixer = 0;
	struct vsycn_ctrl *vctrl;
	struct vsync_update *vp;
	struct mdp4_overlay_pipe *pipe;
	struct mdp4_overlay_pipe *real_pipe;
	unsigned long flags;
	int cnt = 0;

	vctrl = &vsync_ctrl_db[cndx];

	mutex_lock(&vctrl->update_lock);
	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	pipe = vctrl->base_pipe;
	mixer = pipe->mixer_num;

	mdp_update_pm(vctrl->mfd, vctrl->vsync_time);

	if (vp->update_cnt == 0) {
		/* nothing queued since the last commit */
		mutex_unlock(&vctrl->update_lock);
		return 0;
	}

	/* flip to the other slot so new queues do not race this commit */
	vctrl->update_ndx++;
	vctrl->update_ndx &= 0x01;
	vp->update_cnt = 0;	/* reset */
	if (vctrl->blt_free) {
		vctrl->blt_free--;
		if (vctrl->blt_free == 0)
			mdp4_free_writeback_buf(vctrl->mfd, mixer);
	}
	mutex_unlock(&vctrl->update_lock);

	/* free previous committed iommu back to pool */
	mdp4_overlay_iommu_unmap_freelist(mixer);

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->ov_koff != vctrl->ov_done) {
		/* previous overlay kickoff still in flight: drop frame */
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		pr_err("%s: Error, frame dropped %d %d\n", __func__,
			vctrl->ov_koff, vctrl->ov_done);
		return 0;
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_overlay_mdp_perf_upd(vctrl->mfd, 1);

	if (vctrl->blt_change) {
		/* BLT mode toggled: drain outstanding dmap/overlay work
		 * before reprogramming */
		pipe = vctrl->base_pipe;
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		INIT_COMPLETION(vctrl->dmap_comp);
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		mdp4_lcdc_wait4dmap(0);
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(0);
	}

	pipe = vp->plist;
	for (i = 0; i < OVERLAY_PIPE_MAX; i++, pipe++) {
		if (pipe->pipe_used) {
			cnt++;
			real_pipe = mdp4_overlay_ndx2pipe(pipe->pipe_ndx);
			if (real_pipe && real_pipe->pipe_used) {
				/* pipe not unset */
				mdp4_overlay_vsync_commit(pipe);
			}
			/* free previous iommu to freelist
			 * which will be freed at next
			 * pipe_commit
			 */
			mdp4_overlay_iommu_pipe_free(pipe->pipe_ndx, 0);
			pipe->pipe_used = 0; /* clear */
		}
	}

	mdp4_mixer_stage_commit(mixer);

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	pipe = vctrl->base_pipe;
	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (pipe->ov_blt_addr) {
		mdp4_lcdc_blt_ov_update(pipe);
		pipe->ov_cnt++;
		INIT_COMPLETION(vctrl->ov_comp);
		vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
		mb();	/* ensure register writes land before kickoff */
		vctrl->ov_koff++;
		/* kickoff overlay engine */
		mdp4_stat.kickoff_ov0++;
		outpdw(MDP_BASE + 0x0004, 0);
	} else {
		/* schedule second phase update at dmap */
		INIT_COMPLETION(vctrl->dmap_comp);
		vsync_irq_enable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	}
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	mdp4_stat.overlay_commit[pipe->mixer_num]++;

	if (wait) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
		else
			mdp4_lcdc_wait4dmap(cndx);
	}

	return cnt;
}
/* Enable or disable the primary vsync interrupt (idempotent). */
void mdp4_lcdc_vsync_ctrl(struct fb_info *info, int enable)
{
	struct vsycn_ctrl *vctrl = &vsync_ctrl_db[0];

	if (vctrl->vsync_irq_enabled == enable)
		return;

	pr_debug("%s: vsync enable=%d\n", __func__, enable);

	vctrl->vsync_irq_enabled = enable;

	if (enable)
		vsync_irq_enable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
	else
		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);

	/* mark vsync usable again once enabled while not suspended */
	if (vctrl->vsync_irq_enabled && atomic_read(&vctrl->suspend) == 0)
		atomic_set(&vctrl->vsync_resume, 1);
}
/*
 * Block until the next vsync and report its timestamp through @vtime
 * (-1 when suspended, in which case no wait is performed).
 */
void mdp4_lcdc_wait4vsync(int cndx, long long *vtime)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	unsigned long flags;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	if (atomic_read(&vctrl->suspend) > 0) {
		*vtime = -1;
		return;
	}

	/* start timing generator & mmu if they are not started yet */
	mdp4_overlay_lcdc_start();

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	/* first waiter re-arms the completion for this vsync period */
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	wait_for_completion(&vctrl->vsync_comp);
	mdp4_stat.wait4vsync0++;

	*vtime = vctrl->vsync_time.tv64;
}
/* Block until the pending DMA-P pass completes (no-op when suspended). */
static void mdp4_lcdc_wait4dmap(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (atomic_read(&vctrl->suspend) > 0)
		return;

	wait_for_completion(&vctrl->dmap_comp);
}
/* Block until the pending overlay kickoff completes (no-op when suspended). */
static void mdp4_lcdc_wait4ov(int cndx)
{
	struct vsycn_ctrl *vctrl;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];

	if (atomic_read(&vctrl->suspend) > 0)
		return;

	wait_for_completion(&vctrl->ov_comp);
}
/*
 * sysfs "vsync_event" read: wait (interruptibly, with a 4-frame
 * timeout) for the next vsync and print its timestamp.  On timeout or
 * signal the current time is reported instead so userspace never
 * blocks forever.
 */
ssize_t mdp4_lcdc_show_event(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	int cndx;
	struct vsycn_ctrl *vctrl;
	ssize_t ret = 0;
	unsigned long flags;
	u64 vsync_tick;

	cndx = 0;
	vctrl = &vsync_ctrl_db[0];

	if (atomic_read(&vctrl->suspend) > 0 ||
		atomic_read(&vctrl->vsync_resume) == 0)
		return 0;

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	if (vctrl->wait_vsync_cnt == 0)
		INIT_COMPLETION(vctrl->vsync_comp);
	vctrl->wait_vsync_cnt++;
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = wait_for_completion_interruptible_timeout(&vctrl->vsync_comp,
		msecs_to_jiffies(VSYNC_PERIOD * 4));
	if (ret <= 0) {
		/* timed out or interrupted: fall back to current time */
		vctrl->wait_vsync_cnt = 0;
		vsync_tick = ktime_to_ns(ktime_get());
		ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
		/*
		 * NOTE(review): writes one byte past the existing NUL
		 * (strlen(buf) + 1) -- looks off by one; confirm intent.
		 */
		buf[strlen(buf) + 1] = '\0';
		return ret;
	}

	spin_lock_irqsave(&vctrl->spin_lock, flags);
	vsync_tick = ktime_to_ns(vctrl->vsync_time);
	spin_unlock_irqrestore(&vctrl->spin_lock, flags);

	ret = snprintf(buf, PAGE_SIZE, "VSYNC=%llu", vsync_tick);
	buf[strlen(buf) + 1] = '\0';
	return ret;
}
/*
 * mdp4_lcdc_vsync_init() - one-time init of the per-controller vsync
 * bookkeeping (completions, locks, suspend state). Idempotent: repeated
 * calls for an already-initialized controller are ignored.
 */
void mdp4_lcdc_vsync_init(int cndx)
{
	struct vsycn_ctrl *vc;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	pr_info("%s: ndx=%d\n", __func__, cndx);

	vc = &vsync_ctrl_db[cndx];
	if (vc->inited)
		return;

	vc->inited = 1;
	vc->update_ndx = 0;
	mutex_init(&vc->update_lock);
	init_completion(&vc->vsync_comp);
	init_completion(&vc->dmap_comp);
	init_completion(&vc->ov_comp);
	/* start suspended; mdp4_lcdc_on() clears this */
	atomic_set(&vc->suspend, 1);
	atomic_set(&vc->vsync_resume, 1);
	spin_lock_init(&vc->spin_lock);
}
void mdp4_lcdc_base_swap(int cndx, struct mdp4_overlay_pipe *pipe)
{
struct vsycn_ctrl *vctrl;
if (cndx >= MAX_CONTROLLER) {
pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
return;
}
vctrl = &vsync_ctrl_db[cndx];
vctrl->base_pipe = pipe;
}
int mdp4_lcdc_on(struct platform_device *pdev)
{
int lcdc_width;
int lcdc_height;
int lcdc_bpp;
int lcdc_border_clr;
int lcdc_underflow_clr;
int lcdc_hsync_skew;
int hsync_period;
int hsync_ctrl;
int vsync_period;
int display_hctl;
int display_v_start;
int display_v_end;
int active_hctl;
int active_h_start;
int active_h_end;
int active_v_start;
int active_v_end;
int ctrl_polarity;
int h_back_porch;
int h_front_porch;
int v_back_porch;
int v_front_porch;
int hsync_pulse_width;
int vsync_pulse_width;
int hsync_polarity;
int vsync_polarity;
int data_en_polarity;
int hsync_start_x;
int hsync_end_x;
uint8 *buf;
unsigned int buf_offset;
int bpp, ptype;
struct fb_info *fbi;
struct fb_var_screeninfo *var;
struct msm_fb_data_type *mfd;
struct mdp4_overlay_pipe *pipe;
int ret = 0;
int cndx = 0;
struct vsycn_ctrl *vctrl;
vctrl = &vsync_ctrl_db[cndx];
mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
if (!mfd)
return -ENODEV;
if (mfd->key != MFD_KEY)
return -EINVAL;
vctrl->mfd = mfd;
vctrl->dev = mfd->fbi->dev;
/* mdp clock on */
mdp_clk_ctrl(1);
fbi = mfd->fbi;
var = &fbi->var;
bpp = fbi->var.bits_per_pixel / 8;
buf = (uint8 *) fbi->fix.smem_start;
buf_offset = calc_fb_offset(mfd, fbi, bpp);
if (vctrl->base_pipe == NULL) {
ptype = mdp4_overlay_format2type(mfd->fb_imgType);
if (ptype < 0)
printk(KERN_INFO "%s: format2type failed\n", __func__);
pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0);
if (pipe == NULL)
printk(KERN_INFO "%s: pipe_alloc failed\n", __func__);
pipe->pipe_used++;
pipe->mixer_stage = MDP4_MIXER_STAGE_BASE;
pipe->mixer_num = MDP4_MIXER0;
pipe->src_format = mfd->fb_imgType;
mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_LCDC);
ret = mdp4_overlay_format2pipe(pipe);
if (ret < 0)
printk(KERN_INFO "%s: format2pipe failed\n", __func__);
mdp4_init_writeback_buf(mfd, MDP4_MIXER0);
pipe->ov_blt_addr = 0;
pipe->dma_blt_addr = 0;
vctrl->base_pipe = pipe; /* keep it */
} else {
pipe = vctrl->base_pipe;
}
pipe->src_height = fbi->var.yres;
pipe->src_width = fbi->var.xres;
pipe->src_h = fbi->var.yres;
pipe->src_w = fbi->var.xres;
pipe->src_y = 0;
pipe->src_x = 0;
pipe->dst_h = fbi->var.yres;
pipe->dst_w = fbi->var.xres;
if (mfd->display_iova)
pipe->srcp0_addr = mfd->display_iova + buf_offset;
else
pipe->srcp0_addr = (uint32)(buf + buf_offset);
pipe->srcp0_ystride = fbi->fix.line_length;
pipe->bpp = bpp;
mdp4_overlay_mdp_pipe_req(pipe, mfd);
mdp4_calc_blt_mdp_bw(mfd, pipe);
atomic_set(&vctrl->suspend, 0);
mdp4_overlay_dmap_xy(pipe);
mdp4_overlay_dmap_cfg(mfd, 1);
mdp4_overlay_rgb_setup(pipe);
mdp4_overlayproc_cfg(pipe);
mdp4_overlay_reg_flush(pipe, 1);
mdp4_mixer_stage_up(pipe, 0);
/*
* LCDC timing setting
*/
h_back_porch = var->left_margin;
h_front_porch = var->right_margin;
v_back_porch = var->upper_margin;
v_front_porch = var->lower_margin;
hsync_pulse_width = var->hsync_len;
vsync_pulse_width = var->vsync_len;
lcdc_border_clr = mfd->panel_info.lcdc.border_clr;
lcdc_underflow_clr = mfd->panel_info.lcdc.underflow_clr;
lcdc_hsync_skew = mfd->panel_info.lcdc.hsync_skew;
lcdc_width = var->xres + mfd->panel_info.lcdc.xres_pad;
lcdc_height = var->yres + mfd->panel_info.lcdc.yres_pad;
lcdc_bpp = mfd->panel_info.bpp;
hsync_period =
hsync_pulse_width + h_back_porch + h_front_porch;
if ((mfd->panel_info.type == LVDS_PANEL) &&
(mfd->panel_info.lvds.channel_mode == LVDS_DUAL_CHANNEL_MODE))
hsync_period += lcdc_width / 2;
else
hsync_period += lcdc_width;
hsync_ctrl = (hsync_period << 16) | hsync_pulse_width;
hsync_start_x = hsync_pulse_width + h_back_porch;
hsync_end_x = hsync_period - h_front_porch - 1;
display_hctl = (hsync_end_x << 16) | hsync_start_x;
vsync_period =
(vsync_pulse_width + v_back_porch + lcdc_height +
v_front_porch) * hsync_period;
display_v_start =
(vsync_pulse_width + v_back_porch) * hsync_period + lcdc_hsync_skew;
display_v_end =
vsync_period - (v_front_porch * hsync_period) + lcdc_hsync_skew - 1;
if (lcdc_width != var->xres) {
active_h_start = hsync_start_x + first_pixel_start_x;
active_h_end = active_h_start + var->xres - 1;
active_hctl =
ACTIVE_START_X_EN | (active_h_end << 16) | active_h_start;
} else {
active_hctl = 0;
}
if (lcdc_height != var->yres) {
active_v_start =
display_v_start + first_pixel_start_y * hsync_period;
active_v_end = active_v_start + (var->yres) * hsync_period - 1;
active_v_start |= ACTIVE_START_Y_EN;
} else {
active_v_start = 0;
active_v_end = 0;
}
#ifdef CONFIG_FB_MSM_MDP40
if (mfd->panel_info.lcdc.is_sync_active_high) {
hsync_polarity = 0;
vsync_polarity = 0;
} else {
hsync_polarity = 1;
vsync_polarity = 1;
}
lcdc_underflow_clr |= 0x80000000; /* enable recovery */
#else
hsync_polarity = 0;
vsync_polarity = 0;
#endif
data_en_polarity = 0;
ctrl_polarity =
(data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x4, hsync_ctrl);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x8, vsync_period);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0xc, vsync_pulse_width * hsync_period);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x10, display_hctl);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x14, display_v_start);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x18, display_v_end);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x28, lcdc_border_clr);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x2c, lcdc_underflow_clr);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x30, lcdc_hsync_skew);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x38, ctrl_polarity);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x1c, active_hctl);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x20, active_v_start);
MDP_OUTP(MDP_BASE + LCDC_BASE + 0x24, active_v_end);
mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);
mdp_histogram_ctrl_all(TRUE);
return ret;
}
/*
 * mdp4_lcdc_off() - panel-off path for the LCDC interface.
 * Marks the controller suspended, releases waiters, drains a pending
 * blt kickoff, disables the timing generator and vsync irq, flushes any
 * queued update and tears down (or stages down) the base pipe.
 *
 * Fix vs original: pipe->ov_blt_addr was dereferenced before the later
 * `if (pipe)` NULL check — a NULL base_pipe crashed here. The blt-drain
 * step is now guarded too.
 */
int mdp4_lcdc_off(struct platform_device *pdev)
{
	int ret = 0;
	int cndx = 0;
	struct msm_fb_data_type *mfd;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;
	struct vsync_update *vp;
	unsigned long flags;
	int undx, need_wait = 0;

	mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	atomic_set(&vctrl->suspend, 1);
	atomic_set(&vctrl->vsync_resume, 0);

	msleep(20);	/* >= 17 ms */

	complete_all(&vctrl->vsync_comp);

	/* drain an in-flight overlay kickoff before stopping the engine */
	if (pipe && pipe->ov_blt_addr) {
		spin_lock_irqsave(&vctrl->spin_lock, flags);
		if (vctrl->ov_koff != vctrl->ov_done)
			need_wait = 1;
		spin_unlock_irqrestore(&vctrl->spin_lock, flags);
		if (need_wait)
			mdp4_lcdc_wait4ov(0);
	}

	mdp_histogram_ctrl_all(FALSE);

	MDP_OUTP(MDP_BASE + LCDC_BASE, 0);
	lcdc_enabled = 0;

	if (vctrl->vsync_irq_enabled) {
		vctrl->vsync_irq_enabled = 0;
		vsync_irq_disable(INTR_PRIMARY_VSYNC, MDP_PRIM_VSYNC_TERM);
	}

	undx = vctrl->update_ndx;
	vp = &vctrl->vlist[undx];
	if (vp->update_cnt) {
		/*
		 * pipe's iommu will be freed at next overlay play
		 * and iommu_drop statistic will be increased by one
		 */
		vp->update_cnt = 0;	/* empty queue */
	}

	if (pipe) {
		/* sanity check, free pipes besides base layer */
		mdp4_overlay_unset_mixer(pipe->mixer_num);
		/* NOTE(review): mfd is not NULL-checked here — confirm */
		if (mfd->ref_cnt == 0) {
			/* adb stop */
			if (pipe->pipe_type == OVERLAY_TYPE_BF)
				mdp4_overlay_borderfill_stage_down(pipe);

			/* base pipe may change after borderfill_stage_down */
			pipe = vctrl->base_pipe;
			mdp4_mixer_stage_down(pipe, 1);
			mdp4_overlay_pipe_free(pipe);
			vctrl->base_pipe = NULL;
		} else {
			/* system suspending */
			mdp4_mixer_stage_down(vctrl->base_pipe, 1);
			mdp4_overlay_iommu_pipe_free(
				vctrl->base_pipe->pipe_ndx, 1);
		}
	}

	/* MDP clock disable */
	mdp_clk_ctrl(0);
	mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE);

	return ret;
}
/*
 * mdp4_lcdc_blt_ov_update() - point the overlay engine at the current
 * blt (writeback) frame. Double-buffered: the low bit of ov_cnt selects
 * which half of the blt buffer to write into. No-op when blt is off.
 */
static void mdp4_lcdc_blt_ov_update(struct mdp4_overlay_pipe *pipe)
{
	char *ov_base;
	uint32 frame_off, target;
	int bytes_pp;

	if (pipe->ov_blt_addr == 0)
		return;

#ifdef BLT_RGB565
	bytes_pp = 2;	/* overlay ouput is RGB565 */
#else
	bytes_pp = 3;	/* overlay ouput is RGB888 */
#endif
	frame_off = (pipe->ov_cnt & 0x01) ?
		pipe->src_height * pipe->src_width * bytes_pp : 0;
	target = pipe->ov_blt_addr + frame_off;

	/* overlay 0 */
	ov_base = MDP_BASE + MDP4_OVERLAYPROC0_BASE;	/* 0x10000 */
	outpdw(ov_base + 0x000c, target);
	outpdw(ov_base + 0x001c, target);
}
/*
 * mdp4_lcdc_blt_dmap_update() - point DMA-P at the current blt frame.
 * Mirror of mdp4_lcdc_blt_ov_update() for the read side; the low bit of
 * dmap_cnt selects the buffer half. No-op when blt is off.
 */
static void mdp4_lcdc_blt_dmap_update(struct mdp4_overlay_pipe *pipe)
{
	uint32 frame_off, target;
	int bytes_pp;

	if (pipe->ov_blt_addr == 0)
		return;

#ifdef BLT_RGB565
	bytes_pp = 2;	/* overlay ouput is RGB565 */
#else
	bytes_pp = 3;	/* overlay ouput is RGB888 */
#endif
	frame_off = (pipe->dmap_cnt & 0x01) ?
		pipe->src_height * pipe->src_width * bytes_pp : 0;
	target = pipe->dma_blt_addr + frame_off;

	/* dmap */
	MDP_OUTP(MDP_BASE + 0x90008, target);
}
/*
 * mdp4_primary_vsync_lcdc: called from isr
 * Records the vsync timestamp and releases any threads blocked on the
 * vsync completion.
 */
void mdp4_primary_vsync_lcdc(void)
{
	struct vsycn_ctrl *vc = &vsync_ctrl_db[0];

	pr_debug("%s: cpu=%d\n", __func__, smp_processor_id());

	spin_lock(&vc->spin_lock);
	vc->vsync_time = ktime_get();
	if (vc->wait_vsync_cnt) {
		complete_all(&vc->vsync_comp);
		vc->wait_vsync_cnt = 0;
	}
	spin_unlock(&vc->spin_lock);
}
/*
 * mdp4_dma_p_done_lcdc: called from isr
 *
 * DMA-P completion handler. Under the ctrl spinlock: disables the
 * DMA-P done irq, applies a pending blt mode switch (reprogramming the
 * overlay proc and dmap x/y, and — when entering blt mode — prefilling
 * one frame by kicking overlay0), then releases dmap waiters and
 * commits any queued overlay update.
 */
void mdp4_dmap_done_lcdc(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	if (cndx >= MAX_CONTROLLER) {
		pr_err("%s: out or range: cndx=%d\n", __func__, cndx);
		return;
	}

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL)
		return;

	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_DMA_P_DONE, MDP_DMAP_TERM);
	if (vctrl->blt_change) {
		/* blt mode toggled by mdp4_lcdc_do_blt(); apply it now */
		mdp4_overlayproc_cfg(pipe);
		mdp4_overlay_dmap_xy(pipe);
		if (pipe->ov_blt_addr) {
			mdp4_lcdc_blt_ov_update(pipe);
			pipe->ov_cnt++;
			/* Prefill one frame */
			vsync_irq_enable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
			/* kickoff overlay0 engine */
			mdp4_stat.kickoff_ov0++;
			vctrl->ov_koff++;	/* make up for prefill */
			outpdw(MDP_BASE + 0x0004, 0);
		}
		vctrl->blt_change = 0;
	}
	complete_all(&vctrl->dmap_comp);
	if (mdp_rev <= MDP_REV_41)
		mdp4_mixer_blend_cfg(MDP4_MIXER0);
	mdp4_overlay_dma_commit(cndx);
	spin_unlock(&vctrl->spin_lock);
}
/*
 * mdp4_overlay0_done_lcdc: called from isr
 *
 * Overlay0 engine completion handler: acknowledges the irq, bumps the
 * done counter, releases ov waiters, and — when blt is active — points
 * DMA-P at the freshly written blt frame.
 */
void mdp4_overlay0_done_lcdc(int cndx)
{
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;
	if (pipe == NULL)
		return;

	spin_lock(&vctrl->spin_lock);
	vsync_irq_disable(INTR_OVERLAY0_DONE, MDP_OVERLAY0_TERM);
	vctrl->ov_done++;
	complete_all(&vctrl->ov_comp);
	if (pipe->ov_blt_addr == 0) {
		/* direct-out mode: nothing further to flip */
		spin_unlock(&vctrl->spin_lock);
		return;
	}
	mdp4_lcdc_blt_dmap_update(pipe);
	pipe->dmap_cnt++;
	spin_unlock(&vctrl->spin_lock);
}
/*
 * mdp4_lcdc_do_blt() - switch the pipe between direct-out and blt
 * (writeback) mode. The actual hardware reprogramming happens later in
 * mdp4_dmap_done_lcdc() when it sees blt_change set.
 *
 * Fixes vs original:
 *  - pr_info() arguments were swapped relative to the format string
 *    ("enable=%d change=%d" printed blt_change as enable and vice
 *    versa); arguments now match the format.
 *  - The duplicated unlock-and-return branch is collapsed into one
 *    unlock path (the early-return branch did nothing extra).
 */
static void mdp4_lcdc_do_blt(struct msm_fb_data_type *mfd, int enable)
{
	unsigned long flag;
	int cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	mdp4_allocate_writeback_buf(mfd, MDP4_MIXER0);

	if (mfd->ov0_wb_buf->write_addr == 0) {
		pr_info("%s: no blt_base assigned\n", __func__);
		return;
	}

	spin_lock_irqsave(&vctrl->spin_lock, flag);
	if (enable && pipe->ov_blt_addr == 0) {
		/* enter blt mode */
		pipe->ov_blt_addr = mfd->ov0_wb_buf->write_addr;
		pipe->dma_blt_addr = mfd->ov0_wb_buf->read_addr;
		pipe->ov_cnt = 0;
		pipe->dmap_cnt = 0;
		vctrl->ov_koff = 0;
		vctrl->ov_done = 0;
		vctrl->blt_free = 0;
		mdp4_stat.blt_lcdc++;
		vctrl->blt_change++;
	} else if (enable == 0 && pipe->ov_blt_addr) {
		/* leave blt mode */
		pipe->ov_blt_addr = 0;
		pipe->dma_blt_addr = 0;
		vctrl->blt_free = 4;	/* 4 commits to free wb buf */
		vctrl->blt_change++;
	}
	pr_info("%s: enable=%d change=%d blt_addr=%x\n", __func__,
		enable, vctrl->blt_change, (int)pipe->ov_blt_addr);
	spin_unlock_irqrestore(&vctrl->spin_lock, flag);
}
/* ioctl entry: toggle blt mode per userspace request. */
void mdp4_lcdc_overlay_blt(struct msm_fb_data_type *mfd,
	struct msmfb_overlay_blt *req)
{
	mdp4_lcdc_do_blt(mfd, req->enable);
}
/* Force blt (writeback) mode on. */
void mdp4_lcdc_overlay_blt_start(struct msm_fb_data_type *mfd)
{
	mdp4_lcdc_do_blt(mfd, 1);
}
/* Force blt (writeback) mode off. */
void mdp4_lcdc_overlay_blt_stop(struct msm_fb_data_type *mfd)
{
	mdp4_lcdc_do_blt(mfd, 0);
}
/*
 * mdp4_lcdc_overlay() - pan/update path for the framebuffer.
 * Under the ov mutex: refreshes the base RGB pipe's source address from
 * the framebuffer, queues it, commits all queued pipes, and waits for
 * the appropriate completion (overlay-done in blt mode, dmap-done
 * otherwise) so the frame has landed before returning.
 */
void mdp4_lcdc_overlay(struct msm_fb_data_type *mfd)
{
	struct fb_info *fbi = mfd->fbi;
	uint8 *buf;
	unsigned int buf_offset;
	int bpp;
	int cnt, cndx = 0;
	struct vsycn_ctrl *vctrl;
	struct mdp4_overlay_pipe *pipe;

	mutex_lock(&mfd->dma->ov_mutex);
	vctrl = &vsync_ctrl_db[cndx];
	pipe = vctrl->base_pipe;

	/* nothing to do before mdp4_lcdc_on() or after panel power-off */
	if (!pipe || !mfd->panel_power_on) {
		mutex_unlock(&mfd->dma->ov_mutex);
		return;
	}

	pr_debug("%s: cpu=%d pid=%d\n", __func__,
			smp_processor_id(), current->pid);
	if (pipe->pipe_type == OVERLAY_TYPE_RGB) {
		bpp = fbi->var.bits_per_pixel / 8;
		buf = (uint8 *) fbi->fix.smem_start;
		buf_offset = calc_fb_offset(mfd, fbi, bpp);

		if (mfd->display_iova)
			pipe->srcp0_addr = mfd->display_iova + buf_offset;
		else
			pipe->srcp0_addr = (uint32)(buf + buf_offset);

		mdp4_lcdc_pipe_queue(0, pipe);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 1);

	cnt = mdp4_lcdc_pipe_commit(cndx, 0);
	if (cnt) {
		if (pipe->ov_blt_addr)
			mdp4_lcdc_wait4ov(cndx);
		else
			mdp4_lcdc_wait4dmap(cndx);
	}

	mdp4_overlay_mdp_perf_upd(mfd, 0);
	mutex_unlock(&mfd->dma->ov_mutex);
}
| gpl-2.0 |
Abhinav1997/android_kernel_sony_riogrande | drivers/staging/iio/accel/lis3l02dq_ring.c | 2698 | 11522 | #include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/gpio.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/spi/spi.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include "../iio.h"
#include "../sysfs.h"
#include "../ring_sw.h"
#include "../kfifo_buf.h"
#include "accel.h"
#include "../trigger.h"
#include "lis3l02dq.h"
/**
 * combine_8_to_16() utility function to munge two u8s into a u16
 * (@lower becomes the low byte, @upper the high byte).
 **/
static inline u16 combine_8_to_16(u8 lower, u8 upper)
{
	return (u16)lower | ((u16)upper << 8);
}
/**
 * lis3l02dq_data_rdy_trig_poll() the event handler for the data rdy trig
 *
 * When the data-ready trigger is armed, poll it from hard-irq context
 * and finish; otherwise wake the threaded handler to deal with the
 * (event-style) interrupt instead.
 **/
irqreturn_t lis3l02dq_data_rdy_trig_poll(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	if (st->trigger_on) {
		iio_trigger_poll(st->trig, iio_get_time_ns());
		return IRQ_HANDLED;
	} else
		return IRQ_WAKE_THREAD;
}
/**
 * lis3l02dq_read_accel_from_ring() individual acceleration read from ring
 *
 * Pulls the most recent datum out of the ring and extracts the channel
 * at scan position @index into *@val. Fails with -EINVAL if that channel
 * is not in the scan mask, -EBUSY if the ring cannot read-back its last
 * element.
 **/
ssize_t lis3l02dq_read_accel_from_ring(struct iio_ring_buffer *ring,
				       int index,
				       int *val)
{
	int ret;
	s16 *data;

	if (!iio_scan_mask_query(ring, index))
		return -EINVAL;

	if (!ring->access->read_last)
		return -EBUSY;

	data = kmalloc(ring->access->get_bytes_per_datum(ring),
		       GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	ret = ring->access->read_last(ring, (u8 *)data);
	if (ret)
		goto error_free_data;
	/* position within the datum = number of enabled channels below index */
	*val = data[bitmap_weight(&ring->scan_mask, index)];
error_free_data:
	kfree(data);
	return ret;
}
/*
 * SPI tx template: read-command byte + dummy byte for each of the six
 * output registers (X/Y/Z, low then high byte).
 */
static const u8 read_all_tx_array[] = {
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_X_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Y_H_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_L_ADDR), 0,
	LIS3L02DQ_READ_REG(LIS3L02DQ_REG_OUT_Z_H_ADDR), 0,
};
/**
 * lis3l02dq_read_all() Reads all channels currently selected
 * @st: device specific state
 * @rx_array: (dma capable) receive array, must be at least
 *            4*number of channels
 *
 * Builds one 2-byte SPI transfer per register read (two registers per
 * enabled axis) and runs them as a single message. When @rx_array is
 * NULL the registers are still read (clearing data-ready) but the data
 * is discarded. Results land in alternate bytes of @rx_array.
 **/
static int lis3l02dq_read_all(struct iio_dev *indio_dev, u8 *rx_array)
{
	struct iio_ring_buffer *ring = indio_dev->ring;
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	struct spi_transfer *xfers;
	struct spi_message msg;
	int ret, i, j = 0;

	xfers = kzalloc((ring->scan_count) * 2
			* sizeof(*xfers), GFP_KERNEL);
	if (!xfers)
		return -ENOMEM;

	mutex_lock(&st->buf_lock);

	for (i = 0; i < ARRAY_SIZE(read_all_tx_array)/4; i++)
		if (ring->scan_mask & (1 << i)) {
			/* lower byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;

			/* upper byte */
			xfers[j].tx_buf = st->tx + 2*j;
			st->tx[2*j] = read_all_tx_array[i*4 + 2];
			st->tx[2*j + 1] = 0;
			if (rx_array)
				xfers[j].rx_buf = rx_array + j*2;
			xfers[j].bits_per_word = 8;
			xfers[j].len = 2;
			xfers[j].cs_change = 1;
			j++;
		}

	/* After these are transmitted, the rx_buff should have
	 * values in alternate bytes
	 */
	spi_message_init(&msg);
	for (j = 0; j < ring->scan_count * 2; j++)
		spi_message_add_tail(&xfers[j], &msg);

	ret = spi_sync(st->us, &msg);
	mutex_unlock(&st->buf_lock);
	kfree(xfers);

	return ret;
}
/*
 * lis3l02dq_get_ring_element() - read all enabled channels into @buf as
 * s16 values. Returns the number of bytes written or a negative errno.
 *
 * Fix vs original: rx_array was leaked when lis3l02dq_read_all()
 * failed (early return without kfree); the error path now frees it.
 */
static int lis3l02dq_get_ring_element(struct iio_dev *indio_dev,
				      u8 *buf)
{
	int ret, i;
	u8 *rx_array;
	s16 *data = (s16 *)buf;

	/* 4 raw bytes received per channel (2 transfers x 2 bytes) */
	rx_array = kzalloc(4 * (indio_dev->ring->scan_count), GFP_KERNEL);
	if (rx_array == NULL)
		return -ENOMEM;
	ret = lis3l02dq_read_all(indio_dev, rx_array);
	if (ret < 0) {
		kfree(rx_array);
		return ret;
	}
	/* data bytes sit at odd offsets: low byte then high byte */
	for (i = 0; i < indio_dev->ring->scan_count; i++)
		data[i] = combine_8_to_16(rx_array[i*4+1],
					  rx_array[i*4+3]);
	kfree(rx_array);
	return i*sizeof(data[0]);
}
static irqreturn_t lis3l02dq_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->private_data;
struct iio_ring_buffer *ring = indio_dev->ring;
int len = 0;
size_t datasize = ring->access->get_bytes_per_datum(ring);
char *data = kmalloc(datasize, GFP_KERNEL);
if (data == NULL) {
dev_err(indio_dev->dev.parent,
"memory alloc failed in ring bh");
return -ENOMEM;
}
if (ring->scan_count)
len = lis3l02dq_get_ring_element(indio_dev, data);
/* Guaranteed to be aligned with 8 byte boundary */
if (ring->scan_timestamp)
*(s64 *)(((phys_addr_t)data + len
+ sizeof(s64) - 1) & ~(sizeof(s64) - 1))
= pf->timestamp;
ring->access->store_to(ring, (u8 *)data, pf->timestamp);
iio_trigger_notify_done(indio_dev->trig);
kfree(data);
return IRQ_HANDLED;
}
/* Caller responsible for locking as necessary.
 *
 * Turn the data-ready signal generation on or off via CTRL_2, keeping
 * st->trigger_on in sync. Disabling writes the register twice (original
 * comment suggests a hardware-bug workaround); enabling first disables
 * all events.
 */
static int
__lis3l02dq_write_data_ready_config(struct device *dev, bool state)
{
	int ret;
	u8 valold;
	bool currentlyset;
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	/* Get the current event mask register */
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_2_ADDR,
				       &valold);
	if (ret)
		goto error_ret;
	/* Find out if data ready is already on */
	currentlyset
		= valold & LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

	/* Disable requested */
	if (!state && currentlyset) {
		/* disable the data ready signal */
		valold &= ~LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		/* The double write is to overcome a hardware bug?*/
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
		st->trigger_on = false;
	/* Enable requested */
	} else if (state && !currentlyset) {
		/* if not set, enable requested */
		/* first disable all events */
		ret = lis3l02dq_disable_all_events(indio_dev);
		if (ret < 0)
			goto error_ret;

		/*
		 * NOTE(review): presumably disable_all_events() returns
		 * the resulting control value on success, which is OR'd
		 * back in here — confirm against its definition.
		 */
		valold = ret |
			LIS3L02DQ_REG_CTRL_2_ENABLE_DATA_READY_GENERATION;

		st->trigger_on = true;
		ret = lis3l02dq_spi_write_reg_8(indio_dev,
						LIS3L02DQ_REG_CTRL_2_ADDR,
						valold);
		if (ret)
			goto error_ret;
	}

	return 0;
error_ret:
	return ret;
}
/**
 * lis3l02dq_data_rdy_trigger_set_state() set datardy interrupt state
 *
 * If disabling the interrupt also does a final read to ensure it is clear.
 * This is only important in some cases where the scan enable elements are
 * switched before the ring is reenabled.
 **/
static int lis3l02dq_data_rdy_trigger_set_state(struct iio_trigger *trig,
						bool state)
{
	struct iio_dev *indio_dev = trig->private_data;
	int ret = 0;
	u8 t;

	/* NOTE(review): return value ignored here — confirm intentional */
	__lis3l02dq_write_data_ready_config(&indio_dev->dev, state);
	if (state == false) {
		/*
		 * A possible quirk with the handler is currently worked around
		 * by ensuring outstanding read events are cleared.
		 */
		ret = lis3l02dq_read_all(indio_dev, NULL);
	}
	/* read wake-up source to clear any latched event */
	lis3l02dq_spi_read_reg_8(indio_dev,
				 LIS3L02DQ_REG_WAKE_UP_SRC_ADDR,
				 &t);
	return ret;
}
/**
 * lis3l02dq_trig_try_reen() try renabling irq for data rdy trigger
 * @trig: the datardy trigger
 *
 * While the interrupt GPIO is still asserted, drain the output
 * registers (up to 5 attempts) so the level-triggered line deasserts.
 */
static int lis3l02dq_trig_try_reen(struct iio_trigger *trig)
{
	struct iio_dev *indio_dev = trig->private_data;
	struct lis3l02dq_state *st = iio_priv(indio_dev);
	int i;

	/* If gpio still high (or high again) */
	/* In theory possible we will need to do this several times */
	for (i = 0; i < 5; i++)
		if (gpio_get_value(irq_to_gpio(st->us->irq)))
			lis3l02dq_read_all(indio_dev, NULL);
		else
			break;
	if (i == 5)
		printk(KERN_INFO
		       "Failed to clear the interrupt for lis3l02dq\n");

	/* irq reenabled so success! */
	return 0;
}
/*
 * lis3l02dq_probe_trigger() - allocate, configure and register the
 * data-ready trigger for this device. Returns 0 or a negative errno;
 * the trigger is freed on registration failure.
 */
int lis3l02dq_probe_trigger(struct iio_dev *indio_dev)
{
	int ret;
	struct lis3l02dq_state *st = iio_priv(indio_dev);

	st->trig = iio_allocate_trigger("lis3l02dq-dev%d", indio_dev->id);
	if (!st->trig) {
		ret = -ENOMEM;
		goto error_ret;
	}

	st->trig->dev.parent = &st->us->dev;
	st->trig->owner = THIS_MODULE;
	st->trig->private_data = indio_dev;
	st->trig->set_trigger_state = &lis3l02dq_data_rdy_trigger_set_state;
	st->trig->try_reenable = &lis3l02dq_trig_try_reen;
	ret = iio_trigger_register(st->trig);
	if (ret)
		goto error_free_trig;
	return 0;

error_free_trig:
	iio_free_trigger(st->trig);
error_ret:
	return ret;
}
/* Unregister and release the device's data-ready trigger. */
void lis3l02dq_remove_trigger(struct iio_dev *indio_dev)
{
	struct lis3l02dq_state *data = iio_priv(indio_dev);

	iio_trigger_unregister(data->trig);
	iio_free_trigger(data->trig);
}
/* Tear down what lis3l02dq_configure_ring() set up. */
void lis3l02dq_unconfigure_ring(struct iio_dev *indio_dev)
{
	iio_dealloc_pollfunc(indio_dev->pollfunc);
	lis3l02dq_free_buf(indio_dev->ring);
}
static int lis3l02dq_ring_postenable(struct iio_dev *indio_dev)
{
	/* Disable unwanted channels otherwise the interrupt will not clear */
	u8 t;
	int ret;
	bool oneenabled = false;

	/* read-modify-write CTRL_1 so only scanned axes stay enabled */
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       &t);
	if (ret)
		goto error_ret;

	if (iio_scan_mask_query(indio_dev->ring, 0)) {
		t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
		oneenabled = true;
	} else
		t &= ~LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE;
	if (iio_scan_mask_query(indio_dev->ring, 1)) {
		t |= LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
		oneenabled = true;
	} else
		t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE;
	if (iio_scan_mask_query(indio_dev->ring, 2)) {
		t |= LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;
		oneenabled = true;
	} else
		t &= ~LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;

	if (!oneenabled) /* what happens in this case is unknown */
		return -EINVAL;
	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_1_ADDR,
					t);
	if (ret)
		goto error_ret;

	return iio_triggered_ring_postenable(indio_dev);
error_ret:
	return ret;
}
/* Turn all channels on again */
static int lis3l02dq_ring_predisable(struct iio_dev *indio_dev)
{
	u8 t;
	int ret;

	ret = iio_triggered_ring_predisable(indio_dev);
	if (ret)
		goto error_ret;

	/* re-enable every axis in CTRL_1 (undo postenable's masking) */
	ret = lis3l02dq_spi_read_reg_8(indio_dev,
				       LIS3L02DQ_REG_CTRL_1_ADDR,
				       &t);
	if (ret)
		goto error_ret;
	t |= LIS3L02DQ_REG_CTRL_1_AXES_X_ENABLE |
		LIS3L02DQ_REG_CTRL_1_AXES_Y_ENABLE |
		LIS3L02DQ_REG_CTRL_1_AXES_Z_ENABLE;

	ret = lis3l02dq_spi_write_reg_8(indio_dev,
					LIS3L02DQ_REG_CTRL_1_ADDR,
					t);

error_ret:
	return ret;
}
/* Ring setup callbacks: generic preenable, custom axis gating. */
static const struct iio_ring_setup_ops lis3l02dq_ring_setup_ops = {
	.preenable = &iio_sw_ring_preenable,
	.postenable = &lis3l02dq_ring_postenable,
	.predisable = &lis3l02dq_ring_predisable,
};
/*
 * lis3l02dq_configure_ring() - allocate and wire up the ring buffer and
 * its poll function; defaults to scanning all three axes with
 * timestamps. Returns 0 or a negative errno.
 */
int lis3l02dq_configure_ring(struct iio_dev *indio_dev)
{
	int ret;
	struct iio_ring_buffer *ring;

	ring = lis3l02dq_alloc_buf(indio_dev);
	if (!ring)
		return -ENOMEM;

	indio_dev->ring = ring;
	/* Effectively select the ring buffer implementation */
	indio_dev->ring->access = &lis3l02dq_access_funcs;
	ring->bpe = 2;	/* 2 bytes per element (s16 samples) */

	ring->scan_timestamp = true;
	ring->setup_ops = &lis3l02dq_ring_setup_ops;
	ring->owner = THIS_MODULE;

	/* Set default scan mode */
	iio_scan_mask_set(ring, 0);
	iio_scan_mask_set(ring, 1);
	iio_scan_mask_set(ring, 2);

	/* Functions are NULL as we set handler below */
	indio_dev->pollfunc = iio_alloc_pollfunc(&iio_pollfunc_store_time,
						 &lis3l02dq_trigger_handler,
						 0,
						 indio_dev,
						 "lis3l02dq_consumer%d",
						 indio_dev->id);

	if (indio_dev->pollfunc == NULL) {
		ret = -ENOMEM;
		goto error_iio_sw_rb_free;
	}

	indio_dev->modes |= INDIO_RING_TRIGGERED;
	return 0;

error_iio_sw_rb_free:
	lis3l02dq_free_buf(indio_dev->ring);
	return ret;
}
| gpl-2.0 |
yangyang1989/linux-3.8.2-mini2440 | drivers/staging/rtl8712/rtl871x_xmit.c | 2954 | 31707 | /******************************************************************************
* rtl871x_xmit.c
*
* Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
* Linux device driver for RTL8192SU
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
* Contact information:
* WLAN FAE <wlanfae@realtek.com>
* Larry Finger <Larry.Finger@lwfinger.net>
*
******************************************************************************/
#define _RTL871X_XMIT_C_
#include "osdep_service.h"
#include "drv_types.h"
#include "wifi.h"
#include "osdep_intf.h"
#include "usb_ops.h"
static const u8 P802_1H_OUI[P80211_OUI_LEN] = {0x00, 0x00, 0xf8};
static const u8 RFC1042_OUI[P80211_OUI_LEN] = {0x00, 0x00, 0x00};
static void init_hwxmits(struct hw_xmit *phwxmit, sint entry);
static void alloc_hwxmits(struct _adapter *padapter);
static void free_hwxmits(struct _adapter *padapter);
/* Reset a per-AC tx service queue to empty. */
static void _init_txservq(struct tx_servq *ptxservq)
{
	_init_listhead(&ptxservq->tx_pending);
	_init_queue(&ptxservq->sta_pending);
	ptxservq->qcnt = 0;
}
/*
 * Zero and initialize a station's xmit state: one service queue per
 * access category (BE/BK/VI/VO) plus the legacy power-save and APSD
 * lists.
 */
void _r8712_init_sta_xmit_priv(struct sta_xmit_priv *psta_xmitpriv)
{
	memset((unsigned char *)psta_xmitpriv, 0,
		 sizeof(struct sta_xmit_priv));
	spin_lock_init(&psta_xmitpriv->lock);
	_init_txservq(&psta_xmitpriv->be_q);
	_init_txservq(&psta_xmitpriv->bk_q);
	_init_txservq(&psta_xmitpriv->vi_q);
	_init_txservq(&psta_xmitpriv->vo_q);
	_init_listhead(&psta_xmitpriv->legacy_dz);
	_init_listhead(&psta_xmitpriv->apsd);
}
/*
 * _r8712_init_xmit_priv() - set up all transmit state for the adapter:
 * pending queues, the NR_XMITFRAME xmit_frame pool, the hardware tx
 * queues, and the NR_XMITBUFF xmit_buf pool (each with an aligned DMA
 * buffer and URB resources). Returns _SUCCESS or _FAIL.
 *
 * Fixes vs original (error-path memory leaks):
 *  - When the xmit_buf array allocation failed, the frame pool was
 *    leaked; it is now freed (and the pointers NULLed so a later
 *    _free_xmit_priv() is a safe no-op).
 *  - When a per-buffer allocation failed mid-loop, the previously
 *    allocated buffers and URB resources were leaked; they are now
 *    unwound before returning _FAIL.
 */
sint _r8712_init_xmit_priv(struct xmit_priv *pxmitpriv,
			   struct _adapter *padapter)
{
	sint i;
	struct xmit_buf *pxmitbuf;
	struct xmit_frame *pxframe;

	memset((unsigned char *)pxmitpriv, 0, sizeof(struct xmit_priv));
	spin_lock_init(&pxmitpriv->lock);
	/*
	Please insert all the queue initialization using _init_queue below
	*/
	pxmitpriv->adapter = padapter;
	_init_queue(&pxmitpriv->be_pending);
	_init_queue(&pxmitpriv->bk_pending);
	_init_queue(&pxmitpriv->vi_pending);
	_init_queue(&pxmitpriv->vo_pending);
	_init_queue(&pxmitpriv->bm_pending);
	_init_queue(&pxmitpriv->legacy_dz_queue);
	_init_queue(&pxmitpriv->apsd_queue);
	_init_queue(&pxmitpriv->free_xmit_queue);
	/*
	Please allocate memory with the sz = (struct xmit_frame) * NR_XMITFRAME,
	and initialize free_xmit_frame below.
	Please also apply free_txobj to link_up all the xmit_frames...
	*/
	pxmitpriv->pallocated_frame_buf = _malloc(NR_XMITFRAME *
					sizeof(struct xmit_frame) + 4);
	if (pxmitpriv->pallocated_frame_buf == NULL) {
		pxmitpriv->pxmit_frame_buf = NULL;
		return _FAIL;
	}
	/* 4-byte align the frame array within the raw allocation */
	pxmitpriv->pxmit_frame_buf = pxmitpriv->pallocated_frame_buf + 4 -
			((addr_t) (pxmitpriv->pallocated_frame_buf) & 3);
	pxframe = (struct xmit_frame *) pxmitpriv->pxmit_frame_buf;
	for (i = 0; i < NR_XMITFRAME; i++) {
		_init_listhead(&(pxframe->list));
		pxframe->padapter = padapter;
		pxframe->frame_tag = DATA_FRAMETAG;
		pxframe->pkt = NULL;
		pxframe->buf_addr = NULL;
		pxframe->pxmitbuf = NULL;
		list_insert_tail(&(pxframe->list),
				 &(pxmitpriv->free_xmit_queue.queue));
		pxframe++;
	}
	pxmitpriv->free_xmitframe_cnt = NR_XMITFRAME;
	/*
	init xmit hw_txqueue
	*/
	_r8712_init_hw_txqueue(&pxmitpriv->be_txqueue, BE_QUEUE_INX);
	_r8712_init_hw_txqueue(&pxmitpriv->bk_txqueue, BK_QUEUE_INX);
	_r8712_init_hw_txqueue(&pxmitpriv->vi_txqueue, VI_QUEUE_INX);
	_r8712_init_hw_txqueue(&pxmitpriv->vo_txqueue, VO_QUEUE_INX);
	_r8712_init_hw_txqueue(&pxmitpriv->bmc_txqueue, BMC_QUEUE_INX);
	pxmitpriv->frag_len = MAX_FRAG_THRESHOLD;
	pxmitpriv->txirp_cnt = 1;
	/*per AC pending irp*/
	pxmitpriv->beq_cnt = 0;
	pxmitpriv->bkq_cnt = 0;
	pxmitpriv->viq_cnt = 0;
	pxmitpriv->voq_cnt = 0;
	/*init xmit_buf*/
	_init_queue(&pxmitpriv->free_xmitbuf_queue);
	_init_queue(&pxmitpriv->pending_xmitbuf_queue);
	pxmitpriv->pallocated_xmitbuf = _malloc(NR_XMITBUFF *
					sizeof(struct xmit_buf) + 4);
	if (pxmitpriv->pallocated_xmitbuf == NULL)
		goto err_free_frames;
	pxmitpriv->pxmitbuf = pxmitpriv->pallocated_xmitbuf + 4 -
			      ((addr_t)(pxmitpriv->pallocated_xmitbuf) & 3);
	pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
	for (i = 0; i < NR_XMITBUFF; i++) {
		_init_listhead(&pxmitbuf->list);
		pxmitbuf->pallocated_buf = _malloc(MAX_XMITBUF_SZ +
						   XMITBUF_ALIGN_SZ);
		if (pxmitbuf->pallocated_buf == NULL)
			goto err_unwind_bufs;
		pxmitbuf->pbuf = pxmitbuf->pallocated_buf + XMITBUF_ALIGN_SZ -
				 ((addr_t) (pxmitbuf->pallocated_buf) &
				 (XMITBUF_ALIGN_SZ - 1));
		r8712_xmit_resource_alloc(padapter, pxmitbuf);
		list_insert_tail(&pxmitbuf->list,
				 &(pxmitpriv->free_xmitbuf_queue.queue));
		pxmitbuf++;
	}
	pxmitpriv->free_xmitbuf_cnt = NR_XMITBUFF;
	_init_workitem(&padapter->wkFilterRxFF0, r8712_SetFilter, padapter);
	alloc_hwxmits(padapter);
	init_hwxmits(pxmitpriv->hwxmits, pxmitpriv->hwxmit_entry);
	tasklet_init(&pxmitpriv->xmit_tasklet,
		(void(*)(unsigned long))r8712_xmit_bh,
		(unsigned long)padapter);
	return _SUCCESS;

err_unwind_bufs:
	/* free the i buffers (and their URB resources) allocated so far */
	pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;
	while (i-- > 0) {
		r8712_xmit_resource_free(padapter, pxmitbuf);
		kfree(pxmitbuf->pallocated_buf);
		pxmitbuf++;
	}
	kfree(pxmitpriv->pallocated_xmitbuf);
	pxmitpriv->pallocated_xmitbuf = NULL;
err_free_frames:
	kfree(pxmitpriv->pallocated_frame_buf);
	pxmitpriv->pallocated_frame_buf = NULL;
	pxmitpriv->pxmit_frame_buf = NULL;
	return _FAIL;
}
/*
 * Release everything _r8712_init_xmit_priv() allocated: per-frame
 * completion, per-buffer URB resources and DMA buffers, both pool
 * allocations, and the hw xmit array. Early-out when the frame pool was
 * never set up.
 */
void _free_xmit_priv(struct xmit_priv *pxmitpriv)
{
	int i;
	struct _adapter *padapter = pxmitpriv->adapter;
	struct xmit_frame *pxmitframe = (struct xmit_frame *)
					pxmitpriv->pxmit_frame_buf;
	struct xmit_buf *pxmitbuf = (struct xmit_buf *)pxmitpriv->pxmitbuf;

	if (pxmitpriv->pxmit_frame_buf == NULL)
		return;
	for (i = 0; i < NR_XMITFRAME; i++) {
		r8712_xmit_complete(padapter, pxmitframe);
		pxmitframe++;
	}
	for (i = 0; i < NR_XMITBUFF; i++) {
		r8712_xmit_resource_free(padapter, pxmitbuf);
		kfree(pxmitbuf->pallocated_buf);
		pxmitbuf++;
	}
	kfree(pxmitpriv->pallocated_frame_buf);
	kfree(pxmitpriv->pallocated_xmitbuf);
	free_hwxmits(padapter);
}
/*
 * r8712_update_attrib - derive per-packet TX attributes from an outgoing
 * 802.3 frame.
 *
 * Parses the ethernet header, selects RA/TA per fw state (STA/AP/adhoc/MP),
 * resolves the destination sta_info, and fills QoS/security fields of
 * @pattrib for the rest of the xmit path.
 *
 * Returns _SUCCESS, or _FAIL when the packet must be dropped (no station
 * entry, 802.1x port blocked, missing TKIP key, non-MP frame in MP mode).
 *
 * Fixes vs. previous revision:
 *  - local variable was named "bool", colliding with <stdbool.h>; renamed.
 *  - restored "&etherhdr" (was mojibake "ðerhdr" from an &eth; entity).
 *  - dropped a redundant nested ETH_P_IP re-check inside the ETH_P_IP branch.
 */
sint r8712_update_attrib(struct _adapter *padapter, _pkt *pkt,
			 struct pkt_attrib *pattrib)
{
	uint i;
	struct pkt_file pktfile;
	struct sta_info *psta = NULL;
	struct ethhdr etherhdr;
	struct tx_cmd txdesc;
	sint bmcast;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct qos_priv *pqospriv = &pmlmepriv->qospriv;

	_r8712_open_pktfile(pkt, &pktfile);
	i = _r8712_pktfile_read(&pktfile, (unsigned char *)&etherhdr, ETH_HLEN);
	pattrib->ether_type = ntohs(etherhdr.h_proto);
	{
		u8 timer_cancelled;

		/* If driver xmit ARP packet, driver can set ps mode to initial
		 * setting. It stands for getting DHCP or fix IP. */
		if (pattrib->ether_type == 0x0806) {
			if (padapter->pwrctrlpriv.pwr_mode !=
			    padapter->registrypriv.power_mgnt) {
				_cancel_timer(&(pmlmepriv->dhcp_timer),
					      &timer_cancelled);
				r8712_set_ps_mode(padapter, padapter->registrypriv.
					power_mgnt, padapter->registrypriv.smart_ps);
			}
		}
	}
	memcpy(pattrib->dst, &etherhdr.h_dest, ETH_ALEN);
	memcpy(pattrib->src, &etherhdr.h_source, ETH_ALEN);
	pattrib->pctrl = 0;
	/* Receiver/transmitter addresses depend on the current fw state. */
	if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true) ||
	    (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true)) {
		memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
		memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
	} else if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) {
		memcpy(pattrib->ra, get_bssid(pmlmepriv), ETH_ALEN);
		memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
	} else if (check_fwstate(pmlmepriv, WIFI_AP_STATE)) {
		memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
		memcpy(pattrib->ta, get_bssid(pmlmepriv), ETH_ALEN);
	} else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) {
		/* firstly, filter packet not belongs to mp */
		if (pattrib->ether_type != 0x8712)
			return _FAIL;
		/* for mp storing the txcmd per packet,
		 * according to the info of txcmd to update pattrib */
		/* get MP_TXDESC_SIZE bytes txcmd per packet */
		i = _r8712_pktfile_read(&pktfile, (u8 *)&txdesc, TXDESC_SIZE);
		memcpy(pattrib->ra, pattrib->dst, ETH_ALEN);
		memcpy(pattrib->ta, pattrib->src, ETH_ALEN);
		pattrib->pctrl = 1;
	}
	/* r8712_xmitframe_coalesce() overwrite this! */
	pattrib->pktlen = pktfile.pkt_len;
	if (ETH_P_IP == pattrib->ether_type) {
		/* The following is for DHCP and ARP packet, we use cck1M to
		 * tx these packets and let LPS awake some time
		 * to prevent DHCP protocol fail */
		u8 tmp[24];

		_r8712_pktfile_read(&pktfile, &tmp[0], 24);
		pattrib->dhcp_pkt = 0;
		if (pktfile.pkt_len > 282) { /* MINIMUM_DHCP_PACKET_SIZE */
			if (((tmp[21] == 68) && (tmp[23] == 67)) ||
			    ((tmp[21] == 67) && (tmp[23] == 68))) {
				/* 68 : UDP BOOTP client
				 * 67 : UDP BOOTP server
				 * Use low rate to send DHCP packet. */
				pattrib->dhcp_pkt = 1;
			}
		}
	}
	bmcast = IS_MCAST(pattrib->ra);
	/* get sta_info */
	if (bmcast) {
		psta = r8712_get_bcmc_stainfo(padapter);
		pattrib->mac_id = 4;
	} else {
		if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) {
			psta = r8712_get_stainfo(pstapriv,
						 get_bssid(pmlmepriv));
			pattrib->mac_id = 5;
		} else {
			psta = r8712_get_stainfo(pstapriv, pattrib->ra);
			if (psta == NULL) /* drop the pkt */
				return _FAIL;
			if (check_fwstate(pmlmepriv, WIFI_STATION_STATE))
				pattrib->mac_id = 5;
			else
				pattrib->mac_id = psta->mac_id;
		}
	}
	if (psta) {
		pattrib->psta = psta;
	} else {
		/* if we cannot get psta => drop the pkt */
		return _FAIL;
	}
	pattrib->ack_policy = 0;
	/* get ether_hdr_len */
	pattrib->pkt_hdrlen = ETH_HLEN;
	if (pqospriv->qos_option)
		r8712_set_qos(&pktfile, pattrib);
	else {
		pattrib->hdrlen = WLAN_HDR_A3_LEN;
		pattrib->subtype = WIFI_DATA_TYPE;
		pattrib->priority = 0;
	}
	if (psta->ieee8021x_blocked == true) {
		/* 802.1x port not yet open: only EAPOL (0x888e) may pass,
		 * except in MP mode where everything is allowed. */
		pattrib->encrypt = 0;
		if ((pattrib->ether_type != 0x888e) &&
		    (check_fwstate(pmlmepriv, WIFI_MP_STATE) == false))
			return _FAIL;
	} else
		GET_ENCRY_ALGO(psecuritypriv, psta, pattrib->encrypt, bmcast);
	/* IV/ICV sizes per cipher */
	switch (pattrib->encrypt) {
	case _WEP40_:
	case _WEP104_:
		pattrib->iv_len = 4;
		pattrib->icv_len = 4;
		break;
	case _TKIP_:
		pattrib->iv_len = 8;
		pattrib->icv_len = 4;
		if (padapter->securitypriv.busetkipkey == _FAIL)
			return _FAIL;
		break;
	case _AES_:
		pattrib->iv_len = 8;
		pattrib->icv_len = 8;
		break;
	default:
		pattrib->iv_len = 0;
		pattrib->icv_len = 0;
		break;
	}
	if (pattrib->encrypt &&
	    ((padapter->securitypriv.sw_encrypt == true) ||
	     (psecuritypriv->hw_decrypted == false)))
		pattrib->bswenc = true;
	else
		pattrib->bswenc = false;
	/* if in MP_STATE, update pkt_attrib from mp_txcmd, and overwrite
	 * some settings above. */
	if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)
		pattrib->priority = (txdesc.txdw1 >> QSEL_SHT) & 0x1f;
	return _SUCCESS;
}
/*
 * xmitframe_addmic - compute and append the TKIP Michael MIC.
 *
 * For TKIP frames only: feeds DA, SA, priority and every fragment's payload
 * into the Michael algorithm, appends the 8-byte MIC after the last
 * fragment's payload and grows last_txcmdsz accordingly.
 * Returns _FAIL if the required MIC key is all-zero (not yet installed),
 * _SUCCESS otherwise (including the non-TKIP no-op case).
 *
 * NOTE(review): the byte offsets into pframe (e.g. [4], [10], [16], [24])
 * assume the 802.11 header layout produced by make_wlanhdr(); keep the two
 * functions in sync.
 */
static sint xmitframe_addmic(struct _adapter *padapter,
			     struct xmit_frame *pxmitframe)
{
	u32 curfragnum, length, datalen;
	u8 *pframe, *payload, mic[8];
	struct mic_data micdata;
	struct sta_info *stainfo;
	struct qos_priv *pqospriv = &(padapter->mlmepriv.qospriv);
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	u8 priority[4] = {0x0, 0x0, 0x0, 0x0};
	sint bmcst = IS_MCAST(pattrib->ra);

	/* station entry: either cached in the attrib or looked up by RA */
	if (pattrib->psta)
		stainfo = pattrib->psta;
	else
		stainfo = r8712_get_stainfo(&padapter->stapriv,
					    &pattrib->ra[0]);
	if (pattrib->encrypt == _TKIP_) {
		/* encode mic code */
		if (stainfo != NULL) {
			u8 null_key[16] = {0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
					   0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0,
					   0x0, 0x0};

			datalen = pattrib->pktlen - pattrib->hdrlen;
			pframe = pxmitframe->buf_addr + TXDESC_OFFSET;
			if (bmcst) {
				/* group frames use the group TX MIC key;
				 * an all-zero key means none installed */
				if (!memcmp(psecuritypriv->XGrptxmickey
				    [psecuritypriv->XGrpKeyid].skey,
				     null_key, 16))
					return _FAIL;
				/* start to calculate the mic code */
				r8712_secmicsetkey(&micdata,
						 psecuritypriv->
						 XGrptxmickey[psecuritypriv->
						 XGrpKeyid].skey);
			} else {
				/* unicast frames use the pairwise TX MIC key */
				if (!memcmp(&stainfo->tkiptxmickey.skey[0],
					    null_key, 16))
					return _FAIL;
				/* start to calculate the mic code */
				r8712_secmicsetkey(&micdata,
					     &stainfo->tkiptxmickey.skey[0]);
			}
			/* Michael input starts with DA then SA; which header
			 * address fields those are depends on ToDS/FromDS */
			if (pframe[1] & 1) { /* ToDS==1 */
				r8712_secmicappend(&micdata,
						   &pframe[16], 6); /*DA*/
				if (pframe[1]&2) /* From Ds==1 */
					r8712_secmicappend(&micdata,
							   &pframe[24], 6);
				else
					r8712_secmicappend(&micdata,
							   &pframe[10], 6);
			} else { /* ToDS==0 */
				r8712_secmicappend(&micdata,
						   &pframe[4], 6); /* DA */
				if (pframe[1]&2) /* From Ds==1 */
					r8712_secmicappend(&micdata,
							   &pframe[16], 6);
				else
					r8712_secmicappend(&micdata,
							   &pframe[10], 6);
			}
			/* 4-byte priority field; only byte 0 is non-zero and
			 * only when QoS is in use */
			if (pqospriv->qos_option == 1)
				priority[0] = (u8)pxmitframe->
					      attrib.priority;
			r8712_secmicappend(&micdata, &priority[0], 4);
			payload = pframe;
			/* walk every fragment; each starts 4-byte aligned at
			 * header+IV, last fragment sized by last_txcmdsz */
			for (curfragnum = 0; curfragnum < pattrib->nr_frags;
			     curfragnum++) {
				payload = (u8 *)RND4((addr_t)(payload));
				payload = payload+pattrib->
					  hdrlen+pattrib->iv_len;
				if ((curfragnum + 1) == pattrib->nr_frags) {
					length = pattrib->last_txcmdsz -
						  pattrib->hdrlen -
						  pattrib->iv_len -
						  ((psecuritypriv->sw_encrypt)
						  ? pattrib->icv_len : 0);
					r8712_secmicappend(&micdata, payload,
							   length);
					payload = payload+length;
				} else{
					length = pxmitpriv->frag_len -
					    pattrib->hdrlen-pattrib->iv_len -
					    ((psecuritypriv->sw_encrypt) ?
					    pattrib->icv_len : 0);
					r8712_secmicappend(&micdata, payload,
							   length);
					payload = payload + length +
						  pattrib->icv_len;
				}
			}
			r8712_secgetmic(&micdata, &(mic[0]));
			/* add mic code and add the mic code length in
			 * last_txcmdsz */
			memcpy(payload, &(mic[0]), 8);
			pattrib->last_txcmdsz += 8;
			payload = payload-pattrib->last_txcmdsz + 8;
		}
	}
	return _SUCCESS;
}
/*
 * xmitframe_swencrypt - apply software encryption to a coalesced frame.
 *
 * No-op unless the attrib requested software encryption (bswenc); then
 * dispatches to the WEP/TKIP/AES routine matching the negotiated cipher.
 * Always returns _SUCCESS.
 */
static sint xmitframe_swencrypt(struct _adapter *padapter,
				struct xmit_frame *pxmitframe)
{
	struct pkt_attrib *pattrib = &pxmitframe->attrib;

	if (!pattrib->bswenc)
		return _SUCCESS;

	switch (pattrib->encrypt) {
	case _WEP40_:
	case _WEP104_:
		r8712_wep_encrypt(padapter, (u8 *)pxmitframe);
		break;
	case _TKIP_:
		r8712_tkip_encrypt(padapter, (u8 *)pxmitframe);
		break;
	case _AES_:
		r8712_aes_encrypt(padapter, (u8 *)pxmitframe);
		break;
	default:
		/* unknown/no cipher: leave the frame untouched */
		break;
	}
	return _SUCCESS;
}
/*
 * make_wlanhdr - build the 802.11 MAC header for a data frame.
 *
 * Fills @hdr (WLANHDR_OFFSET bytes, zeroed first) from @pattrib: frame
 * subtype, ToDS/FromDS and address fields per fw state, privacy bit, the
 * QoS control field when QoS is enabled, and the per-TID sequence number.
 * Returns _FAIL when no fw state matches (frame cannot be addressed),
 * _SUCCESS otherwise.
 */
static sint make_wlanhdr(struct _adapter *padapter , u8 *hdr,
			 struct pkt_attrib *pattrib)
{
	u16 *qc;
	struct ieee80211_hdr *pwlanhdr = (struct ieee80211_hdr *)hdr;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct qos_priv *pqospriv = &pmlmepriv->qospriv;
	u16 *fctrl = &pwlanhdr->frame_ctl;

	memset(hdr, 0, WLANHDR_OFFSET);
	SetFrameSubType(fctrl, pattrib->subtype);
	if (pattrib->subtype & WIFI_DATA_TYPE) {
		if ((check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true)) {
			/* to_ds = 1, fr_ds = 0;
			 * addr1=BSSID, addr2=SA, addr3=DA */
			SetToDs(fctrl);
			memcpy(pwlanhdr->addr1, get_bssid(pmlmepriv),
				ETH_ALEN);
			memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
			memcpy(pwlanhdr->addr3, pattrib->dst, ETH_ALEN);
		} else if ((check_fwstate(pmlmepriv, WIFI_AP_STATE) == true)) {
			/* to_ds = 0, fr_ds = 1;
			 * addr1=DA, addr2=BSSID, addr3=SA */
			SetFrDs(fctrl);
			memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
			memcpy(pwlanhdr->addr2, get_bssid(pmlmepriv),
				ETH_ALEN);
			memcpy(pwlanhdr->addr3, pattrib->src, ETH_ALEN);
		} else if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)
		     || (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)
		     == true)) {
			/* IBSS: addr1=DA, addr2=SA, addr3=BSSID */
			memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
			memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
			memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv),
				ETH_ALEN);
		} else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) {
			/* MP mode: same addressing as IBSS */
			memcpy(pwlanhdr->addr1, pattrib->dst, ETH_ALEN);
			memcpy(pwlanhdr->addr2, pattrib->src, ETH_ALEN);
			memcpy(pwlanhdr->addr3, get_bssid(pmlmepriv),
				ETH_ALEN);
		} else
			return _FAIL;
		if (pattrib->encrypt)
			SetPrivacy(fctrl);
		if (pqospriv->qos_option) {
			/* QoS control field sits in the last 2 header bytes */
			qc = (unsigned short *)(hdr + pattrib->hdrlen - 2);
			if (pattrib->priority)
				SetPriority(qc, pattrib->priority);
			SetAckpolicy(qc, pattrib->ack_policy);
		}
		/* TODO: fill HT Control Field */
		/* Update Seq Num will be handled by f/w */
		{
			struct sta_info *psta;
			sint bmcst = IS_MCAST(pattrib->ra);

			if (pattrib->psta)
				psta = pattrib->psta;
			else {
				if (bmcst)
					psta = r8712_get_bcmc_stainfo(padapter);
				else
					psta =
					 r8712_get_stainfo(&padapter->stapriv,
					 pattrib->ra);
			}
			if (psta) {
				/* bump the per-TID sequence counter (12-bit
				 * wrap) and stamp it into the header */
				psta->sta_xmitpriv.txseq_tid
						  [pattrib->priority]++;
				psta->sta_xmitpriv.txseq_tid[pattrib->priority]
						   &= 0xFFF;
				pattrib->seqnum = psta->sta_xmitpriv.
						  txseq_tid[pattrib->priority];
				SetSeqNum(hdr, pattrib->seqnum);
			}
		}
	}
	return _SUCCESS;
}
/*
 * r8712_put_snap - write an LLC/SNAP header for @h_proto at @data.
 *
 * Uses the 802.1H (bridge-tunnel) OUI for AppleTalk AARP (0x80f3) and
 * IPX (0x8137), RFC 1042 encapsulation for everything else.
 * Returns the number of bytes written (SNAP header + 2-byte ethertype).
 */
static sint r8712_put_snap(u8 *data, u16 h_proto)
{
	struct ieee80211_snap_hdr *snap = (struct ieee80211_snap_hdr *)data;
	const u8 *oui = (h_proto == 0x8137 || h_proto == 0x80f3) ?
			P802_1H_OUI : RFC1042_OUI;

	snap->dsap = 0xaa;
	snap->ssap = 0xaa;
	snap->ctrl = 0x03;
	snap->oui[0] = oui[0];
	snap->oui[1] = oui[1];
	snap->oui[2] = oui[2];
	/* ethertype follows the SNAP header, in network byte order */
	*(u16 *)(data + SNAP_SIZE) = htons(h_proto);
	return SNAP_SIZE + sizeof(u16);
}
/*
* This sub-routine will perform all the following:
* 1. remove 802.3 header.
* 2. create wlan_header, based on the info in pxmitframe
* 3. append sta's iv/ext-iv
* 4. append LLC
* 5. move frag chunk from pframe to pxmitframe->mem
* 6. apply sw-encrypt, if necessary.
*/
sint r8712_xmitframe_coalesce(struct _adapter *padapter, _pkt *pkt,
			struct xmit_frame *pxmitframe)
{
	struct pkt_file pktfile;
	sint frg_len, mpdu_len, llc_sz;
	u32 mem_sz;
	u8 frg_inx;
	addr_t addr;
	u8 *pframe, *mem_start, *ptxdesc;
	struct sta_info *psta;
	struct security_priv *psecuritypriv = &padapter->securitypriv;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	u8 *pbuf_start;
	sint bmcst = IS_MCAST(pattrib->ra);

	/* a resolved station and an attached DMA buffer are prerequisites */
	if (pattrib->psta == NULL)
		return _FAIL;
	psta = pattrib->psta;
	if (pxmitframe->buf_addr == NULL)
		return _FAIL;
	/* buffer layout: [TX descriptor][802.11 hdr][IV][LLC][payload][ICV] */
	pbuf_start = pxmitframe->buf_addr;
	ptxdesc = pbuf_start;
	mem_start = pbuf_start + TXDESC_OFFSET;
	if (make_wlanhdr(padapter, mem_start, pattrib) == _FAIL)
		return _FAIL;
	_r8712_open_pktfile(pkt, &pktfile);
	/* skip the 802.3 header; it was consumed into pattrib already */
	_r8712_pktfile_read(&pktfile, NULL, (uint) pattrib->pkt_hdrlen);
	if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true) {
		/* truncate TXDESC_SIZE bytes txcmd if at mp mode for 871x */
		if (pattrib->ether_type == 0x8712) {
			/* take care - update_txdesc overwrite this */
			_r8712_pktfile_read(&pktfile, ptxdesc, TXDESC_SIZE);
		}
	}
	pattrib->pktlen = pktfile.pkt_len;
	frg_inx = 0;
	/* per-fragment budget; 4 bytes reserved (FCS) */
	frg_len = pxmitpriv->frag_len - 4;
	/* fragmentation loop: one iteration per MPDU */
	while (1) {
		llc_sz = 0;
		mpdu_len = frg_len;
		pframe = mem_start;
		/* assume more fragments follow; cleared on the last one */
		SetMFrag(mem_start);
		pframe += pattrib->hdrlen;
		mpdu_len -= pattrib->hdrlen;
		/* adding icv, if necessary... */
		if (pattrib->iv_len) {
			if (psta != NULL) {
				/* build the per-cipher IV from the station's
				 * TX packet number (group key id for mcast) */
				switch (pattrib->encrypt) {
				case _WEP40_:
				case _WEP104_:
					WEP_IV(pattrib->iv, psta->txpn,
					       (u8)psecuritypriv->
					       PrivacyKeyIndex);
					break;
				case _TKIP_:
					if (bmcst)
						TKIP_IV(pattrib->iv,
							psta->txpn,
							(u8)psecuritypriv->
							XGrpKeyid);
					else
						TKIP_IV(pattrib->iv, psta->txpn,
							0);
					break;
				case _AES_:
					if (bmcst)
						AES_IV(pattrib->iv, psta->txpn,
						       (u8)psecuritypriv->
						       XGrpKeyid);
					else
						AES_IV(pattrib->iv, psta->txpn,
						       0);
					break;
				}
			}
			memcpy(pframe, pattrib->iv, pattrib->iv_len);
			pframe += pattrib->iv_len;
			mpdu_len -= pattrib->iv_len;
		}
		/* LLC/SNAP header goes into the first fragment only */
		if (frg_inx == 0) {
			llc_sz = r8712_put_snap(pframe, pattrib->ether_type);
			pframe += llc_sz;
			mpdu_len -= llc_sz;
		}
		/* leave room for the ICV when sw encryption will append it */
		if ((pattrib->icv_len > 0) && (pattrib->bswenc))
			mpdu_len -= pattrib->icv_len;
		/* mcast frames are never fragmented: copy everything */
		if (bmcst)
			mem_sz = _r8712_pktfile_read(&pktfile, pframe,
				 pattrib->pktlen);
		else
			mem_sz = _r8712_pktfile_read(&pktfile, pframe,
				 mpdu_len);
		pframe += mem_sz;
		if ((pattrib->icv_len > 0) && (pattrib->bswenc)) {
			memcpy(pframe, pattrib->icv, pattrib->icv_len);
			pframe += pattrib->icv_len;
		}
		frg_inx++;
		if (bmcst || (r8712_endofpktfile(&pktfile) == true)) {
			/* last fragment: record counts, clear More-Frag */
			pattrib->nr_frags = frg_inx;
			pattrib->last_txcmdsz = pattrib->hdrlen +
						pattrib->iv_len +
						((pattrib->nr_frags == 1) ?
						llc_sz : 0) +
						((pattrib->bswenc) ?
						pattrib->icv_len : 0) + mem_sz;
			ClearMFrag(mem_start);
			break;
		}
		/* next fragment starts 4-byte aligned, after its descriptor;
		 * replicate the 802.11 header from the first fragment */
		addr = (addr_t)(pframe);
		mem_start = (unsigned char *)RND4(addr) + TXDESC_OFFSET;
		memcpy(mem_start, pbuf_start + TXDESC_OFFSET, pattrib->hdrlen);
	}
	if (xmitframe_addmic(padapter, pxmitframe) == _FAIL)
		return _FAIL;
	xmitframe_swencrypt(padapter, pxmitframe);
	return _SUCCESS;
}
/*
 * r8712_update_protection - choose the virtual carrier-sense scheme.
 *
 * DISABLE_VCS forces none; ENABLE_VCS keeps the current choice; AUTO_VCS
 * (and anything else) inspects the ERP information element in @ie: when
 * its Use_Protection bit is set, pick RTS/CTS or CTS-to-self per the
 * registry preference, otherwise no protection.
 */
void r8712_update_protection(struct _adapter *padapter, u8 *ie, uint ie_len)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct registry_priv *pregistrypriv = &padapter->registrypriv;
	sint erp_len;
	u8 *perp;

	switch (pxmitpriv->vcs_setting) {
	case DISABLE_VCS:
		pxmitpriv->vcs = NONE_VCS;
		break;
	case ENABLE_VCS:
		/* keep whatever is currently configured */
		break;
	case AUTO_VCS:
	default:
		perp = r8712_get_ie(ie, _ERPINFO_IE_, &erp_len, ie_len);
		if (perp == NULL) {
			pxmitpriv->vcs = NONE_VCS;
		} else if (perp[2] & BIT(1)) {
			/* ERP Use_Protection bit set */
			pxmitpriv->vcs =
				(pregistrypriv->vcs_type == RTS_CTS) ?
				RTS_CTS : CTS_TO_SELF;
		} else {
			pxmitpriv->vcs = NONE_VCS;
		}
		break;
	}
}
struct xmit_buf *r8712_alloc_xmitbuf(struct xmit_priv *pxmitpriv)
{
unsigned long irqL;
struct xmit_buf *pxmitbuf = NULL;
struct list_head *plist, *phead;
struct __queue *pfree_xmitbuf_queue = &pxmitpriv->free_xmitbuf_queue;
spin_lock_irqsave(&pfree_xmitbuf_queue->lock, irqL);
if (_queue_empty(pfree_xmitbuf_queue) == true)
pxmitbuf = NULL;
else {
phead = get_list_head(pfree_xmitbuf_queue);
plist = get_next(phead);
pxmitbuf = LIST_CONTAINOR(plist, struct xmit_buf, list);
list_delete(&(pxmitbuf->list));
}
if (pxmitbuf != NULL)
pxmitpriv->free_xmitbuf_cnt--;
spin_unlock_irqrestore(&pfree_xmitbuf_queue->lock, irqL);
return pxmitbuf;
}
/*
 * r8712_free_xmitbuf - return an xmit_buf to the free list.
 *
 * Returns _FAIL for a NULL buffer, _SUCCESS otherwise.
 */
int r8712_free_xmitbuf(struct xmit_priv *pxmitpriv, struct xmit_buf *pxmitbuf)
{
	unsigned long flags;
	struct __queue *free_q = &pxmitpriv->free_xmitbuf_queue;

	if (!pxmitbuf)
		return _FAIL;
	spin_lock_irqsave(&free_q->lock, flags);
	/* unlink from wherever it currently sits, then append to free list */
	list_delete(&pxmitbuf->list);
	list_insert_tail(&pxmitbuf->list, get_list_head(free_q));
	pxmitpriv->free_xmitbuf_cnt++;
	spin_unlock_irqrestore(&free_q->lock, flags);
	return _SUCCESS;
}
/*
Calling context:
1. OS_TXENTRY
2. RXENTRY (rx_thread or RX_ISR/RX_CallBack)
If we turn on USE_RXTHREAD, then, no need for critical section.
Otherwise, we must use _enter/_exit critical to protect free_xmit_queue...
Must be very very cautious...
*/
struct xmit_frame *r8712_alloc_xmitframe(struct xmit_priv *pxmitpriv)
{
/*
Please remember to use all the osdep_service api,
and lock/unlock or _enter/_exit critical to protect
pfree_xmit_queue
*/
unsigned long irqL;
struct xmit_frame *pxframe = NULL;
struct list_head *plist, *phead;
struct __queue *pfree_xmit_queue = &pxmitpriv->free_xmit_queue;
spin_lock_irqsave(&pfree_xmit_queue->lock, irqL);
if (_queue_empty(pfree_xmit_queue) == true)
pxframe = NULL;
else {
phead = get_list_head(pfree_xmit_queue);
plist = get_next(phead);
pxframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
list_delete(&(pxframe->list));
}
if (pxframe != NULL) {
pxmitpriv->free_xmitframe_cnt--;
pxframe->buf_addr = NULL;
pxframe->pxmitbuf = NULL;
pxframe->attrib.psta = NULL;
pxframe->pkt = NULL;
}
spin_unlock_irqrestore(&pfree_xmit_queue->lock, irqL);
return pxframe;
}
/*
 * r8712_free_xmitframe - return an xmit_frame to the free queue.
 *
 * Drops the frame's pkt reference, re-queues it, and wakes the netdev TX
 * queue if it was stopped (a frame slot just became available).
 */
void r8712_free_xmitframe(struct xmit_priv *pxmitpriv,
			  struct xmit_frame *pxmitframe)
{
	unsigned long flags;
	struct __queue *free_q = &pxmitpriv->free_xmit_queue;
	struct _adapter *padapter = pxmitpriv->adapter;

	if (!pxmitframe)
		return;
	spin_lock_irqsave(&free_q->lock, flags);
	list_delete(&pxmitframe->list);
	pxmitframe->pkt = NULL;
	list_insert_tail(&pxmitframe->list, get_list_head(free_q));
	pxmitpriv->free_xmitframe_cnt++;
	spin_unlock_irqrestore(&free_q->lock, flags);
	if (netif_queue_stopped(padapter->pnetdev))
		netif_wake_queue(padapter->pnetdev);
}
/*
 * r8712_free_xmitframe_ex - free a frame, but only if it carries data.
 *
 * Non-data frames (management/command tags) are intentionally left alone.
 */
void r8712_free_xmitframe_ex(struct xmit_priv *pxmitpriv,
			     struct xmit_frame *pxmitframe)
{
	if (pxmitframe && pxmitframe->frame_tag == DATA_FRAMETAG)
		r8712_free_xmitframe(pxmitpriv, pxmitframe);
}
/*
 * r8712_free_xmitframe_queue - drain @pframequeue, freeing every frame.
 *
 * Holds the queue's lock for the whole traversal; the next pointer is
 * fetched before each free since freeing unlinks the current node.
 */
void r8712_free_xmitframe_queue(struct xmit_priv *pxmitpriv,
				struct __queue *pframequeue)
{
	unsigned long flags;
	struct list_head *head, *cur;
	struct xmit_frame *frame;

	spin_lock_irqsave(&(pframequeue->lock), flags);
	head = get_list_head(pframequeue);
	cur = get_next(head);
	while (end_of_queue_search(head, cur) == false) {
		frame = LIST_CONTAINOR(cur, struct xmit_frame, list);
		cur = get_next(cur);
		r8712_free_xmitframe(pxmitpriv, frame);
	}
	spin_unlock_irqrestore(&(pframequeue->lock), flags);
}
/*
 * get_sta_pending - map a user priority to the station's TX service queue.
 *
 * Per 802.11e UP->AC mapping: 1,2 -> BK; 0,3 -> BE; 4,5 -> VI; 6,7 -> VO.
 * Also stores the matching adapter-level pending queue in *@ppstapending
 * and bumps the accounting counter of the corresponding hw_xmit slot
 * (index order in hwxmits[]: 0=VO, 1=VI, 2=BE, 3=BK).
 */
static inline struct tx_servq *get_sta_pending(struct _adapter *padapter,
					       struct __queue **ppstapending,
					       struct sta_info *psta, sint up)
{
	struct tx_servq *servq;
	struct hw_xmit *hw = padapter->xmitpriv.hwxmits;

	switch (up) {
	case 1:
	case 2:
		/* background */
		servq = &psta->sta_xmitpriv.bk_q;
		*ppstapending = &padapter->xmitpriv.bk_pending;
		hw[3].accnt++;
		break;
	case 4:
	case 5:
		/* video */
		servq = &psta->sta_xmitpriv.vi_q;
		*ppstapending = &padapter->xmitpriv.vi_pending;
		hw[1].accnt++;
		break;
	case 6:
	case 7:
		/* voice */
		servq = &psta->sta_xmitpriv.vo_q;
		*ppstapending = &padapter->xmitpriv.vo_pending;
		hw[0].accnt++;
		break;
	case 0:
	case 3:
	default:
		/* best effort */
		servq = &psta->sta_xmitpriv.be_q;
		*ppstapending = &padapter->xmitpriv.be_pending;
		hw[2].accnt++;
		break;
	}
	return servq;
}
/*
* Will enqueue pxmitframe to the proper queue, and indicate it
* to xx_pending list.....
*/
/*
 * r8712_xmit_classifier - enqueue a frame on its station's per-AC queue.
 *
 * Resolves the destination station (cached attrib, broadcast entry, or a
 * lookup by RA/BSSID in MP mode), then appends the frame to the matching
 * tx_servq and links that servq onto the adapter's pending list if it was
 * idle. Returns _FAIL if no station can be resolved.
 */
sint r8712_xmit_classifier(struct _adapter *padapter,
			   struct xmit_frame *pxmitframe)
{
	unsigned long flags;
	struct __queue *pstapending;
	struct sta_info *psta;
	struct tx_servq *servq;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;
	struct sta_priv *pstapriv = &padapter->stapriv;
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;

	psta = pattrib->psta;
	if (psta == NULL) {
		if (IS_MCAST(pattrib->ra))
			psta = r8712_get_bcmc_stainfo(padapter);
		else if (check_fwstate(pmlmepriv, WIFI_MP_STATE) == true)
			psta = r8712_get_stainfo(pstapriv,
						 get_bssid(pmlmepriv));
		else
			psta = r8712_get_stainfo(pstapriv, pattrib->ra);
	}
	if (psta == NULL)
		return _FAIL;
	servq = get_sta_pending(padapter, &pstapending,
				psta, pattrib->priority);
	spin_lock_irqsave(&pstapending->lock, flags);
	/* first frame for this servq: make it visible to the scheduler */
	if (is_list_empty(&servq->tx_pending))
		list_insert_tail(&servq->tx_pending,
				 get_list_head(pstapending));
	list_insert_tail(&pxmitframe->list,
			 get_list_head(&servq->sta_pending));
	servq->qcnt++;
	spin_unlock_irqrestore(&pstapending->lock, flags);
	return _SUCCESS;
}
/*
 * alloc_hwxmits - allocate and wire up the hw_xmit descriptor array.
 *
 * A 5-entry layout prepends a broadcast/multicast queue before VO/VI/BK/BE;
 * the 4-entry layout is VO/VI/BE/BK. On allocation failure the function
 * silently returns with hwxmits == NULL (callers must tolerate that).
 */
static void alloc_hwxmits(struct _adapter *padapter)
{
	struct hw_xmit *hw;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	pxmitpriv->hwxmit_entry = HWXMIT_ENTRY;
	pxmitpriv->hwxmits = (struct hw_xmit *)_malloc(sizeof(struct hw_xmit) *
						       pxmitpriv->hwxmit_entry);
	if (pxmitpriv->hwxmits == NULL)
		return;
	hw = pxmitpriv->hwxmits;
	if (pxmitpriv->hwxmit_entry == 5) {
		pxmitpriv->bmc_txqueue.head = 0;
		hw[0].phwtxqueue = &pxmitpriv->bmc_txqueue;
		hw[0].sta_queue = &pxmitpriv->bm_pending;
		pxmitpriv->vo_txqueue.head = 0;
		hw[1].phwtxqueue = &pxmitpriv->vo_txqueue;
		hw[1].sta_queue = &pxmitpriv->vo_pending;
		pxmitpriv->vi_txqueue.head = 0;
		hw[2].phwtxqueue = &pxmitpriv->vi_txqueue;
		hw[2].sta_queue = &pxmitpriv->vi_pending;
		pxmitpriv->bk_txqueue.head = 0;
		hw[3].phwtxqueue = &pxmitpriv->bk_txqueue;
		hw[3].sta_queue = &pxmitpriv->bk_pending;
		pxmitpriv->be_txqueue.head = 0;
		hw[4].phwtxqueue = &pxmitpriv->be_txqueue;
		hw[4].sta_queue = &pxmitpriv->be_pending;
	} else if (pxmitpriv->hwxmit_entry == 4) {
		pxmitpriv->vo_txqueue.head = 0;
		hw[0].phwtxqueue = &pxmitpriv->vo_txqueue;
		hw[0].sta_queue = &pxmitpriv->vo_pending;
		pxmitpriv->vi_txqueue.head = 0;
		hw[1].phwtxqueue = &pxmitpriv->vi_txqueue;
		hw[1].sta_queue = &pxmitpriv->vi_pending;
		pxmitpriv->be_txqueue.head = 0;
		hw[2].phwtxqueue = &pxmitpriv->be_txqueue;
		hw[2].sta_queue = &pxmitpriv->be_pending;
		pxmitpriv->bk_txqueue.head = 0;
		hw[3].phwtxqueue = &pxmitpriv->bk_txqueue;
		hw[3].sta_queue = &pxmitpriv->bk_pending;
	}
}
/* Free the hw_xmit array allocated by alloc_hwxmits(); kfree(NULL) is a
 * safe no-op, so this is valid even if allocation failed. */
static void free_hwxmits(struct _adapter *padapter)
{
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;

	kfree(pxmitpriv->hwxmits);
}
/*
 * init_hwxmits - initialise @entry hw_xmit slots: lock, empty pending
 * list, and zeroed counters.
 */
static void init_hwxmits(struct hw_xmit *phwxmit, sint entry)
{
	struct hw_xmit *hw = phwxmit;
	sint left = entry;

	while (left-- > 0) {
		spin_lock_init(&hw->xmit_lock);
		_init_listhead(&hw->pending);
		hw->txcmdcnt = 0;
		hw->accnt = 0;
		hw++;
	}
}
/*
 * xmitframe_xmitbuf_attach - cross-link an xmit_frame with an xmit_buf.
 *
 * After this the frame owns the buffer's URB and data area, and the buffer
 * can find its frame again via priv_data (used on USB completion).
 */
void xmitframe_xmitbuf_attach(struct xmit_frame *pxmitframe,
			      struct xmit_buf *pxmitbuf)
{
	/* pxmitbuf attach to pxmitframe */
	pxmitframe->pxmitbuf = pxmitbuf;
	/* urb and irp connection */
	pxmitframe->pxmit_urb[0] = pxmitbuf->pxmit_urb[0];
	/* buffer addr assoc */
	pxmitframe->buf_addr = pxmitbuf->pbuf;
	/* pxmitframe attach to pxmitbuf */
	pxmitbuf->priv_data = pxmitframe;
}
/*
* tx_action == 0 == no frames to transmit
* tx_action > 0 ==> we have frames to transmit
* tx_action < 0 ==> we have frames to transmit, but TXFF is not even enough
* to transmit 1 frame.
*/
/*
 * r8712_pre_xmit - decide whether to transmit a frame now or queue it.
 *
 * Queues (returns false) when the station's AC already has frames pending
 * or no xmit_buf is available; otherwise attaches a buffer and dumps the
 * frame straight to the hardware (returns true). The xmit_priv lock
 * serialises the pending check against concurrent enqueues.
 */
int r8712_pre_xmit(struct _adapter *padapter, struct xmit_frame *pxmitframe)
{
	unsigned long flags;
	struct xmit_buf *pxmitbuf;
	struct xmit_priv *pxmitpriv = &padapter->xmitpriv;
	struct pkt_attrib *pattrib = &pxmitframe->attrib;

	r8712_do_queue_select(padapter, pattrib);
	spin_lock_irqsave(&pxmitpriv->lock, flags);
	if (r8712_txframes_sta_ac_pending(padapter, pattrib) > 0) {
		/* keep per-AC ordering: queue behind the pending frames */
		r8712_xmit_enqueue(padapter, pxmitframe);
		spin_unlock_irqrestore(&pxmitpriv->lock, flags);
		return false;
	}
	pxmitbuf = r8712_alloc_xmitbuf(pxmitpriv);
	if (pxmitbuf == NULL) {
		/* no DMA buffer available: enqueue for later */
		r8712_xmit_enqueue(padapter, pxmitframe);
		spin_unlock_irqrestore(&pxmitpriv->lock, flags);
		return false;
	}
	spin_unlock_irqrestore(&pxmitpriv->lock, flags);
	/* dump packet directly */
	xmitframe_xmitbuf_attach(pxmitframe, pxmitbuf);
	r8712_xmit_direct(padapter, pxmitframe);
	return true;
}
| gpl-2.0 |
zombi-x/android_kernel_asus_moorefield | arch/arm/mach-pxa/pxa3xx-ulpi.c | 3210 | 8454 | /*
* linux/arch/arm/mach-pxa/pxa3xx-ulpi.c
*
* code specific to pxa3xx aka Monahans
*
* Copyright (C) 2010 CompuLab Ltd.
*
* 2010-13-07: Igor Grinberg <grinberg@compulab.co.il>
* initial version: pxa310 USB Host mode support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/usb.h>
#include <linux/usb/otg.h>
#include <mach/hardware.h>
#include <mach/regs-u2d.h>
#include <linux/platform_data/usb-pxa3xx-ulpi.h>
/* Driver state for the PXA3xx U2D ULPI block. */
struct pxa3xx_u2d_ulpi {
	struct clk		*clk;		/* U2D functional clock */
	void __iomem		*mmio_base;	/* mapped U2D register window */

	struct usb_phy		*otg;		/* ULPI OTG transceiver */
	unsigned int		ulpi_mode;	/* ULPI_* flags chosen at init */
};

/* Single-instance driver: set in probe(), freed in remove(). */
static struct pxa3xx_u2d_ulpi *u2d;
/* Read a U2D register at offset @reg from the mapped register window. */
static inline u32 u2d_readl(u32 reg)
{
	return __raw_readl(u2d->mmio_base + reg);
}
/* Write @val to the U2D register at offset @reg. */
static inline void u2d_writel(u32 reg, u32 val)
{
	__raw_writel(val, u2d->mmio_base + reg);
}
#if defined(CONFIG_PXA310_ULPI)
enum u2d_ulpi_phy_mode {
SYNCH = 0,
CARKIT = (1 << 0),
SER_3PIN = (1 << 1),
SER_6PIN = (1 << 2),
LOWPOWER = (1 << 3),
};
/* Current ULPI PHY mode: bits [31:28] of the U2DOTGUSR status register. */
static inline enum u2d_ulpi_phy_mode pxa310_ulpi_get_phymode(void)
{
	return (u2d_readl(U2DOTGUSR) >> 28) & 0xF;
}
/*
 * pxa310_ulpi_poll - busy-wait for the current ULPI access to finish.
 *
 * Spins until the RUN bit in U2DOTGUCR clears, up to 50000 iterations.
 * Returns 0 on completion, -ETIMEDOUT when the access never finished.
 */
static int pxa310_ulpi_poll(void)
{
	int attempts = 50000;

	while (attempts-- > 0) {
		if (!(u2d_readl(U2DOTGUCR) & U2DOTGUCR_RUN))
			return 0;

		cpu_relax();
	}

	pr_warning("%s: ULPI access timed out!\n", __func__);
	return -ETIMEDOUT;
}
/*
 * pxa310_ulpi_read - read ULPI PHY register @reg via the U2DOTGUCR
 * indirect-access mechanism.
 *
 * Only valid while the PHY is in synchronous mode. Returns the register
 * data (low byte of U2DOTGUCR), -EBUSY if the PHY is in the wrong mode,
 * or -ETIMEDOUT if the access never completed.
 */
static int pxa310_ulpi_read(struct usb_phy *otg, u32 reg)
{
	int err;

	if (pxa310_ulpi_get_phymode() != SYNCH) {
		pr_warning("%s: PHY is not in SYNCH mode!\n", __func__);
		return -EBUSY;
	}

	/* launch the read cycle, then give the PHY time to respond */
	u2d_writel(U2DOTGUCR, U2DOTGUCR_RUN | U2DOTGUCR_RNW | (reg << 16));
	msleep(5);

	err = pxa310_ulpi_poll();
	return err ? err : (u2d_readl(U2DOTGUCR) & U2DOTGUCR_RDATA);
}
/*
 * pxa310_ulpi_write - write @val to ULPI PHY register @reg via U2DOTGUCR.
 *
 * Only valid in synchronous PHY mode. Returns 0, -EBUSY (wrong mode) or
 * -ETIMEDOUT (access never completed).
 */
static int pxa310_ulpi_write(struct usb_phy *otg, u32 val, u32 reg)
{
	u32 cmd;

	if (pxa310_ulpi_get_phymode() != SYNCH) {
		pr_warning("%s: PHY is not in SYNCH mode!\n", __func__);
		return -EBUSY;
	}

	cmd = U2DOTGUCR_RUN | (reg << 16) | (val << 8);
	u2d_writel(U2DOTGUCR, cmd);
	msleep(5);

	return pxa310_ulpi_poll();
}
/* ULPI register accessors handed to otg_ulpi_create(). */
struct usb_phy_io_ops pxa310_ulpi_access_ops = {
	.read	= pxa310_ulpi_read,
	.write	= pxa310_ulpi_write,
};
/*
 * pxa310_otg_transceiver_rtsm - return the transceiver to synchronous mode.
 *
 * Two-step hardware sequence (order and delay matter): first request RTSM
 * with the UTMI decoder enabled and let the PHY settle, then switch the
 * OTG pad to the ULPI interface (ULAF) while clearing the serial/carkit
 * alternate functions.
 */
static void pxa310_otg_transceiver_rtsm(void)
{
	u32 u2dotgcr;

	/* put PHY to sync mode */
	u2dotgcr = u2d_readl(U2DOTGCR);
	u2dotgcr |=  U2DOTGCR_RTSM | U2DOTGCR_UTMID;
	u2d_writel(U2DOTGCR, u2dotgcr);
	msleep(10);

	/* setup OTG sync mode */
	u2dotgcr = u2d_readl(U2DOTGCR);
	u2dotgcr |= U2DOTGCR_ULAF;
	u2dotgcr &= ~(U2DOTGCR_SMAF | U2DOTGCR_CKAF);
	u2d_writel(U2DOTGCR, u2dotgcr);
}
/*
 * pxa310_start_otg_host_transcvr - bring the OTG transceiver up for host
 * mode: resync the PHY, init it, drive VBUS, then bind the host bus.
 *
 * Returns 0 on success or the first failing step's error code (the
 * failure is also logged).
 */
static int pxa310_start_otg_host_transcvr(struct usb_bus *host)
{
	int rc;

	pxa310_otg_transceiver_rtsm();

	rc = usb_phy_init(u2d->otg);
	if (rc) {
		pr_err("OTG transceiver init failed");
	} else {
		rc = otg_set_vbus(u2d->otg->otg, 1);
		if (rc) {
			pr_err("OTG transceiver VBUS set failed");
		} else {
			rc = otg_set_host(u2d->otg->otg, host);
			if (rc)
				pr_err("OTG transceiver Host mode set failed");
		}
	}

	return rc;
}
/*
 * pxa310_start_otg_hc - start the OTG host controller for @host.
 *
 * Disables the device controller, masks OTG interrupts, brings up the
 * transceiver, selects the serial interface width from ulpi_mode, and
 * finally hands the pins to the host controller (SMAF). The register
 * write order is hardware-mandated; do not reorder.
 * Returns 0 or the transceiver bring-up error.
 */
static int pxa310_start_otg_hc(struct usb_bus *host)
{
	u32 u2dotgcr;
	int err;

	/* disable USB device controller */
	u2d_writel(U2DCR, u2d_readl(U2DCR) & ~U2DCR_UDE);
	u2d_writel(U2DOTGCR, u2d_readl(U2DOTGCR) | U2DOTGCR_UTMID);
	/* mask the OTG interrupt sources while reconfiguring */
	u2d_writel(U2DOTGICR, u2d_readl(U2DOTGICR) & ~0x37F7F);

	err = pxa310_start_otg_host_transcvr(host);
	if (err)
		return err;

	/* set xceiver mode */
	if (u2d->ulpi_mode & ULPI_IC_6PIN_SERIAL)
		u2d_writel(U2DP3CR, u2d_readl(U2DP3CR) & ~U2DP3CR_P2SS);
	else if (u2d->ulpi_mode & ULPI_IC_3PIN_SERIAL)
		u2d_writel(U2DP3CR, u2d_readl(U2DP3CR) | U2DP3CR_P2SS);

	/* start OTG host controller */
	u2dotgcr = u2d_readl(U2DOTGCR) | U2DOTGCR_SMAF;
	u2d_writel(U2DOTGCR, u2dotgcr & ~(U2DOTGCR_ULAF | U2DOTGCR_CKAF));

	return 0;
}
/*
 * pxa310_stop_otg_hc - tear down host mode: resync the PHY, detach the
 * host bus, drop VBUS, and shut the transceiver down.
 */
static void pxa310_stop_otg_hc(void)
{
	pxa310_otg_transceiver_rtsm();

	otg_set_host(u2d->otg->otg, NULL);
	otg_set_vbus(u2d->otg->otg, 0);
	usb_phy_shutdown(u2d->otg);
}
/*
 * pxa310_u2d_setup_otg_hc - preconfigure the U2D OTG block for host use:
 * select the ULPI pad function with the UTMI decoder, enable the ULPI
 * interface, and mask OTG interrupts. The msleep()s give the hardware
 * time to latch each step.
 */
static void pxa310_u2d_setup_otg_hc(void)
{
	u32 u2dotgcr;

	u2dotgcr = u2d_readl(U2DOTGCR);
	u2dotgcr |= U2DOTGCR_ULAF | U2DOTGCR_UTMID;
	u2dotgcr &= ~(U2DOTGCR_SMAF | U2DOTGCR_CKAF);
	u2d_writel(U2DOTGCR, u2dotgcr);
	msleep(5);
	u2d_writel(U2DOTGCR, u2dotgcr | U2DOTGCR_ULE);
	msleep(5);
	u2d_writel(U2DOTGICR, u2d_readl(U2DOTGICR) & ~0x37F7F);
}
static int pxa310_otg_init(struct pxa3xx_u2d_platform_data *pdata)
{
unsigned int ulpi_mode = ULPI_OTG_DRVVBUS;
if (pdata) {
if (pdata->ulpi_mode & ULPI_SER_6PIN)
ulpi_mode |= ULPI_IC_6PIN_SERIAL;
else if (pdata->ulpi_mode & ULPI_SER_3PIN)
ulpi_mode |= ULPI_IC_3PIN_SERIAL;
}
u2d->ulpi_mode = ulpi_mode;
u2d->otg = otg_ulpi_create(&pxa310_ulpi_access_ops, ulpi_mode);
if (!u2d->otg)
return -ENOMEM;
u2d->otg->io_priv = u2d->mmio_base;
return 0;
}
/* Free the transceiver allocated by otg_ulpi_create(). */
static void pxa310_otg_exit(void)
{
	kfree(u2d->otg);
}
#else
/* No-op stubs used when CONFIG_PXA310_ULPI is disabled. */
static inline void pxa310_u2d_setup_otg_hc(void) {}
static inline int pxa310_start_otg_hc(struct usb_bus *host)
{
	return 0;
}
static inline void pxa310_stop_otg_hc(void) {}
static inline int pxa310_otg_init(struct pxa3xx_u2d_platform_data *pdata)
{
	return 0;
}
static inline void pxa310_otg_exit(void) {}
#endif /* CONFIG_PXA310_ULPI */
int pxa3xx_u2d_start_hc(struct usb_bus *host)
{
int err = 0;
/* In case the PXA3xx ULPI isn't used, do nothing. */
if (!u2d)
return 0;
clk_enable(u2d->clk);
if (cpu_is_pxa310()) {
pxa310_u2d_setup_otg_hc();
err = pxa310_start_otg_hc(host);
}
return err;
}
EXPORT_SYMBOL_GPL(pxa3xx_u2d_start_hc);
void pxa3xx_u2d_stop_hc(struct usb_bus *host)
{
/* In case the PXA3xx ULPI isn't used, do nothing. */
if (!u2d)
return;
if (cpu_is_pxa310())
pxa310_stop_otg_hc();
clk_disable(u2d->clk);
}
EXPORT_SYMBOL_GPL(pxa3xx_u2d_stop_hc);
/*
 * pxa3xx_u2d_probe - allocate driver state, map the U2D registers, run
 * the optional board init hook, and (on PXA310) create the OTG
 * transceiver.
 *
 * Fix: platform data is optional (pxa310_otg_init() already tolerates a
 * NULL pdata), so guard the pdata->init/pdata->exit dereferences against
 * a NULL pdata instead of oopsing.
 */
static int pxa3xx_u2d_probe(struct platform_device *pdev)
{
	struct pxa3xx_u2d_platform_data *pdata = pdev->dev.platform_data;
	struct resource *r;
	int err;

	u2d = kzalloc(sizeof(struct pxa3xx_u2d_ulpi), GFP_KERNEL);
	if (!u2d) {
		dev_err(&pdev->dev, "failed to allocate memory\n");
		return -ENOMEM;
	}

	u2d->clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(u2d->clk)) {
		dev_err(&pdev->dev, "failed to get u2d clock\n");
		err = PTR_ERR(u2d->clk);
		goto err_free_mem;
	}

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(&pdev->dev, "no IO memory resource defined\n");
		err = -ENODEV;
		goto err_put_clk;
	}

	r = request_mem_region(r->start, resource_size(r), pdev->name);
	if (!r) {
		dev_err(&pdev->dev, "failed to request memory resource\n");
		err = -EBUSY;
		goto err_put_clk;
	}

	u2d->mmio_base = ioremap(r->start, resource_size(r));
	if (!u2d->mmio_base) {
		dev_err(&pdev->dev, "ioremap() failed\n");
		err = -ENODEV;
		goto err_free_res;
	}

	/* platform data (and its hooks) are optional */
	if (pdata && pdata->init) {
		err = pdata->init(&pdev->dev);
		if (err)
			goto err_free_io;
	}

	/* Only PXA310 U2D has OTG functionality */
	if (cpu_is_pxa310()) {
		err = pxa310_otg_init(pdata);
		if (err)
			goto err_free_plat;
	}

	platform_set_drvdata(pdev, &u2d);

	return 0;

err_free_plat:
	if (pdata && pdata->exit)
		pdata->exit(&pdev->dev);
err_free_io:
	iounmap(u2d->mmio_base);
err_free_res:
	release_mem_region(r->start, resource_size(r));
err_put_clk:
	clk_put(u2d->clk);
err_free_mem:
	kfree(u2d);
	return err;
}
/*
 * pxa3xx_u2d_remove - undo probe(): stop the OTG host path (PXA310),
 * run the optional board exit hook, and release all resources.
 *
 * Fix: guard the pdata->exit dereference — platform data is optional,
 * matching the NULL-tolerant handling in probe()/pxa310_otg_init().
 */
static int pxa3xx_u2d_remove(struct platform_device *pdev)
{
	struct pxa3xx_u2d_platform_data *pdata = pdev->dev.platform_data;
	struct resource *r;

	if (cpu_is_pxa310()) {
		pxa310_stop_otg_hc();
		pxa310_otg_exit();
	}

	if (pdata && pdata->exit)
		pdata->exit(&pdev->dev);

	platform_set_drvdata(pdev, NULL);
	iounmap(u2d->mmio_base);
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(r->start, resource_size(r));

	clk_put(u2d->clk);

	kfree(u2d);

	return 0;
}
/* Platform driver glue; bound by name to the "pxa3xx-u2d" device. */
static struct platform_driver pxa3xx_u2d_ulpi_driver = {
	.driver		= {
		.name	= "pxa3xx-u2d",
		.owner	= THIS_MODULE,
	},
	.probe		= pxa3xx_u2d_probe,
	.remove		= pxa3xx_u2d_remove,
};
module_platform_driver(pxa3xx_u2d_ulpi_driver);
MODULE_DESCRIPTION("PXA3xx U2D ULPI driver");
MODULE_AUTHOR("Igor Grinberg");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
LibiSC/Smt520Test | arch/ia64/kernel/smpboot.c | 3978 | 22145 | /*
* SMP boot-related support
*
* Copyright (C) 1998-2003, 2005 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2001, 2004-2005 Intel Corp
* Rohit Seth <rohit.seth@intel.com>
* Suresh Siddha <suresh.b.siddha@intel.com>
* Gordon Jin <gordon.jin@intel.com>
* Ashok Raj <ashok.raj@intel.com>
*
* 01/05/16 Rohit Seth <rohit.seth@intel.com> Moved SMP booting functions from smp.c to here.
* 01/04/27 David Mosberger <davidm@hpl.hp.com> Added ITC synching code.
* 02/07/31 David Mosberger <davidm@hpl.hp.com> Switch over to hotplug-CPU boot-sequence.
* smp_boot_cpus()/smp_commence() is replaced by
* smp_prepare_cpus()/__cpu_up()/smp_cpus_done().
* 04/06/21 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
* 04/12/26 Jin Gordon <gordon.jin@intel.com>
* 04/12/26 Rohit Seth <rohit.seth@intel.com>
* Add multi-threading and multi-core detection
* 05/01/30 Suresh Siddha <suresh.b.siddha@intel.com>
* Setup cpu_sibling_map and cpu_core_map
*/
#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cpu.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/efi.h>
#include <linux/percpu.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <asm/cache.h>
#include <asm/current.h>
#include <asm/delay.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/machvec.h>
#include <asm/mca.h>
#include <asm/page.h>
#include <asm/paravirt.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/sal.h>
#include <asm/tlbflush.h>
#include <asm/unistd.h>
#include <asm/sn/arch.h>
/* Compile-time debug printout switch for this file. */
#define SMP_DEBUG 0

#if SMP_DEBUG
#define Dprintk(x...)  printk(x)
#else
#define Dprintk(x...)
#endif

#ifdef CONFIG_HOTPLUG_CPU
#ifdef CONFIG_PERMIT_BSP_REMOVE
#define bsp_remove_ok	1
#else
#define bsp_remove_ok	0
#endif

/*
 * Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
struct task_struct *idle_thread_array[NR_CPUS];

/*
 * Global array allocated for NR_CPUS at boot time
 */
struct sal_to_os_boot sal_boot_rendez_state[NR_CPUS];

/*
 * start_ap in head.S uses this to store current booting cpu
 * info.
 */
struct sal_to_os_boot *sal_state_for_booting_cpu = &sal_boot_rendez_state[0];

#define set_brendez_area(x) (sal_state_for_booting_cpu = &sal_boot_rendez_state[(x)]);

#define get_idle_for_cpu(x)		(idle_thread_array[(x)])
#define set_idle_for_cpu(x,p)	(idle_thread_array[(x)] = (p))

#else
/* Without CPU hotplug the idle-thread cache and rendezvous area are unused. */
#define get_idle_for_cpu(x)		(NULL)
#define set_idle_for_cpu(x,p)
#define set_brendez_area(x)
#endif

/*
 * ITC synchronization related stuff:
 */
/* MASTER and SLAVE index into go[]; SLAVE is placed a cache line away. */
#define MASTER	(0)
#define SLAVE	(SMP_CACHE_BYTES/8)

#define NUM_ROUNDS	64	/* magic value */
#define NUM_ITERS	5	/* likewise */

static DEFINE_SPINLOCK(itc_sync_lock);
/* Handshake mailbox shared between master and slave during ITC sync. */
static volatile unsigned long go[SLAVE + 1];

#define DEBUG_ITC_SYNC	0

extern void start_ap (void);
extern unsigned long ia64_iobase;

struct task_struct *task_for_booting_cpu;

/*
 * State for each CPU
 */
DEFINE_PER_CPU(int, cpu_state);

cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
EXPORT_SYMBOL(cpu_core_map);
DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

int smp_num_siblings = 1;

/* which logical CPU number maps to which CPU (physical APIC ID) */
volatile int ia64_cpu_to_sapicid[NR_CPUS];
EXPORT_SYMBOL(ia64_cpu_to_sapicid);

/* Bitmap of APs that have completed smp_callin(). */
static volatile cpumask_t cpu_callin_map;

struct smp_boot_data smp_boot_data __initdata;

unsigned long ap_wakeup_vector = -1; /* External Int use to wakeup APs */

char __initdata no_int_routing;

unsigned char smp_int_redirect; /* are INT and IPI redirectable by the chipset? */

#ifdef CONFIG_FORCE_CPEI_RETARGET
#define CPEI_OVERRIDE_DEFAULT	(1)
#else
#define CPEI_OVERRIDE_DEFAULT	(0)
#endif

unsigned int force_cpei_retarget = CPEI_OVERRIDE_DEFAULT;
/*
 * Parse the "force_cpei=<n>" kernel command line option and latch the
 * value into force_cpei_retarget.
 */
static int __init
cmdl_force_cpei(char *str)
{
	int val = 0;

	get_option(&str, &val);
	force_cpei_retarget = val;

	return 1;
}

__setup("force_cpei=", cmdl_force_cpei);
/*
 * "nointroute" boot option: disable interrupt routing through the
 * chipset.  The option takes no value; @str is unused.
 */
static int __init
nointroute (char *str)
{
	printk ("no_int_routing on\n");
	no_int_routing = 1;
	return 1;
}

__setup("nointroute", nointroute);
/*
 * The BSP's rendezvous-state b0 (return branch register) is captured
 * from the first AP that boots, so that a later BSP offline/online can
 * resume correctly.  Runs on each AP; only the first non-boot cpu
 * actually performs the copy (guarded by the static fix_bsp_b0 flag).
 */
static void fix_b0_for_bsp(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	int cpuid;
	static int	fix_bsp_b0 = 1;

	cpuid = smp_processor_id();

	/*
	 * Cache the b0 value on the first AP that comes up
	 */
	if (!(fix_bsp_b0 && cpuid))
		return;

	sal_boot_rendez_state[0].br[0] = sal_boot_rendez_state[cpuid].br[0];
	printk ("Fixed BSP b0 value from CPU %d\n", cpuid);

	fix_bsp_b0 = 0;
#endif
}
/*
 * Master-side half of the ITC synchronization handshake.  Runs (via
 * smp_call_function_single) on the time-keeper cpu: for each of the
 * NUM_ROUNDS*NUM_ITERS exchanges, wait for the slave to raise
 * go[MASTER], then answer with a fresh ITC timestamp in go[SLAVE].
 * Interrupts are disabled so the response latency stays as uniform as
 * possible; go[] is volatile, so the busy-wait re-reads memory.
 */
void
sync_master (void *arg)
{
	unsigned long flags, i;

	go[MASTER] = 0;

	local_irq_save(flags);
	{
		for (i = 0; i < NUM_ROUNDS*NUM_ITERS; ++i) {
			while (!go[MASTER])
				cpu_relax();
			go[MASTER] = 0;
			go[SLAVE] = ia64_get_itc();
		}
	}
	local_irq_restore(flags);
}
/*
 * Return the number of cycles by which our itc differs from the itc on the master
 * (time-keeper) CPU.  A positive number indicates our itc is ahead of the master,
 * negative that it is behind.
 *
 * Out-params: *rt gets the best observed round-trip time, *master the
 * master timestamp from that same best iteration.  "Best" means the
 * iteration with the smallest t1-t0 window, since that bounds the
 * measurement error most tightly.
 */
static inline long
get_delta (long *rt, long *master)
{
	unsigned long best_t0 = 0, best_t1 = ~0UL, best_tm = 0;
	unsigned long tcenter, t0, t1, tm;
	long i;

	for (i = 0; i < NUM_ITERS; ++i) {
		t0 = ia64_get_itc();
		go[MASTER] = 1;
		while (!(tm = go[SLAVE]))
			cpu_relax();
		go[SLAVE] = 0;
		t1 = ia64_get_itc();

		if (t1 - t0 < best_t1 - best_t0)
			best_t0 = t0, best_t1 = t1, best_tm = tm;
	}

	*rt = best_t1 - best_t0;
	*master = best_tm - best_t0;

	/* average best_t0 and best_t1 without overflow: */
	tcenter = (best_t0/2 + best_t1/2);
	if (best_t0 % 2 + best_t1 % 2 == 2)
		++tcenter;
	return tcenter - best_tm;
}
/*
 * Synchronize ar.itc of the current (slave) CPU with the ar.itc of the MASTER CPU
 * (normally the time-keeper CPU).  We use a closed loop to eliminate the possibility of
 * unaccounted-for errors (such as getting a machine check in the middle of a calibration
 * step).  The basic idea is for the slave to ask the master what itc value it has and to
 * read its own itc before and after the master responds.  Each iteration gives us three
 * timestamps:
 *
 *	slave		master
 *
 *	t0 ---\
 *             ---\
 *		   --->
 *			tm
 *		   /---
 *	       /---
 *	t1 <---
 *
 *
 * The goal is to adjust the slave's ar.itc such that tm falls exactly half-way between t0
 * and t1.  If we achieve this, the clocks are synchronized provided the interconnect
 * between the slave and the master is symmetric.  Even if the interconnect were
 * asymmetric, we would still know that the synchronization error is smaller than the
 * roundtrip latency (t0 - t1).
 *
 * When the interconnect is quiet and symmetric, this lets us synchronize the itc to
 * within one or two cycles.  However, we can only *guarantee* that the synchronization is
 * accurate to within a round-trip time, which is typically in the range of several
 * hundred cycles (e.g., ~500 cycles).  In practice, this means that the itc's are usually
 * almost perfectly synchronized, but we shouldn't assume that the accuracy is much better
 * than half a micro second or so.
 */
void
ia64_sync_itc (unsigned int master)
{
	long i, delta, adj, adjust_latency = 0, done = 0;
	unsigned long flags, rt, master_time_stamp, bound;
#if DEBUG_ITC_SYNC
	struct {
		long rt;	/* roundtrip time */
		long master;	/* master's timestamp */
		long diff;	/* difference between midpoint and master's timestamp */
		long lat;	/* estimate of itc adjustment latency */
	} t[NUM_ROUNDS];
#endif

	/*
	 * Make sure local timer ticks are disabled while we sync.  If
	 * they were enabled, we'd have to worry about nasty issues
	 * like setting the ITC ahead of (or a long time before) the
	 * next scheduled tick.
	 */
	BUG_ON((ia64_get_itv() & (1 << 16)) == 0);

	go[MASTER] = 1;

	if (smp_call_function_single(master, sync_master, NULL, 0) < 0) {
		printk(KERN_ERR "sync_itc: failed to get attention of CPU %u!\n", master);
		return;
	}

	while (go[MASTER])
		cpu_relax();	/* wait for master to be ready */

	spin_lock_irqsave(&itc_sync_lock, flags);
	{
		for (i = 0; i < NUM_ROUNDS; ++i) {
			delta = get_delta(&rt, &master_time_stamp);
			if (delta == 0) {
				done = 1;	/* let's lock on to this... */
				bound = rt;
			}

			if (!done) {
				if (i > 0) {
					/* damp the correction with an exponential moving average */
					adjust_latency += -delta;
					adj = -delta + adjust_latency/4;
				} else
					adj = -delta;

				ia64_set_itc(ia64_get_itc() + adj);
			}
#if DEBUG_ITC_SYNC
			t[i].rt = rt;
			t[i].master = master_time_stamp;
			t[i].diff = delta;
			t[i].lat = adjust_latency/4;
#endif
		}
	}
	spin_unlock_irqrestore(&itc_sync_lock, flags);

#if DEBUG_ITC_SYNC
	for (i = 0; i < NUM_ROUNDS; ++i)
		printk("rt=%5ld master=%5ld diff=%5ld adjlat=%5ld\n",
		       t[i].rt, t[i].master, t[i].diff, t[i].lat);
#endif

	printk(KERN_INFO "CPU %d: synchronized ITC with CPU %u (last diff %ld cycles, "
	       "maxerr %lu cycles)\n", smp_processor_id(), master, delta, rt);
}
/*
 * Ideally sets up per-cpu profiling hooks. Doesn't do much now...
 */
static inline void __devinit
smp_setup_percpu_timer (void)
{
	/* intentionally empty: per-cpu profiling setup is not implemented */
}
/*
 * AP-side bring-up: called from start_secondary() once the cpu is
 * initialized.  Marks the cpu online, sets up its vectors/NUMA state,
 * optionally synchronizes the ITC with the time keeper, calibrates
 * delay loops, and finally signals the BSP via cpu_callin_map.
 */
static void __cpuinit
smp_callin (void)
{
	int cpuid, phys_id, itc_master;
	struct cpuinfo_ia64 *last_cpuinfo, *this_cpuinfo;
	extern void ia64_init_itm(void);
	extern volatile int time_keeper_id;

#ifdef CONFIG_PERFMON
	extern void pfm_init_percpu(void);
#endif

	cpuid = smp_processor_id();
	phys_id = hard_smp_processor_id();
	itc_master = time_keeper_id;

	/* Sanity: an AP must not already be marked online. */
	if (cpu_online(cpuid)) {
		printk(KERN_ERR "huh, phys CPU#0x%x, CPU#0x%x already present??\n",
		       phys_id, cpuid);
		BUG();
	}

	fix_b0_for_bsp();

	/*
	 * numa_node_id() works after this.
	 */
	set_numa_node(cpu_to_node_map[cpuid]);
	set_numa_mem(local_memory_node(cpu_to_node_map[cpuid]));

	ipi_call_lock_irq();
	spin_lock(&vector_lock);
	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(cpuid);
	notify_cpu_starting(cpuid);
	set_cpu_online(cpuid, true);
	per_cpu(cpu_state, cpuid) = CPU_ONLINE;
	spin_unlock(&vector_lock);
	ipi_call_unlock_irq();

	smp_setup_percpu_timer();

	ia64_mca_cmc_vector_setup();	/* Setup vector on AP */

#ifdef CONFIG_PERFMON
	pfm_init_percpu();
#endif

	local_irq_enable();

	if (!(sal_platform_features & IA64_SAL_PLATFORM_FEATURE_ITC_DRIFT)) {
		/*
		 * Synchronize the ITC with the BP.  Need to do this after irqs are
		 * enabled because ia64_sync_itc() calls smp_call_function_single(), which
		 * calls spin_unlock_bh(), which calls local_bh_enable(), which bugs out
		 * if irqs are not enabled...
		 */
		Dprintk("Going to syncup ITC with ITC Master.\n");
		ia64_sync_itc(itc_master);
	}

	/*
	 * Get our bogomips.
	 */
	ia64_init_itm();

	/*
	 * Delay calibration can be skipped if new processor is identical to the
	 * previous processor.
	 */
	last_cpuinfo = cpu_data(cpuid - 1);
	this_cpuinfo = local_cpu_data;
	if (last_cpuinfo->itc_freq != this_cpuinfo->itc_freq ||
	    last_cpuinfo->proc_freq != this_cpuinfo->proc_freq ||
	    last_cpuinfo->features != this_cpuinfo->features ||
	    last_cpuinfo->revision != this_cpuinfo->revision ||
	    last_cpuinfo->family != this_cpuinfo->family ||
	    last_cpuinfo->archrev != this_cpuinfo->archrev ||
	    last_cpuinfo->model != this_cpuinfo->model)
		calibrate_delay();
	local_cpu_data->loops_per_jiffy = loops_per_jiffy;

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
	Dprintk("Stack on CPU %d at about %p\n",cpuid, &cpuid);
}
/*
 * Activate a secondary processor.  head.S calls this.
 * Sets up the I/O base kernel register, maps PAL code, runs generic cpu
 * init, completes the SMP handshake via smp_callin() and then enters
 * the idle loop — this function never returns in practice.
 */
int __cpuinit
start_secondary (void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef CONFIG_PRINTK_TIME
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
#endif
	efi_map_pal_code();
	cpu_init();
	preempt_disable();
	smp_callin();

	cpu_idle();
	return 0;
}
/* ia64 provides no arch-specific idle-task registers; tell generic code so. */
struct pt_regs * __cpuinit idle_regs(struct pt_regs *regs)
{
	return NULL;
}
/* Work item used to fork an idle task for a cpu from keventd context. */
struct create_idle {
	struct work_struct work;	/* queued on the global workqueue */
	struct task_struct *idle;	/* result: the forked idle task */
	struct completion done;		/* signalled when idle is ready */
	int cpu;			/* target cpu for the idle task */
};

/* Workqueue callback: fork the idle task and wake the waiter. */
void __cpuinit
do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}
/*
 * Boot one AP: obtain (or fork) its idle task, point the SAL rendezvous
 * area at it, send the wakeup IPI and wait up to ~10s for the cpu to
 * check in via cpu_callin_map.  Returns 0 on success, -EINVAL if the
 * AP never calls in.
 */
static int __cpuinit
do_boot_cpu (int sapicid, int cpu)
{
	int timeout;
	struct create_idle c_idle = {
		.work = __WORK_INITIALIZER(c_idle.work, do_fork_idle),
		.cpu	= cpu,
		.done	= COMPLETION_INITIALIZER(c_idle.done),
	};

	/*
	 * We can't use kernel_thread since we must avoid to
	 * reschedule the child.
	 */
	/* Reuse a cached idle task from a previous hot-unplug, if any. */
	c_idle.idle = get_idle_for_cpu(cpu);
	if (c_idle.idle) {
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	schedule_work(&c_idle.work);
	wait_for_completion(&c_idle.done);

	if (IS_ERR(c_idle.idle))
		panic("failed fork for CPU %d", cpu);

	set_idle_for_cpu(cpu, c_idle.idle);

do_rest:
	task_for_booting_cpu = c_idle.idle;

	Dprintk("Sending wakeup vector %lu to AP 0x%x/0x%x.\n", ap_wakeup_vector, cpu, sapicid);

	set_brendez_area(cpu);
	platform_send_ipi(cpu, ap_wakeup_vector, IA64_IPI_DM_INT, 0);

	/*
	 * Wait 10s total for the AP to start
	 */
	Dprintk("Waiting on callin_map ...");
	for (timeout = 0; timeout < 100000; timeout++) {
		if (cpu_isset(cpu, cpu_callin_map))
			break;  /* It has booted */
		udelay(100);
	}
	Dprintk("\n");

	if (!cpu_isset(cpu, cpu_callin_map)) {
		printk(KERN_ERR "Processor 0x%x/0x%x is stuck.\n", cpu, sapicid);
		ia64_cpu_to_sapicid[cpu] = -1;
		set_cpu_online(cpu, false);  /* was set in smp_callin() */
		return -EINVAL;
	}
	return 0;
}
/*
 * Obsolete "decay=" boot parameter: the value is still parsed for
 * command-line compatibility, but the result is intentionally unused.
 */
static int __init
decay (char *str)
{
	int ticks;
	get_option (&str, &ticks);
	return 1;
}

__setup("decay=", decay);
/*
 * Initialize the logical-CPU to SAPICID mapping from the boot data the
 * platform discovery code filled in.  Logical cpu 0 is always the boot
 * processor; every other discovered SAPIC id gets the next free slot.
 */
void __init
smp_build_cpu_map (void)
{
	int boot_cpu_id = hard_smp_processor_id();
	int sapicid, idx, next;

	for (next = 0; next < NR_CPUS; next++)
		ia64_cpu_to_sapicid[next] = -1;

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	init_cpu_present(cpumask_of(0));
	set_cpu_possible(0, true);

	next = 1;
	for (idx = 0; idx < smp_boot_data.cpu_count; idx++) {
		sapicid = smp_boot_data.cpu_phys_id[idx];
		if (sapicid == boot_cpu_id)
			continue;	/* the BSP already occupies slot 0 */
		set_cpu_present(next, true);
		set_cpu_possible(next, true);
		ia64_cpu_to_sapicid[next] = sapicid;
		next++;
	}
}
/*
 * Cycle through the APs sending Wakeup IPIs to boot each.
 * (The actual IPIs are sent later from __cpu_up(); this routine only
 * prepares the boot cpu's bookkeeping and honours maxcpus=0.)
 */
void __init
smp_prepare_cpus (unsigned int max_cpus)
{
	int boot_cpu_id = hard_smp_processor_id();

	/*
	 * Initialize the per-CPU profiling counter/multiplier
	 */

	smp_setup_percpu_timer();

	cpu_set(0, cpu_callin_map);

	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
	ia64_cpu_to_sapicid[0] = boot_cpu_id;

	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

	current_thread_info()->cpu = 0;

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		init_cpu_online(cpumask_of(0));
		init_cpu_present(cpumask_of(0));
		init_cpu_possible(cpumask_of(0));
		return;
	}
}
/*
 * Mark the boot cpu online and called-in, and initialize its NUMA node
 * and hotplug state before any secondary cpu is started.
 */
void __devinit smp_prepare_boot_cpu(void)
{
	int cpu = smp_processor_id();

	set_cpu_online(cpu, true);
	cpu_set(cpu, cpu_callin_map);
	set_numa_node(cpu_to_node_map[cpu]);
	per_cpu(cpu_state, cpu) = CPU_ONLINE;
	paravirt_post_smp_prepare_boot_cpu();
}
#ifdef CONFIG_HOTPLUG_CPU
static inline void
clear_cpu_sibling_map(int cpu)
{
int i;
for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
for_each_cpu_mask(i, cpu_core_map[cpu])
cpu_clear(cpu, cpu_core_map[i]);
per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
}
/*
 * Drop @cpu from the sibling/core topology maps when it goes offline.
 *
 * For a single-thread, single-core package only the cpu's own bits need
 * clearing; otherwise every peer's masks are scrubbed as well via
 * clear_cpu_sibling_map().
 *
 * The original computed cpus_weight(cpu_core_map[cpu]) into a local
 * ("last") that was never read — dead code, removed.
 */
static void
remove_siblinginfo(int cpu)
{
	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_clear(cpu, cpu_core_map[cpu]);
		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
		return;
	}

	/* remove it from all sibling map's */
	clear_cpu_sibling_map(cpu);
}
extern void fixup_irqs(void);

/*
 * Before offlining @cpu, move platform interrupts it services elsewhere.
 * Currently only handles the CPEI (corrected platform error interrupt):
 * if @cpu is the CPEI target and retargeting is allowed, the CPEI is
 * moved to another online cpu; otherwise the offline is refused with
 * -EBUSY.  Returns 0 when the cpu holds no non-migratable interrupt.
 */
int migrate_platform_irqs(unsigned int cpu)
{
	int new_cpei_cpu;
	struct irq_data *data = NULL;
	const struct cpumask *mask;
	int 		retval = 0;

	/*
	 * dont permit CPEI target to removed.
	 */
	if (cpe_vector > 0 && is_cpu_cpei_target(cpu)) {
		printk ("CPU (%d) is CPEI Target\n", cpu);
		if (can_cpei_retarget()) {
			/*
			 * Now re-target the CPEI to a different processor
			 */
			new_cpei_cpu = cpumask_any(cpu_online_mask);
			mask = cpumask_of(new_cpei_cpu);
			set_cpei_target_cpu(new_cpei_cpu);
			data = irq_get_irq_data(ia64_cpe_irq);
			/*
			 * Switch for now, immediately, we need to do fake intr
			 * as other interrupts, but need to study CPEI behaviour with
			 * polling before making changes.
			 */
			if (data && data->chip) {
				data->chip->irq_disable(data);
				data->chip->irq_set_affinity(data, mask, false);
				data->chip->irq_enable(data);
				printk ("Re-targeting CPEI to cpu %d\n", new_cpei_cpu);
			}
		}
		/* data stays NULL when retargeting was impossible or failed */
		if (!data) {
			printk ("Unable to retarget CPEI, offline cpu [%d] failed\n", cpu);
			retval = -EBUSY;
		}
	}
	return retval;
}
/* must be called with cpucontrol mutex held */
/*
 * Take the current cpu out of service: refuse for the BSP (unless the
 * platform permits it) and for sn2 cpus the platform vetoes, migrate
 * platform interrupts away, scrub topology maps and irq affinities.
 * Returns 0 on success or -EBUSY when the cpu cannot be removed.
 */
int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * dont permit boot processor for now
	 */
	if (cpu == 0 && !bsp_remove_ok) {
		printk ("Your platform does not support removal of BSP\n");
		return (-EBUSY);
	}

	if (ia64_platform_is("sn2")) {
		if (!sn_cpu_disable_allowed(cpu))
			return -EBUSY;
	}

	set_cpu_online(cpu, false);

	/* roll back the online-clear if interrupts cannot be migrated */
	if (migrate_platform_irqs(cpu)) {
		set_cpu_online(cpu, true);
		return -EBUSY;
	}

	remove_siblinginfo(cpu);
	fixup_irqs();
	local_flush_tlb_all();
	cpu_clear(cpu, cpu_callin_map);
	return 0;
}
/*
 * Wait up to ~10 seconds (100 x 100ms) for the dying cpu to reach
 * CPU_DEAD, which play_dead sets as its acknowledgement.
 */
void __cpu_die(unsigned int cpu)
{
	unsigned int attempt;

	for (attempt = 0; attempt < 100; attempt++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk ("CPU %d is now offline\n", cpu);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#endif /* CONFIG_HOTPLUG_CPU */
void
smp_cpus_done (unsigned int dummy)
{
int cpu;
unsigned long bogosum = 0;
/*
* Allow the user to impress friends.
*/
for_each_online_cpu(cpu) {
bogosum += cpu_data(cpu)->loops_per_jiffy;
}
printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
(int)num_online_cpus(), bogosum/(500000/HZ), (bogosum/(5000/HZ))%100);
}
/*
 * Link @cpu into the core map of every online cpu on the same socket,
 * and into the sibling map of every online cpu on the same core.
 */
static inline void __devinit
set_cpu_sibling_map(int cpu)
{
	int other;

	for_each_online_cpu(other) {
		if (cpu_data(cpu)->socket_id != cpu_data(other)->socket_id)
			continue;

		cpu_set(other, cpu_core_map[cpu]);
		cpu_set(cpu, cpu_core_map[other]);
		if (cpu_data(cpu)->core_id == cpu_data(other)->core_id) {
			cpu_set(other, per_cpu(cpu_sibling_map, cpu));
			cpu_set(cpu, per_cpu(cpu_sibling_map, other));
		}
	}
}
/*
 * Bring logical cpu @cpu online: validate its SAPIC id, boot it via
 * do_boot_cpu() and, once up, wire it into the sibling/core maps.
 * Returns 0 on success or a negative errno.
 */
int __cpuinit
__cpu_up (unsigned int cpu)
{
	int ret;
	int sapicid;

	sapicid = ia64_cpu_to_sapicid[cpu];
	if (sapicid == -1)
		return -EINVAL;

	/*
	 * Already booted cpu? not valid anymore since we dont
	 * do idle loop tightspin anymore.
	 */
	if (cpu_isset(cpu, cpu_callin_map))
		return -EINVAL;

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
	/* Processor goes to start_secondary(), sets online flag */
	ret = do_boot_cpu(sapicid, cpu);
	if (ret < 0)
		return ret;

	/* single-thread/single-core packages only reference themselves */
	if (cpu_data(cpu)->threads_per_core == 1 &&
	    cpu_data(cpu)->cores_per_socket == 1) {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
		cpu_set(cpu, cpu_core_map[cpu]);
		return 0;
	}

	set_cpu_sibling_map(cpu);

	return 0;
}
/*
 * Assume that CPUs have been discovered by some platform-dependent interface.  For
 * SoftSDV/Lion, that would be ACPI.
 *
 * Setup of the IPI irq handler is done in irq.c:init_IRQ_SMP().
 */
void __init
init_smp_config(void)
{
	/* ia64 function pointers are (entry, gp) descriptor pairs */
	struct fptr {
		unsigned long fp;
		unsigned long gp;
	} *ap_startup;
	long sal_ret;

	/* Tell SAL where to drop the APs.  */
	ap_startup = (struct fptr *) start_ap;
	sal_ret = ia64_sal_set_vectors(SAL_VECTOR_OS_BOOT_RENDEZ,
				       ia64_tpa(ap_startup->fp), ia64_tpa(ap_startup->gp), 0, 0, 0, 0);
	if (sal_ret < 0)
		printk(KERN_ERR "SMP: Can't set SAL AP Boot Rendezvous: %s\n",
		       ia64_sal_strerror(sal_ret));
}
/*
 * identify_siblings(cpu) gets called from identify_cpu.  This populates the
 * information related to logical execution units in per_cpu_data structure.
 * Falls back to 1 core / 1 thread when PAL does not implement the query.
 */
void __devinit
identify_siblings(struct cpuinfo_ia64 *c)
{
	long status;
	u16 pltid;
	pal_logical_to_physical_t info;

	status = ia64_pal_logical_to_phys(-1, &info);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED) {
			printk(KERN_ERR
				"ia64_pal_logical_to_phys failed with %ld\n",
				status);
			return;
		}

		/* unimplemented: assume a flat 1-core, 1-thread package */
		info.overview_ppid = 0;
		info.overview_cpp  = 1;
		info.overview_tpc  = 1;
	}

	status = ia64_sal_physical_id_info(&pltid);
	if (status != PAL_STATUS_SUCCESS) {
		if (status != PAL_STATUS_UNIMPLEMENTED)
			printk(KERN_ERR
				"ia64_sal_pltid failed with %ld\n",
				status);
		return;
	}

	c->socket_id = (pltid << 8) | info.overview_ppid;

	if (info.overview_cpp == 1 && info.overview_tpc == 1)
		return;

	c->cores_per_socket = info.overview_cpp;
	c->threads_per_core = info.overview_tpc;
	c->num_log = info.overview_num_log;

	c->core_id = info.log1_cid;
	c->thread_id = info.log1_tid;
}
/*
 * returns non zero, if multi-threading is enabled
 * on at least one physical package. Due to hotplug cpu
 * and (maxcpus=), all threads may not necessarily be enabled
 * even though the processor supports multi-threading.
 */
int is_multithreading_enabled(void)
{
	int a, b;

	for_each_present_cpu(a) {
		for_each_present_cpu(b) {
			if (b == a)
				continue;
			/* two distinct cpus on the same socket AND core => SMT */
			if (cpu_data(b)->socket_id == cpu_data(a)->socket_id &&
			    cpu_data(b)->core_id == cpu_data(a)->core_id)
				return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(is_multithreading_enabled);
| gpl-2.0 |
varchild/android_kernel_htc_msm8660 | drivers/usb/storage/datafab.c | 4746 | 20676 | /* Driver for Datafab USB Compact Flash reader
*
* datafab driver v0.1:
*
* First release
*
* Current development and maintenance by:
* (c) 2000 Jimmie Mayfield (mayfield+datafab@sackheads.org)
*
* Many thanks to Robert Baruch for the SanDisk SmartMedia reader driver
* which I used as a template for this driver.
*
* Some bugfixes and scatter-gather code by Gregory P. Smith
* (greg-usb@electricrain.com)
*
* Fix for media change by Joerg Schneider (js@joergschneider.com)
*
* Other contributors:
* (c) 2002 Alan Stern <stern@rowland.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2, or (at your option) any
* later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
* This driver attempts to support USB CompactFlash reader/writer devices
* based on Datafab USB-to-ATA chips. It was specifically developed for the
* Datafab MDCFE-B USB CompactFlash reader but has since been found to work
* with a variety of Datafab-based devices from a number of manufacturers.
* I've received a report of this driver working with a Datafab-based
* SmartMedia device though please be aware that I'm personally unable to
* test SmartMedia support.
*
* This driver supports reading and writing. If you're truly paranoid,
* however, you can force the driver into a write-protected state by setting
* the WP enable bits in datafab_handle_mode_sense(). See the comments
* in that routine.
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include "usb.h"
#include "transport.h"
#include "protocol.h"
#include "debug.h"
MODULE_DESCRIPTION("Driver for Datafab USB Compact Flash reader");
MODULE_AUTHOR("Jimmie Mayfield <mayfield+datafab@sackheads.org>");
MODULE_LICENSE("GPL");
/* Per-device state kept in us_data->extra for a Datafab reader. */
struct datafab_info {
	unsigned long   sectors;	/* total sector count */
	unsigned long   ssize;		/* sector size in bytes */
	signed char	lun;		/* used for dual-slot readers */

	/* the following aren't used yet */
	unsigned char   sense_key;
	unsigned long   sense_asc;	/* additional sense code */
	unsigned long   sense_ascq;	/* additional sense code qualifier */
};

static int datafab_determine_lun(struct us_data *us,
				 struct datafab_info *info);
/*
 * The table of devices
 * (unusual_datafab.h is included twice with different UNUSUAL_DEV
 * expansions: once for the USB id table, once for the flags table.)
 */
#define UNUSUAL_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
		    vendorName, productName, useProtocol, useTransport, \
		    initFunction, flags) \
{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
  .driver_info = (flags)|(USB_US_TYPE_STOR<<24) }

struct usb_device_id datafab_usb_ids[] = {
#	include "unusual_datafab.h"
	{ }		/* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, datafab_usb_ids);

#undef UNUSUAL_DEV

/*
 * The flags table
 */
#define UNUSUAL_DEV(idVendor, idProduct, bcdDeviceMin, bcdDeviceMax, \
		    vendor_name, product_name, use_protocol, use_transport, \
		    init_function, Flags) \
{ \
	.vendorName = vendor_name,	\
	.productName = product_name,	\
	.useProtocol = use_protocol,	\
	.useTransport = use_transport,	\
	.initFunction = init_function,	\
}

static struct us_unusual_dev datafab_unusual_dev_list[] = {
#	include "unusual_datafab.h"
	{ }		/* Terminating entry */
};

#undef UNUSUAL_DEV
/* Bulk-in transfer of @len bytes; a zero-length request trivially succeeds. */
static inline int
datafab_bulk_read(struct us_data *us, unsigned char *data, unsigned int len) {
	if (!len)
		return USB_STOR_XFER_GOOD;

	US_DEBUGP("datafab_bulk_read: len = %d\n", len);
	return usb_stor_bulk_transfer_buf(us, us->recv_bulk_pipe,
			data, len, NULL);
}
/* Bulk-out transfer of @len bytes; a zero-length request trivially succeeds. */
static inline int
datafab_bulk_write(struct us_data *us, unsigned char *data, unsigned int len) {
	if (!len)
		return USB_STOR_XFER_GOOD;

	US_DEBUGP("datafab_bulk_write: len = %d\n", len);
	return usb_stor_bulk_transfer_buf(us, us->send_bulk_pipe,
			data, len, NULL);
}
/*
 * Read @sectors sectors starting at LBA @sector from the card and copy
 * them into the current SCSI command's transfer buffer, 64KB at a time
 * through a bounce buffer.  Returns a USB_STOR_TRANSPORT_* code.
 */
static int datafab_read_data(struct us_data *us,
			     struct datafab_info *info,
			     u32 sector,
			     u32 sectors)
{
	unsigned char *command = us->iobuf;
	unsigned char *buffer;
	unsigned char  thistime;
	unsigned int totallen, alloclen;
	int len, result;
	unsigned int sg_offset = 0;
	struct scatterlist *sg = NULL;

	// we're working in LBA mode.  according to the ATA spec,
	// we can support up to 28-bit addressing.  I don't know if Datafab
	// supports beyond 24-bit addressing.  It's kind of hard to test
	// since it requires > 8GB CF card.
	//
	if (sectors > 0x0FFFFFFF)
		return USB_STOR_TRANSPORT_ERROR;

	if (info->lun == -1) {
		result = datafab_determine_lun(us, info);
		if (result != USB_STOR_TRANSPORT_GOOD)
			return result;
	}

	totallen = sectors * info->ssize;

	// Since we don't read more than 64 KB at a time, we have to create
	// a bounce buffer and move the data a piece at a time between the
	// bounce buffer and the actual transfer buffer.

	alloclen = min(totallen, 65536u);
	buffer = kmalloc(alloclen, GFP_NOIO);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;

	do {
		// loop, never allocate or transfer more than 64k at once
		// (min(128k, 255*info->ssize) is the real limit)

		len = min(totallen, alloclen);
		thistime = (len / info->ssize) & 0xff;

		// build the 8-byte ATA-style READ SECTORS packet:
		// sector count, 28-bit LBA split across bytes 2-5, opcode
		command[0] = 0;
		command[1] = thistime;
		command[2] = sector & 0xFF;
		command[3] = (sector >> 8) & 0xFF;
		command[4] = (sector >> 16) & 0xFF;

		/* 0xE0 = LBA mode; bit 4 selects the card slot (LUN) */
		command[5] = 0xE0 + (info->lun << 4);
		command[5] |= (sector >> 24) & 0x0F;
		command[6] = 0x20;
		command[7] = 0x01;

		// send the read command
		result = datafab_bulk_write(us, command, 8);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// read the result
		result = datafab_bulk_read(us, buffer, len);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// Store the data in the transfer buffer
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				 &sg, &sg_offset, TO_XFER_BUF);

		sector += thistime;
		totallen -= len;
	} while (totallen > 0);

	kfree(buffer);
	return USB_STOR_TRANSPORT_GOOD;

 leave:
	kfree(buffer);
	return USB_STOR_TRANSPORT_ERROR;
}
/*
 * Write @sectors sectors starting at LBA @sector, pulling the data from
 * the current SCSI command's transfer buffer through a 64KB bounce
 * buffer.  Returns a USB_STOR_TRANSPORT_* code.
 */
static int datafab_write_data(struct us_data *us,
			      struct datafab_info *info,
			      u32 sector,
			      u32 sectors)
{
	unsigned char *command = us->iobuf;
	unsigned char *reply = us->iobuf;
	unsigned char *buffer;
	unsigned char thistime;
	unsigned int totallen, alloclen;
	int len, result;
	unsigned int sg_offset = 0;
	struct scatterlist *sg = NULL;

	// we're working in LBA mode.  according to the ATA spec,
	// we can support up to 28-bit addressing.  I don't know if Datafab
	// supports beyond 24-bit addressing.  It's kind of hard to test
	// since it requires > 8GB CF card.
	//
	if (sectors > 0x0FFFFFFF)
		return USB_STOR_TRANSPORT_ERROR;

	if (info->lun == -1) {
		result = datafab_determine_lun(us, info);
		if (result != USB_STOR_TRANSPORT_GOOD)
			return result;
	}

	totallen = sectors * info->ssize;

	// Since we don't write more than 64 KB at a time, we have to create
	// a bounce buffer and move the data a piece at a time between the
	// bounce buffer and the actual transfer buffer.

	alloclen = min(totallen, 65536u);
	buffer = kmalloc(alloclen, GFP_NOIO);
	if (buffer == NULL)
		return USB_STOR_TRANSPORT_ERROR;

	do {
		// loop, never allocate or transfer more than 64k at once
		// (min(128k, 255*info->ssize) is the real limit)

		len = min(totallen, alloclen);
		thistime = (len / info->ssize) & 0xff;

		// Get the data from the transfer buffer
		usb_stor_access_xfer_buf(buffer, len, us->srb,
				&sg, &sg_offset, FROM_XFER_BUF);

		// build the 8-byte ATA-style WRITE SECTORS packet
		command[0] = 0;
		command[1] = thistime;
		command[2] = sector & 0xFF;
		command[3] = (sector >> 8) & 0xFF;
		command[4] = (sector >> 16) & 0xFF;

		/* 0xE0 = LBA mode; bit 4 selects the card slot (LUN) */
		command[5] = 0xE0 + (info->lun << 4);
		command[5] |= (sector >> 24) & 0x0F;
		command[6] = 0x30;
		command[7] = 0x02;

		// send the command
		result = datafab_bulk_write(us, command, 8);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// send the data
		result = datafab_bulk_write(us, buffer, len);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// read the result
		result = datafab_bulk_read(us, reply, 2);
		if (result != USB_STOR_XFER_GOOD)
			goto leave;

		// NOTE(review): with &&, a reply of e.g. (0x51, 0x00) is
		// treated as success; || would flag any non-(0x50,0) status.
		// Kept as-is — device status semantics unverified here.
		if (reply[0] != 0x50 && reply[1] != 0) {
			US_DEBUGP("datafab_write_data:  Gah! "
				  "write return code: %02x %02x\n",
				  reply[0], reply[1]);
			result = USB_STOR_TRANSPORT_ERROR;
			goto leave;
		}

		sector += thistime;
		totallen -= len;
	} while (totallen > 0);

	kfree(buffer);
	return USB_STOR_TRANSPORT_GOOD;

 leave:
	kfree(buffer);
	return USB_STOR_TRANSPORT_ERROR;
}
/*
 * Probe which card slot (LUN 0 or 1) of a dual-slot reader holds a
 * card, by issuing IDENTIFY DEVICE to each and seeing which answers.
 * On success, info->lun is set and USB_STOR_TRANSPORT_GOOD returned.
 */
static int datafab_determine_lun(struct us_data *us,
				 struct datafab_info *info)
{
	// Dual-slot readers can be thought of as dual-LUN devices.
	// We need to determine which card slot is being used.
	// We'll send an IDENTIFY DEVICE command and see which LUN responds...
	//
	// There might be a better way of doing this?

	static unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
	unsigned char *command = us->iobuf;
	unsigned char *buf;
	int count = 0, rc;

	if (!info)
		return USB_STOR_TRANSPORT_ERROR;

	memcpy(command, scommand, 8);
	buf = kmalloc(512, GFP_NOIO);
	if (!buf)
		return USB_STOR_TRANSPORT_ERROR;

	US_DEBUGP("datafab_determine_lun:  locating...\n");

	// we'll try 3 times before giving up...
	//
	while (count++ < 3) {
		/* 0xa0 selects slot 0 in the device/head byte */
		command[5] = 0xa0;

		rc = datafab_bulk_write(us, command, 8);
		if (rc != USB_STOR_XFER_GOOD) {
			rc = USB_STOR_TRANSPORT_ERROR;
			goto leave;
		}

		rc = datafab_bulk_read(us, buf, 512);
		if (rc == USB_STOR_XFER_GOOD) {
			info->lun = 0;
			rc = USB_STOR_TRANSPORT_GOOD;
			goto leave;
		}

		/* 0xb0 selects slot 1 */
		command[5] = 0xb0;

		rc = datafab_bulk_write(us, command, 8);
		if (rc != USB_STOR_XFER_GOOD) {
			rc = USB_STOR_TRANSPORT_ERROR;
			goto leave;
		}

		rc = datafab_bulk_read(us, buf, 512);
		if (rc == USB_STOR_XFER_GOOD) {
			info->lun = 1;
			rc = USB_STOR_TRANSPORT_GOOD;
			goto leave;
		}

		msleep(20);
	}

	rc = USB_STOR_TRANSPORT_ERROR;

 leave:
	kfree(buf);
	return rc;
}
/*
 * Issue ATA IDENTIFY DEVICE to the active LUN and extract the media
 * capacity (total sector count) into info->sectors.  Returns a
 * USB_STOR_TRANSPORT_* code.
 */
static int datafab_id_device(struct us_data *us,
			     struct datafab_info *info)
{
	// this is a variation of the ATA "IDENTIFY DEVICE" command...according
	// to the ATA spec, 'Sector Count' isn't used but the Windows driver
	// sets this bit so we do too...
	//
	static unsigned char scommand[8] = { 0, 1, 0, 0, 0, 0xa0, 0xec, 1 };
	unsigned char *command = us->iobuf;
	unsigned char *reply;
	int rc;

	if (!info)
		return USB_STOR_TRANSPORT_ERROR;

	if (info->lun == -1) {
		rc = datafab_determine_lun(us, info);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;
	}

	memcpy(command, scommand, 8);
	reply = kmalloc(512, GFP_NOIO);
	if (!reply)
		return USB_STOR_TRANSPORT_ERROR;

	/* fold the LUN into the device/head byte */
	command[5] += (info->lun << 4);

	rc = datafab_bulk_write(us, command, 8);
	if (rc != USB_STOR_XFER_GOOD) {
		rc = USB_STOR_TRANSPORT_ERROR;
		goto leave;
	}

	// we'll go ahead and extract the media capacity while we're here...
	//
	rc = datafab_bulk_read(us, reply, 512);
	if (rc == USB_STOR_XFER_GOOD) {
		// capacity is at word offset 57-58 (bytes 114-117, little-endian)
		//
		info->sectors = ((u32)(reply[117]) << 24) |
				((u32)(reply[116]) << 16) |
				((u32)(reply[115]) <<  8) |
				((u32)(reply[114])      );
		rc = USB_STOR_TRANSPORT_GOOD;
		goto leave;
	}

	rc = USB_STOR_TRANSPORT_ERROR;

 leave:
	kfree(reply);
	return rc;
}
/*
 * Fake a SCSI MODE SENSE (6 or 10, chosen by @sense_6) reply for a
 * device that has no real SCSI personality.  Builds a header plus the
 * requested mode page(s) from static templates in us->iobuf and copies
 * them into the command's transfer buffer.  Unknown page codes get an
 * ILLEGAL REQUEST / invalid-field sense and TRANSPORT_FAILED.
 */
static int datafab_handle_mode_sense(struct us_data *us,
				     struct scsi_cmnd * srb, 
				     int sense_6)
{
	static unsigned char rw_err_page[12] = {
		0x1, 0xA, 0x21, 1, 0, 0, 0, 0, 1, 0, 0, 0
	};
	static unsigned char cache_page[12] = {
		0x8, 0xA, 0x1, 0, 0, 0, 0, 0, 0, 0, 0, 0
	};
	static unsigned char rbac_page[12] = {
		0x1B, 0xA, 0, 0x81, 0, 0, 0, 0, 0, 0, 0, 0
	};
	static unsigned char timer_page[8] = {
		0x1C, 0x6, 0, 0, 0, 0
	};
	unsigned char pc, page_code;
	unsigned int i = 0;
	struct datafab_info *info = (struct datafab_info *) (us->extra);
	unsigned char *ptr = us->iobuf;

	// most of this stuff is just a hack to get things working.  the
	// datafab reader doesn't present a SCSI interface so we
	// fudge the SCSI commands...
	//

	pc = srb->cmnd[2] >> 6;
	page_code = srb->cmnd[2] & 0x3F;

	switch (pc) {
	   case 0x0:
		US_DEBUGP("datafab_handle_mode_sense:  Current values\n");
		break;
	   case 0x1:
		US_DEBUGP("datafab_handle_mode_sense:  Changeable values\n");
		break;
	   case 0x2:
		US_DEBUGP("datafab_handle_mode_sense:  Default values\n");
		break;
	   case 0x3:
		US_DEBUGP("datafab_handle_mode_sense:  Saves values\n");
		break;
	}

	// mode parameter header: i is the running length of the reply
	memset(ptr, 0, 8);
	if (sense_6) {
		ptr[2] = 0x00;		// WP enable: 0x80
		i = 4;
	} else {
		ptr[3] = 0x00;		// WP enable: 0x80
		i = 8;
	}

	switch (page_code) {
	   default:
		// vendor-specific mode
		info->sense_key = 0x05;
		info->sense_asc = 0x24;
		info->sense_ascq = 0x00;
		return USB_STOR_TRANSPORT_FAILED;

	   case 0x1:
		memcpy(ptr + i, rw_err_page, sizeof(rw_err_page));
		i += sizeof(rw_err_page);
		break;

	   case 0x8:
		memcpy(ptr + i, cache_page, sizeof(cache_page));
		i += sizeof(cache_page);
		break;

	   case 0x1B:
		memcpy(ptr + i, rbac_page, sizeof(rbac_page));
		i += sizeof(rbac_page);
		break;

	   case 0x1C:
		memcpy(ptr + i, timer_page, sizeof(timer_page));
		i += sizeof(timer_page);
		break;

	   case 0x3F:		// retrieve all pages
		memcpy(ptr + i, timer_page, sizeof(timer_page));
		i += sizeof(timer_page);
		memcpy(ptr + i, rbac_page, sizeof(rbac_page));
		i += sizeof(rbac_page);
		memcpy(ptr + i, cache_page, sizeof(cache_page));
		i += sizeof(cache_page);
		memcpy(ptr + i, rw_err_page, sizeof(rw_err_page));
		i += sizeof(rw_err_page);
		break;
	}

	// patch the mode-data-length field in the header
	if (sense_6)
		ptr[0] = i - 1;
	else
		((__be16 *) ptr)[0] = cpu_to_be16(i - 2);
	usb_stor_set_xfer_buf(ptr, i, srb);

	return USB_STOR_TRANSPORT_GOOD;
}
/*
 * Destructor hook for us->extra.  Intentionally empty: the driver keeps
 * all of its state inside the single kzalloc'd datafab_info block,
 * which the usb-storage core frees on its own.
 */
static void datafab_info_destructor(void *extra)
{
}
// Transport for the Datafab MDCFE-B
//
static int datafab_transport(struct scsi_cmnd *srb, struct us_data *us)
{
	struct datafab_info *info;
	int rc;
	unsigned long block, blocks;
	unsigned char *ptr = us->iobuf;
	static unsigned char inquiry_reply[8] = {
		0x00, 0x80, 0x00, 0x01, 0x1F, 0x00, 0x00, 0x00
	};

	/* lazily allocate the per-device state on the first command */
	if (!us->extra) {
		us->extra = kzalloc(sizeof(struct datafab_info), GFP_NOIO);
		if (!us->extra) {
			US_DEBUGP("datafab_transport: Gah! "
				  "Can't allocate storage for Datafab info struct!\n");
			return USB_STOR_TRANSPORT_ERROR;
		}
		us->extra_destructor = datafab_info_destructor;
		((struct datafab_info *)us->extra)->lun = -1;
	}
	info = (struct datafab_info *) (us->extra);

	/* dispatch on the SCSI opcode */
	switch (srb->cmnd[0]) {
	case INQUIRY:
		US_DEBUGP("datafab_transport: INQUIRY. Returning bogus response");
		memcpy(ptr, inquiry_reply, sizeof(inquiry_reply));
		fill_inquiry_response(us, ptr, 36);
		return USB_STOR_TRANSPORT_GOOD;

	case READ_CAPACITY:
		info->ssize = 0x200;  // hard coded 512 byte sectors as per ATA spec
		rc = datafab_id_device(us, info);
		if (rc != USB_STOR_TRANSPORT_GOOD)
			return rc;
		US_DEBUGP("datafab_transport: READ_CAPACITY: %ld sectors, %ld bytes per sector\n",
			  info->sectors, info->ssize);
		// build the reply
		// we need the last sector, not the number of sectors
		((__be32 *) ptr)[0] = cpu_to_be32(info->sectors - 1);
		((__be32 *) ptr)[1] = cpu_to_be32(info->ssize);
		usb_stor_set_xfer_buf(ptr, 8, srb);
		return USB_STOR_TRANSPORT_GOOD;

	case MODE_SELECT_10:
		US_DEBUGP("datafab_transport: Gah! MODE_SELECT_10.\n");
		return USB_STOR_TRANSPORT_ERROR;

	// don't bother implementing READ_6 or WRITE_6.
	//
	case READ_10:
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5]));
		blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8]));
		US_DEBUGP("datafab_transport: READ_10: read block 0x%04lx count %ld\n", block, blocks);
		return datafab_read_data(us, info, block, blocks);

	case READ_12:
		// we'll probably never see a READ_12 but we'll do it anyway...
		//
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5]));
		blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) |
			 ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9]));
		US_DEBUGP("datafab_transport: READ_12: read block 0x%04lx count %ld\n", block, blocks);
		return datafab_read_data(us, info, block, blocks);

	case WRITE_10:
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5]));
		blocks = ((u32)(srb->cmnd[7]) << 8) | ((u32)(srb->cmnd[8]));
		US_DEBUGP("datafab_transport: WRITE_10: write block 0x%04lx count %ld\n", block, blocks);
		return datafab_write_data(us, info, block, blocks);

	case WRITE_12:
		// we'll probably never see a WRITE_12 but we'll do it anyway...
		//
		block = ((u32)(srb->cmnd[2]) << 24) | ((u32)(srb->cmnd[3]) << 16) |
			((u32)(srb->cmnd[4]) << 8) | ((u32)(srb->cmnd[5]));
		blocks = ((u32)(srb->cmnd[6]) << 24) | ((u32)(srb->cmnd[7]) << 16) |
			 ((u32)(srb->cmnd[8]) << 8) | ((u32)(srb->cmnd[9]));
		US_DEBUGP("datafab_transport: WRITE_12: write block 0x%04lx count %ld\n", block, blocks);
		return datafab_write_data(us, info, block, blocks);

	case TEST_UNIT_READY:
		US_DEBUGP("datafab_transport: TEST_UNIT_READY.\n");
		return datafab_id_device(us, info);

	case REQUEST_SENSE:
		US_DEBUGP("datafab_transport: REQUEST_SENSE. Returning faked response\n");
		// this response is pretty bogus right now. eventually if necessary
		// we can set the correct sense data. so far though it hasn't been
		// necessary
		//
		memset(ptr, 0, 18);
		ptr[0] = 0xF0;
		ptr[2] = info->sense_key;
		ptr[7] = 11;
		ptr[12] = info->sense_asc;
		ptr[13] = info->sense_ascq;
		usb_stor_set_xfer_buf(ptr, 18, srb);
		return USB_STOR_TRANSPORT_GOOD;

	case MODE_SENSE:
		US_DEBUGP("datafab_transport: MODE_SENSE_6 detected\n");
		return datafab_handle_mode_sense(us, srb, 1);

	case MODE_SENSE_10:
		US_DEBUGP("datafab_transport: MODE_SENSE_10 detected\n");
		return datafab_handle_mode_sense(us, srb, 0);

	case ALLOW_MEDIUM_REMOVAL:
		// sure. whatever. not like we can stop the user from
		// popping the media out of the device (no locking doors, etc)
		//
		return USB_STOR_TRANSPORT_GOOD;

	case START_STOP:
		/* this is used by sd.c'check_scsidisk_media_change to detect
		   media change */
		US_DEBUGP("datafab_transport: START_STOP.\n");
		/* the first datafab_id_device after a media change returns
		   an error (determined experimentally) */
		rc = datafab_id_device(us, info);
		if (rc == USB_STOR_TRANSPORT_GOOD) {
			info->sense_key = NO_SENSE;
			srb->result = SUCCESS;
		} else {
			info->sense_key = UNIT_ATTENTION;
			srb->result = SAM_STAT_CHECK_CONDITION;
		}
		return rc;

	default:
		US_DEBUGP("datafab_transport: Gah! Unknown command: %d (0x%x)\n",
			  srb->cmnd[0], srb->cmnd[0]);
		info->sense_key = 0x05;
		info->sense_asc = 0x20;
		info->sense_ascq = 0x00;
		return USB_STOR_TRANSPORT_FAILED;
	}
}
/*
 * Bind a newly seen Datafab device: run the common usb-storage probe,
 * then plug in the Datafab-specific transport before registration.
 */
static int datafab_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct us_data *us;
	int rc;

	rc = usb_stor_probe1(&us, intf, id,
			     (id - datafab_usb_ids) + datafab_unusual_dev_list);
	if (rc)
		return rc;

	us->transport_name = "Datafab Bulk-Only";
	us->transport = datafab_transport;
	us->transport_reset = usb_stor_Bulk_reset;
	us->max_lun = 1;

	return usb_stor_probe2(us);
}
/* usb_driver glue: the usb-storage core supplies every callback except
 * our probe, which installs the Datafab transport. */
static struct usb_driver datafab_driver = {
	.name = "ums-datafab",
	.probe = datafab_probe,
	.disconnect = usb_stor_disconnect,
	.suspend = usb_stor_suspend,
	.resume = usb_stor_resume,
	.reset_resume = usb_stor_reset_resume,
	.pre_reset = usb_stor_pre_reset,
	.post_reset = usb_stor_post_reset,
	.id_table = datafab_usb_ids,
	.soft_unbind = 1,
};
/* Module plumbing: register/unregister the usb-storage sub-driver. */
static int __init datafab_init(void)
{
	return usb_register(&datafab_driver);
}
static void __exit datafab_exit(void)
{
	usb_deregister(&datafab_driver);
}
module_init(datafab_init);
module_exit(datafab_exit);
| gpl-2.0 |
MROM/android_kernel_bn_encore | fs/fat/fatent.c | 6538 | 16686 | /*
* Copyright (C) 2004, OGAWA Hirofumi
* Released under GPL v2.
*/
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/msdos_fs.h>
#include <linux/blkdev.h>
#include "fat.h"
/*
 * Method table abstracting the on-disk FAT entry format; one instance
 * per FAT variant (12/16/32) is selected in fat_ent_access_init().
 */
struct fatent_operations {
	/* entry number -> (byte offset within block, block number) */
	void (*ent_blocknr)(struct super_block *, int, int *, sector_t *);
	/* point the fat_entry union pointer at the entry at offset */
	void (*ent_set_ptr)(struct fat_entry *, int);
	/* read the block(s) holding an entry */
	int (*ent_bread)(struct super_block *, struct fat_entry *,
			 int, sector_t);
	/* decode the current entry value */
	int (*ent_get)(struct fat_entry *);
	/* encode a new value into the current entry */
	void (*ent_put)(struct fat_entry *, int);
	/* advance to the next entry; 0 when the pinned block is exhausted */
	int (*ent_next)(struct fat_entry *);
};
/* FAT12 packs two 12-bit entries into three bytes, so neighbouring
 * entries share bytes; this lock serialises the nibble read/writes. */
static DEFINE_SPINLOCK(fat12_entry_lock);
/*
 * Translate a FAT12 entry number into the disk location of its first
 * byte: *offset is the byte offset inside the block, *blocknr the block.
 */
static void fat12_ent_blocknr(struct super_block *sb, int entry,
			      int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	/* each FAT12 entry occupies 1.5 bytes */
	int bytes = entry + (entry >> 1);
	WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
	*offset = bytes & (sb->s_blocksize - 1);
	*blocknr = sbi->fat_start + (bytes >> sb->s_blocksize_bits);
}
/*
 * FAT16/FAT32 entry -> disk location mapping; the per-variant entry
 * width is encoded in sbi->fatent_shift (1 for 16-bit, 2 for 32-bit).
 */
static void fat_ent_blocknr(struct super_block *sb, int entry,
			    int *offset, sector_t *blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	int byte_off = entry << sbi->fatent_shift;

	WARN_ON(entry < FAT_START_ENT || sbi->max_cluster <= entry);
	*blocknr = sbi->fat_start + (byte_off >> sb->s_blocksize_bits);
	*offset = byte_off & (sb->s_blocksize - 1);
}
/*
 * Point u.ent12_p[0]/[1] at the two bytes holding this 12-bit entry.
 * When the entry straddles a block boundary, the second byte lives at
 * the very start of bhs[1].
 */
static void fat12_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	struct buffer_head **bhs = fatent->bhs;
	if (fatent->nr_bhs == 1) {
		/* both bytes are inside bhs[0] */
		WARN_ON(offset >= (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[0]->b_data + (offset + 1);
	} else {
		/* entry crosses into the next block */
		WARN_ON(offset != (bhs[0]->b_size - 1));
		fatent->u.ent12_p[0] = bhs[0]->b_data + offset;
		fatent->u.ent12_p[1] = bhs[1]->b_data;
	}
}
/* Point u.ent16_p at the 16-bit entry located @offset bytes into bhs[0]. */
static void fat16_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	/* entries are 2-byte aligned within the block */
	WARN_ON(offset & 1);
	fatent->u.ent16_p = (__le16 *)(fatent->bhs[0]->b_data + offset);
}
/* Point u.ent32_p at the 32-bit entry located @offset bytes into bhs[0]. */
static void fat32_ent_set_ptr(struct fat_entry *fatent, int offset)
{
	/* entries are 4-byte aligned within the block */
	WARN_ON(offset & 3);
	fatent->u.ent32_p = (__le32 *)(fatent->bhs[0]->b_data + offset);
}
/*
 * Read the block(s) backing a FAT12 entry.  Because a 12-bit entry can
 * straddle a block boundary, this may pin two buffer_heads (nr_bhs==2).
 * Returns 0 on success, -EIO on read failure.
 */
static int fat12_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			   int offset, sector_t blocknr)
{
	struct buffer_head **bhs = fatent->bhs;
	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	bhs[0] = sb_bread(sb, blocknr);
	if (!bhs[0])
		goto err;
	if ((offset + 1) < sb->s_blocksize)
		fatent->nr_bhs = 1;
	else {
		/* This entry is block boundary, it needs the next block */
		blocknr++;
		bhs[1] = sb_bread(sb, blocknr);
		if (!bhs[1])
			goto err_brelse;
		fatent->nr_bhs = 2;
	}
	fat12_ent_set_ptr(fatent, offset);
	return 0;
err_brelse:
	brelse(bhs[0]);
err:
	fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)", (llu)blocknr);
	return -EIO;
}
/*
 * Read the single block that holds a FAT16/FAT32 entry and let the
 * variant's ent_set_ptr point at it.  Returns 0 on success or -EIO.
 */
static int fat_ent_bread(struct super_block *sb, struct fat_entry *fatent,
			 int offset, sector_t blocknr)
{
	struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;

	WARN_ON(blocknr < MSDOS_SB(sb)->fat_start);
	fatent->fat_inode = MSDOS_SB(sb)->fat_inode;
	fatent->bhs[0] = sb_bread(sb, blocknr);
	if (fatent->bhs[0] == NULL) {
		fat_msg(sb, KERN_ERR, "FAT read failed (blocknr %llu)",
			(llu)blocknr);
		return -EIO;
	}
	fatent->nr_bhs = 1;
	ops->ent_set_ptr(fatent, offset);
	return 0;
}
/*
 * Decode the 12-bit value from the two bytes ent12_p[] points at;
 * odd-numbered entries live in the high nibbles.  Bad-cluster and EOF
 * values collapse to FAT_ENT_EOF.
 */
static int fat12_ent_get(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	int next;
	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1)
		next = (*ent12_p[0] >> 4) | (*ent12_p[1] << 4);
	else
		next = (*ent12_p[1] << 8) | *ent12_p[0];
	spin_unlock(&fat12_entry_lock);
	next &= 0x0fff;
	if (next >= BAD_FAT12)
		next = FAT_ENT_EOF;
	return next;
}
/* Fetch a FAT16 entry, folding the bad/EOF range into FAT_ENT_EOF. */
static int fat16_ent_get(struct fat_entry *fatent)
{
	int val;

	WARN_ON((unsigned long)fatent->u.ent16_p & 1);
	val = le16_to_cpu(*fatent->u.ent16_p);
	return (val >= BAD_FAT16) ? FAT_ENT_EOF : val;
}
/* Fetch a FAT32 entry (low 28 bits), folding bad/EOF into FAT_ENT_EOF. */
static int fat32_ent_get(struct fat_entry *fatent)
{
	int val;

	WARN_ON((unsigned long)fatent->u.ent32_p & 3);
	val = le32_to_cpu(*fatent->u.ent32_p) & 0x0fffffff;
	return (val >= BAD_FAT32) ? FAT_ENT_EOF : val;
}
/*
 * Store @new into a FAT12 entry under fat12_entry_lock, splitting the
 * 12 bits across the two bytes (odd entries use the high nibbles), and
 * dirty the touched buffer(s).
 */
static void fat12_ent_put(struct fat_entry *fatent, int new)
{
	u8 **ent12_p = fatent->u.ent12_p;
	if (new == FAT_ENT_EOF)
		new = EOF_FAT12;
	spin_lock(&fat12_entry_lock);
	if (fatent->entry & 1) {
		*ent12_p[0] = (new << 4) | (*ent12_p[0] & 0x0f);
		*ent12_p[1] = new >> 4;
	} else {
		*ent12_p[0] = new & 0xff;
		*ent12_p[1] = (*ent12_p[1] & 0xf0) | (new >> 8);
	}
	spin_unlock(&fat12_entry_lock);
	mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
	/* a straddling entry dirties the second block too */
	if (fatent->nr_bhs == 2)
		mark_buffer_dirty_inode(fatent->bhs[1], fatent->fat_inode);
}
static void fat16_ent_put(struct fat_entry *fatent, int new)
{
if (new == FAT_ENT_EOF)
new = EOF_FAT16;
*fatent->u.ent16_p = cpu_to_le16(new);
mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
static void fat32_ent_put(struct fat_entry *fatent, int new)
{
if (new == FAT_ENT_EOF)
new = EOF_FAT32;
WARN_ON(new & 0xf0000000);
new |= le32_to_cpu(*fatent->u.ent32_p) & ~0x0fffffff;
*fatent->u.ent32_p = cpu_to_le32(new);
mark_buffer_dirty_inode(fatent->bhs[0], fatent->fat_inode);
}
/*
 * Step to the next FAT12 entry by sliding the two byte pointers forward.
 * Handles the case where the previous entry straddled two blocks: the
 * first block is released and bhs[1] becomes bhs[0].  Returns 1 while
 * more entries remain in the pinned block(s), 0 at the end.
 */
static int fat12_ent_next(struct fat_entry *fatent)
{
	u8 **ent12_p = fatent->u.ent12_p;
	struct buffer_head **bhs = fatent->bhs;
	/* odd entries start mid-byte, so the step is 1 or 2 bytes */
	u8 *nextp = ent12_p[1] + 1 + (fatent->entry & 1);
	fatent->entry++;
	if (fatent->nr_bhs == 1) {
		WARN_ON(ent12_p[0] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 2)));
		WARN_ON(ent12_p[1] > (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1)));
		if (nextp < (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1))) {
			ent12_p[0] = nextp - 1;
			ent12_p[1] = nextp;
			return 1;
		}
	} else {
		WARN_ON(ent12_p[0] != (u8 *)(bhs[0]->b_data + (bhs[0]->b_size - 1)));
		WARN_ON(ent12_p[1] != (u8 *)bhs[1]->b_data);
		ent12_p[0] = nextp - 1;
		ent12_p[1] = nextp;
		/* the straddling entry is done; drop the first block */
		brelse(bhs[0]);
		bhs[0] = bhs[1];
		fatent->nr_bhs = 1;
		return 1;
	}
	/* block exhausted: caller must read the next one */
	ent12_p[0] = NULL;
	ent12_p[1] = NULL;
	return 0;
}
/* Advance to the next FAT16 entry; returns 0 at the end of the block. */
static int fat16_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];

	fatent->entry++;
	if (fatent->u.ent16_p >= (__le16 *)(bh->b_data + (bh->b_size - 2))) {
		fatent->u.ent16_p = NULL;
		return 0;
	}
	fatent->u.ent16_p++;
	return 1;
}
/* Advance to the next FAT32 entry; returns 0 at the end of the block. */
static int fat32_ent_next(struct fat_entry *fatent)
{
	const struct buffer_head *bh = fatent->bhs[0];

	fatent->entry++;
	if (fatent->u.ent32_p >= (__le32 *)(bh->b_data + (bh->b_size - 4))) {
		fatent->u.ent32_p = NULL;
		return 0;
	}
	fatent->u.ent32_p++;
	return 1;
}
/* FAT12 accessors: 12-bit entries, 1.5 bytes each, may span two blocks */
static struct fatent_operations fat12_ops = {
	.ent_blocknr	= fat12_ent_blocknr,
	.ent_set_ptr	= fat12_ent_set_ptr,
	.ent_bread	= fat12_ent_bread,
	.ent_get	= fat12_ent_get,
	.ent_put	= fat12_ent_put,
	.ent_next	= fat12_ent_next,
};
/* FAT16 accessors: 16-bit little-endian entries */
static struct fatent_operations fat16_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat16_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat16_ent_get,
	.ent_put	= fat16_ent_put,
	.ent_next	= fat16_ent_next,
};
/* FAT32 accessors: 32-bit entries with a reserved top nibble */
static struct fatent_operations fat32_ops = {
	.ent_blocknr	= fat_ent_blocknr,
	.ent_set_ptr	= fat32_ent_set_ptr,
	.ent_bread	= fat_ent_bread,
	.ent_get	= fat32_ent_get,
	.ent_put	= fat32_ent_put,
	.ent_next	= fat32_ent_next,
};
/* Serialise all readers/writers of the FAT region for this volume. */
static inline void lock_fat(struct msdos_sb_info *sbi)
{
	mutex_lock(&sbi->fat_lock);
}
static inline void unlock_fat(struct msdos_sb_info *sbi)
{
	mutex_unlock(&sbi->fat_lock);
}
/*
 * Mount-time setup: pick the accessor table and entry-width shift for
 * this volume's FAT variant and initialise the FAT lock.
 */
void fat_ent_access_init(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);

	mutex_init(&sbi->fat_lock);

	if (sbi->fat_bits == 32) {
		sbi->fatent_shift = 2;
		sbi->fatent_ops = &fat32_ops;
	} else if (sbi->fat_bits == 16) {
		sbi->fatent_shift = 1;
		sbi->fatent_ops = &fat16_ops;
	} else if (sbi->fat_bits == 12) {
		sbi->fatent_shift = -1;	/* entries are 1.5 bytes wide */
		sbi->fatent_ops = &fat12_ops;
	}
}
/*
 * Fast path for fat_ent_read(): if @fatent still pins the buffer(s)
 * covering the entry at (@offset, @blocknr), repoint the entry pointer
 * and return 1; otherwise return 0 so the caller re-reads from disk.
 */
static inline int fat_ent_update_ptr(struct super_block *sb,
				     struct fat_entry *fatent,
				     int offset, sector_t blocknr)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct buffer_head **bhs = fatent->bhs;
	/* Is this fatent's blocks including this entry? */
	if (!fatent->nr_bhs || bhs[0]->b_blocknr != blocknr)
		return 0;
	if (sbi->fat_bits == 12) {
		if ((offset + 1) < sb->s_blocksize) {
			/* This entry is on bhs[0]. */
			if (fatent->nr_bhs == 2) {
				/* the second block is no longer needed */
				brelse(bhs[1]);
				fatent->nr_bhs = 1;
			}
		} else {
			/* This entry needs the next block. */
			if (fatent->nr_bhs != 2)
				return 0;
			if (bhs[1]->b_blocknr != (blocknr + 1))
				return 0;
		}
	}
	ops->ent_set_ptr(fatent, offset);
	return 1;
}
/*
 * Look up the FAT entry for cluster @entry, reusing @fatent's pinned
 * block when possible.  Returns the entry value (next cluster,
 * FAT_ENT_FREE or FAT_ENT_EOF) on success, or a negative error code.
 */
int fat_ent_read(struct inode *inode, struct fat_entry *fatent, int entry)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	int err, offset;
	sector_t blocknr;
	if (entry < FAT_START_ENT || sbi->max_cluster <= entry) {
		fatent_brelse(fatent);
		fat_fs_error(sb, "invalid access to FAT (entry 0x%08x)", entry);
		return -EIO;
	}
	fatent_set_entry(fatent, entry);
	ops->ent_blocknr(sb, entry, &offset, &blocknr);
	/* re-read from disk only if the cached block doesn't cover us */
	if (!fat_ent_update_ptr(sb, fatent, offset, blocknr)) {
		fatent_brelse(fatent);
		err = ops->ent_bread(sb, fatent, offset, blocknr);
		if (err)
			return err;
	}
	return ops->ent_get(fatent);
}
/* FIXME: We can write the blocks as more big chunk. */
/*
 * Propagate freshly written primary-FAT blocks @bhs to every backup
 * FAT copy on the volume, syncing each copy when the superblock is
 * mounted synchronously.  Returns 0 or a negative error.
 */
static int fat_mirror_bhs(struct super_block *sb, struct buffer_head **bhs,
			  int nr_bhs)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct buffer_head *c_bh;
	int err, n, copy;
	err = 0;
	for (copy = 1; copy < sbi->fats; copy++) {
		/* each backup FAT sits fat_length sectors further on */
		sector_t backup_fat = sbi->fat_length * copy;
		for (n = 0; n < nr_bhs; n++) {
			c_bh = sb_getblk(sb, backup_fat + bhs[n]->b_blocknr);
			if (!c_bh) {
				err = -ENOMEM;
				goto error;
			}
			/* clone the primary block into the backup copy */
			memcpy(c_bh->b_data, bhs[n]->b_data, sb->s_blocksize);
			set_buffer_uptodate(c_bh);
			mark_buffer_dirty_inode(c_bh, sbi->fat_inode);
			if (sb->s_flags & MS_SYNCHRONOUS)
				err = sync_dirty_buffer(c_bh);
			brelse(c_bh);
			if (err)
				goto error;
		}
	}
error:
	return err;
}
/*
 * Write @new into the entry described by @fatent and propagate the
 * change to all backup FATs; when @wait is set, the primary blocks are
 * synced first.  Returns 0 or a negative error.
 */
int fat_ent_write(struct inode *inode, struct fat_entry *fatent,
		  int new, int wait)
{
	struct fatent_operations *ops = MSDOS_SB(inode->i_sb)->fatent_ops;
	int err = 0;

	ops->ent_put(fatent, new);
	if (wait)
		err = fat_sync_bhs(fatent->bhs, fatent->nr_bhs);
	if (err)
		return err;
	return fat_mirror_bhs(inode->i_sb, fatent->bhs, fatent->nr_bhs);
}
/* Step @fatent forward; 0 at block end or past the last cluster. */
static inline int fat_ent_next(struct msdos_sb_info *sbi,
			       struct fat_entry *fatent)
{
	return sbi->fatent_ops->ent_next(fatent) &&
	       fatent->entry < sbi->max_cluster;
}
/* (Re)read the FAT block containing fatent->entry, dropping old bhs. */
static inline int fat_ent_read_block(struct super_block *sb,
				     struct fat_entry *fatent)
{
	struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	int offset;
	sector_t blocknr;

	fatent_brelse(fatent);
	ops->ent_blocknr(sb, fatent->entry, &offset, &blocknr);
	return ops->ent_bread(sb, fatent, offset, blocknr);
}
/*
 * Merge the buffer_heads pinned by @fatent into the deduplicated array
 * @bhs (current length *nr_bhs), taking an extra reference on each
 * newly added one; callers brelse() them after syncing/mirroring.
 */
static void fat_collect_bhs(struct buffer_head **bhs, int *nr_bhs,
			    struct fat_entry *fatent)
{
	int n, i;
	for (n = 0; n < fatent->nr_bhs; n++) {
		/* skip buffers we have already collected */
		for (i = 0; i < *nr_bhs; i++) {
			if (fatent->bhs[n] == bhs[i])
				break;
		}
		if (i == *nr_bhs) {
			get_bh(fatent->bhs[n]);
			bhs[i] = fatent->bhs[n];
			(*nr_bhs)++;
		}
	}
}
/*
 * Allocate @nr_cluster free clusters and chain them together, storing
 * the chosen cluster numbers in @cluster[].  The scan starts just past
 * the previous allocation (sbi->prev_free) and wraps over the FAT.
 * Returns 0 on success, -ENOSPC when the FAT is full, or another
 * negative error; a partially built chain is released on failure.
 */
int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent, prev_ent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, count, err, nr_bhs, idx_clus;
	BUG_ON(nr_cluster > (MAX_BUF_PER_PAGE / 2));	/* fixed limit */
	lock_fat(sbi);
	/* fail fast when the cached free count already rules us out */
	if (sbi->free_clusters != -1 && sbi->free_clus_valid &&
	    sbi->free_clusters < nr_cluster) {
		unlock_fat(sbi);
		return -ENOSPC;
	}
	err = nr_bhs = idx_clus = 0;
	count = FAT_START_ENT;
	fatent_init(&prev_ent);
	fatent_init(&fatent);
	fatent_set_entry(&fatent, sbi->prev_free + 1);
	while (count < sbi->max_cluster) {
		/* wrap the scan back to the first data cluster */
		if (fatent.entry >= sbi->max_cluster)
			fatent.entry = FAT_START_ENT;
		fatent_set_entry(&fatent, fatent.entry);
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;
		/* Find the free entries in a block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE) {
				int entry = fatent.entry;
				/* make the cluster chain */
				ops->ent_put(&fatent, FAT_ENT_EOF);
				if (prev_ent.nr_bhs)
					ops->ent_put(&prev_ent, entry);
				fat_collect_bhs(bhs, &nr_bhs, &fatent);
				sbi->prev_free = entry;
				if (sbi->free_clusters != -1)
					sbi->free_clusters--;
				sb->s_dirt = 1;
				cluster[idx_clus] = entry;
				idx_clus++;
				if (idx_clus == nr_cluster)
					goto out;
				/*
				 * fat_collect_bhs() gets ref-count of bhs,
				 * so we can still use the prev_ent.
				 */
				prev_ent = fatent;
			}
			count++;
			if (count == sbi->max_cluster)
				break;
		} while (fat_ent_next(sbi, &fatent));
	}
	/* Couldn't allocate the free entries */
	sbi->free_clusters = 0;
	sbi->free_clus_valid = 1;
	sb->s_dirt = 1;
	err = -ENOSPC;
out:
	unlock_fat(sbi);
	fatent_brelse(&fatent);
	if (!err) {
		if (inode_needs_sync(inode))
			err = fat_sync_bhs(bhs, nr_bhs);
		if (!err)
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
	}
	/* drop the references taken by fat_collect_bhs() */
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	if (err && idx_clus)
		fat_free_clusters(inode, cluster[0]);
	return err;
}
/*
 * Walk the cluster chain starting at @cluster, marking every entry
 * free (batching discards over contiguous runs when enabled) and
 * flushing/mirroring the dirtied FAT blocks in bounded batches.
 * Returns 0 or a negative error.
 */
int fat_free_clusters(struct inode *inode, int cluster)
{
	struct super_block *sb = inode->i_sb;
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	struct buffer_head *bhs[MAX_BUF_PER_PAGE];
	int i, err, nr_bhs;
	int first_cl = cluster;
	nr_bhs = 0;
	fatent_init(&fatent);
	lock_fat(sbi);
	do {
		cluster = fat_ent_read(inode, &fatent, cluster);
		if (cluster < 0) {
			err = cluster;
			goto error;
		} else if (cluster == FAT_ENT_FREE) {
			/* chain is corrupt: it ran into an already-free entry */
			fat_fs_error(sb, "%s: deleting FAT entry beyond EOF",
				     __func__);
			err = -EIO;
			goto error;
		}
		if (sbi->options.discard) {
			/*
			 * Issue discard for the sectors we no longer
			 * care about, batching contiguous clusters
			 * into one request
			 */
			if (cluster != fatent.entry + 1) {
				int nr_clus = fatent.entry - first_cl + 1;
				sb_issue_discard(sb,
					fat_clus_to_blknr(sbi, first_cl),
					nr_clus * sbi->sec_per_clus,
					GFP_NOFS, 0);
				first_cl = cluster;
			}
		}
		ops->ent_put(&fatent, FAT_ENT_FREE);
		if (sbi->free_clusters != -1) {
			sbi->free_clusters++;
			sb->s_dirt = 1;
		}
		/* flush a full batch before collecting more buffers */
		if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
			if (sb->s_flags & MS_SYNCHRONOUS) {
				err = fat_sync_bhs(bhs, nr_bhs);
				if (err)
					goto error;
			}
			err = fat_mirror_bhs(sb, bhs, nr_bhs);
			if (err)
				goto error;
			for (i = 0; i < nr_bhs; i++)
				brelse(bhs[i]);
			nr_bhs = 0;
		}
		fat_collect_bhs(bhs, &nr_bhs, &fatent);
	} while (cluster != FAT_ENT_EOF);
	/* flush whatever is left in the final batch */
	if (sb->s_flags & MS_SYNCHRONOUS) {
		err = fat_sync_bhs(bhs, nr_bhs);
		if (err)
			goto error;
	}
	err = fat_mirror_bhs(sb, bhs, nr_bhs);
error:
	fatent_brelse(&fatent);
	for (i = 0; i < nr_bhs; i++)
		brelse(bhs[i]);
	unlock_fat(sbi);
	return err;
}
EXPORT_SYMBOL_GPL(fat_free_clusters);
/* 128kb is the whole sectors for FAT12 and FAT16 */
#define FAT_READA_SIZE		(128 * 1024)

/* Kick off readahead of @reada_blocks FAT blocks starting at the block
 * that holds fatent->entry. */
static void fat_ent_reada(struct super_block *sb, struct fat_entry *fatent,
			  unsigned long reada_blocks)
{
	struct fatent_operations *ops = MSDOS_SB(sb)->fatent_ops;
	sector_t first;
	unsigned long i;
	int offset;

	ops->ent_blocknr(sb, fatent->entry, &offset, &first);
	for (i = 0; i < reada_blocks; i++)
		sb_breadahead(sb, first + i);
}
/*
 * Scan the whole FAT (with readahead) to count free clusters, caching
 * the result in sbi->free_clusters.  Returns 0 or a negative error.
 */
int fat_count_free_clusters(struct super_block *sb)
{
	struct msdos_sb_info *sbi = MSDOS_SB(sb);
	struct fatent_operations *ops = sbi->fatent_ops;
	struct fat_entry fatent;
	unsigned long reada_blocks, reada_mask, cur_block;
	int err = 0, free;
	lock_fat(sbi);
	/* nothing to do when the cached count is still trusted */
	if (sbi->free_clusters != -1 && sbi->free_clus_valid)
		goto out;
	reada_blocks = FAT_READA_SIZE >> sb->s_blocksize_bits;
	reada_mask = reada_blocks - 1;
	cur_block = 0;
	free = 0;
	fatent_init(&fatent);
	fatent_set_entry(&fatent, FAT_START_ENT);
	while (fatent.entry < sbi->max_cluster) {
		/* readahead of fat blocks */
		if ((cur_block & reada_mask) == 0) {
			unsigned long rest = sbi->fat_length - cur_block;
			fat_ent_reada(sb, &fatent, min(reada_blocks, rest));
		}
		cur_block++;
		err = fat_ent_read_block(sb, &fatent);
		if (err)
			goto out;
		/* count the free entries within this block */
		do {
			if (ops->ent_get(&fatent) == FAT_ENT_FREE)
				free++;
		} while (fat_ent_next(sbi, &fatent));
	}
	sbi->free_clusters = free;
	sbi->free_clus_valid = 1;
	sb->s_dirt = 1;
	fatent_brelse(&fatent);
out:
	unlock_fat(sbi);
	return err;
}
| gpl-2.0 |
NoelMacwan/android_kernel_sony_msm8928 | drivers/media/video/videobuf-dma-contig.c | 7818 | 9705 | /*
* helper functions for physically contiguous capture buffers
*
* The functions support hardware lacking scatter gather support
* (i.e. the buffers must be linear in physical memory)
*
* Copyright (c) 2008 Magnus Damm
*
* Based on videobuf-vmalloc.c,
* (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
/* Per-buffer private state for the dma-contig allocator. */
struct videobuf_dma_contig_memory {
	u32 magic;		/* MAGIC_DC_MEM, integrity check */
	void *vaddr;		/* kernel virtual address, NULL when not allocated */
	dma_addr_t dma_handle;	/* bus address handed to the device */
	unsigned long size;	/* size of the mapping, page aligned */
};
#define MAGIC_DC_MEM 0x0733ac61
#define MAGIC_CHECK(is, should)						    \
	if (unlikely((is) != (should))) {				    \
		pr_err("magic mismatch: %x expected %x\n", (is), (should)); \
		BUG();							    \
	}
/* vma open hook: one more userspace mapping now references this map. */
static void videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);
	++map->count;
}
/*
 * vma close hook: drop one mapping reference.  When the last reference
 * goes, cancel any active streaming, release the coherent DMA memory of
 * every buffer attached to this mapping and free the mapping itself.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;
	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);
	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;
		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);
		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);
		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;
			if (q->bufs[i]->map != map)
				continue;
			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */
				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dev_dbg(q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);
				dma_free_coherent(q->dev, mem->size,
						  mem->vaddr, mem->dma_handle);
				mem->vaddr = NULL;
			}
			/* detach the buffer from the dying mapping */
			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}
		kfree(map);
		videobuf_queue_unlock(q);
	}
}
/* open/close hooks maintain the reference count on userspace mappings */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
};
/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	/* nothing was pinned for userptr, so forgetting the address suffices */
	mem->dma_handle = 0;
	mem->size = 0;
}
/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted.
 *
 * Returns 0 if successful.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
					struct videobuf_buffer *vb)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long prev_pfn, this_pfn;
	unsigned long pages_done, user_address;
	unsigned int offset;
	int ret;

	offset = vb->baddr & ~PAGE_MASK;
	mem->size = PAGE_ALIGN(vb->size + offset);
	ret = -EINVAL;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, vb->baddr);
	if (!vma)
		goto out_up;

	/* the whole buffer must fit within one vma */
	if ((vb->baddr + mem->size) > vma->vm_end)
		goto out_up;

	pages_done = 0;
	prev_pfn = 0; /* kill warning */
	user_address = vb->baddr;

	/* walk the range page by page, insisting on contiguous pfns */
	while (pages_done < (mem->size >> PAGE_SHIFT)) {
		ret = follow_pfn(vma, user_address, &this_pfn);
		if (ret)
			break;

		if (pages_done == 0)
			mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
		else if (this_pfn != (prev_pfn + 1))
			ret = -EFAULT;

		if (ret)
			break;

		prev_pfn = this_pfn;
		user_address += PAGE_SIZE;
		pages_done++;
	}

out_up:
	/* release the same mm we locked above; using the cached pointer
	 * keeps lock/unlock symmetric */
	up_read(&mm->mmap_sem);

	return ret;
}
/*
 * Allocate a zeroed videobuf_buffer with our private dma-contig memory
 * struct appended directly behind it; returns NULL on allocation failure.
 */
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_buffer *vb;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (!vb)
		return NULL;

	mem = (void *)((char *)vb + size);
	mem->magic = MAGIC_DC_MEM;
	vb->priv = mem;
	return vb;
}
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
return mem->vaddr;
}
/*
 * Prepare the buffer's backing memory for I/O according to the memory
 * model userspace selected (MMAP or USERPTR); OVERLAY is unsupported.
 * Returns 0 on success or a negative error.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);
		/* allocate memory for the read() method */
		mem->size = PAGE_ALIGN(vb->size);
		mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
						&mem->dma_handle, GFP_KERNEL);
		if (!mem->vaddr) {
			dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
				mem->size);
			return -ENOMEM;
		}
		dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
			mem->vaddr, mem->size);
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
			__func__);
		return -EINVAL;
	}
	return 0;
}
/*
 * mmap() handler: allocate coherent DMA memory for @buf, remap it into
 * the caller's vma and attach the mapping bookkeeping structure.
 * Returns 0 on success or -ENOMEM on failure.
 */
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	int retval;
	unsigned long size;

	dev_dbg(q->dev, "%s\n", __func__);

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	buf->map = map;
	map->q = q;

	buf->baddr = vma->vm_start;

	mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	mem->size = PAGE_ALIGN(buf->bsize);
	mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
					&mem->dma_handle, GFP_KERNEL);
	if (!mem->vaddr) {
		dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
			mem->size);
		goto error;
	}
	dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
		mem->vaddr, mem->size);

	/* Try to remap memory */
	size = vma->vm_end - vma->vm_start;
	size = (size < mem->size) ? size : mem->size;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	retval = remap_pfn_range(vma, vma->vm_start,
				 mem->dma_handle >> PAGE_SHIFT,
				 size, vma->vm_page_prot);
	if (retval) {
		dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
		dma_free_coherent(q->dev, mem->size,
				  mem->vaddr, mem->dma_handle);
		/* don't leave a stale pointer for __videobuf_iolock() */
		mem->vaddr = NULL;
		goto error;
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)buf->bsize,
		vma->vm_pgoff, buf->i);

	videobuf_vm_open(vma);
	return 0;

error:
	/* undo the earlier assignment: buf->map must not point at the
	 * mapping we are about to free */
	buf->map = NULL;
	kfree(map);
	return -ENOMEM;
}
/* dma-contig implementation of the videobuf core operations */
static struct videobuf_qtype_ops qops = {
	.magic = MAGIC_QTYPE_OPS,
	.alloc_vb = __videobuf_alloc_vb,
	.iolock = __videobuf_iolock,
	.mmap_mapper = __videobuf_mmap_mapper,
	.vaddr = __videobuf_to_vaddr,
};
/*
 * Initialise @q as a videobuf queue backed by the dma-contig allocator;
 * thin wrapper around videobuf_queue_core_init() with our qops.
 */
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
struct videobuf_dma_contig_memory *mem = buf->priv;
BUG_ON(!mem);
MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
/*
 * Release @buf's backing memory where it is safe to do so: only memory
 * owned by the read() path (USERPTR without a user address) is freed
 * here; mmapped memory is freed from videobuf_vm_close() instead.
 */
void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;
	/* mmapped memory can't be freed here, otherwise mmapped region
	   would be released, while still needed. In this case, the memory
	   release should happen inside videobuf_vm_close().
	   So, it should free memory only if the memory were allocated for
	   read() operation.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;
	if (!mem)
		return;
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}
	/* read() method */
	if (mem->vaddr) {
		dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
		mem->vaddr = NULL;
	}
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
| gpl-2.0 |
gdetal/samsung_i9100_mptcp | drivers/media/video/videobuf-dma-contig.c | 7818 | 9705 | /*
* helper functions for physically contiguous capture buffers
*
* The functions support hardware lacking scatter gather support
* (i.e. the buffers must be linear in physical memory)
*
* Copyright (c) 2008 Magnus Damm
*
* Based on videobuf-vmalloc.c,
* (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <media/videobuf-dma-contig.h>
/* Per-buffer private data for the DMA-contig backend. */
struct videobuf_dma_contig_memory {
	u32 magic;		/* must be MAGIC_DC_MEM */
	void *vaddr;		/* kernel virtual address (NULL if unmapped) */
	dma_addr_t dma_handle;	/* bus address for the device */
	unsigned long size;	/* allocation size, page aligned */
};

#define MAGIC_DC_MEM 0x0733ac61

/*
 * Sanity check on the private-data magic; BUGs on mismatch.
 * Wrapped in do { } while (0) so the macro behaves as a single
 * statement (safe inside unbraced if/else).
 */
#define MAGIC_CHECK(is, should)						    \
	do {								    \
		if (unlikely((is) != (should))) {			    \
			pr_err("magic mismatch: %x expected %x\n",	    \
			       (is), (should));				    \
			BUG();						    \
		}							    \
	} while (0)
/*
 * vm_operations .open: take another reference on the videobuf mapping
 * (called on mmap and on every VMA duplication, e.g. fork).
 */
static void
videobuf_vm_open(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;

	dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count++;
}
/*
 * vm_operations .close: drop a reference on the videobuf mapping.  When
 * the last userspace mapping goes away, cancel any running stream and
 * free the DMA memory of every buffer tied to this mapping.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dev_dbg(q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				dma_free_coherent(q->dev, mem->size,
						  mem->vaddr, mem->dma_handle);
				mem->vaddr = NULL;
			}

			/* detach the buffer from the now-dead mapping */
			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}
}
/* VMA callbacks installed on every mmap of a dma-contig buffer. */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
};
/**
 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
 * @mem: per-buffer private videobuf-dma-contig data
 *
 * This function resets the user space pointer.  Nothing is freed here:
 * the pages belong to userspace, we only forget the bus address and
 * size recorded by videobuf_dma_contig_user_get().
 */
static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
{
	mem->dma_handle = 0;
	mem->size = 0;
}
/**
 * videobuf_dma_contig_user_get() - setup user space memory pointer
 * @mem: per-buffer private videobuf-dma-contig data
 * @vb: video buffer to map
 *
 * This function validates and sets up a pointer to user space memory.
 * Only physically contiguous pfn-mapped memory is accepted: the VMA is
 * walked page by page with follow_pfn() and every pfn must be exactly
 * one past the previous.
 *
 * Returns 0 if successful; -EINVAL if the range has no VMA or does not
 * fit, -EFAULT if the pages are not contiguous, or the follow_pfn()
 * error otherwise.
 */
static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
					struct videobuf_buffer *vb)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long prev_pfn, this_pfn;
	unsigned long pages_done, user_address;
	unsigned int offset;
	int ret;

	offset = vb->baddr & ~PAGE_MASK;
	mem->size = PAGE_ALIGN(vb->size + offset);
	ret = -EINVAL;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, vb->baddr);
	if (!vma)
		goto out_up;

	if ((vb->baddr + mem->size) > vma->vm_end)
		goto out_up;

	pages_done = 0;
	prev_pfn = 0; /* kill warning */
	user_address = vb->baddr;

	while (pages_done < (mem->size >> PAGE_SHIFT)) {
		ret = follow_pfn(vma, user_address, &this_pfn);
		if (ret)
			break;

		if (pages_done == 0)
			mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
		else if (this_pfn != (prev_pfn + 1))
			ret = -EFAULT;

		if (ret)
			break;

		prev_pfn = this_pfn;
		user_address += PAGE_SIZE;
		pages_done++;
	}

out_up:
	/* fix: this line was corrupted to "up_read(¤t->mm->mmap_sem)"
	 * by a broken HTML-entity conversion of "&current"; release the
	 * semaphore we took above. */
	up_read(&mm->mmap_sem);

	return ret;
}
/*
 * Allocate a videobuf_buffer of @size bytes with the dma-contig private
 * data appended in the same (zeroed) allocation.
 */
static struct videobuf_buffer *__videobuf_alloc_vb(size_t size)
{
	struct videobuf_buffer *vb;
	struct videobuf_dma_contig_memory *mem;

	vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
	if (!vb)
		return NULL;

	/* private data lives directly behind the buffer struct */
	mem = (struct videobuf_dma_contig_memory *)((char *)vb + size);
	vb->priv = mem;
	mem->magic = MAGIC_DC_MEM;

	return vb;
}
/*
 * Return the kernel virtual address of a buffer, or NULL if it has not
 * been allocated/mapped yet.  BUGs on missing or mismatched private data.
 */
static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->vaddr;
}
/*
 * Prepare a buffer's memory for I/O, per V4L2 memory type:
 *  - MMAP: memory must already exist (allocated by __videobuf_mmap_mapper)
 *  - USERPTR with baddr: validate/pin the user pointer
 *  - USERPTR without baddr: allocate coherent memory for the read() path
 * Returns 0 on success or a negative errno.
 */
static int __videobuf_iolock(struct videobuf_queue *q,
			     struct videobuf_buffer *vb,
			     struct v4l2_framebuffer *fbuf)
{
	struct videobuf_dma_contig_memory *mem = vb->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	switch (vb->memory) {
	case V4L2_MEMORY_MMAP:
		dev_dbg(q->dev, "%s memory method MMAP\n", __func__);

		/* All handling should be done by __videobuf_mmap_mapper() */
		if (!mem->vaddr) {
			dev_err(q->dev, "memory is not alloced/mmapped.\n");
			return -EINVAL;
		}
		break;
	case V4L2_MEMORY_USERPTR:
		dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);

		/* handle pointer from user space */
		if (vb->baddr)
			return videobuf_dma_contig_user_get(mem, vb);

		/* allocate memory for the read() method */
		mem->size = PAGE_ALIGN(vb->size);
		mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
						&mem->dma_handle, GFP_KERNEL);
		if (!mem->vaddr) {
			dev_err(q->dev, "dma_alloc_coherent %ld failed\n",
				mem->size);
			return -ENOMEM;
		}

		dev_dbg(q->dev, "dma_alloc_coherent data is at %p (%ld)\n",
			mem->vaddr, mem->size);
		break;
	case V4L2_MEMORY_OVERLAY:
	default:
		dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n",
			__func__);
		return -EINVAL;
	}

	return 0;
}
/*
 * mmap handler for MMAP buffers: allocate coherent DMA memory for the
 * buffer and remap it into the caller's VMA.
 *
 * Fixes over the previous version:
 *  - the error path always returned -ENOMEM, even when remap_pfn_range()
 *    failed with a different code; the real error is now propagated;
 *  - buf->map/baddr were left pointing at the freed mapping on failure;
 *    they are now reset, and mem->vaddr is cleared after the free so
 *    later paths cannot double-free.
 */
static int __videobuf_mmap_mapper(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  struct vm_area_struct *vma)
{
	struct videobuf_dma_contig_memory *mem;
	struct videobuf_mapping *map;
	int retval;
	unsigned long size;

	dev_dbg(q->dev, "%s\n", __func__);

	/* create mapping + update buffer list */
	map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	buf->map = map;
	map->q = q;

	buf->baddr = vma->vm_start;

	mem = buf->priv;
	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	mem->size = PAGE_ALIGN(buf->bsize);
	mem->vaddr = dma_alloc_coherent(q->dev, mem->size,
					&mem->dma_handle, GFP_KERNEL);
	if (!mem->vaddr) {
		dev_err(q->dev, "dma_alloc_coherent size %ld failed\n",
			mem->size);
		retval = -ENOMEM;
		goto error;
	}
	dev_dbg(q->dev, "dma_alloc_coherent data is at addr %p (size %ld)\n",
		mem->vaddr, mem->size);

	/* Try to remap memory */
	size = vma->vm_end - vma->vm_start;
	size = (size < mem->size) ? size : mem->size;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	retval = remap_pfn_range(vma, vma->vm_start,
				 mem->dma_handle >> PAGE_SHIFT,
				 size, vma->vm_page_prot);
	if (retval) {
		dev_err(q->dev, "mmap: remap failed with error %d. ", retval);
		dma_free_coherent(q->dev, mem->size,
				  mem->vaddr, mem->dma_handle);
		mem->vaddr = NULL;
		goto error;
	}

	vma->vm_ops = &videobuf_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND;
	vma->vm_private_data = map;

	dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
		map, q, vma->vm_start, vma->vm_end,
		(long int)buf->bsize,
		vma->vm_pgoff, buf->i);

	videobuf_vm_open(vma);

	return 0;

error:
	/* undo the partial setup so the buffer does not keep a dangling
	 * pointer to the freed mapping */
	buf->map = NULL;
	buf->baddr = 0;
	kfree(map);
	return retval;
}
/*
 * Queue-type operations for DMA-contiguous buffers; plugged into the
 * videobuf core by videobuf_queue_dma_contig_init() below.
 */
static struct videobuf_qtype_ops qops = {
	.magic = MAGIC_QTYPE_OPS,
	.alloc_vb = __videobuf_alloc_vb,
	.iolock = __videobuf_iolock,
	.mmap_mapper = __videobuf_mmap_mapper,
	.vaddr = __videobuf_to_vaddr,
};
/*
 * videobuf_queue_dma_contig_init - initialize a videobuf queue for
 * physically contiguous (DMA-coherent) buffers.
 *
 * Thin wrapper: forwards all arguments to videobuf_queue_core_init(),
 * supplying this module's &qops as the memory-handling backend.
 */
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
/*
 * videobuf_to_dma_contig - return the bus/DMA address of a buffer.
 *
 * BUGs if the buffer has no dma-contig private data or the magic does
 * not match (i.e. the buffer was not allocated by this backend).
 */
dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	BUG_ON(!mem);
	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	return mem->dma_handle;
}
EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
/*
 * videobuf_dma_contig_free - release memory backing a USERPTR buffer.
 * Mmapped buffers are intentionally skipped; see the comment below.
 */
void videobuf_dma_contig_free(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	struct videobuf_dma_contig_memory *mem = buf->priv;

	/* mmapped memory can't be freed here, otherwise mmapped region
	   would be released, while still needed. In this case, the memory
	   release should happen inside videobuf_vm_close().
	   So, it should free memory only if the memory were allocated for
	   read() operation.
	 */
	if (buf->memory != V4L2_MEMORY_USERPTR)
		return;

	if (!mem)
		return;

	MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

	/* handle user space pointer case */
	if (buf->baddr) {
		videobuf_dma_contig_user_put(mem);
		return;
	}

	/* read() method: free the coherent allocation made in iolock */
	if (mem->vaddr) {
		dma_free_coherent(q->dev, mem->size, mem->vaddr, mem->dma_handle);
		mem->vaddr = NULL;
	}
}
EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
MODULE_AUTHOR("Magnus Damm");
MODULE_LICENSE("GPL");
| gpl-2.0 |
alyubomirov/Amlogic_s905-kernel | arch/sh/kernel/crash_dump.c | 11914 | 1310 | /*
* crash_dump.c - Memory preserving reboot related code.
*
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
* Copyright (C) IBM Corporation, 2004. All rights reserved
*/
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <asm/uaccess.h>
/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
* @buf: target memory address for the copy; this can be in kernel address
* space or user address space (see @userbuf)
* @csize: number of bytes to copy
* @offset: offset in bytes into the page (based on pfn) to begin the copy
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
/*
 * Copy @csize bytes at @offset within page @pfn of the old (crashed)
 * kernel's memory into @buf, via a temporary ioremap of the page.
 *
 * Fix: the ioremap() return value was previously used unchecked; a
 * mapping failure would have dereferenced NULL.  Return -ENOMEM instead.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!vaddr)
		return -ENOMEM;

	if (userbuf) {
		/* destination is a user buffer: may fault */
		if (copy_to_user(buf, (vaddr + offset), csize)) {
			iounmap(vaddr);
			return -EFAULT;
		}
	} else {
		memcpy(buf, (vaddr + offset), csize);
	}

	iounmap(vaddr);
	return csize;
}
| gpl-2.0 |
kozmikkick/tripndroid-endeavoru-3.6 | arch/sh/kernel/crash_dump.c | 11914 | 1310 | /*
* crash_dump.c - Memory preserving reboot related code.
*
* Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
* Copyright (C) IBM Corporation, 2004. All rights reserved
*/
#include <linux/errno.h>
#include <linux/crash_dump.h>
#include <linux/io.h>
#include <asm/uaccess.h>
/**
* copy_oldmem_page - copy one page from "oldmem"
* @pfn: page frame number to be copied
* @buf: target memory address for the copy; this can be in kernel address
* space or user address space (see @userbuf)
* @csize: number of bytes to copy
* @offset: offset in bytes into the page (based on pfn) to begin the copy
* @userbuf: if set, @buf is in user address space, use copy_to_user(),
* otherwise @buf is in kernel address space, use memcpy().
*
* Copy a page from "oldmem". For this page, there is no pte mapped
* in the current kernel. We stitch up a pte, similar to kmap_atomic.
*/
/*
 * Copy @csize bytes at @offset within page @pfn of the old (crashed)
 * kernel's memory into @buf, via a temporary ioremap of the page.
 *
 * Fix: the ioremap() return value was previously used unchecked; a
 * mapping failure would have dereferenced NULL.  Return -ENOMEM instead.
 */
ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
			 size_t csize, unsigned long offset, int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!vaddr)
		return -ENOMEM;

	if (userbuf) {
		/* destination is a user buffer: may fault */
		if (copy_to_user(buf, (vaddr + offset), csize)) {
			iounmap(vaddr);
			return -EFAULT;
		}
	} else {
		memcpy(buf, (vaddr + offset), csize);
	}

	iounmap(vaddr);
	return csize;
}
| gpl-2.0 |
bju2000/android_kernel_samsung_slteskt | drivers/atm/solos-attrlist.c | 12938 | 2279 | SOLOS_ATTR_RO(DriverVersion)
/*
 * X-macro attribute table for the Solos ADSL2+ PCI card: each line
 * declares one sysfs attribute, read-only (RO) or read-write (RW).
 * This file is #included with SOLOS_ATTR_RO/RW defined by the includer;
 * the commented-out entries are attributes deliberately not exported.
 */
SOLOS_ATTR_RO(APIVersion)
SOLOS_ATTR_RO(FirmwareVersion)
SOLOS_ATTR_RO(Version)
// SOLOS_ATTR_RO(DspVersion)
// SOLOS_ATTR_RO(CommonHandshake)
SOLOS_ATTR_RO(Connected)
SOLOS_ATTR_RO(OperationalMode)
SOLOS_ATTR_RO(State)
SOLOS_ATTR_RO(Watchdog)
SOLOS_ATTR_RO(OperationProgress)
SOLOS_ATTR_RO(LastFailed)
SOLOS_ATTR_RO(TxBitRate)
SOLOS_ATTR_RO(RxBitRate)
// SOLOS_ATTR_RO(DeltACTATPds)
// SOLOS_ATTR_RO(DeltACTATPus)
SOLOS_ATTR_RO(TxATTNDR)
SOLOS_ATTR_RO(RxATTNDR)
SOLOS_ATTR_RO(AnnexType)
SOLOS_ATTR_RO(GeneralFailure)
SOLOS_ATTR_RO(InterleaveDpDn)
SOLOS_ATTR_RO(InterleaveDpUp)
SOLOS_ATTR_RO(RSCorrectedErrorsDn)
SOLOS_ATTR_RO(RSUnCorrectedErrorsDn)
SOLOS_ATTR_RO(RSCorrectedErrorsUp)
SOLOS_ATTR_RO(RSUnCorrectedErrorsUp)
SOLOS_ATTR_RO(InterleaveRDn)
SOLOS_ATTR_RO(InterleaveRUp)
SOLOS_ATTR_RO(BisRDn)
SOLOS_ATTR_RO(BisRUp)
SOLOS_ATTR_RO(INPdown)
SOLOS_ATTR_RO(INPup)
SOLOS_ATTR_RO(ShowtimeStart)
SOLOS_ATTR_RO(ATURVendor)
SOLOS_ATTR_RO(ATUCCountry)
SOLOS_ATTR_RO(ATURANSIRev)
SOLOS_ATTR_RO(ATURANSISTD)
SOLOS_ATTR_RO(ATUCANSIRev)
SOLOS_ATTR_RO(ATUCANSIId)
SOLOS_ATTR_RO(ATUCANSISTD)
SOLOS_ATTR_RO(DataBoost)
SOLOS_ATTR_RO(LocalITUCountryCode)
SOLOS_ATTR_RO(LocalSEF)
SOLOS_ATTR_RO(LocalEndLOS)
SOLOS_ATTR_RO(LocalSNRMargin)
SOLOS_ATTR_RO(LocalLineAttn)
SOLOS_ATTR_RO(RawAttn)
SOLOS_ATTR_RO(LocalTxPower)
SOLOS_ATTR_RO(RemoteTxPower)
SOLOS_ATTR_RO(RemoteSEF)
SOLOS_ATTR_RO(RemoteLOS)
SOLOS_ATTR_RO(RemoteLineAttn)
SOLOS_ATTR_RO(RemoteSNRMargin)
SOLOS_ATTR_RO(LineUpCount)
SOLOS_ATTR_RO(SRACnt)
SOLOS_ATTR_RO(SRACntUp)
SOLOS_ATTR_RO(ProfileStatus)
SOLOS_ATTR_RW(Action)
SOLOS_ATTR_RW(ActivateLine)
SOLOS_ATTR_RO(LineStatus)
SOLOS_ATTR_RW(HostControl)
SOLOS_ATTR_RW(AutoStart)
SOLOS_ATTR_RW(Failsafe)
SOLOS_ATTR_RW(ShowtimeLed)
SOLOS_ATTR_RW(Retrain)
SOLOS_ATTR_RW(Defaults)
SOLOS_ATTR_RW(LineMode)
SOLOS_ATTR_RW(Profile)
SOLOS_ATTR_RW(DetectNoise)
SOLOS_ATTR_RW(BisAForceSNRMarginDn)
SOLOS_ATTR_RW(BisMForceSNRMarginDn)
SOLOS_ATTR_RW(BisAMaxMargin)
SOLOS_ATTR_RW(BisMMaxMargin)
SOLOS_ATTR_RW(AnnexAForceSNRMarginDn)
SOLOS_ATTR_RW(AnnexAMaxMargin)
SOLOS_ATTR_RW(AnnexMMaxMargin)
SOLOS_ATTR_RO(SupportedAnnexes)
SOLOS_ATTR_RO(Status)
SOLOS_ATTR_RO(TotalStart)
SOLOS_ATTR_RO(RecentShowtimeStart)
SOLOS_ATTR_RO(TotalRxBlocks)
SOLOS_ATTR_RO(TotalTxBlocks)
| gpl-2.0 |
DirtyUnicorns/android_kernel_motorola_msm8960dt-common | drivers/video/geode/video_gx.c | 13962 | 10122 | /*
* Geode GX video processor device.
*
* Copyright (C) 2006 Arcom Control Systems Ltd.
*
* Portions from AMD's original 2.4 driver:
* Copyright (C) 2004 Advanced Micro Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/fb.h>
#include <linux/delay.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/msr.h>
#include <linux/cs5535.h>
#include "gxfb.h"
/*
* Tables of register settings for various DOTCLKs.
*/
/*
 * One DOTPLL setting: the pixel clock period it produces, plus the
 * divider bits to merge into MSR_GLCP_SYS_RSTPLL and the raw value for
 * the upper half of the MSR_GLCP_DOTPLL register.
 */
struct gx_pll_entry {
	long pixclock; /* ps */
	u32 sys_rstpll_bits;
	u32 dotpll_value;
};

#define POSTDIV3 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3)
#define PREMULT2 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPREMULT2)
/* Fix: PREDIV2 previously expanded to ..._DOTPOSTDIV3 (copy/paste
 * typo); it must name the pre-divide-by-2 bit. */
#define PREDIV2 ((u32)MSR_GLCP_SYS_RSTPLL_DOTPREDIV2)
/*
 * DOTPLL settings for parts clocked from a 48 MHz reference.
 * Entries are sorted by decreasing pixclock period (= increasing dot
 * clock frequency, given in MHz in the trailing comments) so the
 * closest-match search in gx_set_dclk_frequency() scans them in order.
 */
static const struct gx_pll_entry gx_pll_table_48MHz[] = {
	{ 40123, POSTDIV3,	    0x00000BF2 },	/*  24.9230 */
	{ 39721, 0,		    0x00000037 },	/*  25.1750 */
	{ 35308, POSTDIV3|PREMULT2, 0x00000B1A },	/*  28.3220 */
	{ 31746, POSTDIV3,	    0x000002D2 },	/*  31.5000 */
	{ 27777, POSTDIV3|PREMULT2, 0x00000FE2 },	/*  36.0000 */
	{ 26666, POSTDIV3,	    0x0000057A },	/*  37.5000 */
	{ 25000, POSTDIV3,	    0x0000030A },	/*  40.0000 */
	{ 22271, 0,		    0x00000063 },	/*  44.9000 */
	{ 20202, 0,		    0x0000054B },	/*  49.5000 */
	{ 20000, 0,		    0x0000026E },	/*  50.0000 */
	{ 19860, PREMULT2,	    0x00000037 },	/*  50.3500 */
	{ 18518, POSTDIV3|PREMULT2, 0x00000B0D },	/*  54.0000 */
	{ 17777, 0,		    0x00000577 },	/*  56.2500 */
	{ 17733, 0,		    0x000007F7 },	/*  56.3916 */
	{ 17653, 0,		    0x0000057B },	/*  56.6444 */
	{ 16949, PREMULT2,	    0x00000707 },	/*  59.0000 */
	{ 15873, POSTDIV3|PREMULT2, 0x00000B39 },	/*  63.0000 */
	{ 15384, POSTDIV3|PREMULT2, 0x00000B45 },	/*  65.0000 */
	{ 14814, POSTDIV3|PREMULT2, 0x00000FC1 },	/*  67.5000 */
	{ 14124, POSTDIV3,	    0x00000561 },	/*  70.8000 */
	{ 13888, POSTDIV3,	    0x000007E1 },	/*  72.0000 */
	{ 13426, PREMULT2,	    0x00000F4A },	/*  74.4810 */
	{ 13333, 0,		    0x00000052 },	/*  75.0000 */
	{ 12698, 0,		    0x00000056 },	/*  78.7500 */
	{ 12500, POSTDIV3|PREMULT2, 0x00000709 },	/*  80.0000 */
	{ 11135, PREMULT2,	    0x00000262 },	/*  89.8000 */
	{ 10582, 0,		    0x000002D2 },	/*  94.5000 */
	{ 10101, PREMULT2,	    0x00000B4A },	/*  99.0000 */
	{ 10000, PREMULT2,	    0x00000036 },	/* 100.0000 */
	{  9259, 0,		    0x000007E2 },	/* 108.0000 */
	{  8888, 0,		    0x000007F6 },	/* 112.5000 */
	{  7692, POSTDIV3|PREMULT2, 0x00000FB0 },	/* 130.0000 */
	{  7407, POSTDIV3|PREMULT2, 0x00000B50 },	/* 135.0000 */
	{  6349, 0,		    0x00000055 },	/* 157.5000 */
	{  6172, 0,		    0x000009C1 },	/* 162.0000 */
	{  5787, PREMULT2,	    0x0000002D },	/* 172.798  */
	{  5698, 0,		    0x000002C1 },	/* 175.5000 */
	{  5291, 0,		    0x000002D1 },	/* 189.0000 */
	{  4938, 0,		    0x00000551 },	/* 202.5000 */
	{  4357, 0,		    0x0000057D },	/* 229.5000 */
};

/*
 * DOTPLL settings for Rev. 1 parts clocked from a 14 MHz reference
 * (see the cpu_data check in gx_set_dclk_frequency()); same ordering
 * convention as the 48 MHz table above.
 */
static const struct gx_pll_entry gx_pll_table_14MHz[] = {
	{ 39721, 0, 0x00000037 },	/*  25.1750 */
	{ 35308, 0, 0x00000B7B },	/*  28.3220 */
	{ 31746, 0, 0x000004D3 },	/*  31.5000 */
	{ 27777, 0, 0x00000BE3 },	/*  36.0000 */
	{ 26666, 0, 0x0000074F },	/*  37.5000 */
	{ 25000, 0, 0x0000050B },	/*  40.0000 */
	{ 22271, 0, 0x00000063 },	/*  44.9000 */
	{ 20202, 0, 0x0000054B },	/*  49.5000 */
	{ 20000, 0, 0x0000026E },	/*  50.0000 */
	{ 19860, 0, 0x000007C3 },	/*  50.3500 */
	{ 18518, 0, 0x000007E3 },	/*  54.0000 */
	{ 17777, 0, 0x00000577 },	/*  56.2500 */
	{ 17733, 0, 0x000002FB },	/*  56.3916 */
	{ 17653, 0, 0x0000057B },	/*  56.6444 */
	{ 16949, 0, 0x0000058B },	/*  59.0000 */
	{ 15873, 0, 0x0000095E },	/*  63.0000 */
	{ 15384, 0, 0x0000096A },	/*  65.0000 */
	{ 14814, 0, 0x00000BC2 },	/*  67.5000 */
	{ 14124, 0, 0x0000098A },	/*  70.8000 */
	{ 13888, 0, 0x00000BE2 },	/*  72.0000 */
	{ 13333, 0, 0x00000052 },	/*  75.0000 */
	{ 12698, 0, 0x00000056 },	/*  78.7500 */
	{ 12500, 0, 0x0000050A },	/*  80.0000 */
	{ 11135, 0, 0x0000078E },	/*  89.8000 */
	{ 10582, 0, 0x000002D2 },	/*  94.5000 */
	{ 10101, 0, 0x000011F6 },	/*  99.0000 */
	{ 10000, 0, 0x0000054E },	/* 100.0000 */
	{  9259, 0, 0x000007E2 },	/* 108.0000 */
	{  8888, 0, 0x000002FA },	/* 112.5000 */
	{  7692, 0, 0x00000BB1 },	/* 130.0000 */
	{  7407, 0, 0x00000975 },	/* 135.0000 */
	{  6349, 0, 0x00000055 },	/* 157.5000 */
	{  6172, 0, 0x000009C1 },	/* 162.0000 */
	{  5698, 0, 0x000002C1 },	/* 175.5000 */
	{  5291, 0, 0x00000539 },	/* 189.0000 */
	{  4938, 0, 0x00000551 },	/* 202.5000 */
	{  4357, 0, 0x0000057D },	/* 229.5000 */
};
/*
 * Program the dot clock PLL for the pixclock requested in info->var:
 * pick the closest table entry, write the new PLL value with the reset
 * bit held, program the dividers, then release reset and wait (bounded
 * by a retry counter) for the PLL to report lock.
 */
void gx_set_dclk_frequency(struct fb_info *info)
{
	const struct gx_pll_entry *pll_table;
	int pll_table_len;
	int i, best_i;
	long min, diff;
	u64 dotpll, sys_rstpll;
	int timeout = 1000;	/* bound the lock-wait loop below */

	/* Rev. 1 Geode GXs use a 14 MHz reference clock instead of 48 MHz. */
	if (cpu_data(0).x86_mask == 1) {
		pll_table = gx_pll_table_14MHz;
		pll_table_len = ARRAY_SIZE(gx_pll_table_14MHz);
	} else {
		pll_table = gx_pll_table_48MHz;
		pll_table_len = ARRAY_SIZE(gx_pll_table_48MHz);
	}

	/* Search the table for the closest pixclock. */
	best_i = 0;
	min = abs(pll_table[0].pixclock - info->var.pixclock);
	for (i = 1; i < pll_table_len; i++) {
		diff = abs(pll_table[i].pixclock - info->var.pixclock);
		if (diff < min) {
			min = diff;
			best_i = i;
		}
	}

	rdmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll);
	rdmsrl(MSR_GLCP_DOTPLL, dotpll);

	/* Program new M, N and P. */
	dotpll &= 0x00000000ffffffffull;
	dotpll |= (u64)pll_table[best_i].dotpll_value << 32;
	dotpll |= MSR_GLCP_DOTPLL_DOTRESET;
	dotpll &= ~MSR_GLCP_DOTPLL_BYPASS;

	wrmsrl(MSR_GLCP_DOTPLL, dotpll);

	/* Program dividers. */
	sys_rstpll &= ~( MSR_GLCP_SYS_RSTPLL_DOTPREDIV2
			 | MSR_GLCP_SYS_RSTPLL_DOTPREMULT2
			 | MSR_GLCP_SYS_RSTPLL_DOTPOSTDIV3 );
	sys_rstpll |= pll_table[best_i].sys_rstpll_bits;

	wrmsrl(MSR_GLCP_SYS_RSTPLL, sys_rstpll);

	/* Clear reset bit to start PLL. */
	dotpll &= ~(MSR_GLCP_DOTPLL_DOTRESET);
	wrmsrl(MSR_GLCP_DOTPLL, dotpll);

	/* Wait for LOCK bit. */
	do {
		rdmsrl(MSR_GLCP_DOTPLL, dotpll);
	} while (timeout-- && !(dotpll & MSR_GLCP_DOTPLL_LOCK));
}
/*
 * Configure the flat-panel (TFT) output: route the pads to TFT, blank
 * the panel while reprogramming its timing/polarity/dither registers,
 * enable panel power and data in the display config, then unblank.
 */
static void
gx_configure_tft(struct fb_info *info)
{
	struct gxfb_par *par = info->par;
	unsigned long val;
	unsigned long fp;

	/* Set up the DF pad select MSR */
	rdmsrl(MSR_GX_MSR_PADSEL, val);
	val &= ~MSR_GX_MSR_PADSEL_MASK;
	val |= MSR_GX_MSR_PADSEL_TFT;
	wrmsrl(MSR_GX_MSR_PADSEL, val);

	/* Turn off the panel */
	fp = read_fp(par, FP_PM);
	fp &= ~FP_PM_P;
	write_fp(par, FP_PM, fp);

	/* Set timing 1 */
	fp = read_fp(par, FP_PT1);
	fp &= FP_PT1_VSIZE_MASK;
	fp |= info->var.yres << FP_PT1_VSIZE_SHIFT;
	write_fp(par, FP_PT1, fp);

	/* Timing 2 */
	/* Set bits that are always on for TFT */
	fp = 0x0F100000;

	/* Configure sync polarity (panel syncs are active-low when the
	 * FB_SYNC_*_HIGH_ACT flag is absent) */
	if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
		fp |= FP_PT2_VSP;

	if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
		fp |= FP_PT2_HSP;

	write_fp(par, FP_PT2, fp);

	/* Set the dither control */
	write_fp(par, FP_DFC, FP_DFC_NFI);

	/* Enable the FP data and power (in case the BIOS didn't) */
	fp = read_vp(par, VP_DCFG);
	fp |= VP_DCFG_FP_PWR_EN | VP_DCFG_FP_DATA_EN;
	write_vp(par, VP_DCFG, fp);

	/* Unblank the panel */
	fp = read_fp(par, FP_PM);
	fp |= FP_PM_P;
	write_fp(par, FP_PM, fp);
}
/*
 * Program the video processor display configuration for the current
 * mode: sync enables/polarities, DAC power (CRT vs flat panel), and
 * hand off to gx_configure_tft() when driving a panel.
 */
void gx_configure_display(struct fb_info *info)
{
	struct gxfb_par *par = info->par;
	u32 dcfg, misc;

	/* Write the display configuration */
	dcfg = read_vp(par, VP_DCFG);

	/* Disable hsync and vsync */
	dcfg &= ~(VP_DCFG_VSYNC_EN | VP_DCFG_HSYNC_EN);
	write_vp(par, VP_DCFG, dcfg);

	/* Clear bits from existing mode. */
	dcfg &= ~(VP_DCFG_CRT_SYNC_SKW
		  | VP_DCFG_CRT_HSYNC_POL | VP_DCFG_CRT_VSYNC_POL
		  | VP_DCFG_VSYNC_EN | VP_DCFG_HSYNC_EN);

	/* Set default sync skew. */
	dcfg |= VP_DCFG_CRT_SYNC_SKW_DEFAULT;

	/* Enable hsync and vsync. */
	dcfg |= VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN;

	misc = read_vp(par, VP_MISC);

	/* Disable gamma correction
	 * NOTE(review): setting GAM_EN here is described as *disabling*
	 * gamma, i.e. the bit presumably bypasses the gamma RAM - confirm
	 * against the GX datasheet. */
	misc |= VP_MISC_GAM_EN;

	if (par->enable_crt) {

		/* Power up the CRT DACs */
		misc &= ~(VP_MISC_APWRDN | VP_MISC_DACPWRDN);
		write_vp(par, VP_MISC, misc);

		/* Only change the sync polarities if we are running
		 * in CRT mode. The FP polarities will be handled in
		 * gxfb_configure_tft */
		if (!(info->var.sync & FB_SYNC_HOR_HIGH_ACT))
			dcfg |= VP_DCFG_CRT_HSYNC_POL;
		if (!(info->var.sync & FB_SYNC_VERT_HIGH_ACT))
			dcfg |= VP_DCFG_CRT_VSYNC_POL;
	} else {
		/* Power down the CRT DACs if in FP mode */
		misc |= (VP_MISC_APWRDN | VP_MISC_DACPWRDN);
		write_vp(par, VP_MISC, misc);
	}

	/* Enable the display logic */
	/* Set up the DACS to blank normally */
	dcfg |= VP_DCFG_CRT_EN | VP_DCFG_DAC_BL_EN;

	/* Enable the external DAC VREF? */

	write_vp(par, VP_DCFG, dcfg);

	/* Set up the flat panel (if it is enabled) */
	if (par->enable_crt == 0)
		gx_configure_tft(info);
}
/*
 * fb blank hook: translate an FB_BLANK_* mode into the set of display
 * configuration enable bits (DAC blanking, hsync, vsync, CRT output)
 * and, for flat panels, power the panel down only on full powerdown.
 * Returns 0 on success or -EINVAL for an unknown blank mode.
 */
int gx_blank_display(struct fb_info *info, int blank_mode)
{
	struct gxfb_par *par = info->par;
	u32 dcfg, fp_pm;
	u32 enable_bits;

	/* Map the requested CRT power-saving mode to VP_DCFG bits. */
	switch (blank_mode) {
	case FB_BLANK_UNBLANK:
		enable_bits = VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN |
			      VP_DCFG_VSYNC_EN | VP_DCFG_CRT_EN;
		break;
	case FB_BLANK_NORMAL:
		/* blank the DACs but keep syncs and CRT running */
		enable_bits = VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN |
			      VP_DCFG_CRT_EN;
		break;
	case FB_BLANK_VSYNC_SUSPEND:
		enable_bits = VP_DCFG_HSYNC_EN | VP_DCFG_CRT_EN;
		break;
	case FB_BLANK_HSYNC_SUSPEND:
		enable_bits = VP_DCFG_VSYNC_EN | VP_DCFG_CRT_EN;
		break;
	case FB_BLANK_POWERDOWN:
		enable_bits = 0;
		break;
	default:
		return -EINVAL;
	}

	dcfg = read_vp(par, VP_DCFG);
	dcfg &= ~(VP_DCFG_DAC_BL_EN | VP_DCFG_HSYNC_EN | VP_DCFG_VSYNC_EN |
		  VP_DCFG_CRT_EN);
	dcfg |= enable_bits;
	write_vp(par, VP_DCFG, dcfg);

	/* Power on/off flat panel. */
	if (par->enable_crt == 0) {
		fp_pm = read_fp(par, FP_PM);
		if (blank_mode == FB_BLANK_POWERDOWN)
			fp_pm &= ~FP_PM_P;
		else
			fp_pm |= FP_PM_P;
		write_fp(par, FP_PM, fp_pm);
	}

	return 0;
}
| gpl-2.0 |
turl/zeppelin_kernel | arch/mips/kernel/cevt-smtc.c | 139 | 8897 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
/*
* Variant clock event timer support for SMTC on MIPS 34K, 1004K
* or other MIPS MT cores.
*
* Notes on SMTC Support:
*
* SMTC has multiple microthread TCs pretending to be Linux CPUs.
* But there's only one Count/Compare pair per VPE, and Compare
* interrupts are taken opportunisitically by available TCs
* bound to the VPE with the Count register. The new timer
* framework provides for global broadcasts, but we really
* want VPE-level multicasts for best behavior. So instead
* of invoking the high-level clock-event broadcast code,
* this version of SMTC support uses the historical SMTC
* multicast mechanisms "under the hood", appearing to the
* generic clock layer as if the interrupts are per-CPU.
*
* The approach taken here is to maintain a set of NR_CPUS
* virtual timers, and track which "CPU" needs to be alerted
* at each event.
*
* It's unlikely that we'll see a MIPS MT core with more than
* 2 VPEs, but we *know* that we won't need to handle more
* VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
* is always going to be overkill, but always going to be enough.
*/
/* Per-[vpe][cpu] absolute Count timestamp of the pending event; 0 means
 * "no event pending" (see MAKEVALID/ISVALID below). */
unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
/* For each VPE, the CPU whose pending event is soonest (and therefore
 * currently owns the shared Compare register). */
static int smtc_nextinvpe[NR_CPUS];
/*
* Timestamps stored are absolute values to be programmed
* into Count register. Valid timestamps will never be zero.
* If a Zero Count value is actually calculated, it is converted
* to be a 1, which will introduce 1 or two CPU cycles of error
* roughly once every four billion events, which at 1000 HZ means
* about once every 50 days. If that's actually a problem, one
* could alternate squashing 0 to 1 and to -1.
*/
#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
#define ISVALID(x) ((x) != 0L)
/*
* Time comparison is subtle, as it's really truncated
* modular arithmetic.
*/
#define IS_SOONER(a, b, reference) \
(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
/*
* CATCHUP_INCREMENT, used when the function falls behind the counter.
* Could be an increasing function instead of a constant;
*/
#define CATCHUP_INCREMENT 64
/*
 * clock_event .set_next_event for SMTC: record this CPU's requested
 * expiry in its per-VPE virtual timer slot and, if it becomes (or
 * affects) the soonest event on the VPE, reprogram the shared Compare
 * register.  All comparisons use truncated modular arithmetic relative
 * to the current Count value (IS_SOONER).  Runs with interrupts off and
 * multithreading disabled (dmt/emt) to serialize access to the shared
 * per-VPE state.  Always returns 0; @evt is unused.
 */
static int mips_next_event(unsigned long delta,
			   struct clock_event_device *evt)
{
	unsigned long flags;
	unsigned int mtflags;
	unsigned long timestamp, reference, previous;
	unsigned long nextcomp = 0L;
	int vpe = current_cpu_data.vpe_id;
	int cpu = smp_processor_id();
	local_irq_save(flags);
	mtflags = dmt();

	/*
	 * Maintain the per-TC virtual timer
	 * and program the per-VPE shared Count register
	 * as appropriate here...
	 */
	reference = (unsigned long)read_c0_count();
	timestamp = MAKEVALID(reference + delta);
	/*
	 * To really model the clock, we have to catch the case
	 * where the current next-in-VPE timestamp is the old
	 * timestamp for the calling CPE, but the new value is
	 * in fact later. In that case, we have to do a full
	 * scan and discover the new next-in-VPE CPU id and
	 * timestamp.
	 */
	previous = smtc_nexttime[vpe][cpu];
	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
	    && IS_SOONER(previous, timestamp, reference)) {
		int i;
		int soonest = cpu;

		/*
		 * Update timestamp array here, so that new
		 * value gets considered along with those of
		 * other virtual CPUs on the VPE.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
		for_each_online_cpu(i) {
			if (ISVALID(smtc_nexttime[vpe][i])
			    && IS_SOONER(smtc_nexttime[vpe][i],
					 smtc_nexttime[vpe][soonest], reference)) {
				soonest = i;
			}
		}
		smtc_nextinvpe[vpe] = soonest;
		nextcomp = smtc_nexttime[vpe][soonest];
		/*
		 * Otherwise, we don't have to process the whole array rank,
		 * we just have to see if the event horizon has gotten closer.
		 */
	} else {
		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
		    IS_SOONER(timestamp,
			      smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
			smtc_nextinvpe[vpe] = cpu;
			nextcomp = timestamp;
		}
		/*
		 * Since next-in-VPE may me the same as the executing
		 * virtual CPU, we update the array *after* checking
		 * its value.
		 */
		smtc_nexttime[vpe][cpu] = timestamp;
	}
	/*
	 * It may be that, in fact, we don't need to update Compare,
	 * but if we do, we want to make sure we didn't fall into
	 * a crack just behind Count.
	 */
	if (ISVALID(nextcomp)) {
		write_c0_compare(nextcomp);
		ehb();
		/*
		 * We never return an error, we just make sure
		 * that we trigger the handlers as quickly as
		 * we can if we fell behind.
		 */
		while ((nextcomp - (unsigned long)read_c0_count())
		       > (unsigned long)LONG_MAX) {
			nextcomp += CATCHUP_INCREMENT;
			write_c0_compare(nextcomp);
			ehb();
		}
	}
	emt(mtflags);
	local_irq_restore(flags);
	return 0;
}
/*
 * Dispatch expired virtual timers on @vpe: for every online CPU bound
 * to this VPE whose recorded timestamp is now in the past, clear the
 * slot and deliver the tick (IPI for other TCs, direct event_handler
 * call for ourselves).  While scanning, also track the soonest future
 * event so Compare can be reprogrammed; if that deadline has already
 * passed by the time it is written, rescan from the top.
 */
void smtc_distribute_timer(int vpe)
{
	unsigned long flags;
	unsigned int mtflags;
	int cpu;
	struct clock_event_device *cd;
	unsigned long nextstamp = 0L;
	unsigned long reference;

repeat:
	for_each_online_cpu(cpu) {
		/*
		 * Find virtual CPUs within the current VPE who have
		 * unserviced timer requests whose time is now past.
		 */
		local_irq_save(flags);
		mtflags = dmt();
		if (cpu_data[cpu].vpe_id == vpe &&
		    ISVALID(smtc_nexttime[vpe][cpu])) {
			reference = (unsigned long)read_c0_count();
			if ((smtc_nexttime[vpe][cpu] - reference)
			    > (unsigned long)LONG_MAX) {
				smtc_nexttime[vpe][cpu] = 0L;
				emt(mtflags);
				local_irq_restore(flags);
				/*
				 * We don't send IPIs to ourself.
				 */
				if (cpu != smp_processor_id()) {
					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
				} else {
					cd = &per_cpu(mips_clockevent_device, cpu);
					cd->event_handler(cd);
				}
			} else {
				/* Local to VPE but Valid Time not yet reached. */
				if (!ISVALID(nextstamp) ||
				    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
					      reference)) {
					smtc_nextinvpe[vpe] = cpu;
					nextstamp = smtc_nexttime[vpe][cpu];
				}
				emt(mtflags);
				local_irq_restore(flags);
			}
		} else {
			emt(mtflags);
			local_irq_restore(flags);
		}
	}
	/* Reprogram for interrupt at next soonest timestamp for VPE */
	if (ISVALID(nextstamp)) {
		write_c0_compare(nextstamp);
		ehb();
		if ((nextstamp - (unsigned long)read_c0_count())
		    > (unsigned long)LONG_MAX)
			goto repeat;
	}
}
/*
 * CP0 Count/Compare interrupt handler for SMTC: service the perf
 * counter, then if the timer-pending bit (Cause bit 30 - presumably
 * CAUSEF_TI; confirm against the MIPS PRA) is set, acknowledge it by
 * rewriting Compare and fan the tick out to this VPE's virtual timers.
 */
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	int cpu = smp_processor_id();

	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
	handle_perf_irq(1);

	if (read_c0_cause() & (1 << 30)) {
		/* Clear Count/Compare Interrupt */
		write_c0_compare(read_c0_compare());
		smtc_distribute_timer(cpu_data[cpu].vpe_id);
	}
	return IRQ_HANDLED;
}
/*
 * Register the per-CPU SMTC clock event device.  CPU 0 additionally
 * zeroes the shared virtual-timer state, verifies the Compare interrupt
 * is usable, and performs the one-time IRQ wiring; secondary CPUs only
 * register their device.  Returns 0 on success or -ENXIO when no usable
 * counter/frequency is available.
 */
int __cpuinit mips_clockevent_init(void)
{
	uint64_t mips_freq = mips_hpt_frequency;
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd;
	unsigned int irq;
	int i;
	int j;

	if (!cpu_has_counter || !mips_hpt_frequency)
		return -ENXIO;
	if (cpu == 0) {
		for (i = 0; i < num_possible_cpus(); i++) {
			smtc_nextinvpe[i] = 0;
			for (j = 0; j < num_possible_cpus(); j++)
				smtc_nexttime[i][j] = 0L;
		}
		/*
		 * SMTC also can't have the usablility test
		 * run by secondary TCs once Compare is in use.
		 */
		if (!c0_compare_int_usable())
			return -ENXIO;
	}

	/*
	 * With vectored interrupts things are getting platform specific.
	 * get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of it's liking.
	 */
	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	if (get_c0_compare_int)
		irq = get_c0_compare_int();

	cd = &per_cpu(mips_clockevent_device, cpu);

	cd->name		= "MIPS";
	cd->features		= CLOCK_EVT_FEAT_ONESHOT;

	/* Calculate the min / max delta */
	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
	cd->shift		= 32;
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);

	cd->rating		= 300;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= mips_next_event;
	cd->set_mode		= mips_set_clock_mode;
	cd->event_handler	= mips_event_handler;

	clockevents_register_device(cd);

	/*
	 * On SMTC we only want to do the data structure
	 * initialization and IRQ setup once.
	 */
	if (cpu)
		return 0;
	/*
	 * And we need the hwmask associated with the c0_compare
	 * vector to be initialized.
	 */
	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
	if (cp0_timer_irq_installed)
		return 0;
	cp0_timer_irq_installed = 1;
	setup_irq(irq, &c0_compare_irqaction);
	return 0;
}
| gpl-2.0 |
cnbin/linux | sound/soc/codecs/wm8770.c | 395 | 19330 | /*
* wm8770.c -- WM8770 ALSA SoC Audio driver
*
* Copyright 2010 Wolfson Microelectronics plc
*
* Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/pm.h>
#include <linux/spi/spi.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include <sound/tlv.h>
#include "wm8770.h"
/* Number of external power supplies the WM8770 requires */
#define WM8770_NUM_SUPPLIES 3
/* Regulator names, index-matched to wm8770_priv.supplies[] / disable_nb[] */
static const char *wm8770_supply_names[WM8770_NUM_SUPPLIES] = {
	"AVDD1",
	"AVDD2",
	"DVDD"
};
/*
 * Power-on register defaults for the regmap cache ({register, value}).
 * Registers 0-17 are the volume registers, the rest are control and
 * the read-only device ID (register 30 == 0x8770).
 */
static const struct reg_default wm8770_reg_defaults[] = {
	{ 0, 0x7f },
	{ 1, 0x7f },
	{ 2, 0x7f },
	{ 3, 0x7f },
	{ 4, 0x7f },
	{ 5, 0x7f },
	{ 6, 0x7f },
	{ 7, 0x7f },
	{ 8, 0x7f },
	{ 9, 0xff },
	{ 10, 0xff },
	{ 11, 0xff },
	{ 12, 0xff },
	{ 13, 0xff },
	{ 14, 0xff },
	{ 15, 0xff },
	{ 16, 0xff },
	{ 17, 0xff },
	{ 18, 0 },
	{ 19, 0x90 },
	{ 20, 0 },
	{ 21, 0 },
	{ 22, 0x22 },
	{ 23, 0x22 },
	{ 24, 0x3e },
	{ 25, 0xc },
	{ 26, 0xc },
	{ 27, 0x100 },
	{ 28, 0x189 },
	{ 29, 0x189 },
	{ 30, 0x8770 },
};
/*
 * regmap volatile-register callback: only the reset register must
 * bypass the cache; everything else can be served from it.
 */
static bool wm8770_volatile_reg(struct device *dev, unsigned int reg)
{
	return reg == WM8770_RESET;
}
/* Per-device driver state, allocated in wm8770_spi_probe(). */
struct wm8770_priv {
	struct regmap *regmap;		/* SPI register map */
	struct regulator_bulk_data supplies[WM8770_NUM_SUPPLIES];
	/* one notifier per supply; see WM8770_REGULATOR_EVENT below */
	struct notifier_block disable_nb[WM8770_NUM_SUPPLIES];
	struct snd_soc_codec *codec;
	int sysclk;			/* MCLK rate in Hz, from set_sysclk() */
};
/* DAPM supply widget callbacks, defined below the widget tables. */
static int vout12supply_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event);
static int vout34supply_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event);

/*
 * We can't use the same notifier block for more than one supply and
 * there's no way I can see to get from a callback to the caller
 * except container_of().
 */
#define WM8770_REGULATOR_EVENT(n) \
static int wm8770_regulator_event_##n(struct notifier_block *nb, \
	unsigned long event, void *data) \
{ \
	struct wm8770_priv *wm8770 = container_of(nb, struct wm8770_priv, \
		disable_nb[n]); \
	if (event & REGULATOR_EVENT_DISABLE) { \
		regcache_mark_dirty(wm8770->regmap); \
	} \
	return 0; \
}

/*
 * When a supply is cut, mark the register cache dirty so it is synced
 * back to the chip on the next power-up (see wm8770_set_bias_level).
 */
WM8770_REGULATOR_EVENT(0)
WM8770_REGULATOR_EVENT(1)
WM8770_REGULATOR_EVENT(2)
/* Volume scales: ADC -12..+19dB in 1dB, DAC digital 0.5dB, analog 1dB steps */
static const DECLARE_TLV_DB_SCALE(adc_tlv, -1200, 100, 0);
static const DECLARE_TLV_DB_SCALE(dac_dig_tlv, -12750, 50, 1);
static const DECLARE_TLV_DB_SCALE(dac_alg_tlv, -12700, 100, 1);

/* Per-DAC phase-invert choices; one text pair per stereo DAC */
static const char *dac_phase_text[][2] = {
	{ "DAC1 Normal", "DAC1 Inverted" },
	{ "DAC2 Normal", "DAC2 Inverted" },
	{ "DAC3 Normal", "DAC3 Inverted" },
	{ "DAC4 Normal", "DAC4 Inverted" },
};

/* Phase controls live in adjacent bit pairs of the DACPHASE register */
static const struct soc_enum dac_phase[] = {
	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 0, 1, 2, dac_phase_text[0]),
	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 2, 3, 2, dac_phase_text[1]),
	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 4, 5, 2, dac_phase_text[2]),
	SOC_ENUM_DOUBLE(WM8770_DACPHASE, 6, 7, 2, dac_phase_text[3]),
};
/* ALSA mixer controls exposed by the codec (registered from the
 * codec driver's .controls table below). */
static const struct snd_kcontrol_new wm8770_snd_controls[] = {
	/* global DAC playback controls */
	SOC_SINGLE_TLV("DAC Playback Volume", WM8770_MSDIGVOL, 0, 255, 0,
		dac_dig_tlv),
	SOC_SINGLE("DAC Playback Switch", WM8770_DACMUTE, 4, 1, 1),
	SOC_SINGLE("DAC Playback ZC Switch", WM8770_DACCTRL1, 0, 1, 0),

	/* global VOUT playback controls */
	SOC_SINGLE_TLV("VOUT Playback Volume", WM8770_MSALGVOL, 0, 127, 0,
		dac_alg_tlv),
	SOC_SINGLE("VOUT Playback ZC Switch", WM8770_MSALGVOL, 7, 1, 0),

	/* VOUT1/2/3/4 specific controls */
	SOC_DOUBLE_R_TLV("VOUT1 Playback Volume", WM8770_VOUT1LVOL,
		WM8770_VOUT1RVOL, 0, 127, 0, dac_alg_tlv),
	SOC_DOUBLE_R("VOUT1 Playback ZC Switch", WM8770_VOUT1LVOL,
		WM8770_VOUT1RVOL, 7, 1, 0),
	SOC_DOUBLE_R_TLV("VOUT2 Playback Volume", WM8770_VOUT2LVOL,
		WM8770_VOUT2RVOL, 0, 127, 0, dac_alg_tlv),
	SOC_DOUBLE_R("VOUT2 Playback ZC Switch", WM8770_VOUT2LVOL,
		WM8770_VOUT2RVOL, 7, 1, 0),
	SOC_DOUBLE_R_TLV("VOUT3 Playback Volume", WM8770_VOUT3LVOL,
		WM8770_VOUT3RVOL, 0, 127, 0, dac_alg_tlv),
	SOC_DOUBLE_R("VOUT3 Playback ZC Switch", WM8770_VOUT3LVOL,
		WM8770_VOUT3RVOL, 7, 1, 0),
	SOC_DOUBLE_R_TLV("VOUT4 Playback Volume", WM8770_VOUT4LVOL,
		WM8770_VOUT4RVOL, 0, 127, 0, dac_alg_tlv),
	SOC_DOUBLE_R("VOUT4 Playback ZC Switch", WM8770_VOUT4LVOL,
		WM8770_VOUT4RVOL, 7, 1, 0),

	/* DAC1/2/3/4 specific controls */
	SOC_DOUBLE_R_TLV("DAC1 Playback Volume", WM8770_DAC1LVOL,
		WM8770_DAC1RVOL, 0, 255, 0, dac_dig_tlv),
	SOC_SINGLE("DAC1 Deemphasis Switch", WM8770_DACCTRL2, 0, 1, 0),
	SOC_ENUM("DAC1 Phase", dac_phase[0]),
	SOC_DOUBLE_R_TLV("DAC2 Playback Volume", WM8770_DAC2LVOL,
		WM8770_DAC2RVOL, 0, 255, 0, dac_dig_tlv),
	SOC_SINGLE("DAC2 Deemphasis Switch", WM8770_DACCTRL2, 1, 1, 0),
	SOC_ENUM("DAC2 Phase", dac_phase[1]),
	SOC_DOUBLE_R_TLV("DAC3 Playback Volume", WM8770_DAC3LVOL,
		WM8770_DAC3RVOL, 0, 255, 0, dac_dig_tlv),
	SOC_SINGLE("DAC3 Deemphasis Switch", WM8770_DACCTRL2, 2, 1, 0),
	SOC_ENUM("DAC3 Phase", dac_phase[2]),
	SOC_DOUBLE_R_TLV("DAC4 Playback Volume", WM8770_DAC4LVOL,
		WM8770_DAC4RVOL, 0, 255, 0, dac_dig_tlv),
	SOC_SINGLE("DAC4 Deemphasis Switch", WM8770_DACCTRL2, 3, 1, 0),
	SOC_ENUM("DAC4 Phase", dac_phase[3]),

	/* ADC specific controls */
	SOC_DOUBLE_R_TLV("Capture Volume", WM8770_ADCLCTRL, WM8770_ADCRCTRL,
		0, 31, 0, adc_tlv),
	SOC_DOUBLE_R("Capture Switch", WM8770_ADCLCTRL, WM8770_ADCRCTRL,
		5, 1, 1),

	/* other controls */
	SOC_SINGLE("ADC 128x Oversampling Switch", WM8770_MSTRCTRL, 3, 1, 0),
	SOC_SINGLE("ADC Highpass Filter Switch", WM8770_IFACECTRL, 8, 1, 1)
};
/* Analogue input selector: 8 inputs, independent L/R selection fields */
static const char *ain_text[] = {
	"AIN1", "AIN2", "AIN3", "AIN4",
	"AIN5", "AIN6", "AIN7", "AIN8"
};

static SOC_ENUM_DOUBLE_DECL(ain_enum,
			    WM8770_ADCMUX, 0, 4, ain_text);

/* DAPM mux control feeding the ADC (see "Capture Mux" widget) */
static const struct snd_kcontrol_new ain_mux =
	SOC_DAPM_ENUM("Capture Mux", ain_enum);
/* Output mixer switches: each VOUT can blend its DAC, an AUX input
 * and the ADC bypass path (VOUT4 has no AUX input). */
static const struct snd_kcontrol_new vout1_mix_controls[] = {
	SOC_DAPM_SINGLE("DAC1 Switch", WM8770_OUTMUX1, 0, 1, 0),
	SOC_DAPM_SINGLE("AUX1 Switch", WM8770_OUTMUX1, 1, 1, 0),
	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX1, 2, 1, 0)
};

static const struct snd_kcontrol_new vout2_mix_controls[] = {
	SOC_DAPM_SINGLE("DAC2 Switch", WM8770_OUTMUX1, 3, 1, 0),
	SOC_DAPM_SINGLE("AUX2 Switch", WM8770_OUTMUX1, 4, 1, 0),
	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX1, 5, 1, 0)
};

static const struct snd_kcontrol_new vout3_mix_controls[] = {
	SOC_DAPM_SINGLE("DAC3 Switch", WM8770_OUTMUX2, 0, 1, 0),
	SOC_DAPM_SINGLE("AUX3 Switch", WM8770_OUTMUX2, 1, 1, 0),
	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX2, 2, 1, 0)
};

static const struct snd_kcontrol_new vout4_mix_controls[] = {
	SOC_DAPM_SINGLE("DAC4 Switch", WM8770_OUTMUX2, 3, 1, 0),
	SOC_DAPM_SINGLE("Bypass Switch", WM8770_OUTMUX2, 4, 1, 0)
};
/* DAPM widgets: pins, input mux, ADC/DAC power bits (PWDNCTRL),
 * software supply widgets for the VOUT pairs, and the output mixers. */
static const struct snd_soc_dapm_widget wm8770_dapm_widgets[] = {
	SND_SOC_DAPM_INPUT("AUX1"),
	SND_SOC_DAPM_INPUT("AUX2"),
	SND_SOC_DAPM_INPUT("AUX3"),

	SND_SOC_DAPM_INPUT("AIN1"),
	SND_SOC_DAPM_INPUT("AIN2"),
	SND_SOC_DAPM_INPUT("AIN3"),
	SND_SOC_DAPM_INPUT("AIN4"),
	SND_SOC_DAPM_INPUT("AIN5"),
	SND_SOC_DAPM_INPUT("AIN6"),
	SND_SOC_DAPM_INPUT("AIN7"),
	SND_SOC_DAPM_INPUT("AIN8"),

	SND_SOC_DAPM_MUX("Capture Mux", WM8770_ADCMUX, 8, 1, &ain_mux),

	SND_SOC_DAPM_ADC("ADC", "Capture", WM8770_PWDNCTRL, 1, 1),

	SND_SOC_DAPM_DAC("DAC1", "Playback", WM8770_PWDNCTRL, 2, 1),
	SND_SOC_DAPM_DAC("DAC2", "Playback", WM8770_PWDNCTRL, 3, 1),
	SND_SOC_DAPM_DAC("DAC3", "Playback", WM8770_PWDNCTRL, 4, 1),
	SND_SOC_DAPM_DAC("DAC4", "Playback", WM8770_PWDNCTRL, 5, 1),

	SND_SOC_DAPM_SUPPLY("VOUT12 Supply", SND_SOC_NOPM, 0, 0,
		vout12supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
	SND_SOC_DAPM_SUPPLY("VOUT34 Supply", SND_SOC_NOPM, 0, 0,
		vout34supply_event, SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),

	SND_SOC_DAPM_MIXER("VOUT1 Mixer", SND_SOC_NOPM, 0, 0,
		vout1_mix_controls, ARRAY_SIZE(vout1_mix_controls)),
	SND_SOC_DAPM_MIXER("VOUT2 Mixer", SND_SOC_NOPM, 0, 0,
		vout2_mix_controls, ARRAY_SIZE(vout2_mix_controls)),
	SND_SOC_DAPM_MIXER("VOUT3 Mixer", SND_SOC_NOPM, 0, 0,
		vout3_mix_controls, ARRAY_SIZE(vout3_mix_controls)),
	SND_SOC_DAPM_MIXER("VOUT4 Mixer", SND_SOC_NOPM, 0, 0,
		vout4_mix_controls, ARRAY_SIZE(vout4_mix_controls)),

	SND_SOC_DAPM_OUTPUT("VOUT1"),
	SND_SOC_DAPM_OUTPUT("VOUT2"),
	SND_SOC_DAPM_OUTPUT("VOUT3"),
	SND_SOC_DAPM_OUTPUT("VOUT4")
};
/* DAPM routing graph: AINx -> Capture Mux -> ADC; each VOUT mixer is
 * gated by its pair's supply widget and feeds the matching output. */
static const struct snd_soc_dapm_route wm8770_intercon[] = {
	{ "Capture Mux", "AIN1", "AIN1" },
	{ "Capture Mux", "AIN2", "AIN2" },
	{ "Capture Mux", "AIN3", "AIN3" },
	{ "Capture Mux", "AIN4", "AIN4" },
	{ "Capture Mux", "AIN5", "AIN5" },
	{ "Capture Mux", "AIN6", "AIN6" },
	{ "Capture Mux", "AIN7", "AIN7" },
	{ "Capture Mux", "AIN8", "AIN8" },

	{ "ADC", NULL, "Capture Mux" },

	{ "VOUT1 Mixer", NULL, "VOUT12 Supply" },
	{ "VOUT1 Mixer", "DAC1 Switch", "DAC1" },
	{ "VOUT1 Mixer", "AUX1 Switch", "AUX1" },
	{ "VOUT1 Mixer", "Bypass Switch", "Capture Mux" },

	{ "VOUT2 Mixer", NULL, "VOUT12 Supply" },
	{ "VOUT2 Mixer", "DAC2 Switch", "DAC2" },
	{ "VOUT2 Mixer", "AUX2 Switch", "AUX2" },
	{ "VOUT2 Mixer", "Bypass Switch", "Capture Mux" },

	{ "VOUT3 Mixer", NULL, "VOUT34 Supply" },
	{ "VOUT3 Mixer", "DAC3 Switch", "DAC3" },
	{ "VOUT3 Mixer", "AUX3 Switch", "AUX3" },
	{ "VOUT3 Mixer", "Bypass Switch", "Capture Mux" },

	{ "VOUT4 Mixer", NULL, "VOUT34 Supply" },
	{ "VOUT4 Mixer", "DAC4 Switch", "DAC4" },
	{ "VOUT4 Mixer", "Bypass Switch", "Capture Mux" },

	{ "VOUT1", NULL, "VOUT1 Mixer" },
	{ "VOUT2", NULL, "VOUT2 Mixer" },
	{ "VOUT3", NULL, "VOUT3 Mixer" },
	{ "VOUT4", NULL, "VOUT4 Mixer" }
};
/*
 * Supply widget event for the VOUT1/VOUT2 pair: clear the 0x180 field
 * of OUTMUX1 before the pair powers up, set it again once the pair has
 * powered down.
 */
static int vout12supply_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);

	if (event == SND_SOC_DAPM_PRE_PMU)
		snd_soc_update_bits(codec, WM8770_OUTMUX1, 0x180, 0);
	else if (event == SND_SOC_DAPM_POST_PMD)
		snd_soc_update_bits(codec, WM8770_OUTMUX1, 0x180, 0x180);

	return 0;
}
/*
 * Supply widget event for the VOUT3/VOUT4 pair: mirrors
 * vout12supply_event() but operates on OUTMUX2.
 */
static int vout34supply_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);

	if (event == SND_SOC_DAPM_PRE_PMU)
		snd_soc_update_bits(codec, WM8770_OUTMUX2, 0x180, 0);
	else if (event == SND_SOC_DAPM_POST_PMD)
		snd_soc_update_bits(codec, WM8770_OUTMUX2, 0x180, 0x180);

	return 0;
}
/* Trigger a device reset by writing the (volatile) reset register. */
static int wm8770_reset(struct snd_soc_codec *codec)
{
	return snd_soc_write(codec, WM8770_RESET, 0);
}
/*
 * Configure the DAI format: clock mastering (MSTRCTRL bit 8), data
 * format and clock inversion (low nibble of IFACECTRL).
 * Returns 0 on success, -EINVAL for an unsupported combination.
 */
static int wm8770_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct snd_soc_codec *codec = dai->codec;
	int iface = 0;
	int master;

	/* Clock master/slave selection */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBM_CFM:
		master = 0x100;
		break;
	case SND_SOC_DAIFMT_CBS_CFS:
		master = 0;
		break;
	default:
		return -EINVAL;
	}

	/* Audio data format */
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_RIGHT_J:
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		iface |= 0x1;
		break;
	case SND_SOC_DAIFMT_I2S:
		iface |= 0x2;
		break;
	default:
		return -EINVAL;
	}

	/* Bit clock / frame clock inversion */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_NB_IF:
		iface |= 0x4;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		iface |= 0x8;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		iface |= 0xc;
		break;
	default:
		return -EINVAL;
	}

	snd_soc_update_bits(codec, WM8770_IFACECTRL, 0xf, iface);
	snd_soc_update_bits(codec, WM8770_MSTRCTRL, 0x100, master);

	return 0;
}
/*
 * Supported MCLK/LRCLK ratios; the array index doubles as the value
 * programmed into MSTRCTRL by wm8770_hw_params().  Playback may use
 * all entries, capture only index 2 onwards (see hw_params).
 */
static const int mclk_ratios[] = {
	128,
	192,
	256,
	384,
	512,
	768
};
/*
 * Configure word length and, when the codec is clock master, the
 * MCLK/LRCLK ratio for the stream being prepared.
 *
 * Fixes over the original:
 *  - an unrecognized sample width used to fall through silently and
 *    leave the 16-bit setting in place; it now returns -EINVAL.
 *  - the loop-invariant MCLK/LRCLK ratio is computed once instead of
 *    on every loop iteration.
 */
static int wm8770_hw_params(struct snd_pcm_substream *substream,
			    struct snd_pcm_hw_params *params,
			    struct snd_soc_dai *dai)
{
	struct snd_soc_codec *codec;
	struct wm8770_priv *wm8770;
	int i;
	int iface;
	int shift;
	int ratio;

	codec = dai->codec;
	wm8770 = snd_soc_codec_get_drvdata(codec);

	iface = 0;
	switch (params_width(params)) {
	case 16:
		break;
	case 20:
		iface |= 0x10;
		break;
	case 24:
		iface |= 0x20;
		break;
	case 32:
		iface |= 0x30;
		break;
	default:
		/* width not representable in the 2-bit WL field */
		return -EINVAL;
	}

	switch (substream->stream) {
	case SNDRV_PCM_STREAM_PLAYBACK:
		i = 0;		/* playback may use all ratios */
		shift = 4;	/* DAC ratio field in MSTRCTRL */
		break;
	case SNDRV_PCM_STREAM_CAPTURE:
		i = 2;		/* capture limited to >= 256fs */
		shift = 0;	/* ADC ratio field in MSTRCTRL */
		break;
	default:
		return -EINVAL;
	}

	/* Only need to set MCLK/LRCLK ratio if we're master */
	if (snd_soc_read(codec, WM8770_MSTRCTRL) & 0x100) {
		ratio = wm8770->sysclk / params_rate(params);
		for (; i < ARRAY_SIZE(mclk_ratios); ++i) {
			if (ratio == mclk_ratios[i])
				break;
		}

		if (i == ARRAY_SIZE(mclk_ratios)) {
			dev_err(codec->dev,
				"Unable to configure MCLK ratio %d/%d\n",
				wm8770->sysclk, params_rate(params));
			return -EINVAL;
		}

		dev_dbg(codec->dev, "MCLK is %dfs\n", mclk_ratios[i]);

		snd_soc_update_bits(codec, WM8770_MSTRCTRL, 0x7 << shift,
				    i << shift);
	}

	snd_soc_update_bits(codec, WM8770_IFACECTRL, 0x30, iface);
	return 0;
}
/* Global DAC soft-mute: drives bit 4 of DACMUTE from the mute flag. */
static int wm8770_mute(struct snd_soc_dai *dai, int mute)
{
	return snd_soc_update_bits(dai->codec, WM8770_DACMUTE, 0x10,
				   !!mute << 4);
}
/*
 * Record the MCLK rate for later ratio selection in wm8770_hw_params().
 * clk_id and dir are unused; always succeeds.
 */
static int wm8770_set_sysclk(struct snd_soc_dai *dai,
			     int clk_id, unsigned int freq, int dir)
{
	struct wm8770_priv *wm8770 = snd_soc_codec_get_drvdata(dai->codec);

	wm8770->sysclk = freq;
	return 0;
}
/*
 * Bias-level transitions: on OFF->STANDBY re-enable the supplies, sync
 * the (dirty) register cache back to the chip and release the global
 * power-down; on ->OFF assert global power-down and cut the supplies.
 * ON and PREPARE need no work here.
 */
static int wm8770_set_bias_level(struct snd_soc_codec *codec,
				 enum snd_soc_bias_level level)
{
	int ret;
	struct wm8770_priv *wm8770;

	wm8770 = snd_soc_codec_get_drvdata(codec);

	switch (level) {
	case SND_SOC_BIAS_ON:
		break;
	case SND_SOC_BIAS_PREPARE:
		break;
	case SND_SOC_BIAS_STANDBY:
		if (snd_soc_codec_get_bias_level(codec) == SND_SOC_BIAS_OFF) {
			ret = regulator_bulk_enable(ARRAY_SIZE(wm8770->supplies),
						    wm8770->supplies);
			if (ret) {
				dev_err(codec->dev,
					"Failed to enable supplies: %d\n",
					ret);
				return ret;
			}
			/* cache was marked dirty by the disable notifiers */
			regcache_sync(wm8770->regmap);

			/* global powerup */
			snd_soc_write(codec, WM8770_PWDNCTRL, 0);
		}
		break;
	case SND_SOC_BIAS_OFF:
		/* global powerdown */
		snd_soc_write(codec, WM8770_PWDNCTRL, 1);
		regulator_bulk_disable(ARRAY_SIZE(wm8770->supplies),
				       wm8770->supplies);
		break;
	}

	return 0;
}
/* Sample formats accepted on both directions of the DAI */
#define WM8770_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | \
			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)

static const struct snd_soc_dai_ops wm8770_dai_ops = {
	.digital_mute = wm8770_mute,
	.hw_params = wm8770_hw_params,
	.set_fmt = wm8770_set_fmt,
	.set_sysclk = wm8770_set_sysclk,
};

/* Single DAI: stereo playback up to 192kHz, stereo capture up to 96kHz;
 * symmetric_rates forces playback and capture to share a sample rate. */
static struct snd_soc_dai_driver wm8770_dai = {
	.name = "wm8770-hifi",
	.playback = {
		.stream_name = "Playback",
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_192000,
		.formats = WM8770_FORMATS
	},
	.capture = {
		.stream_name = "Capture",
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_96000,
		.formats = WM8770_FORMATS
	},
	.ops = &wm8770_dai_ops,
	.symmetric_rates = 1
};
/*
 * Codec-level probe: power the supplies, reset the chip, latch the
 * volume-update bits and mute all DACs.
 *
 * NOTE: the success path deliberately falls through the
 * err_reg_enable label -- supplies are switched off again at the end
 * of probe either way, and are re-enabled later by
 * wm8770_set_bias_level() when the codec leaves BIAS_OFF
 * (idle_bias_off is set in the codec driver).
 */
static int wm8770_probe(struct snd_soc_codec *codec)
{
	struct wm8770_priv *wm8770;
	int ret;

	wm8770 = snd_soc_codec_get_drvdata(codec);
	wm8770->codec = codec;

	ret = regulator_bulk_enable(ARRAY_SIZE(wm8770->supplies),
				    wm8770->supplies);
	if (ret) {
		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
		return ret;
	}

	ret = wm8770_reset(codec);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to issue reset: %d\n", ret);
		goto err_reg_enable;
	}

	/* latch the volume update bits */
	snd_soc_update_bits(codec, WM8770_MSDIGVOL, 0x100, 0x100);
	snd_soc_update_bits(codec, WM8770_MSALGVOL, 0x100, 0x100);
	snd_soc_update_bits(codec, WM8770_VOUT1RVOL, 0x100, 0x100);
	snd_soc_update_bits(codec, WM8770_VOUT2RVOL, 0x100, 0x100);
	snd_soc_update_bits(codec, WM8770_VOUT3RVOL, 0x100, 0x100);
	snd_soc_update_bits(codec, WM8770_VOUT4RVOL, 0x100, 0x100);
	snd_soc_update_bits(codec, WM8770_DAC1RVOL, 0x100, 0x100);
	snd_soc_update_bits(codec, WM8770_DAC2RVOL, 0x100, 0x100);
	snd_soc_update_bits(codec, WM8770_DAC3RVOL, 0x100, 0x100);
	snd_soc_update_bits(codec, WM8770_DAC4RVOL, 0x100, 0x100);

	/* mute all DACs */
	snd_soc_update_bits(codec, WM8770_DACMUTE, 0x10, 0x10);

err_reg_enable:
	regulator_bulk_disable(ARRAY_SIZE(wm8770->supplies), wm8770->supplies);
	return ret;
}
/* ASoC codec driver: tables above are registered automatically;
 * idle_bias_off lets the supplies be cut whenever the codec is idle. */
static struct snd_soc_codec_driver soc_codec_dev_wm8770 = {
	.probe = wm8770_probe,
	.set_bias_level = wm8770_set_bias_level,
	.idle_bias_off = true,

	.controls = wm8770_snd_controls,
	.num_controls = ARRAY_SIZE(wm8770_snd_controls),
	.dapm_widgets = wm8770_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(wm8770_dapm_widgets),
	.dapm_routes = wm8770_intercon,
	.num_dapm_routes = ARRAY_SIZE(wm8770_intercon),
};

static const struct of_device_id wm8770_of_match[] = {
	{ .compatible = "wlf,wm8770", },
	{ }
};
MODULE_DEVICE_TABLE(of, wm8770_of_match);

/* 7-bit register address + 9-bit value per SPI transfer; cache all
 * registers except the volatile reset register. */
static const struct regmap_config wm8770_regmap = {
	.reg_bits = 7,
	.val_bits = 9,
	.max_register = WM8770_RESET,

	.reg_defaults = wm8770_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(wm8770_reg_defaults),
	.cache_type = REGCACHE_RBTREE,

	.volatile_reg = wm8770_volatile_reg,
};
/*
 * SPI probe: allocate driver state, acquire the supplies, hook a
 * disable-notifier onto each supply, create the regmap and register
 * the codec.
 *
 * Fix over the original: the regulator notifiers were never
 * unregistered when regmap creation or codec registration failed.
 * Since a failed probe means wm8770_spi_remove() is never called,
 * those notifiers leaked and pointed at freed (devm) memory.  They
 * are now unwound on every failure path after registration.
 */
static int wm8770_spi_probe(struct spi_device *spi)
{
	struct wm8770_priv *wm8770;
	int ret, i;

	wm8770 = devm_kzalloc(&spi->dev, sizeof(struct wm8770_priv),
			      GFP_KERNEL);
	if (!wm8770)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++)
		wm8770->supplies[i].supply = wm8770_supply_names[i];

	ret = devm_regulator_bulk_get(&spi->dev, ARRAY_SIZE(wm8770->supplies),
				      wm8770->supplies);
	if (ret) {
		dev_err(&spi->dev, "Failed to request supplies: %d\n", ret);
		return ret;
	}

	wm8770->disable_nb[0].notifier_call = wm8770_regulator_event_0;
	wm8770->disable_nb[1].notifier_call = wm8770_regulator_event_1;
	wm8770->disable_nb[2].notifier_call = wm8770_regulator_event_2;

	/* This should really be moved into the regulator core */
	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++) {
		ret = regulator_register_notifier(wm8770->supplies[i].consumer,
						  &wm8770->disable_nb[i]);
		if (ret) {
			dev_err(&spi->dev,
				"Failed to register regulator notifier: %d\n",
				ret);
		}
	}

	wm8770->regmap = devm_regmap_init_spi(spi, &wm8770_regmap);
	if (IS_ERR(wm8770->regmap)) {
		ret = PTR_ERR(wm8770->regmap);
		goto err_notifiers;
	}

	spi_set_drvdata(spi, wm8770);

	ret = snd_soc_register_codec(&spi->dev,
				     &soc_codec_dev_wm8770, &wm8770_dai, 1);
	if (ret)
		goto err_notifiers;

	return 0;

err_notifiers:
	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); i++)
		regulator_unregister_notifier(wm8770->supplies[i].consumer,
					      &wm8770->disable_nb[i]);
	return ret;
}
/*
 * SPI remove: drop the regulator notifiers registered in probe, then
 * unregister the codec.  devm handles the rest of the teardown.
 */
static int wm8770_spi_remove(struct spi_device *spi)
{
	struct wm8770_priv *wm8770 = spi_get_drvdata(spi);
	int i;

	for (i = 0; i < ARRAY_SIZE(wm8770->supplies); ++i)
		regulator_unregister_notifier(wm8770->supplies[i].consumer,
					      &wm8770->disable_nb[i]);

	snd_soc_unregister_codec(&spi->dev);

	return 0;
}
/* SPI driver glue; bound either by board info or the OF match table. */
static struct spi_driver wm8770_spi_driver = {
	.driver = {
		.name = "wm8770",
		.owner = THIS_MODULE,
		.of_match_table = wm8770_of_match,
	},
	.probe = wm8770_spi_probe,
	.remove = wm8770_spi_remove
};

module_spi_driver(wm8770_spi_driver);

MODULE_DESCRIPTION("ASoC WM8770 driver");
MODULE_AUTHOR("Dimitris Papastamos <dp@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
pakohan/RPi-kernel | linux/drivers/isdn/hardware/mISDN/netjet.c | 651 | 29225 | /*
* NETJet mISDN driver
*
* Author Karsten Keil <keil@isdn4linux.de>
*
* Copyright 2009 by Karsten Keil <keil@isdn4linux.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/mISDNhw.h>
#include <linux/slab.h>
#include "ipac.h"
#include "iohelper.h"
#include "netjet.h"
#include <linux/isdn/hdlc.h>
#define NETJET_REV "2.0"
/* Supported card variants (stored in tiger_hw.typ) */
enum nj_types {
	NETJET_S_TJ300,
	NETJET_S_TJ320,
	ENTERNOW__TJ320,
};
/* One DMA ring (TX or RX) of 32-bit words; both B channels share the
 * ring, each occupying one byte lane of every word. */
struct tiger_dma {
	size_t size;	/* ring length in u32 words */
	u32 *start;	/* CPU virtual address of the ring */
	int idx;	/* current word index derived from dmacur */
	u32 dmastart;	/* bus address of first word */
	u32 dmairq;	/* bus address at which the half-way IRQ fires */
	u32 dmaend;	/* bus address of last word */
	u32 dmacur;	/* last hardware DMA pointer read back */
};
struct tiger_hw;

/* Per-B-channel state on top of the generic mISDN bchannel */
struct tiger_ch {
	struct bchannel bch;
	struct tiger_hw *nj;		/* owning card */
	int idx;			/* fill position in the TX ring */
	int free;			/* free TX ring space (u32 words) */
	int lastrx;			/* last RX index seen (overrun check) */
	u16 rxstate;			/* RX_* bits below */
	u16 txstate;			/* TX_* bits below */
	struct isdnhdlc_vars hsend;	/* software HDLC encoder state */
	struct isdnhdlc_vars hrecv;	/* software HDLC decoder state */
	u8 *hsbuf;			/* HDLC encode scratch buffer */
	u8 *hrbuf;			/* HDLC decode scratch buffer */
};

/* txstate / rxstate flag bits */
#define TX_INIT		0x0001
#define TX_IDLE		0x0002
#define TX_RUN		0x0004
#define TX_UNDERRUN	0x0100
#define RX_OVERRUN	0x0100

/* size of the per-card debug log scratch buffer */
#define LOG_SIZE	64
/* Per-card state: PCI/ioport resources, the ISAC D-channel controller,
 * the two DMA rings and both B channels. */
struct tiger_hw {
	struct list_head list;		/* entry in the global Cards list */
	struct pci_dev *pdev;
	char name[MISDN_MAX_IDLEN];
	enum nj_types typ;
	int irq;
	u32 irqcnt;
	u32 base;			/* I/O port base */
	size_t base_s;			/* I/O region size */
	dma_addr_t dma;			/* bus address of the DMA area */
	void *dma_p;			/* CPU address of the DMA area */
	spinlock_t lock;	/* lock HW */
	struct isac_hw isac;		/* D-channel controller */
	struct tiger_dma send;		/* TX ring (DMA "read" channel) */
	struct tiger_dma recv;		/* RX ring (DMA "write" channel) */
	struct tiger_ch bc[2];
	u8 ctrlreg;			/* shadow of NJ_CTRL */
	u8 dmactrl;			/* shadow of NJ_DMACTRL */
	u8 auxd;			/* shadow of NJ_AUXDATA */
	u8 last_is0;			/* last IRQSTAT0 snapshot */
	u8 irqmask0;			/* shadow of NJ_IRQMASK0 */
	char log[LOG_SIZE];		/* hexdump prefix scratch */
};
/* All probed cards; guarded by card_lock for the debug-param updater */
static LIST_HEAD(Cards);
static DEFINE_RWLOCK(card_lock); /* protect Cards */
static u32 debug;		/* module parameter, see set_debug() */
static int nj_cnt;		/* number of cards probed so far */
static void
_set_debug(struct tiger_hw *card)
{
card->isac.dch.debug = debug;
card->bc[0].bch.debug = debug;
card->bc[1].bch.debug = debug;
}
/*
 * Module-parameter setter for "debug": parse the new value, then push
 * it to every registered card under the read lock.
 */
static int
set_debug(const char *val, struct kernel_param *kp)
{
	struct tiger_hw *card;
	int err = param_set_uint(val, kp);

	if (err)
		return err;

	read_lock(&card_lock);
	list_for_each_entry(card, &Cards, list)
		_set_debug(card);
	read_unlock(&card_lock);

	return 0;
}
MODULE_AUTHOR("Karsten Keil");
MODULE_LICENSE("GPL v2");
MODULE_VERSION(NETJET_REV);
/* writable debug parameter; writes fan out to all cards via set_debug() */
module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Netjet debug mask");
/* Mask all interrupt sources on the card (both IRQ mask registers). */
static void
nj_disable_hwirq(struct tiger_hw *card)
{
	outb(0, card->base + NJ_IRQMASK0);
	outb(0, card->base + NJ_IRQMASK1);
}
/*
 * ISAC register access: the upper two bits of the ISAC register offset
 * are presented on the AUX pins (low two bits of NJ_AUXDATA), the low
 * four bits select the I/O port within the NJ_ISAC_OFF window (each
 * register is spaced 4 ports apart).  The AUXDATA write must precede
 * the data access.
 */
static u8
ReadISAC_nj(void *p, u8 offset)
{
	struct tiger_hw *card = p;
	u8 ret;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	ret = inb(card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
	return ret;
}

static void
WriteISAC_nj(void *p, u8 offset, u8 value)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	card->auxd |= (offset >> 4) & 3;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outb(value, card->base + NJ_ISAC_OFF + ((offset & 0x0f) << 2));
}

/* FIFO accesses always select bank 0 and use the base ISAC port. */
static void
ReadFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	insb(card->base + NJ_ISAC_OFF, data, size);
}

static void
WriteFiFoISAC_nj(void *p, u8 offset, u8 *data, int size)
{
	struct tiger_hw *card = p;

	card->auxd &= 0xfc;
	outb(card->auxd, card->base + NJ_AUXDATA);
	outsb(card->base + NJ_ISAC_OFF, data, size);
}
/*
 * Fill cnt words of this B channel's byte lane in the TX DMA ring with
 * the pattern 'fill', starting at word index idx and wrapping at the
 * end of the ring.  Channel 1 owns bits 7:0 of each word, channel 2
 * bits 15:8; the other lane is preserved via the mask.
 */
static void
fill_mem(struct tiger_ch *bc, u32 idx, u32 cnt, u32 fill)
{
	struct tiger_hw *card = bc->bch.hw;
	u32 mask = 0xff, val;

	pr_debug("%s: B%1d fill %02x len %d idx %d/%d\n", card->name,
		 bc->bch.nr, fill, cnt, idx, card->send.idx);
	if (bc->bch.nr & 2) {
		/* second B channel uses the next byte lane */
		fill <<= 8;
		mask <<= 8;
	}
	mask ^= 0xffffffff;	/* invert: mask now clears only our lane */
	while (cnt--) {
		val = card->send.start[idx];
		val &= mask;
		val |= fill;
		card->send.start[idx++] = val;
		if (idx >= card->send.size)
			idx = 0;
	}
}
/*
 * Switch a B channel to the requested protocol (ISDN_P_NONE,
 * ISDN_P_B_RAW or ISDN_P_B_HDLC).  Starts the shared DMA engine and
 * unmasks its interrupts when the first channel opens, and stops both
 * when the last channel closes.  Finally resyncs the software ring
 * indices with the hardware DMA pointers.
 * Returns 0 or -ENOPROTOOPT for an unsupported protocol.
 */
static int
mode_tiger(struct tiger_ch *bc, u32 protocol)
{
	struct tiger_hw *card = bc->bch.hw;

	pr_debug("%s: B%1d protocol %x-->%x\n", card->name,
		 bc->bch.nr, bc->bch.state, protocol);
	switch (protocol) {
	case ISDN_P_NONE:
		if (bc->bch.state == ISDN_P_NONE)
			break;
		/* idle-fill our byte lane of the TX ring */
		fill_mem(bc, 0, card->send.size, 0xff);
		bc->bch.state = protocol;
		/* only stop dma and interrupts if both channels NULL */
		if ((card->bc[0].bch.state == ISDN_P_NONE) &&
		    (card->bc[1].bch.state == ISDN_P_NONE)) {
			card->dmactrl = 0;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0, card->base + NJ_IRQMASK0);
		}
		test_and_clear_bit(FLG_HDLC, &bc->bch.Flags);
		test_and_clear_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->txstate = 0;
		bc->rxstate = 0;
		bc->lastrx = -1;
		break;
	case ISDN_P_B_RAW:
		test_and_set_bit(FLG_TRANSPARENT, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size/2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		bc->lastrx = -1;
		if (!card->dmactrl) {
			/* first open channel: start DMA, unmask DMA IRQs */
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	case ISDN_P_B_HDLC:
		test_and_set_bit(FLG_HDLC, &bc->bch.Flags);
		bc->bch.state = protocol;
		bc->idx = 0;
		bc->free = card->send.size/2;
		bc->rxstate = 0;
		bc->txstate = TX_INIT | TX_IDLE;
		/* software HDLC framing on top of the transparent DMA */
		isdnhdlc_rcv_init(&bc->hrecv, 0);
		isdnhdlc_out_init(&bc->hsend, 0);
		bc->lastrx = -1;
		if (!card->dmactrl) {
			card->dmactrl = 1;
			outb(card->dmactrl, card->base + NJ_DMACTRL);
			outb(0x0f, card->base + NJ_IRQMASK0);
		}
		break;
	default:
		pr_info("%s: %s protocol %x not handled\n", card->name,
			__func__, protocol);
		return -ENOPROTOOPT;
	}
	/* resync software indices with the hardware DMA pointers */
	card->send.dmacur = inl(card->base + NJ_DMA_READ_ADR);
	card->recv.dmacur = inl(card->base + NJ_DMA_WRITE_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	pr_debug("%s: %s ctrl %x irq  %02x/%02x idx %d/%d\n",
		 card->name, __func__,
		 inb(card->base + NJ_DMACTRL),
		 inb(card->base + NJ_IRQMASK0),
		 inb(card->base + NJ_IRQSTAT0),
		 card->send.idx,
		 card->recv.idx);
	return 0;
}
/*
 * Hard-reset the card and bring the control/AUX registers into a known
 * state.  Leaves all DMA stopped and only the ISAC IRQ line enabled on
 * mask register 1.
 */
static void
nj_reset(struct tiger_hw *card)
{
	outb(0xff, card->base + NJ_CTRL); /* Reset On */
	mdelay(1);

	/* now edge triggered for TJ320 GE 13/07/00 */
	/* see comment in IRQ function */
	if (card->typ == NETJET_S_TJ320) /* TJ320 */
		card->ctrlreg = 0x40;  /* Reset Off and status read clear */
	else
		card->ctrlreg = 0x00; /* Reset Off and status read clear */
	outb(card->ctrlreg, card->base + NJ_CTRL);
	mdelay(10);

	/* configure AUX pins (all output except ISAC IRQ pin) */
	card->auxd = 0;
	card->dmactrl = 0;
	outb(~NJ_ISACIRQ, card->base + NJ_AUXCTRL);
	outb(NJ_ISACIRQ, card->base + NJ_IRQMASK1);
	outb(card->auxd, card->base + NJ_AUXDATA);
}
/*
 * Allocate the coherent DMA area (TX ring in the first half, RX ring
 * in the second) and the per-channel HDLC scratch buffers, then
 * program the ring start/half-way-IRQ/end addresses into the card.
 *
 * NOTE(review): the early-return error paths do not free what was
 * already allocated here -- presumably the caller's release path frees
 * card->dma_p and the hsbuf/hrbuf buffers on probe failure; verify
 * against the (unseen) setup/release code.
 *
 * Returns 0 on success or -ENOMEM.
 */
static int
inittiger(struct tiger_hw *card)
{
	int i;

	card->dma_p = pci_alloc_consistent(card->pdev, NJ_DMA_SIZE,
					   &card->dma);
	if (!card->dma_p) {
		pr_info("%s: No DMA memory\n", card->name);
		return -ENOMEM;
	}
	/* the ring address registers are 32 bit wide */
	if ((u64)card->dma > 0xffffffff) {
		pr_info("%s: DMA outside 32 bit\n", card->name);
		return -ENOMEM;
	}
	for (i = 0; i < 2; i++) {
		card->bc[i].hsbuf = kmalloc(NJ_DMA_TXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hsbuf) {
			pr_info("%s: no B%d send buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
		card->bc[i].hrbuf = kmalloc(NJ_DMA_RXSIZE, GFP_ATOMIC);
		if (!card->bc[i].hrbuf) {
			pr_info("%s: no B%d recv buffer\n", card->name, i + 1);
			return -ENOMEM;
		}
	}
	memset(card->dma_p, 0xff, NJ_DMA_SIZE);	/* idle pattern everywhere */

	card->send.start = card->dma_p;
	card->send.dmastart = (u32)card->dma;
	card->send.dmaend = card->send.dmastart +
		(4 * (NJ_DMA_TXSIZE - 1));
	/* half-way IRQ so software can refill while DMA runs */
	card->send.dmairq = card->send.dmastart +
		(4 * ((NJ_DMA_TXSIZE / 2) - 1));
	card->send.size = NJ_DMA_TXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: send buffer phy %#x - %#x - %#x  virt %p"
			  " size %zu u32\n", card->name,
			  card->send.dmastart, card->send.dmairq,
			  card->send.dmaend, card->send.start, card->send.size);

	outl(card->send.dmastart, card->base + NJ_DMA_READ_START);
	outl(card->send.dmairq, card->base + NJ_DMA_READ_IRQ);
	outl(card->send.dmaend, card->base + NJ_DMA_READ_END);

	card->recv.start = card->dma_p + (NJ_DMA_SIZE / 2);
	card->recv.dmastart = (u32)card->dma  + (NJ_DMA_SIZE / 2);
	card->recv.dmaend = card->recv.dmastart +
		(4 * (NJ_DMA_RXSIZE - 1));
	card->recv.dmairq = card->recv.dmastart +
		(4 * ((NJ_DMA_RXSIZE / 2) - 1));
	card->recv.size = NJ_DMA_RXSIZE;

	if (debug & DEBUG_HW)
		pr_notice("%s: recv buffer phy %#x - %#x - %#x  virt %p"
			  " size %zu u32\n", card->name,
			  card->recv.dmastart, card->recv.dmairq,
			  card->recv.dmaend, card->recv.start, card->recv.size);

	outl(card->recv.dmastart, card->base + NJ_DMA_WRITE_START);
	outl(card->recv.dmairq, card->base + NJ_DMA_WRITE_IRQ);
	outl(card->recv.dmaend, card->base + NJ_DMA_WRITE_END);
	return 0;
}
/*
 * Pull cnt bytes for one B channel out of the RX DMA ring starting at
 * word index idx.  Transparent mode delivers the raw bytes; HDLC mode
 * runs them through the software HDLC decoder, which may yield zero or
 * more complete frames per call.  Completed data is handed upward via
 * recv_Bchannel().
 */
static void
read_dma(struct tiger_ch *bc, u32 idx, int cnt)
{
	struct tiger_hw *card = bc->bch.hw;
	int i, stat;
	u32 val;
	u8 *p, *pn;

	/* same index as last time means the DMA lapped us */
	if (bc->lastrx == idx) {
		bc->rxstate |= RX_OVERRUN;
		pr_info("%s: B%1d overrun at idx %d\n", card->name,
			bc->bch.nr, idx);
	}
	bc->lastrx = idx;
	if (!bc->bch.rx_skb) {
		bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen, GFP_ATOMIC);
		if (!bc->bch.rx_skb) {
			pr_info("%s: B%1d receive out of memory\n",
				card->name, bc->bch.nr);
			return;
		}
	}

	if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags)) {
		if ((bc->bch.rx_skb->len + cnt) > bc->bch.maxlen) {
			pr_debug("%s: B%1d overrun %d\n", card->name,
				 bc->bch.nr, bc->bch.rx_skb->len + cnt);
			skb_trim(bc->bch.rx_skb, 0);
			return;
		}
		/* transparent: copy straight into the skb */
		p = skb_put(bc->bch.rx_skb, cnt);
	} else
		/* HDLC: stage raw bytes for the decoder */
		p = bc->hrbuf;

	/* extract our byte lane from each ring word, wrapping at the end */
	for (i = 0; i < cnt; i++) {
		val = card->recv.start[idx++];
		if (bc->bch.nr & 2)
			val >>= 8;
		if (idx >= card->recv.size)
			idx = 0;
		p[i] = val & 0xff;
	}
	pn = bc->hrbuf;
next_frame:
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		/* i returns the number of input bytes consumed */
		stat = isdnhdlc_decode(&bc->hrecv, pn, cnt, &i,
				       bc->bch.rx_skb->data, bc->bch.maxlen);
		if (stat > 0) /* valid frame received */
			p = skb_put(bc->bch.rx_skb, stat);
		else if (stat == -HDLC_CRC_ERROR)
			pr_info("%s: B%1d receive frame CRC error\n",
				card->name, bc->bch.nr);
		else if (stat == -HDLC_FRAMING_ERROR)
			pr_info("%s: B%1d receive framing error\n",
				card->name, bc->bch.nr);
		else if (stat == -HDLC_LENGTH_ERROR)
			pr_info("%s: B%1d receive frame too long (> %d)\n",
				card->name, bc->bch.nr, bc->bch.maxlen);
	} else
		stat = cnt;

	if (stat > 0) {
		if (debug & DEBUG_HW_BFIFO) {
			snprintf(card->log, LOG_SIZE, "B%1d-recv %s %d ",
				 bc->bch.nr, card->name, stat);
			print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET,
					     p, stat);
		}
		recv_Bchannel(&bc->bch, 0);
	}
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		/* advance past consumed input; more frames may follow */
		pn += i;
		cnt -= i;
		if (!bc->bch.rx_skb) {
			bc->bch.rx_skb = mI_alloc_skb(bc->bch.maxlen,
						      GFP_ATOMIC);
			if (!bc->bch.rx_skb) {
				pr_info("%s: B%1d receive out of memory\n",
					card->name, bc->bch.nr);
				return;
			}
		}
		if (cnt > 0)
			goto next_frame;
	}
}
/*
 * RX half-buffer interrupt: work out which half of the ring the DMA
 * just finished (END flag => first half) and drain it for every active
 * B channel.
 */
static void
recv_tiger(struct tiger_hw *card, u8 irq_stat)
{
	u32 idx;
	int cnt = card->recv.size / 2;

	/* Note receive is via the WRITE DMA channel */
	card->last_is0 &= ~NJ_IRQM0_WR_MASK;
	card->last_is0 |= (irq_stat & NJ_IRQM0_WR_MASK);

	if (irq_stat & NJ_IRQM0_WR_END)
		idx = cnt - 1;
	else
		idx = card->recv.size - 1;

	if (test_bit(FLG_ACTIVE, &card->bc[0].bch.Flags))
		read_dma(&card->bc[0], idx, cnt);
	if (test_bit(FLG_ACTIVE, &card->bc[1].bch.Flags))
		read_dma(&card->bc[1], idx, cnt);
}
/* sync with current DMA address at start or after exception */
/*
 * NOTE(review): the inl() address uses '|' rather than '+'; this only
 * works if card->base is aligned so that OR equals ADD -- confirm, or
 * normalize to '+' as the rest of the file does.  Likewise the idx
 * computation below reads card->recv.size inside the send path; that is
 * harmless only while both rings have equal size -- verify.
 */
static void
resync(struct tiger_ch *bc, struct tiger_hw *card)
{
	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (bc->free > card->send.size / 2)
		bc->free = card->send.size / 2;
	/* currently we simple sync to the next complete free area
	 * this hast the advantage that we have always maximum time to
	 * handle TX irq
	 */
	if (card->send.idx < ((card->send.size / 2) - 1))
		bc->idx = (card->recv.size / 2) - 1;
	else
		bc->idx = card->recv.size - 1;
	bc->txstate = TX_RUN;
	pr_debug("%s: %s B%1d free %d idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->idx, card->send.idx);
}
static int bc_next_frame(struct tiger_ch *);
/*
 * Fill the free part of the send DMA ring with HDLC idle flags so the
 * line stays valid while no frame data is pending.
 */
static void
fill_hdlc_flag(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int nflags, k;
	u32 keep, word;
	u8 *src;

	if (bc->free == 0)
		return;
	pr_debug("%s: %s B%1d %d state %x idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, bc->free, bc->txstate,
		 bc->idx, card->send.idx);
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);
	/* encode pure flag bytes (no payload) into the staging buffer */
	nflags = isdnhdlc_encode(&bc->hsend, NULL, 0, &k,
				 bc->hsbuf, bc->free);
	pr_debug("%s: B%1d hdlc encoded %d flags\n", card->name,
		 bc->bch.nr, nflags);
	bc->free -= nflags;
	src = bc->hsbuf;
	/* B1 lives in byte 0 of each 32 bit word, B2 in byte 1 */
	keep = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (k = 0; k < nflags; k++) {
		if (bc->idx >= card->send.size)
			bc->idx = 0;
		word = card->send.start[bc->idx] & keep;
		word |= (bc->bch.nr & 1) ? (u32)(src[k]) : ((u32)(src[k])) << 8;
		card->send.start[bc->idx++] = word;
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, nflags);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET,
				     src, nflags);
	}
}
/*
 * Copy the payload of the current TX skb into the send DMA ring.
 * HDLC channels run through the software HDLC encoder first;
 * transparent channels are copied verbatim. B1 occupies byte 0 of each
 * 32 bit DMA word, B2 byte 1. Chains into bc_next_frame() while ring
 * space remains. Caller must guarantee bch.tx_skb is set.
 */
static void
fill_dma(struct tiger_ch *bc)
{
	struct tiger_hw *card = bc->bch.hw;
	int count, i;
	u32 m, v;
	u8 *p;

	if (bc->free == 0)
		return;	/* no room in the DMA ring right now */
	count = bc->bch.tx_skb->len - bc->bch.tx_idx;
	if (count <= 0)
		return;	/* frame already fully queued */
	pr_debug("%s: %s B%1d %d/%d/%d/%d state %x idx %d/%d\n", card->name,
		 __func__, bc->bch.nr, count, bc->free, bc->bch.tx_idx,
		 bc->bch.tx_skb->len, bc->txstate, bc->idx, card->send.idx);
	if (bc->txstate & (TX_IDLE | TX_INIT | TX_UNDERRUN))
		resync(bc, card);	/* re-sync ring pointers with the HW */
	p = bc->bch.tx_skb->data + bc->bch.tx_idx;
	if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
		/* encode into the per-channel HDLC staging buffer;
		 * i returns how many payload bytes were consumed */
		count = isdnhdlc_encode(&bc->hsend, p, count, &i,
					bc->hsbuf, bc->free);
		pr_debug("%s: B%1d hdlc encoded %d in %d\n", card->name,
			 bc->bch.nr, i, count);
		bc->bch.tx_idx += i;
		bc->free -= count;
		p = bc->hsbuf;
	} else {
		if (count > bc->free)
			count = bc->free;
		bc->bch.tx_idx += count;
		bc->free -= count;
	}
	/* mask keeps the other channel's byte lane untouched */
	m = (bc->bch.nr & 1) ? 0xffffff00 : 0xffff00ff;
	for (i = 0; i < count; i++) {
		if (bc->idx >= card->send.size)
			bc->idx = 0;	/* ring wrap */
		v = card->send.start[bc->idx];
		v &= m;
		v |= (bc->bch.nr & 1) ? (u32)(p[i]) : ((u32)(p[i])) << 8;
		card->send.start[bc->idx++] = v;
	}
	if (debug & DEBUG_HW_BFIFO) {
		snprintf(card->log, LOG_SIZE, "B%1d-send %s %d ",
			 bc->bch.nr, card->name, count);
		print_hex_dump_bytes(card->log, DUMP_PREFIX_OFFSET, p, count);
	}
	if (bc->free)
		bc_next_frame(bc);	/* still space: chain the next frame */
}
/*
 * Advance to the next TX frame for this B-channel.
 * Returns 1 if frame data was (or remains) queued, 0 if nothing is
 * left to send.
 */
static int
bc_next_frame(struct tiger_ch *bc)
{
	if (bc->bch.tx_skb && bc->bch.tx_idx < bc->bch.tx_skb->len)
		fill_dma(bc);	/* current frame not yet fully queued */
	else {
		if (bc->bch.tx_skb) {
			/* send confirm, on trans, free on hdlc. */
			if (test_bit(FLG_TRANSPARENT, &bc->bch.Flags))
				confirm_Bsend(&bc->bch);
			dev_kfree_skb(bc->bch.tx_skb);
		}
		/* get_next_bframe() loads bch.tx_skb with the next queued
		 * frame (or NULL) - presumably it also resets tx_idx */
		if (get_next_bframe(&bc->bch))
			fill_dma(bc);
		else
			return 0;
	}
	return 1;
}
/*
 * TX half-buffer interrupt handling for one B-channel: account the
 * half-ring that just drained, refill it, and when no data is left
 * keep the line valid (HDLC idle flags or 0xff fill for transparent).
 */
static void
send_tiger_bc(struct tiger_hw *card, struct tiger_ch *bc)
{
	int ret;

	bc->free += card->send.size / 2;	/* one half-buffer drained */
	if (bc->free >= card->send.size) {
		/* whole ring reported free: we missed a refill window */
		if (!(bc->txstate & (TX_UNDERRUN | TX_INIT))) {
			pr_info("%s: B%1d TX underrun state %x\n", card->name,
				bc->bch.nr, bc->txstate);
			bc->txstate |= TX_UNDERRUN;
		}
		bc->free = card->send.size;	/* clamp */
	}
	ret = bc_next_frame(bc);
	if (!ret) {
		if (test_bit(FLG_HDLC, &bc->bch.Flags)) {
			fill_hdlc_flag(bc);	/* keep sending idle flags */
			return;
		}
		pr_debug("%s: B%1d TX no data free %d idx %d/%d\n", card->name,
			 bc->bch.nr, bc->free, bc->idx, card->send.idx);
		if (!(bc->txstate & (TX_IDLE | TX_INIT))) {
			/* transparent channel: pad ring with 0xff */
			fill_mem(bc, bc->idx, bc->free, 0xff);
			if (bc->free == card->send.size)
				bc->txstate |= TX_IDLE;
		}
	}
}
/*
 * Send-side IRQ dispatch for both B-channels.
 * Note: send data leaves via the READ DMA channel of the chip.
 */
static void
send_tiger(struct tiger_hw *card, u8 irq_stat)
{
	int ch;

	/* the same read-DMA status bit twice in a row means we lost sync */
	if ((irq_stat & card->last_is0) & NJ_IRQM0_RD_MASK) {
		pr_info("%s: tiger warn write double dma %x/%x\n",
			card->name, irq_stat, card->last_is0);
		return;
	}
	card->last_is0 &= ~NJ_IRQM0_RD_MASK;
	card->last_is0 |= (irq_stat & NJ_IRQM0_RD_MASK);

	for (ch = 0; ch < 2; ch++) {
		if (test_bit(FLG_ACTIVE, &card->bc[ch].bch.Flags))
			send_tiger_bc(card, &card->bc[ch]);
	}
}
/*
 * Shared interrupt handler: services the ISAC (D-channel) and decodes
 * the tiger DMA pointers into per-direction half-buffer events for
 * send_tiger()/recv_tiger().
 */
static irqreturn_t
nj_irq(int intno, void *dev_id)
{
	struct tiger_hw *card = dev_id;
	u8 val, s1val, s0val;

	spin_lock(&card->lock);
	s0val = inb(card->base | NJ_IRQSTAT0);
	s1val = inb(card->base | NJ_IRQSTAT1);
	/* NJ_ISACIRQ set means "no ISAC interrupt"; with IRQSTAT0 also
	 * clear this interrupt is not ours */
	if ((s1val & NJ_ISACIRQ) && (s0val == 0)) {
		/* shared IRQ */
		spin_unlock(&card->lock);
		return IRQ_NONE;
	}
	pr_debug("%s: IRQSTAT0 %02x IRQSTAT1 %02x\n", card->name, s0val, s1val);
	card->irqcnt++;
	if (!(s1val & NJ_ISACIRQ)) {
		val = ReadISAC_nj(card, ISAC_ISTA);
		if (val)
			mISDNisac_irq(&card->isac, val);
	}
	if (s0val)
		/* write to clear */
		outb(s0val, card->base | NJ_IRQSTAT0);
	else
		goto end;
	s1val = s0val;	/* keep the raw status for the debug print below */
	/* set bits in sval to indicate which page is free */
	card->recv.dmacur = inl(card->base | NJ_DMA_WRITE_ADR);
	card->recv.idx = (card->recv.dmacur - card->recv.dmastart) >> 2;
	if (card->recv.dmacur < card->recv.dmairq)
		s0val = 0x08;	/* the 2nd write area is free */
	else
		s0val = 0x04;	/* the 1st write area is free */
	card->send.dmacur = inl(card->base | NJ_DMA_READ_ADR);
	card->send.idx = (card->send.dmacur - card->send.dmastart) >> 2;
	if (card->send.dmacur < card->send.dmairq)
		s0val |= 0x02;	/* the 2nd read area is free */
	else
		s0val |= 0x01;	/* the 1st read area is free */
	pr_debug("%s: DMA Status %02x/%02x/%02x %d/%d\n", card->name,
		 s1val, s0val, card->last_is0,
		 card->recv.idx, card->send.idx);
	/* test if we have a DMA interrupt */
	if (s0val != card->last_is0) {
		if ((s0val & NJ_IRQM0_RD_MASK) !=
		    (card->last_is0 & NJ_IRQM0_RD_MASK))
			/* read-DMA half switched: host TX direction */
			send_tiger(card, s0val);
		if ((s0val & NJ_IRQM0_WR_MASK) !=
		    (card->last_is0 & NJ_IRQM0_WR_MASK))
			/* write-DMA half switched: host RX direction */
			recv_tiger(card, s0val);
	}
end:
	spin_unlock(&card->lock);
	return IRQ_HANDLED;
}
/*
 * Layer2 -> Layer1 message handler for the B-channels.
 * Consumes skb on success (returns 0); caller keeps ownership on error.
 */
static int
nj_l2l1B(struct mISDNchannel *ch, struct sk_buff *skb)
{
	int ret = -EINVAL;
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	u32 id;
	u_long flags;

	switch (hh->prim) {
	case PH_DATA_REQ:
		spin_lock_irqsave(&card->lock, flags);
		ret = bchannel_senddata(bch, skb);
		if (ret > 0) { /* direct TX */
			id = hh->id; /* skb can be freed */
			fill_dma(bc);
			ret = 0;
			spin_unlock_irqrestore(&card->lock, flags);
			/* confirm HDLC frames here; transparent data is
			 * confirmed from the TX path via confirm_Bsend() */
			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
				queue_ch_frame(ch, PH_DATA_CNF, id, NULL);
		} else
			spin_unlock_irqrestore(&card->lock, flags);
		return ret;
	case PH_ACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		if (!test_and_set_bit(FLG_ACTIVE, &bch->Flags))
			ret = mode_tiger(bc, ch->protocol);
		else
			ret = 0;	/* already active */
		spin_unlock_irqrestore(&card->lock, flags);
		if (!ret)
			_queue_data(ch, PH_ACTIVATE_IND, MISDN_ID_ANY, 0,
				    NULL, GFP_KERNEL);
		break;
	case PH_DEACTIVATE_REQ:
		spin_lock_irqsave(&card->lock, flags);
		mISDN_clear_bchannel(bch);
		mode_tiger(bc, ISDN_P_NONE);
		spin_unlock_irqrestore(&card->lock, flags);
		_queue_data(ch, PH_DEACTIVATE_IND, MISDN_ID_ANY, 0,
			    NULL, GFP_KERNEL);
		ret = 0;
		break;
	}
	if (!ret)
		dev_kfree_skb(skb);
	return ret;
}
/*
 * Per-B-channel control operations. Only MISDN_CTRL_GETOP is handled;
 * it reports that no channel-specific operations are supported.
 */
static int
channel_bctrl(struct tiger_ch *bc, struct mISDN_ctrl_req *cq)
{
	struct tiger_hw *card = bc->bch.hw;

	if (cq->op == MISDN_CTRL_GETOP) {
		cq->op = 0;	/* nothing supported */
		return 0;
	}
	/* MISDN_CTRL_FILL_EMPTY: not implemented yet, treated as unknown */
	pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
	return -EINVAL;
}
/*
 * B-channel control entry point (CLOSE_CHANNEL / CONTROL_CHANNEL).
 */
static int
nj_bctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct bchannel *bch = container_of(ch, struct bchannel, ch);
	struct tiger_ch *bc = container_of(bch, struct tiger_ch, bch);
	struct tiger_hw *card = bch->hw;
	int ret = -EINVAL;
	u_long flags;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case CLOSE_CHANNEL:
		test_and_clear_bit(FLG_OPEN, &bch->Flags);
		if (test_bit(FLG_ACTIVE, &bch->Flags)) {
			/* shut the hardware channel down under the lock */
			spin_lock_irqsave(&card->lock, flags);
			mISDN_freebchannel(bch);
			test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
			test_and_clear_bit(FLG_ACTIVE, &bch->Flags);
			mode_tiger(bc, ISDN_P_NONE);
			spin_unlock_irqrestore(&card->lock, flags);
		}
		ch->protocol = ISDN_P_NONE;
		ch->peer = NULL;
		/* balances the try_module_get() done in nj_dctrl on open */
		module_put(THIS_MODULE);
		ret = 0;
		break;
	case CONTROL_CHANNEL:
		ret = channel_bctrl(bc, arg);
		break;
	default:
		pr_info("%s: %s unknown prim(%x)\n", card->name, __func__, cmd);
	}
	return ret;
}
/*
 * Card-level control operations. Only the hardware test loop is
 * supported; it is forwarded to the ISAC driver.
 */
static int
channel_ctrl(struct tiger_hw *card, struct mISDN_ctrl_req *cq)
{
	switch (cq->op) {
	case MISDN_CTRL_GETOP:
		cq->op = MISDN_CTRL_LOOP;	/* loopback is all we offer */
		return 0;
	case MISDN_CTRL_LOOP:
		/* cq->channel: 0 disable, 1 B1 loop 2 B2 loop, 3 both */
		if (cq->channel < 0 || cq->channel > 3)
			return -EINVAL;
		return card->isac.ctrl(&card->isac, HW_TESTLOOP, cq->channel);
	default:
		pr_info("%s: %s unknown Op %x\n", card->name, __func__, cq->op);
		return -EINVAL;
	}
}
/*
 * Open one of the two B-channels for a protocol stack.
 * Valid channel numbers are 1 and 2; each channel can be open once.
 * Returns 0 on success, -EINVAL for bad parameters, -EBUSY if open.
 */
static int
open_bchannel(struct tiger_hw *card, struct channel_req *rq)
{
	struct bchannel *bch;

	/* Reject channel 0 (the D-channel address) explicitly: the
	 * original check only tested "> 2", so channel 0 indexed
	 * card->bc[-1] below - an out-of-bounds access. */
	if (rq->adr.channel < 1 || rq->adr.channel > 2)
		return -EINVAL;
	if (rq->protocol == ISDN_P_NONE)
		return -EINVAL;
	bch = &card->bc[rq->adr.channel - 1].bch;
	if (test_and_set_bit(FLG_OPEN, &bch->Flags))
		return -EBUSY; /* b-channel can be only open once */
	test_and_clear_bit(FLG_FILLEMPTY, &bch->Flags);
	bch->ch.protocol = rq->protocol;
	rq->ch = &bch->ch;
	return 0;
}
/*
 * device control function
 */
static int
nj_dctrl(struct mISDNchannel *ch, u32 cmd, void *arg)
{
	struct mISDNdevice *dev = container_of(ch, struct mISDNdevice, D);
	struct dchannel *dch = container_of(dev, struct dchannel, dev);
	struct tiger_hw *card = dch->hw;
	struct channel_req *rq;
	int err = 0;

	pr_debug("%s: %s cmd:%x %p\n", card->name, __func__, cmd, arg);
	switch (cmd) {
	case OPEN_CHANNEL:
		rq = arg;
		/* D-channel (TE S0) opens go to the ISAC driver,
		 * everything else is a B-channel request */
		if (rq->protocol == ISDN_P_TE_S0)
			err = card->isac.open(&card->isac, rq);
		else
			err = open_bchannel(card, rq);
		if (err)
			break;
		/* pin the module while a channel is open */
		if (!try_module_get(THIS_MODULE))
			pr_info("%s: cannot get module\n", card->name);
		break;
	case CLOSE_CHANNEL:
		pr_debug("%s: dev(%d) close from %p\n", card->name, dch->dev.id,
			 __builtin_return_address(0));
		module_put(THIS_MODULE);
		break;
	case CONTROL_CHANNEL:
		err = channel_ctrl(card, arg);
		break;
	default:
		pr_debug("%s: %s unknown command %x\n",
			 card->name, __func__, cmd);
		return -EINVAL;
	}
	return err;
}
/*
 * Bring the card up: hook the (shared) IRQ, reset the hardware and
 * initialise the ISAC and the tiger DMA engine. Returns 0 or a
 * negative errno; on failure the caller runs nj_release() for cleanup.
 */
static int
nj_init_card(struct tiger_hw *card)
{
	u_long flags;
	int ret;

	spin_lock_irqsave(&card->lock, flags);
	nj_disable_hwirq(card);	/* quiesce before requesting the IRQ */
	spin_unlock_irqrestore(&card->lock, flags);
	card->irq = card->pdev->irq;
	if (request_irq(card->irq, nj_irq, IRQF_SHARED, card->name, card)) {
		pr_info("%s: couldn't get interrupt %d\n",
			card->name, card->irq);
		card->irq = -1;	/* mark "not acquired" for nj_release() */
		return -EIO;
	}
	spin_lock_irqsave(&card->lock, flags);
	nj_reset(card);
	ret = card->isac.init(&card->isac);
	if (ret)
		goto error;
	ret = inittiger(card);
	if (ret)
		goto error;
	/* both B-channels start disabled */
	mode_tiger(&card->bc[0], ISDN_P_NONE);
	mode_tiger(&card->bc[1], ISDN_P_NONE);
error:
	spin_unlock_irqrestore(&card->lock, flags);
	return ret;
}
/*
 * Tear a card down in reverse order of setup: hardware off, I/O
 * region, IRQ, mISDN device, B-channel buffers, DMA memory, card list
 * entry, and finally the card structure itself. Also safe after a
 * partially failed setup_instance().
 */
static void
nj_release(struct tiger_hw *card)
{
	u_long flags;
	int i;

	if (card->base_s) {	/* I/O region held -> hardware reachable */
		spin_lock_irqsave(&card->lock, flags);
		nj_disable_hwirq(card);
		mode_tiger(&card->bc[0], ISDN_P_NONE);
		mode_tiger(&card->bc[1], ISDN_P_NONE);
		card->isac.release(&card->isac);
		spin_unlock_irqrestore(&card->lock, flags);
		release_region(card->base, card->base_s);
		card->base_s = 0;
	}
	if (card->irq > 0)
		free_irq(card->irq, card);
	if (card->isac.dch.dev.dev.class)	/* registered with mISDN? */
		mISDN_unregister_device(&card->isac.dch.dev);
	for (i = 0; i < 2; i++) {
		mISDN_freebchannel(&card->bc[i].bch);
		kfree(card->bc[i].hsbuf);
		kfree(card->bc[i].hrbuf);
	}
	if (card->dma_p)
		pci_free_consistent(card->pdev, NJ_DMA_SIZE,
				    card->dma_p, card->dma);
	write_lock_irqsave(&card_lock, flags);
	list_del(&card->list);
	write_unlock_irqrestore(&card_lock, flags);
	pci_clear_master(card->pdev);
	pci_disable_device(card->pdev);
	pci_set_drvdata(card->pdev, NULL);
	kfree(card);
}
/*
 * Claim the card's I/O region and wire up the ISAC access functions.
 * Returns 0 on success, -EIO if the region is already in use.
 */
static int
nj_setup(struct tiger_hw *card)
{
	card->base = pci_resource_start(card->pdev, 0);
	card->base_s = pci_resource_len(card->pdev, 0);
	if (!request_region(card->base, card->base_s, card->name)) {
		pr_info("%s: NETjet config port %#x-%#x already in use\n",
			card->name, card->base,
			(u32)(card->base + card->base_s - 1));
		card->base_s = 0;	/* tells nj_release "no region held" */
		return -EIO;
	}
	ASSIGN_FUNC(nj, ISAC, card->isac);
	return 0;
}
/*
 * Final per-card initialisation: register the card with the mISDN
 * core and bring the hardware up. On any failure the card is fully
 * torn down via nj_release() and the error is returned.
 */
static int __devinit
setup_instance(struct tiger_hw *card)
{
	int i, err;
	u_long flags;

	snprintf(card->name, MISDN_MAX_IDLEN - 1, "netjet.%d", nj_cnt + 1);
	write_lock_irqsave(&card_lock, flags);
	list_add_tail(&card->list, &Cards);
	write_unlock_irqrestore(&card_lock, flags);

	_set_debug(card);
	card->isac.name = card->name;
	spin_lock_init(&card->lock);
	card->isac.hwlock = &card->lock;
	mISDNisac_init(&card->isac, card);

	card->isac.dch.dev.Bprotocols = (1 << (ISDN_P_B_RAW & ISDN_P_B_MASK)) |
		(1 << (ISDN_P_B_HDLC & ISDN_P_B_MASK));
	card->isac.dch.dev.D.ctrl = nj_dctrl;
	for (i = 0; i < 2; i++) {
		card->bc[i].bch.nr = i + 1;
		set_channelmap(i + 1, card->isac.dch.dev.channelmap);
		mISDN_initbchannel(&card->bc[i].bch, MAX_DATA_MEM);
		/* fix: hw was assigned twice in this loop; once suffices */
		card->bc[i].bch.hw = card;
		card->bc[i].bch.ch.send = nj_l2l1B;
		card->bc[i].bch.ch.ctrl = nj_bctrl;
		card->bc[i].bch.ch.nr = i + 1;
		list_add(&card->bc[i].bch.ch.list,
			 &card->isac.dch.dev.bchannels);
	}
	err = nj_setup(card);
	if (err)
		goto error;
	err = mISDN_register_device(&card->isac.dch.dev, &card->pdev->dev,
				    card->name);
	if (err)
		goto error;
	err = nj_init_card(card);
	if (!err) {
		nj_cnt++;
		pr_notice("Netjet %d cards installed\n", nj_cnt);
		return 0;
	}
error:
	nj_release(card);
	return err;
}
/*
 * PCI probe: filter out known-incompatible boards that reuse the
 * TigerJet device ID, enable the device, detect TJ300 vs TJ320 and
 * hand off to setup_instance().
 */
static int __devinit
nj_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	int err = -ENOMEM;
	u32 cfg;	/* fix: pci_read_config_dword() takes a u32 *, not int * */
	struct tiger_hw *card;

	if (pdev->subsystem_vendor == 0x8086 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium X100P/X101P not handled\n");
		return -ENODEV;
	}
	if (pdev->subsystem_vendor == 0x55 &&
	    pdev->subsystem_device == 0x02) {
		pr_notice("Netjet: Enter!Now not handled yet\n");
		return -ENODEV;
	}
	if (pdev->subsystem_vendor == 0xb100 &&
	    pdev->subsystem_device == 0x0003) {
		pr_notice("Netjet: Digium TDM400P not handled yet\n");
		return -ENODEV;
	}
	/* NOTE(review): probe context may sleep; GFP_KERNEL would suffice */
	card = kzalloc(sizeof(struct tiger_hw), GFP_ATOMIC);
	if (!card) {
		pr_info("No kmem for Netjet\n");
		return err;
	}
	card->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		kfree(card);
		return err;
	}

	printk(KERN_INFO "nj_probe(mISDN): found adapter at %s\n",
	       pci_name(pdev));

	pci_set_master(pdev);

	/* the TJ300 and TJ320 must be detected, the IRQ handling is different
	 * unfortunately the chips use the same device ID, but the TJ320 has
	 * the bit20 in status PCI cfg register set
	 */
	pci_read_config_dword(pdev, 0x04, &cfg);
	if (cfg & 0x00100000)
		card->typ = NETJET_S_TJ320;
	else
		card->typ = NETJET_S_TJ300;

	card->base = pci_resource_start(pdev, 0);
	card->irq = pdev->irq;
	pci_set_drvdata(pdev, card);
	err = setup_instance(card);
	if (err)
		pci_set_drvdata(pdev, NULL);
	return err;
}
/* PCI remove: release everything allocated by nj_probe/setup_instance. */
static void __devexit nj_remove(struct pci_dev *pdev)
{
	struct tiger_hw *card = pci_get_drvdata(pdev);

	if (!card) {
		pr_info("%s drvdata already removed\n", __func__);
		return;
	}
	nj_release(card);
}
/* We cannot select cards with PCI_SUB... IDs, since here are cards with
 * SUB IDs set to PCI_ANY_ID, so we need to match all and reject
 * known other cards which not work with this driver - see probe function */
static struct pci_device_id nj_pci_ids[] __devinitdata = {
	{ PCI_VENDOR_ID_TIGERJET, PCI_DEVICE_ID_TIGERJET_300,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{ }	/* terminating entry */
};
MODULE_DEVICE_TABLE(pci, nj_pci_ids);

/* PCI driver glue */
static struct pci_driver nj_driver = {
	.name = "netjet",
	.probe = nj_probe,
	.remove = __devexit_p(nj_remove),
	.id_table = nj_pci_ids,
};
/* Module init: announce the driver and register with the PCI core. */
static int __init nj_init(void)
{
	pr_notice("Netjet PCI driver Rev. %s\n", NETJET_REV);
	/* pci_register_driver() already returns 0 or a negative errno,
	 * so the intermediate err variable was redundant */
	return pci_register_driver(&nj_driver);
}
/* Module exit: unregistering triggers nj_remove() for each card. */
static void __exit nj_cleanup(void)
{
	pci_unregister_driver(&nj_driver);
}
module_init(nj_init);
module_exit(nj_cleanup);
| gpl-2.0 |
IlyaKrotkih/huawei_u8850_kernel | fs/yaffs2/yaffs_ecc.c | 651 | 7672 | /*
* YAFFS: Yet Another Flash File System. A NAND-flash specific file system.
*
* Copyright (C) 2002-2007 Aleph One Ltd.
* for Toby Churchill Ltd and Brightstar Engineering
*
* Created by Charles Manning <charles@aleph1.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* This code implements the ECC algorithm used in SmartMedia.
*
* The ECC comprises 22 bits of parity information and is stuffed into 3 bytes.
* The two unused bit are set to 1.
* The ECC can correct single bit errors in a 256-byte page of data. Thus, two such ECC
* blocks are used on a 512-byte NAND page.
*
*/
/* Table generated by gen-ecc.c
* Using a table means we do not have to calculate p1..p4 and p1'..p4'
* for each byte of data. These are instead provided in a table in bits7..2.
* Bit 0 of each entry indicates whether the entry has an odd or even parity, and therefore
* this bytes influence on the line parity.
*/
const char *yaffs_ecc_c_version =
"$Id$";
#include "yportenv.h"
#include "yaffs_ecc.h"
static const unsigned char column_parity_table[] = {
0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
0xa9, 0xfc, 0xf0, 0xa5, 0xcc, 0x99, 0x95, 0xc0,
0xc0, 0x95, 0x99, 0xcc, 0xa5, 0xf0, 0xfc, 0xa9,
0x3c, 0x69, 0x65, 0x30, 0x59, 0x0c, 0x00, 0x55,
0x55, 0x00, 0x0c, 0x59, 0x30, 0x65, 0x69, 0x3c,
0x30, 0x65, 0x69, 0x3c, 0x55, 0x00, 0x0c, 0x59,
0x59, 0x0c, 0x00, 0x55, 0x3c, 0x69, 0x65, 0x30,
0xa5, 0xf0, 0xfc, 0xa9, 0xc0, 0x95, 0x99, 0xcc,
0xcc, 0x99, 0x95, 0xc0, 0xa9, 0xfc, 0xf0, 0xa5,
0x0c, 0x59, 0x55, 0x00, 0x69, 0x3c, 0x30, 0x65,
0x65, 0x30, 0x3c, 0x69, 0x00, 0x55, 0x59, 0x0c,
0x99, 0xcc, 0xc0, 0x95, 0xfc, 0xa9, 0xa5, 0xf0,
0xf0, 0xa5, 0xa9, 0xfc, 0x95, 0xc0, 0xcc, 0x99,
0x95, 0xc0, 0xcc, 0x99, 0xf0, 0xa5, 0xa9, 0xfc,
0xfc, 0xa9, 0xa5, 0xf0, 0x99, 0xcc, 0xc0, 0x95,
0x00, 0x55, 0x59, 0x0c, 0x65, 0x30, 0x3c, 0x69,
0x69, 0x3c, 0x30, 0x65, 0x0c, 0x59, 0x55, 0x00,
};
/* Count the bits in an unsigned char or a U32 */
static int yaffs_CountBits(unsigned char x)
{
	int count = 0;

	/* shift out one bit per iteration, summing the set ones */
	for (; x != 0; x >>= 1)
		count += x & 1;
	return count;
}
/* 32-bit variant of yaffs_CountBits: population count of x */
static int yaffs_CountBits32(unsigned x)
{
	int count = 0;

	for (; x != 0; x >>= 1)
		count += x & 1;
	return count;
}
/* Calculate the ECC for a 256-byte block of data */
void yaffs_ECCCalculate(const unsigned char *data, unsigned char *ecc)
{
	unsigned int i;
	unsigned char col_parity = 0;
	unsigned char line_parity = 0;
	unsigned char line_parity_prime = 0;
	unsigned char packed;
	unsigned char src_bit, dst_bit;
	unsigned char b;

	/* accumulate column parity and the two line parities */
	for (i = 0; i < 256; i++) {
		b = column_parity_table[*data++];
		col_parity ^= b;
		if (b & 0x01) {	/* odd number of bits in the byte */
			line_parity ^= i;
			line_parity_prime ^= ~i;
		}
	}

	ecc[2] = (~col_parity) | 0x03;

	/* Interleave line_parity bits 7..4 with line_parity_prime bits
	 * 7..4 into one byte: lp7 lpp7 lp6 lpp6 lp5 lpp5 lp4 lpp4 */
	packed = 0;
	dst_bit = 0x80;
	for (src_bit = 0x80; src_bit >= 0x10; src_bit >>= 1) {
		if (line_parity & src_bit)
			packed |= dst_bit;
		dst_bit >>= 1;
		if (line_parity_prime & src_bit)
			packed |= dst_bit;
		dst_bit >>= 1;
	}
	ecc[1] = ~packed;

	/* same interleaving for bits 3..0 */
	packed = 0;
	dst_bit = 0x80;
	for (src_bit = 0x08; src_bit >= 0x01; src_bit >>= 1) {
		if (line_parity & src_bit)
			packed |= dst_bit;
		dst_bit >>= 1;
		if (line_parity_prime & src_bit)
			packed |= dst_bit;
		dst_bit >>= 1;
	}
	ecc[0] = ~packed;

#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
	/* Swap the bytes into the wrong order */
	packed = ecc[0];
	ecc[0] = ecc[1];
	ecc[1] = packed;
#endif
}
/* Correct the ECC on a 256 byte block of data.
 * Returns 0 if no error, 1 if a single-bit error was corrected (in
 * the data or in the ECC itself), -1 if the error is unrecoverable.
 */
int yaffs_ECCCorrect(unsigned char *data, unsigned char *read_ecc,
		     const unsigned char *test_ecc)
{
	unsigned char d0, d1, d2;	/* deltas */

	d0 = read_ecc[0] ^ test_ecc[0];
	d1 = read_ecc[1] ^ test_ecc[1];
	d2 = read_ecc[2] ^ test_ecc[2];

	if ((d0 | d1 | d2) == 0)
		return 0;	/* no error */

	/* A single data-bit error flips exactly one bit of each
	 * parity/parity' pair, i.e. every adjacent bit pair of the
	 * deltas is "01" or "10" (the xor-with-shift test below). */
	if (((d0 ^ (d0 >> 1)) & 0x55) == 0x55 &&
	    ((d1 ^ (d1 >> 1)) & 0x55) == 0x55 &&
	    ((d2 ^ (d2 >> 1)) & 0x54) == 0x54) {
		/* Single bit (recoverable) error in data */

		unsigned byte;
		unsigned bit;

#ifdef CONFIG_YAFFS_ECC_WRONG_ORDER
		/* swap the bytes to correct for the wrong order */
		unsigned char t;

		t = d0;
		d0 = d1;
		d1 = t;
#endif

		bit = byte = 0;

		/* reassemble the faulty byte index from the even bits of
		 * the line-parity deltas (inverse of the calc routine) */
		if (d1 & 0x80)
			byte |= 0x80;
		if (d1 & 0x20)
			byte |= 0x40;
		if (d1 & 0x08)
			byte |= 0x20;
		if (d1 & 0x02)
			byte |= 0x10;
		if (d0 & 0x80)
			byte |= 0x08;
		if (d0 & 0x20)
			byte |= 0x04;
		if (d0 & 0x08)
			byte |= 0x02;
		if (d0 & 0x02)
			byte |= 0x01;

		/* and the bit position within that byte */
		if (d2 & 0x80)
			bit |= 0x04;
		if (d2 & 0x20)
			bit |= 0x02;
		if (d2 & 0x08)
			bit |= 0x01;

		data[byte] ^= (1 << bit);	/* flip the bad bit back */

		return 1; /* Corrected the error */
	}

	/* exactly one delta bit set -> the error is in the ECC itself */
	if ((yaffs_CountBits(d0) +
	     yaffs_CountBits(d1) +
	     yaffs_CountBits(d2)) == 1) {
		/* Recoverable error in ecc */
		read_ecc[0] = test_ecc[0];
		read_ecc[1] = test_ecc[1];
		read_ecc[2] = test_ecc[2];

		return 1; /* Corrected the error */
	}

	/* Unrecoverable error */
	return -1;
}
/*
 * ECCxxxOther does ECC calcs on arbitrary n bytes of data
 */
/* Compute column parity (6 bits) and full-width line parities for an
 * nBytes-long buffer, storing them in *eccOther. */
void yaffs_ECCCalculateOther(const unsigned char *data, unsigned nBytes,
			     yaffs_ECCOther *eccOther)
{
	unsigned int i;
	unsigned char col_parity = 0;
	unsigned line_parity = 0;
	unsigned line_parity_prime = 0;
	unsigned char b;

	for (i = 0; i < nBytes; i++) {
		b = column_parity_table[*data++];
		col_parity ^= b;

		if (b & 0x01) {
			/* odd number of bits in the byte */
			line_parity ^= i;
			line_parity_prime ^= ~i;
		}
	}

	/* table entries carry parity in bits 7..2; keep only those */
	eccOther->colParity = (col_parity >> 2) & 0x3f;
	eccOther->lineParity = line_parity;
	eccOther->lineParityPrime = line_parity_prime;
}
/* Correct a buffer previously protected by yaffs_ECCCalculateOther().
 * Returns 0 if no error, 1 if a single-bit error was corrected,
 * -1 if the error is unrecoverable. */
int yaffs_ECCCorrectOther(unsigned char *data, unsigned nBytes,
			  yaffs_ECCOther *read_ecc,
			  const yaffs_ECCOther *test_ecc)
{
	unsigned char cDelta;	/* column parity delta */
	unsigned lDelta;	/* line parity delta */
	unsigned lDeltaPrime;	/* line parity delta */
	unsigned bit;

	cDelta = read_ecc->colParity ^ test_ecc->colParity;
	lDelta = read_ecc->lineParity ^ test_ecc->lineParity;
	lDeltaPrime = read_ecc->lineParityPrime ^ test_ecc->lineParityPrime;

	if ((cDelta | lDelta | lDeltaPrime) == 0)
		return 0; /* no error */

	/* single data-bit error: line deltas are exact complements and
	 * every column-delta bit pair is "01" or "10" */
	if (lDelta == ~lDeltaPrime &&
	    (((cDelta ^ (cDelta >> 1)) & 0x15) == 0x15)) {
		/* Single bit (recoverable) error in data */

		/* recover the bit position within the byte */
		bit = 0;

		if (cDelta & 0x20)
			bit |= 0x04;
		if (cDelta & 0x08)
			bit |= 0x02;
		if (cDelta & 0x02)
			bit |= 0x01;

		/* bounds check: lDelta is the byte index into data[] */
		if (lDelta >= nBytes)
			return -1;

		data[lDelta] ^= (1 << bit);

		return 1; /* corrected */
	}

	/* exactly one delta bit set -> the error is in the ECC itself */
	if ((yaffs_CountBits32(lDelta) + yaffs_CountBits32(lDeltaPrime) +
	     yaffs_CountBits(cDelta)) == 1) {
		/* Recoverable error in ecc */

		*read_ecc = *test_ecc;
		return 1; /* corrected */
	}

	/* Unrecoverable error */
	return -1;
}
| gpl-2.0 |
WarrickJiang/linux-stable | drivers/net/wireless/b43/phy_lp.c | 1419 | 99028 | /*
Broadcom B43 wireless driver
IEEE 802.11a/g LP-PHY driver
Copyright (c) 2008-2009 Michael Buesch <m@bues.ch>
Copyright (c) 2009 Gábor Stefanik <netrolller.3d@gmail.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#include <linux/slab.h>
#include "b43.h"
#include "main.h"
#include "phy_lp.h"
#include "phy_common.h"
#include "tables_lpphy.h"
/* Map an 802.11 channel number to its centre frequency in MHz. */
static inline u16 channel2freq_lp(u8 channel)
{
	/* 2.4 GHz band: channels 1..13 at 5 MHz spacing, 14 is special */
	if (channel == 14)
		return 2484;
	if (channel < 14)
		return 2407 + 5 * channel;
	/* 5 GHz band for channels >= 184 uses the 4.9 GHz base */
	return (channel < 184) ? (5000 + 5 * channel) : (4000 + 5 * channel);
}
/* Default channel: 1 on the 2.4 GHz band, 36 on 5 GHz. */
static unsigned int b43_lpphy_op_get_default_chan(struct b43_wldev *dev)
{
	return (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) ? 1 : 36;
}
/* Allocate the LP-PHY private data. Returns 0 or -ENOMEM. */
static int b43_lpphy_op_allocate(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = kzalloc(sizeof(*lpphy), GFP_KERNEL);

	if (!lpphy)
		return -ENOMEM;
	dev->phy.lp = lpphy;
	return 0;
}
/* Reset the LP-PHY state and apply the non-zero defaults. */
static void b43_lpphy_op_prepare_structs(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;

	memset(lpphy, 0, sizeof(*lpphy));
	lpphy->antenna = B43_ANTENNA_DEFAULT;
	//TODO
}
/* Release the LP-PHY private data (kfree(NULL) is a no-op). */
static void b43_lpphy_op_free(struct b43_wldev *dev)
{
	kfree(dev->phy.lp);
	dev->phy.lp = NULL;
}
/* http://bcm-v4.sipsolutions.net/802.11/PHY/LP/ReadBandSrom */
/* Load the band-specific calibration values (isolation, RSSI, PA
 * coefficients, per-rate max TX power) from the SPROM into lpphy. */
static void lpphy_read_band_sprom(struct b43_wldev *dev)
{
	struct ssb_sprom *sprom = dev->dev->bus_sprom;
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u16 cckpo, maxpwr;
	u32 ofdmpo;
	int i;

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		lpphy->tx_isolation_med_band = sprom->tri2g;
		lpphy->bx_arch = sprom->bxa2g;
		lpphy->rx_pwr_offset = sprom->rxpo2g;
		lpphy->rssi_vf = sprom->rssismf2g;
		lpphy->rssi_vc = sprom->rssismc2g;
		lpphy->rssi_gs = sprom->rssisav2g;
		lpphy->txpa[0] = sprom->pa0b0;
		lpphy->txpa[1] = sprom->pa0b1;
		lpphy->txpa[2] = sprom->pa0b2;
		maxpwr = sprom->maxpwr_bg;
		lpphy->max_tx_pwr_med_band = maxpwr;
		cckpo = sprom->cck2gpo;
		if (cckpo) {
			/* NOTE(review): the CCK rates (i = 0..3) are derived
			 * from ofdm2gpo although cck2gpo was just read above;
			 * confirm against the ReadBandSrom specification. */
			ofdmpo = sprom->ofdm2gpo;
			for (i = 0; i < 4; i++) {
				lpphy->tx_max_rate[i] =
					maxpwr - (ofdmpo & 0xF) * 2;
				ofdmpo >>= 4;
			}
			ofdmpo = sprom->ofdm2gpo;
			for (i = 4; i < 15; i++) {
				lpphy->tx_max_rate[i] =
					maxpwr - (ofdmpo & 0xF) * 2;
				ofdmpo >>= 4;
			}
		} else {
			/* no per-rate CCK offsets: single OFDM offset */
			u8 opo = sprom->opo;
			for (i = 0; i < 4; i++)
				lpphy->tx_max_rate[i] = maxpwr;
			for (i = 4; i < 15; i++)
				lpphy->tx_max_rate[i] = maxpwr - opo;
		}
	} else { /* 5GHz */
		lpphy->tx_isolation_low_band = sprom->tri5gl;
		lpphy->tx_isolation_med_band = sprom->tri5g;
		lpphy->tx_isolation_hi_band = sprom->tri5gh;
		lpphy->bx_arch = sprom->bxa5g;
		lpphy->rx_pwr_offset = sprom->rxpo5g;
		lpphy->rssi_vf = sprom->rssismf5g;
		lpphy->rssi_vc = sprom->rssismc5g;
		lpphy->rssi_gs = sprom->rssisav5g;
		lpphy->txpa[0] = sprom->pa1b0;
		lpphy->txpa[1] = sprom->pa1b1;
		lpphy->txpa[2] = sprom->pa1b2;
		lpphy->txpal[0] = sprom->pa1lob0;
		lpphy->txpal[1] = sprom->pa1lob1;
		lpphy->txpal[2] = sprom->pa1lob2;
		lpphy->txpah[0] = sprom->pa1hib0;
		lpphy->txpah[1] = sprom->pa1hib1;
		lpphy->txpah[2] = sprom->pa1hib2;
		/* three 5 GHz sub-bands: low / mid / high */
		maxpwr = sprom->maxpwr_al;
		ofdmpo = sprom->ofdm5glpo;
		lpphy->max_tx_pwr_low_band = maxpwr;
		for (i = 4; i < 12; i++) {
			lpphy->tx_max_ratel[i] = maxpwr - (ofdmpo & 0xF) * 2;
			ofdmpo >>= 4;
		}
		maxpwr = sprom->maxpwr_a;
		ofdmpo = sprom->ofdm5gpo;
		lpphy->max_tx_pwr_med_band = maxpwr;
		for (i = 4; i < 12; i++) {
			lpphy->tx_max_rate[i] = maxpwr - (ofdmpo & 0xF) * 2;
			ofdmpo >>= 4;
		}
		maxpwr = sprom->maxpwr_ah;
		ofdmpo = sprom->ofdm5ghpo;
		lpphy->max_tx_pwr_hi_band = maxpwr;
		for (i = 4; i < 12; i++) {
			lpphy->tx_max_rateh[i] = maxpwr - (ofdmpo & 0xF) * 2;
			ofdmpo >>= 4;
		}
	}
}
/*
 * Program the band-dependent TX isolation values into gain tables 12
 * and 13. Only valid for PHY revisions 0 and 1.
 */
static void lpphy_adjust_gain_table(struct b43_wldev *dev, u32 freq)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u16 iso;
	u16 vals[3];

	B43_WARN_ON(dev->phy.rev >= 2);

	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
		iso = lpphy->tx_isolation_med_band;
	else if (freq <= 5320)
		iso = lpphy->tx_isolation_low_band;
	else if (freq <= 5700)
		iso = lpphy->tx_isolation_med_band;
	else
		iso = lpphy->tx_isolation_hi_band;

	vals[0] = ((iso - 26) / 12) << 12;
	vals[1] = vals[0] + 0x1000;
	vals[2] = vals[0] + 0x2000;

	b43_lptab_write_bulk(dev, B43_LPTAB16(13, 0), 3, vals);
	b43_lptab_write_bulk(dev, B43_LPTAB16(12, 0), 3, vals);
}
static void lpphy_table_init(struct b43_wldev *dev)
{
u32 freq = channel2freq_lp(b43_lpphy_op_get_default_chan(dev));
if (dev->phy.rev < 2)
lpphy_rev0_1_table_init(dev);
else
lpphy_rev2plus_table_init(dev);
lpphy_init_tx_gain_table(dev);
if (dev->phy.rev < 2)
lpphy_adjust_gain_table(dev, freq);
}
/* Baseband init for LP-PHY revisions 0 and 1: AFE/ADC setup, RSSI
 * configuration, board-specific TR lookup tables and band tweaks.
 * Register write order follows the vendor init sequence. */
static void lpphy_baseband_rev0_1_init(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->dev->sdev->bus;
	struct ssb_sprom *sprom = dev->dev->bus_sprom;
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u16 tmp, tmp2;

	/* AFE / DAC / override reset */
	b43_phy_mask(dev, B43_LPPHY_AFE_DAC_CTL, 0xF7FF);
	b43_phy_write(dev, B43_LPPHY_AFE_CTL, 0);
	b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVR, 0);
	b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_0, 0);
	b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, 0);
	b43_phy_set(dev, B43_LPPHY_AFE_DAC_CTL, 0x0004);
	/* sync/clip/gain thresholds */
	b43_phy_maskset(dev, B43_LPPHY_OFDMSYNCTHRESH0, 0xFF00, 0x0078);
	b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0x83FF, 0x5800);
	b43_phy_write(dev, B43_LPPHY_ADC_COMPENSATION_CTL, 0x0016);
	b43_phy_maskset(dev, B43_LPPHY_AFE_ADC_CTL_0, 0xFFF8, 0x0004);
	b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0x00FF, 0x5400);
	b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0x00FF, 0x2400);
	b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x2100);
	b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0x0006);
	b43_phy_mask(dev, B43_LPPHY_RX_RADIO_CTL, 0xFFFE);
	b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFFE0, 0x0005);
	b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFC1F, 0x0180);
	b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0x83FF, 0x3C00);
	b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xFFF0, 0x0005);
	b43_phy_maskset(dev, B43_LPPHY_GAIN_MISMATCH_LIMIT, 0xFFC0, 0x001A);
	b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0xFF00, 0x00B3);
	b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0x00FF, 0xAD00);
	b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB,
			0xFF00, lpphy->rx_pwr_offset);
	/* PA reference LDO: enable only with a FEM board in 5 GHz or
	 * when the board explicitly asks for it */
	if ((sprom->boardflags_lo & B43_BFL_FEM) &&
	    ((b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) ||
	     (sprom->boardflags_hi & B43_BFH_PAREF))) {
		ssb_pmu_set_ldo_voltage(&bus->chipco, LDO_PAREF, 0x28);
		ssb_pmu_set_ldo_paref(&bus->chipco, true);
		if (dev->phy.rev == 0) {
			b43_phy_maskset(dev, B43_LPPHY_LP_RF_SIGNAL_LUT,
					0xFFCF, 0x0010);
		}
		b43_lptab_write(dev, B43_LPTAB16(11, 7), 60);
	} else {
		ssb_pmu_set_ldo_paref(&bus->chipco, false);
		b43_phy_maskset(dev, B43_LPPHY_LP_RF_SIGNAL_LUT,
				0xFFCF, 0x0020);
		b43_lptab_write(dev, B43_LPTAB16(11, 7), 100);
	}
	/* RSSI configuration from SPROM values */
	tmp = lpphy->rssi_vf | lpphy->rssi_vc << 4 | 0xA000;
	b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_0, tmp);
	if (sprom->boardflags_hi & B43_BFH_RSSIINV)
		b43_phy_maskset(dev, B43_LPPHY_AFE_RSSI_CTL_1, 0xF000, 0x0AAA);
	else
		b43_phy_maskset(dev, B43_LPPHY_AFE_RSSI_CTL_1, 0xF000, 0x02AA);
	b43_lptab_write(dev, B43_LPTAB16(11, 1), 24);
	b43_phy_maskset(dev, B43_LPPHY_RX_RADIO_CTL,
			0xFFF9, (lpphy->bx_arch << 1));
	/* board-variant specific TR (transmit/receive) lookup tables */
	if (dev->phy.rev == 1 &&
	    (sprom->boardflags_hi & B43_BFH_FEM_BT)) {
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x000A);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0x3F00, 0x0900);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x000A);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0B00);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x000A);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xC0FF, 0x0400);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x000A);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0B00);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_5, 0xFFC0, 0x000A);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_5, 0xC0FF, 0x0900);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_6, 0xFFC0, 0x000A);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_6, 0xC0FF, 0x0B00);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xFFC0, 0x000A);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_7, 0xC0FF, 0x0900);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xFFC0, 0x000A);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_8, 0xC0FF, 0x0B00);
	} else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ ||
		   (dev->dev->board_type == SSB_BOARD_BU4312) ||
		   (dev->phy.rev == 0 && (sprom->boardflags_lo & B43_BFL_FEM))) {
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0001);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0400);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0001);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0500);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x0002);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xC0FF, 0x0800);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0002);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0A00);
	} else if (dev->phy.rev == 1 ||
		   (sprom->boardflags_lo & B43_BFL_FEM)) {
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x0004);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0800);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x0004);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0C00);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x0002);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xC0FF, 0x0100);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0002);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0300);
	} else {
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xFFC0, 0x000A);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_1, 0xC0FF, 0x0900);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xFFC0, 0x000A);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_2, 0xC0FF, 0x0B00);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xFFC0, 0x0006);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_3, 0xC0FF, 0x0500);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xFFC0, 0x0006);
		b43_phy_maskset(dev, B43_LPPHY_TR_LOOKUP_4, 0xC0FF, 0x0700);
	}
	if (dev->phy.rev == 1 && (sprom->boardflags_hi & B43_BFH_PAREF)) {
		/* mirror entries 1-4 into 5-8 */
		b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_5, B43_LPPHY_TR_LOOKUP_1);
		b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_6, B43_LPPHY_TR_LOOKUP_2);
		b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_7, B43_LPPHY_TR_LOOKUP_3);
		b43_phy_copy(dev, B43_LPPHY_TR_LOOKUP_8, B43_LPPHY_TR_LOOKUP_4);
	}
	if ((sprom->boardflags_hi & B43_BFH_FEM_BT) &&
	    (dev->dev->chip_id == 0x5354) &&
	    (dev->dev->chip_pkg == SSB_CHIPPACK_BCM4712S)) {
		b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0006);
		b43_phy_write(dev, B43_LPPHY_GPIO_SELECT, 0x0005);
		b43_phy_write(dev, B43_LPPHY_GPIO_OUTEN, 0xFFFF);
		//FIXME the Broadcom driver caches & delays this HF write!
		b43_hf_write(dev, b43_hf_read(dev) | B43_HF_PR45960W);
	}
	/* band-specific tweaks */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x8000);
		b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x0040);
		b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0xA400);
		b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0x0B00);
		b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x0007);
		b43_phy_maskset(dev, B43_LPPHY_DSSS_CONFIRM_CNT, 0xFFF8, 0x0003);
		b43_phy_maskset(dev, B43_LPPHY_DSSS_CONFIRM_CNT, 0xFFC7, 0x0020);
		b43_phy_mask(dev, B43_LPPHY_IDLEAFTERPKTRXTO, 0x00FF);
	} else { /* 5GHz */
		b43_phy_mask(dev, B43_LPPHY_LP_PHY_CTL, 0x7FFF);
		b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFBF);
	}
	if (dev->phy.rev == 1) {
		/* rev 1: duplicate some gain fields into shadow registers */
		tmp = b43_phy_read(dev, B43_LPPHY_CLIPCTRTHRESH);
		tmp2 = (tmp & 0x03E0) >> 5;
		tmp2 |= tmp2 << 5;
		b43_phy_write(dev, B43_LPPHY_4C3, tmp2);
		tmp = b43_phy_read(dev, B43_LPPHY_GAINDIRECTMISMATCH);
		tmp2 = (tmp & 0x1F00) >> 8;
		tmp2 |= tmp2 << 5;
		b43_phy_write(dev, B43_LPPHY_4C4, tmp2);
		tmp = b43_phy_read(dev, B43_LPPHY_VERYLOWGAINDB);
		tmp2 = tmp & 0x00FF;
		tmp2 |= tmp << 8;
		b43_phy_write(dev, B43_LPPHY_4C5, tmp2);
	}
}
/* Save the current OFDM digital filter coefficient registers into
 * lpphy->dig_flt_state and load the fixed reference coefficient set.
 * Undone by lpphy_restore_dig_flt_state(). */
static void lpphy_save_dig_flt_state(struct b43_wldev *dev)
{
	static const u16 flt_regs[] = {
		B43_PHY_OFDM(0xC1),
		B43_PHY_OFDM(0xC2),
		B43_PHY_OFDM(0xC3),
		B43_PHY_OFDM(0xC4),
		B43_PHY_OFDM(0xC5),
		B43_PHY_OFDM(0xC6),
		B43_PHY_OFDM(0xC7),
		B43_PHY_OFDM(0xC8),
		B43_PHY_OFDM(0xCF),
	};
	static const u16 ref_coefs[] = {
		0xDE5E, 0xE832, 0xE331, 0x4D26,
		0x0026, 0x1420, 0x0020, 0xFE08,
		0x0008,
	};
	struct b43_phy_lp *lpphy = dev->phy.lp;
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(flt_regs); idx++) {
		/* Remember the old value, then overwrite with the reference. */
		lpphy->dig_flt_state[idx] = b43_phy_read(dev, flt_regs[idx]);
		b43_phy_write(dev, flt_regs[idx], ref_coefs[idx]);
	}
}
/* Write back the OFDM digital filter coefficients previously captured
 * by lpphy_save_dig_flt_state(). */
static void lpphy_restore_dig_flt_state(struct b43_wldev *dev)
{
	static const u16 flt_regs[] = {
		B43_PHY_OFDM(0xC1),
		B43_PHY_OFDM(0xC2),
		B43_PHY_OFDM(0xC3),
		B43_PHY_OFDM(0xC4),
		B43_PHY_OFDM(0xC5),
		B43_PHY_OFDM(0xC6),
		B43_PHY_OFDM(0xC7),
		B43_PHY_OFDM(0xC8),
		B43_PHY_OFDM(0xCF),
	};
	struct b43_phy_lp *lpphy = dev->phy.lp;
	int idx;

	for (idx = 0; idx < ARRAY_SIZE(flt_regs); idx++)
		b43_phy_write(dev, flt_regs[idx], lpphy->dig_flt_state[idx]);
}
/*
 * Baseband bring-up for LP-PHY revisions >= 2.
 * This is a raw hardware init sequence; the register write order below
 * follows the vendor sequence and must not be reordered.
 */
static void lpphy_baseband_rev2plus_init(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;

	/* Clear all AFE/RF override state first. */
	b43_phy_write(dev, B43_LPPHY_AFE_DAC_CTL, 0x50);
	b43_phy_write(dev, B43_LPPHY_AFE_CTL, 0x8800);
	b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVR, 0);
	b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0);
	b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_0, 0);
	b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, 0);
	b43_phy_write(dev, B43_PHY_OFDM(0xF9), 0);
	b43_phy_write(dev, B43_LPPHY_TR_LOOKUP_1, 0);
	b43_phy_set(dev, B43_LPPHY_ADC_COMPENSATION_CTL, 0x10);
	b43_phy_maskset(dev, B43_LPPHY_OFDMSYNCTHRESH0, 0xFF00, 0xB4);
	b43_phy_maskset(dev, B43_LPPHY_DCOFFSETTRANSIENT, 0xF8FF, 0x200);
	b43_phy_maskset(dev, B43_LPPHY_DCOFFSETTRANSIENT, 0xFF00, 0x7F);
	b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xFF0F, 0x40);
	b43_phy_maskset(dev, B43_LPPHY_PREAMBLECONFIRMTO, 0xFF00, 0x2);
	b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x4000);
	b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x2000);
	b43_phy_set(dev, B43_PHY_OFDM(0x10A), 0x1);
	if (dev->dev->board_rev >= 0x18) {
		b43_lptab_write(dev, B43_LPTAB32(17, 65), 0xEC);
		b43_phy_maskset(dev, B43_PHY_OFDM(0x10A), 0xFF01, 0x14);
	} else {
		b43_phy_maskset(dev, B43_PHY_OFDM(0x10A), 0xFF01, 0x10);
	}
	b43_phy_maskset(dev, B43_PHY_OFDM(0xDF), 0xFF00, 0xF4);
	b43_phy_maskset(dev, B43_PHY_OFDM(0xDF), 0x00FF, 0xF100);
	b43_phy_write(dev, B43_LPPHY_CLIPTHRESH, 0x48);
	b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0xFF00, 0x46);
	b43_phy_maskset(dev, B43_PHY_OFDM(0xE4), 0xFF00, 0x10);
	b43_phy_maskset(dev, B43_LPPHY_PWR_THRESH1, 0xFFF0, 0x9);
	b43_phy_mask(dev, B43_LPPHY_GAINDIRECTMISMATCH, ~0xF);
	b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0x00FF, 0x5500);
	b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFC1F, 0xA0);
	b43_phy_maskset(dev, B43_LPPHY_GAINDIRECTMISMATCH, 0xE0FF, 0x300);
	b43_phy_maskset(dev, B43_LPPHY_HIGAINDB, 0x00FF, 0x2A00);
	/* Early BCM4325 (chip rev 0) needs different gain thresholds. */
	if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
		b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x2100);
		b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0xA);
	} else {
		b43_phy_maskset(dev, B43_LPPHY_LOWGAINDB, 0x00FF, 0x1E00);
		b43_phy_maskset(dev, B43_LPPHY_VERYLOWGAINDB, 0xFF00, 0xD);
	}
	b43_phy_maskset(dev, B43_PHY_OFDM(0xFE), 0xFFE0, 0x1F);
	b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0xFFE0, 0xC);
	b43_phy_maskset(dev, B43_PHY_OFDM(0x100), 0xFF00, 0x19);
	b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0x03FF, 0x3C00);
	b43_phy_maskset(dev, B43_PHY_OFDM(0xFE), 0xFC1F, 0x3E0);
	b43_phy_maskset(dev, B43_PHY_OFDM(0xFF), 0xFFE0, 0xC);
	b43_phy_maskset(dev, B43_PHY_OFDM(0x100), 0x00FF, 0x1900);
	b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0x83FF, 0x5800);
	b43_phy_maskset(dev, B43_LPPHY_CLIPCTRTHRESH, 0xFFE0, 0x12);
	b43_phy_maskset(dev, B43_LPPHY_GAINMISMATCH, 0x0FFF, 0x9000);
	if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
		b43_lptab_write(dev, B43_LPTAB16(0x08, 0x14), 0);
		b43_lptab_write(dev, B43_LPTAB16(0x08, 0x12), 0x40);
	}
	/* Band-specific carrier-sense gain setup. */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x40);
		b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xF0FF, 0xB00);
		b43_phy_maskset(dev, B43_LPPHY_SYNCPEAKCNT, 0xFFF8, 0x6);
		b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0x00FF, 0x9D00);
		b43_phy_maskset(dev, B43_LPPHY_MINPWR_LEVEL, 0xFF00, 0xA1);
		b43_phy_mask(dev, B43_LPPHY_IDLEAFTERPKTRXTO, 0x00FF);
	} else /* 5GHz */
		b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, ~0x40);
	b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0xFF00, 0xB3);
	b43_phy_maskset(dev, B43_LPPHY_CRS_ED_THRESH, 0x00FF, 0xAD00);
	b43_phy_maskset(dev, B43_LPPHY_INPUT_PWRDB, 0xFF00, lpphy->rx_pwr_offset);
	b43_phy_set(dev, B43_LPPHY_RESET_CTL, 0x44);
	b43_phy_write(dev, B43_LPPHY_RESET_CTL, 0x80);
	/* Program RSSI control from the cached rssi_gs/vc/vf values. */
	b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_0, 0xA954);
	b43_phy_write(dev, B43_LPPHY_AFE_RSSI_CTL_1,
		      0x2000 | ((u16)lpphy->rssi_gs << 10) |
		      ((u16)lpphy->rssi_vc << 4) | lpphy->rssi_vf);
	if ((dev->dev->chip_id == 0x4325) && (dev->dev->chip_rev == 0)) {
		b43_phy_set(dev, B43_LPPHY_AFE_ADC_CTL_0, 0x1C);
		b43_phy_maskset(dev, B43_LPPHY_AFE_CTL, 0x00FF, 0x8800);
		b43_phy_maskset(dev, B43_LPPHY_AFE_ADC_CTL_1, 0xFC3C, 0x0400);
	}
	lpphy_save_dig_flt_state(dev);
}
/* Top-level baseband init: load the PHY tables, then dispatch to the
 * revision-specific bring-up routine. */
static void lpphy_baseband_init(struct b43_wldev *dev)
{
	lpphy_table_init(dev);
	if (dev->phy.rev < 2)
		lpphy_baseband_rev0_1_init(dev);
	else
		lpphy_baseband_rev2plus_init(dev);
}
/* PLL tuning values for the 2062 radio, keyed by the derived reference
 * frequency (see the freqdata_tab lookup in lpphy_2062_init). */
struct b2062_freqdata {
	u16 freq;	/* upper bound compared against the computed `ref` */
	u8 data[6];	/* written to B2062_S_RFPLL_CTL8..CTL11 */
};
/* Initialize the 2062 radio.
 * Uploads the init table, resets the TX control and powerdown/calib
 * registers, then programs the RF PLL dividers from the crystal
 * frequency reported by the SSB chip-common PMU. */
static void lpphy_2062_init(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	struct ssb_bus *bus = dev->dev->sdev->bus;
	u32 crystalfreq, tmp, ref;
	unsigned int i;
	const struct b2062_freqdata *fd = NULL;
	static const struct b2062_freqdata freqdata_tab[] = {
		{ .freq = 12000, .data[0] = 6, .data[1] = 6, .data[2] = 6,
		  .data[3] = 6, .data[4] = 10, .data[5] = 6, },
		{ .freq = 13000, .data[0] = 4, .data[1] = 4, .data[2] = 4,
		  .data[3] = 4, .data[4] = 11, .data[5] = 7, },
		{ .freq = 14400, .data[0] = 3, .data[1] = 3, .data[2] = 3,
		  .data[3] = 3, .data[4] = 12, .data[5] = 7, },
		{ .freq = 16200, .data[0] = 3, .data[1] = 3, .data[2] = 3,
		  .data[3] = 3, .data[4] = 13, .data[5] = 8, },
		{ .freq = 18000, .data[0] = 2, .data[1] = 2, .data[2] = 2,
		  .data[3] = 2, .data[4] = 14, .data[5] = 8, },
		{ .freq = 19200, .data[0] = 1, .data[1] = 1, .data[2] = 1,
		  .data[3] = 1, .data[4] = 14, .data[5] = 9, },
	};

	b2062_upload_init_table(dev);

	b43_radio_write(dev, B2062_N_TX_CTL3, 0);
	b43_radio_write(dev, B2062_N_TX_CTL4, 0);
	b43_radio_write(dev, B2062_N_TX_CTL5, 0);
	b43_radio_write(dev, B2062_N_TX_CTL6, 0);
	/* Pulse the powerdown and calibration timestamp registers. */
	b43_radio_write(dev, B2062_N_PDN_CTL0, 0x40);
	b43_radio_write(dev, B2062_N_PDN_CTL0, 0);
	b43_radio_write(dev, B2062_N_CALIB_TS, 0x10);
	b43_radio_write(dev, B2062_N_CALIB_TS, 0);
	if (dev->phy.rev > 0) {
		/* NOTE(review): reads B2062_N_COMM2 to seed S_BG_CTL1 —
		 * looks asymmetric; matches the vendor sequence, verify
		 * against the specs before changing. */
		b43_radio_write(dev, B2062_S_BG_CTL1,
			(b43_radio_read(dev, B2062_N_COMM2) >> 1) | 0x80);
	}
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
		b43_radio_set(dev, B2062_N_TSSI_CTL0, 0x1);
	else
		b43_radio_mask(dev, B2062_N_TSSI_CTL0, ~0x1);

	/* Get the crystal freq, in Hz. */
	crystalfreq = bus->chipco.pmu.crystalfreq * 1000;

	B43_WARN_ON(!(bus->chipco.capabilities & SSB_CHIPCO_CAP_PMU));
	B43_WARN_ON(crystalfreq == 0);

	/* Select the PLL pre-divider based on the crystal frequency. */
	if (crystalfreq <= 30000000) {
		lpphy->pdiv = 1;
		b43_radio_mask(dev, B2062_S_RFPLL_CTL1, 0xFFFB);
	} else {
		lpphy->pdiv = 2;
		b43_radio_set(dev, B2062_S_RFPLL_CTL1, 0x4);
	}

	/* Derive the RFPLL divider register values; the additions before
	 * each division implement rounding in fixed-point math. */
	tmp = (((800000000 * lpphy->pdiv + crystalfreq) /
	      (2 * crystalfreq)) - 8) & 0xFF;
	b43_radio_write(dev, B2062_S_RFPLL_CTL7, tmp);

	tmp = (((100 * crystalfreq + 16000000 * lpphy->pdiv) /
	      (32000000 * lpphy->pdiv)) - 1) & 0xFF;
	b43_radio_write(dev, B2062_S_RFPLL_CTL18, tmp);

	tmp = (((2 * crystalfreq + 1000000 * lpphy->pdiv) /
	      (2000000 * lpphy->pdiv)) - 1) & 0xFF;
	b43_radio_write(dev, B2062_S_RFPLL_CTL19, tmp);

	/* Reference frequency used to pick a tuning table entry. */
	ref = (1000 * lpphy->pdiv + 2 * crystalfreq) / (2000 * lpphy->pdiv);
	ref &= 0xFFFF;
	for (i = 0; i < ARRAY_SIZE(freqdata_tab); i++) {
		if (ref < freqdata_tab[i].freq) {
			fd = &freqdata_tab[i];
			break;
		}
	}
	if (!fd)
		fd = &freqdata_tab[ARRAY_SIZE(freqdata_tab) - 1];
	b43dbg(dev->wl, "b2062: Using crystal tab entry %u kHz.\n",
	       fd->freq); /* FIXME: Keep this printk until the code is fully debugged. */

	b43_radio_write(dev, B2062_S_RFPLL_CTL8,
			((u16)(fd->data[1]) << 4) | fd->data[0]);
	b43_radio_write(dev, B2062_S_RFPLL_CTL9,
			((u16)(fd->data[3]) << 4) | fd->data[2]);
	b43_radio_write(dev, B2062_S_RFPLL_CTL10, fd->data[4]);
	b43_radio_write(dev, B2062_S_RFPLL_CTL11, fd->data[5]);
}
/* Initialize the 2063 radio.
 * Uploads the init table, then applies fixed register setup; PA setup
 * differs between PHY rev 2 and later revisions. */
static void lpphy_2063_init(struct b43_wldev *dev)
{
	b2063_upload_init_table(dev);
	b43_radio_write(dev, B2063_LOGEN_SP5, 0);
	b43_radio_set(dev, B2063_COMM8, 0x38);
	b43_radio_write(dev, B2063_REG_SP1, 0x56);
	b43_radio_mask(dev, B2063_RX_BB_CTL2, ~0x2);
	b43_radio_write(dev, B2063_PA_SP7, 0);
	b43_radio_write(dev, B2063_TX_RF_SP6, 0x20);
	b43_radio_write(dev, B2063_TX_RF_SP9, 0x40);
	if (dev->phy.rev == 2) {
		b43_radio_write(dev, B2063_PA_SP3, 0xa0);
		b43_radio_write(dev, B2063_PA_SP4, 0xa0);
		b43_radio_write(dev, B2063_PA_SP2, 0x18);
	} else {
		b43_radio_write(dev, B2063_PA_SP3, 0x20);
		b43_radio_write(dev, B2063_PA_SP2, 0x20);
	}
}
/* Describes one radio register field and where it is mirrored into the
 * PHY OFDM register space (base 0xF2 + phy_offset); consumed by
 * lpphy_sync_stx(). */
struct lpphy_stx_table_entry {
	u16 phy_offset;	/* offset from B43_PHY_OFDM(0xF2) */
	u16 phy_shift;	/* bit position within the PHY register */
	u16 rf_addr;	/* source radio register */
	u16 rf_shift;	/* bit position within the radio register */
	u16 mask;	/* field width mask (applied at phy_shift) */
};
/* Radio-field to PHY-register mirror map used by lpphy_sync_stx(). */
static const struct lpphy_stx_table_entry lpphy_stx_table[] = {
	{ .phy_offset = 2, .phy_shift = 6, .rf_addr = 0x3d, .rf_shift = 3, .mask = 0x01, },
	{ .phy_offset = 1, .phy_shift = 12, .rf_addr = 0x4c, .rf_shift = 1, .mask = 0x01, },
	{ .phy_offset = 1, .phy_shift = 8, .rf_addr = 0x50, .rf_shift = 0, .mask = 0x7f, },
	{ .phy_offset = 0, .phy_shift = 8, .rf_addr = 0x44, .rf_shift = 0, .mask = 0xff, },
	{ .phy_offset = 1, .phy_shift = 0, .rf_addr = 0x4a, .rf_shift = 0, .mask = 0xff, },
	{ .phy_offset = 0, .phy_shift = 4, .rf_addr = 0x4d, .rf_shift = 0, .mask = 0xff, },
	{ .phy_offset = 1, .phy_shift = 4, .rf_addr = 0x4e, .rf_shift = 0, .mask = 0xff, },
	{ .phy_offset = 0, .phy_shift = 12, .rf_addr = 0x4f, .rf_shift = 0, .mask = 0x0f, },
	{ .phy_offset = 1, .phy_shift = 0, .rf_addr = 0x4f, .rf_shift = 4, .mask = 0x0f, },
	{ .phy_offset = 3, .phy_shift = 0, .rf_addr = 0x49, .rf_shift = 0, .mask = 0x0f, },
	{ .phy_offset = 4, .phy_shift = 3, .rf_addr = 0x46, .rf_shift = 4, .mask = 0x07, },
	{ .phy_offset = 3, .phy_shift = 15, .rf_addr = 0x46, .rf_shift = 0, .mask = 0x01, },
	{ .phy_offset = 4, .phy_shift = 0, .rf_addr = 0x46, .rf_shift = 1, .mask = 0x07, },
	{ .phy_offset = 3, .phy_shift = 8, .rf_addr = 0x48, .rf_shift = 4, .mask = 0x07, },
	{ .phy_offset = 3, .phy_shift = 11, .rf_addr = 0x48, .rf_shift = 0, .mask = 0x0f, },
	{ .phy_offset = 3, .phy_shift = 4, .rf_addr = 0x49, .rf_shift = 4, .mask = 0x0f, },
	{ .phy_offset = 2, .phy_shift = 15, .rf_addr = 0x45, .rf_shift = 0, .mask = 0x01, },
	{ .phy_offset = 5, .phy_shift = 13, .rf_addr = 0x52, .rf_shift = 4, .mask = 0x07, },
	{ .phy_offset = 6, .phy_shift = 0, .rf_addr = 0x52, .rf_shift = 7, .mask = 0x01, },
	{ .phy_offset = 5, .phy_shift = 3, .rf_addr = 0x41, .rf_shift = 5, .mask = 0x07, },
	{ .phy_offset = 5, .phy_shift = 6, .rf_addr = 0x41, .rf_shift = 0, .mask = 0x0f, },
	{ .phy_offset = 5, .phy_shift = 10, .rf_addr = 0x42, .rf_shift = 5, .mask = 0x07, },
	{ .phy_offset = 4, .phy_shift = 15, .rf_addr = 0x42, .rf_shift = 0, .mask = 0x01, },
	{ .phy_offset = 5, .phy_shift = 0, .rf_addr = 0x42, .rf_shift = 1, .mask = 0x07, },
	{ .phy_offset = 4, .phy_shift = 11, .rf_addr = 0x43, .rf_shift = 4, .mask = 0x0f, },
	{ .phy_offset = 4, .phy_shift = 7, .rf_addr = 0x43, .rf_shift = 0, .mask = 0x0f, },
	{ .phy_offset = 4, .phy_shift = 6, .rf_addr = 0x45, .rf_shift = 1, .mask = 0x01, },
	{ .phy_offset = 2, .phy_shift = 7, .rf_addr = 0x40, .rf_shift = 4, .mask = 0x0f, },
	{ .phy_offset = 2, .phy_shift = 11, .rf_addr = 0x40, .rf_shift = 0, .mask = 0x0f, },
};
static void lpphy_sync_stx(struct b43_wldev *dev)
{
const struct lpphy_stx_table_entry *e;
unsigned int i;
u16 tmp;
for (i = 0; i < ARRAY_SIZE(lpphy_stx_table); i++) {
e = &lpphy_stx_table[i];
tmp = b43_radio_read(dev, e->rf_addr);
tmp >>= e->rf_shift;
tmp <<= e->phy_shift;
b43_phy_maskset(dev, B43_PHY_OFDM(0xF2 + e->phy_offset),
~(e->mask << e->phy_shift), tmp);
}
}
/* Bring up the attached radio (2062 or 2063, selected by radio_ver). */
static void lpphy_radio_init(struct b43_wldev *dev)
{
	/* The radio is attached through the 4wire bus. */
	/* Pulse the 4wire enable bit; the udelay()s give the bus time to
	 * settle between toggles. */
	b43_phy_set(dev, B43_LPPHY_FOURWIRE_CTL, 0x2);
	udelay(1);
	b43_phy_mask(dev, B43_LPPHY_FOURWIRE_CTL, 0xFFFD);
	udelay(1);

	if (dev->phy.radio_ver == 0x2062) {
		lpphy_2062_init(dev);
	} else {
		lpphy_2063_init(dev);
		lpphy_sync_stx(dev);
		b43_phy_write(dev, B43_PHY_OFDM(0xF0), 0x5F80);
		b43_phy_write(dev, B43_PHY_OFDM(0xF1), 0);
		if (dev->dev->chip_id == 0x4325) {
			// TODO SSB PMU recalibration
		}
	}
}
/* Accumulated I/Q measurement read back from the PHY by lpphy_rx_iq_est(). */
struct lpphy_iq_est { u32 iq_prod, i_pwr, q_pwr; };
/* Program the calibrated RC capacitor value (lpphy->rc_cap, found by
 * lpphy_rev0_1_rc_calib) into the 2062 radio registers. */
static void lpphy_set_rc_cap(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;

	u8 rc_cap = (lpphy->rc_cap & 0x1F) >> 1;

	if (dev->phy.rev == 1) //FIXME check channel 14!
		rc_cap = min_t(u8, rc_cap + 5, 15);

	/* Bit 0x80 appears to be an enable/valid flag on these registers
	 * — NOTE(review): confirm against the 2062 register spec. */
	b43_radio_write(dev, B2062_N_RXBB_CALIB2,
			max_t(u8, lpphy->rc_cap - 4, 0x80));
	b43_radio_write(dev, B2062_N_TX_CTL_A, rc_cap | 0x80);
	b43_radio_write(dev, B2062_S_RXG_CNT16,
			((lpphy->rc_cap & 0x1F) >> 2) | 0x80);
}
static u8 lpphy_get_bb_mult(struct b43_wldev *dev)
{
return (b43_lptab_read(dev, B43_LPTAB16(0, 87)) & 0xFF00) >> 8;
}
/* Store the baseband multiplier in the high byte of table 0, entry 87. */
static void lpphy_set_bb_mult(struct b43_wldev *dev, u8 bb_mult)
{
	u16 entry = (u16)bb_mult << 8;

	b43_lptab_write(dev, B43_LPTAB16(0, 87), entry);
}
/* Disable carrier sense ("go deaf").  `user` selects which of the two
 * disable flags (user- vs. system-requested) is recorded. */
static void lpphy_set_deaf(struct b43_wldev *dev, bool user)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	bool *flag;

	flag = user ? &lpphy->crs_usr_disable : &lpphy->crs_sys_disable;
	*flag = true;
	b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, 0x80);
}
/* Clear one of the two carrier-sense disable flags; re-enable carrier
 * sense only once both the user and the system flag are cleared. */
static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u16 crs_mode;

	if (user)
		lpphy->crs_usr_disable = false;
	else
		lpphy->crs_sys_disable = false;

	if (lpphy->crs_usr_disable || lpphy->crs_sys_disable)
		return;

	/* The CRS gain mode differs between the 2.4 and 5 GHz bands. */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
		crs_mode = 0x60;
	else
		crs_mode = 0x20;
	b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFF1F, crs_mode);
}
/* Force the TX/RX switch into the given state via the RF override. */
static void lpphy_set_trsw_over(struct b43_wldev *dev, bool tx, bool rx)
{
	u16 bits = 0;

	if (tx)
		bits |= 0x2;
	if (rx)
		bits |= 0x1;
	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, bits);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x3);
}
/* Disable carrier sense and force the RF/AFE chain into a fixed state
 * suitable for calibration.  The override write order below is
 * significant; undone by lpphy_restore_crs(). */
static void lpphy_disable_crs(struct b43_wldev *dev, bool user)
{
	lpphy_set_deaf(dev, user);
	/* Force TRSW to RX. */
	lpphy_set_trsw_over(dev, false, true);
	/* Pin the individual RF override bits one by one: clear (or set)
	 * the value bit, then enable its override. */
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFB);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x4);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFF7);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x10);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFDF);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x20);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFBF);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0x7);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0x38);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFF3F);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0x100);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFDFF);
	b43_phy_write(dev, B43_LPPHY_PS_CTL_OVERRIDE_VAL0, 0);
	b43_phy_write(dev, B43_LPPHY_PS_CTL_OVERRIDE_VAL1, 1);
	b43_phy_write(dev, B43_LPPHY_PS_CTL_OVERRIDE_VAL2, 0x20);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFBFF);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xF7FF);
	b43_phy_write(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL, 0);
	b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, 0x45AF);
	/* Enable all the RF_OVERRIDE_2 override bits at once. */
	b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, 0x3FF);
}
/* Undo lpphy_disable_crs(): re-enable carrier sense and clear the RF
 * override enable bits it installed. */
static void lpphy_restore_crs(struct b43_wldev *dev, bool user)
{
	lpphy_clear_deaf(dev, user);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFF80);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFC00);
}
/* TX gain chain settings: Gm, PGA and PAD stage gains plus the DAC gain. */
struct lpphy_tx_gains { u16 gm, pga, pad, dac; };
/* Hand RX gain control back to hardware by clearing the override enable
 * bits (counterpart of lpphy_enable_rx_gain_override()). */
static void lpphy_disable_rx_gain_override(struct b43_wldev *dev)
{
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFE);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFEF);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
	if (dev->phy.rev >= 2) {
		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
		/* rev2+ has an extra 2.4GHz-only override bit pair. */
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
			b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
		}
	} else {
		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFDFF);
	}
}
/* Take manual control of the RX gain by setting the override enable
 * bits (mirror image of lpphy_disable_rx_gain_override()). */
static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
{
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
	if (dev->phy.rev >= 2) {
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
		/* rev2+ has an extra 2.4GHz-only override bit pair. */
		if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
			b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
		}
	} else {
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x200);
	}
}
/* Hand TX gain control back to hardware by clearing the TX gain and
 * DAC override enable bits. */
static void lpphy_disable_tx_gain_override(struct b43_wldev *dev)
{
	if (dev->phy.rev >= 2) {
		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFF7F);
		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xBFFF);
	} else {
		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
	}
	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFBF);
}
/* Take manual control of the TX gain by setting the TX gain and DAC
 * override enable bits. */
static void lpphy_enable_tx_gain_override(struct b43_wldev *dev)
{
	if (dev->phy.rev >= 2) {
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x80);
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x4000);
	} else {
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
	}
	b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x40);
}
/* Read the currently-programmed TX gain override values from hardware.
 * The field layout differs between PHY revs < 2 and >= 2. */
static struct lpphy_tx_gains lpphy_get_tx_gains(struct b43_wldev *dev)
{
	struct lpphy_tx_gains gains;
	u16 reg;

	gains.dac = (b43_phy_read(dev, B43_LPPHY_AFE_DAC_CTL) & 0x380) >> 7;
	if (dev->phy.rev >= 2) {
		reg = b43_phy_read(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL);
		gains.pad = b43_phy_read(dev, B43_PHY_OFDM(0xFB)) & 0xFF;
		gains.gm = reg & 0xFF;
		gains.pga = (reg >> 8) & 0xFF;
	} else {
		reg = b43_phy_read(dev,
				   B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL) & 0x7FF;
		gains.gm = reg & 0x0007;
		gains.pga = (reg & 0x0078) >> 3;
		gains.pad = (reg & 0x780) >> 7;
	}

	return gains;
}
/* Program the DAC gain field (bits 7-9) of the AFE DAC control register,
 * preserving the other low bits. */
static void lpphy_set_dac_gain(struct b43_wldev *dev, u16 dac)
{
	u16 val;

	val = b43_phy_read(dev, B43_LPPHY_AFE_DAC_CTL) & 0xC7F;
	val |= dac << 7;
	b43_phy_maskset(dev, B43_LPPHY_AFE_DAC_CTL, 0xF000, val);
}
/* Read the PA gain field (low 7 bits of OFDM register 0xFB). */
static u16 lpphy_get_pa_gain(struct b43_wldev *dev)
{
	u16 reg = b43_phy_read(dev, B43_PHY_OFDM(0xFB));

	return reg & 0x7F;
}
/* Write the PA gain into its two mirror locations in OFDM registers
 * 0xFB (bits 6..) and 0xFD (bits 8..). */
static void lpphy_set_pa_gain(struct b43_wldev *dev, u16 gain)
{
	u16 fb_bits = gain << 6;
	u16 fd_bits = gain << 8;

	b43_phy_maskset(dev, B43_PHY_OFDM(0xFB), 0xE03F, fb_bits);
	b43_phy_maskset(dev, B43_PHY_OFDM(0xFD), 0x80FF, fd_bits);
}
/* Program the full TX gain chain (Gm/PGA/PAD/DAC) and enable the TX
 * gain override so the values take effect. */
static void lpphy_set_tx_gains(struct b43_wldev *dev,
			       struct lpphy_tx_gains gains)
{
	u16 rf_gain, pa_gain;

	if (dev->phy.rev < 2) {
		/* rev0/1 packs gm/pga/pad into one override register. */
		rf_gain = (gains.pad << 7) | (gains.pga << 3) | gains.gm;
		b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
				0xF800, rf_gain);
	} else {
		pa_gain = lpphy_get_pa_gain(dev);
		b43_phy_write(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
			      (gains.pga << 8) | gains.gm);
		/*
		 * SPEC FIXME The spec calls for (pa_gain << 8) here, but that
		 * conflicts with the spec for set_pa_gain! Vendor driver bug?
		 */
		b43_phy_maskset(dev, B43_PHY_OFDM(0xFB),
				0x8000, gains.pad | (pa_gain << 6));
		b43_phy_write(dev, B43_PHY_OFDM(0xFC),
			      (gains.pga << 8) | gains.gm);
		b43_phy_maskset(dev, B43_PHY_OFDM(0xFD),
				0x8000, gains.pad | (pa_gain << 8));
	}
	lpphy_set_dac_gain(dev, gains.dac);
	lpphy_enable_tx_gain_override(dev);
}
/* Apply a packed RX gain word on PHY revs 0/1: bit 0 = TRSW, bit 1 =
 * external LNA, the rest is the LNA/gain code. */
static void lpphy_rev0_1_set_rx_gain(struct b43_wldev *dev, u32 gain)
{
	u16 trsw = gain & 0x1;
	u16 lna = (gain & 0xFFFC) | ((gain & 0xC) >> 2);
	u16 ext_lna = (gain & 2) >> 1;

	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFE, trsw);
	/* The ext-LNA bit is mirrored into two override bit positions. */
	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
			0xFBFF, ext_lna << 10);
	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
			0xF7FF, ext_lna << 11);
	b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, lna);
}
/* Apply a packed RX gain word on PHY revs >= 2: low 16 bits = gain code,
 * bits 16-19 = high gain, bit 20 = inverted TRSW, bit 21 = ext LNA. */
static void lpphy_rev2plus_set_rx_gain(struct b43_wldev *dev, u32 gain)
{
	u16 low_gain = gain & 0xFFFF;
	u16 high_gain = (gain >> 16) & 0xF;
	u16 ext_lna = (gain >> 21) & 0x1;
	u16 trsw = ~(gain >> 20) & 0x1;
	u16 tmp;

	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFE, trsw);
	/* The ext-LNA bit is mirrored into two override bit positions. */
	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
			0xFDFF, ext_lna << 9);
	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
			0xFBFF, ext_lna << 10);
	b43_phy_write(dev, B43_LPPHY_RX_GAIN_CTL_OVERRIDE_VAL, low_gain);
	b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF0, high_gain);
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		/* 2.4GHz additionally mirrors gain bits 2-3. */
		tmp = (gain >> 2) & 0x3;
		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL,
				0xE7FF, tmp<<11);
		b43_phy_maskset(dev, B43_PHY_OFDM(0xE6), 0xFFE7, tmp << 3);
	}
}
/* Apply an RX gain word via the revision-appropriate routine, then
 * switch the PHY to the overridden gain. */
static void lpphy_set_rx_gain(struct b43_wldev *dev, u32 gain)
{
	if (dev->phy.rev >= 2)
		lpphy_rev2plus_set_rx_gain(dev, gain);
	else
		lpphy_rev0_1_set_rx_gain(dev, gain);
	lpphy_enable_rx_gain_override(dev);
}
/* Apply the RX gain word stored at index `idx` of gain table 12. */
static void lpphy_set_rx_gain_by_index(struct b43_wldev *dev, u16 idx)
{
	lpphy_set_rx_gain(dev, b43_lptab_read(dev, B43_LPTAB16(12, idx)));
}
/* Stop the DDFS signal generator (counterpart of lpphy_run_ddfs()). */
static void lpphy_stop_ddfs(struct b43_wldev *dev)
{
	b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0xFFFD);
	b43_phy_mask(dev, B43_LPPHY_LP_PHY_CTL, 0xFFDF);
}
/* Start the DDFS signal generator (used as the test-signal source for
 * the loopback/RC calibration).  incr1/incr2 set the phase increments,
 * scale_idx the amplitude scale, i_on/q_on enable the I/Q channels. */
static void lpphy_run_ddfs(struct b43_wldev *dev, int i_on, int q_on,
			   int incr1, int incr2, int scale_idx)
{
	lpphy_stop_ddfs(dev);
	/* Reset the phase accumulators, then program the parameters. */
	b43_phy_mask(dev, B43_LPPHY_AFE_DDFS_POINTER_INIT, 0xFF80);
	b43_phy_mask(dev, B43_LPPHY_AFE_DDFS_POINTER_INIT, 0x80FF);
	b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS_INCR_INIT, 0xFF80, incr1);
	b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS_INCR_INIT, 0x80FF, incr2 << 8);
	b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFF7, i_on << 3);
	b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFFEF, q_on << 4);
	b43_phy_maskset(dev, B43_LPPHY_AFE_DDFS, 0xFF9F, scale_idx << 5);
	b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0xFFFB);
	/* Enable the generator. */
	b43_phy_set(dev, B43_LPPHY_AFE_DDFS, 0x2);
	b43_phy_set(dev, B43_LPPHY_LP_PHY_CTL, 0x20);
}
/* Run a hardware I/Q estimate over `samples` samples and read the
 * 32-bit accumulators back into *iq_est.
 * Returns false if the measurement did not complete within ~500ms
 * (bit 0x200 never cleared), true on success. */
static bool lpphy_rx_iq_est(struct b43_wldev *dev, u16 samples, u8 time,
			    struct lpphy_iq_est *iq_est)
{
	int i;

	b43_phy_mask(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFF7);
	b43_phy_write(dev, B43_LPPHY_IQ_NUM_SMPLS_ADDR, samples);
	b43_phy_maskset(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xFF00, time);
	b43_phy_mask(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR, 0xFEFF);
	/* Kick off the measurement; hardware clears 0x200 when done. */
	b43_phy_set(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR, 0x200);

	for (i = 0; i < 500; i++) {
		if (!(b43_phy_read(dev,
				B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200))
			break;
		msleep(1);
	}
	if ((b43_phy_read(dev, B43_LPPHY_IQ_ENABLE_WAIT_TIME_ADDR) & 0x200)) {
		/* Timed out; re-enable the CRS bit we cleared and bail. */
		b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x8);
		return false;
	}

	/* Each accumulator is split across a HI and a LO register. */
	iq_est->iq_prod = b43_phy_read(dev, B43_LPPHY_IQ_ACC_HI_ADDR);
	iq_est->iq_prod <<= 16;
	iq_est->iq_prod |= b43_phy_read(dev, B43_LPPHY_IQ_ACC_LO_ADDR);

	iq_est->i_pwr = b43_phy_read(dev, B43_LPPHY_IQ_I_PWR_ACC_HI_ADDR);
	iq_est->i_pwr <<= 16;
	iq_est->i_pwr |= b43_phy_read(dev, B43_LPPHY_IQ_I_PWR_ACC_LO_ADDR);

	iq_est->q_pwr = b43_phy_read(dev, B43_LPPHY_IQ_Q_PWR_ACC_HI_ADDR);
	iq_est->q_pwr <<= 16;
	iq_est->q_pwr |= b43_phy_read(dev, B43_LPPHY_IQ_Q_PWR_ACC_LO_ADDR);

	b43_phy_set(dev, B43_LPPHY_CRSGAIN_CTL, 0x8);
	return true;
}
/* Set up a TX->RX loopback path and scan the RX gain table for an index
 * (0..31) whose measured loopback power falls in the 4000..10000 window.
 * Returns that index, or -1 if none qualifies. */
static int lpphy_loopback(struct b43_wldev *dev)
{
	struct lpphy_iq_est iq_est;
	int i, index = -1;
	u32 tmp;

	memset(&iq_est, 0, sizeof(iq_est));

	/* Force TRSW to TX+RX and enable the loopback path overrides. */
	lpphy_set_trsw_over(dev, true, true);
	b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 1);
	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x800);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x8);
	b43_radio_write(dev, B2062_N_TX_CTL_A, 0x80);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x80);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x80);
	for (i = 0; i < 32; i++) {
		lpphy_set_rx_gain_by_index(dev, i);
		lpphy_run_ddfs(dev, 1, 1, 5, 5, 0);
		if (!(lpphy_rx_iq_est(dev, 1000, 32, &iq_est)))
			continue;
		/* Average power per sample. */
		tmp = (iq_est.i_pwr + iq_est.q_pwr) / 1000;
		if ((tmp > 4000) && (tmp < 10000)) {
			index = i;
			break;
		}
	}
	lpphy_stop_ddfs(dev);
	return index;
}
/*
 * Fixed-point division algorithm using only integer math.
 *
 * Returns (dividend / divisor) scaled by 2^precision and rounded to the
 * nearest integer (ties round up).  Returns 0 for a zero divisor.
 *
 * Fix: the long-division loop previously left the remainder un-shifted
 * whenever the produced fraction bit was 0, so every fraction bit after
 * the first 0 bit was derived from a stale remainder and came out 0
 * (e.g. 2/3 at 4 bits of precision yielded 8 instead of 11).  The
 * remainder must be doubled on that path too, as in brcmsmac's
 * wlc_lcnphy_qdiv_roundup().
 */
static u32 lpphy_qdiv_roundup(u32 dividend, u32 divisor, u8 precision)
{
	u32 quotient, remainder;

	if (divisor == 0)
		return 0;

	quotient = dividend / divisor;
	remainder = dividend % divisor;

	while (precision > 0) {
		quotient <<= 1;
		if (remainder << 1 >= divisor) {
			quotient++;
			remainder = (remainder << 1) - divisor;
		} else {
			/* Bit is 0, but the remainder still scales up. */
			remainder <<= 1;
		}
		precision--;
	}

	/* Round to nearest on the final remainder (ties round up). */
	if (remainder << 1 >= divisor)
		quotient++;

	return quotient;
}
/* Read the TX power control mode from hardware.
 * Decodes the mode bits of B43_LPPHY_TX_PWR_CTL_CMD into
 * lpphy->txpctl_mode; warns and records UNKNOWN on an undecodable
 * value. */
static void lpphy_read_tx_pctl_mode_from_hardware(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u16 mode;

	mode = b43_phy_read(dev, B43_LPPHY_TX_PWR_CTL_CMD) &
	       B43_LPPHY_TX_PWR_CTL_CMD_MODE;
	if (mode == B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF) {
		lpphy->txpctl_mode = B43_LPPHY_TXPCTL_OFF;
	} else if (mode == B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW) {
		lpphy->txpctl_mode = B43_LPPHY_TXPCTL_SW;
	} else if (mode == B43_LPPHY_TX_PWR_CTL_CMD_MODE_HW) {
		lpphy->txpctl_mode = B43_LPPHY_TXPCTL_HW;
	} else {
		lpphy->txpctl_mode = B43_LPPHY_TXPCTL_UNKNOWN;
		B43_WARN_ON(1);
	}
}
/* Set the TX power control mode in hardware.
 * Translates lpphy->txpctl_mode back into the mode bits of
 * B43_LPPHY_TX_PWR_CTL_CMD; warns and writes 0 for an unknown mode. */
static void lpphy_write_tx_pctl_mode_to_hardware(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u16 mode_bits;

	if (lpphy->txpctl_mode == B43_LPPHY_TXPCTL_OFF) {
		mode_bits = B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF;
	} else if (lpphy->txpctl_mode == B43_LPPHY_TXPCTL_HW) {
		mode_bits = B43_LPPHY_TX_PWR_CTL_CMD_MODE_HW;
	} else if (lpphy->txpctl_mode == B43_LPPHY_TXPCTL_SW) {
		mode_bits = B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW;
	} else {
		mode_bits = 0;
		B43_WARN_ON(1);
	}
	b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD,
			~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF, mode_bits);
}
/* Switch the TX power control mode (OFF / software / hardware).
 * Syncs the cached mode with hardware first and is a no-op when the
 * mode is already active. */
static void lpphy_set_tx_power_control(struct b43_wldev *dev,
				       enum b43_lpphy_txpctl_mode mode)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	enum b43_lpphy_txpctl_mode oldmode;

	lpphy_read_tx_pctl_mode_from_hardware(dev);
	oldmode = lpphy->txpctl_mode;
	if (oldmode == mode)
		return;
	lpphy->txpctl_mode = mode;

	if (oldmode == B43_LPPHY_TXPCTL_HW) {
		//TODO Update TX Power NPT
		//TODO Clear all TX Power offsets
	} else {
		if (mode == B43_LPPHY_TXPCTL_HW) {
			/* Entering hardware power control: seed the TSSI
			 * index/NPT and drop any manual gain override. */
			//TODO Recalculate target TX power
			b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD,
					0xFF80, lpphy->tssi_idx);
			b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM,
					0x8FFF, ((u16)lpphy->tssi_npt << 16));
			//TODO Set "TSSI Transmit Count" variable to total transmitted frame count
			lpphy_disable_tx_gain_override(dev);
			lpphy->tx_pwr_idx_over = -1;
		}
	}
	if (dev->phy.rev >= 2) {
		if (mode == B43_LPPHY_TXPCTL_HW)
			b43_phy_set(dev, B43_PHY_OFDM(0xD0), 0x2);
		else
			b43_phy_mask(dev, B43_PHY_OFDM(0xD0), 0xFFFD);
	}
	lpphy_write_tx_pctl_mode_to_hardware(dev);
}
static int b43_lpphy_op_switch_channel(struct b43_wldev *dev,
unsigned int new_channel);
/* RC calibration for PHY revs 0/1 (2062 radio).
 * Switches to channel 7, saves all override/gain/power-control state,
 * finds a loopback RX gain, then sweeps B2062_N_RXBB_CALIB2 values
 * 128..159 and keeps (in lpphy->rc_cap) the one whose measured DDFS
 * response best matches ideal_pwr_table.  All saved state is restored
 * before returning. */
static void lpphy_rev0_1_rc_calib(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	struct lpphy_iq_est iq_est;
	struct lpphy_tx_gains tx_gains;
	static const u32 ideal_pwr_table[21] = {
		0x10000, 0x10557, 0x10e2d, 0x113e0, 0x10f22, 0x0ff64,
		0x0eda2, 0x0e5d4, 0x0efd1, 0x0fbe8, 0x0b7b8, 0x04b35,
		0x01a5e, 0x00a0b, 0x00444, 0x001fd, 0x000ff, 0x00088,
		0x0004c, 0x0002c, 0x0001a,
	};
	bool old_txg_ovr;
	u8 old_bbmult;
	u16 old_rf_ovr, old_rf_ovrval, old_afe_ovr, old_afe_ovrval,
	    old_rf2_ovr, old_rf2_ovrval, old_phy_ctl;
	enum b43_lpphy_txpctl_mode old_txpctl;
	u32 normal_pwr, ideal_pwr, mean_sq_pwr, tmp = 0, mean_sq_pwr_min = 0;
	int loopback, i, j, inner_sum, err;

	memset(&iq_est, 0, sizeof(iq_est));

	err = b43_lpphy_op_switch_channel(dev, 7);
	if (err) {
		b43dbg(dev->wl,
		       "RC calib: Failed to switch to channel 7, error = %d\n",
		       err);
	}
	/* Save all state this calibration is going to clobber. */
	old_txg_ovr = !!(b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40);
	old_bbmult = lpphy_get_bb_mult(dev);
	if (old_txg_ovr)
		tx_gains = lpphy_get_tx_gains(dev);
	old_rf_ovr = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_0);
	old_rf_ovrval = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_VAL_0);
	old_afe_ovr = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR);
	old_afe_ovrval = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVRVAL);
	old_rf2_ovr = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_2);
	old_rf2_ovrval = b43_phy_read(dev, B43_LPPHY_RF_OVERRIDE_2_VAL);
	old_phy_ctl = b43_phy_read(dev, B43_LPPHY_LP_PHY_CTL);
	lpphy_read_tx_pctl_mode_from_hardware(dev);
	old_txpctl = lpphy->txpctl_mode;

	lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
	lpphy_disable_crs(dev, true);
	loopback = lpphy_loopback(dev);
	if (loopback == -1)
		goto finish;
	lpphy_set_rx_gain_by_index(dev, loopback);
	b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFFBF, 0x40);
	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFFF8, 0x1);
	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFFC7, 0x8);
	b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFF3F, 0xC0);
	/* Sweep the RC cap candidates; for each, sweep DDFS frequency
	 * steps 5..25 and accumulate the squared error against the
	 * normalized ideal response. */
	for (i = 128; i <= 159; i++) {
		b43_radio_write(dev, B2062_N_RXBB_CALIB2, i);
		inner_sum = 0;
		for (j = 5; j <= 25; j++) {
			lpphy_run_ddfs(dev, 1, 1, j, j, 0);
			if (!(lpphy_rx_iq_est(dev, 1000, 32, &iq_est)))
				goto finish;
			mean_sq_pwr = iq_est.i_pwr + iq_est.q_pwr;
			if (j == 5)
				tmp = mean_sq_pwr;
			ideal_pwr = ((ideal_pwr_table[j-5] >> 3) + 1) >> 1;
			normal_pwr = lpphy_qdiv_roundup(mean_sq_pwr, tmp, 12);
			mean_sq_pwr = ideal_pwr - normal_pwr;
			mean_sq_pwr *= mean_sq_pwr;
			inner_sum += mean_sq_pwr;
			/* NOTE(review): this best-candidate update runs
			 * inside the inner loop on a partial sum — matches
			 * the vendor sequence, but verify against specs. */
			if ((i == 128) || (inner_sum < mean_sq_pwr_min)) {
				lpphy->rc_cap = i;
				mean_sq_pwr_min = inner_sum;
			}
		}
	}
	lpphy_stop_ddfs(dev);
finish:
	/* Restore everything we saved above, in reverse order. */
	lpphy_restore_crs(dev, true);
	b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, old_rf_ovrval);
	b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_0, old_rf_ovr);
	b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVRVAL, old_afe_ovrval);
	b43_phy_write(dev, B43_LPPHY_AFE_CTL_OVR, old_afe_ovr);
	b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, old_rf2_ovrval);
	b43_phy_write(dev, B43_LPPHY_RF_OVERRIDE_2, old_rf2_ovr);
	b43_phy_write(dev, B43_LPPHY_LP_PHY_CTL, old_phy_ctl);
	lpphy_set_bb_mult(dev, old_bbmult);
	if (old_txg_ovr) {
		/*
		 * SPEC FIXME: The specs say "get_tx_gains" here, which is
		 * illogical. According to lwfinger, vendor driver v4.150.10.5
		 * has a Set here, while v4.174.64.19 has a Get - regression in
		 * the vendor driver? This should be tested this once the code
		 * is testable.
		 */
		lpphy_set_tx_gains(dev, tx_gains);
	}
	lpphy_set_tx_power_control(dev, old_txpctl);
	if (lpphy->rc_cap)
		lpphy_set_rc_cap(dev);
}
/* RC calibration for PHY revs >= 2 (2063 radio).
 * Runs two hardware calibration passes (RX baseband, then TX baseband),
 * each polled for completion via bit 0x2 of B2063_RC_CALIB_CTL6 for up
 * to ~10s.  The TX pass parameters depend on a 24MHz crystal. */
static void lpphy_rev2plus_rc_calib(struct b43_wldev *dev)
{
	struct ssb_bus *bus = dev->dev->sdev->bus;
	u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
	u8 tmp = b43_radio_read(dev, B2063_RX_BB_SP8) & 0xFF;
	int i;

	/* RX baseband calibration pass. */
	b43_radio_write(dev, B2063_RX_BB_SP8, 0x0);
	b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7E);
	b43_radio_mask(dev, B2063_PLL_SP1, 0xF7);
	b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7C);
	b43_radio_write(dev, B2063_RC_CALIB_CTL2, 0x15);
	b43_radio_write(dev, B2063_RC_CALIB_CTL3, 0x70);
	b43_radio_write(dev, B2063_RC_CALIB_CTL4, 0x52);
	b43_radio_write(dev, B2063_RC_CALIB_CTL5, 0x1);
	b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7D);

	for (i = 0; i < 10000; i++) {
		if (b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2)
			break;
		msleep(1);
	}

	/* On timeout, fall back to the saved RX_BB_SP8 value. */
	if (!(b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2))
		b43_radio_write(dev, B2063_RX_BB_SP8, tmp);

	/* TX baseband calibration pass. */
	tmp = b43_radio_read(dev, B2063_TX_BB_SP3) & 0xFF;

	b43_radio_write(dev, B2063_TX_BB_SP3, 0x0);
	b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7E);
	b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7C);
	b43_radio_write(dev, B2063_RC_CALIB_CTL2, 0x55);
	b43_radio_write(dev, B2063_RC_CALIB_CTL3, 0x76);

	if (crystal_freq == 24000000) {
		b43_radio_write(dev, B2063_RC_CALIB_CTL4, 0xFC);
		b43_radio_write(dev, B2063_RC_CALIB_CTL5, 0x0);
	} else {
		b43_radio_write(dev, B2063_RC_CALIB_CTL4, 0x13);
		b43_radio_write(dev, B2063_RC_CALIB_CTL5, 0x1);
	}
	/* NOTE(review): PA_SP7 <- 0x7D starts this pass here, whereas the
	 * RX pass kicked off via RC_CALIB_CTL1 — verify against specs. */
	b43_radio_write(dev, B2063_PA_SP7, 0x7D);

	for (i = 0; i < 10000; i++) {
		if (b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2)
			break;
		msleep(1);
	}

	/* On timeout, fall back to the saved TX_BB_SP3 value. */
	if (!(b43_radio_read(dev, B2063_RC_CALIB_CTL6) & 0x2))
		b43_radio_write(dev, B2063_TX_BB_SP3, tmp);

	b43_radio_write(dev, B2063_RC_CALIB_CTL1, 0x7E);
}
/* Dispatch RC calibration to the routine matching the PHY revision. */
static void lpphy_calibrate_rc(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;

	if (dev->phy.rev >= 2) {
		/* Rev 2+ radios (B2063) have their own calibration engine. */
		lpphy_rev2plus_rc_calib(dev);
		return;
	}
	if (lpphy->rc_cap) {
		/* A cached RC capacitor value exists - just program it. */
		lpphy_set_rc_cap(dev);
		return;
	}
	/* Rev 0/1 calibration is only performed on the 2.4 GHz band. */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
		lpphy_rev0_1_rc_calib(dev);
}
/* Select the RX antenna (diversity) on rev 0/1 LP-PHYs. */
static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
{
	if (dev->phy.rev >= 2)
		return; // rev2+ doesn't support antenna diversity

	if (B43_WARN_ON(antenna > B43_ANTENNA_AUTO1))
		return;

	/* Clear the ANTDIVHELP host flag while switching, restore after. */
	b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP);
	/* Antenna selection lives in the two low bits of CRSGAIN_CTL. */
	b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFD, antenna & 0x2);
	b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFE, antenna & 0x1);
	b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP);

	dev->phy.lp->antenna = antenna;	/* cached for later restores */
}
/* Program the TX I/Q compensation coefficient pair (a, b) into
 * PHY table 0 at offsets 80 and 81. */
static void lpphy_set_tx_iqcc(struct b43_wldev *dev, u16 a, u16 b)
{
	u16 coeffs[] = { a, b };

	b43_lptab_write_bulk(dev, B43_LPTAB16(0, 80),
			     ARRAY_SIZE(coeffs), coeffs);
}
/* Force a specific TX power table entry, overriding automatic TX power
 * control. The table lives in PHY table 7 on rev 2+ and table 10 on
 * rev 0/1; offsets: +320 I/Q compensation, +192 TX gain, +448 filter
 * coefficient, +576 RF power (rev 2+ only). */
static void lpphy_set_tx_power_by_index(struct b43_wldev *dev, u8 index)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	struct lpphy_tx_gains gains;
	u32 iq_comp, tx_gain, coeff, rf_power;

	lpphy->tx_pwr_idx_over = index;	/* remember the override index */

	/* Automatic power control must not be active; drop to SW mode. */
	lpphy_read_tx_pctl_mode_from_hardware(dev);
	if (lpphy->txpctl_mode != B43_LPPHY_TXPCTL_OFF)
		lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_SW);

	if (dev->phy.rev >= 2) {
		iq_comp = b43_lptab_read(dev, B43_LPTAB32(7, index + 320));
		tx_gain = b43_lptab_read(dev, B43_LPTAB32(7, index + 192));
		/* Unpack the gain fields from the table word. */
		gains.pad = (tx_gain >> 16) & 0xFF;
		gains.gm = tx_gain & 0xFF;
		gains.pga = (tx_gain >> 8) & 0xFF;
		gains.dac = (iq_comp >> 28) & 0xFF;
		lpphy_set_tx_gains(dev, gains);
	} else {
		iq_comp = b43_lptab_read(dev, B43_LPTAB32(10, index + 320));
		tx_gain = b43_lptab_read(dev, B43_LPTAB32(10, index + 192));
		b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
				0xF800, (tx_gain >> 4) & 0x7FFF);
		lpphy_set_dac_gain(dev, tx_gain & 0x7);
		lpphy_set_pa_gain(dev, (tx_gain >> 24) & 0x7F);
	}
	/* Baseband multiplier and TX I/Q compensation from the same word. */
	lpphy_set_bb_mult(dev, (iq_comp >> 20) & 0xFF);
	lpphy_set_tx_iqcc(dev, (iq_comp >> 10) & 0x3FF, iq_comp & 0x3FF);
	if (dev->phy.rev >= 2) {
		coeff = b43_lptab_read(dev, B43_LPTAB32(7, index + 448));
	} else {
		coeff = b43_lptab_read(dev, B43_LPTAB32(10, index + 448));
	}
	b43_lptab_write(dev, B43_LPTAB16(0, 85), coeff & 0xFFFF);
	if (dev->phy.rev >= 2) {
		rf_power = b43_lptab_read(dev, B43_LPTAB32(7, index + 576));
		b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00,
				rf_power & 0xFFFF);//SPEC FIXME mask & set != 0
	}
	lpphy_enable_tx_gain_override(dev);
}
/* Override the Bluetooth coexistence hardware during calibration.
 * NOTE(review): the 0x3 / 0xFF magic values come from the specs; the
 * exact bit semantics of the BTCOEX registers are not documented here. */
static void lpphy_btcoex_override(struct b43_wldev *dev)
{
	b43_write16(dev, B43_MMIO_BTCOEX_CTL, 0x3);
	b43_write16(dev, B43_MMIO_BTCOEX_TXCTL, 0xFF);
}
/* Software RF-kill: force the RF frontend off via the RF override
 * registers (blocked), or release the overrides again (unblocked). */
static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
					 bool blocked)
{
	//TODO check MAC control register
	if (blocked) {
		/* Override values and enable bits differ per revision. */
		if (dev->phy.rev >= 2) {
			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x83FF);
			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00);
			b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0x80FF);
			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xDFFF);
			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x0808);
		} else {
			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xE0FF);
			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00);
			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFCFF);
			b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x0018);
		}
	} else {
		/* Clear the override-enable bits; hardware control resumes. */
		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xE0FF);
		if (dev->phy.rev >= 2)
			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xF7F7);
		else
			b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFFE7);
	}
}
/* Select the analog baseband filter for the given channel; channel 14
 * (Japan) gets special treatment. This helper was previously called
 * lpphy_japan_filter. */
static void lpphy_set_analog_filter(struct b43_wldev *dev, int channel)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u16 japan_wide = (channel == 14) ? 1 : 0; //SPEC FIXME check japanwidefilter!

	if (dev->phy.rev >= 2) { //SPEC FIXME Isn't this rev0/1-specific?
		b43_radio_write(dev, B2063_TX_BB_SP3, 0x3F);
		return;
	}

	b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFCFF, japan_wide << 9);
	if (dev->phy.rev == 1 && lpphy->rc_cap)
		lpphy_set_rc_cap(dev);
}
/* Route the TSSI (TX signal strength indicator) measurement path.
 * Only the internal modes are implemented; external TSSI
 * (TSSI_MUX_EXT) is unsupported and triggers a warning. */
static void lpphy_set_tssi_mux(struct b43_wldev *dev, enum tssi_mux_mode mode)
{
	if (mode != TSSI_MUX_EXT) {
		b43_radio_set(dev, B2063_PA_SP1, 0x2);
		b43_phy_set(dev, B43_PHY_OFDM(0xF3), 0x1000);
		b43_radio_write(dev, B2063_PA_CTL10, 0x51);
		if (mode == TSSI_MUX_POSTPA) {
			/* Measure behind the power amplifier. */
			b43_radio_mask(dev, B2063_PA_SP1, 0xFFFE);
			b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFC7);
		} else {
			/* Other internal mode (presumably pre-PA). */
			b43_radio_maskset(dev, B2063_PA_SP1, 0xFFFE, 0x1);
			b43_phy_maskset(dev, B43_LPPHY_AFE_CTL_OVRVAL,
					0xFFC7, 0x20);
		}
	} else {
		B43_WARN_ON(1);	/* external TSSI mux is unimplemented */
	}
}
/* Initialize hardware TX power control. */
static void lpphy_tx_pctl_init_hw(struct b43_wldev *dev)
{
	u16 tmp;
	int i;

	//SPEC TODO Call LP PHY Clear TX Power offsets
	/* Fill 64 TX power table entries (table 7 on rev 2+, table 10 on
	 * rev 0/1) with identity values. */
	for (i = 0; i < 64; i++) {
		if (dev->phy.rev >= 2)
			b43_lptab_write(dev, B43_LPTAB32(7, i + 1), i);
		else
			b43_lptab_write(dev, B43_LPTAB32(10, i + 1), i);
	}

	b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0xFF00, 0xFF);
	b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0x8FFF, 0x5000);
	b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_IDLETSSI, 0xFFC0, 0x1F);
	if (dev->phy.rev < 2) {
		b43_phy_mask(dev, B43_LPPHY_LP_PHY_CTL, 0xEFFF);
		b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xDFFF, 0x2000);
	} else {
		b43_phy_mask(dev, B43_PHY_OFDM(0x103), 0xFFFE);
		b43_phy_maskset(dev, B43_PHY_OFDM(0x103), 0xFFFB, 0x4);
		b43_phy_maskset(dev, B43_PHY_OFDM(0x103), 0xFFEF, 0x10);
		b43_radio_maskset(dev, B2063_IQ_CALIB_CTL2, 0xF3, 0x1);
		/* TSSI is measured behind the PA on rev 2+. */
		lpphy_set_tssi_mux(dev, TSSI_MUX_POSTPA);
	}
	b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_IDLETSSI, 0x7FFF, 0x8000);
	b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xFF);
	b43_phy_write(dev, B43_LPPHY_TX_PWR_CTL_DELTAPWR_LIMIT, 0xA);

	/* Cycle power control: off, then software mode, for the idle-TSSI
	 * measurement below. */
	b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD,
			~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF,
			B43_LPPHY_TX_PWR_CTL_CMD_MODE_OFF);
	b43_phy_mask(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 0xF8FF);
	b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_CMD,
			~B43_LPPHY_TX_PWR_CTL_CMD_MODE & 0xFFFF,
			B43_LPPHY_TX_PWR_CTL_CMD_MODE_SW);

	if (dev->phy.rev < 2) {
		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_0, 0xEFFF, 0x1000);
		b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xEFFF);
	} else {
		lpphy_set_tx_power_by_index(dev, 0x7F);
	}

	/* Measure the idle TSSI with a dummy transmission. */
	b43_dummy_transmission(dev, true, true);
	tmp = b43_phy_read(dev, B43_LPPHY_TX_PWR_CTL_STAT);
	if (tmp & 0x8000) {	/* bit 15 presumably flags a valid reading */
		b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_IDLETSSI,
				0xFFC0, (tmp & 0xFF) - 32);
	}
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xEFFF);
	// (SPEC?) TODO Set "Target TX frequency" variable to 0
	// SPEC FIXME "Set BB Multiplier to 0xE000" impossible - bb_mult is u8!
}
static void lpphy_tx_pctl_init_sw(struct b43_wldev *dev)
{
struct lpphy_tx_gains gains;
if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
gains.gm = 4;
gains.pad = 12;
gains.pga = 12;
gains.dac = 0;
} else {
gains.gm = 7;
gains.pad = 14;
gains.pga = 15;
gains.dac = 0;
}
lpphy_set_tx_gains(dev, gains);
lpphy_set_bb_mult(dev, 150);
}
/* Initialize TX power control */
static void lpphy_tx_pctl_init(struct b43_wldev *dev)
{
if (0/*FIXME HWPCTL capable */) {
lpphy_tx_pctl_init_hw(dev);
} else { /* This device is only software TX power control capable. */
lpphy_tx_pctl_init_sw(dev);
}
}
/* Workaround for Broadcom PR 41573: snapshot the TX power control state
 * and part of the power table, re-initialize the PHY, then restore. */
static void lpphy_pr41573_workaround(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u32 *saved_tab;
	const unsigned int saved_tab_size = 256;
	enum b43_lpphy_txpctl_mode txpctl_mode;
	s8 tx_pwr_idx_over;
	u16 tssi_npt, tssi_idx;

	saved_tab = kcalloc(saved_tab_size, sizeof(saved_tab[0]), GFP_KERNEL);
	if (!saved_tab) {
		b43err(dev->wl, "PR41573 failed. Out of memory!\n");
		return;
	}

	/* Snapshot the current TX power control state. */
	lpphy_read_tx_pctl_mode_from_hardware(dev);
	txpctl_mode = lpphy->txpctl_mode;
	tx_pwr_idx_over = lpphy->tx_pwr_idx_over;
	tssi_npt = lpphy->tssi_npt;
	tssi_idx = lpphy->tssi_idx;

	/* Save 256 table entries starting at offset 0x140 (table 7 on
	 * rev 2+, table 10 on rev 0/1). */
	if (dev->phy.rev < 2) {
		b43_lptab_read_bulk(dev, B43_LPTAB32(10, 0x140),
				    saved_tab_size, saved_tab);
	} else {
		b43_lptab_read_bulk(dev, B43_LPTAB32(7, 0x140),
				    saved_tab_size, saved_tab);
	}

	/* Re-initialize the PHY from scratch. */
	//FIXME PHY reset
	lpphy_table_init(dev); //FIXME is table init needed?
	lpphy_baseband_init(dev);
	lpphy_tx_pctl_init(dev);
	b43_lpphy_op_software_rfkill(dev, false);
	lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);

	/* Restore the saved table entries and state. */
	if (dev->phy.rev < 2) {
		b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0x140),
				     saved_tab_size, saved_tab);
	} else {
		b43_lptab_write_bulk(dev, B43_LPTAB32(7, 0x140),
				     saved_tab_size, saved_tab);
	}
	b43_write16(dev, B43_MMIO_CHANNEL, lpphy->channel);
	lpphy->tssi_npt = tssi_npt;
	lpphy->tssi_idx = tssi_idx;
	lpphy_set_analog_filter(dev, lpphy->channel);
	if (tx_pwr_idx_over != -1)	/* -1 means "no override active" */
		lpphy_set_tx_power_by_index(dev, tx_pwr_idx_over);
	if (lpphy->rc_cap)
		lpphy_set_rc_cap(dev);
	b43_lpphy_op_set_rx_antenna(dev, lpphy->antenna);
	lpphy_set_tx_power_control(dev, txpctl_mode);
	kfree(saved_tab);
}
/* Per-channel RX I/Q compensation pair; c1 is programmed into the low
 * byte and c0 into the high byte of B43_LPPHY_RX_COMP_COEFF_S. */
struct lpphy_rx_iq_comp { u8 chan; s8 c1, c0; };
/* RX I/Q compensation starting values for the BCM5354 chip
 * (2.4 GHz channels only). */
static const struct lpphy_rx_iq_comp lpphy_5354_iq_table[] = {
	{ .chan = 1, .c1 = -66, .c0 = 15, },
	{ .chan = 2, .c1 = -66, .c0 = 15, },
	{ .chan = 3, .c1 = -66, .c0 = 15, },
	{ .chan = 4, .c1 = -66, .c0 = 15, },
	{ .chan = 5, .c1 = -66, .c0 = 15, },
	{ .chan = 6, .c1 = -66, .c0 = 15, },
	{ .chan = 7, .c1 = -66, .c0 = 14, },
	{ .chan = 8, .c1 = -66, .c0 = 14, },
	{ .chan = 9, .c1 = -66, .c0 = 14, },
	{ .chan = 10, .c1 = -66, .c0 = 14, },
	{ .chan = 11, .c1 = -66, .c0 = 14, },
	{ .chan = 12, .c1 = -66, .c0 = 13, },
	{ .chan = 13, .c1 = -66, .c0 = 13, },
	{ .chan = 14, .c1 = -66, .c0 = 13, },
};
/* RX I/Q compensation starting values for rev 0/1 LP-PHYs
 * (2.4 GHz and 5 GHz channels). */
static const struct lpphy_rx_iq_comp lpphy_rev0_1_iq_table[] = {
	{ .chan = 1, .c1 = -64, .c0 = 13, },
	{ .chan = 2, .c1 = -64, .c0 = 13, },
	{ .chan = 3, .c1 = -64, .c0 = 13, },
	{ .chan = 4, .c1 = -64, .c0 = 13, },
	{ .chan = 5, .c1 = -64, .c0 = 12, },
	{ .chan = 6, .c1 = -64, .c0 = 12, },
	{ .chan = 7, .c1 = -64, .c0 = 12, },
	{ .chan = 8, .c1 = -64, .c0 = 12, },
	{ .chan = 9, .c1 = -64, .c0 = 12, },
	{ .chan = 10, .c1 = -64, .c0 = 11, },
	{ .chan = 11, .c1 = -64, .c0 = 11, },
	{ .chan = 12, .c1 = -64, .c0 = 11, },
	{ .chan = 13, .c1 = -64, .c0 = 11, },
	{ .chan = 14, .c1 = -64, .c0 = 10, },
	{ .chan = 34, .c1 = -62, .c0 = 24, },
	{ .chan = 38, .c1 = -62, .c0 = 24, },
	{ .chan = 42, .c1 = -62, .c0 = 24, },
	{ .chan = 46, .c1 = -62, .c0 = 23, },
	{ .chan = 36, .c1 = -62, .c0 = 24, },
	{ .chan = 40, .c1 = -62, .c0 = 24, },
	{ .chan = 44, .c1 = -62, .c0 = 23, },
	{ .chan = 48, .c1 = -62, .c0 = 23, },
	{ .chan = 52, .c1 = -62, .c0 = 23, },
	{ .chan = 56, .c1 = -62, .c0 = 22, },
	{ .chan = 60, .c1 = -62, .c0 = 22, },
	{ .chan = 64, .c1 = -62, .c0 = 22, },
	{ .chan = 100, .c1 = -62, .c0 = 16, },
	{ .chan = 104, .c1 = -62, .c0 = 16, },
	{ .chan = 108, .c1 = -62, .c0 = 15, },
	{ .chan = 112, .c1 = -62, .c0 = 14, },
	{ .chan = 116, .c1 = -62, .c0 = 14, },
	{ .chan = 120, .c1 = -62, .c0 = 13, },
	{ .chan = 124, .c1 = -62, .c0 = 12, },
	{ .chan = 128, .c1 = -62, .c0 = 12, },
	{ .chan = 132, .c1 = -62, .c0 = 12, },
	{ .chan = 136, .c1 = -62, .c0 = 11, },
	{ .chan = 140, .c1 = -62, .c0 = 10, },
	{ .chan = 149, .c1 = -61, .c0 = 9, },
	{ .chan = 153, .c1 = -61, .c0 = 9, },
	{ .chan = 157, .c1 = -61, .c0 = 9, },
	{ .chan = 161, .c1 = -61, .c0 = 8, },
	{ .chan = 165, .c1 = -61, .c0 = 8, },
	{ .chan = 184, .c1 = -62, .c0 = 25, },
	{ .chan = 188, .c1 = -62, .c0 = 25, },
	{ .chan = 192, .c1 = -62, .c0 = 25, },
	{ .chan = 196, .c1 = -62, .c0 = 25, },
	{ .chan = 200, .c1 = -62, .c0 = 25, },
	{ .chan = 204, .c1 = -62, .c0 = 25, },
	{ .chan = 208, .c1 = -62, .c0 = 25, },
	{ .chan = 212, .c1 = -62, .c0 = 25, },
	{ .chan = 216, .c1 = -62, .c0 = 26, },
};
/* Rev 2+ LP-PHYs use a single compensation pair for all channels
 * (chan = 0 acts as a wildcard). */
static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
	.chan = 0,
	.c1 = -64,
	.c0 = 0,
};
/*
 * Measure and program the RX I/Q compensation coefficients.
 *
 * Takes an I/Q estimate over @samples samples and derives the new
 * compensation pair (c0, c1), which is written into
 * B43_LPPHY_RX_COMP_COEFF_S (c0 high byte, c1 low byte). On failure
 * the previously programmed coefficients are restored.
 *
 * Returns nonzero on success, 0 on failure (estimation failed or
 * the measured signal power was too low).
 */
static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
{
	struct lpphy_iq_est iq_est;
	u16 c0, c1;
	int prod, ipwr, qpwr, prod_msb, q_msb, tmp1, tmp2, tmp3, tmp4, ret;

	/* Save the current coefficients so they can be restored on error.
	 * Bugfix: this used to read "c1 |= 0xFF", which destroyed the
	 * saved low-byte value and OR'ed stray high bits back into the
	 * register on the error path. Extract the low byte instead. */
	c1 = b43_phy_read(dev, B43_LPPHY_RX_COMP_COEFF_S);
	c0 = c1 >> 8;
	c1 &= 0xFF;

	/* Load the measurement coefficients. */
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, 0x00C0);
	b43_phy_mask(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF);

	ret = lpphy_rx_iq_est(dev, samples, 32, &iq_est);
	if (!ret)
		goto out;

	prod = iq_est.iq_prod;
	ipwr = iq_est.i_pwr;
	qpwr = iq_est.q_pwr;

	/* Refuse to calibrate on (nearly) zero signal power. */
	if (ipwr + qpwr < 2) {
		ret = 0;
		goto out;
	}

	/* Fixed-point computation of the correction terms; the shifts are
	 * scaled by the operands' MSB positions to preserve precision. */
	prod_msb = fls(abs(prod));
	q_msb = fls(abs(qpwr));
	tmp1 = prod_msb - 20;
	if (tmp1 >= 0) {
		tmp3 = ((prod << (30 - prod_msb)) + (ipwr >> (1 + tmp1))) /
			(ipwr >> tmp1);
	} else {
		tmp3 = ((prod << (30 - prod_msb)) + (ipwr << (-1 - tmp1))) /
			(ipwr << -tmp1);
	}
	tmp2 = q_msb - 11;
	if (tmp2 >= 0)
		tmp4 = (qpwr << (31 - q_msb)) / (ipwr >> tmp2);
	else
		tmp4 = (qpwr << (31 - q_msb)) / (ipwr << -tmp2);
	tmp4 -= tmp3 * tmp3;
	tmp4 = -int_sqrt(tmp4);
	c0 = tmp3 >> 3;
	c1 = tmp4 >> 4;
out:
	/* Program the new (or restore the saved) coefficients. */
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, c1);
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF, c0 << 8);
	return ret;
}
/* Start playback of @samples entries from the sample-play buffer.
 * @loops: number of repetitions (0xFFFF selects endless looping)
 * @wait: inter-loop wait value, in register units */
static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops,
			      u16 wait)
{
	b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL,
			0xFFC0, samples - 1);
	/* The count register presumably holds loops - 1, with 0xFFFF
	 * meaning "endless" - TODO confirm against the specs. */
	if (loops != 0xFFFF)
		loops--;
	b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000, loops);
	b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL, 0x3F, wait << 6);
	/* Trigger playback. */
	b43_phy_set(dev, B43_LPPHY_A_PHY_CTL_ADDR, 0x1);
}
//SPEC FIXME what does a negative freq mean?
/* Generate a continuous TX tone of frequency @freq at amplitude @max
 * using the sample-play buffer. freq == 0 plays a 2-sample constant
 * pattern. (Frequency unit appears tied to the 20000 constant below
 * - TODO confirm it is kHz.) */
static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u16 buf[64];
	int i, samples = 0, angle = 0;
	/* Per-sample phase increment in CORDIC angle units. */
	int rotation = (((36 * freq) / 20) << 16) / 100;
	struct b43_c32 sample;

	lpphy->tx_tone_freq = freq;

	if (freq) {
		/* Find i for which abs(freq) integrally divides 20000 * i */
		for (i = 1; samples * abs(freq) != 20000 * i; i++) {
			samples = (20000 * i) / abs(freq);
			if(B43_WARN_ON(samples > 63))
				return;	/* period won't fit the 64-entry buffer */
		}
	} else {
		samples = 2;
	}

	/* Build one period of the complex sinusoid; I goes into the high
	 * byte, Q into the low byte of each buffer entry. */
	for (i = 0; i < samples; i++) {
		sample = b43_cordic(angle);
		angle += rotation;
		buf[i] = CORDIC_CONVERT((sample.i * max) & 0xFF) << 8;
		buf[i] |= CORDIC_CONVERT((sample.q * max) & 0xFF);
	}
	b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf);
	/* Loop the buffer endlessly with no inter-loop wait. */
	lpphy_run_samples(dev, samples, 0xFFFF, 0);
}
/* Stop a running TX tone and wait (up to ~3.1 ms) for the sample
 * player to go idle. */
static void lpphy_stop_tx_tone(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	int tries = 31;

	lpphy->tx_tone_freq = 0;
	/* Clear the sample-play loop count. */
	b43_phy_mask(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000);
	/* Poll the busy bit until the player stops. */
	while (tries-- && (b43_phy_read(dev, B43_LPPHY_A_PHY_CTL_ADDR) & 0x1))
		udelay(100);
}
/* PA pre-distortion calibration - not implemented yet. */
static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
			   int mode, bool useindex, u8 index)
{
	//TODO
}
/*
 * Run PA pre-distortion calibration at TX power calibration time.
 * Saves the TX power control state, runs the calibration with power
 * control disabled, and restores the previous state afterwards.
 */
static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	struct lpphy_tx_gains gains, oldgains;
	int old_txpctl, old_afe_ovr, old_rf, old_bbmult;

	/* Bugfix: "gains" was handed to lpphy_papd_cal() without ever
	 * being initialized, and "oldgains" was only conditionally
	 * assigned. Zero both so no stack garbage can be used. */
	memset(&gains, 0, sizeof(gains));
	memset(&oldgains, 0, sizeof(oldgains));

	/* Snapshot the state we are about to disturb. */
	lpphy_read_tx_pctl_mode_from_hardware(dev);
	old_txpctl = lpphy->txpctl_mode;
	old_afe_ovr = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40;
	if (old_afe_ovr)
		oldgains = lpphy_get_tx_gains(dev);
	old_rf = b43_phy_read(dev, B43_LPPHY_RF_PWR_OVERRIDE) & 0xFF;
	old_bbmult = lpphy_get_bb_mult(dev);

	lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
	/* BCM4325 rev 0 uses a different calibration index. */
	if (dev->dev->chip_id == 0x4325 && dev->dev->chip_rev == 0)
		lpphy_papd_cal(dev, gains, 0, 1, 30);
	else
		lpphy_papd_cal(dev, gains, 0, 1, 65);

	/* Restore the saved state. */
	if (old_afe_ovr)
		lpphy_set_tx_gains(dev, oldgains);
	lpphy_set_bb_mult(dev, old_bbmult);
	lpphy_set_tx_power_control(dev, old_txpctl);
	b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00, old_rf);
}
/*
 * RX I/Q calibration.
 * @noise: calibrate against noise instead of a generated TX tone
 * @tx, @rx, @pa: T/R-switch and PA override settings for the measurement
 * @gains: TX gains for the tone (NULL selects all-zero gains)
 *
 * Returns the result of lpphy_calc_rx_iq_comp() (0 on failure).
 */
static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx,
			   bool rx, bool pa, struct lpphy_tx_gains *gains)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	const struct lpphy_rx_iq_comp *iqcomp = NULL;
	struct lpphy_tx_gains nogains, oldgains;
	u16 tmp;
	int i, ret;

	memset(&nogains, 0, sizeof(nogains));
	memset(&oldgains, 0, sizeof(oldgains));

	/* Pick the starting compensation pair for the current channel. */
	if (dev->dev->chip_id == 0x5354) {
		for (i = 0; i < ARRAY_SIZE(lpphy_5354_iq_table); i++) {
			if (lpphy_5354_iq_table[i].chan == lpphy->channel) {
				iqcomp = &lpphy_5354_iq_table[i];
			}
		}
	} else if (dev->phy.rev >= 2) {
		iqcomp = &lpphy_rev2plus_iq_comp;
	} else {
		for (i = 0; i < ARRAY_SIZE(lpphy_rev0_1_iq_table); i++) {
			if (lpphy_rev0_1_iq_table[i].chan == lpphy->channel) {
				iqcomp = &lpphy_rev0_1_iq_table[i];
			}
		}
	}
	if (B43_WARN_ON(!iqcomp))
		return 0;

	/* Program the starting coefficients: c1 low byte, c0 high byte. */
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, iqcomp->c1);
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S,
			0x00FF, iqcomp->c0 << 8);

	/* Noise calibration forces TX path on, RX and PA off. */
	if (noise) {
		tx = true;
		rx = false;
		pa = false;
	}
	lpphy_set_trsw_over(dev, tx, rx);

	/* The PA override bit differs between the two bands. */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
				0xFFF7, pa << 3);
	} else {
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x20);
		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
				0xFFDF, pa << 5);
	}

	tmp = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40;
	if (noise)
		lpphy_set_rx_gain(dev, 0x2D5D);
	else {
		/* Save the current TX gains if overridden, then apply the
		 * requested tone gains. */
		if (tmp)
			oldgains = lpphy_get_tx_gains(dev);
		if (!gains)
			gains = &nogains;
		lpphy_set_tx_gains(dev, *gains);
	}

	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE);
	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x800);

	/* Mute the PHY while measuring. */
	lpphy_set_deaf(dev, false);
	if (noise)
		ret = lpphy_calc_rx_iq_comp(dev, 0xFFF0);
	else {
		/* Measure against a generated test tone. */
		lpphy_start_tx_tone(dev, 4000, 100);
		ret = lpphy_calc_rx_iq_comp(dev, 0x4000);
		lpphy_stop_tx_tone(dev);
	}
	lpphy_clear_deaf(dev, false);

	/* Undo all the overrides set above. */
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFC);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFF7);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFDF);
	if (!noise) {
		if (tmp)
			lpphy_set_tx_gains(dev, oldgains);
		else
			lpphy_disable_tx_gain_override(dev);
	}
	lpphy_disable_rx_gain_override(dev);
	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE);
	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xF7FF);
	return ret;
}
/* Main LP-PHY calibration entry point. */
static void lpphy_calibration(struct b43_wldev *dev)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	enum b43_lpphy_txpctl_mode saved_pctl_mode;
	bool full_cal = false;

	/* A full calibration is only needed when the channel changed
	 * since the last full calibration. */
	if (lpphy->full_calib_chan != lpphy->channel) {
		full_cal = true;
		lpphy->full_calib_chan = lpphy->channel;
	}

	b43_mac_suspend(dev);	/* quiesce the MAC while calibrating */
	lpphy_btcoex_override(dev);
	if (dev->phy.rev >= 2)
		lpphy_save_dig_flt_state(dev);

	/* Run the calibrations with TX power control turned off. */
	lpphy_read_tx_pctl_mode_from_hardware(dev);
	saved_pctl_mode = lpphy->txpctl_mode;
	lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
	//TODO Perform transmit power table I/Q LO calibration
	if ((dev->phy.rev == 0) && (saved_pctl_mode != B43_LPPHY_TXPCTL_OFF))
		lpphy_pr41573_workaround(dev);
	if ((dev->phy.rev >= 2) && full_cal) {
		lpphy_papd_cal_txpwr(dev);
	}
	lpphy_set_tx_power_control(dev, saved_pctl_mode);

	if (dev->phy.rev >= 2)
		lpphy_restore_dig_flt_state(dev);
	/* Noise-based RX I/Q calibration. */
	lpphy_rx_iq_cal(dev, true, true, false, false, NULL);
	b43_mac_enable(dev);
}
/* Read-modify-write a PHY register: new value = (old & mask) | set. */
static void b43_lpphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask,
				 u16 set)
{
	b43_write16f(dev, B43_MMIO_PHY_CONTROL, reg);
	b43_write16(dev, B43_MMIO_PHY_DATA,
		    (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set);
}
/* Read a 16-bit radio register through the indirect access registers. */
static u16 b43_lpphy_op_radio_read(struct b43_wldev *dev, u16 reg)
{
	/* Register 1 is a 32-bit register. */
	B43_WARN_ON(reg == 1);

	/* LP-PHY needs a special bit set in the address for read access. */
	if (dev->phy.rev >= 2)
		reg |= 0x200;
	else if (reg != 0x4001)
		reg |= 0x100;

	b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg);
	return b43_read16(dev, B43_MMIO_RADIO_DATA_LOW);
}
/* Write a 16-bit radio register through the indirect access registers. */
static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
{
	/* Register 1 is a 32-bit register. */
	B43_WARN_ON(reg == 1);
	b43_write16f(dev, B43_MMIO_RADIO_CONTROL, reg);
	b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
}
/* Per-channel tuning data for the B2062/B2063 radios. */
struct b206x_channel {
	u8 channel;	/* 802.11 channel number */
	u16 freq;	/* center frequency in MHz */
	u8 data[12];	/* radio register values (count used is radio-specific) */
};
/* Channel tuning table for the B2062 radio. Only data[0..8] are used
 * for this radio; entries for all 2.4 GHz channels share one register
 * set, while the 5 GHz entries vary per channel. */
static const struct b206x_channel b2062_chantbl[] = {
	{ .channel = 1, .freq = 2412, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 2, .freq = 2417, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 3, .freq = 2422, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 4, .freq = 2427, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 5, .freq = 2432, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 6, .freq = 2437, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 7, .freq = 2442, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 8, .freq = 2447, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 9, .freq = 2452, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 10, .freq = 2457, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 11, .freq = 2462, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 12, .freq = 2467, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 13, .freq = 2472, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 14, .freq = 2484, .data[0] = 0xFF, .data[1] = 0xFF,
	  .data[2] = 0xB5, .data[3] = 0x1B, .data[4] = 0x24, .data[5] = 0x32,
	  .data[6] = 0x32, .data[7] = 0x88, .data[8] = 0x88, },
	{ .channel = 34, .freq = 5170, .data[0] = 0x00, .data[1] = 0x22,
	  .data[2] = 0x20, .data[3] = 0x84, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 38, .freq = 5190, .data[0] = 0x00, .data[1] = 0x11,
	  .data[2] = 0x10, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 42, .freq = 5210, .data[0] = 0x00, .data[1] = 0x11,
	  .data[2] = 0x10, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 46, .freq = 5230, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 36, .freq = 5180, .data[0] = 0x00, .data[1] = 0x11,
	  .data[2] = 0x20, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 40, .freq = 5200, .data[0] = 0x00, .data[1] = 0x11,
	  .data[2] = 0x10, .data[3] = 0x84, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 44, .freq = 5220, .data[0] = 0x00, .data[1] = 0x11,
	  .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 48, .freq = 5240, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 52, .freq = 5260, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 56, .freq = 5280, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x83, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 60, .freq = 5300, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x63, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 64, .freq = 5320, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x62, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 100, .freq = 5500, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x30, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 104, .freq = 5520, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x20, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 108, .freq = 5540, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x20, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 112, .freq = 5560, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x20, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 116, .freq = 5580, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x10, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 120, .freq = 5600, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 124, .freq = 5620, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 128, .freq = 5640, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 132, .freq = 5660, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 136, .freq = 5680, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 140, .freq = 5700, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 149, .freq = 5745, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 153, .freq = 5765, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 157, .freq = 5785, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 161, .freq = 5805, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 165, .freq = 5825, .data[0] = 0x00, .data[1] = 0x00,
	  .data[2] = 0x00, .data[3] = 0x00, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x37, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 184, .freq = 4920, .data[0] = 0x55, .data[1] = 0x77,
	  .data[2] = 0x90, .data[3] = 0xF7, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, },
	{ .channel = 188, .freq = 4940, .data[0] = 0x44, .data[1] = 0x77,
	  .data[2] = 0x80, .data[3] = 0xE7, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, },
	{ .channel = 192, .freq = 4960, .data[0] = 0x44, .data[1] = 0x66,
	  .data[2] = 0x80, .data[3] = 0xE7, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, },
	{ .channel = 196, .freq = 4980, .data[0] = 0x33, .data[1] = 0x66,
	  .data[2] = 0x70, .data[3] = 0xC7, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, },
	{ .channel = 200, .freq = 5000, .data[0] = 0x22, .data[1] = 0x55,
	  .data[2] = 0x60, .data[3] = 0xD7, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, },
	{ .channel = 204, .freq = 5020, .data[0] = 0x22, .data[1] = 0x55,
	  .data[2] = 0x60, .data[3] = 0xC7, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, },
	{ .channel = 208, .freq = 5040, .data[0] = 0x22, .data[1] = 0x44,
	  .data[2] = 0x50, .data[3] = 0xC7, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0xFF, },
	{ .channel = 212, .freq = 5060, .data[0] = 0x11, .data[1] = 0x44,
	  .data[2] = 0x50, .data[3] = 0xA5, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
	{ .channel = 216, .freq = 5080, .data[0] = 0x00, .data[1] = 0x44,
	  .data[2] = 0x40, .data[3] = 0xB6, .data[4] = 0x3C, .data[5] = 0x77,
	  .data[6] = 0x35, .data[7] = 0xFF, .data[8] = 0x88, },
};
/*
 * Per-channel tuning table for the B2063 radio.  Each entry holds the
 * twelve register values (data[0..11]) that lpphy_b2063_tune() writes,
 * in order, to the LOGEN/RX/TX/PA registers for that channel.  Values
 * come from the vendor driver / reverse-engineered specs -- do not
 * edit by hand.  Covers 2.4 GHz channels 1-14, the 5 GHz band, and the
 * 4.9 GHz public-safety channels 184-216.
 */
static const struct b206x_channel b2063_chantbl[] = {
	{ .channel = 1, .freq = 2412, .data[0] = 0x6F, .data[1] = 0x3C,
	  .data[2] = 0x3C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 2, .freq = 2417, .data[0] = 0x6F, .data[1] = 0x3C,
	  .data[2] = 0x3C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 3, .freq = 2422, .data[0] = 0x6F, .data[1] = 0x3C,
	  .data[2] = 0x3C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 4, .freq = 2427, .data[0] = 0x6F, .data[1] = 0x2C,
	  .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 5, .freq = 2432, .data[0] = 0x6F, .data[1] = 0x2C,
	  .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 6, .freq = 2437, .data[0] = 0x6F, .data[1] = 0x2C,
	  .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 7, .freq = 2442, .data[0] = 0x6F, .data[1] = 0x2C,
	  .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 8, .freq = 2447, .data[0] = 0x6F, .data[1] = 0x2C,
	  .data[2] = 0x2C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 9, .freq = 2452, .data[0] = 0x6F, .data[1] = 0x1C,
	  .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 10, .freq = 2457, .data[0] = 0x6F, .data[1] = 0x1C,
	  .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 11, .freq = 2462, .data[0] = 0x6E, .data[1] = 0x1C,
	  .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 12, .freq = 2467, .data[0] = 0x6E, .data[1] = 0x1C,
	  .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 13, .freq = 2472, .data[0] = 0x6E, .data[1] = 0x1C,
	  .data[2] = 0x1C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 14, .freq = 2484, .data[0] = 0x6E, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x04, .data[4] = 0x05, .data[5] = 0x05,
	  .data[6] = 0x05, .data[7] = 0x05, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x80, .data[11] = 0x70, },
	{ .channel = 34, .freq = 5170, .data[0] = 0x6A, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x02, .data[5] = 0x05,
	  .data[6] = 0x0D, .data[7] = 0x0D, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x20, .data[11] = 0x00, },
	{ .channel = 36, .freq = 5180, .data[0] = 0x6A, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x05,
	  .data[6] = 0x0D, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x20, .data[11] = 0x00, },
	{ .channel = 38, .freq = 5190, .data[0] = 0x6A, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x04,
	  .data[6] = 0x0C, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x80,
	  .data[10] = 0x20, .data[11] = 0x00, },
	{ .channel = 40, .freq = 5200, .data[0] = 0x69, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x04,
	  .data[6] = 0x0C, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x70,
	  .data[10] = 0x20, .data[11] = 0x00, },
	{ .channel = 42, .freq = 5210, .data[0] = 0x69, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x01, .data[5] = 0x04,
	  .data[6] = 0x0B, .data[7] = 0x0C, .data[8] = 0x77, .data[9] = 0x70,
	  .data[10] = 0x20, .data[11] = 0x00, },
	{ .channel = 44, .freq = 5220, .data[0] = 0x69, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x04,
	  .data[6] = 0x0B, .data[7] = 0x0B, .data[8] = 0x77, .data[9] = 0x60,
	  .data[10] = 0x20, .data[11] = 0x00, },
	{ .channel = 46, .freq = 5230, .data[0] = 0x69, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x03,
	  .data[6] = 0x0A, .data[7] = 0x0B, .data[8] = 0x77, .data[9] = 0x60,
	  .data[10] = 0x20, .data[11] = 0x00, },
	{ .channel = 48, .freq = 5240, .data[0] = 0x69, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x03,
	  .data[6] = 0x0A, .data[7] = 0x0A, .data[8] = 0x77, .data[9] = 0x60,
	  .data[10] = 0x20, .data[11] = 0x00, },
	{ .channel = 52, .freq = 5260, .data[0] = 0x68, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x02,
	  .data[6] = 0x09, .data[7] = 0x09, .data[8] = 0x77, .data[9] = 0x60,
	  .data[10] = 0x20, .data[11] = 0x00, },
	{ .channel = 56, .freq = 5280, .data[0] = 0x68, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x01,
	  .data[6] = 0x08, .data[7] = 0x08, .data[8] = 0x77, .data[9] = 0x50,
	  .data[10] = 0x10, .data[11] = 0x00, },
	{ .channel = 60, .freq = 5300, .data[0] = 0x68, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x01,
	  .data[6] = 0x08, .data[7] = 0x08, .data[8] = 0x77, .data[9] = 0x50,
	  .data[10] = 0x10, .data[11] = 0x00, },
	{ .channel = 64, .freq = 5320, .data[0] = 0x67, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x08, .data[7] = 0x08, .data[8] = 0x77, .data[9] = 0x50,
	  .data[10] = 0x10, .data[11] = 0x00, },
	{ .channel = 100, .freq = 5500, .data[0] = 0x64, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x02, .data[7] = 0x01, .data[8] = 0x77, .data[9] = 0x20,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 104, .freq = 5520, .data[0] = 0x64, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x01, .data[7] = 0x01, .data[8] = 0x77, .data[9] = 0x20,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 108, .freq = 5540, .data[0] = 0x63, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x01, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x10,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 112, .freq = 5560, .data[0] = 0x63, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x10,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 116, .freq = 5580, .data[0] = 0x62, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x10,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 120, .freq = 5600, .data[0] = 0x62, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 124, .freq = 5620, .data[0] = 0x62, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 128, .freq = 5640, .data[0] = 0x61, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 132, .freq = 5660, .data[0] = 0x61, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 136, .freq = 5680, .data[0] = 0x61, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 140, .freq = 5700, .data[0] = 0x60, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 149, .freq = 5745, .data[0] = 0x60, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 153, .freq = 5765, .data[0] = 0x60, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 157, .freq = 5785, .data[0] = 0x60, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 161, .freq = 5805, .data[0] = 0x60, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 165, .freq = 5825, .data[0] = 0x60, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x00, .data[5] = 0x00,
	  .data[6] = 0x00, .data[7] = 0x00, .data[8] = 0x77, .data[9] = 0x00,
	  .data[10] = 0x00, .data[11] = 0x00, },
	{ .channel = 184, .freq = 4920, .data[0] = 0x6E, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x09, .data[5] = 0x0E,
	  .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xC0,
	  .data[10] = 0x50, .data[11] = 0x00, },
	{ .channel = 188, .freq = 4940, .data[0] = 0x6E, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x09, .data[5] = 0x0D,
	  .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xB0,
	  .data[10] = 0x50, .data[11] = 0x00, },
	{ .channel = 192, .freq = 4960, .data[0] = 0x6E, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0C,
	  .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xB0,
	  .data[10] = 0x50, .data[11] = 0x00, },
	{ .channel = 196, .freq = 4980, .data[0] = 0x6D, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0C,
	  .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xA0,
	  .data[10] = 0x40, .data[11] = 0x00, },
	{ .channel = 200, .freq = 5000, .data[0] = 0x6D, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0B,
	  .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xA0,
	  .data[10] = 0x40, .data[11] = 0x00, },
	{ .channel = 204, .freq = 5020, .data[0] = 0x6D, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x08, .data[5] = 0x0A,
	  .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0xA0,
	  .data[10] = 0x40, .data[11] = 0x00, },
	{ .channel = 208, .freq = 5040, .data[0] = 0x6C, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x07, .data[5] = 0x09,
	  .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0x90,
	  .data[10] = 0x40, .data[11] = 0x00, },
	{ .channel = 212, .freq = 5060, .data[0] = 0x6C, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x06, .data[5] = 0x08,
	  .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0x90,
	  .data[10] = 0x40, .data[11] = 0x00, },
	{ .channel = 216, .freq = 5080, .data[0] = 0x6C, .data[1] = 0x0C,
	  .data[2] = 0x0C, .data[3] = 0x00, .data[4] = 0x05, .data[5] = 0x08,
	  .data[6] = 0x0F, .data[7] = 0x0F, .data[8] = 0x77, .data[9] = 0x90,
	  .data[10] = 0x40, .data[11] = 0x00, },
};
/*
 * Pulse the B2062 RF PLL bias: force CTL2 high, wait, then release it.
 * The BCM5354 needs an extra N_COMM1 write and releases to a different
 * value than other chips.
 */
static void lpphy_b2062_reset_pll_bias(struct b43_wldev *dev)
{
	int is_5354 = (dev->dev->chip_id == 0x5354);

	b43_radio_write(dev, B2062_S_RFPLL_CTL2, 0xFF);
	udelay(20);
	if (is_5354)
		b43_radio_write(dev, B2062_N_COMM1, 4);
	b43_radio_write(dev, B2062_S_RFPLL_CTL2, is_5354 ? 4 : 0);
	udelay(5);
}
/*
 * Trigger a VCO calibration on the B2062 RF PLL by stepping CTL21
 * through 0x42 then 0x62, then give the calibration time to run.
 */
static void lpphy_b2062_vco_calib(struct b43_wldev *dev)
{
	static const unsigned int seq[] = { 0x42, 0x62 };
	unsigned int i;

	for (i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		b43_radio_write(dev, B2062_S_RFPLL_CTL21, seq[i]);
	udelay(200);
}
/*
 * Tune the B2062 radio to @channel.
 *
 * Programs the per-channel LGEN/TX registers from b2062_chantbl, then
 * computes the RFPLL divider (integer part in CTL26, fraction bytes in
 * CTL27..CTL29) from the crystal frequency and runs a VCO calibration.
 * Returns 0 on success, -EINVAL for a channel not in the table, or
 * -EIO if the PLL still reports no lock after one retry with alternate
 * CTL33/CTL34 settings.
 */
static int lpphy_b2062_tune(struct b43_wldev *dev,
			    unsigned int channel)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	struct ssb_bus *bus = dev->dev->sdev->bus;
	const struct b206x_channel *chandata = NULL;
	u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
	u32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp8, tmp9;
	int i, err = 0;

	/* Look the channel up in the tuning table. */
	for (i = 0; i < ARRAY_SIZE(b2062_chantbl); i++) {
		if (b2062_chantbl[i].channel == channel) {
			chandata = &b2062_chantbl[i];
			break;
		}
	}

	if (B43_WARN_ON(!chandata))
		return -EINVAL;

	/* Program the per-channel front-end register values. */
	b43_radio_set(dev, B2062_S_RFPLL_CTL14, 0x04);
	b43_radio_write(dev, B2062_N_LGENA_TUNE0, chandata->data[0]);
	b43_radio_write(dev, B2062_N_LGENA_TUNE2, chandata->data[1]);
	b43_radio_write(dev, B2062_N_LGENA_TUNE3, chandata->data[2]);
	b43_radio_write(dev, B2062_N_TX_TUNE, chandata->data[3]);
	b43_radio_write(dev, B2062_S_LGENG_CTL1, chandata->data[4]);
	b43_radio_write(dev, B2062_N_LGENA_CTL5, chandata->data[5]);
	b43_radio_write(dev, B2062_N_LGENA_CTL6, chandata->data[6]);
	b43_radio_write(dev, B2062_N_TX_PGA, chandata->data[7]);
	b43_radio_write(dev, B2062_N_TX_PAD, chandata->data[8]);

	tmp1 = crystal_freq / 1000;	/* crystal rate scaled down by 1000 */
	tmp2 = lpphy->pdiv * 1000;
	b43_radio_write(dev, B2062_S_RFPLL_CTL33, 0xCC);
	b43_radio_write(dev, B2062_S_RFPLL_CTL34, 0x07);
	lpphy_b2062_reset_pll_bias(dev);

	/* Sub-4000 (MHz) channels use a doubled divider target --
	 * cf. the vco_freq shift in lpphy_b2063_tune(). */
	tmp3 = tmp2 * channel2freq_lp(channel);
	if (channel2freq_lp(channel) < 4000)
		tmp3 *= 2;
	tmp4 = 48 * tmp1;

	/* Long division of tmp3 by tmp4: integer part into CTL26, then
	 * three successive 8-bit fraction bytes into CTL27..CTL29 (the
	 * last byte is rounded to nearest via the 2*tmp7/tmp4 term). */
	tmp6 = tmp3 / tmp4;
	tmp7 = tmp3 % tmp4;
	b43_radio_write(dev, B2062_S_RFPLL_CTL26, tmp6);
	tmp5 = tmp7 * 0x100;
	tmp6 = tmp5 / tmp4;
	tmp7 = tmp5 % tmp4;
	b43_radio_write(dev, B2062_S_RFPLL_CTL27, tmp6);
	tmp5 = tmp7 * 0x100;
	tmp6 = tmp5 / tmp4;
	tmp7 = tmp5 % tmp4;
	b43_radio_write(dev, B2062_S_RFPLL_CTL28, tmp6);
	tmp5 = tmp7 * 0x100;
	tmp6 = tmp5 / tmp4;
	tmp7 = tmp5 % tmp4;
	b43_radio_write(dev, B2062_S_RFPLL_CTL29, tmp6 + ((2 * tmp7) / tmp4));

	/* Derive CTL23/CTL24 from the current CTL19 value (division by
	 * 6*tmp1, rounded to nearest). */
	tmp8 = b43_radio_read(dev, B2062_S_RFPLL_CTL19);
	tmp9 = ((2 * tmp3 * (tmp8 + 1)) + (3 * tmp1)) / (6 * tmp1);
	b43_radio_write(dev, B2062_S_RFPLL_CTL23, (tmp9 >> 8) + 16);
	b43_radio_write(dev, B2062_S_RFPLL_CTL24, tmp9 & 0xFF);

	lpphy_b2062_vco_calib(dev);
	if (b43_radio_read(dev, B2062_S_RFPLL_CTL3) & 0x10) {
		/* No lock; retry once with alternate CTL33/CTL34. */
		b43_radio_write(dev, B2062_S_RFPLL_CTL33, 0xFC);
		b43_radio_write(dev, B2062_S_RFPLL_CTL34, 0);
		lpphy_b2062_reset_pll_bias(dev);
		lpphy_b2062_vco_calib(dev);
		if (b43_radio_read(dev, B2062_S_RFPLL_CTL3) & 0x10)
			err = -EIO;
	}

	b43_radio_mask(dev, B2062_S_RFPLL_CTL14, ~0x04);
	return err;
}
/*
 * Run a VCO calibration cycle on the B2063 PLL: clear the PLL_SP1 bit,
 * step the low CALNRST bits through 0 -> 4 -> 6 -> 7 (keeping the
 * upper bits intact), then restore PLL_SP1.
 */
static void lpphy_b2063_vco_calib(struct b43_wldev *dev)
{
	static const u16 steps[] = { 0x0, 0x4, 0x6, 0x7 };
	unsigned int i;
	u16 base;

	b43_radio_mask(dev, B2063_PLL_SP1, ~0x40);
	base = b43_radio_read(dev, B2063_PLL_JTAG_CALNRST) & 0xF8;
	for (i = 0; i < sizeof(steps) / sizeof(steps[0]); i++) {
		b43_radio_write(dev, B2063_PLL_JTAG_CALNRST, base | steps[i]);
		/* Short settle between steps; a long wait after the
		 * final step. */
		udelay(steps[i] == 0x7 ? 300 : 1);
	}
	b43_radio_set(dev, B2063_PLL_SP1, 0x40);
}
/*
 * Tune the B2063 radio to @channel.
 *
 * Programs the per-channel front-end registers from b2063_chantbl,
 * derives the PLL VCO-calibration timeouts, the fractional divider
 * words (SG1..SG4) and the CP/LF register settings from the crystal
 * frequency, then runs a VCO calibration.  Returns 0 on success or
 * -EINVAL for a channel that is not in the table.
 */
static int lpphy_b2063_tune(struct b43_wldev *dev,
			    unsigned int channel)
{
	struct ssb_bus *bus = dev->dev->sdev->bus;
	/*
	 * Must NOT be 'static': a block-scope static pointer keeps the
	 * previous lookup result across calls, so a later call with a
	 * channel missing from the table would silently reuse stale
	 * tuning data instead of failing the !chandata check below.
	 */
	const struct b206x_channel *chandata = NULL;
	u32 crystal_freq = bus->chipco.pmu.crystalfreq * 1000;
	u32 freqref, vco_freq, val1, val2, val3, timeout, timeoutref, count;
	u16 old_comm15, scale;
	u32 tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
	int i, div = (crystal_freq <= 26000000 ? 1 : 2);

	/* Look the channel up in the tuning table. */
	for (i = 0; i < ARRAY_SIZE(b2063_chantbl); i++) {
		if (b2063_chantbl[i].channel == channel) {
			chandata = &b2063_chantbl[i];
			break;
		}
	}

	if (B43_WARN_ON(!chandata))
		return -EINVAL;

	/* Program the per-channel front-end register values. */
	b43_radio_write(dev, B2063_LOGEN_VCOBUF1, chandata->data[0]);
	b43_radio_write(dev, B2063_LOGEN_MIXER2, chandata->data[1]);
	b43_radio_write(dev, B2063_LOGEN_BUF2, chandata->data[2]);
	b43_radio_write(dev, B2063_LOGEN_RCCR1, chandata->data[3]);
	b43_radio_write(dev, B2063_A_RX_1ST3, chandata->data[4]);
	b43_radio_write(dev, B2063_A_RX_2ND1, chandata->data[5]);
	b43_radio_write(dev, B2063_A_RX_2ND4, chandata->data[6]);
	b43_radio_write(dev, B2063_A_RX_2ND7, chandata->data[7]);
	b43_radio_write(dev, B2063_A_RX_PS6, chandata->data[8]);
	b43_radio_write(dev, B2063_TX_RF_CTL2, chandata->data[9]);
	b43_radio_write(dev, B2063_TX_RF_CTL5, chandata->data[10]);
	b43_radio_write(dev, B2063_PA_CTL11, chandata->data[11]);

	old_comm15 = b43_radio_read(dev, B2063_COMM15);
	b43_radio_set(dev, B2063_COMM15, 0x1E);

	if (chandata->freq > 4000) /* spec says 2484, but 4000 is safer */
		vco_freq = chandata->freq << 1;
	else
		vco_freq = chandata->freq << 2;

	freqref = crystal_freq * 3;
	val1 = lpphy_qdiv_roundup(crystal_freq, 1000000, 16);
	val2 = lpphy_qdiv_roundup(crystal_freq, 1000000 * div, 16);
	val3 = lpphy_qdiv_roundup(vco_freq, 3, 16);

	/* VCO calibration timeout, reference timeout and count. */
	timeout = ((((8 * crystal_freq) / (div * 5000000)) + 1) >> 1) - 1;
	b43_radio_write(dev, B2063_PLL_JTAG_PLL_VCO_CALIB3, 0x2);
	b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_VCO_CALIB6,
			  0xFFF8, timeout >> 2);
	b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_VCO_CALIB7,
			  0xFF9F, timeout << 5);
	timeoutref = ((((8 * crystal_freq) / (div * (timeout + 1))) +
		       999999) / 1000000) + 1;
	b43_radio_write(dev, B2063_PLL_JTAG_PLL_VCO_CALIB5, timeoutref);
	count = lpphy_qdiv_roundup(val3, val2 + 16, 16);
	count *= (timeout + 1) * (timeoutref + 1);
	count--;
	b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_VCO_CALIB7,
			  0xF0, count >> 8);
	b43_radio_write(dev, B2063_PLL_JTAG_PLL_VCO_CALIB8, count & 0xFF);

	/* Divider words SG1..SG4: quotient and normalized remainder of
	 * (val3 * 62500) / freqref, kept to extra fractional bits. */
	tmp1 = ((val3 * 62500) / freqref) << 4;
	tmp2 = ((val3 * 62500) % freqref) << 4;
	while (tmp2 >= freqref) {
		tmp1++;
		tmp2 -= freqref;
	}
	b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_SG1, 0xFFE0, tmp1 >> 4);
	b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_SG2, 0xFE0F, tmp1 << 4);
	b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_SG2, 0xFFF0, tmp1 >> 16);
	b43_radio_write(dev, B2063_PLL_JTAG_PLL_SG3, (tmp2 >> 8) & 0xFF);
	b43_radio_write(dev, B2063_PLL_JTAG_PLL_SG4, tmp2 & 0xFF);

	/* Fixed LF register constants. */
	b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF1, 0xB9);
	b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF2, 0x88);
	b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF3, 0x28);
	b43_radio_write(dev, B2063_PLL_JTAG_PLL_LF4, 0x63);

	/* CP register values, with a scale bit chosen so the computed
	 * setting fits the register field. */
	tmp3 = ((41 * (val3 - 3000)) / 1200) + 27;
	tmp4 = lpphy_qdiv_roundup(132000 * tmp1, 8451, 16);
	if ((tmp4 + tmp3 - 1) / tmp3 > 60) {
		scale = 1;
		tmp5 = ((tmp4 + tmp3) / (tmp3 << 1)) - 8;
	} else {
		scale = 0;
		tmp5 = ((tmp4 + (tmp3 >> 1)) / tmp3) - 8;
	}
	b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP2, 0xFFC0, tmp5);
	b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP2, 0xFFBF, scale << 6);

	tmp6 = lpphy_qdiv_roundup(100 * val1, val3, 16);
	tmp6 *= (tmp5 * 8) * (scale + 1);
	if (tmp6 > 150)
		tmp6 = 0;
	b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP3, 0xFFE0, tmp6);
	b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_CP3, 0xFFDF, scale << 5);

	/* Crystal-dependent PLL options. */
	b43_radio_maskset(dev, B2063_PLL_JTAG_PLL_XTAL_12, 0xFFFB, 0x4);
	if (crystal_freq > 26000000)
		b43_radio_set(dev, B2063_PLL_JTAG_PLL_XTAL_12, 0x2);
	else
		b43_radio_mask(dev, B2063_PLL_JTAG_PLL_XTAL_12, 0xFD);

	if (val1 == 45)
		b43_radio_set(dev, B2063_PLL_JTAG_PLL_VCO1, 0x2);
	else
		b43_radio_mask(dev, B2063_PLL_JTAG_PLL_VCO1, 0xFD);

	b43_radio_set(dev, B2063_PLL_SP2, 0x3);
	udelay(1);
	b43_radio_mask(dev, B2063_PLL_SP2, 0xFFFC);

	lpphy_b2063_vco_calib(dev);

	b43_radio_write(dev, B2063_COMM15, old_comm15);
	return 0;
}
/*
 * Switch the LP-PHY to @new_channel.  Tunes whichever radio is fitted;
 * the B2062 additionally needs its analog filter and gain table
 * updated.  On success the new channel is recorded in the PHY state
 * and in the MMIO channel register.
 */
static int b43_lpphy_op_switch_channel(struct b43_wldev *dev,
				       unsigned int new_channel)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	int has_b2063 = (dev->phy.radio_ver == 0x2063);
	int err;

	err = has_b2063 ? lpphy_b2063_tune(dev, new_channel)
			: lpphy_b2062_tune(dev, new_channel);
	if (err)
		return err;

	if (!has_b2063) {
		lpphy_set_analog_filter(dev, new_channel);
		lpphy_adjust_gain_table(dev, channel2freq_lp(new_channel));
	}

	lpphy->channel = new_channel;
	b43_write16(dev, B43_MMIO_CHANNEL, new_channel);
	return 0;
}
/*
 * Bring up the LP-PHY: SPROM band data, baseband, radio, RC
 * calibration, an initial channel switch, TX power control and a full
 * calibration pass.  Returns -EOPNOTSUPP on non-SSB buses, 0 otherwise.
 */
static int b43_lpphy_op_init(struct b43_wldev *dev)
{
	int err;

	/* This code path drives the hardware through SSB interfaces only. */
	if (dev->dev->bus_type != B43_BUS_SSB) {
		b43err(dev->wl, "LP-PHY is supported only on SSB!\n");
		return -EOPNOTSUPP;
	}

	lpphy_read_band_sprom(dev); /* FIXME should this be in prepare_structs? */
	lpphy_baseband_init(dev);
	lpphy_radio_init(dev);
	lpphy_calibrate_rc(dev);

	/* A failed initial channel switch is logged but not fatal. */
	err = b43_lpphy_op_switch_channel(dev, 7);
	if (err)
		b43dbg(dev->wl, "Switch to channel 7 failed, error = %d.\n",
		       err);

	lpphy_tx_pctl_init(dev);
	lpphy_calibration(dev);
	/* TODO: ACI init */

	return 0;
}
/* TX power adjustment is not implemented for the LP-PHY yet; stub. */
static void b43_lpphy_op_adjust_txpower(struct b43_wldev *dev)
{
	//TODO
}
/*
 * TX power recalculation is not implemented yet; unconditionally
 * reports B43_TXPWR_RES_DONE to the core.
 */
static enum b43_txpwr_result b43_lpphy_op_recalc_txpower(struct b43_wldev *dev,
							 bool ignore_tssi)
{
	//TODO
	return B43_TXPWR_RES_DONE;
}
/*
 * Power the analog front-end on or off via the AFE control override
 * bits: setting the three low override bits (value + enable) switches
 * it off, clearing the override enables hands control back.
 */
static void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on)
{
	if (!on) {
		b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0x0007);
		b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x0007);
		return;
	}
	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xfff8);
}
/* 15-second periodic work hook; nothing to do for the LP-PHY yet. */
static void b43_lpphy_op_pwork_15sec(struct b43_wldev *dev)
{
	//TODO
}
/*
 * PHY operations table hooking the LP-PHY implementation into the
 * generic b43 PHY layer.  The 60-second periodic work slot reuses
 * lpphy_calibration directly.
 */
const struct b43_phy_operations b43_phyops_lp = {
	.allocate		= b43_lpphy_op_allocate,
	.free			= b43_lpphy_op_free,
	.prepare_structs	= b43_lpphy_op_prepare_structs,
	.init			= b43_lpphy_op_init,
	.phy_maskset		= b43_lpphy_op_maskset,
	.radio_read		= b43_lpphy_op_radio_read,
	.radio_write		= b43_lpphy_op_radio_write,
	.software_rfkill	= b43_lpphy_op_software_rfkill,
	.switch_analog		= b43_lpphy_op_switch_analog,
	.switch_channel		= b43_lpphy_op_switch_channel,
	.get_default_chan	= b43_lpphy_op_get_default_chan,
	.set_rx_antenna		= b43_lpphy_op_set_rx_antenna,
	.recalc_txpower		= b43_lpphy_op_recalc_txpower,
	.adjust_txpower		= b43_lpphy_op_adjust_txpower,
	.pwork_15sec		= b43_lpphy_op_pwork_15sec,
	.pwork_60sec		= lpphy_calibration,
};
| gpl-2.0 |
shianyow/kernel-android-galaxy-s2-t989 | net/ipv6/xfrm6_state.c | 1419 | 4547 | /*
* xfrm6_state.c: based on xfrm4_state.c
*
* Authors:
* Mitsuru KANDA @USAGI
* Kazunori MIYAZAWA @USAGI
* Kunihiro Ishiguro <kunihiro@ipinfusion.com>
* IPv6 support
* YOSHIFUJI Hideaki @USAGI
* Split up af-specific portion
*
*/
#include <net/xfrm.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
#include <linux/netfilter_ipv6.h>
#include <net/dsfield.h>
#include <net/ipv6.h>
#include <net/addrconf.h>
/*
 * Fill in a temporary xfrm_state selector that matches only the flow
 * @fl: full /128 prefixes on both addresses and exact port matches,
 * so the acquired state applies to this session alone.  Template
 * wildcards (any-address id.daddr / props.saddr) fall back to the
 * flow's own addresses.
 */
static void
__xfrm6_init_tempsel(struct xfrm_state *x, struct flowi *fl,
		     struct xfrm_tmpl *tmpl,
		     xfrm_address_t *daddr, xfrm_address_t *saddr)
{
	/* Initialize temporary selector matching only
	 * to current session. */
	ipv6_addr_copy((struct in6_addr *)&x->sel.daddr, &fl->fl6_dst);
	ipv6_addr_copy((struct in6_addr *)&x->sel.saddr, &fl->fl6_src);
	x->sel.dport = xfrm_flowi_dport(fl);
	x->sel.dport_mask = htons(0xffff);
	x->sel.sport = xfrm_flowi_sport(fl);
	x->sel.sport_mask = htons(0xffff);
	x->sel.family = AF_INET6;
	x->sel.prefixlen_d = 128;
	x->sel.prefixlen_s = 128;
	x->sel.proto = fl->proto;
	x->sel.ifindex = fl->oif;
	x->id = tmpl->id;
	/* Template left the destination unspecified: use the flow's. */
	if (ipv6_addr_any((struct in6_addr*)&x->id.daddr))
		memcpy(&x->id.daddr, daddr, sizeof(x->sel.daddr));
	memcpy(&x->props.saddr, &tmpl->saddr, sizeof(x->props.saddr));
	/* Likewise for an unspecified source address. */
	if (ipv6_addr_any((struct in6_addr*)&x->props.saddr))
		memcpy(&x->props.saddr, saddr, sizeof(x->props.saddr));
	x->props.mode = tmpl->mode;
	x->props.reqid = tmpl->reqid;
	x->props.family = AF_INET6;
}
/* distribution counting sort function for xfrm_state and xfrm_tmpl */
static int
__xfrm6_sort(void **dst, void **src, int n, int (*cmp)(void *p), int maxclass)
{
int i;
int class[XFRM_MAX_DEPTH];
int count[maxclass];
memset(count, 0, sizeof(count));
for (i = 0; i < n; i++) {
int c;
class[i] = c = cmp(src[i]);
count[c]++;
}
for (i = 2; i < maxclass; i++)
count[i] += count[i - 1];
for (i = 0; i < n; i++) {
dst[count[class[i] - 1]++] = src[i];
src[i] = NULL;
}
return 0;
}
/*
* Rule for xfrm_state:
*
* rule 1: select IPsec transport except AH
* rule 2: select MIPv6 RO or inbound trigger
* rule 3: select IPsec transport AH
* rule 4: select IPsec tunnel
* rule 5: others
*/
static int __xfrm6_state_sort_cmp(void *p)
{
struct xfrm_state *v = p;
switch (v->props.mode) {
case XFRM_MODE_TRANSPORT:
if (v->id.proto != IPPROTO_AH)
return 1;
else
return 3;
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
case XFRM_MODE_ROUTEOPTIMIZATION:
case XFRM_MODE_IN_TRIGGER:
return 2;
#endif
case XFRM_MODE_TUNNEL:
case XFRM_MODE_BEET:
return 4;
}
return 5;
}
/*
 * Sort @n states from src[] into dst[] by the 5 classes of
 * __xfrm6_state_sort_cmp() (hence maxclass = 6).
 */
static int
__xfrm6_state_sort(struct xfrm_state **dst, struct xfrm_state **src, int n)
{
	return __xfrm6_sort((void **)dst, (void **)src, n,
			    __xfrm6_state_sort_cmp, 6);
}
/*
* Rule for xfrm_tmpl:
*
* rule 1: select IPsec transport
* rule 2: select MIPv6 RO or inbound trigger
* rule 3: select IPsec tunnel
* rule 4: others
*/
static int __xfrm6_tmpl_sort_cmp(void *p)
{
struct xfrm_tmpl *v = p;
switch (v->mode) {
case XFRM_MODE_TRANSPORT:
return 1;
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
case XFRM_MODE_ROUTEOPTIMIZATION:
case XFRM_MODE_IN_TRIGGER:
return 2;
#endif
case XFRM_MODE_TUNNEL:
case XFRM_MODE_BEET:
return 3;
}
return 4;
}
/*
 * Sort @n templates from src[] into dst[] by the 4 classes of
 * __xfrm6_tmpl_sort_cmp() (hence maxclass = 5).
 */
static int
__xfrm6_tmpl_sort(struct xfrm_tmpl **dst, struct xfrm_tmpl **src, int n)
{
	return __xfrm6_sort((void **)dst, (void **)src, n,
			    __xfrm6_tmpl_sort_cmp, 5);
}
/*
 * Snapshot the outer IPv6 header fields into XFRM_MODE_SKB_CB before
 * the header is transformed.  IPv6 carries no IP-ID or fragment
 * offset, so id/optlen are fixed at 0 and frag_off is preset with
 * IP_DF.  Always returns 0.
 */
int xfrm6_extract_header(struct sk_buff *skb)
{
	struct ipv6hdr *iph = ipv6_hdr(skb);

	XFRM_MODE_SKB_CB(skb)->ihl = sizeof(*iph);
	XFRM_MODE_SKB_CB(skb)->id = 0;
	XFRM_MODE_SKB_CB(skb)->frag_off = htons(IP_DF);
	XFRM_MODE_SKB_CB(skb)->tos = ipv6_get_dsfield(iph);
	XFRM_MODE_SKB_CB(skb)->ttl = iph->hop_limit;
	XFRM_MODE_SKB_CB(skb)->optlen = 0;
	memcpy(XFRM_MODE_SKB_CB(skb)->flow_lbl, iph->flow_lbl,
	       sizeof(XFRM_MODE_SKB_CB(skb)->flow_lbl));
	return 0;
}
/*
 * IPv6 address-family hooks registered with the xfrm state core.  The
 * tmpl/state sort callbacks implement the MIPv6-aware ordering rules
 * documented above their respective cmp functions.
 */
static struct xfrm_state_afinfo xfrm6_state_afinfo = {
	.family			= AF_INET6,
	.proto			= IPPROTO_IPV6,
	.eth_proto		= htons(ETH_P_IPV6),
	.owner			= THIS_MODULE,
	.init_tempsel		= __xfrm6_init_tempsel,
	.tmpl_sort		= __xfrm6_tmpl_sort,
	.state_sort		= __xfrm6_state_sort,
	.output			= xfrm6_output,
	.extract_input		= xfrm6_extract_input,
	.extract_output		= xfrm6_extract_output,
	.transport_finish	= xfrm6_transport_finish,
};
/* Register the IPv6 address-family state hooks with the xfrm core. */
int __init xfrm6_state_init(void)
{
	return xfrm_state_register_afinfo(&xfrm6_state_afinfo);
}
/* Unregister the IPv6 address-family state hooks on module teardown. */
void xfrm6_state_fini(void)
{
	xfrm_state_unregister_afinfo(&xfrm6_state_afinfo);
}
| gpl-2.0 |
CyanogenMod/android_kernel_moto_shamu | sound/soc/codecs/wm8580.c | 2443 | 26202 | /*
* wm8580.c -- WM8580 ALSA Soc Audio driver
*
* Copyright 2008-12 Wolfson Microelectronics PLC.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* Notes:
* The WM8580 is a multichannel codec with S/PDIF support, featuring six
* DAC channels and two ADC channels.
*
* Currently only the primary audio interface is supported - S/PDIF and
* the secondary audio interfaces are not.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/slab.h>
#include <linux/of_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/tlv.h>
#include <sound/initval.h>
#include <asm/div64.h>
#include "wm8580.h"
/* WM8580 register space */
#define WM8580_PLLA1 0x00
#define WM8580_PLLA2 0x01
#define WM8580_PLLA3 0x02
#define WM8580_PLLA4 0x03
#define WM8580_PLLB1 0x04
#define WM8580_PLLB2 0x05
#define WM8580_PLLB3 0x06
#define WM8580_PLLB4 0x07
#define WM8580_CLKSEL 0x08
#define WM8580_PAIF1 0x09
#define WM8580_PAIF2 0x0A
#define WM8580_SAIF1 0x0B
#define WM8580_PAIF3 0x0C
#define WM8580_PAIF4 0x0D
#define WM8580_SAIF2 0x0E
#define WM8580_DAC_CONTROL1 0x0F
#define WM8580_DAC_CONTROL2 0x10
#define WM8580_DAC_CONTROL3 0x11
#define WM8580_DAC_CONTROL4 0x12
#define WM8580_DAC_CONTROL5 0x13
#define WM8580_DIGITAL_ATTENUATION_DACL1 0x14
#define WM8580_DIGITAL_ATTENUATION_DACR1 0x15
#define WM8580_DIGITAL_ATTENUATION_DACL2 0x16
#define WM8580_DIGITAL_ATTENUATION_DACR2 0x17
#define WM8580_DIGITAL_ATTENUATION_DACL3 0x18
#define WM8580_DIGITAL_ATTENUATION_DACR3 0x19
#define WM8580_MASTER_DIGITAL_ATTENUATION 0x1C
#define WM8580_ADC_CONTROL1 0x1D
#define WM8580_SPDTXCHAN0 0x1E
#define WM8580_SPDTXCHAN1 0x1F
#define WM8580_SPDTXCHAN2 0x20
#define WM8580_SPDTXCHAN3 0x21
#define WM8580_SPDTXCHAN4 0x22
#define WM8580_SPDTXCHAN5 0x23
#define WM8580_SPDMODE 0x24
#define WM8580_INTMASK 0x25
#define WM8580_GPO1 0x26
#define WM8580_GPO2 0x27
#define WM8580_GPO3 0x28
#define WM8580_GPO4 0x29
#define WM8580_GPO5 0x2A
#define WM8580_INTSTAT 0x2B
#define WM8580_SPDRXCHAN1 0x2C
#define WM8580_SPDRXCHAN2 0x2D
#define WM8580_SPDRXCHAN3 0x2E
#define WM8580_SPDRXCHAN4 0x2F
#define WM8580_SPDRXCHAN5 0x30
#define WM8580_SPDSTAT 0x31
#define WM8580_PWRDN1 0x32
#define WM8580_PWRDN2 0x33
#define WM8580_READBACK 0x34
#define WM8580_RESET 0x35
#define WM8580_MAX_REGISTER 0x35
#define WM8580_DACOSR 0x40
/* PLLB4 (register 7h) */
#define WM8580_PLLB4_MCLKOUTSRC_MASK 0x60
#define WM8580_PLLB4_MCLKOUTSRC_PLLA 0x20
#define WM8580_PLLB4_MCLKOUTSRC_PLLB 0x40
#define WM8580_PLLB4_MCLKOUTSRC_OSC 0x60
#define WM8580_PLLB4_CLKOUTSRC_MASK 0x180
#define WM8580_PLLB4_CLKOUTSRC_PLLACLK 0x080
#define WM8580_PLLB4_CLKOUTSRC_PLLBCLK 0x100
#define WM8580_PLLB4_CLKOUTSRC_OSCCLK 0x180
/* CLKSEL (register 8h) */
#define WM8580_CLKSEL_DAC_CLKSEL_MASK 0x03
#define WM8580_CLKSEL_DAC_CLKSEL_PLLA 0x01
#define WM8580_CLKSEL_DAC_CLKSEL_PLLB 0x02
/* AIF control 1 (registers 9h-bh) */
#define WM8580_AIF_RATE_MASK 0x7
#define WM8580_AIF_BCLKSEL_MASK 0x18
#define WM8580_AIF_MS 0x20
#define WM8580_AIF_CLKSRC_MASK 0xc0
#define WM8580_AIF_CLKSRC_PLLA 0x40
#define WM8580_AIF_CLKSRC_PLLB 0x40
#define WM8580_AIF_CLKSRC_MCLK 0xc0
/* AIF control 2 (registers ch-eh) */
#define WM8580_AIF_FMT_MASK 0x03
#define WM8580_AIF_FMT_RIGHTJ 0x00
#define WM8580_AIF_FMT_LEFTJ 0x01
#define WM8580_AIF_FMT_I2S 0x02
#define WM8580_AIF_FMT_DSP 0x03
#define WM8580_AIF_LENGTH_MASK 0x0c
#define WM8580_AIF_LENGTH_16 0x00
#define WM8580_AIF_LENGTH_20 0x04
#define WM8580_AIF_LENGTH_24 0x08
#define WM8580_AIF_LENGTH_32 0x0c
#define WM8580_AIF_LRP 0x10
#define WM8580_AIF_BCP 0x20
/* Powerdown Register 1 (register 32h) */
#define WM8580_PWRDN1_PWDN 0x001
#define WM8580_PWRDN1_ALLDACPD 0x040
/* Powerdown Register 2 (register 33h) */
#define WM8580_PWRDN2_OSSCPD 0x001
#define WM8580_PWRDN2_PLLAPD 0x002
#define WM8580_PWRDN2_PLLBPD 0x004
#define WM8580_PWRDN2_SPDIFPD 0x008
#define WM8580_PWRDN2_SPDIFTXD 0x010
#define WM8580_PWRDN2_SPDIFRXD 0x020
#define WM8580_DAC_CONTROL5_MUTEALL 0x10
/*
* wm8580 register cache
* We can't read the WM8580 register space when we
* are using 2 wire for device control, so we cache them instead.
*/
/*
 * Power-on default values for registers 0x00..0x34.  WM8580_RESET
 * (0x35) is volatile (see wm8580_volatile) and therefore not cached.
 */
static const struct reg_default wm8580_reg_defaults[] = {
	{  0, 0x0121 },
	{  1, 0x017e },
	{  2, 0x007d },
	{  3, 0x0014 },
	{  4, 0x0121 },
	{  5, 0x017e },
	{  6, 0x007d },
	{  7, 0x0194 },
	{  8, 0x0010 },
	{  9, 0x0002 },
	{ 10, 0x0002 },
	{ 11, 0x00c2 },
	{ 12, 0x0182 },
	{ 13, 0x0082 },
	{ 14, 0x000a },
	{ 15, 0x0024 },
	{ 16, 0x0009 },
	{ 17, 0x0000 },
	{ 18, 0x00ff },
	{ 19, 0x0000 },
	{ 20, 0x00ff },
	{ 21, 0x00ff },
	{ 22, 0x00ff },
	{ 23, 0x00ff },
	{ 24, 0x00ff },
	{ 25, 0x00ff },
	{ 26, 0x00ff },
	{ 27, 0x00ff },
	{ 28, 0x01f0 },
	{ 29, 0x0040 },
	{ 30, 0x0000 },
	{ 31, 0x0000 },
	{ 32, 0x0000 },
	{ 33, 0x0000 },
	{ 34, 0x0031 },
	{ 35, 0x000b },
	{ 36, 0x0039 },
	{ 37, 0x0000 },
	{ 38, 0x0010 },
	{ 39, 0x0032 },
	{ 40, 0x0054 },
	{ 41, 0x0076 },
	{ 42, 0x0098 },
	{ 43, 0x0000 },
	{ 44, 0x0000 },
	{ 45, 0x0000 },
	{ 46, 0x0000 },
	{ 47, 0x0000 },
	{ 48, 0x0000 },
	{ 49, 0x0000 },
	{ 50, 0x005e },
	{ 51, 0x003e },
	{ 52, 0x0000 },
};
/* regmap volatile_reg callback: only the reset register bypasses the
 * cache; every other register is cacheable. */
static bool wm8580_volatile(struct device *dev, unsigned int reg)
{
	return reg == WM8580_RESET;
}
/* Remembered PLL programming, as last passed to wm8580_set_dai_pll(). */
struct pll_state {
	unsigned int in;	/* reference (input) frequency in Hz */
	unsigned int out;	/* requested output frequency in Hz */
};
#define WM8580_NUM_SUPPLIES 3
/* Regulator names, in the order used by the regulator_bulk_* calls. */
static const char *wm8580_supply_names[WM8580_NUM_SUPPLIES] = {
	"AVDD",
	"DVDD",
	"PVDD",
};
/* codec private data */
struct wm8580_priv {
	struct regmap *regmap;
	struct regulator_bulk_data supplies[WM8580_NUM_SUPPLIES];
	struct pll_state a;	/* PLLA state */
	struct pll_state b;	/* PLLB state */
	int sysclk[2];		/* SYSCLK rate per DAI, indexed by driver id */
};
/* DAC attenuation: -127.50dB..0dB in 0.5dB steps, minimum is mute. */
static const DECLARE_TLV_DB_SCALE(dac_tlv, -12750, 50, 1);
/*
 * Volume put handler for the DAC attenuation pairs.
 *
 * The attenuation registers latch on a volume-update (VU) bit (0x100).
 * To change left and right together: first clear VU in the register
 * cache only (no hardware access), let snd_soc_put_volsw() write the
 * new values without VU, then rewrite both registers with VU set so the
 * hardware latches the pair at once.
 */
static int wm8580_out_vu(struct snd_kcontrol *kcontrol,
			 struct snd_ctl_elem_value *ucontrol)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
	struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
	unsigned int reg = mc->reg;
	unsigned int reg2 = mc->rreg;
	int ret;
	/* Clear the register cache VU so we write without VU set */
	regcache_cache_only(wm8580->regmap, true);
	regmap_update_bits(wm8580->regmap, reg, 0x100, 0x000);
	regmap_update_bits(wm8580->regmap, reg2, 0x100, 0x000);
	regcache_cache_only(wm8580->regmap, false);
	ret = snd_soc_put_volsw(kcontrol, ucontrol);
	if (ret < 0)
		return ret;
	/* Now write again with the volume update bit set */
	snd_soc_update_bits(codec, reg, 0x100, 0x100);
	snd_soc_update_bits(codec, reg2, 0x100, 0x100);
	return 0;
}
/* Mixer controls: per-DAC volume (VU-latched via wm8580_out_vu),
 * de-emphasis, channel invert, zero-cross, mute and capture switches. */
static const struct snd_kcontrol_new wm8580_snd_controls[] = {
SOC_DOUBLE_R_EXT_TLV("DAC1 Playback Volume",
		     WM8580_DIGITAL_ATTENUATION_DACL1,
		     WM8580_DIGITAL_ATTENUATION_DACR1,
		     0, 0xff, 0, snd_soc_get_volsw, wm8580_out_vu, dac_tlv),
SOC_DOUBLE_R_EXT_TLV("DAC2 Playback Volume",
		     WM8580_DIGITAL_ATTENUATION_DACL2,
		     WM8580_DIGITAL_ATTENUATION_DACR2,
		     0, 0xff, 0, snd_soc_get_volsw, wm8580_out_vu, dac_tlv),
SOC_DOUBLE_R_EXT_TLV("DAC3 Playback Volume",
		     WM8580_DIGITAL_ATTENUATION_DACL3,
		     WM8580_DIGITAL_ATTENUATION_DACR3,
		     0, 0xff, 0, snd_soc_get_volsw, wm8580_out_vu, dac_tlv),
SOC_SINGLE("DAC1 Deemphasis Switch", WM8580_DAC_CONTROL3, 0, 1, 0),
SOC_SINGLE("DAC2 Deemphasis Switch", WM8580_DAC_CONTROL3, 1, 1, 0),
SOC_SINGLE("DAC3 Deemphasis Switch", WM8580_DAC_CONTROL3, 2, 1, 0),
SOC_DOUBLE("DAC1 Invert Switch", WM8580_DAC_CONTROL4,  0, 1, 1, 0),
SOC_DOUBLE("DAC2 Invert Switch", WM8580_DAC_CONTROL4,  2, 3, 1, 0),
SOC_DOUBLE("DAC3 Invert Switch", WM8580_DAC_CONTROL4,  4, 5, 1, 0),
SOC_SINGLE("DAC ZC Switch", WM8580_DAC_CONTROL5, 5, 1, 0),
SOC_SINGLE("DAC1 Switch", WM8580_DAC_CONTROL5, 0, 1, 1),
SOC_SINGLE("DAC2 Switch", WM8580_DAC_CONTROL5, 1, 1, 1),
SOC_SINGLE("DAC3 Switch", WM8580_DAC_CONTROL5, 2, 1, 1),
SOC_DOUBLE("Capture Switch", WM8580_ADC_CONTROL1, 0, 1, 1, 1),
SOC_SINGLE("Capture High-Pass Filter Switch", WM8580_ADC_CONTROL1, 4, 1, 0),
};
/* DAPM widgets: three stereo DACs and one stereo ADC, each power-gated
 * via a PWRDN1 bit (active low), plus the physical pins. */
static const struct snd_soc_dapm_widget wm8580_dapm_widgets[] = {
SND_SOC_DAPM_DAC("DAC1", "Playback", WM8580_PWRDN1, 2, 1),
SND_SOC_DAPM_DAC("DAC2", "Playback", WM8580_PWRDN1, 3, 1),
SND_SOC_DAPM_DAC("DAC3", "Playback", WM8580_PWRDN1, 4, 1),
SND_SOC_DAPM_OUTPUT("VOUT1L"),
SND_SOC_DAPM_OUTPUT("VOUT1R"),
SND_SOC_DAPM_OUTPUT("VOUT2L"),
SND_SOC_DAPM_OUTPUT("VOUT2R"),
SND_SOC_DAPM_OUTPUT("VOUT3L"),
SND_SOC_DAPM_OUTPUT("VOUT3R"),
SND_SOC_DAPM_ADC("ADC", "Capture", WM8580_PWRDN1, 1, 1),
SND_SOC_DAPM_INPUT("AINL"),
SND_SOC_DAPM_INPUT("AINR"),
};
/* Audio paths: each DAC drives its output pair; the ADC reads AINL/R. */
static const struct snd_soc_dapm_route wm8580_dapm_routes[] = {
	{ "VOUT1L", NULL, "DAC1" },
	{ "VOUT1R", NULL, "DAC1" },
	{ "VOUT2L", NULL, "DAC2" },
	{ "VOUT2R", NULL, "DAC2" },
	{ "VOUT3L", NULL, "DAC3" },
	{ "VOUT3R", NULL, "DAC3" },
	{ "ADC", NULL, "AINL" },
	{ "ADC", NULL, "AINR" },
};
/* PLL divisors */
struct _pll_div {
	u32 prescale:1;		/* divide reference by 2 before the PLL */
	u32 postscale:1;	/* output post-divider select (see post_table) */
	u32 freqmode:2;		/* frequency mode field (see post_table) */
	u32 n:4;		/* integer divider, valid range 5..13 */
	u32 k:24;		/* fractional divider, 22-bit fixed point */
};
/* The size in bits of the pll divide */
#define FIXED_PLL_SIZE (1 << 22)
/* PLL rate to output rate divisions: maps the overall output divider to
 * the freqmode/postscale register fields.  The VCO itself is run at
 * 90-100MHz (see pll_factors). */
static struct {
	unsigned int div;
	unsigned int freqmode;
	unsigned int postscale;
} post_table[] = {
	{  2,  0, 0 },
	{  4,  0, 1 },
	{  4,  1, 0 },
	{  8,  1, 1 },
	{  8,  2, 0 },
	{ 16,  2, 1 },
	{ 12,  3, 0 },
	{ 24,  3, 1 }
};
/*
 * pll_factors() - derive PLL register fields for a clock configuration.
 * @pll_div: filled in with prescale/postscale/freqmode/N/K on success.
 * @target:  desired PLL output frequency in Hz.
 * @source:  reference (input) frequency in Hz.
 *
 * Returns 0 on success, or -EINVAL if the target cannot be scaled into
 * the 90-100MHz VCO range or the integer divider N falls outside 5..13.
 */
static int pll_factors(struct _pll_div *pll_div, unsigned int target,
		       unsigned int source)
{
	u64 Kpart;
	unsigned int K, Ndiv, Nmod;
	int i;
	pr_debug("wm8580: PLL %uHz->%uHz\n", source, target);
	/* Scale the output frequency up; the PLL should run in the
	 * region of 90-100MHz.
	 */
	for (i = 0; i < ARRAY_SIZE(post_table); i++) {
		if (target * post_table[i].div >=  90000000 &&
		    target * post_table[i].div <= 100000000) {
			pll_div->freqmode = post_table[i].freqmode;
			pll_div->postscale = post_table[i].postscale;
			target *= post_table[i].div;
			break;
		}
	}
	if (i == ARRAY_SIZE(post_table)) {
		printk(KERN_ERR "wm8580: Unable to scale output frequency "
		       "%u\n", target);
		return -EINVAL;
	}
	/* Halve the reference once if needed to bring N into range. */
	Ndiv = target / source;
	if (Ndiv < 5) {
		source /= 2;
		pll_div->prescale = 1;
		Ndiv = target / source;
	} else
		pll_div->prescale = 0;
	if ((Ndiv < 5) || (Ndiv > 13)) {
		printk(KERN_ERR
			"WM8580 N=%u outside supported range\n", Ndiv);
		return -EINVAL;
	}
	pll_div->n = Ndiv;
	/* K = (remainder / source) scaled to 22-bit fixed point. */
	Nmod = target % source;
	Kpart = FIXED_PLL_SIZE * (long long)Nmod;
	do_div(Kpart, source);
	K = Kpart & 0xFFFFFFFF;
	pll_div->k = K;
	pr_debug("PLL %x.%x prescale %d freqmode %d postscale %d\n",
		 pll_div->n, pll_div->k, pll_div->prescale, pll_div->freqmode,
		 pll_div->postscale);
	return 0;
}
/*
 * Configure PLLA or PLLB (pll_id WM8580_PLLA/WM8580_PLLB).
 *
 * Passing freq_in or freq_out of 0 leaves the PLL powered down.  The
 * PLL is always powered down before its divider registers are touched
 * and only re-enabled once programming is complete.
 */
static int wm8580_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id,
		int source, unsigned int freq_in, unsigned int freq_out)
{
	int offset;
	struct snd_soc_codec *codec = codec_dai->codec;
	struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
	struct pll_state *state;
	struct _pll_div pll_div;
	unsigned int reg;
	unsigned int pwr_mask;
	int ret;
	/* GCC isn't able to work out the ifs below for initialising/using
	 * pll_div so suppress warnings.
	 */
	memset(&pll_div, 0, sizeof(pll_div));
	switch (pll_id) {
	case WM8580_PLLA:
		state = &wm8580->a;
		offset = 0;	/* PLLA registers start at WM8580_PLLA1 */
		pwr_mask = WM8580_PWRDN2_PLLAPD;
		break;
	case WM8580_PLLB:
		state = &wm8580->b;
		offset = 4;	/* PLLB registers are 4 above the PLLA ones */
		pwr_mask = WM8580_PWRDN2_PLLBPD;
		break;
	default:
		return -ENODEV;
	}
	if (freq_in && freq_out) {
		ret = pll_factors(&pll_div, freq_out, freq_in);
		if (ret != 0)
			return ret;
	}
	state->in = freq_in;
	state->out = freq_out;
	/* Always disable the PLL - it is not safe to leave it running
	 * while reprogramming it.
	 */
	snd_soc_update_bits(codec, WM8580_PWRDN2, pwr_mask, pwr_mask);
	if (!freq_in || !freq_out)
		return 0;
	/* 24-bit K split over three 9-bit registers, N in the top nibble. */
	snd_soc_write(codec, WM8580_PLLA1 + offset, pll_div.k & 0x1ff);
	snd_soc_write(codec, WM8580_PLLA2 + offset, (pll_div.k >> 9) & 0x1ff);
	snd_soc_write(codec, WM8580_PLLA3 + offset,
		      (pll_div.k >> 18 & 0xf) | (pll_div.n << 4));
	reg = snd_soc_read(codec, WM8580_PLLA4 + offset);
	reg &= ~0x1b;
	reg |= pll_div.prescale | pll_div.postscale << 1 |
		pll_div.freqmode << 3;
	snd_soc_write(codec, WM8580_PLLA4 + offset, reg);
	/* All done, turn it on */
	snd_soc_update_bits(codec, WM8580_PWRDN2, pwr_mask, 0);
	return 0;
}
/* Supported SYSCLK/sample-rate ratios; hw_params accepts exact matches
 * only. */
static const int wm8580_sysclk_ratios[] = {
	128, 192, 256, 384, 512, 768, 1152,
};
/*
* Set PCM DAI bit size and sample rate.
*/
/*
 * Configure sample width and the SYSCLK/LRCLK ratio for one PAIF.
 *
 * Relies on the SYSCLK rate stored for this DAI by wm8580_set_sysclk();
 * only exact matches against wm8580_sysclk_ratios[] are accepted.  For
 * playback the DAC oversampling rate is also chosen from the ratio.
 */
static int wm8580_paif_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *params,
				 struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_codec *codec = rtd->codec;
	struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
	u16 paifa = 0;
	u16 paifb = 0;
	int i, ratio, osr;
	/* bit size */
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		paifa |= 0x8;
		break;
	case SNDRV_PCM_FORMAT_S20_3LE:
		paifa |= 0x0;
		paifb |= WM8580_AIF_LENGTH_20;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		paifa |= 0x0;
		paifb |= WM8580_AIF_LENGTH_24;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		paifa |= 0x0;
		paifb |= WM8580_AIF_LENGTH_32;
		break;
	default:
		return -EINVAL;
	}
	/* Look up the SYSCLK ratio; accept only exact matches */
	ratio = wm8580->sysclk[dai->driver->id] / params_rate(params);
	for (i = 0; i < ARRAY_SIZE(wm8580_sysclk_ratios); i++)
		if (ratio == wm8580_sysclk_ratios[i])
			break;
	if (i == ARRAY_SIZE(wm8580_sysclk_ratios)) {
		dev_err(codec->dev, "Invalid clock ratio %d/%d\n",
			wm8580->sysclk[dai->driver->id], params_rate(params));
		return -EINVAL;
	}
	/* The ratio table index is the register rate field. */
	paifa |= i;
	dev_dbg(codec->dev, "Running at %dfs with %dHz clock\n",
		wm8580_sysclk_ratios[i], wm8580->sysclk[dai->driver->id]);
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		/* Low ratios leave too few clocks for 128x oversampling. */
		switch (ratio) {
		case 128:
		case 192:
			osr = WM8580_DACOSR;
			dev_dbg(codec->dev, "Selecting 64x OSR\n");
			break;
		default:
			osr = 0;
			dev_dbg(codec->dev, "Selecting 128x OSR\n");
			break;
		}
		snd_soc_update_bits(codec, WM8580_PAIF3, WM8580_DACOSR, osr);
	}
	snd_soc_update_bits(codec, WM8580_PAIF1 + dai->driver->id,
			    WM8580_AIF_RATE_MASK | WM8580_AIF_BCLKSEL_MASK,
			    paifa);
	snd_soc_update_bits(codec, WM8580_PAIF3 + dai->driver->id,
			    WM8580_AIF_LENGTH_MASK, paifb);
	return 0;
}
/*
 * Set the digital interface format: master/slave, frame format and
 * clock inversion.  LRCLK inversion is rejected in the DSP modes
 * because there the LRP bit selects DSP mode B rather than polarity.
 */
static int wm8580_set_paif_dai_fmt(struct snd_soc_dai *codec_dai,
				      unsigned int fmt)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	unsigned int aifa;
	unsigned int aifb;
	int can_invert_lrclk;
	aifa = snd_soc_read(codec, WM8580_PAIF1 + codec_dai->driver->id);
	aifb = snd_soc_read(codec, WM8580_PAIF3 + codec_dai->driver->id);
	aifb &= ~(WM8580_AIF_FMT_MASK | WM8580_AIF_LRP | WM8580_AIF_BCP);
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		aifa &= ~WM8580_AIF_MS;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		aifa |= WM8580_AIF_MS;
		break;
	default:
		return -EINVAL;
	}
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		can_invert_lrclk = 1;
		aifb |= WM8580_AIF_FMT_I2S;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		can_invert_lrclk = 1;
		aifb |= WM8580_AIF_FMT_RIGHTJ;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		can_invert_lrclk = 1;
		aifb |= WM8580_AIF_FMT_LEFTJ;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		can_invert_lrclk = 0;
		aifb |= WM8580_AIF_FMT_DSP;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		can_invert_lrclk = 0;
		aifb |= WM8580_AIF_FMT_DSP;
		aifb |= WM8580_AIF_LRP;	/* LRP selects mode B here */
		break;
	default:
		return -EINVAL;
	}
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	case SND_SOC_DAIFMT_IB_IF:
		if (!can_invert_lrclk)
			return -EINVAL;
		aifb |= WM8580_AIF_BCP;
		aifb |= WM8580_AIF_LRP;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		aifb |= WM8580_AIF_BCP;
		break;
	case SND_SOC_DAIFMT_NB_IF:
		if (!can_invert_lrclk)
			return -EINVAL;
		aifb |= WM8580_AIF_LRP;
		break;
	default:
		return -EINVAL;
	}
	snd_soc_write(codec, WM8580_PAIF1 + codec_dai->driver->id, aifa);
	snd_soc_write(codec, WM8580_PAIF3 + codec_dai->driver->id, aifb);
	return 0;
}
/*
 * Route clocks through the PLLB4 register:
 *   WM8580_MCLK      - source for the MCLK output (MCLK/PLLA/PLLB/OSC)
 *   WM8580_CLKOUTSRC - source for the CLKOUT pin (none/PLLA/PLLB/OSC)
 * @div carries a WM8580_CLKSRC_* selector, not a numeric divider.
 */
static int wm8580_set_dai_clkdiv(struct snd_soc_dai *codec_dai,
				 int div_id, int div)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	unsigned int reg;
	switch (div_id) {
	case WM8580_MCLK:
		reg = snd_soc_read(codec, WM8580_PLLB4);
		reg &= ~WM8580_PLLB4_MCLKOUTSRC_MASK;
		switch (div) {
		case WM8580_CLKSRC_MCLK:
			/* Input */
			break;
		case WM8580_CLKSRC_PLLA:
			reg |= WM8580_PLLB4_MCLKOUTSRC_PLLA;
			break;
		case WM8580_CLKSRC_PLLB:
			reg |= WM8580_PLLB4_MCLKOUTSRC_PLLB;
			break;
		case WM8580_CLKSRC_OSC:
			reg |= WM8580_PLLB4_MCLKOUTSRC_OSC;
			break;
		default:
			return -EINVAL;
		}
		snd_soc_write(codec, WM8580_PLLB4, reg);
		break;
	case WM8580_CLKOUTSRC:
		reg = snd_soc_read(codec, WM8580_PLLB4);
		reg &= ~WM8580_PLLB4_CLKOUTSRC_MASK;
		switch (div) {
		case WM8580_CLKSRC_NONE:
			break;
		case WM8580_CLKSRC_PLLA:
			reg |= WM8580_PLLB4_CLKOUTSRC_PLLACLK;
			break;
		case WM8580_CLKSRC_PLLB:
			reg |= WM8580_PLLB4_CLKOUTSRC_PLLBCLK;
			break;
		case WM8580_CLKSRC_OSC:
			reg |= WM8580_PLLB4_CLKOUTSRC_OSCCLK;
			break;
		default:
			return -EINVAL;
		}
		snd_soc_write(codec, WM8580_PLLB4, reg);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
/*
 * Select and record the system clock source for one DAI.
 *
 * @clk_id: WM8580_CLKSRC_ADCMCLK (capture DAI only), _PLLA, _PLLB or
 *          _MCLK.
 * @freq:   clock rate in Hz; stored so wm8580_paif_hw_params() can
 *          validate the SYSCLK/sample-rate ratio later.
 *
 * Returns 0 on success or a negative error code.
 */
static int wm8580_set_sysclk(struct snd_soc_dai *dai, int clk_id,
			     unsigned int freq, int dir)
{
	struct snd_soc_codec *codec = dai->codec;
	struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
	int ret, sel, sel_mask, sel_shift;
	switch (dai->driver->id) {
	case WM8580_DAI_PAIFRX:
		sel_mask = 0x3;
		sel_shift = 0;
		break;
	case WM8580_DAI_PAIFTX:
		sel_mask = 0xc;
		sel_shift = 2;
		break;
	default:
		/* Was BUG_ON("Unknown DAI driver ID\n"): a string literal is
		 * always non-NULL, so BUG_ON would unconditionally oops if
		 * this branch were ever reached.  Warn and fail instead. */
		WARN(1, "Unknown DAI driver ID %d\n", dai->driver->id);
		return -EINVAL;
	}
	switch (clk_id) {
	case WM8580_CLKSRC_ADCMCLK:
		/* ADCMCLK feeds the capture path only. */
		if (dai->driver->id != WM8580_DAI_PAIFTX)
			return -EINVAL;
		sel = 0 << sel_shift;
		break;
	case WM8580_CLKSRC_PLLA:
		sel = 1 << sel_shift;
		break;
	case WM8580_CLKSRC_PLLB:
		sel = 2 << sel_shift;
		break;
	case WM8580_CLKSRC_MCLK:
		sel = 3 << sel_shift;
		break;
	default:
		dev_err(codec->dev, "Unknown clock %d\n", clk_id);
		return -EINVAL;
	}
	/* We really should validate PLL settings but not yet */
	wm8580->sysclk[dai->driver->id] = freq;
	ret = snd_soc_update_bits(codec, WM8580_CLKSEL, sel_mask, sel);
	if (ret < 0)
		return ret;
	return 0;
}
/* Mute or unmute all DACs via the MUTEALL bit in DAC control 5. */
static int wm8580_digital_mute(struct snd_soc_dai *codec_dai, int mute)
{
	struct snd_soc_codec *codec = codec_dai->codec;
	unsigned int val;

	val = snd_soc_read(codec, WM8580_DAC_CONTROL5);
	val = mute ? (val | WM8580_DAC_CONTROL5_MUTEALL)
		   : (val & ~WM8580_DAC_CONTROL5_MUTEALL);
	snd_soc_write(codec, WM8580_DAC_CONTROL5, val);

	return 0;
}
/*
 * DAPM bias management.  Coming out of BIAS_OFF the chip and DAC power
 * bits are cleared and VMID is released; BIAS_OFF asserts the global
 * power-down bit.  ON/PREPARE need no register changes.
 */
static int wm8580_set_bias_level(struct snd_soc_codec *codec,
	enum snd_soc_bias_level level)
{
	switch (level) {
	case SND_SOC_BIAS_ON:
	case SND_SOC_BIAS_PREPARE:
		break;
	case SND_SOC_BIAS_STANDBY:
		if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) {
			/* Power up and get individual control of the DACs */
			snd_soc_update_bits(codec, WM8580_PWRDN1,
					    WM8580_PWRDN1_PWDN |
					    WM8580_PWRDN1_ALLDACPD, 0);
			/* Make VMID high impedance */
			snd_soc_update_bits(codec, WM8580_ADC_CONTROL1,
					    0x100, 0);
		}
		break;
	case SND_SOC_BIAS_OFF:
		snd_soc_update_bits(codec, WM8580_PWRDN1,
				    WM8580_PWRDN1_PWDN, WM8580_PWRDN1_PWDN);
		break;
	}
	codec->dapm.bias_level = level;
	return 0;
}
#define WM8580_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\
			SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S32_LE)
/* Playback DAI ops; only playback supports digital mute. */
static const struct snd_soc_dai_ops wm8580_dai_ops_playback = {
	.set_sysclk	= wm8580_set_sysclk,
	.hw_params	= wm8580_paif_hw_params,
	.set_fmt	= wm8580_set_paif_dai_fmt,
	.set_clkdiv	= wm8580_set_dai_clkdiv,
	.set_pll	= wm8580_set_dai_pll,
	.digital_mute	= wm8580_digital_mute,
};
/* Capture DAI ops: same as playback minus digital_mute. */
static const struct snd_soc_dai_ops wm8580_dai_ops_capture = {
	.set_sysclk	= wm8580_set_sysclk,
	.hw_params	= wm8580_paif_hw_params,
	.set_fmt	= wm8580_set_paif_dai_fmt,
	.set_clkdiv	= wm8580_set_dai_clkdiv,
	.set_pll	= wm8580_set_dai_pll,
};
/* Two DAIs: a playback interface feeding the three stereo DACs (up to
 * six channels) and a stereo capture interface for the ADC. */
static struct snd_soc_dai_driver wm8580_dai[] = {
	{
		.name = "wm8580-hifi-playback",
		.id	= WM8580_DAI_PAIFRX,
		.playback = {
			.stream_name = "Playback",
			.channels_min = 1,
			.channels_max = 6,
			.rates = SNDRV_PCM_RATE_8000_192000,
			.formats = WM8580_FORMATS,
		},
		.ops = &wm8580_dai_ops_playback,
	},
	{
		.name = "wm8580-hifi-capture",
		.id	=	WM8580_DAI_PAIFTX,
		.capture = {
			.stream_name = "Capture",
			.channels_min = 2,
			.channels_max = 2,
			.rates = SNDRV_PCM_RATE_8000_192000,
			.formats = WM8580_FORMATS,
		},
		.ops = &wm8580_dai_ops_capture,
	},
};
/*
 * Codec-level probe: point ASoC I/O at the regmap, enable the three
 * supplies, soft-reset the chip to known defaults and move it to
 * STANDBY bias.  On any failure the supplies are disabled again.
 */
static int wm8580_probe(struct snd_soc_codec *codec)
{
	struct wm8580_priv *wm8580 = snd_soc_codec_get_drvdata(codec);
	int ret = 0;
	/* 7-bit register addresses with 9-bit values, via regmap. */
	ret = snd_soc_codec_set_cache_io(codec, 7, 9, SND_SOC_REGMAP);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to set cache I/O: %d\n", ret);
		return ret;
	}
	ret = regulator_bulk_enable(ARRAY_SIZE(wm8580->supplies),
				    wm8580->supplies);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to enable supplies: %d\n", ret);
		goto err_regulator_get;
	}
	/* Get the codec into a known state */
	ret = snd_soc_write(codec, WM8580_RESET, 0);
	if (ret != 0) {
		dev_err(codec->dev, "Failed to reset codec: %d\n", ret);
		goto err_regulator_enable;
	}
	wm8580_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
	return 0;
err_regulator_enable:
	regulator_bulk_disable(ARRAY_SIZE(wm8580->supplies), wm8580->supplies);
err_regulator_get:
	return ret;
}
/* power down chip */
/* Codec-level remove: power the chip down before cutting its supplies. */
static int wm8580_remove(struct snd_soc_codec *codec)
{
	struct wm8580_priv *priv = snd_soc_codec_get_drvdata(codec);

	wm8580_set_bias_level(codec, SND_SOC_BIAS_OFF);
	regulator_bulk_disable(ARRAY_SIZE(priv->supplies), priv->supplies);

	return 0;
}
/* ASoC codec driver description, wiring up the controls/DAPM tables. */
static struct snd_soc_codec_driver soc_codec_dev_wm8580 = {
	.probe =	wm8580_probe,
	.remove =	wm8580_remove,
	.set_bias_level = wm8580_set_bias_level,
	.controls = wm8580_snd_controls,
	.num_controls = ARRAY_SIZE(wm8580_snd_controls),
	.dapm_widgets = wm8580_dapm_widgets,
	.num_dapm_widgets = ARRAY_SIZE(wm8580_dapm_widgets),
	.dapm_routes = wm8580_dapm_routes,
	.num_dapm_routes = ARRAY_SIZE(wm8580_dapm_routes),
};
/* Device-tree match table. */
static const struct of_device_id wm8580_of_match[] = {
	{ .compatible = "wlf,wm8580" },
	{ },
};
/* 7-bit register / 9-bit value map; everything except the (volatile)
 * reset register is cached with the defaults above. */
static const struct regmap_config wm8580_regmap = {
	.reg_bits = 7,
	.val_bits = 9,
	.max_register = WM8580_MAX_REGISTER,
	.reg_defaults = wm8580_reg_defaults,
	.num_reg_defaults = ARRAY_SIZE(wm8580_reg_defaults),
	.cache_type = REGCACHE_RBTREE,
	.volatile_reg = wm8580_volatile,
};
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
/*
 * I2C probe: allocate driver state, create the regmap, request the
 * AVDD/DVDD/PVDD supplies (all devm-managed) and register the codec
 * with its two DAIs.  Supplies are only enabled later in wm8580_probe.
 */
static int wm8580_i2c_probe(struct i2c_client *i2c,
			    const struct i2c_device_id *id)
{
	struct wm8580_priv *wm8580;
	int ret, i;
	wm8580 = devm_kzalloc(&i2c->dev, sizeof(struct wm8580_priv),
			      GFP_KERNEL);
	if (wm8580 == NULL)
		return -ENOMEM;
	wm8580->regmap = devm_regmap_init_i2c(i2c, &wm8580_regmap);
	if (IS_ERR(wm8580->regmap))
		return PTR_ERR(wm8580->regmap);
	for (i = 0; i < ARRAY_SIZE(wm8580->supplies); i++)
		wm8580->supplies[i].supply = wm8580_supply_names[i];
	ret = devm_regulator_bulk_get(&i2c->dev, ARRAY_SIZE(wm8580->supplies),
				      wm8580->supplies);
	if (ret != 0) {
		dev_err(&i2c->dev, "Failed to request supplies: %d\n", ret);
		return ret;
	}
	i2c_set_clientdata(i2c, wm8580);
	ret = snd_soc_register_codec(&i2c->dev,
			&soc_codec_dev_wm8580, wm8580_dai, ARRAY_SIZE(wm8580_dai));
	return ret;
}
/* I2C remove: unregister the codec; all other resources are devm-managed. */
static int wm8580_i2c_remove(struct i2c_client *client)
{
	snd_soc_unregister_codec(&client->dev);
	return 0;
}
static const struct i2c_device_id wm8580_i2c_id[] = {
	{ "wm8580", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, wm8580_i2c_id);
static struct i2c_driver wm8580_i2c_driver = {
	.driver = {
		.name = "wm8580",
		.owner = THIS_MODULE,
		.of_match_table = wm8580_of_match,
	},
	.probe =    wm8580_i2c_probe,
	.remove =   wm8580_i2c_remove,
	.id_table = wm8580_i2c_id,
};
#endif
/* Module init: register the I2C driver when I2C support is built in. */
static int __init wm8580_modinit(void)
{
	int ret = 0;
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	ret = i2c_add_driver(&wm8580_i2c_driver);
	if (ret != 0) {
		pr_err("Failed to register WM8580 I2C driver: %d\n", ret);
	}
#endif
	return ret;
}
module_init(wm8580_modinit);
/* Module exit: mirror of wm8580_modinit(). */
static void __exit wm8580_exit(void)
{
#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
	i2c_del_driver(&wm8580_i2c_driver);
#endif
}
module_exit(wm8580_exit);
MODULE_DESCRIPTION("ASoC WM8580 driver");
MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
storm31/android_kernel_samsung_aries | drivers/net/phy/national.c | 2699 | 4171 | /*
* drivers/net/phy/national.c
*
* Driver for National Semiconductor PHYs
*
* Author: Stuart Menefy <stuart.menefy@st.com>
* Maintainer: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*
* Copyright (c) 2008 STMicroelectronics Limited
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mii.h>
#include <linux/ethtool.h>
#include <linux/phy.h>
#include <linux/netdevice.h>
/* DP83865 phy identifier values */
#define DP83865_PHY_ID	0x20005c7a
/* Interrupt mask / status registers (read of status clears events). */
#define DP83865_INT_MASK_REG 0x15
#define DP83865_INT_MASK_STATUS 0x14
#define DP83865_INT_REMOTE_FAULT 0x0008
#define DP83865_INT_ANE_COMPLETED 0x0010
#define DP83865_INT_LINK_CHANGE 0xe000
#define DP83865_INT_MASK_DEFAULT (DP83865_INT_REMOTE_FAULT | \
				DP83865_INT_ANE_COMPLETED | \
				DP83865_INT_LINK_CHANGE)
/* Advanced proprietary configuration */
#define NS_EXP_MEM_CTL	0x16	/* expanded-memory access control */
#define NS_EXP_MEM_DATA	0x1d	/* expanded-memory data window */
#define NS_EXP_MEM_ADD	0x1e	/* expanded-memory address latch */
#define LED_CTRL_REG 0x13
#define AN_FALLBACK_AN 0x0001
#define AN_FALLBACK_CRC 0x0002
#define AN_FALLBACK_IE 0x0004
#define ALL_FALLBACK_ON (AN_FALLBACK_AN |  AN_FALLBACK_CRC | AN_FALLBACK_IE)
/* Argument values for ns_10_base_t_hdx_loopack(). */
enum hdx_loopback {
	hdx_loopback_on = 0,
	hdx_loopback_off = 1,
};
/* Read one byte from the PHY's expanded (indirect) register space:
 * latch the address, then read through the data window. */
static u8 ns_exp_read(struct phy_device *phydev, u16 reg)
{
	phy_write(phydev, NS_EXP_MEM_ADD, reg);
	return phy_read(phydev, NS_EXP_MEM_DATA);
}
/* Write one byte into the expanded register space (address then data). */
static void ns_exp_write(struct phy_device *phydev, u16 reg, u8 data)
{
	phy_write(phydev, NS_EXP_MEM_ADD, reg);
	phy_write(phydev, NS_EXP_MEM_DATA, data);
}
static int ns_config_intr(struct phy_device *phydev)
{
int err;
if (phydev->interrupts == PHY_INTERRUPT_ENABLED)
err = phy_write(phydev, DP83865_INT_MASK_REG,
DP83865_INT_MASK_DEFAULT);
else
err = phy_write(phydev, DP83865_INT_MASK_REG, 0);
return err;
}
/* Acknowledge pending interrupts: reading the status register clears
 * them.  Returns 0, or a negative error from the MDIO read. */
static int ns_ack_interrupt(struct phy_device *phydev)
{
	int status = phy_read(phydev, DP83865_INT_MASK_STATUS);

	return status < 0 ? status : 0;
}
/*
 * Program the gigabit auto-negotiation fallback bits (an AN_FALLBACK_*
 * mask written to LED_CTRL_REG).  The PHY is held in power-down around
 * the expanded-memory write sequence, then restored.
 */
static void ns_giga_speed_fallback(struct phy_device *phydev, int mode)
{
	int bmcr = phy_read(phydev, MII_BMCR);
	phy_write(phydev, MII_BMCR, (bmcr | BMCR_PDOWN));
	/* Enable 8 bit expended memory read/write (no auto increment) */
	phy_write(phydev, NS_EXP_MEM_CTL, 0);
	phy_write(phydev, NS_EXP_MEM_ADD, 0x1C0);
	phy_write(phydev, NS_EXP_MEM_DATA, 0x0008);
	phy_write(phydev, MII_BMCR, (bmcr & ~BMCR_PDOWN));
	phy_write(phydev, LED_CTRL_REG, mode);
}
/* Enable/disable 10BASE-T half-duplex loopback via bit 0 of expanded
 * register 0x1c0 (bit set = loopback off).  @disable takes an
 * enum hdx_loopback value. */
static void ns_10_base_t_hdx_loopack(struct phy_device *phydev, int disable)
{
	if (disable)
		ns_exp_write(phydev, 0x1c0, ns_exp_read(phydev, 0x1c0) | 1);
	else
		ns_exp_write(phydev, 0x1c0,
			     ns_exp_read(phydev, 0x1c0) & 0xfffe);
	printk(KERN_DEBUG "DP83865 PHY: 10BASE-T HDX loopback %s\n",
	       (ns_exp_read(phydev, 0x1c0) & 0x0001) ? "off" : "on");
}
/* One-time PHY setup: enable all autoneg fallbacks, turn off 10 Mbps
 * half-duplex loopback and clear any stale interrupt status. */
static int ns_config_init(struct phy_device *phydev)
{
	ns_giga_speed_fallback(phydev, ALL_FALLBACK_ON);
	/* In the latest MAC or switches design, the 10 Mbps loopback
	   is desired to be turned off. */
	ns_10_base_t_hdx_loopack(phydev, hdx_loopback_off);
	return ns_ack_interrupt(phydev);
}
/* PHY driver description for the DP83865; generic autoneg/status
 * handling with NatSemi-specific init and interrupt hooks. */
static struct phy_driver dp83865_driver = {
	.phy_id = DP83865_PHY_ID,
	.phy_id_mask = 0xfffffff0,
	.name = "NatSemi DP83865",
	.features = PHY_GBIT_FEATURES | SUPPORTED_Pause | SUPPORTED_Asym_Pause,
	.flags = PHY_HAS_INTERRUPT,
	.config_init = ns_config_init,
	.config_aneg = genphy_config_aneg,
	.read_status = genphy_read_status,
	.ack_interrupt = ns_ack_interrupt,
	.config_intr = ns_config_intr,
	.driver = {.owner = THIS_MODULE,}
};
/* Module init/exit: (un)register the single PHY driver. */
static int __init ns_init(void)
{
	return phy_driver_register(&dp83865_driver);
}
static void __exit ns_exit(void)
{
	phy_driver_unregister(&dp83865_driver);
}
MODULE_DESCRIPTION("NatSemi PHY driver");
MODULE_AUTHOR("Stuart Menefy");
MODULE_LICENSE("GPL");
module_init(ns_init);
module_exit(ns_exit);
/* MDIO device table so the module autoloads for matching PHY IDs. */
static struct mdio_device_id __maybe_unused ns_tbl[] = {
	{ DP83865_PHY_ID, 0xfffffff0 },
	{ }
};
MODULE_DEVICE_TABLE(mdio, ns_tbl);
| gpl-2.0 |
msdx321/android_kernel_samsung_G9350 | drivers/rtc/rtc-tegra.c | 2955 | 12256 | /*
* An RTC driver for the NVIDIA Tegra 200 series internal RTC.
*
* Copyright (c) 2010, NVIDIA Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
/* Register offsets from the RTC base address. */
/* set to 1 = busy every eight 32kHz clocks during copy of sec+msec to AHB */
#define TEGRA_RTC_REG_BUSY			0x004
#define TEGRA_RTC_REG_SECONDS			0x008
/* when msec is read, the seconds are buffered into shadow seconds. */
#define TEGRA_RTC_REG_SHADOW_SECONDS		0x00c
#define TEGRA_RTC_REG_MILLI_SECONDS		0x010
#define TEGRA_RTC_REG_SECONDS_ALARM0		0x014
#define TEGRA_RTC_REG_SECONDS_ALARM1		0x018
#define TEGRA_RTC_REG_MILLI_SECONDS_ALARM0	0x01c
#define TEGRA_RTC_REG_INTR_MASK			0x028
/* write 1 bits to clear status bits */
#define TEGRA_RTC_REG_INTR_STATUS		0x02c
/* bits in INTR_MASK */
#define TEGRA_RTC_INTR_MASK_MSEC_CDN_ALARM	(1<<4)
#define TEGRA_RTC_INTR_MASK_SEC_CDN_ALARM	(1<<3)
#define TEGRA_RTC_INTR_MASK_MSEC_ALARM		(1<<2)
#define TEGRA_RTC_INTR_MASK_SEC_ALARM1		(1<<1)
#define TEGRA_RTC_INTR_MASK_SEC_ALARM0		(1<<0)
/* bits in INTR_STATUS */
#define TEGRA_RTC_INTR_STATUS_MSEC_CDN_ALARM	(1<<4)
#define TEGRA_RTC_INTR_STATUS_SEC_CDN_ALARM	(1<<3)
#define TEGRA_RTC_INTR_STATUS_MSEC_ALARM	(1<<2)
#define TEGRA_RTC_INTR_STATUS_SEC_ALARM1	(1<<1)
#define TEGRA_RTC_INTR_STATUS_SEC_ALARM0	(1<<0)
/* Per-device driver state, stored via platform drvdata. */
struct tegra_rtc_info {
	struct platform_device	*pdev;
	struct rtc_device	*rtc_dev;
	void __iomem		*rtc_base; /* NULL if not initialized. */
	int			tegra_rtc_irq; /* alarm and periodic irq */
	spinlock_t		tegra_rtc_lock; /* guards msec/shadow-sec reads
						 * and INTR register updates */
};
/* RTC hardware is busy when it is updating its values over AHB once
* every eight 32kHz clocks (~250uS).
* outside of these updates the CPU is free to write.
* CPU is always free to read.
*/
/* Return non-zero while the RTC's periodic sec/msec copy to the AHB
 * registers is in progress (bit 0 of the BUSY register). */
static inline u32 tegra_rtc_check_busy(struct tegra_rtc_info *info)
{
	u32 busy = readl(info->rtc_base + TEGRA_RTC_REG_BUSY);

	return busy & 1;
}
/* Wait for hardware to be ready for writing.
* This function tries to maximize the amount of time before the next update.
* It does this by waiting for the RTC to become busy with its periodic update,
* then returning once the RTC first becomes not busy.
* This periodic update (where the seconds and milliseconds are copied to the
* AHB side) occurs every eight 32kHz clocks (~250uS).
* The behavior of this function allows us to make some assumptions without
* introducing a race, because 250uS is plenty of time to read/write a value.
*/
/* Wait for hardware to be ready for writing.
 * Spin (bounded, ~490us worst case) until the RTC finishes its current
 * periodic update; once the busy bit clears the caller has roughly
 * 250us to manipulate registers before the next update.
 * Returns 0 when ready, -ETIMEDOUT if the busy bit never cleared.
 */
static int tegra_rtc_wait_while_busy(struct device *dev)
{
	struct tegra_rtc_info *info = dev_get_drvdata(dev);
	int retries;

	for (retries = 500; tegra_rtc_check_busy(info); udelay(1)) {
		if (retries-- == 0) {
			dev_err(dev, "write failed:retry count exceeded.\n");
			return -ETIMEDOUT;
		}
	}

	return 0;
}
/*
 * Read the current time.  Milliseconds must be read first: the hardware
 * latches SECONDS into SHADOW_SECONDS on the msec read, so the pair
 * read under the lock is coherent.
 */
static int tegra_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct tegra_rtc_info *info = dev_get_drvdata(dev);
	unsigned long sec, msec;
	unsigned long sl_irq_flags;
	/* RTC hardware copies seconds to shadow seconds when a read
	 * of milliseconds occurs. use a lock to keep other threads out. */
	spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
	msec = readl(info->rtc_base + TEGRA_RTC_REG_MILLI_SECONDS);
	sec = readl(info->rtc_base + TEGRA_RTC_REG_SHADOW_SECONDS);
	spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
	rtc_time_to_tm(sec, tm);
	dev_vdbg(dev, "time read as %lu. %d/%d/%d %d:%02u:%02u\n",
		sec,
		tm->tm_mon + 1,
		tm->tm_mday,
		tm->tm_year + 1900,
		tm->tm_hour,
		tm->tm_min,
		tm->tm_sec
	);
	return 0;
}
/*
 * Set the time: validate the struct, convert to seconds and write the
 * SECONDS register once the RTC is not busy with its periodic update.
 */
static int tegra_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct tegra_rtc_info *info = dev_get_drvdata(dev);
	unsigned long sec;
	int ret;
	/* convert tm to seconds. */
	ret = rtc_valid_tm(tm);
	if (ret)
		return ret;
	rtc_tm_to_time(tm, &sec);
	dev_vdbg(dev, "time set to %lu. %d/%d/%d %d:%02u:%02u\n",
		sec,
		tm->tm_mon+1,
		tm->tm_mday,
		tm->tm_year+1900,
		tm->tm_hour,
		tm->tm_min,
		tm->tm_sec
	);
	/* seconds only written if wait succeeded. */
	ret = tegra_rtc_wait_while_busy(dev);
	if (!ret)
		writel(sec, info->rtc_base + TEGRA_RTC_REG_SECONDS);
	dev_vdbg(dev, "time read back as %d\n",
		readl(info->rtc_base + TEGRA_RTC_REG_SECONDS));
	return ret;
}
/*
 * Report the current alarm.  A SECONDS_ALARM0 value of 0 is treated as
 * "alarm disabled" (see tegra_rtc_set_alarm, which writes 0 to disable).
 */
static int tegra_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
	struct tegra_rtc_info *info = dev_get_drvdata(dev);
	unsigned long sec;
	unsigned tmp;
	sec = readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
	if (sec == 0) {
		/* alarm is disabled. */
		alarm->enabled = 0;
		alarm->time.tm_mon = -1;
		alarm->time.tm_mday = -1;
		alarm->time.tm_year = -1;
		alarm->time.tm_hour = -1;
		alarm->time.tm_min = -1;
		alarm->time.tm_sec = -1;
	} else {
		/* alarm is enabled. */
		alarm->enabled = 1;
		rtc_time_to_tm(sec, &alarm->time);
	}
	/* report whether the alarm interrupt is currently pending. */
	tmp = readl(info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
	alarm->pending = (tmp & TEGRA_RTC_INTR_STATUS_SEC_ALARM0) != 0;
	return 0;
}
/*
 * Enable or disable the seconds-alarm interrupt by toggling the
 * SEC_ALARM0 bit in INTR_MASK under the device lock.
 * NOTE(review): the return value of tegra_rtc_wait_while_busy() is
 * ignored here, so on timeout the mask update proceeds anyway.
 */
static int tegra_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled)
{
	struct tegra_rtc_info *info = dev_get_drvdata(dev);
	unsigned status;
	unsigned long sl_irq_flags;
	tegra_rtc_wait_while_busy(dev);
	spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
	/* read the original value, and OR in the flag. */
	status = readl(info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
	if (enabled)
		status |= TEGRA_RTC_INTR_MASK_SEC_ALARM0; /* set it */
	else
		status &= ~TEGRA_RTC_INTR_MASK_SEC_ALARM0; /* clear it */
	writel(status, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
	spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
	return 0;
}
/*
 * Program SECONDS_ALARM0.  Writing 0 disables the alarm; a non-zero
 * value arms it and enables the alarm interrupt.
 * NOTE(review): an alarm time equal to the epoch (sec == 0) is
 * indistinguishable from "disabled" with this encoding.
 */
static int tegra_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
{
	struct tegra_rtc_info *info = dev_get_drvdata(dev);
	unsigned long sec;
	if (alarm->enabled)
		rtc_tm_to_time(&alarm->time, &sec);
	else
		sec = 0;
	tegra_rtc_wait_while_busy(dev);
	writel(sec, info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
	dev_vdbg(dev, "alarm read back as %d\n",
		readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0));
	/* if successfully written and alarm is enabled ... */
	if (sec) {
		tegra_rtc_alarm_irq_enable(dev, 1);
		dev_vdbg(dev, "alarm set as %lu. %d/%d/%d %d:%02u:%02u\n",
			sec,
			alarm->time.tm_mon+1,
			alarm->time.tm_mday,
			alarm->time.tm_year+1900,
			alarm->time.tm_hour,
			alarm->time.tm_min,
			alarm->time.tm_sec);
	} else {
		/* disable alarm if 0 or write error. */
		dev_vdbg(dev, "alarm disabled\n");
		tegra_rtc_alarm_irq_enable(dev, 0);
	}
	return 0;
}
/* /proc reporting hook: print the device name, or nothing if the
 * device is not bound to a driver. */
static int tegra_rtc_proc(struct device *dev, struct seq_file *seq)
{
	if (dev && dev->driver)
		return seq_printf(seq, "name\t\t: %s\n", dev_name(dev));

	return 0;
}
/*
 * Interrupt handler: on any pending event, mask all RTC interrupts and
 * write-1-to-clear the status bits, then report alarm (AF) and/or
 * periodic (PF) events to the RTC core.
 */
static irqreturn_t tegra_rtc_irq_handler(int irq, void *data)
{
	struct device *dev = data;
	struct tegra_rtc_info *info = dev_get_drvdata(dev);
	unsigned long events = 0;
	unsigned status;
	unsigned long sl_irq_flags;
	status = readl(info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
	if (status) {
		/* clear the interrupt masks and status on any irq. */
		tegra_rtc_wait_while_busy(dev);
		spin_lock_irqsave(&info->tegra_rtc_lock, sl_irq_flags);
		writel(0, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
		writel(status, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
		spin_unlock_irqrestore(&info->tegra_rtc_lock, sl_irq_flags);
	}
	/* check if Alarm */
	if ((status & TEGRA_RTC_INTR_STATUS_SEC_ALARM0))
		events |= RTC_IRQF | RTC_AF;
	/* check if Periodic */
	if ((status & TEGRA_RTC_INTR_STATUS_SEC_CDN_ALARM))
		events |= RTC_IRQF | RTC_PF;
	rtc_update_irq(info->rtc_dev, 1, events);
	return IRQ_HANDLED;
}
/* RTC class operations exposed to the RTC core. */
static struct rtc_class_ops tegra_rtc_ops = {
	.read_time	= tegra_rtc_read_time,
	.set_time	= tegra_rtc_set_time,
	.read_alarm	= tegra_rtc_read_alarm,
	.set_alarm	= tegra_rtc_set_alarm,
	.proc		= tegra_rtc_proc,
	.alarm_irq_enable = tegra_rtc_alarm_irq_enable,
};
/* Device-tree match table. */
static const struct of_device_id tegra_rtc_dt_match[] = {
	{ .compatible = "nvidia,tegra20-rtc", },
	{}
};
MODULE_DEVICE_TABLE(of, tegra_rtc_dt_match);
/*
 * Probe: map the register window, fetch the IRQ, quiesce the hardware
 * (alarm off, all interrupts masked and cleared), then register the
 * RTC device and its interrupt handler.  All resources are devm-managed.
 */
static int __init tegra_rtc_probe(struct platform_device *pdev)
{
	struct tegra_rtc_info *info;
	struct resource *res;
	int ret;
	info = devm_kzalloc(&pdev->dev, sizeof(struct tegra_rtc_info),
		GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	info->rtc_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(info->rtc_base))
		return PTR_ERR(info->rtc_base);
	info->tegra_rtc_irq = platform_get_irq(pdev, 0);
	if (info->tegra_rtc_irq <= 0)
		return -EBUSY;
	/* set context info. */
	info->pdev = pdev;
	spin_lock_init(&info->tegra_rtc_lock);
	platform_set_drvdata(pdev, info);
	/* clear out the hardware. */
	writel(0, info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0);
	writel(0xffffffff, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
	writel(0, info->rtc_base + TEGRA_RTC_REG_INTR_MASK);
	device_init_wakeup(&pdev->dev, 1);
	info->rtc_dev = devm_rtc_device_register(&pdev->dev,
				dev_name(&pdev->dev), &tegra_rtc_ops,
				THIS_MODULE);
	if (IS_ERR(info->rtc_dev)) {
		ret = PTR_ERR(info->rtc_dev);
		dev_err(&pdev->dev, "Unable to register device (err=%d).\n",
			ret);
		return ret;
	}
	ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
			tegra_rtc_irq_handler, IRQF_TRIGGER_HIGH,
			dev_name(&pdev->dev), &pdev->dev);
	if (ret) {
		dev_err(&pdev->dev,
			"Unable to request interrupt for device (err=%d).\n",
			ret);
		return ret;
	}
	dev_notice(&pdev->dev, "Tegra internal Real Time Clock\n");
	return 0;
}
#ifdef CONFIG_PM_SLEEP
/*
 * Suspend: leave only ALARM0 unmasked so it can wake the system, and
 * arm the IRQ as a wake source when the device may wake us.
 */
static int tegra_rtc_suspend(struct device *dev)
{
	struct tegra_rtc_info *info = dev_get_drvdata(dev);

	tegra_rtc_wait_while_busy(dev);

	/* only use ALARM0 as a wake source. */
	writel(0xffffffff, info->rtc_base + TEGRA_RTC_REG_INTR_STATUS);
	writel(TEGRA_RTC_INTR_STATUS_SEC_ALARM0,
		info->rtc_base + TEGRA_RTC_REG_INTR_MASK);

	dev_vdbg(dev, "alarm sec = %d\n",
		readl(info->rtc_base + TEGRA_RTC_REG_SECONDS_ALARM0));
	dev_vdbg(dev, "Suspend (device_may_wakeup=%d) irq:%d\n",
		device_may_wakeup(dev), info->tegra_rtc_irq);

	/* leave the alarms on as a wake source. */
	if (device_may_wakeup(dev))
		enable_irq_wake(info->tegra_rtc_irq);

	return 0;
}
/* Resume: undo the wake-source arming done in suspend. */
static int tegra_rtc_resume(struct device *dev)
{
	struct tegra_rtc_info *info = dev_get_drvdata(dev);

	dev_vdbg(dev, "Resume (device_may_wakeup=%d)\n",
		device_may_wakeup(dev));
	/* alarms were left on as a wake source, turn them off. */
	if (device_may_wakeup(dev))
		disable_irq_wake(info->tegra_rtc_irq);

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(tegra_rtc_pm_ops, tegra_rtc_suspend, tegra_rtc_resume);
/* Shutdown hook: silence the alarm interrupt before power-off/reboot. */
static void tegra_rtc_shutdown(struct platform_device *pdev)
{
	dev_vdbg(&pdev->dev, "disabling interrupts.\n");
	tegra_rtc_alarm_irq_enable(&pdev->dev, 0);
}
MODULE_ALIAS("platform:tegra_rtc");
/* Platform driver; probe is __init, hence module_platform_driver_probe. */
static struct platform_driver tegra_rtc_driver = {
	.shutdown	= tegra_rtc_shutdown,
	.driver		= {
		.name	= "tegra_rtc",
		.owner	= THIS_MODULE,
		.of_match_table = tegra_rtc_dt_match,
		.pm	= &tegra_rtc_pm_ops,
	},
};
module_platform_driver_probe(tegra_rtc_driver, tegra_rtc_probe);
MODULE_AUTHOR("Jon Mayo <jmayo@nvidia.com>");
MODULE_DESCRIPTION("driver for Tegra internal RTC");
MODULE_LICENSE("GPL");
| gpl-2.0 |
elettronicagf/kernel-am335x | arch/mips/fw/arc/misc.c | 3211 | 1491 | /*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Miscellaneous ARCS PROM routines.
*
* Copyright (C) 1996 David S. Miller (davem@davemloft.net)
* Copyright (C) 1999 Ralf Baechle (ralf@gnu.org)
* Copyright (C) 1999 Silicon Graphics, Inc.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <asm/bcache.h>
#include <asm/fw/arc/types.h>
#include <asm/sgialib.h>
#include <asm/bootinfo.h>
/*
 * Halt the machine via the ARC firmware.  The PROM call should never
 * return; the self-branch below guarantees we spin forever even if
 * the firmware misbehaves.
 */
VOID
ArcHalt(VOID)
{
	bc_disable();		/* disable the board cache before handing off */
	local_irq_disable();
	ARC_CALL0(halt);
never:	goto never;
}
/* Power the machine down via the ARC firmware; must not return. */
VOID
ArcPowerDown(VOID)
{
	bc_disable();
	local_irq_disable();
	ARC_CALL0(pdown);
never:	goto never;		/* spin if the PROM call returns */
}
/* XXX is this a soft reset basically? XXX */
/* Restart the machine via the ARC firmware; must not return. */
VOID
ArcRestart(VOID)
{
	bc_disable();
	local_irq_disable();
	ARC_CALL0(restart);
never:	goto never;		/* spin if the PROM call returns */
}
/* Reboot the machine via the ARC firmware; must not return. */
VOID
ArcReboot(VOID)
{
	bc_disable();
	local_irq_disable();
	ARC_CALL0(reboot);
never:	goto never;		/* spin if the PROM call returns */
}
/* Drop into the ARC firmware's interactive monitor; must not return. */
VOID
ArcEnterInteractiveMode(VOID)
{
	bc_disable();
	local_irq_disable();
	ARC_CALL0(imode);
never:	goto never;		/* spin if the PROM call returns */
}
/* Ask the firmware to persist its configuration; returns the PROM status. */
LONG
ArcSaveConfiguration(VOID)
{
	return ARC_CALL0(cfg_save);
}
/* Fetch the firmware's system identification record. */
struct linux_sysid *
ArcGetSystemId(VOID)
{
	return (struct linux_sysid *) ARC_CALL0(get_sysid);
}
/* Flush all caches through the firmware; boot-time only (__init). */
VOID __init
ArcFlushAllCaches(VOID)
{
	ARC_CALL0(cache_flush);
}
/* Query the firmware console's display status for FileID; __init only. */
DISPLAY_STATUS * __init ArcGetDisplayStatus(ULONG FileID)
{
	return (DISPLAY_STATUS *) ARC_CALL1(GetDisplayStatus, FileID);
}
| gpl-2.0 |
ruslan250283/alcatel_6036 | drivers/net/wireless/ath/ath9k/calib.c | 3467 | 11833 | /*
* Copyright (c) 2008-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "hw.h"
#include "hw-ops.h"
#include <linux/export.h>
/* Common calibration code */
/*
 * Return the median of the NF calibration history buffer.
 *
 * The history is copied into a scratch array and sorted in descending
 * order (insertion sort; the buffer is tiny), so the caller's data is
 * left untouched.  The middle element of the sorted copy is the median.
 */
static int16_t ath9k_hw_get_nf_hist_mid(int16_t *nfCalBuffer)
{
	int16_t sorted[ATH9K_NF_CAL_HIST_MAX];
	int i, j;

	for (i = 0; i < ATH9K_NF_CAL_HIST_MAX; i++) {
		int16_t val = nfCalBuffer[i];

		for (j = i; j > 0 && sorted[j - 1] < val; j--)
			sorted[j] = sorted[j - 1];
		sorted[j] = val;
	}

	return sorted[(ATH9K_NF_CAL_HIST_MAX - 1) >> 1];
}
/*
 * Select the noise-floor limit set for a channel's band.  A NULL
 * channel (or a 2.4 GHz one) selects the 2 GHz limits.
 */
static struct ath_nf_limits *ath9k_hw_get_nf_limits(struct ath_hw *ah,
						    struct ath9k_channel *chan)
{
	return (!chan || IS_CHAN_2GHZ(chan)) ? &ah->nf_2g : &ah->nf_5g;
}
/* Nominal (default) noise floor for the channel's band. */
static s16 ath9k_hw_get_default_nf(struct ath_hw *ah,
				   struct ath9k_channel *chan)
{
	return ath9k_hw_get_nf_limits(ah, chan)->nominal;
}
/*
 * Effective channel noise: the default noise floor, raised by however
 * much the measured NF exceeds the band's nominal value (never lowered).
 */
s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
{
	s8 noise = ATH_DEFAULT_NOISE_FLOOR;
	s8 delta;

	if (!chan || !chan->noisefloor)
		return noise;

	delta = chan->noisefloor - ath9k_hw_get_default_nf(ah, chan);
	if (delta > 0)
		noise += delta;

	return noise;
}
EXPORT_SYMBOL(ath9k_hw_getchan_noise);
/*
 * Fold a fresh set of per-chain NF readings into the calibration
 * history and recompute each chain's filtered ("private") NF.
 */
static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
					      struct ath9k_hw_cal_data *cal,
					      int16_t *nfarray)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_nf_limits *limit;
	struct ath9k_nfcal_hist *h;
	bool high_nf_mid = false;
	u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
	int i;

	h = cal->nfCalHist;
	limit = ath9k_hw_get_nf_limits(ah, ah->curchan);

	for (i = 0; i < NUM_NF_READINGS; i++) {
		/* Skip chains outside the RX mask; extension-channel slots
		 * (i >= AR5416_MAX_CHAINS) only exist in HT40. */
		if (!(chainmask & (1 << i)) ||
		    ((i >= AR5416_MAX_CHAINS) && !IS_CHAN_HT40(ah->curchan)))
			continue;

		/* Insert the reading into the circular history buffer. */
		h[i].nfCalBuffer[h[i].currIndex] = nfarray[i];

		if (++h[i].currIndex >= ATH9K_NF_CAL_HIST_MAX)
			h[i].currIndex = 0;

		if (h[i].invalidNFcount > 0) {
			/* History not fully populated: use the raw value. */
			h[i].invalidNFcount--;
			h[i].privNF = nfarray[i];
		} else {
			/* Otherwise use the history median. */
			h[i].privNF =
				ath9k_hw_get_nf_hist_mid(h[i].nfCalBuffer);
		}

		if (!h[i].privNF)
			continue;

		if (h[i].privNF > limit->max) {
			high_nf_mid = true;

			ath_dbg(common, CALIBRATE,
				"NFmid[%d] (%d) > MAX (%d), %s\n",
				i, h[i].privNF, limit->max,
				(cal->nfcal_interference ?
				 "not corrected (due to interference)" :
				 "correcting to MAX"));

			/*
			 * Normally we limit the average noise floor by the
			 * hardware specific maximum here. However if we have
			 * encountered stuck beacons because of interference,
			 * we bypass this limit here in order to better deal
			 * with our environment.
			 */
			if (!cal->nfcal_interference)
				h[i].privNF = limit->max;
		}
	}

	/*
	 * If the noise floor seems normal for all chains, assume that
	 * there is no significant interference in the environment anymore.
	 * Re-enable the enforcement of the NF maximum again.
	 */
	if (!high_nf_mid)
		cal->nfcal_interference = false;
}
/*
 * Look up the per-band NF failure threshold from the EEPROM.
 * Returns true with *nft set on success; BUG()s on an unknown band.
 */
static bool ath9k_hw_get_nf_thresh(struct ath_hw *ah,
				   enum ieee80211_band band,
				   int16_t *nft)
{
	switch (band) {
	case IEEE80211_BAND_5GHZ:
		*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_5);
		break;
	case IEEE80211_BAND_2GHZ:
		*nft = (int8_t)ah->eep_ops->get_eeprom(ah, EEP_NFTHRESH_2);
		break;
	default:
		BUG_ON(1);	/* unreachable for a valid band */
		return false;
	}

	return true;
}
/*
 * Restart a calibration cycle: program the hardware for currCal, mark
 * it running, and clear all accumulated per-chain measurement state.
 */
void ath9k_hw_reset_calibration(struct ath_hw *ah,
				struct ath9k_cal_list *currCal)
{
	int chain;

	ath9k_hw_setup_calibration(ah, currCal);

	currCal->calState = CAL_RUNNING;

	/* Zero the accumulated measurement signs for every chain. */
	for (chain = 0; chain < AR5416_MAX_CHAINS; chain++) {
		ah->meas0.sign[chain] = 0;
		ah->meas1.sign[chain] = 0;
		ah->meas2.sign[chain] = 0;
		ah->meas3.sign[chain] = 0;
	}

	ah->cal_samples = 0;
}
/* This is done for the currently configured channel */
bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
{
struct ath_common *common = ath9k_hw_common(ah);
struct ieee80211_conf *conf = &common->hw->conf;
struct ath9k_cal_list *currCal = ah->cal_list_curr;
if (!ah->caldata)
return true;
if (!AR_SREV_9100(ah) && !AR_SREV_9160_10_OR_LATER(ah))
return true;
if (currCal == NULL)
return true;
if (currCal->calState != CAL_DONE) {
ath_dbg(common, CALIBRATE, "Calibration state incorrect, %d\n",
currCal->calState);
return true;
}
if (!(ah->supp_cals & currCal->calData->calType))
return true;
ath_dbg(common, CALIBRATE, "Resetting Cal %d state for channel %u\n",
currCal->calData->calType, conf->channel->center_freq);
ah->caldata->CalValid &= ~currCal->calData->calType;
currCal->calState = CAL_WAITING;
return false;
}
EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
/*
 * Kick off a noise-floor calibration.  When @update is true the
 * baseband is allowed to fold the result into its internal NF value;
 * otherwise the result is only latched for software to read.
 */
void ath9k_hw_start_nfcal(struct ath_hw *ah, bool update)
{
	if (ah->caldata)
		ah->caldata->nfcal_pending = true;

	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_ENABLE_NF);

	if (update)
		REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
			    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	else
		REG_SET_BIT(ah, AR_PHY_AGC_CONTROL,
			    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);

	/* Trigger the calibration itself. */
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
}
/*
 * Push the software-filtered noise-floor values into the baseband,
 * wait for the load to complete, then restore the per-chain cap
 * registers so the next hardware calibration is not limited by the
 * values just loaded.
 */
void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath9k_nfcal_hist *h = NULL;
	unsigned i, j;
	int32_t val;
	u8 chainmask = (ah->rxchainmask << 3) | ah->rxchainmask;
	struct ath_common *common = ath9k_hw_common(ah);
	struct ieee80211_conf *conf = &common->hw->conf;
	s16 default_nf = ath9k_hw_get_default_nf(ah, chan);

	if (ah->caldata)
		h = ah->caldata->nfCalHist;

	/* Write the filtered NF (or the band default when there is no
	 * history) into each active chain's NF register. */
	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			s16 nfval;

			if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
				continue;

			if (h)
				nfval = h[i].privNF;
			else
				nfval = default_nf;

			val = REG_READ(ah, ah->nf_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) nfval << 1) & 0x1ff);
			REG_WRITE(ah, ah->nf_regs[i], val);
		}
	}

	/*
	 * Load software filtered NF value into baseband internal minCCApwr
	 * variable.
	 */
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_ENABLE_NF);
	REG_CLR_BIT(ah, AR_PHY_AGC_CONTROL,
		    AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
	REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);

	/*
	 * Wait for load to complete, should be fast, a few 10s of us.
	 * The max delay was changed from an original 250us to 10000us
	 * since 250us often results in NF load timeout and causes deaf
	 * condition during stress testing 12/12/2009
	 */
	for (j = 0; j < 10000; j++) {
		if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
		     AR_PHY_AGC_CONTROL_NF) == 0)
			break;
		udelay(10);
	}

	/*
	 * We timed out waiting for the noisefloor to load, probably due to an
	 * in-progress rx. Simply return here and allow the load plenty of time
	 * to complete before the next calibration interval. We need to avoid
	 * trying to load -50 (which happens below) while the previous load is
	 * still in progress as this can cause rx deafness. Instead by returning
	 * here, the baseband nf cal will just be capped by our present
	 * noisefloor until the next calibration timer.
	 */
	if (j == 10000) {
		ath_dbg(common, ANY,
			"Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
			REG_READ(ah, AR_PHY_AGC_CONTROL));
		return;
	}

	/*
	 * Restore maxCCAPower register parameter again so that we're not capped
	 * by the median we just loaded. This will be initial (and max) value
	 * of next noise floor calibration the baseband does.
	 */
	ENABLE_REGWRITE_BUFFER(ah);

	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (chainmask & (1 << i)) {
			if ((i >= AR5416_MAX_CHAINS) && !conf_is_ht40(conf))
				continue;

			val = REG_READ(ah, ah->nf_regs[i]);
			val &= 0xFFFFFE00;
			val |= (((u32) (-50) << 1) & 0x1ff);
			REG_WRITE(ah, ah->nf_regs[i], val);
		}
	}

	REGWRITE_BUFFER_FLUSH(ah);
}
/*
 * Clamp raw per-chain NF readings to the hardware limits of the
 * current band before they enter the calibration history.
 *
 * Readings above limit->max are clamped to the maximum; readings
 * below limit->min are replaced with the nominal value (an absurdly
 * low reading is treated as bogus rather than as a great noise floor).
 */
static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
{
	struct ath_common *common = ath9k_hw_common(ah);
	struct ath_nf_limits *limit;
	int i;

	/* Use the shared band-limit lookup instead of open-coding it
	 * (keeps this consistent with the other NF helpers). */
	limit = ath9k_hw_get_nf_limits(ah, ah->curchan);

	for (i = 0; i < NUM_NF_READINGS; i++) {
		if (!nf[i])
			continue;

		ath_dbg(common, CALIBRATE,
			"NF calibrated [%s] [chain %d] is %d\n",
			(i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);

		if (nf[i] > limit->max) {
			ath_dbg(common, CALIBRATE,
				"NF[%d] (%d) > MAX (%d), correcting to MAX\n",
				i, nf[i], limit->max);
			nf[i] = limit->max;
		} else if (nf[i] < limit->min) {
			ath_dbg(common, CALIBRATE,
				"NF[%d] (%d) < MIN (%d), correcting to NOM\n",
				i, nf[i], limit->min);
			nf[i] = limit->nominal;
		}
	}
}
/*
 * Read back the hardware's NF calibration result for @chan, sanitize
 * it, and fold it into the calibration history.  Returns false when
 * the calibration has not completed yet or no caldata is available
 * (the raw chain-0 value is still recorded in that case).
 */
bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
{
	struct ath_common *common = ath9k_hw_common(ah);
	int16_t nf, nfThresh;
	int16_t nfarray[NUM_NF_READINGS] = { 0 };
	struct ath9k_nfcal_hist *h;
	struct ieee80211_channel *c = chan->chan;
	struct ath9k_hw_cal_data *caldata = ah->caldata;

	chan->channelFlags &= (~CHANNEL_CW_INT);
	if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
		/* Hardware still busy with the NF calibration. */
		ath_dbg(common, CALIBRATE,
			"NF did not complete in calibration window\n");
		return false;
	}

	ath9k_hw_do_getnf(ah, nfarray);
	ath9k_hw_nf_sanitize(ah, nfarray);
	nf = nfarray[0];
	/* Flag CW interference when chain 0 exceeds the EEPROM threshold. */
	if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
	    && nf > nfThresh) {
		ath_dbg(common, CALIBRATE,
			"noise floor failed detected; detected %d, threshold %d\n",
			nf, nfThresh);
		chan->channelFlags |= CHANNEL_CW_INT;
	}

	if (!caldata) {
		chan->noisefloor = nf;
		ah->noise = ath9k_hw_getchan_noise(ah, chan);
		return false;
	}

	h = caldata->nfCalHist;
	caldata->nfcal_pending = false;
	ath9k_hw_update_nfcal_hist_buffer(ah, caldata, nfarray);
	chan->noisefloor = h[0].privNF;
	ah->noise = ath9k_hw_getchan_noise(ah, chan);
	return true;
}
EXPORT_SYMBOL(ath9k_hw_getnf);
/*
 * Initialize the NF calibration history for @chan: every slot is
 * seeded with the band's default NF and the invalid-reading counter
 * is armed so the first few raw readings bypass the median filter.
 */
void ath9k_init_nfcal_hist_buffer(struct ath_hw *ah,
				  struct ath9k_channel *chan)
{
	struct ath9k_nfcal_hist *h;
	s16 default_nf;
	int i, j;

	/* Guard against a missing caldata, like every other user of
	 * ah->caldata in this file (fixes a potential NULL dereference). */
	if (!ah->caldata)
		return;

	ah->caldata->channel = chan->channel;
	ah->caldata->channelFlags = chan->channelFlags & ~CHANNEL_CW_INT;
	h = ah->caldata->nfCalHist;
	default_nf = ath9k_hw_get_default_nf(ah, chan);
	for (i = 0; i < NUM_NF_READINGS; i++) {
		h[i].currIndex = 0;
		h[i].privNF = default_nf;
		h[i].invalidNFcount = AR_PHY_CCA_FILTERWINDOW_LENGTH;
		for (j = 0; j < ATH9K_NF_CAL_HIST_MAX; j++) {
			h[i].nfCalBuffer[j] = default_nf;
		}
	}
}
/*
 * Called when beacons get stuck: trigger (or harvest) a noise-floor
 * calibration so the hardware adapts to the noisy environment, and
 * record that interference is suspected.
 */
void ath9k_hw_bstuck_nfcal(struct ath_hw *ah)
{
	struct ath9k_hw_cal_data *caldata = ah->caldata;

	if (unlikely(!caldata))
		return;

	/*
	 * If beacons are stuck, the most likely cause is interference.
	 * Triggering a noise floor calibration at this point helps the
	 * hardware adapt to a noisy environment much faster.
	 * To ensure that we recover from stuck beacons quickly, let
	 * the baseband update the internal NF value itself, similar to
	 * what is being done after a full reset.
	 */
	if (!caldata->nfcal_pending)
		ath9k_hw_start_nfcal(ah, true);
	else if (!(REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF))
		ath9k_hw_getnf(ah, ah->curchan);

	caldata->nfcal_interference = true;
}
EXPORT_SYMBOL(ath9k_hw_bstuck_nfcal);
| gpl-2.0 |
digetx/tegra2_qemu_linux_kernel | drivers/infiniband/core/cache.c | 3723 | 11195 | /*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
* Copyright (c) 2005 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <rdma/ib_cache.h>
#include "core_priv.h"
/*
 * Per-port P_Key / GID table caches.  Each is allocated in one chunk:
 * header plus table_len trailing entries.  Use C99 flexible array
 * members rather than the legacy GNU zero-length-array idiom; the
 * layout and sizeof semantics are unchanged.
 */
struct ib_pkey_cache {
	int             table_len;
	u16             table[];
};

struct ib_gid_cache {
	int             table_len;
	union ib_gid    table[];
};
/* Deferred cache refresh: one work item per (device, port) event. */
struct ib_update_work {
	struct work_struct work;
	struct ib_device  *device;
	u8                 port_num;
};
/* First valid port number: switches use port 0, CAs/routers start at 1. */
static inline int start_port(struct ib_device *device)
{
	if (device->node_type == RDMA_NODE_IB_SWITCH)
		return 0;
	return 1;
}
/* Last valid port number: 0 for switches, phys_port_cnt otherwise. */
static inline int end_port(struct ib_device *device)
{
	if (device->node_type == RDMA_NODE_IB_SWITCH)
		return 0;
	return device->phys_port_cnt;
}
/*
 * Copy the cached GID at (port_num, index) into *gid.
 * Returns 0 on success or -EINVAL for an out-of-range port or index.
 */
int ib_get_cached_gid(struct ib_device *device,
		      u8                port_num,
		      int               index,
		      union ib_gid     *gid)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int ret;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.gid_cache[port_num - start_port(device)];

	ret = -EINVAL;
	if (index >= 0 && index < cache->table_len) {
		*gid = cache->table[index];
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_gid);
/*
 * Scan every port's GID cache for an exact match of *gid.
 * On success, *port_num (and *index, if non-NULL) identify the hit;
 * returns -ENOENT when the GID is not cached on any port.
 */
int ib_find_cached_gid(struct ib_device *device,
		       union ib_gid	*gid,
		       u8               *port_num,
		       u16              *index)
{
	struct ib_gid_cache *cache;
	unsigned long flags;
	int p, i;
	int ret = -ENOENT;

	/* NOTE(review): port_num is u8, so this "-1" sentinel actually
	 * stores 0xff — presumably intentional, but worth confirming. */
	*port_num = -1;
	if (index)
		*index = -1;

	read_lock_irqsave(&device->cache.lock, flags);

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		cache = device->cache.gid_cache[p];
		for (i = 0; i < cache->table_len; ++i) {
			if (!memcmp(gid, &cache->table[i], sizeof *gid)) {
				*port_num = p + start_port(device);
				if (index)
					*index = i;
				ret = 0;
				goto found;
			}
		}
	}
found:
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_gid);
/*
 * Copy the cached P_Key at (port_num, index) into *pkey.
 * Returns 0 on success or -EINVAL for an out-of-range port or index.
 */
int ib_get_cached_pkey(struct ib_device *device,
		       u8                port_num,
		       int               index,
		       u16              *pkey)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	ret = -EINVAL;
	if (index >= 0 && index < cache->table_len) {
		*pkey = cache->table[index];
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_pkey);
/*
 * Find the table index of a P_Key, comparing only the low 15 bits.
 * Full-membership entries (bit 15 set) win; a partial-membership
 * match is returned only if no full match exists.  -ENOENT if absent.
 */
int ib_find_cached_pkey(struct ib_device *device,
			u8                port_num,
			u16               pkey,
			u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int i;
	int ret = -ENOENT;
	int partial_ix = -1;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i)
		if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) {
			if (cache->table[i] & 0x8000) {
				/* Full member: take it immediately. */
				*index = i;
				ret = 0;
				break;
			} else
				partial_ix = i;
		}

	/* Fall back to the last partial-membership match, if any. */
	if (ret && partial_ix >= 0) {
		*index = partial_ix;
		ret = 0;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_cached_pkey);
/*
 * Find the table index of an exact 16-bit P_Key value (membership bit
 * included).  Returns 0 with *index set, or -ENOENT when absent.
 */
int ib_find_exact_cached_pkey(struct ib_device *device,
			      u8                port_num,
			      u16               pkey,
			      u16              *index)
{
	struct ib_pkey_cache *cache;
	unsigned long flags;
	int ret = -ENOENT;
	int i;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);

	cache = device->cache.pkey_cache[port_num - start_port(device)];

	*index = -1;

	for (i = 0; i < cache->table_len; ++i) {
		if (cache->table[i] != pkey)
			continue;

		*index = i;
		ret = 0;
		break;
	}

	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_find_exact_cached_pkey);
/* Read the cached LMC for a port under the cache lock. */
int ib_get_cached_lmc(struct ib_device *device,
		      u8                port_num,
		      u8                *lmc)
{
	unsigned long flags;
	int ret = 0;

	if (port_num < start_port(device) || port_num > end_port(device))
		return -EINVAL;

	read_lock_irqsave(&device->cache.lock, flags);
	*lmc = device->cache.lmc_cache[port_num - start_port(device)];
	read_unlock_irqrestore(&device->cache.lock, flags);

	return ret;
}
EXPORT_SYMBOL(ib_get_cached_lmc);
/*
 * Rebuild the P_Key/GID/LMC caches for one port by querying the
 * device, then swap the fresh tables in under the write lock and free
 * the old ones.  On any failure everything allocated so far is freed
 * and the existing cache is left untouched (goto-cleanup pattern).
 */
static void ib_cache_update(struct ib_device *device,
			    u8                port)
{
	struct ib_port_attr       *tprops = NULL;
	struct ib_pkey_cache      *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache       *gid_cache = NULL, *old_gid_cache;
	int                        i;
	int                        ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	/* Header plus one trailing entry per table slot. */
	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->table, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	/* Atomically publish the new tables. */
	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
/* Workqueue callback: run the deferred cache refresh, then free the item. */
static void ib_cache_task(struct work_struct *_work)
{
	struct ib_update_work *work;

	work = container_of(_work, struct ib_update_work, work);
	ib_cache_update(work->device, work->port_num);
	kfree(work);
}
/*
 * Async event callback: for any event that can change port state or
 * tables, queue a cache refresh for the affected port.  Allocation
 * failure is silently tolerated (the cache merely stays stale).
 */
static void ib_cache_event(struct ib_event_handler *handler,
			   struct ib_event *event)
{
	struct ib_update_work *work;

	switch (event->event) {
	case IB_EVENT_PORT_ERR:
	case IB_EVENT_PORT_ACTIVE:
	case IB_EVENT_LID_CHANGE:
	case IB_EVENT_PKEY_CHANGE:
	case IB_EVENT_SM_CHANGE:
	case IB_EVENT_CLIENT_REREGISTER:
	case IB_EVENT_GID_CHANGE:
		break;
	default:
		return;		/* not a cache-affecting event */
	}

	work = kmalloc(sizeof *work, GFP_ATOMIC);
	if (!work)
		return;

	INIT_WORK(&work->work, ib_cache_task);
	work->device   = event->device;
	work->port_num = event->element.port_num;
	queue_work(ib_wq, &work->work);
}
/*
 * Client "add" callback: allocate the per-port cache arrays, populate
 * them, and register the async event handler that keeps them fresh.
 * On failure everything allocated so far is released.
 */
static void ib_cache_setup_one(struct ib_device *device)
{
	int p;

	rwlock_init(&device->cache.lock);

	/* One slot per port (switches have the single port 0). */
	device->cache.pkey_cache =
		kmalloc(sizeof *device->cache.pkey_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
	device->cache.gid_cache =
		kmalloc(sizeof *device->cache.gid_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);

	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (end_port(device) -
					   start_port(device) + 1),
					  GFP_KERNEL);

	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache "
		       "for %s\n", device->name);
		goto err;
	}

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		/* NULL first so a failed update leaves a defined state. */
		device->cache.pkey_cache[p] = NULL;
		device->cache.gid_cache [p] = NULL;
		ib_cache_update(device, p + start_port(device));
	}

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	if (ib_register_event_handler(&device->cache.event_handler))
		goto err_cache;

	return;

err_cache:
	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

err:
	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}
/*
 * Client "remove" callback: stop event delivery, drain any queued
 * refresh work, then free all per-port tables and the arrays.
 */
static void ib_cache_cleanup_one(struct ib_device *device)
{
	int p;

	ib_unregister_event_handler(&device->cache.event_handler);
	/* Ensure no ib_cache_task is still running against this device. */
	flush_workqueue(ib_wq);

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}
/* IB client hooks: caches are built/torn down as devices come and go. */
static struct ib_client cache_client = {
	.name   = "cache",
	.add    = ib_cache_setup_one,
	.remove = ib_cache_cleanup_one
};
/* Module init: register the cache client with the IB core. */
int __init ib_cache_setup(void)
{
	return ib_register_client(&cache_client);
}
/* Module exit: unregister the cache client (triggers cleanup per device). */
void __exit ib_cache_cleanup(void)
{
	ib_unregister_client(&cache_client);
}
| gpl-2.0 |
TheEdge-/Leaping_Lemur_kernel | drivers/acpi/acpica/dswload2.c | 5003 | 18154 | /******************************************************************************
*
* Module Name: dswload2 - Dispatcher second pass namespace load callbacks
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2012, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acparser.h"
#include "amlcode.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
#include "acevents.h"
#define _COMPONENT ACPI_DISPATCHER
ACPI_MODULE_NAME("dswload2")
/*******************************************************************************
*
* FUNCTION: acpi_ds_load2_begin_op
*
* PARAMETERS: walk_state - Current state of the parse tree walk
* out_op - Wher to return op if a new one is created
*
* RETURN: Status
*
* DESCRIPTION: Descending callback used during the loading of ACPI tables.
*
******************************************************************************/
acpi_status
acpi_ds_load2_begin_op(struct acpi_walk_state *walk_state,
union acpi_parse_object **out_op)
{
union acpi_parse_object *op;
struct acpi_namespace_node *node;
acpi_status status;
acpi_object_type object_type;
char *buffer_ptr;
u32 flags;
ACPI_FUNCTION_TRACE(ds_load2_begin_op);
op = walk_state->op;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Op=%p State=%p\n", op,
walk_state));
if (op) {
if ((walk_state->control_state) &&
(walk_state->control_state->common.state ==
ACPI_CONTROL_CONDITIONAL_EXECUTING)) {
/* We are executing a while loop outside of a method */
status = acpi_ds_exec_begin_op(walk_state, out_op);
return_ACPI_STATUS(status);
}
/* We only care about Namespace opcodes here */
if ((!(walk_state->op_info->flags & AML_NSOPCODE) &&
(walk_state->opcode != AML_INT_NAMEPATH_OP)) ||
(!(walk_state->op_info->flags & AML_NAMED))) {
return_ACPI_STATUS(AE_OK);
}
/* Get the name we are going to enter or lookup in the namespace */
if (walk_state->opcode == AML_INT_NAMEPATH_OP) {
/* For Namepath op, get the path string */
buffer_ptr = op->common.value.string;
if (!buffer_ptr) {
/* No name, just exit */
return_ACPI_STATUS(AE_OK);
}
} else {
/* Get name from the op */
buffer_ptr = ACPI_CAST_PTR(char, &op->named.name);
}
} else {
/* Get the namestring from the raw AML */
buffer_ptr =
acpi_ps_get_next_namestring(&walk_state->parser_state);
}
/* Map the opcode into an internal object type */
object_type = walk_state->op_info->object_type;
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"State=%p Op=%p Type=%X\n", walk_state, op,
object_type));
switch (walk_state->opcode) {
case AML_FIELD_OP:
case AML_BANK_FIELD_OP:
case AML_INDEX_FIELD_OP:
node = NULL;
status = AE_OK;
break;
case AML_INT_NAMEPATH_OP:
/*
* The name_path is an object reference to an existing object.
* Don't enter the name into the namespace, but look it up
* for use later.
*/
status =
acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
object_type, ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT, walk_state, &(node));
break;
case AML_SCOPE_OP:
/* Special case for Scope(\) -> refers to the Root node */
if (op && (op->named.node == acpi_gbl_root_node)) {
node = op->named.node;
status =
acpi_ds_scope_stack_push(node, object_type,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
} else {
/*
* The Path is an object reference to an existing object.
* Don't enter the name into the namespace, but look it up
* for use later.
*/
status =
acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
object_type, ACPI_IMODE_EXECUTE,
ACPI_NS_SEARCH_PARENT, walk_state,
&(node));
if (ACPI_FAILURE(status)) {
#ifdef ACPI_ASL_COMPILER
if (status == AE_NOT_FOUND) {
status = AE_OK;
} else {
ACPI_ERROR_NAMESPACE(buffer_ptr,
status);
}
#else
ACPI_ERROR_NAMESPACE(buffer_ptr, status);
#endif
return_ACPI_STATUS(status);
}
}
/*
* We must check to make sure that the target is
* one of the opcodes that actually opens a scope
*/
switch (node->type) {
case ACPI_TYPE_ANY:
case ACPI_TYPE_LOCAL_SCOPE: /* Scope */
case ACPI_TYPE_DEVICE:
case ACPI_TYPE_POWER:
case ACPI_TYPE_PROCESSOR:
case ACPI_TYPE_THERMAL:
/* These are acceptable types */
break;
case ACPI_TYPE_INTEGER:
case ACPI_TYPE_STRING:
case ACPI_TYPE_BUFFER:
/*
* These types we will allow, but we will change the type.
* This enables some existing code of the form:
*
* Name (DEB, 0)
* Scope (DEB) { ... }
*/
ACPI_WARNING((AE_INFO,
"Type override - [%4.4s] had invalid type (%s) "
"for Scope operator, changed to type ANY\n",
acpi_ut_get_node_name(node),
acpi_ut_get_type_name(node->type)));
node->type = ACPI_TYPE_ANY;
walk_state->scope_info->common.value = ACPI_TYPE_ANY;
break;
default:
/* All other types are an error */
ACPI_ERROR((AE_INFO,
"Invalid type (%s) for target of "
"Scope operator [%4.4s] (Cannot override)",
acpi_ut_get_type_name(node->type),
acpi_ut_get_node_name(node)));
return (AE_AML_OPERAND_TYPE);
}
break;
default:
/* All other opcodes */
if (op && op->common.node) {
/* This op/node was previously entered into the namespace */
node = op->common.node;
if (acpi_ns_opens_scope(object_type)) {
status =
acpi_ds_scope_stack_push(node, object_type,
walk_state);
if (ACPI_FAILURE(status)) {
return_ACPI_STATUS(status);
}
}
return_ACPI_STATUS(AE_OK);
}
/*
* Enter the named type into the internal namespace. We enter the name
* as we go downward in the parse tree. Any necessary subobjects that
* involve arguments to the opcode must be created as we go back up the
* parse tree later.
*
* Note: Name may already exist if we are executing a deferred opcode.
*/
if (walk_state->deferred_node) {
/* This name is already in the namespace, get the node */
node = walk_state->deferred_node;
status = AE_OK;
break;
}
flags = ACPI_NS_NO_UPSEARCH;
if (walk_state->pass_number == ACPI_IMODE_EXECUTE) {
/* Execution mode, node cannot already exist, node is temporary */
flags |= ACPI_NS_ERROR_IF_FOUND;
if (!
(walk_state->
parse_flags & ACPI_PARSE_MODULE_LEVEL)) {
flags |= ACPI_NS_TEMPORARY;
}
}
/* Add new entry or lookup existing entry */
status =
acpi_ns_lookup(walk_state->scope_info, buffer_ptr,
object_type, ACPI_IMODE_LOAD_PASS2, flags,
walk_state, &node);
if (ACPI_SUCCESS(status) && (flags & ACPI_NS_TEMPORARY)) {
ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
"***New Node [%4.4s] %p is temporary\n",
acpi_ut_get_node_name(node), node));
}
break;
}
if (ACPI_FAILURE(status)) {
ACPI_ERROR_NAMESPACE(buffer_ptr, status);
return_ACPI_STATUS(status);
}
if (!op) {
/* Create a new op */
op = acpi_ps_alloc_op(walk_state->opcode);
if (!op) {
return_ACPI_STATUS(AE_NO_MEMORY);
}
/* Initialize the new op */
if (node) {
op->named.name = node->name.integer;
}
*out_op = op;
}
/*
* Put the Node in the "op" object that the parser uses, so we
* can get it again quickly when this scope is closed
*/
op->common.node = node;
return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ds_load2_end_op
*
* PARAMETERS: walk_state - Current state of the parse tree walk
*
* RETURN: Status
*
* DESCRIPTION: Ascending callback used during the loading of the namespace,
* both control methods and everything else.
*
******************************************************************************/
/*
 * Ascending (post-order) callback for namespace load pass 2.  By the time
 * this runs, all of the opcode's arguments have been parsed, so named
 * objects (fields, operation regions, methods, etc.) can actually be
 * created and attached to their namespace nodes.
 */
acpi_status acpi_ds_load2_end_op(struct acpi_walk_state *walk_state)
{
	union acpi_parse_object *op;
	acpi_status status = AE_OK;
	acpi_object_type object_type;
	struct acpi_namespace_node *node;
	union acpi_parse_object *arg;
	struct acpi_namespace_node *new_node;
#ifndef ACPI_NO_METHOD_EXECUTION
	u32 i;
	u8 region_space;
#endif

	ACPI_FUNCTION_TRACE(ds_load2_end_op);

	op = walk_state->op;
	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Opcode [%s] Op %p State %p\n",
			  walk_state->op_info->name, op, walk_state));

	/* Check if opcode had an associated namespace object */

	if (!(walk_state->op_info->flags & AML_NSOBJECT)) {
		return_ACPI_STATUS(AE_OK);
	}

	if (op->common.aml_opcode == AML_SCOPE_OP) {
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "Ending scope Op=%p State=%p\n", op,
				  walk_state));
	}

	object_type = walk_state->op_info->object_type;

	/*
	 * Get the Node/name from the earlier lookup
	 * (It was saved in the *op structure)
	 */
	node = op->common.node;

	/*
	 * Put the Node on the object stack (Contains the ACPI Name of
	 * this object)
	 */
	walk_state->operands[0] = (void *)node;
	walk_state->num_operands = 1;

	/* Pop the scope stack */

	if (acpi_ns_opens_scope(object_type) &&
	    (op->common.aml_opcode != AML_INT_METHODCALL_OP)) {
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "(%s) Popping scope for Op %p\n",
				  acpi_ut_get_type_name(object_type), op));

		status = acpi_ds_scope_stack_pop(walk_state);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Named operations are as follows:
	 *
	 * AML_ALIAS
	 * AML_BANKFIELD
	 * AML_CREATEBITFIELD
	 * AML_CREATEBYTEFIELD
	 * AML_CREATEDWORDFIELD
	 * AML_CREATEFIELD
	 * AML_CREATEQWORDFIELD
	 * AML_CREATEWORDFIELD
	 * AML_DATA_REGION
	 * AML_DEVICE
	 * AML_EVENT
	 * AML_FIELD
	 * AML_INDEXFIELD
	 * AML_METHOD
	 * AML_METHODCALL
	 * AML_MUTEX
	 * AML_NAME
	 * AML_NAMEDFIELD
	 * AML_OPREGION
	 * AML_POWERRES
	 * AML_PROCESSOR
	 * AML_SCOPE
	 * AML_THERMALZONE
	 */
	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Create-Load [%s] State=%p Op=%p NamedObj=%p\n",
			  acpi_ps_get_opcode_name(op->common.aml_opcode),
			  walk_state, op, node));

	/* Decode the opcode */

	arg = op->common.value.arg;

	switch (walk_state->op_info->type) {
#ifndef ACPI_NO_METHOD_EXECUTION
	case AML_TYPE_CREATE_FIELD:
		/*
		 * Create the field object, but the field buffer and index must
		 * be evaluated later during the execution phase
		 */
		status = acpi_ds_create_buffer_field(op, walk_state);
		break;

	case AML_TYPE_NAMED_FIELD:
		/*
		 * If we are executing a method, initialize the field
		 */
		if (walk_state->method_node) {
			status = acpi_ds_init_field_objects(op, walk_state);
		}

		/* NOTE(review): 'arg' is dereferenced below as the field's
		 * parent region/bank operand; assumes the parser always
		 * supplies it for these opcodes — confirm against the
		 * AML grammar. */
		switch (op->common.aml_opcode) {
		case AML_INDEX_FIELD_OP:

			status =
			    acpi_ds_create_index_field(op,
						       (acpi_handle) arg->
						       common.node, walk_state);
			break;

		case AML_BANK_FIELD_OP:

			status =
			    acpi_ds_create_bank_field(op, arg->common.node,
						      walk_state);
			break;

		case AML_FIELD_OP:

			status =
			    acpi_ds_create_field(op, arg->common.node,
						 walk_state);
			break;

		default:
			/* All NAMED_FIELD opcodes must be handled above */
			break;
		}
		break;

	case AML_TYPE_NAMED_SIMPLE:

		status = acpi_ds_create_operands(walk_state, arg);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}

		switch (op->common.aml_opcode) {
		case AML_PROCESSOR_OP:

			status = acpi_ex_create_processor(walk_state);
			break;

		case AML_POWER_RES_OP:

			status = acpi_ex_create_power_resource(walk_state);
			break;

		case AML_MUTEX_OP:

			status = acpi_ex_create_mutex(walk_state);
			break;

		case AML_EVENT_OP:

			status = acpi_ex_create_event(walk_state);
			break;

		case AML_ALIAS_OP:

			status = acpi_ex_create_alias(walk_state);
			break;

		default:
			/* Unknown opcode */

			status = AE_OK;
			goto cleanup;
		}

		/*
		 * Delete operands.  Operand 0 is the namespace node pushed
		 * near the top of this function; only the argument operands
		 * (1..n) created by acpi_ds_create_operands are released here.
		 */
		for (i = 1; i < walk_state->num_operands; i++) {
			acpi_ut_remove_reference(walk_state->operands[i]);
			walk_state->operands[i] = NULL;
		}

		break;
#endif				/* ACPI_NO_METHOD_EXECUTION */

	case AML_TYPE_NAMED_COMPLEX:

		switch (op->common.aml_opcode) {
#ifndef ACPI_NO_METHOD_EXECUTION
		case AML_REGION_OP:
		case AML_DATA_REGION_OP:

			if (op->common.aml_opcode == AML_REGION_OP) {
				region_space = (acpi_adr_space_type)
				    ((op->common.value.arg)->common.value.
				     integer);
			} else {
				region_space = ACPI_ADR_SPACE_DATA_TABLE;
			}

			/*
			 * The op_region is not fully parsed at this time. The only valid
			 * argument is the space_id. (We must save the address of the
			 * AML of the address and length operands)
			 *
			 * If we have a valid region, initialize it. The namespace is
			 * unlocked at this point.
			 *
			 * Need to unlock interpreter if it is locked (if we are running
			 * a control method), in order to allow _REG methods to be run
			 * during acpi_ev_initialize_region.
			 */
			if (walk_state->method_node) {
				/*
				 * Executing a method: initialize the region and unlock
				 * the interpreter
				 */
				status =
				    acpi_ex_create_region(op->named.data,
							  op->named.length,
							  region_space,
							  walk_state);
				if (ACPI_FAILURE(status)) {
					/* NOTE(review): plain return here
					 * (no return_ACPI_STATUS trace exit),
					 * unlike the rest of this function —
					 * presumably intentional upstream;
					 * verify. */
					return (status);
				}

				acpi_ex_exit_interpreter();
			}

			status =
			    acpi_ev_initialize_region
			    (acpi_ns_get_attached_object(node), FALSE);

			/* Re-acquire the interpreter released above */

			if (walk_state->method_node) {
				acpi_ex_enter_interpreter();
			}

			if (ACPI_FAILURE(status)) {
				/*
				 * If AE_NOT_EXIST is returned, it is not fatal
				 * because many regions get created before a handler
				 * is installed for said region.
				 */
				if (AE_NOT_EXIST == status) {
					status = AE_OK;
				}
			}
			break;

		case AML_NAME_OP:

			status = acpi_ds_create_node(walk_state, node, op);
			break;

		case AML_METHOD_OP:
			/*
			 * method_op pkg_length name_string method_flags term_list
			 *
			 * Note: We must create the method node/object pair as soon as we
			 * see the method declaration. This allows later pass1 parsing
			 * of invocations of the method (need to know the number of
			 * arguments.)
			 */
			ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
					  "LOADING-Method: State=%p Op=%p NamedObj=%p\n",
					  walk_state, op, op->named.node));

			/* Only create the method object once */

			if (!acpi_ns_get_attached_object(op->named.node)) {
				walk_state->operands[0] =
				    ACPI_CAST_PTR(void, op->named.node);
				walk_state->num_operands = 1;

				status =
				    acpi_ds_create_operands(walk_state,
							    op->common.value.
							    arg);
				if (ACPI_SUCCESS(status)) {
					status =
					    acpi_ex_create_method(op->named.
								  data,
								  op->named.
								  length,
								  walk_state);
				}

				walk_state->operands[0] = NULL;
				walk_state->num_operands = 0;

				if (ACPI_FAILURE(status)) {
					return_ACPI_STATUS(status);
				}
			}
			break;

#endif				/* ACPI_NO_METHOD_EXECUTION */

		default:
			/* All NAMED_COMPLEX opcodes must be handled above */
			break;
		}
		break;

	case AML_CLASS_INTERNAL:

		/* case AML_INT_NAMEPATH_OP: */
		break;

	case AML_CLASS_METHOD_CALL:

		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "RESOLVING-MethodCall: State=%p Op=%p NamedObj=%p\n",
				  walk_state, op, node));

		/*
		 * Lookup the method name and save the Node
		 */
		status =
		    acpi_ns_lookup(walk_state->scope_info,
				   arg->common.value.string, ACPI_TYPE_ANY,
				   ACPI_IMODE_LOAD_PASS2,
				   ACPI_NS_SEARCH_PARENT |
				   ACPI_NS_DONT_OPEN_SCOPE, walk_state,
				   &(new_node));
		if (ACPI_SUCCESS(status)) {
			/*
			 * Make sure that what we found is indeed a method
			 * We didn't search for a method on purpose, to see if the name
			 * would resolve
			 */
			if (new_node->type != ACPI_TYPE_METHOD) {
				status = AE_AML_OPERAND_TYPE;
			}

			/* We could put the returned object (Node) on the object stack for
			 * later, but for now, we will put it in the "op" object that the
			 * parser uses, so we can get it again at the end of this scope
			 */
			op->common.node = new_node;
		} else {
			ACPI_ERROR_NAMESPACE(arg->common.value.string, status);
		}
		break;

	default:
		break;
	}

cleanup:

	/* Remove the Node pushed at the very beginning */

	walk_state->operands[0] = NULL;
	walk_state->num_operands = 0;
	return_ACPI_STATUS(status);
}
| gpl-2.0 |
NamelessRom/android_kernel_yu_msm8916 | drivers/staging/keucr/smilmain.c | 7307 | 45354 | #include <linux/slab.h>
#include "usb.h"
#include "scsiglue.h"
#include "smcommon.h"
#include "smil.h"
int Check_D_LogCHS (WORD *,BYTE *,BYTE *);
void Initialize_D_Media (void);
void PowerOff_D_Media (void);
int Check_D_MediaPower (void);
int Check_D_MediaExist (void);
int Check_D_MediaWP (void);
int Check_D_MediaFmt (struct us_data *);
int Check_D_MediaFmtForEraseAll (struct us_data *);
int Conv_D_MediaAddr (struct us_data *, DWORD);
int Inc_D_MediaAddr (struct us_data *);
int Check_D_FirstSect (void);
int Check_D_LastSect (void);
int Media_D_ReadOneSect (struct us_data *, WORD, BYTE *);
int Media_D_WriteOneSect (struct us_data *, WORD, BYTE *);
int Media_D_CopyBlockHead (struct us_data *);
int Media_D_CopyBlockTail (struct us_data *);
int Media_D_EraseOneBlock (void);
int Media_D_EraseAllBlock (void);
int Copy_D_BlockAll (struct us_data *, DWORD);
int Copy_D_BlockHead (struct us_data *);
int Copy_D_BlockTail (struct us_data *);
int Reassign_D_BlockHead (struct us_data *);
int Assign_D_WriteBlock (void);
int Release_D_ReadBlock (struct us_data *);
int Release_D_WriteBlock (struct us_data *);
int Release_D_CopySector (struct us_data *);
int Copy_D_PhyOneSect (struct us_data *);
int Read_D_PhyOneSect (struct us_data *, WORD, BYTE *);
int Write_D_PhyOneSect (struct us_data *, WORD, BYTE *);
int Erase_D_PhyOneBlock (struct us_data *);
int Set_D_PhyFmtValue (struct us_data *);
int Search_D_CIS (struct us_data *);
int Make_D_LogTable (struct us_data *);
void Check_D_BlockIsFull (void);
int MarkFail_D_PhyOneBlock (struct us_data *);
/* Last error codes reported to callers (xD-specific and generic). */
DWORD ErrXDCode;
DWORD ErrCode;
//BYTE SectBuf[SECTSIZE];
/* Scratch sector-data and redundant-area buffers shared by this module. */
static BYTE WorkBuf[SECTSIZE];
static BYTE Redundant[REDTSIZE];
static BYTE WorkRedund[REDTSIZE];
//WORD Log2Phy[MAX_ZONENUM][MAX_LOGBLOCK];
/* Per-zone logical-to-physical block tables, kmalloc'd on demand
 * (see Make_D_LogTable) and freed in SM_FreeMem. */
static WORD *Log2Phy[MAX_ZONENUM]; // 128 x 1000, Log2Phy[MAX_ZONENUM][MAX_LOGBLOCK];
/* Per-zone bitmap of physical blocks currently assigned. */
static BYTE Assign[MAX_ZONENUM][MAX_BLOCKNUM/8];
static WORD AssignStart[MAX_ZONENUM];
/* Physical blocks involved in the current copy operation. */
WORD ReadBlock;
WORD WriteBlock;
/* Non-zero (ERROR) when the card may have been changed or removed. */
DWORD MediaChange;
static DWORD SectCopyMode;
//BIT Control Macro
/* Bit-array helpers: set/clear/test bit 'b' of byte bitmap 'a'. */
static BYTE BitData[] = { 0x01,0x02,0x04,0x08,0x10,0x20,0x40,0x80 } ;
#define Set_D_Bit(a,b) (a[(BYTE)((b)/8)]|= BitData[(b)%8])
#define Clr_D_Bit(a,b) (a[(BYTE)((b)/8)]&=~BitData[(b)%8])
#define Chk_D_Bit(a,b) (a[(BYTE)((b)/8)] & BitData[(b)%8])
//extern PBYTE SMHostAddr;
/* Compliance flags; presumably set elsewhere from card ID data —
 * TODO confirm against the probe path. */
BYTE IsSSFDCCompliance;
BYTE IsXDCompliance;
//
////Power Control & Media Exist Check Function
////----- Init_D_SmartMedia() --------------------------------------------
//int Init_D_SmartMedia(void)
//{
// int i;
//
// EMCR_Print("Init_D_SmartMedia start\n");
// for (i=0; i<MAX_ZONENUM; i++)
// {
// if (Log2Phy[i]!=NULL)
// {
// EMCR_Print("ExFreePool Zone = %x, Addr = %x\n", i, Log2Phy[i]);
// ExFreePool(Log2Phy[i]);
// Log2Phy[i] = NULL;
// }
// }
//
// Initialize_D_Media();
// return(NO_ERROR);
//}
//----- SM_FreeMem() -------------------------------------------------
/*
 * Release every per-zone logical-to-physical translation table that was
 * allocated on demand, resetting each slot to NULL so the tables can be
 * rebuilt later.  Always returns NO_ERROR.
 */
int SM_FreeMem(void)
{
	int zone;

	pr_info("SM_FreeMem start\n");
	for (zone = 0; zone < MAX_ZONENUM; zone++) {
		if (Log2Phy[zone] == NULL)
			continue;

		pr_info("Free Zone = %x, Addr = %p\n", zone, Log2Phy[zone]);
		kfree(Log2Phy[zone]);
		Log2Phy[zone] = NULL;
	}
	return NO_ERROR;
}
////----- Pwoff_D_SmartMedia() -------------------------------------------
//int Pwoff_D_SmartMedia(void)
//{
// PowerOff_D_Media();
// return(NO_ERROR);
//}
//
////----- Check_D_SmartMedia() -------------------------------------------
//int Check_D_SmartMedia(void)
//{
// if (Check_D_MediaExist())
// return(ErrCode);
//
// return(NO_ERROR);
//}
//
////----- Check_D_Parameter() --------------------------------------------
//int Check_D_Parameter(PFDO_DEVICE_EXTENSION fdoExt,WORD *pcyl,BYTE *phead,BYTE *psect)
//{
// if (Check_D_MediaPower())
// return(ErrCode);
//
// if (Check_D_MediaFmt(fdoExt))
// return(ErrCode);
//
// if (Check_D_LogCHS(pcyl,phead,psect))
// return(ErrCode);
//
// return(NO_ERROR);
//}
//SmartMedia Read/Write/Erase Function
//----- Media_D_ReadSector() -------------------------------------------
/*
 * Read 'count' sectors starting at logical media address 'start' into
 * 'buf'.  The read is performed in runs bounded by the end of the current
 * physical block; the media address is advanced between runs.  Returns
 * NO_ERROR on success, otherwise ErrCode (ERR_EccReadErr on a failed run,
 * or whatever the address-conversion/advance helpers report).
 */
int Media_D_ReadSector(struct us_data *us, DWORD start, WORD count, BYTE *buf)
{
	WORD room, chunk;

	if (Conv_D_MediaAddr(us, start))
		return ErrCode;

	for (;;) {
		/* Largest run that stays inside the current block */
		room = Ssfdc.MaxSectors - Media.Sector;
		chunk = (count > room) ? room : count;

		if (Media_D_ReadOneSect(us, chunk, buf)) {
			ErrCode = ERR_EccReadErr;
			return ErrCode;
		}

		Media.Sector += chunk;
		count -= chunk;
		if (count <= 0)
			break;

		buf += chunk * SECTSIZE;
		if (Inc_D_MediaAddr(us))
			return ErrCode;
	}
	return NO_ERROR;
}
// here
//----- Media_D_CopySector() ------------------------------------------
/* Copy 'count' sectors starting at logical address 'start', using 'buf' as
 * the data source for the hardware copy-block operation: for each block a
 * fresh physical block is assigned, Ssfdc_D_CopyBlock writes it, and the
 * logical mapping is switched over in Release_D_CopySector.
 * Returns NO_ERROR, or ErrCode on failure. */
int Media_D_CopySector(struct us_data *us, DWORD start,WORD count,BYTE *buf)
{
	//DWORD mode;
	//int i;
	WORD len, bn;
	//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
	//ADDRESS_T bb = (ADDRESS_T) &Media;
	/* pr_info("Media_D_CopySector !!!\n"); */
	if (Conv_D_MediaAddr(us, start))
		return(ErrCode);
	while(1)
	{
		if (Assign_D_WriteBlock())
			return(ERROR);
		/* Clamp this run to the end of the current block */
		len = Ssfdc.MaxSectors - Media.Sector;
		if (count > len)
			bn = len;
		else
			bn = count;
		//if (Ssfdc_D_CopyBlock(fdoExt,count,buf,Redundant))
		if (Ssfdc_D_CopyBlock(us,bn,buf,Redundant))
		{
			ErrCode = ERR_WriteFault;
			return(ErrCode);
		}
		/* 0x1F: presumably the last sector index of a 32-sector
		 * block, so Inc_D_MediaAddr below moves straight to the next
		 * block — TODO confirm against Ssfdc.MaxSectors. */
		Media.Sector = 0x1F;
		//if (Release_D_ReadBlock(fdoExt))
		if (Release_D_CopySector(us))
		{
			/* Only a hardware error aborts the copy; other
			 * release failures are deliberately ignored here. */
			if (ErrCode==ERR_HwError)
			{
				ErrCode = ERR_WriteFault;
				return(ErrCode);
			}
		}
		count -= bn;
		if (count<=0)
			break;
		buf += bn * SECTSIZE;
		if (Inc_D_MediaAddr(us))
			return(ErrCode);
	}
	return(NO_ERROR);
}
//----- Release_D_CopySector() ------------------------------------------
/*
 * Commit a block copy: point the current logical block at the freshly
 * written physical block (WriteBlock), release the old physical block
 * (ReadBlock) from the assignment bitmap if one was assigned, and make
 * WriteBlock the current physical block.  Always returns SMSUCCESS.
 */
int Release_D_CopySector(struct us_data *us)
{
	Log2Phy[Media.Zone][Media.LogBlock] = WriteBlock;

	/* Free the previous physical block, unless none was mapped */
	Media.PhyBlock = ReadBlock;
	if (Media.PhyBlock != NO_ASSIGN)
		Clr_D_Bit(Assign[Media.Zone], Media.PhyBlock);

	Media.PhyBlock = WriteBlock;
	return SMSUCCESS;
}
/*
//----- Media_D_WriteSector() ------------------------------------------
int Media_D_WriteSector(PFDO_DEVICE_EXTENSION fdoExt, DWORD start,WORD count,BYTE *buf)
{
int i;
WORD len, bn;
SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
ADDRESS_T bb = (ADDRESS_T) &Media;
//if (Check_D_MediaPower())
// return(ErrCode);
//
//if (Check_D_MediaFmt(fdoExt))
// return(ErrCode);
//
//if (Check_D_MediaWP())
// return(ErrCode);
if (Conv_D_MediaAddr(fdoExt, start))
return(ErrCode);
//ENE_Print("Media_D_WriteSector --- Sector = %x\n", Media.Sector);
if (Check_D_FirstSect())
{
if (Media_D_CopyBlockHead(fdoExt))
{
ErrCode = ERR_WriteFault;
return(ErrCode);
}
}
while(1)
{
if (!Check_D_FirstSect())
{
if (Assign_D_WriteBlock())
return(ErrCode);
}
len = Ssfdc.MaxSectors - Media.Sector;
if (count > len)
bn = len;
else
bn = count;
//for(i=0;i<SECTSIZE;i++)
// SectBuf[i]=*buf++;
//if (Media_D_WriteOneSect(fdoExt, SectBuf))
if (Media_D_WriteOneSect(fdoExt, bn, buf))
{
ErrCode = ERR_WriteFault;
return(ErrCode);
}
Media.Sector += bn - 1;
if (!Check_D_LastSect())
{
if (Release_D_ReadBlock(fdoExt))
{ if (ErrCode==ERR_HwError)
{
ErrCode = ERR_WriteFault;
return(ErrCode);
}
}
}
count -= bn;
if (count<=0)
break;
buf += bn * SECTSIZE;
//if (--count<=0)
// break;
if (Inc_D_MediaAddr(fdoExt))
return(ErrCode);
}
if (!Check_D_LastSect())
return(NO_ERROR);
if (Inc_D_MediaAddr(fdoExt))
return(ErrCode);
if (Media_D_CopyBlockTail(fdoExt))
{
ErrCode = ERR_WriteFault;
return(ErrCode);
}
return(NO_ERROR);
}
//
////----- Media_D_EraseBlock() -------------------------------------------
//int Media_D_EraseBlock(PFDO_DEVICE_EXTENSION fdoExt, DWORD start,WORD count)
//{
// if (Check_D_MediaPower())
// return(ErrCode);
//
// if (Check_D_MediaFmt(fdoExt))
// return(ErrCode);
//
// if (Check_D_MediaWP())
// return(ErrCode);
//
// if (Conv_D_MediaAddr(start))
// return(ErrCode);
//
// while(Check_D_FirstSect()) {
// if (Inc_D_MediaAddr(fdoExt))
// return(ErrCode);
//
// if (--count<=0)
// return(NO_ERROR);
// }
//
// while(1) {
// if (!Check_D_LastSect())
// if (Media_D_EraseOneBlock())
// if (ErrCode==ERR_HwError)
// {
// ErrCode = ERR_WriteFault;
// return(ErrCode);
// }
//
// if (Inc_D_MediaAddr(fdoExt))
// return(ErrCode);
//
// if (--count<=0)
// return(NO_ERROR);
// }
//}
//
////----- Media_D_EraseAll() ---------------------------------------------
//int Media_D_EraseAll(PFDO_DEVICE_EXTENSION fdoExt)
//{
// if (Check_D_MediaPower())
// return(ErrCode);
//
// if (Check_D_MediaFmtForEraseAll(fdoExt))
// return(ErrCode);
//
// if (Check_D_MediaWP())
// return(ErrCode);
//
// if (Media_D_EraseAllBlock())
// return(ErrCode);
//
// return(NO_ERROR);
//}
//SmartMedia Write Function for One Sector Write Mode
//----- Media_D_OneSectWriteStart() ------------------------------------
int Media_D_OneSectWriteStart(PFDO_DEVICE_EXTENSION fdoExt,DWORD start,BYTE *buf)
{
// int i;
// SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
// ADDRESS_T bb = (ADDRESS_T) &Media;
//
// //if (Check_D_MediaPower())
// // return(ErrCode);
// //if (Check_D_MediaFmt(fdoExt))
// // return(ErrCode);
// //if (Check_D_MediaWP())
// // return(ErrCode);
// if (Conv_D_MediaAddr(fdoExt, start))
// return(ErrCode);
//
// if (Check_D_FirstSect())
// if (Media_D_CopyBlockHead(fdoExt))
// {
// ErrCode = ERR_WriteFault;
// return(ErrCode);
// }
//
// if (!Check_D_FirstSect())
// if (Assign_D_WriteBlock())
// return(ErrCode);
//
// //for(i=0;i<SECTSIZE;i++)
// // SectBuf[i]=*buf++;
//
// //if (Media_D_WriteOneSect(fdoExt, SectBuf))
// if (Media_D_WriteOneSect(fdoExt, buf))
// {
// ErrCode = ERR_WriteFault;
// return(ErrCode);
// }
//
// if (!Check_D_LastSect())
// {
// if (Release_D_ReadBlock(fdoExt))
// if (ErrCode==ERR_HwError)
// {
// ErrCode = ERR_WriteFault;
// return(ErrCode);
// }
// }
return(NO_ERROR);
}
//----- Media_D_OneSectWriteNext() -------------------------------------
int Media_D_OneSectWriteNext(PFDO_DEVICE_EXTENSION fdoExt, BYTE *buf)
{
// int i;
// SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
// ADDRESS_T bb = (ADDRESS_T) &Media;
//
// if (Inc_D_MediaAddr(fdoExt))
// return(ErrCode);
//
// if (!Check_D_FirstSect())
// if (Assign_D_WriteBlock())
// return(ErrCode);
//
// //for(i=0;i<SECTSIZE;i++)
// // SectBuf[i]=*buf++;
//
// //if (Media_D_WriteOneSect(fdoExt, SectBuf))
// if (Media_D_WriteOneSect(fdoExt, buf))
// {
// ErrCode = ERR_WriteFault;
// return(ErrCode);
// }
//
// if (!Check_D_LastSect())
// {
// if (Release_D_ReadBlock(fdoExt))
// if (ErrCode==ERR_HwError)
// {
// ErrCode = ERR_WriteFault;
// return(ErrCode);
// }
// }
return(NO_ERROR);
}
//----- Media_D_OneSectWriteFlush() ------------------------------------
int Media_D_OneSectWriteFlush(PFDO_DEVICE_EXTENSION fdoExt)
{
if (!Check_D_LastSect())
return(NO_ERROR);
if (Inc_D_MediaAddr(fdoExt))
return(ErrCode);
if (Media_D_CopyBlockTail(fdoExt))
{
ErrCode = ERR_WriteFault;
return(ErrCode);
}
return(NO_ERROR);
}
//
////LED Tern On/Off Subroutine
////----- SM_EnableLED() -----------------------------------------------
//void SM_EnableLED(PFDO_DEVICE_EXTENSION fdoExt, BOOLEAN enable)
//{
// if (fdoExt->Drive_IsSWLED)
// {
// if (enable)
// Led_D_TernOn();
// else
// Led_D_TernOff();
// }
//}
//
////----- Led_D_TernOn() -------------------------------------------------
//void Led_D_TernOn(void)
//{
// if (Check_D_CardStsChg())
// MediaChange=ERROR;
//
// Cnt_D_LedOn();
//}
//
////----- Led_D_TernOff() ------------------------------------------------
//void Led_D_TernOff(void)
//{
// if (Check_D_CardStsChg())
// MediaChange=ERROR;
//
// Cnt_D_LedOff();
//}
//
////SmartMedia Logical Format Subroutine
////----- Check_D_LogCHS() -----------------------------------------------
//int Check_D_LogCHS(WORD *c,BYTE *h,BYTE *s)
//{
// switch(Ssfdc.Model) {
// case SSFDC1MB: *c=125; *h= 4; *s= 4; break;
// case SSFDC2MB: *c=125; *h= 4; *s= 8; break;
// case SSFDC4MB: *c=250; *h= 4; *s= 8; break;
// case SSFDC8MB: *c=250; *h= 4; *s=16; break;
// case SSFDC16MB: *c=500; *h= 4; *s=16; break;
// case SSFDC32MB: *c=500; *h= 8; *s=16; break;
// case SSFDC64MB: *c=500; *h= 8; *s=32; break;
// case SSFDC128MB: *c=500; *h=16; *s=32; break;
// default: *c= 0; *h= 0; *s= 0; ErrCode = ERR_NoSmartMedia; return(ERROR);
// }
//
// return(SMSUCCESS);
//}
//
////Power Control & Media Exist Check Subroutine
////----- Initialize_D_Media() -------------------------------------------
//void Initialize_D_Media(void)
//{
// ErrCode = NO_ERROR;
// MediaChange = ERROR;
// SectCopyMode = COMPLETED;
// Cnt_D_Reset();
//}
//
////----- PowerOff_D_Media() ---------------------------------------------
//void PowerOff_D_Media(void)
//{
// Cnt_D_PowerOff();
//}
//
////----- Check_D_MediaPower() -------------------------------------------
//int Check_D_MediaPower(void)
//{
// //usleep(56*1024);
// if (Check_D_CardStsChg())
// MediaChange = ERROR;
// //usleep(56*1024);
// if ((!Check_D_CntPower())&&(!MediaChange)) // ¦³ power & Media ¨S³Q change, «h return success
// return(SMSUCCESS);
// //usleep(56*1024);
//
// if (Check_D_CardExist()) // Check if card is not exist, return err
// {
// ErrCode = ERR_NoSmartMedia;
// MediaChange = ERROR;
// return(ERROR);
// }
// //usleep(56*1024);
// if (Cnt_D_PowerOn())
// {
// ErrCode = ERR_NoSmartMedia;
// MediaChange = ERROR;
// return(ERROR);
// }
// //usleep(56*1024);
// Ssfdc_D_Reset(fdoExt);
// //usleep(56*1024);
// return(SMSUCCESS);
//}
//
////-----Check_D_MediaExist() --------------------------------------------
//int Check_D_MediaExist(void)
//{
// if (Check_D_CardStsChg())
// MediaChange = ERROR;
//
// if (!Check_D_CardExist())
// {
// if (!MediaChange)
// return(SMSUCCESS);
//
// ErrCode = ERR_ChangedMedia;
// return(ERROR);
// }
//
// ErrCode = ERR_NoSmartMedia;
//
// return(ERROR);
//}
//
////----- Check_D_MediaWP() ----------------------------------------------
//int Check_D_MediaWP(void)
//{
// if (Ssfdc.Attribute &MWP)
// {
// ErrCode = ERR_WrtProtect;
// return(ERROR);
// }
//
// return(SMSUCCESS);
//}
*/
//SmartMedia Physical Format Test Subroutine
//----- Check_D_MediaFmt() ---------------------------------------------
/*
 * Validate the physical format of the inserted SmartMedia card.
 * A no-op (SMSUCCESS) when no media change is flagged.  On a change, the
 * physical format parameters are re-derived and the CIS located; failure
 * leaves MediaChange set to ERROR and reports ERR_UnknownMedia or
 * ERR_IllegalFmt through ErrCode.
 */
int Check_D_MediaFmt(struct us_data *us)
{
	pr_info("Check_D_MediaFmt\n");

	if (!MediaChange)
		return SMSUCCESS;	/* same card — nothing to re-check */

	/* Assume the worst until the new card checks out */
	MediaChange = ERROR;
	SectCopyMode = COMPLETED;

	if (Set_D_PhyFmtValue(us)) {
		ErrCode = ERR_UnknownMedia;
		return ERROR;
	}

	if (Search_D_CIS(us)) {
		ErrCode = ERR_IllegalFmt;
		return ERROR;
	}

	MediaChange = SMSUCCESS;
	return SMSUCCESS;
}
/*
////----- Check_D_BlockIsFull() ----------------------------------
//void Check_D_BlockIsFull()
//{
// ULONG i, block;
//
// if (IsXDCompliance || IsSSFDCCompliance)
// {
// // If the blocks are full then return write-protect.
// block = Ssfdc.MaxBlocks/8;
// for (Media.Zone=0; Media.Zone<Ssfdc.MaxZones; Media.Zone++)
// {
// if (Log2Phy[Media.Zone]==NULL)
// {
// if (Make_D_LogTable())
// {
// ErrCode = ERR_IllegalFmt;
// return;
// }
// }
//
// for (i=0; i<block; i++)
// {
// if (Assign[Media.Zone][i] != 0xFF)
// return;
// }
// }
// Ssfdc.Attribute |= WP;
// }
//}
//
//
////----- Check_D_MediaFmtForEraseAll() ----------------------------------
//int Check_D_MediaFmtForEraseAll(PFDO_DEVICE_EXTENSION fdoExt)
//{
// MediaChange = ERROR;
// SectCopyMode = COMPLETED;
//
// if (Set_D_PhyFmtValue(fdoExt))
// {
// ErrCode = ERR_UnknownMedia;
// return(ERROR);
// }
//
// if (Search_D_CIS(fdoExt))
// {
// ErrCode = ERR_IllegalFmt;
// return(ERROR);
// }
//
// return(SMSUCCESS);
//}
*/
//SmartMedia Physical Address Control Subroutine
//----- Conv_D_MediaAddr() ---------------------------------------------
/*
 * Convert a logical sector address 'addr' into the module's current media
 * position (Media.Zone / Media.LogBlock / Media.Sector) and look up the
 * matching physical block.  Builds the zone's log->phy table on demand.
 * Returns SMSUCCESS, or ERROR with ErrCode set (ERR_OutOfLBA for an
 * address past the medium, ERR_IllegalFmt if the table cannot be built).
 *
 * Fix: validate Media.Zone BEFORE using it as an index.  The original
 * code dereferenced Log2Phy[Media.Zone] (and could call Make_D_LogTable
 * on the bogus zone) before the Media.Zone < Ssfdc.MaxZones check, which
 * reads out of bounds for out-of-range addresses.
 */
int Conv_D_MediaAddr(struct us_data *us, DWORD addr)
{
	DWORD temp;
	//ULONG zz;
	//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
	//ADDRESS_T bb = (ADDRESS_T) &Media;
	temp = addr/Ssfdc.MaxSectors;
	Media.Zone = (BYTE) (temp/Ssfdc.MaxLogBlocks);
	Media.Sector = (BYTE) (addr%Ssfdc.MaxSectors);
	Media.LogBlock = (WORD) (temp%Ssfdc.MaxLogBlocks);

	/* Bounds check must precede any Log2Phy[Media.Zone] access */
	if (Media.Zone >= Ssfdc.MaxZones)
	{
		ErrCode = ERR_OutOfLBA;
		return(ERROR);
	}

	if (Log2Phy[Media.Zone]==NULL)
	{
		if (Make_D_LogTable(us))
		{
			ErrCode = ERR_IllegalFmt;
			return(ERROR);
		}
	}

	Clr_D_RedundantData(Redundant);
	Set_D_LogBlockAddr(Redundant);
	Media.PhyBlock = Log2Phy[Media.Zone][Media.LogBlock];
	return(SMSUCCESS);
}
//----- Inc_D_MediaAddr() ----------------------------------------------
/* Advance the current media position (Media.{Zone,LogBlock,Sector}) by one
 * sector, crossing logical-block and zone boundaries as needed and
 * refreshing the redundant data and cached physical block on each crossing.
 * Returns SMSUCCESS, or ERROR with ErrCode = ERR_OutOfLBA past the last
 * zone (ERR_IllegalFmt if a zone table cannot be built). */
int Inc_D_MediaAddr(struct us_data *us)
{
	WORD LogBlock = Media.LogBlock;
	//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
	//ADDRESS_T bb = (ADDRESS_T) &Media;
	/* Fast path: still inside the current logical block */
	if (++Media.Sector<Ssfdc.MaxSectors)
		return(SMSUCCESS);
	/* Build the zone's log->phy table on demand */
	if (Log2Phy[Media.Zone]==NULL)
	{
		if (Make_D_LogTable(us))
		{
			ErrCode = ERR_IllegalFmt;
			return(ERROR);
		}
	}
	Media.Sector=0;
	/* Restore the value saved above — presumably Make_D_LogTable() can
	 * clobber Media.LogBlock while scanning the zone; TODO confirm. */
	Media.LogBlock = LogBlock;
	/* Next logical block within the same zone */
	if (++Media.LogBlock<Ssfdc.MaxLogBlocks)
	{
		Clr_D_RedundantData(Redundant);
		Set_D_LogBlockAddr(Redundant);
		Media.PhyBlock=Log2Phy[Media.Zone][Media.LogBlock];
		return(SMSUCCESS);
	}
	Media.LogBlock=0;
	/* First logical block of the next zone */
	if (++Media.Zone<Ssfdc.MaxZones)
	{
		if (Log2Phy[Media.Zone]==NULL)
		{
			if (Make_D_LogTable(us))
			{
				ErrCode = ERR_IllegalFmt;
				return(ERROR);
			}
		}
		Media.LogBlock = 0;
		Clr_D_RedundantData(Redundant);
		Set_D_LogBlockAddr(Redundant);
		Media.PhyBlock=Log2Phy[Media.Zone][Media.LogBlock];
		return(SMSUCCESS);
	}
	Media.Zone=0;
	/* Ran off the end of the medium */
	ErrCode = ERR_OutOfLBA;
	return(ERROR);
}
/*
//----- Check_D_FirstSect() --------------------------------------------
int Check_D_FirstSect(void)
{
SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
ADDRESS_T bb = (ADDRESS_T) &Media;
if (!Media.Sector)
return(SMSUCCESS);
return(ERROR);
}
//----- Check_D_LastSect() ---------------------------------------------
int Check_D_LastSect(void)
{
SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
ADDRESS_T bb = (ADDRESS_T) &Media;
if (Media.Sector<(Ssfdc.MaxSectors-1))
return(ERROR);
return(SMSUCCESS);
}
*/
//SmartMedia Read/Write Subroutine with Retry
//----- Media_D_ReadOneSect() ------------------------------------------
/* Read one run of 'count' sectors via Read_D_PhyOneSect, with optional
 * block-reassignment retry on read errors (RDERR_REASSIGN builds only).
 * Returns SMSUCCESS or ERROR; ErrCode carries the failure reason. */
int Media_D_ReadOneSect(struct us_data *us, WORD count, BYTE *buf)
{
	DWORD err, retry;	/* only used under RDERR_REASSIGN */
	if (!Read_D_PhyOneSect(us, count, buf))
		return(SMSUCCESS);
	/* Hardware and data-status errors are fatal — no retry */
	if (ErrCode==ERR_HwError)
		return(ERROR);
	if (ErrCode==ERR_DataStatus)
		return(ERROR);
#ifdef RDERR_REASSIGN
	/* Write-protected media cannot be rewritten: accept a corrected
	 * read as success, fail anything else. */
	if (Ssfdc.Attribute &MWP)
	{
		if (ErrCode==ERR_CorReadErr)
			return(SMSUCCESS);
		return(ERROR);
	}
	err=ErrCode;
	/* Up to two attempts to relocate the data to a fresh physical
	 * block; an uncorrectable ECC error marks the old block failed. */
	for(retry=0; retry<2; retry++)
	{
		if (Copy_D_BlockAll(us, (err==ERR_EccReadErr)?REQ_FAIL:REQ_ERASE))
		{
			if (ErrCode==ERR_HwError)
				return(ERROR);
			continue;
		}
		/* Copy succeeded: report the original read result */
		ErrCode = err;
		if (ErrCode==ERR_CorReadErr)
			return(SMSUCCESS);
		return(ERROR);
	}
	MediaChange = ERROR;
#else
	if (ErrCode==ERR_CorReadErr) return(SMSUCCESS);
#endif
	return(ERROR);
}
/*
//----- Media_D_WriteOneSect() -----------------------------------------
int Media_D_WriteOneSect(PFDO_DEVICE_EXTENSION fdoExt, WORD count, BYTE *buf)
{
DWORD retry;
SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
ADDRESS_T bb = (ADDRESS_T) &Media;
if (!Write_D_PhyOneSect(fdoExt, count, buf))
return(SMSUCCESS);
if (ErrCode==ERR_HwError)
return(ERROR);
for(retry=1; retry<2; retry++)
{
if (Reassign_D_BlockHead(fdoExt))
{
if (ErrCode==ERR_HwError)
return(ERROR);
continue;
}
if (!Write_D_PhyOneSect(fdoExt, count, buf))
return(SMSUCCESS);
if (ErrCode==ERR_HwError)
return(ERROR);
}
if (Release_D_WriteBlock(fdoExt))
return(ERROR);
ErrCode = ERR_WriteFault;
MediaChange = ERROR;
return(ERROR);
}
//SmartMedia Data Copy Subroutine with Retry
//----- Media_D_CopyBlockHead() ----------------------------------------
int Media_D_CopyBlockHead(PFDO_DEVICE_EXTENSION fdoExt)
{
DWORD retry;
for(retry=0; retry<2; retry++)
{
if (!Copy_D_BlockHead(fdoExt))
return(SMSUCCESS);
if (ErrCode==ERR_HwError)
return(ERROR);
}
MediaChange = ERROR;
return(ERROR);
}
//----- Media_D_CopyBlockTail() ----------------------------------------
int Media_D_CopyBlockTail(PFDO_DEVICE_EXTENSION fdoExt)
{
DWORD retry;
if (!Copy_D_BlockTail(fdoExt))
return(SMSUCCESS);
if (ErrCode==ERR_HwError)
return(ERROR);
for(retry=1; retry<2; retry++)
{
if (Reassign_D_BlockHead(fdoExt))
{
if (ErrCode==ERR_HwError)
return(ERROR);
continue;
}
if (!Copy_D_BlockTail(fdoExt))
return(SMSUCCESS);
if (ErrCode==ERR_HwError)
return(ERROR);
}
if (Release_D_WriteBlock(fdoExt))
return(ERROR);
ErrCode = ERR_WriteFault;
MediaChange = ERROR;
return(ERROR);
}
//
////----- Media_D_EraseOneBlock() ----------------------------------------
//int Media_D_EraseOneBlock(void)
//{
// WORD LogBlock = Media.LogBlock;
// WORD PhyBlock = Media.PhyBlock;
// SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
// ADDRESS_T bb = (ADDRESS_T) &Media;
//
// if (Media.PhyBlock==NO_ASSIGN)
// return(SMSUCCESS);
//
// if (Log2Phy[Media.Zone]==NULL)
// {
// if (Make_D_LogTable())
// {
// ErrCode = ERR_IllegalFmt;
// return(ERROR);
// }
// }
// Media.LogBlock = LogBlock;
// Media.PhyBlock = PhyBlock;
//
// Log2Phy[Media.Zone][Media.LogBlock]=NO_ASSIGN;
//
// if (Erase_D_PhyOneBlock(fdoExt))
// {
// if (ErrCode==ERR_HwError)
// return(ERROR);
// if (MarkFail_D_PhyOneBlock())
// return(ERROR);
//
// ErrCode = ERR_WriteFault;
// return(ERROR);
// }
//
// Clr_D_Bit(Assign[Media.Zone],Media.PhyBlock);
// Media.PhyBlock=NO_ASSIGN;
// return(SMSUCCESS);
//}
//
////SmartMedia Erase Subroutine
////----- Media_D_EraseAllBlock() ----------------------------------------
//int Media_D_EraseAllBlock(void)
//{
// WORD cis=0;
//
// SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
// ADDRESS_T bb = (ADDRESS_T) &Media;
//
// MediaChange = ERROR;
// Media.Sector = 0;
//
// for(Media.Zone=0; Media.Zone<Ssfdc.MaxZones; Media.Zone++)
// for(Media.PhyBlock=0; Media.PhyBlock<Ssfdc.MaxBlocks; Media.PhyBlock++) {
// if (Ssfdc_D_ReadRedtData(Redundant))
// {
// Ssfdc_D_Reset(fdoExt);
// return(ERROR);
// }
//
// Ssfdc_D_Reset(fdoExt);
// if (!Check_D_FailBlock(Redundant))
// {
// if (cis)
// {
// if (Ssfdc_D_EraseBlock(fdoExt))
// {
// ErrCode = ERR_HwError;
// return(ERROR);
// }
//
// if (Ssfdc_D_CheckStatus())
// {
// if (MarkFail_D_PhyOneBlock())
// return(ERROR);
// }
//
// continue;
// }
//
// if (Media.PhyBlock!=CisArea.PhyBlock)
// {
// ErrCode = ERR_IllegalFmt;
// return(ERROR);
// }
//
// cis++;
// }
//
// }
// return(SMSUCCESS);
//}
*/
//SmartMedia Physical Sector Data Copy Subroutine
//----- Copy_D_BlockAll() ----------------------------------------------
/*
 * Copy the current logical block into a freshly assigned physical
 * block, one sector at a time.
 *
 * On entry Media identifies the source block; Assign_D_WriteBlock()
 * picks the destination (globals ReadBlock/WriteBlock).  Media.Sector
 * is preserved across the call.  If mode is REQ_FAIL the copy is
 * flagged so the source block is later marked bad instead of erased.
 *
 * Returns SMSUCCESS with Media.PhyBlock pointing at the new block, or
 * ERROR (ErrCode set; on write fault Media.PhyBlock is restored to the
 * source block).
 */
int Copy_D_BlockAll(struct us_data *us, DWORD mode)
{
	BYTE sect;
	//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
	//ADDRESS_T bb = (ADDRESS_T) &Media;
	/* Remember the caller's sector; the loop below clobbers it. */
	sect=Media.Sector;
	if (Assign_D_WriteBlock())
		return(ERROR);
	if (mode==REQ_FAIL)
		SectCopyMode=REQ_FAIL;
	for(Media.Sector=0; Media.Sector<Ssfdc.MaxSectors; Media.Sector++)
	{
		if (Copy_D_PhyOneSect(us))
		{
			/* Hardware error: abort outright, media state unknown. */
			if (ErrCode==ERR_HwError)
				return(ERROR);
			/* Copy failed: mark the half-written target bad ... */
			if (Release_D_WriteBlock(us))
				return(ERROR);
			/* ... and fall back to the original source block. */
			ErrCode = ERR_WriteFault;
			Media.PhyBlock=ReadBlock;
			Media.Sector=sect;
			return(ERROR);
		}
	}
	/* All sectors copied: retire the source block per SectCopyMode. */
	if (Release_D_ReadBlock(us))
		return(ERROR);
	Media.PhyBlock=WriteBlock;
	Media.Sector=sect;
	return(SMSUCCESS);
}
/*
//----- Copy_D_BlockHead() ---------------------------------------------
int Copy_D_BlockHead(PFDO_DEVICE_EXTENSION fdoExt)
{
BYTE sect;
SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
ADDRESS_T bb = (ADDRESS_T) &Media;
sect=Media.Sector;
if (Assign_D_WriteBlock())
return(ERROR);
for(Media.Sector=0; Media.Sector<sect; Media.Sector++)
{
if (Copy_D_PhyOneSect(fdoExt))
{
if (ErrCode==ERR_HwError)
return(ERROR);
if (Release_D_WriteBlock(fdoExt))
return(ERROR);
ErrCode = ERR_WriteFault;
Media.PhyBlock=ReadBlock;
Media.Sector=sect;
return(ERROR);
}
}
Media.PhyBlock=WriteBlock;
Media.Sector=sect;
return(SMSUCCESS);
}
//----- Copy_D_BlockTail() ---------------------------------------------
int Copy_D_BlockTail(PFDO_DEVICE_EXTENSION fdoExt)
{
BYTE sect;
SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
ADDRESS_T bb = (ADDRESS_T) &Media;
for(sect=Media.Sector; Media.Sector<Ssfdc.MaxSectors; Media.Sector++)
{
if (Copy_D_PhyOneSect(fdoExt))
{
if (ErrCode==ERR_HwError)
return(ERROR);
Media.PhyBlock=WriteBlock;
Media.Sector=sect;
return(ERROR);
}
}
if (Release_D_ReadBlock(fdoExt))
return(ERROR);
Media.PhyBlock=WriteBlock;
Media.Sector=sect;
return(SMSUCCESS);
}
//----- Reassign_D_BlockHead() -----------------------------------------
int Reassign_D_BlockHead(PFDO_DEVICE_EXTENSION fdoExt)
{
DWORD mode;
WORD block;
BYTE sect;
SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
ADDRESS_T bb = (ADDRESS_T) &Media;
mode=SectCopyMode;
block=ReadBlock;
sect=Media.Sector;
if (Assign_D_WriteBlock())
return(ERROR);
SectCopyMode=REQ_FAIL;
for(Media.Sector=0; Media.Sector<sect; Media.Sector++)
{
if (Copy_D_PhyOneSect(fdoExt))
{
if (ErrCode==ERR_HwError)
return(ERROR);
if (Release_D_WriteBlock(fdoExt))
return(ERROR);
ErrCode = ERR_WriteFault;
SectCopyMode=mode;
WriteBlock=ReadBlock;
ReadBlock=block;
Media.Sector=sect;
Media.PhyBlock=WriteBlock;
return(ERROR);
}
}
if (Release_D_ReadBlock(fdoExt))
return(ERROR);
SectCopyMode=mode;
ReadBlock=block;
Media.Sector=sect;
Media.PhyBlock=WriteBlock;
return(SMSUCCESS);
}
*/
//SmartMedia Physical Block Assign/Release Subroutine
//----- Assign_D_WriteBlock() ------------------------------------------
int Assign_D_WriteBlock(void)
{
//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
//ADDRESS_T bb = (ADDRESS_T) &Media;
ReadBlock=Media.PhyBlock;
for(WriteBlock=AssignStart[Media.Zone]; WriteBlock<Ssfdc.MaxBlocks; WriteBlock++)
{
if (!Chk_D_Bit(Assign[Media.Zone],WriteBlock))
{
Set_D_Bit(Assign[Media.Zone],WriteBlock);
AssignStart[Media.Zone]=WriteBlock+1;
Media.PhyBlock=WriteBlock;
SectCopyMode=REQ_ERASE;
//ErrXDCode = NO_ERROR;
return(SMSUCCESS);
}
}
for(WriteBlock=0; WriteBlock<AssignStart[Media.Zone]; WriteBlock++)
{
if (!Chk_D_Bit(Assign[Media.Zone],WriteBlock))
{
Set_D_Bit(Assign[Media.Zone],WriteBlock);
AssignStart[Media.Zone]=WriteBlock+1;
Media.PhyBlock=WriteBlock;
SectCopyMode=REQ_ERASE;
//ErrXDCode = NO_ERROR;
return(SMSUCCESS);
}
}
WriteBlock=NO_ASSIGN;
ErrCode = ERR_WriteFault;
// For xD test
//Ssfdc.Attribute |= WP;
//ErrXDCode = ERR_WrtProtect;
return(ERROR);
}
//----- Release_D_ReadBlock() ------------------------------------------
/*
 * Release_D_ReadBlock() - finish a block copy by retiring the source.
 *
 * Commits the logical->physical mapping to the freshly written block,
 * then disposes of the old (read) block according to the pending
 * SectCopyMode: REQ_ERASE erases and frees it, any other pending mode
 * (e.g. REQ_FAIL) marks it as a fail-block.  On return Media.PhyBlock
 * points at the new block.  Returns SMSUCCESS or ERROR.
 */
int Release_D_ReadBlock(struct us_data *us)
{
	DWORD mode;
	//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
	//ADDRESS_T bb = (ADDRESS_T) &Media;
	/* Consume the pending copy mode; nothing to do if none pending. */
	mode=SectCopyMode;
	SectCopyMode=COMPLETED;
	if (mode==COMPLETED)
		return(SMSUCCESS);
	/* The write target now holds the logical block's data. */
	Log2Phy[Media.Zone][Media.LogBlock]=WriteBlock;
	Media.PhyBlock=ReadBlock;
	/* No previous physical block: nothing to retire. */
	if (Media.PhyBlock==NO_ASSIGN)
	{
		Media.PhyBlock=WriteBlock;
		return(SMSUCCESS);
	}
	if (mode==REQ_ERASE)
	{
		/* Normal path: erase the old block and return it to the pool.
		 * If the erase fails (non-HW), mark the block bad instead. */
		if (Erase_D_PhyOneBlock(us))
		{
			if (ErrCode==ERR_HwError) return(ERROR);
			if (MarkFail_D_PhyOneBlock(us)) return(ERROR);
		}
		else
			Clr_D_Bit(Assign[Media.Zone],Media.PhyBlock);
	}
	/* Any other pending mode (REQ_FAIL): old block is unreliable. */
	else if (MarkFail_D_PhyOneBlock(us))
		return(ERROR);
	Media.PhyBlock=WriteBlock;
	return(SMSUCCESS);
}
//----- Release_D_WriteBlock() -----------------------------------------
int Release_D_WriteBlock(struct us_data *us)
{
//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
//ADDRESS_T bb = (ADDRESS_T) &Media;
SectCopyMode=COMPLETED;
Media.PhyBlock=WriteBlock;
if (MarkFail_D_PhyOneBlock(us))
return(ERROR);
Media.PhyBlock=ReadBlock;
return(SMSUCCESS);
}
//SmartMedia Physical Sector Data Copy Subroutine
//----- Copy_D_PhyOneSect() --------------------------------------------
/*
 * Copy_D_PhyOneSect() - copy one sector from ReadBlock to WriteBlock.
 *
 * Reads Media.Sector of the source block (with one retry preceded by
 * a reset + CIS sanity read), attempts ECC correction, then writes the
 * data into the same sector of the target block.  If the source block
 * is unassigned, dummy data with a clean redundant area is written
 * instead.  An uncorrectable read leaves err==ERROR, in which case the
 * redundant area is rewritten with good ECC and the data-status flag
 * so the corruption stays visible, and SectCopyMode is demoted to
 * REQ_FAIL so the source block will be marked bad.
 *
 * Returns SMSUCCESS, or ERROR with ErrCode = ERR_HwError (controller
 * failure, MediaChange set) or ERR_WriteFault (write did not verify).
 */
int Copy_D_PhyOneSect(struct us_data *us)
{
	int i;
	DWORD err, retry;
	//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
	//ADDRESS_T bb = (ADDRESS_T) &Media;
	/* pr_info("Copy_D_PhyOneSect --- Secotr = %x\n", Media.Sector); */
	if (ReadBlock!=NO_ASSIGN)
	{
		Media.PhyBlock=ReadBlock;
		for(retry=0; retry<2; retry++)
		{
			if (retry!=0)
			{
				/* Second attempt: reset the chip and verify it still
				 * responds sanely by re-reading the CIS sector. */
				Ssfdc_D_Reset(us);
				if (Ssfdc_D_ReadCisSect(us,WorkBuf,WorkRedund))
				{ ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
				if (Check_D_CISdata(WorkBuf,WorkRedund))
				{ ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
			}
			if (Ssfdc_D_ReadSect(us,WorkBuf,WorkRedund))
			{ ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
			/* Sector flagged bad in its redundant area: keep err=ERROR. */
			if (Check_D_DataStatus(WorkRedund))
			{ err=ERROR; break; }
			/* Clean read, or read error that ECC could correct. */
			if (!Check_D_ReadError(WorkRedund))
			{ err=SMSUCCESS; break; }
			if (!Check_D_Correct(WorkBuf,WorkRedund))
			{ err=SMSUCCESS; break; }
			/* Uncorrectable: retry once; source block becomes suspect. */
			err=ERROR;
			SectCopyMode=REQ_FAIL;
		}
	}
	else
	{
		/* No source block: synthesize a blank sector. */
		err=SMSUCCESS;
		for(i=0; i<SECTSIZE; i++)
			WorkBuf[i]=DUMMY_DATA;
		Clr_D_RedundantData(WorkRedund);
	}
	Set_D_LogBlockAddr(WorkRedund);
	if (err==ERROR)
	{
		/* Preserve the "bad data" marking on the copied sector. */
		Set_D_RightECC(WorkRedund);
		Set_D_DataStaus(WorkRedund);
	}
	Media.PhyBlock=WriteBlock;
	if (Ssfdc_D_WriteSectForCopy(us, WorkBuf, WorkRedund))
	{ ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
	if (Ssfdc_D_CheckStatus())
	{ ErrCode = ERR_WriteFault; return(ERROR); }
	Media.PhyBlock=ReadBlock;
	return(SMSUCCESS);
}
//SmartMedia Physical Sector Read/Write/Erase Subroutine
//----- Read_D_PhyOneSect() --------------------------------------------
/*
 * Read_D_PhyOneSect() - read 'count' sectors of the current block
 * into 'buf'.
 *
 * An unassigned physical block yields SECTSIZE bytes of DUMMY_DATA.
 * Otherwise the read is attempted twice; before the retry the chip is
 * reset and the CIS sector re-read as a sanity check.  ECC correction
 * is applied on a reported read error.
 *
 * Returns SMSUCCESS, or ERROR with ErrCode one of ERR_HwError,
 * ERR_DataStatus (sector marked bad), ERR_CorReadErr (correction
 * failed) or ERR_EccReadErr (both attempts failed).
 */
int Read_D_PhyOneSect(struct us_data *us, WORD count, BYTE *buf)
{
	int i;
	DWORD retry;
	//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
	//ADDRESS_T bb = (ADDRESS_T) &Media;
	if (Media.PhyBlock==NO_ASSIGN)
	{
		/* Unmapped block: hand back dummy fill, nothing to read. */
		for(i=0; i<SECTSIZE; i++)
			*buf++=DUMMY_DATA;
		return(SMSUCCESS);
	}
	for(retry=0; retry<2; retry++)
	{
		if (retry!=0)
		{
			/* Retry path: reset and verify the chip via the CIS. */
			Ssfdc_D_Reset(us);
			if (Ssfdc_D_ReadCisSect(us,WorkBuf,WorkRedund))
			{ ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
			if (Check_D_CISdata(WorkBuf,WorkRedund))
			{ ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
		}
		//if (Ssfdc_D_ReadSect(fdoExt,buf,Redundant))
		if (Ssfdc_D_ReadBlock(us,count,buf,Redundant))
		{ ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
		if (Check_D_DataStatus(Redundant))
		{ ErrCode = ERR_DataStatus; return(ERROR); }
		/* No read error flagged, or the ECC corrected it in place. */
		if (!Check_D_ReadError(Redundant))
			return(SMSUCCESS);
		if (!Check_D_Correct(buf,Redundant))
		{ ErrCode = ERR_CorReadErr; return(ERROR); }
	}
	/* Both attempts hit uncorrectable ECC errors. */
	ErrCode = ERR_EccReadErr;
	return(ERROR);
}
/*
//----- Write_D_PhyOneSect() -------------------------------------------
int Write_D_PhyOneSect(PFDO_DEVICE_EXTENSION fdoExt, WORD count, BYTE *buf)
{
SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
ADDRESS_T bb = (ADDRESS_T) &Media;
//if (Ssfdc_D_WriteSect(fdoExt,buf,Redundant))
if (Ssfdc_D_WriteBlock(fdoExt,count,buf,Redundant))
{ ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
if (Ssfdc_D_CheckStatus())
{ ErrCode = ERR_WriteFault; return(ERROR); }
return(SMSUCCESS);
}
*/
//----- Erase_D_PhyOneBlock() ------------------------------------------
int Erase_D_PhyOneBlock(struct us_data *us)
{
//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
//ADDRESS_T bb = (ADDRESS_T) &Media;
if (Ssfdc_D_EraseBlock(us))
{ ErrCode = ERR_HwError; MediaChange=ERROR; return(ERROR); }
if (Ssfdc_D_CheckStatus())
{ ErrCode = ERR_WriteFault; return(ERROR); }
return(SMSUCCESS);
}
//SmartMedia Physical Format Check Local Subroutine
//----- Set_D_PhyFmtValue() --------------------------------------------
int Set_D_PhyFmtValue(struct us_data *us)
{
// PPDO_DEVICE_EXTENSION pdoExt;
// BYTE idcode[4];
// DWORD UserDefData_1, UserDefData_2, Data, mask;
//
// //if (!fdoExt->ChildDeviceObject) return(ERROR);
// //pdoExt = fdoExt->ChildDeviceObject->DeviceExtension;
//
// Ssfdc_D_ReadID(idcode, READ_ID_1);
//
//if (Set_D_SsfdcModel(idcode[1]))
if (Set_D_SsfdcModel(us->SM_DeviceID))
return(ERROR);
// //Use Multi-function pin to differentiate SM and xD.
// UserDefData_1 = ReadPCIReg(fdoExt->BusID, fdoExt->DevID, fdoExt->FuncID, PCI_REG_USER_DEF) & 0x80;
// if (UserDefData_1)
// {
// if ( READ_PORT_BYTE(SM_REG_INT_STATUS) & 0x80 ) fdoExt->DiskType = DISKTYPE_XD;
// if ( READ_PORT_BYTE(SM_REG_INT_STATUS) & 0x40 ) fdoExt->DiskType = DISKTYPE_SM;
//
// if ( IsXDCompliance && (fdoExt->DiskType == DISKTYPE_XD) )
// {
// Ssfdc_D_ReadID(idcode, READ_ID_3);
// if (idcode[2] != 0xB5)
// return(ERROR);
// }
// }
//
// //Use GPIO to differentiate SM and xD.
// UserDefData_2 = ReadPCIReg(fdoExt->BusID, fdoExt->DevID, fdoExt->FuncID, PCI_REG_USER_DEF) >> 8;
// if ( UserDefData_2 )
// {
// Data = ReadPCIReg(fdoExt->BusID, fdoExt->DevID, 0, 0xAC);
//
// mask = 1 << (UserDefData_2-1);
// // 1 : xD , 0 : SM
// if ( Data & mask)
// fdoExt->DiskType = DISKTYPE_XD;
// else
// fdoExt->DiskType = DISKTYPE_SM;
//
// if ( IsXDCompliance && (fdoExt->DiskType == DISKTYPE_XD) )
// {
// Ssfdc_D_ReadID(idcode, READ_ID_3);
// if (idcode[2] != 0xB5)
// return(ERROR);
// }
// }
//
// if ( !(UserDefData_1 | UserDefData_2) )
// {
// // Use UserDefine Register to differentiate SM and xD.
// Ssfdc_D_ReadID(idcode, READ_ID_3);
//
// if (idcode[2] == 0xB5)
// fdoExt->DiskType = DISKTYPE_XD;
// else
// {
// if (!IsXDCompliance)
// fdoExt->DiskType = DISKTYPE_SM;
// else
// return(ERROR);
// }
//
// if (fdoExt->UserDef_DiskType == 0x04) fdoExt->DiskType = DISKTYPE_XD;
// if (fdoExt->UserDef_DiskType == 0x08) fdoExt->DiskType = DISKTYPE_SM;
// }
//
// if (!fdoExt->UserDef_DisableWP)
// {
// if (fdoExt->DiskType == DISKTYPE_SM)
// {
// if (Check_D_SsfdcWP())
// Ssfdc.Attribute|=WP;
// }
// }
return(SMSUCCESS);
}
//----- Search_D_CIS() -------------------------------------------------
/*
 * Search_D_CIS() - locate the Card Information Structure on the media.
 *
 * Scans zone 0 from block 0 for the first block not marked bad, then
 * walks its first CIS_SEARCH_SECT sectors looking for one with good
 * data status whose contents pass the CIS signature check.  On
 * success the location is cached in CisArea.  The chip is reset
 * before every return.  Returns SMSUCCESS or ERROR (no valid CIS,
 * or a read failed).
 */
int Search_D_CIS(struct us_data *us)
{
	//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
	//ADDRESS_T bb = (ADDRESS_T) &Media;
	Media.Zone=0; Media.Sector=0;
	/* Find the first good (non fail-marked) block near the start. */
	for (Media.PhyBlock=0; Media.PhyBlock<(Ssfdc.MaxBlocks-Ssfdc.MaxLogBlocks-1); Media.PhyBlock++)
	{
		if (Ssfdc_D_ReadRedtData(us, Redundant))
		{
			Ssfdc_D_Reset(us);
			return(ERROR);
		}
		if (!Check_D_FailBlock(Redundant))
			break;
	}
	/* Every candidate block was bad: no CIS to be found. */
	if (Media.PhyBlock==(Ssfdc.MaxBlocks-Ssfdc.MaxLogBlocks-1))
	{
		Ssfdc_D_Reset(us);
		return(ERROR);
	}
	while (Media.Sector<CIS_SEARCH_SECT)
	{
		/* Sector 0's redundant area was already read by the loop above. */
		if (Media.Sector)
		{
			if (Ssfdc_D_ReadRedtData(us, Redundant))
			{
				Ssfdc_D_Reset(us);
				return(ERROR);
			}
		}
		if (!Check_D_DataStatus(Redundant))
		{
			if (Ssfdc_D_ReadSect(us,WorkBuf,Redundant))
			{
				Ssfdc_D_Reset(us);
				return(ERROR);
			}
			if (Check_D_CISdata(WorkBuf,Redundant))
			{
				Ssfdc_D_Reset(us);
				return(ERROR);
			}
			/* Valid CIS found: remember where it lives. */
			CisArea.PhyBlock=Media.PhyBlock;
			CisArea.Sector=Media.Sector;
			Ssfdc_D_Reset(us);
			return(SMSUCCESS);
		}
		Media.Sector++;
	}
	Ssfdc_D_Reset(us);
	return(ERROR);
}
//----- Make_D_LogTable() ----------------------------------------------
/*
 * Make_D_LogTable() - build the logical->physical block map for the
 * current zone.
 *
 * Allocates the zone's Log2Phy[] table on first use, clears it and
 * the Assign[] usage bitmap, then walks every physical block reading
 * its redundant area: bad, blank and CIS-area blocks are accounted
 * for, and each block carrying a logical address is entered into the
 * table.  When two physical blocks claim the same logical block (an
 * interrupted copy), the last sector's redundant area of both
 * candidates is consulted to decide which one holds the complete
 * data.  Returns SMSUCCESS, or ERROR on allocation/read failure
 * (the chip is reset on every exit path).
 */
int Make_D_LogTable(struct us_data *us)
{
	WORD phyblock,logblock;
	//SSFDCTYPE_T aa = (SSFDCTYPE_T ) &Ssfdc;
	//ADDRESS_T bb = (ADDRESS_T) &Media;
	if (Log2Phy[Media.Zone]==NULL)
	{
		Log2Phy[Media.Zone] = kmalloc(MAX_LOGBLOCK*sizeof(WORD), GFP_KERNEL);
		/* pr_info("ExAllocatePool Zone = %x, Addr = %x\n",
				Media.Zone, Log2Phy[Media.Zone]); */
		if (Log2Phy[Media.Zone]==NULL)
			return(ERROR);
	}
	Media.Sector=0;
	//for(Media.Zone=0; Media.Zone<MAX_ZONENUM; Media.Zone++)
	//for(Media.Zone=0; Media.Zone<Ssfdc.MaxZones; Media.Zone++)
	{
		/* pr_info("Make_D_LogTable --- MediaZone = 0x%x\n",
			Media.Zone); */
		/* Start from a clean map and usage bitmap for this zone. */
		for(Media.LogBlock=0; Media.LogBlock<Ssfdc.MaxLogBlocks; Media.LogBlock++)
			Log2Phy[Media.Zone][Media.LogBlock]=NO_ASSIGN;
		for(Media.PhyBlock=0; Media.PhyBlock<(MAX_BLOCKNUM/8); Media.PhyBlock++)
			Assign[Media.Zone][Media.PhyBlock]=0x00;
		for(Media.PhyBlock=0; Media.PhyBlock<Ssfdc.MaxBlocks; Media.PhyBlock++)
		{
			/* Blocks up to and including the CIS are reserved in zone 0. */
			if ((!Media.Zone) && (Media.PhyBlock<=CisArea.PhyBlock))
			{
				Set_D_Bit(Assign[Media.Zone],Media.PhyBlock);
				continue;
			}
			if (Ssfdc_D_ReadRedtData(us, Redundant))
			{ Ssfdc_D_Reset(us); return(ERROR); }
			/* Blank block: stays free, not entered in the map. */
			if (!Check_D_DataBlank(Redundant))
				continue;
			Set_D_Bit(Assign[Media.Zone],Media.PhyBlock);
			/* Fail-marked blocks are used (unavailable) but unmapped. */
			if (Check_D_FailBlock(Redundant))
				continue;
			//if (Check_D_DataStatus(Redundant))
			//	continue;
			/* Extract the logical address; skip if it is unreadable
			 * or out of range for this zone. */
			if (Load_D_LogBlockAddr(Redundant))
				continue;
			if (Media.LogBlock>=Ssfdc.MaxLogBlocks)
				continue;
			if (Log2Phy[Media.Zone][Media.LogBlock]==NO_ASSIGN)
			{
				Log2Phy[Media.Zone][Media.LogBlock]=Media.PhyBlock;
				continue;
			}
			/*
			 * Duplicate logical block (interrupted copy): compare the
			 * LAST sector's redundant area of both candidates to pick
			 * the block whose copy completed.
			 */
			phyblock = Media.PhyBlock;
			logblock = Media.LogBlock;
			Media.Sector = (BYTE)(Ssfdc.MaxSectors-1);
			if (Ssfdc_D_ReadRedtData(us, Redundant))
			{ Ssfdc_D_Reset(us); return(ERROR); }
			if (!Load_D_LogBlockAddr(Redundant))
			{
				if (Media.LogBlock==logblock)
				{
					Media.PhyBlock=Log2Phy[Media.Zone][logblock];
					if (Ssfdc_D_ReadRedtData(us, Redundant))
					{ Ssfdc_D_Reset(us); return(ERROR); }
					Media.PhyBlock=phyblock;
					if (!Load_D_LogBlockAddr(Redundant))
					{
						/* Previously mapped block's last sector disagrees:
						 * the newer block wins the mapping. */
						if (Media.LogBlock!=logblock)
						{
							Media.PhyBlock=Log2Phy[Media.Zone][logblock];
							Log2Phy[Media.Zone][logblock]=phyblock;
						}
					}
					else
					{
						Media.PhyBlock=Log2Phy[Media.Zone][logblock];
						Log2Phy[Media.Zone][logblock]=phyblock;
					}
				}
			}
			Media.Sector=0;
// here Not yet
//#ifdef L2P_ERR_ERASE
//			if (!(Ssfdc.Attribute &MWP))
//			{
//				Ssfdc_D_Reset(fdoExt);
//				if (Ssfdc_D_EraseBlock(fdoExt))
//					return(ERROR);
//
//				if (Ssfdc_D_CheckStatus())
//				{
//					if (MarkFail_D_PhyOneBlock())
//						return(ERROR);
//				}
//				else
//					Clr_D_Bit(Assign[Media.Zone],Media.PhyBlock);
//			}
//#else
//			Ssfdc.Attribute|=MWP;
//#endif
			Media.PhyBlock=phyblock;
		} // End for (Media.PhyBlock<Ssfdc.MaxBlocks)
		AssignStart[Media.Zone]=0;
	} // End for (Media.Zone<MAX_ZONENUM)
	Ssfdc_D_Reset(us);
	return(SMSUCCESS);
}
//----- MarkFail_D_PhyOneBlock() ---------------------------------------
/*
 * MarkFail_D_PhyOneBlock() - stamp the current physical block as bad.
 *
 * Writes a fail-block redundant area to every sector of the block so
 * the block is skipped from now on.  The write status is deliberately
 * not checked; only transfer failures abort.  Media.Sector is
 * preserved across the call.  Returns SMSUCCESS, or ERROR with
 * ErrCode = ERR_HwError and MediaChange flagged.
 */
int MarkFail_D_PhyOneBlock(struct us_data *us)
{
	BYTE saved_sector = Media.Sector;

	Set_D_FailBlock(WorkRedund);
	//Ssfdc_D_WriteRedtMode();

	for (Media.Sector = 0; Media.Sector < Ssfdc.MaxSectors;
						Media.Sector++) {
		if (Ssfdc_D_WriteRedtData(us, WorkRedund)) {
			/* Give up on the first hardware error. */
			Ssfdc_D_Reset(us);
			Media.Sector = saved_sector;
			ErrCode = ERR_HwError;
			MediaChange = ERROR;
			return ERROR;
		} /* no status check */
	}

	Ssfdc_D_Reset(us);
	Media.Sector = saved_sector;
	return SMSUCCESS;
}
/*
//
////----- SM_Init() ----------------------------------------------------
//void SM_Init(void)
//{
// _Hw_D_ClrIntCardChg();
// _Hw_D_SetIntMask();
// // For DMA Interrupt
// _Hw_D_ClrDMAIntCardChg();
// _Hw_D_SetDMAIntMask();
//}
//
////----- Media_D_EraseAllRedtData() -----------------------------------
//int Media_D_EraseAllRedtData(DWORD Index, BOOLEAN CheckBlock)
//{
// BYTE i;
//
// if (Check_D_MediaPower())
// return(ErrCode);
//
// if (Check_D_MediaWP())
// return(ErrCode);
//
// for (i=0; i<REDTSIZE; i++)
// WorkRedund[i] = 0xFF;
//
// Media.Zone = (BYTE)Index;
// for (Media.PhyBlock=0; Media.PhyBlock<Ssfdc.MaxBlocks; Media.PhyBlock++)
// {
// if ((!Media.Zone) && (Media.PhyBlock<=CisArea.PhyBlock))
// continue;
//
// if (Ssfdc_D_EraseBlock(fdoExt))
// {
// ErrCode = ERR_HwError;
// return(ERROR);
// }
//
// for(Media.Sector=0; Media.Sector<Ssfdc.MaxSectors; Media.Sector++)
// {
// Ssfdc_D_WriteRedtMode();
//
// if (Ssfdc_D_WriteRedtData(WorkRedund))
// {
// Ssfdc_D_Reset(fdoExt);
// ErrCode = ERR_HwError;
// MediaChange = ERROR;
// return(ERROR);
// } // NO Status Check
// }
//
// Ssfdc_D_Reset(fdoExt);
// }
//
// Ssfdc_D_Reset(fdoExt);
//
// return(SMSUCCESS);
//}
//
////----- Media_D_GetMediaInfo() ---------------------------------------
//DWORD Media_D_GetMediaInfo(PFDO_DEVICE_EXTENSION fdoExt, PIOCTL_MEDIA_INFO_IN pParamIn, PIOCTL_MEDIA_INFO_OUT pParamOut)
//{
// pParamOut->ErrCode = STATUS_CMD_FAIL;
//
// Init_D_SmartMedia();
//
// if (Check_D_MediaPower())
// return (ErrCode==ERR_NoSmartMedia) ? STATUS_CMD_NO_MEDIA : STATUS_CMD_FAIL;
//
// if (Set_D_PhyFmtValue(fdoExt))
// return STATUS_CMD_FAIL;
//
// //usleep(56*1024);
// if (Search_D_CIS(fdoExt))
// return STATUS_CMD_FAIL;
//
// if (Check_D_MediaWP())
// return STATUS_CMD_MEDIA_WP;
//
// pParamOut->PageSize = Ssfdc.MaxSectors;
// pParamOut->BlockSize = Ssfdc.MaxBlocks;
// pParamOut->ZoneSize = Ssfdc.MaxZones;
//
// return STATUS_CMD_SUCCESS;
//}*/
| gpl-2.0 |
hvaibhav/am335x-linux | arch/cris/arch-v32/kernel/setup.c | 11147 | 4410 | /*
* Display CPU info in /proc/cpuinfo.
*
* Copyright (C) 2003, Axis Communications AB.
*/
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/param.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#ifdef CONFIG_PROC_FS
#define HAS_FPU 0x0001
#define HAS_MMU 0x0002
#define HAS_ETHERNET100 0x0004
#define HAS_TOKENRING 0x0008
#define HAS_SCSI 0x0010
#define HAS_ATA 0x0020
#define HAS_USB 0x0040
#define HAS_IRQ_BUG 0x0080
#define HAS_MMU_BUG 0x0100
struct cpu_info {
char *cpu_model;
unsigned short rev;
unsigned short cache_size;
unsigned short flags;
};
/* Some of these model are here for historical reasons only. */
static struct cpu_info cpinfo[] = {
{"ETRAX 1", 0, 0, 0},
{"ETRAX 2", 1, 0, 0},
{"ETRAX 3", 2, 0, 0},
{"ETRAX 4", 3, 0, 0},
{"Simulator", 7, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA},
{"ETRAX 100", 8, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_IRQ_BUG},
{"ETRAX 100", 9, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA},
{"ETRAX 100LX", 10, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_USB
| HAS_MMU | HAS_MMU_BUG},
{"ETRAX 100LX v2", 11, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_USB
| HAS_MMU},
#ifdef CONFIG_ETRAXFS
{"ETRAX FS", 32, 32, HAS_ETHERNET100 | HAS_ATA | HAS_MMU},
#else
{"ARTPEC-3", 32, 32, HAS_ETHERNET100 | HAS_MMU},
#endif
{"Unknown", 0, 0, 0}
};
/*
 * show_cpuinfo - emit one CPU's /proc/cpuinfo entry.
 * @m: seq_file output sink.
 * @v: 1-based CPU number cookie from the seq_file iterator.
 *
 * Matches the chip revision (rdvr()) against the cpinfo[] table,
 * falling back to the trailing "Unknown" entry, and prints the model,
 * cache size, feature flags and bogomips.
 */
int show_cpuinfo(struct seq_file *m, void *v)
{
	int i;
	int cpu = (int)v - 1;
	unsigned long revision;
	struct cpu_info *info;

	/* Default to the "Unknown" sentinel at the end of the table. */
	info = &cpinfo[ARRAY_SIZE(cpinfo) - 1];

#ifdef CONFIG_SMP
	if (!cpu_online(cpu))
		return 0;
#endif

	revision = rdvr();
	for (i = 0; i < ARRAY_SIZE(cpinfo); i++) {
		if (cpinfo[i].rev == revision) {
			info = &cpinfo[i];
			break;
		}
	}

	/*
	 * Do not propagate seq_printf()'s return value: it is -1 when
	 * the seq_file buffer overflows, a condition seq_file recovers
	 * from by retrying with a larger buffer.  Returning -1 here
	 * would turn that retry into a user-visible read failure.
	 */
	seq_printf(m,
		   "processor\t: %d\n"
		   "cpu\t\t: CRIS\n"
		   "cpu revision\t: %lu\n"
		   "cpu model\t: %s\n"
		   "cache size\t: %d KB\n"
		   "fpu\t\t: %s\n"
		   "mmu\t\t: %s\n"
		   "mmu DMA bug\t: %s\n"
		   "ethernet\t: %s Mbps\n"
		   "token ring\t: %s\n"
		   "scsi\t\t: %s\n"
		   "ata\t\t: %s\n"
		   "usb\t\t: %s\n"
		   "bogomips\t: %lu.%02lu\n\n",
		   cpu,
		   revision,
		   info->cpu_model,
		   info->cache_size,
		   info->flags & HAS_FPU ? "yes" : "no",
		   info->flags & HAS_MMU ? "yes" : "no",
		   info->flags & HAS_MMU_BUG ? "yes" : "no",
		   info->flags & HAS_ETHERNET100 ? "10/100" : "10",
		   info->flags & HAS_TOKENRING ? "4/16 Mbps" : "no",
		   info->flags & HAS_SCSI ? "yes" : "no",
		   info->flags & HAS_ATA ? "yes" : "no",
		   info->flags & HAS_USB ? "yes" : "no",
		   (loops_per_jiffy * HZ + 500) / 500000,
		   ((loops_per_jiffy * HZ + 500) / 5000) % 100);
	return 0;
}
#endif /* CONFIG_PROC_FS */
/* Print the CRISv32 port's copyright banner at boot. */
void show_etrax_copyright(void)
{
#ifdef CONFIG_ETRAXFS
	printk(KERN_INFO
	       "Linux/CRISv32 port on ETRAX FS (C) 2003, 2004 Axis Communications AB\n");
#else
	printk(KERN_INFO
	       "Linux/CRISv32 port on ARTPEC-3 (C) 2003-2009 Axis Communications AB\n");
#endif
}
/* I2C devices on bus 0: temperature sensors, optional RTC,
 * GPIO expanders and an ADC. */
static struct i2c_board_info __initdata i2c_info[] = {
	{I2C_BOARD_INFO("camblock", 0x43)},
	{I2C_BOARD_INFO("tmp100", 0x48)},
	{I2C_BOARD_INFO("tmp100", 0x4A)},
	{I2C_BOARD_INFO("tmp100", 0x4C)},
	{I2C_BOARD_INFO("tmp100", 0x4D)},
	{I2C_BOARD_INFO("tmp100", 0x4E)},
#ifdef CONFIG_RTC_DRV_PCF8563
	{I2C_BOARD_INFO("pcf8563", 0x51)},
#endif
#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
	{I2C_BOARD_INFO("vgpio", 0x20)},
	{I2C_BOARD_INFO("vgpio", 0x21)},
#endif
	{I2C_BOARD_INFO("pca9536", 0x41)},
	{I2C_BOARD_INFO("fnp300", 0x40)},
	{I2C_BOARD_INFO("fnp300", 0x42)},
	{I2C_BOARD_INFO("adc101", 0x54)},
};

/* I2C devices on bus 1: same population as bus 0 minus the RTC. */
static struct i2c_board_info __initdata i2c_info2[] = {
	{I2C_BOARD_INFO("camblock", 0x43)},
	{I2C_BOARD_INFO("tmp100", 0x48)},
	{I2C_BOARD_INFO("tmp100", 0x4A)},
	{I2C_BOARD_INFO("tmp100", 0x4C)},
	{I2C_BOARD_INFO("tmp100", 0x4D)},
	{I2C_BOARD_INFO("tmp100", 0x4E)},
#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
	{I2C_BOARD_INFO("vgpio", 0x20)},
	{I2C_BOARD_INFO("vgpio", 0x21)},
#endif
	{I2C_BOARD_INFO("pca9536", 0x41)},
	{I2C_BOARD_INFO("fnp300", 0x40)},
	{I2C_BOARD_INFO("fnp300", 0x42)},
	{I2C_BOARD_INFO("adc101", 0x54)},
};

/* I2C devices on bus 2: a single ADC. */
static struct i2c_board_info __initdata i2c_info3[] = {
	{I2C_BOARD_INFO("adc101", 0x54)},
};
/*
 * Register the fixed sets of I2C devices for the three on-board
 * buses.  Runs once at arch_initcall time; always succeeds.
 */
static int __init etrax_init(void)
{
	i2c_register_board_info(0, i2c_info, ARRAY_SIZE(i2c_info));
	i2c_register_board_info(1, i2c_info2, ARRAY_SIZE(i2c_info2));
	i2c_register_board_info(2, i2c_info3, ARRAY_SIZE(i2c_info3));
	return 0;
}
arch_initcall(etrax_init);
| gpl-2.0 |
umiddelb/linux-fslc | arch/cris/arch-v32/kernel/setup.c | 11147 | 4410 | /*
* Display CPU info in /proc/cpuinfo.
*
* Copyright (C) 2003, Axis Communications AB.
*/
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/delay.h>
#include <linux/param.h>
#include <linux/i2c.h>
#include <linux/platform_device.h>
#ifdef CONFIG_PROC_FS
#define HAS_FPU 0x0001
#define HAS_MMU 0x0002
#define HAS_ETHERNET100 0x0004
#define HAS_TOKENRING 0x0008
#define HAS_SCSI 0x0010
#define HAS_ATA 0x0020
#define HAS_USB 0x0040
#define HAS_IRQ_BUG 0x0080
#define HAS_MMU_BUG 0x0100
struct cpu_info {
char *cpu_model;
unsigned short rev;
unsigned short cache_size;
unsigned short flags;
};
/* Some of these model are here for historical reasons only. */
static struct cpu_info cpinfo[] = {
{"ETRAX 1", 0, 0, 0},
{"ETRAX 2", 1, 0, 0},
{"ETRAX 3", 2, 0, 0},
{"ETRAX 4", 3, 0, 0},
{"Simulator", 7, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA},
{"ETRAX 100", 8, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_IRQ_BUG},
{"ETRAX 100", 9, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA},
{"ETRAX 100LX", 10, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_USB
| HAS_MMU | HAS_MMU_BUG},
{"ETRAX 100LX v2", 11, 8, HAS_ETHERNET100 | HAS_SCSI | HAS_ATA | HAS_USB
| HAS_MMU},
#ifdef CONFIG_ETRAXFS
{"ETRAX FS", 32, 32, HAS_ETHERNET100 | HAS_ATA | HAS_MMU},
#else
{"ARTPEC-3", 32, 32, HAS_ETHERNET100 | HAS_MMU},
#endif
{"Unknown", 0, 0, 0}
};
/*
 * show_cpuinfo - emit one CPU's /proc/cpuinfo entry.
 * @m: seq_file output sink.
 * @v: 1-based CPU number cookie from the seq_file iterator.
 *
 * Matches the chip revision (rdvr()) against the cpinfo[] table,
 * falling back to the trailing "Unknown" entry, and prints the model,
 * cache size, feature flags and bogomips.
 */
int show_cpuinfo(struct seq_file *m, void *v)
{
	int i;
	int cpu = (int)v - 1;
	unsigned long revision;
	struct cpu_info *info;

	/* Default to the "Unknown" sentinel at the end of the table. */
	info = &cpinfo[ARRAY_SIZE(cpinfo) - 1];

#ifdef CONFIG_SMP
	if (!cpu_online(cpu))
		return 0;
#endif

	revision = rdvr();
	for (i = 0; i < ARRAY_SIZE(cpinfo); i++) {
		if (cpinfo[i].rev == revision) {
			info = &cpinfo[i];
			break;
		}
	}

	/*
	 * Do not propagate seq_printf()'s return value: it is -1 when
	 * the seq_file buffer overflows, a condition seq_file recovers
	 * from by retrying with a larger buffer.  Returning -1 here
	 * would turn that retry into a user-visible read failure.
	 */
	seq_printf(m,
		   "processor\t: %d\n"
		   "cpu\t\t: CRIS\n"
		   "cpu revision\t: %lu\n"
		   "cpu model\t: %s\n"
		   "cache size\t: %d KB\n"
		   "fpu\t\t: %s\n"
		   "mmu\t\t: %s\n"
		   "mmu DMA bug\t: %s\n"
		   "ethernet\t: %s Mbps\n"
		   "token ring\t: %s\n"
		   "scsi\t\t: %s\n"
		   "ata\t\t: %s\n"
		   "usb\t\t: %s\n"
		   "bogomips\t: %lu.%02lu\n\n",
		   cpu,
		   revision,
		   info->cpu_model,
		   info->cache_size,
		   info->flags & HAS_FPU ? "yes" : "no",
		   info->flags & HAS_MMU ? "yes" : "no",
		   info->flags & HAS_MMU_BUG ? "yes" : "no",
		   info->flags & HAS_ETHERNET100 ? "10/100" : "10",
		   info->flags & HAS_TOKENRING ? "4/16 Mbps" : "no",
		   info->flags & HAS_SCSI ? "yes" : "no",
		   info->flags & HAS_ATA ? "yes" : "no",
		   info->flags & HAS_USB ? "yes" : "no",
		   (loops_per_jiffy * HZ + 500) / 500000,
		   ((loops_per_jiffy * HZ + 500) / 5000) % 100);
	return 0;
}
#endif /* CONFIG_PROC_FS */
/* Print the CRISv32 port's copyright banner at boot. */
void show_etrax_copyright(void)
{
#ifdef CONFIG_ETRAXFS
	printk(KERN_INFO
	       "Linux/CRISv32 port on ETRAX FS (C) 2003, 2004 Axis Communications AB\n");
#else
	printk(KERN_INFO
	       "Linux/CRISv32 port on ARTPEC-3 (C) 2003-2009 Axis Communications AB\n");
#endif
}
/* I2C devices on bus 0: temperature sensors, optional RTC,
 * GPIO expanders and an ADC. */
static struct i2c_board_info __initdata i2c_info[] = {
	{I2C_BOARD_INFO("camblock", 0x43)},
	{I2C_BOARD_INFO("tmp100", 0x48)},
	{I2C_BOARD_INFO("tmp100", 0x4A)},
	{I2C_BOARD_INFO("tmp100", 0x4C)},
	{I2C_BOARD_INFO("tmp100", 0x4D)},
	{I2C_BOARD_INFO("tmp100", 0x4E)},
#ifdef CONFIG_RTC_DRV_PCF8563
	{I2C_BOARD_INFO("pcf8563", 0x51)},
#endif
#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
	{I2C_BOARD_INFO("vgpio", 0x20)},
	{I2C_BOARD_INFO("vgpio", 0x21)},
#endif
	{I2C_BOARD_INFO("pca9536", 0x41)},
	{I2C_BOARD_INFO("fnp300", 0x40)},
	{I2C_BOARD_INFO("fnp300", 0x42)},
	{I2C_BOARD_INFO("adc101", 0x54)},
};

/* I2C devices on bus 1: same population as bus 0 minus the RTC. */
static struct i2c_board_info __initdata i2c_info2[] = {
	{I2C_BOARD_INFO("camblock", 0x43)},
	{I2C_BOARD_INFO("tmp100", 0x48)},
	{I2C_BOARD_INFO("tmp100", 0x4A)},
	{I2C_BOARD_INFO("tmp100", 0x4C)},
	{I2C_BOARD_INFO("tmp100", 0x4D)},
	{I2C_BOARD_INFO("tmp100", 0x4E)},
#ifdef CONFIG_ETRAX_VIRTUAL_GPIO
	{I2C_BOARD_INFO("vgpio", 0x20)},
	{I2C_BOARD_INFO("vgpio", 0x21)},
#endif
	{I2C_BOARD_INFO("pca9536", 0x41)},
	{I2C_BOARD_INFO("fnp300", 0x40)},
	{I2C_BOARD_INFO("fnp300", 0x42)},
	{I2C_BOARD_INFO("adc101", 0x54)},
};

/* I2C devices on bus 2: a single ADC. */
static struct i2c_board_info __initdata i2c_info3[] = {
	{I2C_BOARD_INFO("adc101", 0x54)},
};
/*
 * Register the fixed sets of I2C devices for the three on-board
 * buses.  Runs once at arch_initcall time; always succeeds.
 */
static int __init etrax_init(void)
{
	i2c_register_board_info(0, i2c_info, ARRAY_SIZE(i2c_info));
	i2c_register_board_info(1, i2c_info2, ARRAY_SIZE(i2c_info2));
	i2c_register_board_info(2, i2c_info3, ARRAY_SIZE(i2c_info3));
	return 0;
}
arch_initcall(etrax_init);
| gpl-2.0 |
Vanuan/pocketbook_free_linux | drivers/staging/meilhaus/meslist.c | 140 | 3612 | /**
* @file me_slist.c
*
* @brief Implements the subdevice list class.
* @note Copyright (C) 2007 Meilhaus Electronic GmbH (support@meilhaus.de)
* @author Guenter Gebhardt
*/
/*
* Copyright (C) 2007 Meilhaus Electronic GmbH (support@meilhaus.de)
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "meerror.h"
#include "medefines.h"
#include "meslist.h"
#include "medebug.h"
/**
 * me_slist_query_number_subdevices() - report the list length.
 * @slist: the subdevice list.
 * @number: out parameter receiving the subdevice count.
 *
 * Always returns ME_ERRNO_SUCCESS.
 */
int me_slist_query_number_subdevices(struct me_slist *slist, int *number)
{
	int count;

	PDEBUG_LOCKS("called.\n");

	count = slist->n;
	*number = count;

	return ME_ERRNO_SUCCESS;
}
/**
 * me_slist_get_number_subdevices() - return the cached list length.
 * @slist: the subdevice list.
 */
unsigned int me_slist_get_number_subdevices(struct me_slist *slist)
{
	unsigned int count = slist->n;

	PDEBUG_LOCKS("called.\n");

	return count;
}
me_subdevice_t *me_slist_get_subdevice(struct me_slist * slist,
unsigned int index)
{
struct list_head *pos;
me_subdevice_t *subdevice = NULL;
unsigned int i = 0;
PDEBUG_LOCKS("called.\n");
if (index >= slist->n) {
PERROR("Index out of range.\n");
return NULL;
}
list_for_each(pos, &slist->head) {
if (i == index) {
subdevice = list_entry(pos, me_subdevice_t, list);
break;
}
++i;
}
return subdevice;
}
/**
 * me_slist_get_subdevice_by_type() - find a subdevice of a given type.
 * @slist: the subdevice list.
 * @start_subdevice: zero-based index at which to start searching.
 * @type: requested subdevice type.
 * @subtype: requested subtype, or ME_SUBTYPE_ANY to match any subtype.
 * @subdevice: out parameter receiving the index of the match.
 *
 * Returns ME_ERRNO_SUCCESS on a match, or
 * ME_ERRNO_NOMORE_SUBDEVICE_TYPE if no subdevice from
 * @start_subdevice onward has the requested type/subtype.
 */
int me_slist_get_subdevice_by_type(struct me_slist *slist,
				   unsigned int start_subdevice,
				   int type, int subtype, int *subdevice)
{
	me_subdevice_t *pos;
	int s_type, s_subtype;
	unsigned int index = 0;

	PDEBUG_LOCKS("called.\n");

	if (start_subdevice >= slist->n) {
		PERROR("Start index out of range.\n");
		return ME_ERRNO_NOMORE_SUBDEVICE_TYPE;
	}

	list_for_each_entry(pos, &slist->head, list) {
		if (index < start_subdevice) {	// Go forward to start subdevice.
			++index;
			continue;
		}

		/* Ask the subdevice itself for its type/subtype. */
		pos->me_subdevice_query_subdevice_type(pos,
						       &s_type, &s_subtype);

		if (subtype == ME_SUBTYPE_ANY) {
			if (s_type == type)
				break;
		} else {
			if ((s_type == type) && (s_subtype == subtype))
				break;
		}

		++index;
	}

	/* index == slist->n means the loop ran off the end: no match. */
	if (index >= slist->n) {
		return ME_ERRNO_NOMORE_SUBDEVICE_TYPE;
	}

	*subdevice = index;

	return ME_ERRNO_SUCCESS;
}
/**
 * me_slist_add_subdevice_tail() - append a subdevice to the list.
 * @slist: the subdevice list.
 * @subdevice: subdevice to append; the list takes no ownership here.
 */
void me_slist_add_subdevice_tail(struct me_slist *slist,
				 me_subdevice_t *subdevice)
{
	PDEBUG_LOCKS("called.\n");

	/* Append and keep the cached element count in sync. */
	list_add_tail(&subdevice->list, &slist->head);
	slist->n++;
}
me_subdevice_t *me_slist_del_subdevice_tail(struct me_slist *slist)
{
struct list_head *last;
me_subdevice_t *subdevice;
PDEBUG_LOCKS("called.\n");
if (list_empty(&slist->head))
return NULL;
last = slist->head.prev;
subdevice = list_entry(last, me_subdevice_t, list);
list_del(last);
--slist->n;
return subdevice;
}
/**
 * me_slist_init() - initialize an empty subdevice list.
 * @slist: the list to initialize.
 *
 * Always returns 0.
 */
int me_slist_init(me_slist_t *slist)
{
	PDEBUG_LOCKS("called.\n");

	slist->n = 0;
	INIT_LIST_HEAD(&slist->head);

	return 0;
}
/**
 * me_slist_deinit() - tear down a subdevice list.
 * @slist: the list to empty.
 *
 * Unlinks every subdevice and invokes its destructor, leaving the
 * list empty with a zero count.
 */
void me_slist_deinit(me_slist_t *slist)
{
	struct list_head *entry;
	me_subdevice_t *victim;

	PDEBUG_LOCKS("called.\n");

	/* Pop entries from the front and let each destroy itself. */
	while (!list_empty(&slist->head)) {
		entry = slist->head.next;
		victim = list_entry(entry, me_subdevice_t, list);
		list_del(entry);
		victim->me_subdevice_destructor(victim);
	}

	slist->n = 0;
}
| gpl-2.0 |
Klozz/Stratos-kernel-GB | drivers/acpi/acpica/evgpe.c | 140 | 20787 | /******************************************************************************
*
* Module Name: evgpe - General Purpose Event handling and dispatch
*
*****************************************************************************/
/*
* Copyright (C) 2000 - 2008, Intel Corp.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions, and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* substantially similar to the "NO WARRANTY" disclaimer below
* ("Disclaimer") and any redistribution must be conditioned upon
* including a substantially similar Disclaimer requirement for further
* binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
* STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGES.
*/
#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"
#define _COMPONENT ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);
/*******************************************************************************
*
* FUNCTION: acpi_ev_set_gpe_type
*
* PARAMETERS: gpe_event_info - GPE to set
* Type - New type
*
* RETURN: Status
*
* DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run)
*
******************************************************************************/
acpi_status
acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_set_gpe_type);

	/* Only the three defined GPE types are acceptable */

	if ((type != ACPI_GPE_TYPE_WAKE) &&
	    (type != ACPI_GPE_TYPE_RUNTIME) &&
	    (type != ACPI_GPE_TYPE_WAKE_RUN)) {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Disable the GPE if currently enabled */

	status = acpi_ev_disable_gpe(gpe_event_info);

	/* Replace the type bits with the (validated) new type */

	gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK;
	gpe_event_info->flags |= type;

	/* Propagate any failure from the disable step */

	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_update_gpe_enable_masks
*
* PARAMETERS: gpe_event_info - GPE to update
* Type - What to do: ACPI_GPE_DISABLE or
* ACPI_GPE_ENABLE
*
* RETURN: Status
*
* DESCRIPTION: Updates GPE register enable masks based on the GPE type
*
******************************************************************************/
acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
				u8 type)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u8 register_bit;
	u8 gpe_type;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	/* One bit per GPE within its owning register */

	register_bit = (u8)
	    (1 <<
	     (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));

	/* Disable: simply clear this GPE from both wake and run masks */

	if (type == ACPI_GPE_DISABLE) {
		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
			       register_bit);
		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
		return_ACPI_STATUS(AE_OK);
	}

	/* Enable: set/clear the masks according to the stored GPE type */

	gpe_type = (u8)(gpe_event_info->flags & ACPI_GPE_TYPE_MASK);

	if (gpe_type == ACPI_GPE_TYPE_WAKE) {
		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
	} else if (gpe_type == ACPI_GPE_TYPE_RUNTIME) {
		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
			       register_bit);
		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
	} else if (gpe_type == ACPI_GPE_TYPE_WAKE_RUN) {
		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
	} else {
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_enable_gpe
*
* PARAMETERS: gpe_event_info - GPE to enable
* write_to_hardware - Enable now, or just mark data structs
* (WAKE GPEs should be deferred)
*
* RETURN: Status
*
* DESCRIPTION: Enable a GPE based on the GPE type
*
******************************************************************************/
acpi_status
acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
		   u8 write_to_hardware)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/* Make sure HW enable masks are updated */

	status =
	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Mark wake-enabled or HW enable, or both */

	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
	case ACPI_GPE_TYPE_WAKE:

		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
		break;

	case ACPI_GPE_TYPE_WAKE_RUN:

		/* WAKE_RUN marks wake, then falls through to the run case */

		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);

		/*lint -fallthrough */

	case ACPI_GPE_TYPE_RUNTIME:

		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);

		/*
		 * Hardware writes are optional so that WAKE GPEs can be
		 * marked in the data structures now and enabled later.
		 */
		if (write_to_hardware) {

			/* Clear the GPE (of stale events), then enable it */

			status = acpi_hw_clear_gpe(gpe_event_info);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Enable the requested runtime GPE */

			/*
			 * NOTE(review): the status of this final hardware
			 * write is not propagated; the function returns
			 * AE_OK below regardless.
			 */
			status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
		}
		break;

	default:
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_disable_gpe
*
* PARAMETERS: gpe_event_info - GPE to disable
*
* RETURN: Status
*
* DESCRIPTION: Disable a GPE based on the GPE type
*
******************************************************************************/
acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_disable_gpe);

	/* Make sure HW enable masks are updated */

	status =
	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Clear the appropriate enabled flags for this GPE */

	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
	case ACPI_GPE_TYPE_WAKE:
		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
		break;

	case ACPI_GPE_TYPE_WAKE_RUN:

		/* WAKE_RUN clears wake, then falls through to the run case */

		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);

		/* fallthrough */

	case ACPI_GPE_TYPE_RUNTIME:

		/* Disable the requested runtime GPE */

		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
		break;

	default:
		/* Unknown type: flags untouched, but still disabled below */
		break;
	}

	/*
	 * Even if we don't know the GPE type, make sure that we always
	 * disable it. low_disable_gpe will just clear the enable bit for this
	 * GPE and write it. It will not write out the current GPE enable mask,
	 * since this may inadvertently enable GPEs too early, if a rogue GPE has
	 * come in during ACPICA initialization - possibly as a result of AML or
	 * other code that has enabled the GPE.
	 */
	status = acpi_hw_low_disable_gpe(gpe_event_info);
	return_ACPI_STATUS(status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_get_gpe_event_info
*
* PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
* gpe_number - Raw GPE number
*
* RETURN: A GPE event_info struct. NULL if not a valid GPE
*
* DESCRIPTION: Returns the event_info struct associated with this GPE.
* Validates the gpe_block and the gpe_number
*
* Should be called only when the GPE lists are semaphore locked
* and not subject to change.
*
******************************************************************************/
struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number)
{
	union acpi_operand_object *obj_desc;
	struct acpi_gpe_block_info *gpe_block;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	/* A Non-NULL gpe_device means this is a GPE Block Device */

	if (gpe_device) {
		obj_desc =
		    acpi_ns_get_attached_object((struct acpi_namespace_node *)
						gpe_device);
		if (!obj_desc || !obj_desc->device.gpe_block) {
			return (NULL);
		}

		gpe_block = obj_desc->device.gpe_block;
		if ((gpe_number >= gpe_block->block_base_number) &&
		    (gpe_number < gpe_block->block_base_number +
		     (gpe_block->register_count * 8))) {
			return (&gpe_block->
				event_info[gpe_number -
					   gpe_block->block_base_number]);
		}

		return (NULL);
	}

	/* A NULL gpe_block means use the permanent FADT GPE block(s) 0/1 */

	for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
		gpe_block = acpi_gbl_gpe_fadt_blocks[i];
		if (!gpe_block) {
			continue;
		}

		if ((gpe_number >= gpe_block->block_base_number) &&
		    (gpe_number < gpe_block->block_base_number +
		     (gpe_block->register_count * 8))) {
			return (&gpe_block->
				event_info[gpe_number -
					   gpe_block->block_base_number]);
		}
	}

	/* The gpe_number was not in the range of either FADT GPE block */

	return (NULL);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_gpe_detect
*
* PARAMETERS: gpe_xrupt_list - Interrupt block for this interrupt.
* Can have multiple GPE blocks attached.
*
* RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
*
* DESCRIPTION: Detect if any GP events have occurred. This function is
* executed at interrupt level.
*
******************************************************************************/
u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/* Read the Status Register */

			status =
			    acpi_read(&status_reg,
				      &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status =
			    acpi_read(&enable_reg,
				      &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  status_reg, enable_reg));

			/* Check if there is anything active at all in this register */

			/* A GPE is "active" only if both status and enable are set */

			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				if (enabled_status_byte & (1 << j)) {
					/*
					 * Found an active GPE. Dispatch the event to a handler
					 * or method.
					 */
					int_status |=
					    acpi_ev_gpe_dispatch(&gpe_block->
						event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

      unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_asynch_execute_gpe_method
*
* PARAMETERS: Context (gpe_event_info) - Info for this GPE
*
* RETURN: None
*
* DESCRIPTION: Perform the actual execution of a GPE control method. This
* function is called from an invocation of acpi_os_execute and
* therefore does NOT execute at interrupt level - so that
* the control method itself is not executed in the context of
* an interrupt handler.
*
******************************************************************************/
static void acpi_ev_asynch_enable_gpe(void *context);
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = (void *)context;
	acpi_status status;
	struct acpi_gpe_event_info local_gpe_event_info;
	struct acpi_evaluate_info *info;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/* Must revalidate the gpe_number/gpe_block */

	/* The GPE block may have been removed while this work was queued */

	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
		return_VOID;
	}

	/* Set the GPE flags for return to enabled state */

	/* Flags only; the hardware enable is deferred (see bottom) */

	(void)acpi_ev_enable_gpe(gpe_event_info, FALSE);

	/*
	 * Take a snapshot of the GPE info for this level - we copy the info to
	 * prevent a race condition with remove_handler/remove_block.
	 */
	ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
		    sizeof(struct acpi_gpe_event_info));

	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/*
	 * Must check for control method type dispatch one more time to avoid a
	 * race with ev_gpe_install_handler
	 */
	if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_METHOD) {

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
			 * control method that corresponds to this GPE
			 */
			info->prefix_node =
			    local_gpe_event_info.dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name
					(local_gpe_event_info.dispatch.
					 method_node)));
		}
	}

	/* Defer enabling of GPE until all notify handlers are done */

	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
			gpe_event_info);
	return_VOID;
}
static void acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status clear_status;

	/*
	 * Level-triggered GPEs keep their status bit set until it is
	 * explicitly cleared after the event has been handled.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		clear_status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(clear_status)) {
			return_VOID;
		}
	}

	/* Re-enable this GPE in hardware */

	(void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
	return_VOID;
}
/*******************************************************************************
*
* FUNCTION: acpi_ev_gpe_dispatch
*
* PARAMETERS: gpe_event_info - Info for this GPE
* gpe_number - Number relative to the parent GPE block
*
* RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
*
* DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
* or method (e.g. _Lxx/_Exx) handler.
*
* This function executes at interrupt level.
*
******************************************************************************/
u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/* Per-GPE statistics for the OS layer */

	acpi_os_gpe_count(gpe_number);

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Dispatch the GPE to either an installed handler, or the control method
	 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
	 * it and do not attempt to run the method. If there is neither a handler
	 * nor a method, we disable this GPE to prevent further such pointless
	 * events from firing.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/*
		 * Invoke the installed handler (at interrupt level)
		 * Ignore return status for now.
		 * TBD: leave GPE disabled on error?
		 */
		(void)gpe_event_info->dispatch.handler->address(gpe_event_info->
								dispatch.
								handler->
								context);

		/* It is now safe to clear level-triggered events. */

		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
		    ACPI_GPE_LEVEL_TRIGGERED) {
			status = acpi_hw_clear_gpe(gpe_event_info);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Unable to clear GPE[%2X]",
						gpe_number));
				return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
			}
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/*
		 * Disable the GPE, so it doesn't keep firing before the method has a
		 * chance to run (it runs asynchronously with interrupts enabled).
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}

		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE[%2X] - event disabled",
					gpe_number));
		}
		break;

	default:

		/* No handler or method to run! */

		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE[%2X], disabling event",
			    gpe_number));

		/*
		 * Disable the GPE. The GPE will remain disabled until the ACPICA
		 * Core Subsystem is restarted, or a handler is installed.
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}
/* ==== end of drivers/acpi/acpica/evgpe.c (license: gpl-2.0) ==== */
/* ==== begin ByteInternet/linux-grsec drivers/usb/gadget/net2272.c ==== */
/*
* Driver for PLX NET2272 USB device controller
*
* Copyright (C) 2005-2006 PLX Technology, Inc.
* Copyright (C) 2006-2011 Analog Devices, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/prefetch.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/usb.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <asm/byteorder.h>
#include <asm/system.h>
#include <asm/unaligned.h>
#include "net2272.h"
#define DRIVER_DESC "PLX NET2272 USB Peripheral Controller"
static const char driver_name[] = "net2272";
static const char driver_vers[] = "2006 October 17/mainline";
static const char driver_desc[] = DRIVER_DESC;
static const char ep0name[] = "ep0";
static const char * const ep_name[] = {
ep0name,
"ep-a", "ep-b", "ep-c",
};
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
#ifdef CONFIG_USB_GADGET_NET2272_DMA
/*
* use_dma: the NET2272 can use an external DMA controller.
* Note that since there is no generic DMA api, some functions,
* notably request_dma, start_dma, and cancel_dma will need to be
* modified for your platform's particular dma controller.
*
* If use_dma is disabled, pio will be used instead.
*/
static int use_dma = 0;
module_param(use_dma, bool, 0644);
/*
* dma_ep: selects the endpoint for use with dma (1=ep-a, 2=ep-b)
* The NET2272 can only use dma for a single endpoint at a time.
* At some point this could be modified to allow either endpoint
* to take control of dma as it becomes available.
*
* Note that DMA should not be used on OUT endpoints unless it can
* be guaranteed that no short packets will arrive on an IN endpoint
* while the DMA operation is pending. Otherwise the OUT DMA will
* terminate prematurely (See NET2272 Errata 630-0213-0101)
*/
static ushort dma_ep = 1;
module_param(dma_ep, ushort, 0644);
/*
* dma_mode: net2272 dma mode setting (see LOCCTL1 definiton):
* mode 0 == Slow DREQ mode
* mode 1 == Fast DREQ mode
* mode 2 == Burst mode
*/
static ushort dma_mode = 2;
module_param(dma_mode, ushort, 0644);
#else
#define use_dma 0
#define dma_ep 1
#define dma_mode 2
#endif
/*
* fifo_mode: net2272 buffer configuration:
* mode 0 == ep-{a,b,c} 512db each
* mode 1 == ep-a 1k, ep-{b,c} 512db
* mode 2 == ep-a 1k, ep-b 1k, ep-c 512db
* mode 3 == ep-a 1k, ep-b disabled, ep-c 512db
*/
static ushort fifo_mode = 0;
module_param(fifo_mode, ushort, 0644);
/*
* enable_suspend: When enabled, the driver will respond to
* USB suspend requests by powering down the NET2272. Otherwise,
* USB suspend requests will be ignored. This is acceptible for
* self-powered devices. For bus powered devices set this to 1.
*/
static ushort enable_suspend = 0;
module_param(enable_suspend, ushort, 0644);
/*
 * Debug-only sanity check: verify the endpoint is NAKing OUT packets;
 * if not, log it and re-assert NAK mode. Compiles to an empty function
 * in non-DEBUG builds.
 *
 * Previously this used "#ifndef DEBUG return;" at the top, which left
 * the variable declaration and the rest of the body as unreachable code
 * in non-DEBUG builds (unused-variable/unreachable-code warnings).
 */
static void assert_out_naking(struct net2272_ep *ep, const char *where)
{
#ifdef DEBUG
	u8 tmp;

	tmp = net2272_ep_read(ep, EP_STAT0);
	if ((tmp & (1 << NAK_OUT_PACKETS)) == 0) {
		dev_dbg(ep->dev->dev, "%s %s %02x !NAK\n",
			ep->ep.name, where, tmp);
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);
	}
#endif
}
#define ASSERT_OUT_NAKING(ep) assert_out_naking(ep, __func__)
/* Release OUT-packet NAKing on this endpoint, if it is currently active */
static void stop_out_naking(struct net2272_ep *ep)
{
	if (net2272_ep_read(ep, EP_STAT0) & (1 << NAK_OUT_PACKETS))
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
}
#define PIPEDIR(bAddress) (usb_pipein(bAddress) ? "in" : "out")
/* Human-readable name for an endpoint transfer type (for debug logs) */
static char *type_string(u8 bmAttributes)
{
	u8 xfer_type = bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	if (xfer_type == USB_ENDPOINT_XFER_BULK)
		return "bulk";
	if (xfer_type == USB_ENDPOINT_XFER_ISOC)
		return "iso";
	if (xfer_type == USB_ENDPOINT_XFER_INT)
		return "intr";
	return "control";
}
/* Human-readable name for an endpoint buffer state (for debug logs) */
static char *buf_state_string(unsigned state)
{
	if (state == BUFF_FREE)
		return "free";
	if (state == BUFF_VALID)
		return "valid";
	if (state == BUFF_LCL)
		return "local";
	if (state == BUFF_USB)
		return "usb";
	return "unknown";
}
/* Human-readable name for the configured DMA mode (for debug logs) */
static char *dma_mode_string(void)
{
	if (!use_dma)
		return "PIO";

	if (dma_mode == 0)
		return "SLOW DREQ";
	if (dma_mode == 1)
		return "FAST DREQ";
	if (dma_mode == 2)
		return "BURST";
	return "invalid";
}
static void net2272_dequeue_all(struct net2272_ep *);
static int net2272_kick_dma(struct net2272_ep *, struct net2272_request *);
static int net2272_fifo_status(struct usb_ep *);
static struct usb_ep_ops net2272_ep_ops;
/*---------------------------------------------------------------------------*/
/*
 * usb_ep_ops.enable: configure and enable a non-ep0 endpoint per the given
 * descriptor. Returns 0 on success, -EINVAL for bad arguments or an
 * already-enabled endpoint, -ESHUTDOWN if no gadget driver is bound,
 * -ERANGE for an illegal bulk maxpacket for the current speed.
 */
static int
net2272_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2272 *dev;
	struct net2272_ep *ep;
	u32 max;
	u8 tmp;
	unsigned long flags;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* Low 11 bits of wMaxPacketSize are the packet size */
	max = usb_endpoint_maxp(desc) & 0x1fff;

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max & 0x7fff;
	ep->desc = desc;

	/* net2272_ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;

	/* set speed-dependent max packet */
	net2272_ep_write(ep, EP_MAXPKT0, max & 0xff);
	net2272_ep_write(ep, EP_MAXPKT1, (max & 0xff00) >> 8);

	/* set type, direction, address; reset fifo counters */
	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
	tmp = usb_endpoint_type(desc);
	if (usb_endpoint_xfer_bulk(desc)) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			return -ERANGE;
		}
	}
	ep->is_iso = usb_endpoint_xfer_isoc(desc) ? 1 : 0;
	/* Assemble the EP_CFG register value from the descriptor fields */
	tmp <<= ENDPOINT_TYPE;
	tmp |= ((desc->bEndpointAddress & 0x0f) << ENDPOINT_NUMBER);
	tmp |= usb_endpoint_dir_in(desc) << ENDPOINT_DIRECTION;
	tmp |= (1 << ENDPOINT_ENABLE);

	/* for OUT transfers, block the rx fifo until a read is posted */
	ep->is_in = usb_endpoint_dir_in(desc);
	if (!ep->is_in)
		net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	net2272_ep_write(ep, EP_CFG, tmp);

	/* enable irqs */
	tmp = (1 << ep->num) | net2272_read(dev, IRQENB0);
	net2272_write(dev, IRQENB0, tmp);

	tmp = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
		| net2272_ep_read(ep, EP_IRQENB);
	net2272_ep_write(ep, EP_IRQENB, tmp);

	tmp = desc->bEndpointAddress;
	dev_dbg(dev->dev, "enabled %s (ep%d%s-%s) max %04x cfg %02x\n",
		_ep->name, tmp & 0x0f, PIPEDIR(tmp),
		type_string(desc->bmAttributes), max,
		net2272_ep_read(ep, EP_CFG));

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
/*
 * Restore an endpoint to its quiescent software and hardware state:
 * clear the descriptor/queue, disable interrupts, re-assert default
 * response modes, and scrub status/FIFO state.
 */
static void net2272_ep_reset(struct net2272_ep *ep)
{
	u8 tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	/* maxpacket is re-established by net2272_enable() */
	ep->ep.maxpacket = ~0;
	ep->ep.ops = &net2272_ep_ops;

	/* disable irqs, endpoint */
	net2272_ep_write(ep, EP_IRQENB, 0);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read.
	 */
	tmp = (1 << NAK_OUT_PACKETS_MODE) | (1 << ALT_NAK_OUT_PACKETS);
	net2272_ep_write(ep, EP_RSPSET, tmp);

	tmp = (1 << INTERRUPT_MODE) | (1 << HIDE_STATUS_PHASE);
	if (ep->num != 0)
		tmp |= (1 << ENDPOINT_TOGGLE) | (1 << ENDPOINT_HALT);

	net2272_ep_write(ep, EP_RSPCLR, tmp);

	/* scrub most status bits, and flush any fifo state */
	net2272_ep_write(ep, EP_STAT0,
			(1 << DATA_IN_TOKEN_INTERRUPT)
			| (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));

	net2272_ep_write(ep, EP_STAT1,
			(1 << TIMEOUT)
			| (1 << USB_OUT_ACK_SENT)
			| (1 << USB_OUT_NAK_SENT)
			| (1 << USB_IN_ACK_RCVD)
			| (1 << USB_IN_NAK_SENT)
			| (1 << USB_STALL_SENT)
			| (1 << LOCAL_OUT_ZLP)
			| (1 << BUFFER_FLUSH));

	/* fifo size is handled seperately */
}
/*
 * usb_ep_ops.disable: tear down a previously enabled non-ep0 endpoint.
 * Completes (dequeues) all outstanding requests and resets the endpoint.
 */
static int net2272_disable(struct usb_ep *_ep)
{
	struct net2272_ep *ep = container_of(_ep, struct net2272_ep, ep);
	unsigned long irq_flags;

	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, irq_flags);

	net2272_dequeue_all(ep);
	net2272_ep_reset(ep);

	dev_vdbg(ep->dev->dev, "disabled %s\n", _ep->name);

	spin_unlock_irqrestore(&ep->dev->lock, irq_flags);

	return 0;
}
/*---------------------------------------------------------------------------*/
/*
 * usb_ep_ops.alloc_request: allocate and initialize a request object.
 * Returns NULL on bad endpoint or allocation failure.
 */
static struct usb_request *
net2272_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2272_request *req;

	if (!_ep)
		return NULL;

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	/* no DMA mapping yet; marked when the request is queued */
	req->req.dma = DMA_ADDR_INVALID;
	INIT_LIST_HEAD(&req->queue);

	return &req->req;
}
/*
 * usb_ep_ops.free_request: release a request previously returned by
 * net2272_alloc_request(). Warns if the request is still queued.
 *
 * Fixes: arguments are now validated before any container_of() use, and
 * the dead local 'ep' (computed from _ep but never used) is removed.
 */
static void
net2272_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2272_request *req;

	if (!_ep || !_req)
		return;

	req = container_of(_req, struct net2272_request, req);
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}
/*
 * Complete a request: unlink it, finalize its status, undo any DMA
 * mapping, and invoke the gadget driver's completion callback.
 * Called with dev->lock held; the lock is dropped around the callback.
 */
static void
net2272_done(struct net2272_ep *ep, struct net2272_request *req, int status)
{
	struct net2272 *dev;
	unsigned stopped = ep->stopped;

	if (ep->num == 0) {
		if (ep->dev->protocol_stall) {
			ep->stopped = 1;
			set_halt(ep);
		}
		allow_status(ep);
	}

	list_del_init(&req->queue);

	/* preserve an error status already recorded on the request */
	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (use_dma && req->mapped) {
		dma_unmap_single(dev->dev, req->req.dma, req->req.length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	}

	if (status && status != -ESHUTDOWN)
		dev_vdbg(dev->dev, "complete %s req %p stat %d len %u/%u buf %p\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length, req->req.buf);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}
/*
 * Write up to 'max' bytes of the request's remaining payload into the
 * endpoint FIFO, 16 bits at a time. Returns the number of bytes written
 * and advances req->req.actual.
 */
static int
net2272_write_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned max)
{
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
	u16 *bufp;
	unsigned length, count;
	u8 tmp;

	length = min(req->req.length - req->req.actual, max);
	req->req.actual += length;

	dev_vdbg(ep->dev->dev, "write packet %s req %p max %u len %u avail %u\n",
		ep->ep.name, req, max, length,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	count = length;
	bufp = (u16 *)buf;

	while (likely(count >= 2)) {
		/* no byte-swap required; chip endian set during init */
		writew(*bufp++, ep_data);
		count -= 2;
	}
	buf = (u8 *)bufp;

	/* write final byte by placing the NET2272 into 8-bit mode */
	if (unlikely(count)) {
		tmp = net2272_read(ep->dev, LOCCTL);
		net2272_write(ep->dev, LOCCTL, tmp & ~(1 << DATA_WIDTH));
		writeb(*buf, ep_data);
		/* restore 16-bit data width */
		net2272_write(ep->dev, LOCCTL, tmp);
	}
	return length;
}
/* returns: 0: still running, 1: completed, negative: errno */
static int
net2272_write_fifo(struct net2272_ep *ep, struct net2272_request *req)
{
u8 *buf;
unsigned count, max;
int status;
dev_vdbg(ep->dev->dev, "write_fifo %s actual %d len %d\n",
ep->ep.name, req->req.actual, req->req.length);
/*
* Keep loading the endpoint until the final packet is loaded,
* or the endpoint buffer is full.
*/
top:
/*
* Clear interrupt status
* - Packet Transmitted interrupt will become set again when the
* host successfully takes another packet
*/
net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_FULL))) {
buf = req->req.buf + req->req.actual;
prefetch(buf);
/* force pagesel */
net2272_ep_read(ep, EP_STAT0);
max = (net2272_ep_read(ep, EP_AVAIL1) << 8) |
(net2272_ep_read(ep, EP_AVAIL0));
if (max < ep->ep.maxpacket)
max = (net2272_ep_read(ep, EP_AVAIL1) << 8)
| (net2272_ep_read(ep, EP_AVAIL0));
count = net2272_write_packet(ep, buf, req, max);
/* see if we are done */
if (req->req.length == req->req.actual) {
/* validate short or zlp packet */
if (count < ep->ep.maxpacket)
set_fifo_bytecount(ep, 0);
net2272_done(ep, req, 0);
if (!list_empty(&ep->queue)) {
req = list_entry(ep->queue.next,
struct net2272_request,
queue);
status = net2272_kick_dma(ep, req);
if (status < 0)
if ((net2272_ep_read(ep, EP_STAT0)
& (1 << BUFFER_EMPTY)))
goto top;
}
return 1;
}
net2272_ep_write(ep, EP_STAT0, (1 << DATA_PACKET_TRANSMITTED_INTERRUPT));
}
return 0;
}
static void
net2272_out_flush(struct net2272_ep *ep)
{
ASSERT_OUT_NAKING(ep);
net2272_ep_write(ep, EP_STAT0, (1 << DATA_OUT_TOKEN_INTERRUPT)
| (1 << DATA_PACKET_RECEIVED_INTERRUPT));
net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
}
/*
 * Read 'avail' bytes from the endpoint FIFO into the request buffer,
 * 16 bits at a time. Returns nonzero if this was a short packet.
 * Advances req->req.actual by 'avail'.
 */
static int
net2272_read_packet(struct net2272_ep *ep, u8 *buf,
	struct net2272_request *req, unsigned avail)
{
	u16 __iomem *ep_data = net2272_reg_addr(ep->dev, EP_DATA);
	unsigned is_short;
	u16 *bufp;

	req->req.actual += avail;

	dev_vdbg(ep->dev->dev, "read packet %s req %p len %u avail %u\n",
		ep->ep.name, req, avail,
		(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0));

	is_short = (avail < ep->ep.maxpacket);

	if (unlikely(avail == 0)) {
		/* remove any zlp from the buffer */
		(void)readw(ep_data);
		return is_short;
	}

	/* Ensure we get the final byte */
	/* NOTE(review): rounding up reads one byte past 'avail' into buf
	 * for odd lengths - caller's buffer must tolerate this. */
	if (unlikely(avail % 2))
		avail++;
	bufp = (u16 *)buf;

	do {
		*bufp++ = readw(ep_data);
		avail -= 2;
	} while (avail);

	/*
	 * To avoid false endpoint available race condition must read
	 * ep stat0 twice in the case of a short transfer
	 */
	if (net2272_ep_read(ep, EP_STAT0) & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT))
		net2272_ep_read(ep, EP_STAT0);

	return is_short;
}
/*
 * Drain the OUT FIFO into the current request until the FIFO is empty
 * or the request completes. Returns 1 when the request was completed
 * (normally or via overflow cleanup), 0 while still in progress.
 */
static int
net2272_read_fifo(struct net2272_ep *ep, struct net2272_request *req)
{
	u8 *buf;
	unsigned is_short;
	int count;
	int tmp;
	int cleanup = 0;
	int status = -1;

	dev_vdbg(ep->dev->dev, "read_fifo %s actual %d len %d\n",
		ep->ep.name, req->req.actual, req->req.length);

 top:
	do {
		buf = req->req.buf + req->req.actual;
		prefetchw(buf);

		/* bytes available in the FIFO for this packet */
		count = (net2272_ep_read(ep, EP_AVAIL1) << 8)
			| net2272_ep_read(ep, EP_AVAIL0);

		net2272_ep_write(ep, EP_STAT0,
			(1 << SHORT_PACKET_TRANSFERRED_INTERRUPT) |
			(1 << DATA_PACKET_RECEIVED_INTERRUPT));

		tmp = req->req.length - req->req.actual;

		/* host sent more than the request has room for */
		if (count > tmp) {
			if ((tmp % ep->ep.maxpacket) != 0) {
				dev_err(ep->dev->dev,
					"%s out fifo %d bytes, expected %d\n",
					ep->ep.name, count, tmp);
				cleanup = 1;
			}
			count = (tmp > 0) ? tmp : 0;
		}

		is_short = net2272_read_packet(ep, buf, req, count);

		/* completion */
		if (unlikely(cleanup || is_short ||
				((req->req.actual == req->req.length)
				 && !req->req.zero))) {

			if (cleanup) {
				/* overflow: discard excess and report -EOVERFLOW */
				net2272_out_flush(ep);
				net2272_done(ep, req, -EOVERFLOW);
			} else
				net2272_done(ep, req, 0);

			/* re-initialize endpoint transfer registers
			 * otherwise they may result in erroneous pre-validation
			 * for subsequent control reads
			 */
			if (unlikely(ep->num == 0)) {
				net2272_ep_write(ep, EP_TRANSFER2, 0);
				net2272_ep_write(ep, EP_TRANSFER1, 0);
				net2272_ep_write(ep, EP_TRANSFER0, 0);
			}

			/* try to start DMA on the next queued request */
			if (!list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
					struct net2272_request, queue);
				status = net2272_kick_dma(ep, req);
				/* DMA unavailable and data pending: continue via PIO */
				if ((status < 0) &&
				    !(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)))
					goto top;
			}
			return 1;
		}
	} while (!(net2272_ep_read(ep, EP_STAT0) & (1 << BUFFER_EMPTY)));
	return 0;
}
/* Push the head request on an endpoint's queue through PIO, in
 * whichever direction the endpoint is configured for.
 */
static void
net2272_pio_advance(struct net2272_ep *ep)
{
	struct net2272_request *head;

	/* nothing queued, nothing to advance */
	if (unlikely(list_empty(&ep->queue)))
		return;

	head = list_entry(ep->queue.next, struct net2272_request, queue);
	if (ep->is_in)
		net2272_write_fifo(ep, head);
	else
		net2272_read_fifo(ep, head);
}
/* Reserve the single dma channel and program both the platform dma
 * engine (RDK1: PLX 9054) and the NET2272 DMAREQ register for one
 * transfer.  @dir is 1 for host-to-device (OUT), 0 for device-to-host.
 * returns 0 on success, else negative errno
 */
static int
net2272_request_dma(struct net2272 *dev, unsigned ep, u32 buf,
	unsigned len, unsigned dir)
{
	dev_vdbg(dev->dev, "request_dma ep %d buf %08x len %d dir %d\n",
		ep, buf, len, dir);

	/* The NET2272 only supports a single dma channel */
	if (dev->dma_busy)
		return -EBUSY;
	/*
	 * EP_TRANSFER (used to determine the number of bytes received
	 * in an OUT transfer) is 24 bits wide; don't ask for more than that.
	 */
	if ((dir == 1) && (len > 0x1000000))
		return -EINVAL;

	dev->dma_busy = 1;

	/* initialize platform's dma */
#ifdef CONFIG_PCI
	/* NET2272 addr, buffer addr, length, etc. */
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		/* Setup PLX 9054 DMA mode */
		writel((1 << LOCAL_BUS_WIDTH) |
			(1 << TA_READY_INPUT_ENABLE) |
			(0 << LOCAL_BURST_ENABLE) |
			(1 << DONE_INTERRUPT_ENABLE) |
			(1 << LOCAL_ADDRESSING_MODE) |
			(1 << DEMAND_MODE) |
			(1 << DMA_EOT_ENABLE) |
			(1 << FAST_SLOW_TERMINATE_MODE_SELECT) |
			(1 << DMA_CHANNEL_INTERRUPT_SELECT),
			dev->rdk1.plx9054_base_addr + DMAMODE0);

		writel(0x100000, dev->rdk1.plx9054_base_addr + DMALADR0);
		writel(buf, dev->rdk1.plx9054_base_addr + DMAPADR0);
		writel(len, dev->rdk1.plx9054_base_addr + DMASIZ0);
		writel((dir << DIRECTION_OF_TRANSFER) |
			(1 << INTERRUPT_AFTER_TERMINAL_COUNT),
			dev->rdk1.plx9054_base_addr + DMADPR0);
		writel((1 << LOCAL_DMA_CHANNEL_0_INTERRUPT_ENABLE) |
			readl(dev->rdk1.plx9054_base_addr + INTCSR),
			dev->rdk1.plx9054_base_addr + INTCSR);

		break;
	}
#endif

	/* arm the NET2272 side of the transfer */
	net2272_write(dev, DMAREQ,
		(0 << DMA_BUFFER_VALID) |
		(1 << DMA_REQUEST_ENABLE) |
		(1 << DMA_CONTROL_DACK) |
		(dev->dma_eot_polarity << EOT_POLARITY) |
		(dev->dma_dack_polarity << DACK_POLARITY) |
		(dev->dma_dreq_polarity << DREQ_POLARITY) |
		((ep >> 1) << DMA_ENDPOINT_SELECT));

	/* NOTE(review): dummy SCRATCH read — presumably flushes the posted
	 * write above before dma starts; confirm against the datasheet
	 */
	(void) net2272_read(dev, SCRATCH);

	return 0;
}
/* Kick the platform-side dma engine; the NET2272 side was armed by
 * net2272_request_dma().
 */
static void
net2272_start_dma(struct net2272 *dev)
{
	/* start platform's dma controller */
#ifdef CONFIG_PCI
	if (dev->dev_id == PCI_DEVICE_ID_RDK1)
		writeb((1 << CHANNEL_ENABLE) | (1 << CHANNEL_START),
			dev->rdk1.plx9054_base_addr + DMACSR0);
#endif
}
/* Try to start @req on @ep by dma.  Only ep-a/ep-b with even-length
 * requests qualify; on any failure a negative errno is returned and the
 * caller falls back to pio.
 * returns 0 on success, else negative errno
 */
static int
net2272_kick_dma(struct net2272_ep *ep, struct net2272_request *req)
{
	unsigned size;
	u8 tmp;

	/* dma is only available on ep-a (1) and ep-b (2) */
	if (!use_dma || (ep->num < 1) || (ep->num > 2) || !ep->dma)
		return -EINVAL;

	/* don't use dma for odd-length transfers
	 * otherwise, we'd need to deal with the last byte with pio
	 */
	if (req->req.length & 1)
		return -EINVAL;

	dev_vdbg(ep->dev->dev, "kick_dma %s req %p dma %08llx\n",
		ep->ep.name, req, (unsigned long long) req->req.dma);

	net2272_ep_write(ep, EP_RSPSET, 1 << ALT_NAK_OUT_PACKETS);

	/* The NET2272 can only use DMA on one endpoint at a time */
	if (ep->dev->dma_busy)
		return -EBUSY;

	/* Make sure we only DMA an even number of bytes (we'll use
	 * pio to complete the transfer)
	 */
	size = req->req.length;
	size &= ~1;

	/* device-to-host transfer */
	if (ep->is_in) {
		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 0))
			/* unable to obtain DMA channel; return error and use pio mode */
			return -EBUSY;
		req->req.actual += size;

	/* host-to-device transfer */
	} else {
		/* sample stat0 before dreq is asserted */
		tmp = net2272_ep_read(ep, EP_STAT0);

		/* initialize platform's dma controller */
		if (net2272_request_dma(ep->dev, ep->num, req->req.dma, size, 1))
			/* unable to obtain DMA channel; return error and use pio mode */
			return -EBUSY;

		if (!(tmp & (1 << BUFFER_EMPTY)))
			ep->not_empty = 1;
		else
			ep->not_empty = 0;

		/* allow the endpoint's buffer to fill */
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);

		/* this transfer completed and data's already in the fifo
		 * return error so pio gets used.
		 */
		if (tmp & (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)) {

			/* deassert dreq */
			net2272_write(ep->dev, DMAREQ,
				(0 << DMA_BUFFER_VALID) |
				(0 << DMA_REQUEST_ENABLE) |
				(1 << DMA_CONTROL_DACK) |
				(ep->dev->dma_eot_polarity << EOT_POLARITY) |
				(ep->dev->dma_dack_polarity << DACK_POLARITY) |
				(ep->dev->dma_dreq_polarity << DREQ_POLARITY) |
				((ep->num >> 1) << DMA_ENDPOINT_SELECT));

			return -EBUSY;
		}
	}

	/* Don't use per-packet interrupts: use dma interrupts only */
	net2272_ep_write(ep, EP_IRQENB, 0);

	net2272_start_dma(ep->dev);
	return 0;
}
/* Abort any in-flight platform dma (busy-waits for the abort to
 * complete on RDK1) and mark the single dma channel free again.
 */
static void net2272_cancel_dma(struct net2272 *dev)
{
#ifdef CONFIG_PCI
	switch (dev->dev_id) {
	case PCI_DEVICE_ID_RDK1:
		writeb(0, dev->rdk1.plx9054_base_addr + DMACSR0);
		writeb(1 << CHANNEL_ABORT, dev->rdk1.plx9054_base_addr + DMACSR0);
		while (!(readb(dev->rdk1.plx9054_base_addr + DMACSR0) &
		         (1 << CHANNEL_DONE)))
			continue;	/* wait for dma to stabilize */

		/* dma abort generates an interrupt */
		writeb(1 << CHANNEL_CLEAR_INTERRUPT,
			dev->rdk1.plx9054_base_addr + DMACSR0);
		break;
	}
#endif
	dev->dma_busy = 0;
}
/*---------------------------------------------------------------------------*/
/* usb_ep_ops.queue: submit @_req on @_ep.  If the queue is idle the
 * transfer is started immediately, preferring dma and falling back to
 * pio; otherwise the request just joins the queue and is started from
 * the interrupt path.  Always returns 0 once the basic argument checks
 * pass.
 */
static int
net2272_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2272_request *req;
	struct net2272_ep *ep;
	struct net2272 *dev;
	unsigned long flags;
	int status = -1;
	u8 s;

	/* container_of is pointer arithmetic only; !_req short-circuits
	 * before req is dereferenced below
	 */
	req = container_of(_req, struct net2272_request, req);
	if (!_req || !_req->complete || !_req->buf
			|| !list_empty(&req->queue))
		return -EINVAL;
	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* set up dma mapping in case the caller didn't */
	if (use_dma && ep->dma && _req->dma == DMA_ADDR_INVALID) {
		_req->dma = dma_map_single(dev->dev, _req->buf, _req->length,
			ep->is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		req->mapped = 1;
	}

	dev_vdbg(dev->dev, "%s queue req %p, len %d buf %p dma %08llx %s\n",
		_ep->name, _req, _req->length, _req->buf,
		(unsigned long long) _req->dma, _req->zero ? "zero" : "!zero");

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		/* maybe there's no control data, just status ack */
		if (ep->num == 0 && _req->length == 0) {
			net2272_done(ep, req, 0);
			dev_vdbg(dev->dev, "%s status ack\n", ep->ep.name);
			goto done;
		}

		/* Return zlp, don't let it block subsequent packets */
		s = net2272_ep_read(ep, EP_STAT0);
		if (s & (1 << BUFFER_EMPTY)) {
			/* Buffer is empty check for a blocking zlp, handle it */
			if ((s & (1 << NAK_OUT_PACKETS)) &&
			    net2272_ep_read(ep, EP_STAT1) & (1 << LOCAL_OUT_ZLP)) {
				dev_dbg(dev->dev, "WARNING: returning ZLP short packet termination!\n");
				/*
				 * Request is going to terminate with a short packet ...
				 * hope the client is ready for it!
				 */
				status = net2272_read_fifo(ep, req);
				/* clear short packet naking */
				net2272_ep_write(ep, EP_STAT0, (1 << NAK_OUT_PACKETS));
				goto done;
			}
		}

		/* try dma first */
		status = net2272_kick_dma(ep, req);

		if (status < 0) {
			/* dma failed (most likely in use by another endpoint)
			 * fallback to pio
			 */
			status = 0;

			if (ep->is_in)
				status = net2272_write_fifo(ep, req);
			else {
				s = net2272_ep_read(ep, EP_STAT0);
				if ((s & (1 << BUFFER_EMPTY)) == 0)
					status = net2272_read_fifo(ep, req);
			}

			/* non-zero means the request already completed;
			 * don't queue it below
			 */
			if (unlikely(status != 0)) {
				if (status > 0)
					status = 0;
				req = NULL;
			}
		}
	}
	if (likely(req != 0))
		list_add_tail(&req->queue, &ep->queue);

	if (likely(!list_empty(&ep->queue)))
		net2272_ep_write(ep, EP_RSPCLR, 1 << ALT_NAK_OUT_PACKETS);
done:
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/* Fail every request still queued on @ep with -ESHUTDOWN.
 * Called with the device spinlock held; leaves the endpoint stopped.
 */
static void
net2272_dequeue_all(struct net2272_ep *ep)
{
	/* block new submissions while we drain */
	ep->stopped = 1;

	while (!list_empty(&ep->queue)) {
		struct net2272_request *r;

		r = list_entry(ep->queue.next,
				struct net2272_request, queue);
		net2272_done(ep, r, -ESHUTDOWN);
	}
}
/* usb_ep_ops.dequeue: dequeue JUST ONE request.  Only the queue head is
 * actually completed (with -ECONNRESET); a request found deeper in the
 * queue is validated but left in place.
 */
static int
net2272_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2272_ep *ep;
	struct net2272_request *req;
	unsigned long flags;
	int stopped;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	/* freeze the queue while we unlink */
	stopped = ep->stopped;
	ep->stopped = 1;

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	/* if the loop ran off the end, 'req' aliases the list head; the
	 * address comparison below detects that without dereferencing it
	 */
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete */
	if (ep->queue.next == &req->queue) {
		dev_dbg(ep->dev->dev, "unlink (%s) pio\n", _ep->name);
		net2272_done(ep, req, -ECONNRESET);
	}
	req = NULL;
	ep->stopped = stopped;

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}
/*---------------------------------------------------------------------------*/
/* Set or clear the halt feature on an endpoint; optionally "wedge" it
 * so a host CLEAR_FEATURE cannot un-halt it.  ep0 halts are recorded as
 * a protocol stall instead.  Fails with -EAGAIN while requests are
 * queued or an IN fifo still holds data.
 */
static int
net2272_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2272_ep *ep;
	unsigned long flags;
	int ret = 0;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && usb_endpoint_xfer_isoc(ep->desc))
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue)) {
		ret = -EAGAIN;
	} else if (ep->is_in && value && net2272_fifo_status(_ep) != 0) {
		ret = -EAGAIN;
	} else {
		dev_vdbg(ep->dev->dev, "%s %s %s\n", _ep->name,
			value ? "set" : "clear",
			wedged ? "wedge" : "halt");
		if (!value) {
			clear_halt(ep);
			ep->wedged = 0;
		} else {
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt(ep);
			if (wedged)
				ep->wedged = 1;
		}
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	return ret;
}
/* usb_ep_ops.set_halt: halt or un-halt without wedging */
static int
net2272_set_halt(struct usb_ep *_ep, int value)
{
	return net2272_set_halt_and_wedge(_ep, value, 0);
}
/* usb_ep_ops.set_wedge: halt permanently (host cannot clear).  ep0 may
 * not be wedged.
 */
static int
net2272_set_wedge(struct usb_ep *_ep)
{
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;

	return net2272_set_halt_and_wedge(_ep, 1, 1);
}
static int
net2272_fifo_status(struct usb_ep *_ep)
{
struct net2272_ep *ep;
u16 avail;
ep = container_of(_ep, struct net2272_ep, ep);
if (!_ep || (!ep->desc && ep->num != 0))
return -ENODEV;
if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
return -ESHUTDOWN;
avail = net2272_ep_read(ep, EP_AVAIL1) << 8;
avail |= net2272_ep_read(ep, EP_AVAIL0);
if (avail > ep->fifo_size)
return -EOVERFLOW;
if (ep->is_in)
avail = ep->fifo_size - avail;
return avail;
}
/* usb_ep_ops.fifo_flush: discard whatever is in the endpoint fifo */
static void
net2272_fifo_flush(struct usb_ep *_ep)
{
	struct net2272_ep *ep;

	ep = container_of(_ep, struct net2272_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return;
	if (!ep->dev->driver)
		return;
	if (ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return;

	net2272_ep_write(ep, EP_STAT1, 1 << BUFFER_FLUSH);
}
/* per-endpoint operations exported to the gadget layer */
static struct usb_ep_ops net2272_ep_ops = {
	.enable        = net2272_enable,
	.disable       = net2272_disable,

	.alloc_request = net2272_alloc_request,
	.free_request  = net2272_free_request,

	.queue         = net2272_queue,
	.dequeue       = net2272_dequeue,

	.set_halt      = net2272_set_halt,
	.set_wedge     = net2272_set_wedge,
	.fifo_status   = net2272_fifo_status,
	.fifo_flush    = net2272_fifo_flush,
};
/*---------------------------------------------------------------------------*/
/* usb_gadget_ops.get_frame: current USB frame number from the chip */
static int
net2272_get_frame(struct usb_gadget *_gadget)
{
	struct net2272 *dev;
	unsigned long flags;
	u16 frame;

	if (!_gadget)
		return -ENODEV;

	dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	frame = (net2272_read(dev, FRAME1) << 8) | net2272_read(dev, FRAME0);
	spin_unlock_irqrestore(&dev->lock, flags);

	return frame;
}
/* usb_gadget_ops.wakeup: signal resume, but only if remote wakeup is
 * currently enabled in USBCTL0.
 */
static int
net2272_wakeup(struct usb_gadget *_gadget)
{
	struct net2272 *dev;
	unsigned long flags;
	u8 ctl0;

	if (!_gadget)
		return 0;

	dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	ctl0 = net2272_read(dev, USBCTL0);
	if (ctl0 & (1 << IO_WAKEUP_ENABLE))
		net2272_write(dev, USBCTL1, (1 << GENERATE_RESUME));
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/* usb_gadget_ops.set_selfpowered: record the self-powered flag that is
 * later reported via GET_STATUS(device).
 */
static int
net2272_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	struct net2272 *dev;

	if (!_gadget)
		return -ENODEV;

	dev = container_of(_gadget, struct net2272, gadget);
	dev->is_selfpowered = value;

	return 0;
}
/* usb_gadget_ops.pullup: connect/disconnect from the bus by toggling
 * USB_DETECT_ENABLE; also remembers the soft-connect state.
 */
static int
net2272_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2272 *dev;
	unsigned long flags;
	u8 ctl0;

	if (!_gadget)
		return -ENODEV;

	dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	ctl0 = net2272_read(dev, USBCTL0);
	dev->softconnect = (is_on != 0);
	if (is_on)
		ctl0 |= (1 << USB_DETECT_ENABLE);
	else
		ctl0 &= ~(1 << USB_DETECT_ENABLE);
	net2272_write(dev, USBCTL0, ctl0);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}
/* defined below; referenced from the ops table */
static int net2272_start(struct usb_gadget *_gadget,
	struct usb_gadget_driver *driver);
static int net2272_stop(struct usb_gadget *_gadget,
	struct usb_gadget_driver *driver);

/* gadget-level operations handed to the UDC core */
static const struct usb_gadget_ops net2272_ops = {
	.get_frame       = net2272_get_frame,
	.wakeup          = net2272_wakeup,
	.set_selfpowered = net2272_set_selfpowered,
	.pullup          = net2272_pullup,
	.udc_start       = net2272_start,
	.udc_stop        = net2272_stop,
};
/*---------------------------------------------------------------------------*/
/* sysfs "registers" attribute: dump controller, dma, usb-control and
 * per-endpoint register state into @buf (at most PAGE_SIZE bytes).
 */
static ssize_t
net2272_show_registers(struct device *_dev, struct device_attribute *attr, char *buf)
{
	struct net2272 *dev;
	char *next;
	unsigned size, t;
	unsigned long flags;
	u8 t1, t2;
	int i;
	const char *s;

	dev = dev_get_drvdata(_dev);
	next = buf;
	size = PAGE_SIZE;
	spin_lock_irqsave(&dev->lock, flags);

	if (dev->driver)
		s = dev->driver->driver.name;
	else
		s = "(none)";

	/* Main Control Registers */
	t = scnprintf(next, size, "%s version %s,"
		"chiprev %02x, locctl %02x\n"
		"irqenb0 %02x irqenb1 %02x "
		"irqstat0 %02x irqstat1 %02x\n",
		driver_name, driver_vers, dev->chiprev,
		net2272_read(dev, LOCCTL),
		net2272_read(dev, IRQENB0),
		net2272_read(dev, IRQENB1),
		net2272_read(dev, IRQSTAT0),
		net2272_read(dev, IRQSTAT1));
	size -= t;
	next += t;

	/* DMA */
	t1 = net2272_read(dev, DMAREQ);
	t = scnprintf(next, size, "\ndmareq %02x: %s %s%s%s%s\n",
		t1, ep_name[(t1 & 0x01) + 1],
		t1 & (1 << DMA_CONTROL_DACK) ? "dack " : "",
		t1 & (1 << DMA_REQUEST_ENABLE) ? "reqenb " : "",
		t1 & (1 << DMA_REQUEST) ? "req " : "",
		t1 & (1 << DMA_BUFFER_VALID) ? "valid " : "");
	size -= t;
	next += t;

	/* USB Control Registers */
	t1 = net2272_read(dev, USBCTL1);
	if (t1 & (1 << VBUS_PIN)) {
		if (t1 & (1 << USB_HIGH_SPEED))
			s = "high speed";
		else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
			s = "powered";
		else
			s = "full speed";
	} else
		s = "not attached";
	t = scnprintf(next, size,
		"usbctl0 %02x usbctl1 %02x addr 0x%02x (%s)\n",
		net2272_read(dev, USBCTL0), t1,
		net2272_read(dev, OURADDR), s);
	size -= t;
	next += t;

	/* Endpoint Registers */
	for (i = 0; i < 4; ++i) {
		struct net2272_ep *ep;

		ep = &dev->ep[i];
		/* skip unconfigured endpoints (ep0 is always shown) */
		if (i && !ep->desc)
			continue;

		t1 = net2272_ep_read(ep, EP_CFG);
		t2 = net2272_ep_read(ep, EP_RSPSET);
		t = scnprintf(next, size,
			"\n%s\tcfg %02x rsp (%02x) %s%s%s%s%s%s%s%s"
			"irqenb %02x\n",
			ep->ep.name, t1, t2,
			(t2 & (1 << ALT_NAK_OUT_PACKETS)) ? "NAK " : "",
			(t2 & (1 << HIDE_STATUS_PHASE)) ? "hide " : "",
			(t2 & (1 << AUTOVALIDATE)) ? "auto " : "",
			(t2 & (1 << INTERRUPT_MODE)) ? "interrupt " : "",
			(t2 & (1 << CONTROL_STATUS_PHASE_HANDSHAKE)) ? "status " : "",
			(t2 & (1 << NAK_OUT_PACKETS_MODE)) ? "NAKmode " : "",
			(t2 & (1 << ENDPOINT_TOGGLE)) ? "DATA1 " : "DATA0 ",
			(t2 & (1 << ENDPOINT_HALT)) ? "HALT " : "",
			net2272_ep_read(ep, EP_IRQENB));
		size -= t;
		next += t;

		t = scnprintf(next, size,
			"\tstat0 %02x stat1 %02x avail %04x "
			"(ep%d%s-%s)%s\n",
			net2272_ep_read(ep, EP_STAT0),
			net2272_ep_read(ep, EP_STAT1),
			(net2272_ep_read(ep, EP_AVAIL1) << 8) | net2272_ep_read(ep, EP_AVAIL0),
			t1 & 0x0f,
			ep->is_in ? "in" : "out",
			type_string(t1 >> 5),
			ep->stopped ? "*" : "");
		size -= t;
		next += t;

		t = scnprintf(next, size,
			"\tep_transfer %06x\n",
			((net2272_ep_read(ep, EP_TRANSFER2) & 0xff) << 16) |
			((net2272_ep_read(ep, EP_TRANSFER1) & 0xff) << 8) |
			((net2272_ep_read(ep, EP_TRANSFER0) & 0xff)));
		size -= t;
		next += t;

		t1 = net2272_ep_read(ep, EP_BUFF_STATES) & 0x03;
		t2 = (net2272_ep_read(ep, EP_BUFF_STATES) >> 2) & 0x03;
		t = scnprintf(next, size,
			"\tbuf-a %s buf-b %s\n",
			buf_state_string(t1),
			buf_state_string(t2));
		size -= t;
		next += t;
	}

	spin_unlock_irqrestore(&dev->lock, flags);

	return PAGE_SIZE - size;
}
static DEVICE_ATTR(registers, S_IRUGO, net2272_show_registers, NULL);
/*---------------------------------------------------------------------------*/
/* Program the chip's fifo partitioning (LOCCTL bits 7:6) and rebuild
 * the gadget's endpoint list with the fifo sizes that partitioning
 * implies.  Mode 3 sacrifices ep-b for a bigger ep-a fifo.
 */
static void
net2272_set_fifo_mode(struct net2272 *dev, int mode)
{
	u8 locctl;

	/* fifo mode lives in the top two bits of LOCCTL */
	locctl = (net2272_read(dev, LOCCTL) & 0x3f) | (mode << 6);
	net2272_write(dev, LOCCTL, locctl);

	INIT_LIST_HEAD(&dev->gadget.ep_list);

	/* ep-a is available in every mode */
	list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);

	/* ep-b exists only in modes 0-2 */
	if (mode != 3)
		list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);

	switch (mode) {
	case 0:
		dev->ep[1].fifo_size = 512;
		dev->ep[2].fifo_size = 512;
		break;
	case 1:
		dev->ep[1].fifo_size = 1024;
		dev->ep[2].fifo_size = 512;
		break;
	case 2:
		dev->ep[1].fifo_size = 1024;
		dev->ep[2].fifo_size = 1024;
		break;
	case 3:
		dev->ep[1].fifo_size = 1024;
		break;
	}

	/* ep-c is always 2 512 byte buffers */
	list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
	dev->ep[3].fifo_size = 512;
}
/*---------------------------------------------------------------------------*/
/* Quiesce the controller: cancel dma, mask and clear interrupts, then
 * restore the fifo/dma configuration selected by the module parameters.
 */
static void
net2272_usb_reset(struct net2272 *dev)
{
	dev->gadget.speed = USB_SPEED_UNKNOWN;

	net2272_cancel_dma(dev);

	net2272_write(dev, IRQENB0, 0);
	net2272_write(dev, IRQENB1, 0);

	/* clear irq state */
	net2272_write(dev, IRQSTAT0, 0xff);
	net2272_write(dev, IRQSTAT1, ~(1 << SUSPEND_REQUEST_INTERRUPT));

	/* park DMAREQ with dreq disabled */
	net2272_write(dev, DMAREQ,
		(0 << DMA_BUFFER_VALID) |
		(0 << DMA_REQUEST_ENABLE) |
		(1 << DMA_CONTROL_DACK) |
		(dev->dma_eot_polarity << EOT_POLARITY) |
		(dev->dma_dack_polarity << DACK_POLARITY) |
		(dev->dma_dreq_polarity << DREQ_POLARITY) |
		((dma_ep >> 1) << DMA_ENDPOINT_SELECT));

	net2272_cancel_dma(dev);
	net2272_set_fifo_mode(dev, (fifo_mode <= 3) ? fifo_mode : 0);

	/* Set the NET2272 ep fifo data width to 16-bit mode and for correct byte swapping
	 * note that the higher level gadget drivers are expected to convert data to little endian.
	 * Enable byte swap for your local bus/cpu if needed by setting BYTE_SWAP in LOCCTL here
	 */
	net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) | (1 << DATA_WIDTH));
	net2272_write(dev, LOCCTL1, (dma_mode << DMA_MODE));
}
/* Re-initialize the software state of all four endpoints and point the
 * gadget's ep0 at endpoint zero.
 */
static void
net2272_usb_reinit(struct net2272 *dev)
{
	int i;

	/* basic endpoint init */
	for (i = 0; i < 4; ++i) {
		struct net2272_ep *ep = &dev->ep[i];

		ep->ep.name = ep_name[i];
		ep->dev = dev;
		ep->num = i;
		ep->not_empty = 0;
		if (use_dma && ep->num == dma_ep)
			ep->dma = 1;
		/* ep0 has a 64 byte fifo; the others default to 512 */
		ep->fifo_size = i ? 512 : 64;
		net2272_ep_reset(ep);
	}
	dev->ep[0].ep.maxpacket = 64;

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
}
/* Program ep0 response behavior and enable the core interrupts needed
 * before the first SETUP packet can be serviced.
 */
static void
net2272_ep0_start(struct net2272 *dev)
{
	struct net2272_ep *ep0 = &dev->ep[0];

	net2272_ep_write(ep0, EP_RSPSET,
		(1 << NAK_OUT_PACKETS_MODE) |
		(1 << ALT_NAK_OUT_PACKETS));
	net2272_ep_write(ep0, EP_RSPCLR,
		(1 << HIDE_STATUS_PHASE) |
		(1 << CONTROL_STATUS_PHASE_HANDSHAKE));
	/* attach to the bus only if softconnect is set */
	net2272_write(dev, USBCTL0,
		(dev->softconnect << USB_DETECT_ENABLE) |
		(1 << USB_ROOT_PORT_WAKEUP_ENABLE) |
		(1 << IO_WAKEUP_ENABLE));
	net2272_write(dev, IRQENB0,
		(1 << SETUP_PACKET_INTERRUPT_ENABLE) |
		(1 << ENDPOINT_0_INTERRUPT_ENABLE) |
		(1 << DMA_DONE_INTERRUPT_ENABLE));
	net2272_write(dev, IRQENB1,
		(1 << VBUS_INTERRUPT_ENABLE) |
		(1 << ROOT_PORT_RESET_INTERRUPT_ENABLE) |
		(1 << SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE));
}
/* when a driver is successfully registered, it will receive
 * control requests including set_configuration(), which enables
 * non-control requests.  then usb traffic follows until a
 * disconnect is reported.  then a host may connect again, or
 * the driver might get unbound.
 */
static int net2272_start(struct usb_gadget *_gadget,
	struct usb_gadget_driver *driver)
{
	struct net2272 *dev;
	unsigned i;

	/* this UDC requires a high-speed-capable driver with unbind and
	 * setup callbacks
	 */
	if (!driver || !driver->unbind || !driver->setup ||
	    driver->speed != USB_SPEED_HIGH)
		return -EINVAL;

	dev = container_of(_gadget, struct net2272, gadget);

	for (i = 0; i < 4; ++i)
		dev->ep[i].irqs = 0;
	/* hook up the driver ... */
	dev->softconnect = 1;
	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2272_ep0_start(dev);

	dev_dbg(dev->dev, "%s ready\n", driver->driver.name);

	return 0;
}
/* Reset the hardware, fail all outstanding requests on every endpoint,
 * and re-initialize software endpoint state.  Called with the device
 * lock held.
 */
static void
stop_activity(struct net2272 *dev, struct usb_gadget_driver *driver)
{
	int i;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	net2272_usb_reset(dev);

	for (i = 0; i < 4; ++i)
		net2272_dequeue_all(&dev->ep[i]);

	net2272_usb_reinit(dev);
}
/* usb_gadget_ops.udc_stop: tear down the current gadget driver binding.
 * Stops all activity under the lock, then forgets the driver.
 *
 * Fix: the UDC core can invoke udc_stop with @driver == NULL during
 * controller teardown; the old code unconditionally dereferenced
 * driver->driver.name for the debug message, which would oops.
 */
static int net2272_stop(struct usb_gadget *_gadget,
	struct usb_gadget_driver *driver)
{
	struct net2272 *dev;
	unsigned long flags;

	dev = container_of(_gadget, struct net2272, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	stop_activity(dev, driver);
	spin_unlock_irqrestore(&dev->lock, flags);

	dev->gadget.dev.driver = NULL;
	dev->driver = NULL;

	dev_dbg(dev->dev, "unregistered driver '%s'\n",
		driver ? driver->driver.name : "(null)");
	return 0;
}
/*---------------------------------------------------------------------------*/
/* handle ep-a/ep-b dma completions
 *
 * De-asserts DREQ, restores per-packet interrupts, then finishes the
 * head request: for IN, completes it (validating a short packet/zlp if
 * needed) and tries to start the next transfer; for OUT, accounts the
 * bytes the chip reports in EP_TRANSFER and lets pio collect the rest.
 *
 * Fix: @req is explicitly NULL when the queue is empty (e.g. a stale
 * dma-done interrupt after the request was dequeued), but the original
 * code then dereferenced req->req unconditionally.  Bail out after the
 * register housekeeping instead of oopsing.
 */
static void
net2272_handle_dma(struct net2272_ep *ep)
{
	struct net2272_request *req;
	unsigned len;
	int status;

	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
				struct net2272_request, queue);
	else
		req = NULL;

	dev_vdbg(ep->dev->dev, "handle_dma %s req %p\n", ep->ep.name, req);

	/* Ensure DREQ is de-asserted */
	net2272_write(ep->dev, DMAREQ,
		(0 << DMA_BUFFER_VALID)
		| (0 << DMA_REQUEST_ENABLE)
		| (1 << DMA_CONTROL_DACK)
		| (ep->dev->dma_eot_polarity << EOT_POLARITY)
		| (ep->dev->dma_dack_polarity << DACK_POLARITY)
		| (ep->dev->dma_dreq_polarity << DREQ_POLARITY)
		| ((ep->dma >> 1) << DMA_ENDPOINT_SELECT));

	ep->dev->dma_busy = 0;

	/* re-enable per-packet interrupts now that dma is done */
	net2272_ep_write(ep, EP_IRQENB,
		(1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
		| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
		| net2272_ep_read(ep, EP_IRQENB));

	/* no request left to finish: nothing more to do */
	if (unlikely(!req))
		return;

	/* device-to-host transfer completed */
	if (ep->is_in) {
		/* validate a short packet or zlp if necessary */
		if ((req->req.length % ep->ep.maxpacket != 0) ||
				req->req.zero)
			set_fifo_bytecount(ep, 0);

		net2272_done(ep, req, 0);
		if (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
				struct net2272_request, queue);
			status = net2272_kick_dma(ep, req);
			if (status < 0)
				net2272_pio_advance(ep);
		}

	/* host-to-device transfer completed */
	} else {
		/* terminated with a short packet? */
		if (net2272_read(ep->dev, IRQSTAT0) &
				(1 << DMA_DONE_INTERRUPT)) {
			/* abort system dma */
			net2272_cancel_dma(ep->dev);
		}

		/* EP_TRANSFER will contain the number of bytes
		 * actually received.
		 * NOTE: There is no overflow detection on EP_TRANSFER:
		 * We can't deal with transfers larger than 2^24 bytes!
		 */
		len = (net2272_ep_read(ep, EP_TRANSFER2) << 16)
			| (net2272_ep_read(ep, EP_TRANSFER1) << 8)
			| (net2272_ep_read(ep, EP_TRANSFER0));

		if (ep->not_empty)
			len += 4;

		req->req.actual += len;

		/* get any remaining data */
		net2272_pio_advance(ep);
	}
}
/*---------------------------------------------------------------------------*/
/* Service a data interrupt on one endpoint: acknowledge the status
 * bits, then advance the current PIO transfer in the endpoint's
 * direction.
 */
static void
net2272_handle_ep(struct net2272_ep *ep)
{
	struct net2272_request *req;
	u8 stat0, stat1;

	/* req may be NULL here; it is only used for the debug print */
	if (!list_empty(&ep->queue))
		req = list_entry(ep->queue.next,
			struct net2272_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	stat0 = net2272_ep_read(ep, EP_STAT0);
	stat1 = net2272_ep_read(ep, EP_STAT1);
	ep->irqs++;

	dev_vdbg(ep->dev->dev, "%s ack ep_stat0 %02x, ep_stat1 %02x, req %p\n",
		ep->ep.name, stat0, stat1, req ? &req->req : 0);

	/* leave NAK_OUT_PACKETS and the short-packet latch uncleared */
	net2272_ep_write(ep, EP_STAT0, stat0 &
		~((1 << NAK_OUT_PACKETS)
		| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT)));
	net2272_ep_write(ep, EP_STAT1, stat1);

	/* data packet(s) received (in the fifo, OUT)
	 * direction must be validated, otherwise control read status phase
	 * could be interpreted as a valid packet
	 */
	if (!ep->is_in && (stat0 & (1 << DATA_PACKET_RECEIVED_INTERRUPT)))
		net2272_pio_advance(ep);
	/* data packet(s) transmitted (IN) */
	else if (stat0 & (1 << DATA_PACKET_TRANSMITTED_INTERRUPT))
		net2272_pio_advance(ep);
}
/* Map a wIndex endpoint address from a control request onto our
 * endpoint structure; returns NULL when no enabled endpoint matches.
 */
static struct net2272_ep *
net2272_get_ep_by_addr(struct net2272 *dev, u16 wIndex)
{
	struct net2272_ep *ep;

	/* endpoint number zero answers for both directions */
	if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
		return &dev->ep[0];

	list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
		u8 addr;

		if (!ep->desc)
			continue;
		addr = ep->desc->bEndpointAddress;
		/* the direction bit must agree ... */
		if ((wIndex ^ addr) & USB_DIR_IN)
			continue;
		/* ... and so must the endpoint number */
		if ((wIndex & 0x0f) == (addr & 0x0f))
			return ep;
	}
	return NULL;
}
/*
 * Fixed payload loaded by net2272_set_test_mode() when the host selects
 * TEST_PACKET.  On the wire this encodes:
 * JKJKJKJK * 9
 * JJKKJJKK * 8
 * JJJJKKKK * 8
 * JJJJJJJKKKKKKK * 8
 * JJJJJJJK * 8
 * {JKKKKKKK * 10}, JK
 */
static const u8 net2272_test_packet[] = {
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
	0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA,
	0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE, 0xEE,
	0xFE, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
	0x7F, 0xBF, 0xDF, 0xEF, 0xF7, 0xFB, 0xFD,
	0xFC, 0x7E, 0xBF, 0xDF, 0xEF, 0xF7, 0xFD, 0x7E
};
/* Enter a USB electrical test mode.  Interrupts are disabled first so
 * nothing but a power cycle ends the test; TEST_PACKET additionally
 * loads the fixed pattern by 8-bit PIO.
 */
static void
net2272_set_test_mode(struct net2272 *dev, int mode)
{
	int i;

	/* Disable all net2272 interrupts:
	 * Nothing but a power cycle should stop the test.
	 */
	net2272_write(dev, IRQENB0, 0x00);
	net2272_write(dev, IRQENB1, 0x00);

	/* Force transceiver to high-speed */
	net2272_write(dev, XCVRDIAG, 1 << FORCE_HIGH_SPEED);

	net2272_write(dev, PAGESEL, 0);
	net2272_write(dev, EP_STAT0, 1 << DATA_PACKET_TRANSMITTED_INTERRUPT);
	net2272_write(dev, EP_RSPCLR,
			(1 << CONTROL_STATUS_PHASE_HANDSHAKE)
			| (1 << HIDE_STATUS_PHASE));
	net2272_write(dev, EP_CFG, 1 << ENDPOINT_DIRECTION);
	net2272_write(dev, EP_STAT1, 1 << BUFFER_FLUSH);

	/* wait for status phase to complete */
	while (!(net2272_read(dev, EP_STAT0) &
				(1 << DATA_PACKET_TRANSMITTED_INTERRUPT)))
		;

	/* Enable test mode */
	net2272_write(dev, USBTEST, mode);

	/* load test packet */
	if (mode == TEST_PACKET) {
		/* switch to 8 bit mode */
		net2272_write(dev, LOCCTL, net2272_read(dev, LOCCTL) &
				~(1 << DATA_WIDTH));

		for (i = 0; i < sizeof(net2272_test_packet); ++i)
			net2272_write(dev, EP_DATA, net2272_test_packet[i]);

		/* Validate test packet */
		net2272_write(dev, EP_TRANSFER0, 0);
	}
}
/* Dispatch IRQSTAT0 events: SETUP packets (with in-driver handling of
 * the simple standard requests and delegation of everything else to the
 * gadget driver), dma completions, and per-endpoint data interrupts.
 * Called with the device lock held.
 */
static void
net2272_handle_stat0_irqs(struct net2272 *dev, u8 stat)
{
	struct net2272_ep *ep;
	u8 num, scratch;

	/* starting a control request? */
	if (unlikely(stat & (1 << SETUP_PACKET_INTERRUPT))) {
		union {
			u8 raw[8];
			struct usb_ctrlrequest r;
		} u;
		int tmp = 0;
		struct net2272_request *req;

		/* the first SETUP after attach fixes the bus speed */
		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			if (net2272_read(dev, USBCTL1) & (1 << USB_HIGH_SPEED))
				dev->gadget.speed = USB_SPEED_HIGH;
			else
				dev->gadget.speed = USB_SPEED_FULL;
			dev_dbg(dev->dev, "%s\n",
				usb_speed_string(dev->gadget.speed));
		}

		ep = &dev->ep[0];
		ep->irqs++;

		/* make sure any leftover interrupt state is cleared */
		stat &= ~(1 << ENDPOINT_0_INTERRUPT);

		/* a new SETUP supersedes whatever is still queued on ep0 */
		while (!list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
				struct net2272_request, queue);
			net2272_done(ep, req,
				(req->req.actual == req->req.length) ? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;
		net2272_ep_write(ep, EP_STAT0,
			(1 << DATA_IN_TOKEN_INTERRUPT)
			| (1 << DATA_OUT_TOKEN_INTERRUPT)
			| (1 << DATA_PACKET_TRANSMITTED_INTERRUPT)
			| (1 << DATA_PACKET_RECEIVED_INTERRUPT)
			| (1 << SHORT_PACKET_TRANSFERRED_INTERRUPT));
		net2272_ep_write(ep, EP_STAT1,
			(1 << TIMEOUT)
			| (1 << USB_OUT_ACK_SENT)
			| (1 << USB_OUT_NAK_SENT)
			| (1 << USB_IN_ACK_RCVD)
			| (1 << USB_IN_NAK_SENT)
			| (1 << USB_STALL_SENT)
			| (1 << LOCAL_OUT_ZLP));

		/*
		 * Ensure Control Read pre-validation setting is beyond maximum size
		 *  - Control Writes can leave non-zero values in EP_TRANSFER. If
		 *    an EP0 transfer following the Control Write is a Control Read,
		 *    the NET2272 sees the non-zero EP_TRANSFER as an unexpected
		 *    pre-validation count.
		 *  - Setting EP_TRANSFER beyond the maximum EP0 transfer size ensures
		 *    the pre-validation count cannot cause an unexpected validation
		 */
		net2272_write(dev, PAGESEL, 0);
		net2272_write(dev, EP_TRANSFER2, 0xff);
		net2272_write(dev, EP_TRANSFER1, 0xff);
		net2272_write(dev, EP_TRANSFER0, 0xff);

		/* read the 8-byte SETUP packet out of the chip */
		u.raw[0] = net2272_read(dev, SETUP0);
		u.raw[1] = net2272_read(dev, SETUP1);
		u.raw[2] = net2272_read(dev, SETUP2);
		u.raw[3] = net2272_read(dev, SETUP3);
		u.raw[4] = net2272_read(dev, SETUP4);
		u.raw[5] = net2272_read(dev, SETUP5);
		u.raw[6] = net2272_read(dev, SETUP6);
		u.raw[7] = net2272_read(dev, SETUP7);
		/*
		 * If you have a big endian cpu make sure le16_to_cpus
		 * performs the proper byte swapping here...
		 */
		le16_to_cpus(&u.r.wValue);
		le16_to_cpus(&u.r.wIndex);
		le16_to_cpus(&u.r.wLength);

		/* ack the irq */
		net2272_write(dev, IRQSTAT0, 1 << SETUP_PACKET_INTERRUPT);
		stat ^= (1 << SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status phase happen.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = (1 << DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE)
				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
			stop_out_naking(ep);
		} else
			scratch = (1 << DATA_PACKET_RECEIVED_INTERRUPT_ENABLE)
				| (1 << DATA_OUT_TOKEN_INTERRUPT_ENABLE)
				| (1 << DATA_IN_TOKEN_INTERRUPT_ENABLE);
		net2272_ep_write(ep, EP_IRQENB, scratch);

		/* only standard requests are handled inline */
		if ((u.r.bRequestType & USB_TYPE_MASK) != USB_TYPE_STANDARD)
			goto delegate;
		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2272_ep *e;
			u16 status = 0;

			switch (u.r.bRequestType & USB_RECIP_MASK) {
			case USB_RECIP_ENDPOINT:
				e = net2272_get_ep_by_addr(dev, u.r.wIndex);
				if (!e || u.r.wLength > 2)
					goto do_stall;
				if (net2272_ep_read(e, EP_RSPSET) & (1 << ENDPOINT_HALT))
					status = __constant_cpu_to_le16(1);
				else
					status = __constant_cpu_to_le16(0);

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "%s stat %02x\n",
					ep->ep.name, status);
				goto next_endpoints;
			case USB_RECIP_DEVICE:
				if (u.r.wLength > 2)
					goto do_stall;
				if (dev->is_selfpowered)
					status = (1 << USB_DEVICE_SELF_POWERED);

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "device stat %02x\n", status);
				goto next_endpoints;
			case USB_RECIP_INTERFACE:
				if (u.r.wLength > 2)
					goto do_stall;

				/* don't bother with a request object! */
				net2272_ep_write(&dev->ep[0], EP_IRQENB, 0);
				writew(status, net2272_reg_addr(dev, EP_DATA));
				set_fifo_bytecount(&dev->ep[0], 0);
				allow_status(ep);
				dev_vdbg(dev->dev, "interface status %02x\n", status);
				goto next_endpoints;
			}

			break;
		}
		case USB_REQ_CLEAR_FEATURE: {
			struct net2272_ep *e;

			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (u.r.wValue != USB_ENDPOINT_HALT ||
			    u.r.wLength != 0)
				goto do_stall;
			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
			if (!e)
				goto do_stall;
			if (e->wedged) {
				/* a wedged endpoint ignores CLEAR_FEATURE */
				dev_vdbg(dev->dev, "%s wedged, halt not cleared\n",
					ep->ep.name);
			} else {
				dev_vdbg(dev->dev, "%s clear halt\n", ep->ep.name);
				clear_halt(e);
			}
			allow_status(ep);
			goto next_endpoints;
		}
		case USB_REQ_SET_FEATURE: {
			struct net2272_ep *e;

			if (u.r.bRequestType == USB_RECIP_DEVICE) {
				if (u.r.wIndex != NORMAL_OPERATION)
					net2272_set_test_mode(dev, (u.r.wIndex >> 8));
				allow_status(ep);
				dev_vdbg(dev->dev, "test mode: %d\n", u.r.wIndex);
				goto next_endpoints;
			} else if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (u.r.wValue != USB_ENDPOINT_HALT ||
			    u.r.wLength != 0)
				goto do_stall;
			e = net2272_get_ep_by_addr(dev, u.r.wIndex);
			if (!e)
				goto do_stall;
			set_halt(e);
			allow_status(ep);
			dev_vdbg(dev->dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
		}
		case USB_REQ_SET_ADDRESS: {
			net2272_write(dev, OURADDR, u.r.wValue & 0xff);
			allow_status(ep);
			break;
		}
		default:
 delegate:
			dev_vdbg(dev->dev, "setup %02x.%02x v%04x i%04x "
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				u.r.wValue, u.r.wIndex,
				net2272_ep_read(ep, EP_CFG));

			/* hand everything non-trivial to the gadget driver,
			 * dropping the lock across the callback
			 */
			spin_unlock(&dev->lock);
			tmp = dev->driver->setup(&dev->gadget, &u.r);
			spin_lock(&dev->lock);
		}

		/* stall ep0 on error */
		if (tmp < 0) {
 do_stall:
			dev_vdbg(dev->dev, "req %02x.%02x protocol STALL; stat %d\n",
				u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}

	/* endpoint dma irq? */
	} else if (stat & (1 << DMA_DONE_INTERRUPT)) {
		net2272_cancel_dma(dev);
		net2272_write(dev, IRQSTAT0, 1 << DMA_DONE_INTERRUPT);
		stat &= ~(1 << DMA_DONE_INTERRUPT);
		/* which of ep-a/ep-b was doing dma? */
		num = (net2272_read(dev, DMAREQ) & (1 << DMA_ENDPOINT_SELECT))
			? 2 : 1;

		ep = &dev->ep[num];
		net2272_handle_dma(ep);
	}

 next_endpoints:
	/* endpoint data irq? */
	scratch = stat & 0x0f;
	stat &= ~0x0f;
	for (num = 0; scratch; num++) {
		u8 t;

		/* does this endpoint's FIFO and queue need tending? */
		t = 1 << num;
		if ((scratch & t) == 0)
			continue;
		scratch ^= t;

		ep = &dev->ep[num];
		net2272_handle_ep(ep);
	}

	/* some interrupts we can just ignore */
	stat &= ~(1 << SOF_INTERRUPT);

	if (stat)
		dev_dbg(dev->dev, "unhandled irqstat0 %02x\n", stat);
}
/*
 * net2272_handle_stat1_irqs - service interrupts reported in IRQSTAT1.
 * Handles VBUS / root-port-reset (disconnect detection) and suspend/resume
 * change notifications, then logs anything left over.  Called from the IRQ
 * handler with dev->lock held.
 */
static void
net2272_handle_stat1_irqs(struct net2272 *dev, u8 stat)
{
	u8 tmp, mask;

	/* after disconnect there's nothing else to do! */
	tmp = (1 << VBUS_INTERRUPT) | (1 << ROOT_PORT_RESET_INTERRUPT);
	mask = (1 << USB_HIGH_SPEED) | (1 << USB_FULL_SPEED);
	if (stat & tmp) {
		net2272_write(dev, IRQSTAT1, tmp);
		/*
		 * Treat as a disconnect when (a) the root port was reset while
		 * no speed bit is set in USBCTL1, or (b) the VBUS pin reads
		 * low -- but only if we previously had a live connection
		 * (gadget.speed known).
		 */
		if ((((stat & (1 << ROOT_PORT_RESET_INTERRUPT)) &&
				((net2272_read(dev, USBCTL1) & mask) == 0))
				|| ((net2272_read(dev, USBCTL1) & (1 << VBUS_PIN))
					== 0))
				&& (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
			dev_dbg(dev->dev, "disconnect %s\n",
				dev->driver->driver.name);
			stop_activity(dev, dev->driver);
			net2272_ep0_start(dev);
			return;
		}
		stat &= ~tmp;

		if (!stat)
			return;
	}

	tmp = (1 << SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		net2272_write(dev, IRQSTAT1, tmp);
		/* SUSPEND_REQUEST_INTERRUPT set => entering suspend,
		 * clear => resuming */
		if (stat & (1 << SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->driver->suspend)
				dev->driver->suspend(&dev->gadget);
			if (!enable_suspend) {
				stat &= ~(1 << SUSPEND_REQUEST_INTERRUPT);
				dev_dbg(dev->dev, "Suspend disabled, ignoring\n");
			}
		} else {
			if (dev->driver->resume)
				dev->driver->resume(&dev->gadget);
		}
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		net2272_write(dev, IRQSTAT1, stat);

	/* some status we can just ignore */
	stat &= ~((1 << CONTROL_STATUS_INTERRUPT)
			| (1 << SUSPEND_REQUEST_INTERRUPT)
			| (1 << RESUME_INTERRUPT));
	if (!stat)
		return;
	else
		dev_dbg(dev->dev, "unhandled irqstat1 %02x\n", stat);
}
/*
 * net2272_irq - top-level interrupt handler.
 *
 * On the RDK1 board the PLX 9054 bridge interrupt status is examined first
 * (masking PCI interrupts around the stat0/stat1 processing) and its DMA
 * channel completion is dispatched to the proper endpoint.  On RDK2 the FPGA
 * irqstat register is checked to see whether the interrupt is ours at all.
 * On plain platform devices the stat1/stat0 registers are handled directly.
 *
 * Fixes versus the previous revision:
 *  - the RDK2 "is this interrupt for us" test read
 *    "if (!intcsr & (1 << NET2272_PCI_IRQ))": '!' binds tighter than '&',
 *    so the mask was never actually tested.  Now parenthesized correctly.
 *  - that early IRQ_NONE return leaked dev->lock; the spinlock is now
 *    released before returning.
 */
static irqreturn_t net2272_irq(int irq, void *_dev)
{
	struct net2272 *dev = _dev;
#if defined(PLX_PCI_RDK) || defined(PLX_PCI_RDK2)
	u32 intcsr;
#endif
#if defined(PLX_PCI_RDK)
	u8 dmareq;
#endif
	spin_lock(&dev->lock);
#if defined(PLX_PCI_RDK)
	intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);

	if ((intcsr & LOCAL_INTERRUPT_TEST) == LOCAL_INTERRUPT_TEST) {
		/* mask PCI interrupts while we service the chip */
		writel(intcsr & ~(1 << PCI_INTERRUPT_ENABLE),
			dev->rdk1.plx9054_base_addr + INTCSR);
		net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
		net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
		intcsr = readl(dev->rdk1.plx9054_base_addr + INTCSR);
		writel(intcsr | (1 << PCI_INTERRUPT_ENABLE),
			dev->rdk1.plx9054_base_addr + INTCSR);
	}
	if ((intcsr & DMA_CHANNEL_0_TEST) == DMA_CHANNEL_0_TEST) {
		writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
			dev->rdk1.plx9054_base_addr + DMACSR0);

		dmareq = net2272_read(dev, DMAREQ);
		if (dmareq & 0x01)
			net2272_handle_dma(&dev->ep[2]);
		else
			net2272_handle_dma(&dev->ep[1]);
	}
#endif
#if defined(PLX_PCI_RDK2)
	/* see if PCI int for us by checking irqstat */
	intcsr = readl(dev->rdk2.fpga_base_addr + RDK2_IRQSTAT);
	if (!(intcsr & (1 << NET2272_PCI_IRQ))) {
		spin_unlock(&dev->lock);
		return IRQ_NONE;
	}
	/* check dma interrupts */
#endif
	/* Platform/device interrupt handler */
#if !defined(PLX_PCI_RDK)
	net2272_handle_stat1_irqs(dev, net2272_read(dev, IRQSTAT1));
	net2272_handle_stat0_irqs(dev, net2272_read(dev, IRQSTAT0));
#endif
	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}
/*
 * net2272_present - sanity-check CPU <-> NET2272 register communication.
 * Returns 0 on success, -EINVAL when any of the register probes fails.
 */
static int net2272_present(struct net2272 *dev)
{
	/*
	 * Quick test to see if CPU can communicate properly with the NET2272.
	 * Verifies connection using writes and reads to write/read and
	 * read-only registers.
	 *
	 * This routine is strongly recommended especially during early bring-up
	 * of new hardware, however for designs that do not apply Power On System
	 * Tests (POST) it may discarded (or perhaps minimized).
	 */
	unsigned int ii;
	u8 val, refval;

	/* Verify NET2272 write/read SCRATCH register can write and read */
	refval = net2272_read(dev, SCRATCH);
	for (ii = 0; ii < 0x100; ii += 7) {
		net2272_write(dev, SCRATCH, ii);
		val = net2272_read(dev, SCRATCH);
		if (val != ii) {
			dev_dbg(dev->dev,
				"%s: write/read SCRATCH register test failed: "
				"wrote:0x%2.2x, read:0x%2.2x\n",
				__func__, ii, val);
			return -EINVAL;
		}
	}
	/* To be nice, we write the original SCRATCH value back: */
	net2272_write(dev, SCRATCH, refval);

	/* Verify NET2272 CHIPREV register is read-only: */
	refval = net2272_read(dev, CHIPREV_2272);
	for (ii = 0; ii < 0x100; ii += 7) {
		net2272_write(dev, CHIPREV_2272, ii);
		val = net2272_read(dev, CHIPREV_2272);
		if (val != refval) {
			dev_dbg(dev->dev,
				"%s: write/read CHIPREV register test failed: "
				"wrote 0x%2.2x, read:0x%2.2x expected:0x%2.2x\n",
				__func__, ii, val, refval);
			return -EINVAL;
		}
	}

	/*
	 * Verify NET2272's "NET2270 legacy revision" register
	 *  - NET2272 has two revision registers. The NET2270 legacy revision
	 *    register should read the same value, regardless of the NET2272
	 *    silicon revision.  The legacy register applies to NET2270
	 *    firmware being applied to the NET2272.
	 */
	val = net2272_read(dev, CHIPREV_LEGACY);
	if (val != NET2270_LEGACY_REV) {
		/*
		 * Unexpected legacy revision value
		 *  - Perhaps the chip is a NET2270?
		 */
		dev_dbg(dev->dev,
			"%s: WARNING: UNEXPECTED NET2272 LEGACY REGISTER VALUE:\n"
			" - CHIPREV_LEGACY: expected 0x%2.2x, got:0x%2.2x. (Not NET2272?)\n",
			__func__, NET2270_LEGACY_REV, val);
		return -EINVAL;
	}

	/*
	 * Verify NET2272 silicon revision
	 *  - This revision register is appropriate for the silicon version
	 *    of the NET2272
	 */
	val = net2272_read(dev, CHIPREV_2272);
	switch (val) {
	case CHIPREV_NET2272_R1:
		/*
		 * NET2272 Rev 1 has DMA related errata:
		 *  - Newer silicon (Rev 1A or better) required
		 */
		dev_dbg(dev->dev,
			"%s: Rev 1 detected: newer silicon recommended for DMA support\n",
			__func__);
		break;
	case CHIPREV_NET2272_R1A:
		break;
	default:
		/* NET2272 silicon version *may* not work with this firmware */
		dev_dbg(dev->dev,
			"%s: unexpected silicon revision register value: "
			" CHIPREV_2272: 0x%2.2x\n",
			__func__, val);
		/*
		 * Return Success, even though the chip rev is not an expected value
		 *  - Older, pre-built firmware can attempt to operate on newer silicon
		 *  - Often, new silicon is perfectly compatible
		 */
	}

	/* Success: NET2272 checks out OK */
	return 0;
}
/* Release callback for the gadget device: free the driver state that was
 * stashed in drvdata by the probe path. */
static void
net2272_gadget_release(struct device *_dev)
{
	struct net2272 *udc_dev;

	udc_dev = dev_get_drvdata(_dev);
	kfree(udc_dev);
}
/*---------------------------------------------------------------------------*/
/*
 * net2272_remove - common teardown shared by the PCI and platform paths.
 * Unregisters the UDC, forcibly unbinds any gadget driver still attached,
 * then releases the IRQ, the register mapping, the gadget device and the
 * sysfs attribute acquired in net2272_probe_fin().
 */
static void __devexit
net2272_remove(struct net2272 *dev)
{
	usb_del_gadget_udc(&dev->gadget);

	/* start with the driver above us */
	if (dev->driver) {
		/* should have been done already by driver model core */
		dev_warn(dev->dev, "pci remove, driver '%s' is still registered\n",
			dev->driver->driver.name);
		usb_gadget_unregister_driver(dev->driver);
	}

	free_irq(dev->irq, dev);
	iounmap(dev->base_addr);
	device_unregister(&dev->gadget.dev);
	device_remove_file(dev->dev, &dev_attr_registers);

	dev_info(dev->dev, "unbind\n");
}
/*
 * net2272_probe_init - allocate and minimally initialize a net2272 instance.
 * @dev: parent device (PCI or platform)
 * @irq: interrupt line; must be non-zero
 *
 * Returns the new structure, or an ERR_PTR on failure.  The caller still has
 * to map the register window and call net2272_probe_fin().
 */
static struct net2272 * __devinit
net2272_probe_init(struct device *dev, unsigned int irq)
{
	struct net2272 *ret;

	if (!irq) {
		dev_dbg(dev, "No IRQ!\n");
		return ERR_PTR(-ENODEV);
	}

	/* alloc, and start init */
	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&ret->lock);
	ret->irq = irq;
	ret->dev = dev;
	ret->gadget.ops = &net2272_ops;
	ret->gadget.is_dualspeed = 1;

	/* the "gadget" abstracts/virtualizes the controller */
	dev_set_name(&ret->gadget.dev, "gadget");
	ret->gadget.dev.parent = dev;
	ret->gadget.dev.dma_mask = dev->dma_mask;
	ret->gadget.dev.release = net2272_gadget_release;
	ret->gadget.name = driver_name;

	return ret;
}
/*
 * net2272_probe_fin - finish probing once registers are mapped.
 * Verifies the chip is present, resets it, claims the IRQ, and registers
 * the gadget device, sysfs attribute and UDC.  Uses a goto-cleanup chain
 * that unwinds in strict reverse order of acquisition.  Returns 0 or a
 * negative errno; on failure the caller still owns (and must free) @dev.
 */
static int __devinit
net2272_probe_fin(struct net2272 *dev, unsigned int irqflags)
{
	int ret;

	/* See if there... */
	if (net2272_present(dev)) {
		dev_warn(dev->dev, "2272 not found!\n");
		ret = -ENODEV;
		goto err;
	}

	net2272_usb_reset(dev);
	net2272_usb_reinit(dev);

	ret = request_irq(dev->irq, net2272_irq, irqflags, driver_name, dev);
	if (ret) {
		dev_err(dev->dev, "request interrupt %i failed\n", dev->irq);
		goto err;
	}

	dev->chiprev = net2272_read(dev, CHIPREV_2272);

	/* done */
	dev_info(dev->dev, "%s\n", driver_desc);
	dev_info(dev->dev, "irq %i, mem %p, chip rev %04x, dma %s\n",
		dev->irq, dev->base_addr, dev->chiprev,
		dma_mode_string());
	dev_info(dev->dev, "version: %s\n", driver_vers);

	ret = device_register(&dev->gadget.dev);
	if (ret)
		goto err_irq;
	ret = device_create_file(dev->dev, &dev_attr_registers);
	if (ret)
		goto err_dev_reg;

	ret = usb_add_gadget_udc(dev->dev, &dev->gadget);
	if (ret)
		goto err_add_udc;

	return 0;

err_add_udc:
	device_remove_file(dev->dev, &dev_attr_registers);
err_dev_reg:
	device_unregister(&dev->gadget.dev);
err_irq:
	free_irq(dev->irq, dev);
err:
	return ret;
}
#ifdef CONFIG_PCI
/*
* wrap this driver around the specified device, but
* don't respond over USB until a gadget driver binds to us
*/
/*
 * net2272_rdk1_probe - map and bring up the RDK1 (PLX 9054 based) board.
 * Maps BARs 0, 2 and 3 (BAR1 unused), sets the local bus to 16 bits,
 * enables bridge interrupts, and pulses the NET2272 reset line through the
 * EPLD.  On failure all regions mapped so far are unwound.
 */
static int __devinit
net2272_rdk1_probe(struct pci_dev *pdev, struct net2272 *dev)
{
	unsigned long resource, len, tmp;
	void __iomem *mem_mapped_addr[4];
	int ret, i;

	/*
	 * BAR 0 holds PLX 9054 config registers
	 * BAR 1 is i/o memory; unused here
	 * BAR 2 holds EPLD config registers
	 * BAR 3 holds NET2272 registers
	 */

	/* Find and map all address spaces */
	for (i = 0; i < 4; ++i) {
		if (i == 1)
			continue;	/* BAR1 unused */

		resource = pci_resource_start(pdev, i);
		len = pci_resource_len(pdev, i);

		if (!request_mem_region(resource, len, driver_name)) {
			dev_dbg(dev->dev, "controller already in use\n");
			ret = -EBUSY;
			goto err;
		}

		mem_mapped_addr[i] = ioremap_nocache(resource, len);
		if (mem_mapped_addr[i] == NULL) {
			release_mem_region(resource, len);
			dev_dbg(dev->dev, "can't map memory\n");
			ret = -EFAULT;
			goto err;
		}
	}

	dev->rdk1.plx9054_base_addr = mem_mapped_addr[0];
	dev->rdk1.epld_base_addr = mem_mapped_addr[2];
	dev->base_addr = mem_mapped_addr[3];

	/* Set PLX 9054 bus width (16 bits) */
	tmp = readl(dev->rdk1.plx9054_base_addr + LBRD1);
	writel((tmp & ~(3 << MEMORY_SPACE_LOCAL_BUS_WIDTH)) | W16_BIT,
		dev->rdk1.plx9054_base_addr + LBRD1);

	/* Enable PLX 9054 Interrupts */
	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) |
		(1 << PCI_INTERRUPT_ENABLE) |
		(1 << LOCAL_INTERRUPT_INPUT_ENABLE),
		dev->rdk1.plx9054_base_addr + INTCSR);

	writeb((1 << CHANNEL_CLEAR_INTERRUPT | (0 << CHANNEL_ENABLE)),
		dev->rdk1.plx9054_base_addr + DMACSR0);

	/* reset */
	writeb((1 << EPLD_DMA_ENABLE) |
		(1 << DMA_CTL_DACK) |
		(1 << DMA_TIMEOUT_ENABLE) |
		(1 << USER) |
		(0 << MPX_MODE) |
		(1 << BUSWIDTH) |
		(1 << NET2272_RESET),
		dev->base_addr + EPLD_IO_CONTROL_REGISTER);

	/* mb() orders the reset assert before the deassert below */
	mb();
	writeb(readb(dev->base_addr + EPLD_IO_CONTROL_REGISTER) &
		~(1 << NET2272_RESET),
		dev->base_addr + EPLD_IO_CONTROL_REGISTER);
	udelay(200);

	return 0;

err:
	/* unwind only the BARs mapped before the failure */
	while (--i >= 0) {
		iounmap(mem_mapped_addr[i]);
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
	}

	return ret;
}
/*
 * net2272_rdk2_probe - map and bring up the RDK2 (FPGA based) board.
 * Maps BAR0 (FPGA) and BAR1 (NET2272), resets the chip, sets the 16-bit
 * bus width, and enables the FPGA interrupt routing to PCI.
 */
static int __devinit
net2272_rdk2_probe(struct pci_dev *pdev, struct net2272 *dev)
{
	unsigned long resource, len;
	void __iomem *mem_mapped_addr[2];
	int ret, i;

	/*
	 * BAR 0 holds FGPA config registers
	 * BAR 1 holds NET2272 registers
	 */

	/* Find and map all address spaces, bar2-3 unused in rdk 2 */
	for (i = 0; i < 2; ++i) {
		resource = pci_resource_start(pdev, i);
		len = pci_resource_len(pdev, i);

		if (!request_mem_region(resource, len, driver_name)) {
			dev_dbg(dev->dev, "controller already in use\n");
			ret = -EBUSY;
			goto err;
		}

		mem_mapped_addr[i] = ioremap_nocache(resource, len);
		if (mem_mapped_addr[i] == NULL) {
			release_mem_region(resource, len);
			dev_dbg(dev->dev, "can't map memory\n");
			ret = -EFAULT;
			goto err;
		}
	}

	dev->rdk2.fpga_base_addr = mem_mapped_addr[0];
	dev->base_addr = mem_mapped_addr[1];

	mb();
	/* Set 2272 bus width (16 bits) and reset */
	writel((1 << CHIP_RESET), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
	udelay(200);
	writel((1 << BUS_WIDTH), dev->rdk2.fpga_base_addr + RDK2_LOCCTLRDK);
	/* Print fpga version number */
	dev_info(dev->dev, "RDK2 FPGA version %08x\n",
		readl(dev->rdk2.fpga_base_addr + RDK2_FPGAREV));
	/* Enable FPGA Interrupts */
	writel((1 << NET2272_PCI_IRQ), dev->rdk2.fpga_base_addr + RDK2_IRQENB);

	return 0;

err:
	/* unwind only the BARs mapped before the failure */
	while (--i >= 0) {
		iounmap(mem_mapped_addr[i]);
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
	}

	return ret;
}
/*
 * net2272_pci_probe - PCI probe entry; dispatches to the RDK1/RDK2 board
 * bring-up by PCI device id, then finishes with net2272_probe_fin().
 * Error paths disable the PCI device and free the instance.
 */
static int __devinit
net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net2272 *dev;
	int ret;

	dev = net2272_probe_init(&pdev->dev, pdev->irq);
	if (IS_ERR(dev))
		return PTR_ERR(dev);
	dev->dev_id = pdev->device;

	if (pci_enable_device(pdev) < 0) {
		ret = -ENODEV;
		goto err_free;
	}

	pci_set_master(pdev);

	switch (pdev->device) {
	case PCI_DEVICE_ID_RDK1: ret = net2272_rdk1_probe(pdev, dev); break;
	case PCI_DEVICE_ID_RDK2: ret = net2272_rdk2_probe(pdev, dev); break;
	default: BUG();	/* pci_ids table only matches RDK1/RDK2 */
	}
	if (ret)
		goto err_pci;

	ret = net2272_probe_fin(dev, 0);
	if (ret)
		goto err_pci;

	pci_set_drvdata(pdev, dev);

	return 0;

err_pci:
	pci_disable_device(pdev);
err_free:
	kfree(dev);

	return ret;
}
/*
 * net2272_rdk1_remove - undo net2272_rdk1_probe(): mask bridge interrupts
 * and release the BAR mappings/regions (BAR1 was never claimed).
 * dev->base_addr itself is unmapped by net2272_remove().
 */
static void __devexit
net2272_rdk1_remove(struct pci_dev *pdev, struct net2272 *dev)
{
	int i;

	/* disable PLX 9054 interrupts */
	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
		~(1 << PCI_INTERRUPT_ENABLE),
		dev->rdk1.plx9054_base_addr + INTCSR);

	/* clean up resources allocated during probe() */
	iounmap(dev->rdk1.plx9054_base_addr);
	iounmap(dev->rdk1.epld_base_addr);

	for (i = 0; i < 4; ++i) {
		if (i == 1)
			continue;	/* BAR1 unused */
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
	}
}
/*
 * net2272_rdk2_remove - undo net2272_rdk2_probe(): unmap the FPGA window
 * and release both BAR regions.  (The commented-out block below disables
 * interrupts via rdk1 registers, which do not exist on RDK2 -- intentionally
 * left disabled.)
 */
static void __devexit
net2272_rdk2_remove(struct pci_dev *pdev, struct net2272 *dev)
{
	int i;

	/* disable fpga interrupts
	writel(readl(dev->rdk1.plx9054_base_addr + INTCSR) &
			~(1 << PCI_INTERRUPT_ENABLE),
			dev->rdk1.plx9054_base_addr + INTCSR);
	*/

	/* clean up resources allocated during probe() */
	iounmap(dev->rdk2.fpga_base_addr);

	for (i = 0; i < 2; ++i)
		release_mem_region(pci_resource_start(pdev, i),
			pci_resource_len(pdev, i));
}
/*
 * net2272_pci_remove - PCI remove entry: common teardown first, then the
 * board-specific cleanup, then disable the PCI device and free the instance.
 */
static void __devexit
net2272_pci_remove(struct pci_dev *pdev)
{
	struct net2272 *dev = pci_get_drvdata(pdev);

	net2272_remove(dev);

	switch (pdev->device) {
	case PCI_DEVICE_ID_RDK1: net2272_rdk1_remove(pdev, dev); break;
	case PCI_DEVICE_ID_RDK2: net2272_rdk2_remove(pdev, dev); break;
	default: BUG();	/* only RDK1/RDK2 ids can have been probed */
	}

	pci_disable_device(pdev);

	kfree(dev);
}
/* Table of matching PCI IDs: both PLX RDK evaluation boards. */
static struct pci_device_id __devinitdata pci_ids[] = {
	{	/* RDK 1 card */
		.class		= ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
		.class_mask	= 0,
		.vendor		= PCI_VENDOR_ID_PLX,
		.device		= PCI_DEVICE_ID_RDK1,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{	/* RDK 2 card */
		.class		= ((PCI_CLASS_BRIDGE_OTHER << 8) | 0xfe),
		.class_mask	= 0,
		.vendor		= PCI_VENDOR_ID_PLX,
		.device		= PCI_DEVICE_ID_RDK2,
		.subvendor	= PCI_ANY_ID,
		.subdevice	= PCI_ANY_ID,
	},
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(pci, pci_ids);
static struct pci_driver net2272_pci_driver = {
	.name		= driver_name,
	.id_table	= pci_ids,

	.probe		= net2272_pci_probe,
	.remove		= __devexit_p(net2272_pci_remove),
};

/* Thin wrappers so module init/exit need not care whether CONFIG_PCI is set. */
static int net2272_pci_register(void)
{
	return pci_register_driver(&net2272_pci_driver);
}

static void net2272_pci_unregister(void)
{
	pci_unregister_driver(&net2272_pci_driver);
}
#else
/* No PCI support: stubs keep the callers unconditional. */
static inline int net2272_pci_register(void) { return 0; }
static inline void net2272_pci_unregister(void) { }
#endif
/*---------------------------------------------------------------------------*/
/*
 * net2272_plat_probe - platform-bus probe: translate resources into the
 * common net2272 instance, map the register window and finish with
 * net2272_probe_fin().
 *
 * Fix versus the previous revision: the instance allocated by
 * net2272_probe_init() was leaked on every error path (the PCI probe path
 * kfree()s it, this one did not) -- now freed at the 'err' label.
 */
static int __devinit
net2272_plat_probe(struct platform_device *pdev)
{
	struct net2272 *dev;
	int ret;
	unsigned int irqflags;
	resource_size_t base, len;
	struct resource *iomem, *iomem_bus, *irq_res;

	irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iomem_bus = platform_get_resource(pdev, IORESOURCE_BUS, 0);
	if (!irq_res || !iomem) {
		dev_err(&pdev->dev, "must provide irq/base addr");
		return -EINVAL;
	}

	dev = net2272_probe_init(&pdev->dev, irq_res->start);
	if (IS_ERR(dev))
		return PTR_ERR(dev);

	/* translate the resource trigger flags into request_irq() flags */
	irqflags = 0;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHEDGE)
		irqflags |= IRQF_TRIGGER_RISING;
	if (irq_res->flags & IORESOURCE_IRQ_LOWEDGE)
		irqflags |= IRQF_TRIGGER_FALLING;
	if (irq_res->flags & IORESOURCE_IRQ_HIGHLEVEL)
		irqflags |= IRQF_TRIGGER_HIGH;
	if (irq_res->flags & IORESOURCE_IRQ_LOWLEVEL)
		irqflags |= IRQF_TRIGGER_LOW;

	base = iomem->start;
	len = resource_size(iomem);
	if (iomem_bus)
		dev->base_shift = iomem_bus->start;

	if (!request_mem_region(base, len, driver_name)) {
		dev_dbg(dev->dev, "get request memory region!\n");
		ret = -EBUSY;
		goto err;
	}
	dev->base_addr = ioremap_nocache(base, len);
	if (!dev->base_addr) {
		dev_dbg(dev->dev, "can't map memory\n");
		ret = -EFAULT;
		goto err_req;
	}

	ret = net2272_probe_fin(dev, IRQF_TRIGGER_LOW);
	if (ret)
		goto err_io;

	platform_set_drvdata(pdev, dev);
	dev_info(&pdev->dev, "running in 16-bit, %sbyte swap local bus mode\n",
		(net2272_read(dev, LOCCTL) & (1 << BYTE_SWAP)) ? "" : "no ");

	return 0;

err_io:
	iounmap(dev->base_addr);
err_req:
	release_mem_region(base, len);
err:
	kfree(dev);	/* was leaked before; PCI path frees it the same way */

	return ret;
}
/*
 * net2272_plat_remove - platform-bus remove: common teardown (which also
 * unmaps base_addr), then release the memory region and free the instance.
 */
static int __devexit
net2272_plat_remove(struct platform_device *pdev)
{
	struct net2272 *dev = platform_get_drvdata(pdev);

	net2272_remove(dev);

	release_mem_region(pdev->resource[0].start,
		resource_size(&pdev->resource[0]));

	kfree(dev);

	return 0;
}
static struct platform_driver net2272_plat_driver = {
	.probe	 = net2272_plat_probe,
	.remove	 = __devexit_p(net2272_plat_remove),
	.driver	 = {
		.name	= driver_name,
		.owner	= THIS_MODULE,
	},
	/* FIXME .suspend, .resume */
};
MODULE_ALIAS("platform:net2272");
/*
 * net2272_init - module init: register the PCI driver (a no-op stub when
 * CONFIG_PCI is off) then the platform driver; unwinds the PCI registration
 * if the platform one fails.
 */
static int __init net2272_init(void)
{
	int ret;

	ret = net2272_pci_register();
	if (ret)
		return ret;
	ret = platform_driver_register(&net2272_plat_driver);
	if (ret)
		goto err_pci;
	return ret;

err_pci:
	net2272_pci_unregister();
	return ret;
}
module_init(net2272_init);
/* Module exit: unregister both bus drivers registered in net2272_init(). */
static void __exit net2272_cleanup(void)
{
	net2272_pci_unregister();
	platform_driver_unregister(&net2272_plat_driver);
}
module_exit(net2272_cleanup);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("PLX Technology, Inc.");
MODULE_LICENSE("GPL");
| gpl-2.0 |
skalk/linux | drivers/net/ethernet/ti/cpmac.c | 140 | 33441 | // SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2006, 2007 Eugene Konev
*
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/skbuff.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/atomic.h>
#include <asm/mach-ar7/ar7.h>
MODULE_AUTHOR("Eugene Konev <ejka@imfi.kspu.ru>");
MODULE_DESCRIPTION("TI AR7 ethernet driver (CPMAC)");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:cpmac");
static int debug_level = 8;
static int dumb_switch;
/* Next 2 are only used in cpmac_probe, so it's pointless to change them */
module_param(debug_level, int, 0444);
module_param(dumb_switch, int, 0444);
MODULE_PARM_DESC(debug_level, "Number of NETIF_MSG bits to enable");
MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
#define CPMAC_VERSION "0.5.2"
/* frame size + 802.1q tag + FCS size */
#define CPMAC_SKB_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
#define CPMAC_QUEUES 8
/* Ethernet registers */
#define CPMAC_TX_CONTROL 0x0004
#define CPMAC_TX_TEARDOWN 0x0008
#define CPMAC_RX_CONTROL 0x0014
#define CPMAC_RX_TEARDOWN 0x0018
#define CPMAC_MBP 0x0100
#define MBP_RXPASSCRC 0x40000000
#define MBP_RXQOS 0x20000000
#define MBP_RXNOCHAIN 0x10000000
#define MBP_RXCMF 0x01000000
#define MBP_RXSHORT 0x00800000
#define MBP_RXCEF 0x00400000
#define MBP_RXPROMISC 0x00200000
#define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
#define MBP_RXBCAST 0x00002000
#define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
#define MBP_RXMCAST 0x00000020
#define MBP_MCASTCHAN(channel) ((channel) & 0x7)
#define CPMAC_UNICAST_ENABLE 0x0104
#define CPMAC_UNICAST_CLEAR 0x0108
#define CPMAC_MAX_LENGTH 0x010c
#define CPMAC_BUFFER_OFFSET 0x0110
#define CPMAC_MAC_CONTROL 0x0160
#define MAC_TXPTYPE 0x00000200
#define MAC_TXPACE 0x00000040
#define MAC_MII 0x00000020
#define MAC_TXFLOW 0x00000010
#define MAC_RXFLOW 0x00000008
#define MAC_MTEST 0x00000004
#define MAC_LOOPBACK 0x00000002
#define MAC_FDX 0x00000001
#define CPMAC_MAC_STATUS 0x0164
#define MAC_STATUS_QOS 0x00000004
#define MAC_STATUS_RXFLOW 0x00000002
#define MAC_STATUS_TXFLOW 0x00000001
#define CPMAC_TX_INT_ENABLE 0x0178
#define CPMAC_TX_INT_CLEAR 0x017c
#define CPMAC_MAC_INT_VECTOR 0x0180
#define MAC_INT_STATUS 0x00080000
#define MAC_INT_HOST 0x00040000
#define MAC_INT_RX 0x00020000
#define MAC_INT_TX 0x00010000
#define CPMAC_MAC_EOI_VECTOR 0x0184
#define CPMAC_RX_INT_ENABLE 0x0198
#define CPMAC_RX_INT_CLEAR 0x019c
#define CPMAC_MAC_INT_ENABLE 0x01a8
#define CPMAC_MAC_INT_CLEAR 0x01ac
#define CPMAC_MAC_ADDR_LO(channel) (0x01b0 + (channel) * 4)
#define CPMAC_MAC_ADDR_MID 0x01d0
#define CPMAC_MAC_ADDR_HI 0x01d4
#define CPMAC_MAC_HASH_LO 0x01d8
#define CPMAC_MAC_HASH_HI 0x01dc
#define CPMAC_TX_PTR(channel) (0x0600 + (channel) * 4)
#define CPMAC_RX_PTR(channel) (0x0620 + (channel) * 4)
#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
#define CPMAC_REG_END 0x0680
/* Rx/Tx statistics
* TODO: use some of them to fill stats in cpmac_stats()
*/
#define CPMAC_STATS_RX_GOOD 0x0200
#define CPMAC_STATS_RX_BCAST 0x0204
#define CPMAC_STATS_RX_MCAST 0x0208
#define CPMAC_STATS_RX_PAUSE 0x020c
#define CPMAC_STATS_RX_CRC 0x0210
#define CPMAC_STATS_RX_ALIGN 0x0214
#define CPMAC_STATS_RX_OVER 0x0218
#define CPMAC_STATS_RX_JABBER 0x021c
#define CPMAC_STATS_RX_UNDER 0x0220
#define CPMAC_STATS_RX_FRAG 0x0224
#define CPMAC_STATS_RX_FILTER 0x0228
#define CPMAC_STATS_RX_QOSFILTER 0x022c
#define CPMAC_STATS_RX_OCTETS 0x0230
#define CPMAC_STATS_TX_GOOD 0x0234
#define CPMAC_STATS_TX_BCAST 0x0238
#define CPMAC_STATS_TX_MCAST 0x023c
#define CPMAC_STATS_TX_PAUSE 0x0240
#define CPMAC_STATS_TX_DEFER 0x0244
#define CPMAC_STATS_TX_COLLISION 0x0248
#define CPMAC_STATS_TX_SINGLECOLL 0x024c
#define CPMAC_STATS_TX_MULTICOLL 0x0250
#define CPMAC_STATS_TX_EXCESSCOLL 0x0254
#define CPMAC_STATS_TX_LATECOLL 0x0258
#define CPMAC_STATS_TX_UNDERRUN 0x025c
#define CPMAC_STATS_TX_CARRIERSENSE 0x0260
#define CPMAC_STATS_TX_OCTETS 0x0264
#define cpmac_read(base, reg) (readl((void __iomem *)(base) + (reg)))
#define cpmac_write(base, reg, val) (writel(val, (void __iomem *)(base) + \
(reg)))
/* MDIO bus */
#define CPMAC_MDIO_VERSION 0x0000
#define CPMAC_MDIO_CONTROL 0x0004
#define MDIOC_IDLE 0x80000000
#define MDIOC_ENABLE 0x40000000
#define MDIOC_PREAMBLE 0x00100000
#define MDIOC_FAULT 0x00080000
#define MDIOC_FAULTDETECT 0x00040000
#define MDIOC_INTTEST 0x00020000
#define MDIOC_CLKDIV(div) ((div) & 0xff)
#define CPMAC_MDIO_ALIVE 0x0008
#define CPMAC_MDIO_LINK 0x000c
#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
#define MDIO_BUSY 0x80000000
#define MDIO_WRITE 0x40000000
#define MDIO_REG(reg) (((reg) & 0x1f) << 21)
#define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
#define MDIO_DATA(data) ((data) & 0xffff)
#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
#define PHYSEL_LINKSEL 0x00000040
#define PHYSEL_LINKINT 0x00000020
/*
 * Hardware DMA descriptor, shared with the CPMAC engine.  The first four
 * words (hw_next .. dataflags) are the hardware-visible layout; the
 * remaining fields are driver bookkeeping.
 */
struct cpmac_desc {
	u32 hw_next;		/* physical address of the next descriptor */
	u32 hw_data;		/* physical address of the data buffer */
	u16 buflen;
	u16 bufflags;
	u16 datalen;
	u16 dataflags;
/* dataflags bits */
#define CPMAC_SOP			0x8000	/* start of packet */
#define CPMAC_EOP			0x4000	/* end of packet */
#define CPMAC_OWN			0x2000	/* owned by hardware */
#define CPMAC_EOQ			0x1000	/* end of queue */
	struct sk_buff *skb;		/* buffer currently attached */
	struct cpmac_desc *next;	/* driver-side ring links */
	struct cpmac_desc *prev;
	dma_addr_t mapping;		/* DMA address of this descriptor */
	dma_addr_t data_mapping;	/* DMA address of skb->data */
};
/* Per-device driver state, stored in netdev_priv(). */
struct cpmac_priv {
	spinlock_t lock;		/* protects tx stats updates */
	spinlock_t rx_lock;		/* protects the rx descriptor ring */
	struct cpmac_desc *rx_head;	/* next rx descriptor to process */
	int ring_size;
	struct cpmac_desc *desc_ring;	/* coherent descriptor ring */
	dma_addr_t dma_ring;
	void __iomem *regs;
	struct mii_bus *mii_bus;
	char phy_name[MII_BUS_ID_SIZE + 3];
	int oldlink, oldspeed, oldduplex;	/* cached link state */
	u32 msg_enable;			/* NETIF_MSG_* verbosity mask */
	struct net_device *dev;
	struct work_struct reset_work;	/* deferred hardware reset */
	struct platform_device *pdev;
	struct napi_struct napi;
	atomic_t reset_pending;		/* nonzero while a reset is queued */
};
static irqreturn_t cpmac_irq(int, void *);
static void cpmac_hw_start(struct net_device *dev);
static void cpmac_hw_stop(struct net_device *dev);
static int cpmac_stop(struct net_device *dev);
static int cpmac_open(struct net_device *dev);
/*
 * Debug helper: dump every CPMAC register, 16 bytes per line.
 * NOTE(review): uses bare printk() continuations without an explicit
 * KERN_CONT/level -- output may interleave on SMP; confirm acceptable
 * for a debug-only path.
 */
static void cpmac_dump_regs(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);

	for (i = 0; i < CPMAC_REG_END; i += 4) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk("%s: reg[%p]:", dev->name, priv->regs + i);
		}
		printk(" %08x", cpmac_read(priv->regs, i));
	}
	printk("\n");
}
static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
{
int i;
printk("%s: desc[%p]:", dev->name, desc);
for (i = 0; i < sizeof(*desc) / 4; i++)
printk(" %08x", ((u32 *)desc)[i]);
printk("\n");
}
/* Debug helper: walk the rx ring once, dumping each descriptor. */
static void cpmac_dump_all_desc(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *cur = priv->rx_head;

	/* the ring is circular; always print at least the head */
	while (1) {
		cpmac_dump_desc(dev, cur);
		cur = cur->next;
		if (cur == priv->rx_head)
			break;
	}
}
/* Debug helper: hex-dump an skb's payload, 16 bytes per line. */
static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
{
	int i;

	printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
	for (i = 0; i < skb->len; i++) {
		if (i % 16 == 0) {
			if (i)
				printk("\n");
			printk("%s: data[%p]:", dev->name, skb->data + i);
		}
		printk(" %02x", ((u8 *)skb->data)[i]);
	}
	printk("\n");
}
/*
 * MDIO bus read: wait until channel 0 is idle, issue the read, then
 * busy-wait for completion and return the 16-bit data field.
 */
static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	u32 val;

	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_REG(reg) |
		    MDIO_PHY(phy_id));
	while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
		cpu_relax();

	return MDIO_DATA(val);
}
/*
 * MDIO bus write: wait until channel 0 is idle, then post the write.
 * Does not wait for the write itself to complete; the next access will.
 */
static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
			    int reg, u16 val)
{
	while (cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0)) & MDIO_BUSY)
		cpu_relax();
	cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
		    MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));

	return 0;
}
/*
 * cpmac_mdio_reset - reset the MDIO controller and program its clock divider
 * from the "cpmac" clock rate.
 *
 * Fix versus the previous revision: the clk reference obtained with
 * clk_get() was never released; the clock is only needed to read its rate,
 * so drop the reference with clk_put() once the divider is programmed.
 */
static int cpmac_mdio_reset(struct mii_bus *bus)
{
	struct clk *cpmac_clk;

	cpmac_clk = clk_get(&bus->dev, "cpmac");
	if (IS_ERR(cpmac_clk)) {
		pr_err("unable to get cpmac clock\n");
		return -1;
	}
	ar7_device_reset(AR7_RESET_BIT_MDIO);
	cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
		    MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
	/* only the rate was needed; balance the clk_get() above */
	clk_put(cpmac_clk);

	return 0;
}
static struct mii_bus *cpmac_mii;
/*
 * cpmac_set_multicast_list - program rx filtering from dev->flags.
 * Promiscuous mode toggles MBP_RXPROMISC; ALLMULTI opens the hash filter
 * completely; otherwise each multicast address is folded into the
 * hardware's 64-bit hash filter using the chip's own (non-crc32) hash.
 */
static void cpmac_set_multicast_list(struct net_device *dev)
{
	struct netdev_hw_addr *ha;
	u8 tmp;
	u32 mbp, bit, hash[2] = { 0, };
	struct cpmac_priv *priv = netdev_priv(dev);

	mbp = cpmac_read(priv->regs, CPMAC_MBP);
	if (dev->flags & IFF_PROMISC) {
		cpmac_write(priv->regs, CPMAC_MBP, (mbp & ~MBP_PROMISCCHAN(0)) |
			    MBP_RXPROMISC);
	} else {
		cpmac_write(priv->regs, CPMAC_MBP, mbp & ~MBP_RXPROMISC);
		if (dev->flags & IFF_ALLMULTI) {
			/* enable all multicast mode */
			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
		} else {
			/* cpmac uses some strange mac address hashing
			 * (not crc32)
			 */
			netdev_for_each_mc_addr(ha, dev) {
				bit = 0;
				tmp = ha->addr[0];
				bit ^= (tmp >> 2) ^ (tmp << 4);
				tmp = ha->addr[1];
				bit ^= (tmp >> 4) ^ (tmp << 2);
				tmp = ha->addr[2];
				bit ^= (tmp >> 6) ^ tmp;
				tmp = ha->addr[3];
				bit ^= (tmp >> 2) ^ (tmp << 4);
				tmp = ha->addr[4];
				bit ^= (tmp >> 4) ^ (tmp << 2);
				tmp = ha->addr[5];
				bit ^= (tmp >> 6) ^ tmp;
				bit &= 0x3f;	/* 64-bit filter: bits 0..63 */
				hash[bit / 32] |= 1 << (bit % 32);
			}

			cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, hash[0]);
			cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, hash[1]);
		}
	}
}
/*
 * cpmac_rx_one - harvest one completed rx descriptor.
 * Acknowledges the descriptor to the hardware, hands the filled skb up,
 * and replaces it with a freshly mapped one.  If allocation fails the old
 * skb stays attached (the packet is dropped) so the ring never runs dry.
 * Returns the received skb, or NULL on spurious interrupt / drop.
 */
static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
				    struct cpmac_desc *desc)
{
	struct sk_buff *skb, *result = NULL;

	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(priv->dev, desc);
	cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
	if (unlikely(!desc->datalen)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx: spurious interrupt\n");

		return NULL;
	}

	skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
	if (likely(skb)) {
		/* hand the old skb up and swap in the replacement */
		skb_put(desc->skb, desc->datalen);
		desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
		skb_checksum_none_assert(desc->skb);
		priv->dev->stats.rx_packets++;
		priv->dev->stats.rx_bytes += desc->datalen;
		result = desc->skb;
		dma_unmap_single(&priv->dev->dev, desc->data_mapping,
				 CPMAC_SKB_SIZE, DMA_FROM_DEVICE);
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&priv->dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		if (unlikely(netif_msg_pktdata(priv))) {
			netdev_dbg(priv->dev, "received packet:\n");
			cpmac_dump_skb(priv->dev, result);
		}
	} else {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev,
				    "low on skbs, dropping packet\n");

		priv->dev->stats.rx_dropped++;
	}

	/* re-arm the descriptor for the hardware */
	desc->buflen = CPMAC_SKB_SIZE;
	desc->dataflags = CPMAC_OWN;

	return result;
}
/*
 * cpmac_poll - NAPI poll: harvest up to @budget packets from the rx ring,
 * re-chain the freed descriptors, and restart the receiver if it stalled
 * on an EOQ descriptor.  Inconsistent ring state (duplicate EOQ, restart
 * target still hardware-owned) falls through to fatal_error, which stops
 * the device and schedules a full reset.  Returns 1 while more work may
 * remain, 0 when polling is complete (NAPI re-enabled interrupts).
 */
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx: polling, but no queue\n");

		spin_unlock(&priv->rx_lock);
		napi_complete(napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	/* walk driver-owned descriptors until budget or hardware-owned one */
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					netdev_err(priv->dev, "poll found a"
						" duplicate EOQ: %p and %p\n",
						restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list
		 */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size)
	 */
	if (!restart &&
	    (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
		    == CPMAC_EOQ &&
	    (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			netdev_warn(priv->dev, "rx dma ring overrun\n");

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				netdev_err(priv->dev, "cpmac_poll is trying "
					"to restart rx from a descriptor "
					"that's not free: %p\n", restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		netdev_dbg(priv->dev, "poll processed %d packets\n", received);

	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode
		 */
		napi_complete(napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging.
	 */
	if (netif_msg_drv(priv)) {
		netdev_err(priv->dev, "cpmac_poll is confused. "
			"Resetting hardware\n");
		cpmac_dump_all_desc(priv->dev);
		netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	napi_complete(napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	/* if the work was already queued, drop our pending count again */
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);

	return 0;
}
/* Queue one skb for transmission.  Each of the CPMAC_QUEUES tx queues owns
 * exactly one descriptor; a queue is busy while the hardware still owns its
 * descriptor (CPMAC_OWN set).
 */
static netdev_tx_t cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue;
	unsigned int len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	/* A hardware reset is in flight; ask the stack to retry later. */
	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	/* Pad short frames to the ethernet minimum (frees skb on failure). */
	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max_t(unsigned int, skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
	netif_stop_subqueue(dev, queue);

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		/* Descriptor still owned by the DMA engine: queue full. */
		if (netif_msg_tx_err(priv) && net_ratelimit())
			netdev_warn(dev, "tx dma ring full\n");

		return NETDEV_TX_BUSY;
	}

	/* NOTE(review): lock/unlock with an empty critical section -
	 * presumably a synchronization point against other holders of
	 * priv->lock; confirm the original intent before changing.
	 */
	spin_lock(&priv->lock);
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	/* Hand the descriptor to the DMA engine. */
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}
static void cpmac_end_xmit(struct net_device *dev, int queue)
{
struct cpmac_desc *desc;
struct cpmac_priv *priv = netdev_priv(dev);
desc = &priv->desc_ring[queue];
cpmac_write(priv->regs, CPMAC_TX_ACK(queue), (u32)desc->mapping);
if (likely(desc->skb)) {
spin_lock(&priv->lock);
dev->stats.tx_packets++;
dev->stats.tx_bytes += desc->skb->len;
spin_unlock(&priv->lock);
dma_unmap_single(&dev->dev, desc->data_mapping, desc->skb->len,
DMA_TO_DEVICE);
if (unlikely(netif_msg_tx_done(priv)))
netdev_dbg(dev, "sent 0x%p, len=%d\n",
desc->skb, desc->skb->len);
dev_consume_skb_irq(desc->skb);
desc->skb = NULL;
if (__netif_subqueue_stopped(dev, queue))
netif_wake_subqueue(dev, queue);
} else {
if (netif_msg_tx_err(priv) && net_ratelimit())
netdev_warn(dev, "end_xmit: spurious interrupt\n");
if (__netif_subqueue_stopped(dev, queue))
netif_wake_subqueue(dev, queue);
}
}
/* Stop the MAC: reset the peripheral, disable both DMA directions, detach
 * every channel from its ring, ack all pending interrupts and drop MII.
 */
static void cpmac_hw_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

	ar7_device_reset(pdata->reset_bit);
	/* Clear the enable bit of the rx/tx DMA controllers. */
	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) & ~1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) & ~1);
	/* Detach all eight tx/rx channels from their descriptor chains. */
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	/* Acknowledge anything still pending on all channels. */
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) & ~MAC_MII);
}
/* Bring the MAC up from reset: program the rx ring head, MAC address,
 * frame limits, interrupt masks, and finally enable rx/tx DMA and MII.
 */
static void cpmac_hw_start(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct plat_cpmac_data *pdata = dev_get_platdata(&priv->pdev->dev);

	ar7_device_reset(pdata->reset_bit);
	/* Start with every channel detached, then attach rx channel 0. */
	for (i = 0; i < 8; i++) {
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
		cpmac_write(priv->regs, CPMAC_RX_PTR(i), 0);
	}
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), priv->rx_head->mapping);
	cpmac_write(priv->regs, CPMAC_MBP, MBP_RXSHORT | MBP_RXBCAST |
		    MBP_RXMCAST);
	cpmac_write(priv->regs, CPMAC_BUFFER_OFFSET, 0);
	/* The low address byte is replicated into all eight channel slots. */
	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_MAC_ADDR_LO(i), dev->dev_addr[5]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_MID, dev->dev_addr[4]);
	cpmac_write(priv->regs, CPMAC_MAC_ADDR_HI, dev->dev_addr[0] |
		    (dev->dev_addr[1] << 8) | (dev->dev_addr[2] << 16) |
		    (dev->dev_addr[3] << 24));
	cpmac_write(priv->regs, CPMAC_MAX_LENGTH, CPMAC_SKB_SIZE);
	/* Clear any stale interrupt state before unmasking. */
	cpmac_write(priv->regs, CPMAC_UNICAST_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_TX_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
	cpmac_write(priv->regs, CPMAC_UNICAST_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
	cpmac_write(priv->regs, CPMAC_TX_INT_ENABLE, 0xff);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
	/* Enable rx DMA, tx DMA, then the MAC itself. */
	cpmac_write(priv->regs, CPMAC_RX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_RX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_TX_CONTROL,
		    cpmac_read(priv->regs, CPMAC_TX_CONTROL) | 1);
	cpmac_write(priv->regs, CPMAC_MAC_CONTROL,
		    cpmac_read(priv->regs, CPMAC_MAC_CONTROL) | MAC_MII |
		    MAC_FDX);
}
/* Reinitialize the rx descriptor ring after a fatal error: reclaim any
 * descriptor the hardware already filled (counting it as dropped), relink
 * the hardware chain, and terminate it at the ring's tail.
 */
static void cpmac_clear_rx(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	struct cpmac_desc *cur = priv->rx_head;
	int n;

	if (unlikely(!cur))
		return;

	for (n = priv->ring_size; n > 0; n--, cur = cur->next) {
		if (!(cur->dataflags & CPMAC_OWN)) {
			if (netif_msg_rx_err(priv) && net_ratelimit())
				netdev_warn(dev, "packet dropped\n");
			if (unlikely(netif_msg_hw(priv)))
				cpmac_dump_desc(dev, cur);
			/* Hand the buffer back to the DMA engine. */
			cur->dataflags = CPMAC_OWN;
			dev->stats.rx_dropped++;
		}
		cur->hw_next = cur->next->mapping;
	}
	priv->rx_head->prev->hw_next = 0;
}
static void cpmac_clear_tx(struct net_device *dev)
{
struct cpmac_priv *priv = netdev_priv(dev);
int i;
if (unlikely(!priv->desc_ring))
return;
for (i = 0; i < CPMAC_QUEUES; i++) {
priv->desc_ring[i].dataflags = 0;
if (priv->desc_ring[i].skb) {
dev_kfree_skb_any(priv->desc_ring[i].skb);
priv->desc_ring[i].skb = NULL;
}
}
}
/* Deferred (workqueue) recovery from a fatal hardware error: flush both
 * rings, restart the MAC, then let the stack transmit again.
 */
static void cpmac_hw_error(struct work_struct *work)
{
	struct cpmac_priv *priv =
		container_of(work, struct cpmac_priv, reset_work);

	/* rx ring is shared with the NAPI poll loop. */
	spin_lock(&priv->rx_lock);
	cpmac_clear_rx(priv->dev);
	spin_unlock(&priv->rx_lock);
	cpmac_clear_tx(priv->dev);
	cpmac_hw_start(priv->dev);
	/* Order the restart before dropping the reset-pending flag. */
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
	cpmac_write(priv->regs, CPMAC_MAC_INT_ENABLE, 3);
}
/* Decode MAC_STATUS after a host-error interrupt.  If either DMA direction
 * reports an error code, log it, stop the MAC and schedule the deferred
 * reset worker.
 */
static void cpmac_check_status(struct net_device *dev)
{
	struct cpmac_priv *priv = netdev_priv(dev);
	u32 macstatus = cpmac_read(priv->regs, CPMAC_MAC_STATUS);
	/* Channel / error-code fields packed into MAC_STATUS. */
	int rx_channel = (macstatus >> 8) & 7;
	int rx_code = (macstatus >> 12) & 15;
	int tx_channel = (macstatus >> 16) & 7;
	int tx_code = (macstatus >> 20) & 15;

	if (rx_code || tx_code) {
		if (netif_msg_drv(priv) && net_ratelimit()) {
			/* Can't find any documentation on what these
			 * error codes actually are. So just log them and hope..
			 */
			if (rx_code)
				netdev_warn(dev, "host error %d on rx "
					"channel %d (macstatus %08x), resetting\n",
					rx_code, rx_channel, macstatus);
			if (tx_code)
				netdev_warn(dev, "host error %d on tx "
					"channel %d (macstatus %08x), resetting\n",
					tx_code, tx_channel, macstatus);
		}

		netif_tx_stop_all_queues(dev);
		cpmac_hw_stop(dev);
		/* Only raise reset_pending if the worker was actually queued. */
		if (schedule_work(&priv->reset_work))
			atomic_inc(&priv->reset_pending);
		if (unlikely(netif_msg_hw(priv)))
			cpmac_dump_regs(dev);
	}

	/* Ack the host-error interrupt source. */
	cpmac_write(priv->regs, CPMAC_MAC_INT_CLEAR, 0xff);
}
/* Top-half interrupt handler: dispatch tx completions, kick NAPI for rx,
 * signal end-of-interrupt, and check for fatal host errors.
 */
static irqreturn_t cpmac_irq(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cpmac_priv *priv;
	int queue;
	u32 status;

	priv = netdev_priv(dev);

	status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);

	if (unlikely(netif_msg_intr(priv)))
		netdev_dbg(dev, "interrupt status: 0x%08x\n", status);

	if (status & MAC_INT_TX)
		cpmac_end_xmit(dev, (status & 7));

	if (status & MAC_INT_RX) {
		queue = (status >> 8) & 7;
		if (napi_schedule_prep(&priv->napi)) {
			/* Mask the rx channel until the poll loop finishes. */
			cpmac_write(priv->regs, CPMAC_RX_INT_CLEAR, 1 << queue);
			__napi_schedule(&priv->napi);
		}
	}

	/* End-of-interrupt: allow the controller to raise the next one. */
	cpmac_write(priv->regs, CPMAC_MAC_EOI_VECTOR, 0);

	if (unlikely(status & (MAC_INT_HOST | MAC_INT_STATUS)))
		cpmac_check_status(dev);

	return IRQ_HANDLED;
}
/* ndo_tx_timeout handler: count the error, flush the tx ring, and wake
 * the queues.  reset_pending is raised around the flush so that
 * cpmac_start_xmit() backs off while descriptors are being reclaimed.
 */
static void cpmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	spin_lock(&priv->lock);
	dev->stats.tx_errors++;
	spin_unlock(&priv->lock);
	if (netif_msg_tx_err(priv) && net_ratelimit())
		netdev_warn(dev, "transmit timeout\n");

	atomic_inc(&priv->reset_pending);
	barrier();
	cpmac_clear_tx(dev);
	barrier();
	atomic_dec(&priv->reset_pending);

	netif_tx_wake_all_queues(priv->dev);
}
/* ethtool get_ringparam: only the rx ring is configurable, everything
 * else is reported as a fixed size of one.
 */
static void cpmac_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	ring->rx_pending = priv->ring_size;
	ring->rx_max_pending = 1024;

	ring->rx_mini_pending = 1;
	ring->rx_mini_max_pending = 1;
	ring->rx_jumbo_pending = 1;
	ring->rx_jumbo_max_pending = 1;

	ring->tx_pending = 1;
	ring->tx_max_pending = 1;
}
/* ethtool set_ringparam: the rx ring may only be resized while the
 * interface is down; the new size takes effect on the next open.
 */
static int cpmac_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ring)
{
	struct cpmac_priv *priv = netdev_priv(dev);

	if (!netif_running(dev)) {
		priv->ring_size = ring->rx_pending;
		return 0;
	}

	return -EBUSY;
}
/* ethtool get_drvinfo: static identification strings. */
static void cpmac_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	strlcpy(info->version, CPMAC_VERSION, sizeof(info->version));
	strlcpy(info->driver, "cpmac", sizeof(info->driver));
	snprintf(info->bus_info, sizeof(info->bus_info), "%s", "cpmac");
}
/* ethtool operations; link settings are delegated to the PHY layer. */
static const struct ethtool_ops cpmac_ethtool_ops = {
	.get_drvinfo = cpmac_get_drvinfo,
	.get_link = ethtool_op_get_link,
	.get_ringparam = cpmac_get_ringparam,
	.set_ringparam = cpmac_set_ringparam,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};
static void cpmac_adjust_link(struct net_device *dev)
{
struct cpmac_priv *priv = netdev_priv(dev);
int new_state = 0;
spin_lock(&priv->lock);
if (dev->phydev->link) {
netif_tx_start_all_queues(dev);
if (dev->phydev->duplex != priv->oldduplex) {
new_state = 1;
priv->oldduplex = dev->phydev->duplex;
}
if (dev->phydev->speed != priv->oldspeed) {
new_state = 1;
priv->oldspeed = dev->phydev->speed;
}
if (!priv->oldlink) {
new_state = 1;
priv->oldlink = 1;
}
} else if (priv->oldlink) {
new_state = 1;
priv->oldlink = 0;
priv->oldspeed = 0;
priv->oldduplex = -1;
}
if (new_state && netif_msg_link(priv) && net_ratelimit())
phy_print_status(dev->phydev);
spin_unlock(&priv->lock);
}
/* ndo_open: map the register window, build the rx descriptor ring
 * (ring_size data descriptors chained after CPMAC_QUEUES tx slots in one
 * coherent allocation), request the irq, and start the hardware.
 * Error paths unwind in reverse via the fail_* labels.
 */
static int cpmac_open(struct net_device *dev)
{
	int i, size, res;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;
	struct cpmac_desc *desc;
	struct sk_buff *skb;

	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
		if (netif_msg_drv(priv))
			netdev_err(dev, "failed to request registers\n");

		res = -ENXIO;
		goto fail_reserve;
	}

	priv->regs = ioremap(mem->start, resource_size(mem));
	if (!priv->regs) {
		if (netif_msg_drv(priv))
			netdev_err(dev, "failed to remap registers\n");

		res = -ENXIO;
		goto fail_remap;
	}

	/* One coherent block: CPMAC_QUEUES tx descriptors + rx ring. */
	size = priv->ring_size + CPMAC_QUEUES;
	priv->desc_ring = dma_alloc_coherent(&dev->dev,
					     sizeof(struct cpmac_desc) * size,
					     &priv->dma_ring,
					     GFP_KERNEL);
	if (!priv->desc_ring) {
		res = -ENOMEM;
		goto fail_alloc;
	}

	/* Record each descriptor's bus address for the hardware chain. */
	for (i = 0; i < size; i++)
		priv->desc_ring[i].mapping = priv->dma_ring + sizeof(*desc) * i;

	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
		skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
		if (unlikely(!skb)) {
			res = -ENOMEM;
			goto fail_desc;
		}
		desc->skb = skb;
		desc->data_mapping = dma_map_single(&dev->dev, skb->data,
						    CPMAC_SKB_SIZE,
						    DMA_FROM_DEVICE);
		desc->hw_data = (u32)desc->data_mapping;
		desc->buflen = CPMAC_SKB_SIZE;
		desc->dataflags = CPMAC_OWN;
		/* Link the ring circularly in software... */
		desc->next = &priv->rx_head[(i + 1) % priv->ring_size];
		desc->next->prev = desc;
		desc->hw_next = (u32)desc->next->mapping;
	}

	/* ...but terminate the hardware chain at the tail. */
	priv->rx_head->prev->hw_next = (u32)0;

	res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
	if (res) {
		if (netif_msg_drv(priv))
			netdev_err(dev, "failed to obtain irq\n");

		goto fail_irq;
	}

	atomic_set(&priv->reset_pending, 0);
	INIT_WORK(&priv->reset_work, cpmac_hw_error);
	cpmac_hw_start(dev);

	napi_enable(&priv->napi);
	phy_start(dev->phydev);

	return 0;

fail_irq:
fail_desc:
	/* Free the skbs allocated so far (unfilled slots have skb == NULL
	 * because dma_alloc_coherent returned zeroed memory).
	 */
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}
	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) * size,
			  priv->desc_ring, priv->dma_ring);

fail_alloc:
	iounmap(priv->regs);

fail_remap:
	release_mem_region(mem->start, resource_size(mem));

fail_reserve:
	return res;
}
/* ndo_stop: quiesce the stack and the hardware, then release the irq,
 * register mapping, rx buffers, and the descriptor ring allocated by
 * cpmac_open() - in that order.
 */
static int cpmac_stop(struct net_device *dev)
{
	int i;
	struct cpmac_priv *priv = netdev_priv(dev);
	struct resource *mem;

	netif_tx_stop_all_queues(dev);

	/* Make sure no deferred reset runs into the teardown. */
	cancel_work_sync(&priv->reset_work);
	napi_disable(&priv->napi);
	phy_stop(dev->phydev);

	cpmac_hw_stop(dev);

	for (i = 0; i < 8; i++)
		cpmac_write(priv->regs, CPMAC_TX_PTR(i), 0);
	cpmac_write(priv->regs, CPMAC_RX_PTR(0), 0);
	cpmac_write(priv->regs, CPMAC_MBP, 0);

	free_irq(dev->irq, dev);
	iounmap(priv->regs);
	mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
	release_mem_region(mem->start, resource_size(mem));
	/* rx descriptors start after the CPMAC_QUEUES tx slots. */
	priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
	for (i = 0; i < priv->ring_size; i++) {
		if (priv->rx_head[i].skb) {
			dma_unmap_single(&dev->dev,
					 priv->rx_head[i].data_mapping,
					 CPMAC_SKB_SIZE,
					 DMA_FROM_DEVICE);
			kfree_skb(priv->rx_head[i].skb);
		}
	}

	dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
			  (CPMAC_QUEUES + priv->ring_size),
			  priv->desc_ring, priv->dma_ring);
	return 0;
}
/* Network device operations; ioctls are forwarded to the PHY. */
static const struct net_device_ops cpmac_netdev_ops = {
	.ndo_open		= cpmac_open,
	.ndo_stop		= cpmac_stop,
	.ndo_start_xmit		= cpmac_start_xmit,
	.ndo_tx_timeout		= cpmac_tx_timeout,
	.ndo_set_rx_mode	= cpmac_set_multicast_list,
	.ndo_do_ioctl		= phy_do_ioctl_running,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= eth_mac_addr,
};
/* Set by cpmac_init() when the MDIO scan suggests an external switch. */
static int external_switch;

/* Platform probe: pick the PHY (real MDIO scan, or the fixed bus when a
 * switch is attached), allocate the multiqueue netdev, connect the PHY
 * and register the device.
 */
static int cpmac_probe(struct platform_device *pdev)
{
	int rc, phy_id;
	char mdio_bus_id[MII_BUS_ID_SIZE];
	struct resource *mem;
	struct cpmac_priv *priv;
	struct net_device *dev;
	struct plat_cpmac_data *pdata;
	struct phy_device *phydev = NULL;

	pdata = dev_get_platdata(&pdev->dev);

	if (external_switch || dumb_switch) {
		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
		phy_id = pdev->id;
	} else {
		/* Scan the platform-provided mask for an attached PHY. */
		for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) {
			if (!(pdata->phy_mask & (1 << phy_id)))
				continue;
			if (!mdiobus_get_phy(cpmac_mii, phy_id))
				continue;
			strncpy(mdio_bus_id, cpmac_mii->id, MII_BUS_ID_SIZE);
			break;
		}
	}

	if (phy_id == PHY_MAX_ADDR) {
		/* No PHY found: fall back to the fixed bus. */
		dev_err(&pdev->dev, "no PHY present, falling back "
			"to switch on MDIO bus 0\n");
		strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
		phy_id = pdev->id;
	}
	/* strncpy() may leave the id unterminated; force termination. */
	mdio_bus_id[sizeof(mdio_bus_id) - 1] = '\0';

	dev = alloc_etherdev_mq(sizeof(*priv), CPMAC_QUEUES);
	if (!dev)
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	platform_set_drvdata(pdev, dev);
	priv = netdev_priv(dev);

	priv->pdev = pdev;
	mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
	if (!mem) {
		rc = -ENODEV;
		goto fail;
	}

	dev->irq = platform_get_irq_byname(pdev, "irq");

	dev->netdev_ops = &cpmac_netdev_ops;
	dev->ethtool_ops = &cpmac_ethtool_ops;

	netif_napi_add(dev, &priv->napi, cpmac_poll, 64);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->rx_lock);
	priv->dev = dev;
	priv->ring_size = 64;
	priv->msg_enable = netif_msg_init(debug_level, 0xff);
	memcpy(dev->dev_addr, pdata->dev_addr, sizeof(pdata->dev_addr));

	snprintf(priv->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT,
		 mdio_bus_id, phy_id);

	phydev = phy_connect(dev, priv->phy_name, cpmac_adjust_link,
			     PHY_INTERFACE_MODE_MII);
	if (IS_ERR(phydev)) {
		if (netif_msg_drv(priv))
			dev_err(&pdev->dev, "Could not attach to PHY\n");

		rc = PTR_ERR(phydev);
		goto fail;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Could not register net device\n");
		goto fail;
	}

	if (netif_msg_probe(priv)) {
		dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, "
			 "mac: %pM\n", (void *)mem->start, dev->irq,
			 priv->phy_name, dev->dev_addr);
	}

	return 0;

fail:
	free_netdev(dev);
	return rc;
}
/* Platform remove: unregister and free the netdev.  Resources taken in
 * cpmac_open() are released by cpmac_stop() before this runs.
 */
static int cpmac_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);

	unregister_netdev(dev);
	free_netdev(dev);

	return 0;
}
/* Platform driver binding for the "cpmac" device registered by the
 * AR7 platform code.
 */
static struct platform_driver cpmac_driver = {
	.driver = {
		.name 	= "cpmac",
	},
	.probe 	= cpmac_probe,
	.remove = cpmac_remove,
};
/* Module init: set up the MDIO bus, reset the EPHY/CPMAC blocks, probe
 * which PHYs are alive, and register the platform driver.  More than one
 * alive PHY is taken to mean an external switch is attached.
 */
int cpmac_init(void)
{
	u32 mask;
	int i, res;

	cpmac_mii = mdiobus_alloc();
	if (cpmac_mii == NULL)
		return -ENOMEM;

	cpmac_mii->name = "cpmac-mii";
	cpmac_mii->read = cpmac_mdio_read;
	cpmac_mii->write = cpmac_mdio_write;
	cpmac_mii->reset = cpmac_mdio_reset;

	cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);

	if (!cpmac_mii->priv) {
		pr_err("Can't ioremap mdio registers\n");
		res = -ENXIO;
		goto fail_alloc;
	}

	/* FIXME: unhardcode gpio&reset bits */
	ar7_gpio_disable(26);
	ar7_gpio_disable(27);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_LO);
	ar7_device_reset(AR7_RESET_BIT_CPMAC_HI);
	ar7_device_reset(AR7_RESET_BIT_EPHY);

	cpmac_mii->reset(cpmac_mii);

	/* Poll up to ~3s for at least one PHY to report alive. */
	for (i = 0; i < 300; i++) {
		mask = cpmac_read(cpmac_mii->priv, CPMAC_MDIO_ALIVE);
		if (mask)
			break;
		else
			msleep(10);
	}

	mask &= 0x7fffffff;
	if (mask & (mask - 1)) {
		/* More than one bit set: assume an external switch. */
		external_switch = 1;
		mask = 0;
	}

	cpmac_mii->phy_mask = ~(mask | 0x80000000);
	snprintf(cpmac_mii->id, MII_BUS_ID_SIZE, "cpmac-1");

	res = mdiobus_register(cpmac_mii);
	if (res)
		goto fail_mii;

	res = platform_driver_register(&cpmac_driver);
	if (res)
		goto fail_cpmac;

	return 0;

fail_cpmac:
	mdiobus_unregister(cpmac_mii);

fail_mii:
	iounmap(cpmac_mii->priv);

fail_alloc:
	mdiobus_free(cpmac_mii);

	return res;
}
/* Module exit: tear down in reverse order of cpmac_init(). */
void cpmac_exit(void)
{
	platform_driver_unregister(&cpmac_driver);
	mdiobus_unregister(cpmac_mii);
	iounmap(cpmac_mii->priv);
	mdiobus_free(cpmac_mii);
}

module_init(cpmac_init);
module_exit(cpmac_exit);
| gpl-2.0 |
Andiry/linux-test | net/mac80211/aes_gcm.c | 140 | 2596 | /*
* Copyright 2014-2015, Qualcomm Atheros, Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/crypto.h>
#include <linux/err.h>
#include <crypto/aes.h>
#include <net/mac80211.h>
#include "key.h"
#include "aes_gcm.h"
/* GCMP encryption: encrypt @data in place and write the MIC to @mic.
 * @j_0 is the initial counter block, @aad holds a 2-byte big-endian
 * length followed by the associated data itself.
 * NOTE(review): uses the pre-4.2 AEAD API (aead_request_set_assoc);
 * newer kernels fold the AAD into the src/dst scatterlists.
 */
void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
			       u8 *data, size_t data_len, u8 *mic)
{
	struct scatterlist assoc, pt, ct[2];
	/* Request allocated on the stack, sized and aligned for the tfm. */
	char aead_req_data[sizeof(struct aead_request) +
			   crypto_aead_reqsize(tfm)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *aead_req = (void *)aead_req_data;

	memset(aead_req, 0, sizeof(aead_req_data));

	sg_init_one(&pt, data, data_len);
	/* aad[0..1] is the AAD length in big-endian. */
	sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
	sg_init_table(ct, 2);
	/* Destination: ciphertext in place, MIC appended separately. */
	sg_set_buf(&ct[0], data, data_len);
	sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN);

	aead_request_set_tfm(aead_req, tfm);
	aead_request_set_assoc(aead_req, &assoc, assoc.length);
	aead_request_set_crypt(aead_req, &pt, ct, data_len, j_0);

	crypto_aead_encrypt(aead_req);
}
/* GCMP decryption: decrypt @data in place, verifying the MIC in @mic.
 * Returns 0 on success or the crypto layer's error (e.g. -EBADMSG on
 * authentication failure).  See the encrypt-side notes on the AAD
 * layout and the legacy AEAD API.
 */
int ieee80211_aes_gcm_decrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
			      u8 *data, size_t data_len, u8 *mic)
{
	struct scatterlist assoc, pt, ct[2];
	/* Request allocated on the stack, sized and aligned for the tfm. */
	char aead_req_data[sizeof(struct aead_request) +
			   crypto_aead_reqsize(tfm)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *aead_req = (void *)aead_req_data;

	if (data_len == 0)
		return -EINVAL;

	memset(aead_req, 0, sizeof(aead_req_data));

	sg_init_one(&pt, data, data_len);
	sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
	sg_init_table(ct, 2);
	/* Source: ciphertext followed by the MIC to verify. */
	sg_set_buf(&ct[0], data, data_len);
	sg_set_buf(&ct[1], mic, IEEE80211_GCMP_MIC_LEN);

	aead_request_set_tfm(aead_req, tfm);
	aead_request_set_assoc(aead_req, &assoc, assoc.length);
	aead_request_set_crypt(aead_req, ct, &pt,
			       data_len + IEEE80211_GCMP_MIC_LEN, j_0);

	return crypto_aead_decrypt(aead_req);
}
/* Allocate a "gcm(aes)" AEAD transform and program the key and GCMP MIC
 * length.  Returns the transform on success, an ERR_PTR on failure.
 */
struct crypto_aead *ieee80211_aes_gcm_key_setup_encrypt(const u8 key[],
							size_t key_len)
{
	struct crypto_aead *tfm;
	int err;

	tfm = crypto_alloc_aead("gcm(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return tfm;

	err = crypto_aead_setkey(tfm, key, key_len);
	if (err)
		goto free_aead;
	err = crypto_aead_setauthsize(tfm, IEEE80211_GCMP_MIC_LEN);
	if (err)
		goto free_aead;

	return tfm;

free_aead:
	crypto_free_aead(tfm);
	return ERR_PTR(err);
}
/* Release a transform obtained from ieee80211_aes_gcm_key_setup_encrypt(). */
void ieee80211_aes_gcm_key_free(struct crypto_aead *tfm)
{
	crypto_free_aead(tfm);
}
| gpl-2.0 |
zhangjie201412/goldfish_kernel | arch/arm/plat-s3c24xx/dma.c | 140 | 34682 | /* linux/arch/arm/plat-s3c24xx/dma.c
*
* Copyright (c) 2003-2005,2006 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C2410 DMA core
*
* http://armlinux.simtec.co.uk/
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifdef CONFIG_S3C2410_DMA_DEBUG
#define DEBUG
#endif
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/sysdev.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <mach/hardware.h>
#include <mach/dma.h>
#include <mach/map.h>
#include <plat/dma.h>
/* io map for dma */
static void __iomem *dma_base;
/* slab cache for struct s3c2410_dma_buf allocations */
static struct kmem_cache *dma_kmem;

static int dma_channels;

static struct s3c24xx_dma_selection dma_sel;


/* dma channel state information */
struct s3c2410_dma_chan s3c2410_chans[S3C2410_DMA_CHANNELS];

/* debugging functions */

/* marker stamped into every live s3c2410_dma_buf; checked on free */
#define BUF_MAGIC (0xcafebabe)

#define dmawarn(fmt...) printk(KERN_DEBUG fmt)

#define dma_regaddr(chan, reg) ((chan)->regs + (reg))

#if 1
#define dma_wrreg(chan, reg, val) writel((val), (chan)->regs + (reg))
#else
/* verbose alternative to the macro above, kept for debugging */
static inline void
dma_wrreg(struct s3c2410_dma_chan *chan, int reg, unsigned long val)
{
	pr_debug("writing %08x to register %08x\n",(unsigned int)val,reg);
	writel(val, dma_regaddr(chan, reg));
}
#endif

#define dma_rdreg(chan, reg) readl((chan)->regs + (reg))

/* captured register state for debug */

struct s3c2410_dma_regstate {
	unsigned long         dcsrc;	/* S3C2410_DMA_DCSRC */
	unsigned long         disrc;	/* S3C2410_DMA_DISRC */
	unsigned long         dstat;	/* S3C2410_DMA_DSTAT */
	unsigned long         dcon;	/* S3C2410_DMA_DCON */
	unsigned long         dmsktrig;	/* S3C2410_DMA_DMASKTRIG */
};
#ifdef CONFIG_S3C2410_DMA_DEBUG

/* dmadbg_showregs
 *
 * simple debug routine to print the current state of the dma registers
 */

/* Snapshot the channel's DMA registers into @regs. */
static void
dmadbg_capture(struct s3c2410_dma_chan *chan, struct s3c2410_dma_regstate *regs)
{
	regs->dcsrc    = dma_rdreg(chan, S3C2410_DMA_DCSRC);
	regs->disrc    = dma_rdreg(chan, S3C2410_DMA_DISRC);
	regs->dstat    = dma_rdreg(chan, S3C2410_DMA_DSTAT);
	regs->dcon     = dma_rdreg(chan, S3C2410_DMA_DCON);
	regs->dmsktrig = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
}

/* Print a previously captured register snapshot. */
static void
dmadbg_dumpregs(const char *fname, int line, struct s3c2410_dma_chan *chan,
		struct s3c2410_dma_regstate *regs)
{
	printk(KERN_DEBUG "dma%d: %s:%d: DCSRC=%08lx, DISRC=%08lx, DSTAT=%08lx DMT=%02lx, DCON=%08lx\n",
	       chan->number, fname, line,
	       regs->dcsrc, regs->disrc, regs->dstat, regs->dmsktrig,
	       regs->dcon);
}

/* Print the channel's software state plus a fresh register snapshot. */
static void
dmadbg_showchan(const char *fname, int line, struct s3c2410_dma_chan *chan)
{
	struct s3c2410_dma_regstate state;

	dmadbg_capture(chan, &state);

	printk(KERN_DEBUG "dma%d: %s:%d: ls=%d, cur=%p, %p %p\n",
	       chan->number, fname, line, chan->load_state,
	       chan->curr, chan->next, chan->end);

	dmadbg_dumpregs(fname, line, chan, &state);
}

/* Capture and print the registers only (no software state). */
static void
dmadbg_showregs(const char *fname, int line, struct s3c2410_dma_chan *chan)
{
	struct s3c2410_dma_regstate state;

	dmadbg_capture(chan, &state);
	dmadbg_dumpregs(fname, line, chan, &state);
}

#define dbg_showregs(chan) dmadbg_showregs(__func__, __LINE__, (chan))
#define dbg_showchan(chan) dmadbg_showchan(__func__, __LINE__, (chan))
#else
/* Debugging disabled: the helpers compile away to nothing. */
#define dbg_showregs(chan) do { } while(0)
#define dbg_showchan(chan) do { } while(0)
#endif /* CONFIG_S3C2410_DMA_DEBUG */
static struct s3c2410_dma_chan *dma_chan_map[DMACH_MAX];

/* lookup_dma_channel
 *
 * Map a virtual channel number onto its channel state.  Numbers carrying
 * DMACH_LOW_LEVEL index the hardware channel array directly; all others
 * go through the virtual-to-real mapping table.
 */
static struct s3c2410_dma_chan *lookup_dma_channel(unsigned int channel)
{
	return (channel & DMACH_LOW_LEVEL)
		? &s3c2410_chans[channel & ~DMACH_LOW_LEVEL]
		: dma_chan_map[channel];
}
/* s3c2410_dma_stats_timeout
 *
 * Fold one buffer-load wait time into the channel's statistics
 * (longest, shortest and running total for the average).
 */
static void
s3c2410_dma_stats_timeout(struct s3c2410_dma_stats *stats, int val)
{
	if (!stats)
		return;

	if (stats->timeout_longest < val)
		stats->timeout_longest = val;
	if (stats->timeout_shortest > val)
		stats->timeout_shortest = val;

	stats->timeout_avg += val;
}
/* s3c2410_dma_waitforload
 *
 * wait for the DMA engine to load a buffer, and update the state accordingly
 *
 * Returns 1 when the engine picked the buffer up (state advanced to
 * 1RUNNING), 0 on a bad call state or on timeout.
 */
static int
s3c2410_dma_waitforload(struct s3c2410_dma_chan *chan, int line)
{
	int timeout = chan->load_timeout;
	int took;

	/* Only valid while exactly one buffer is loaded but not running. */
	if (chan->load_state != S3C2410_DMALOAD_1LOADED) {
		printk(KERN_ERR "dma%d: s3c2410_dma_waitforload() called in loadstate %d from line %d\n", chan->number, chan->load_state, line);
		return 0;
	}

	if (chan->stats != NULL)
		chan->stats->loads++;

	while (--timeout > 0) {
		/* Shift discards the upper 12 bits, testing only the low
		 * 20 bits of DSTAT - presumably the current transfer
		 * count; confirm against the S3C2410 DSTAT layout.
		 */
		if ((dma_rdreg(chan, S3C2410_DMA_DSTAT) << (32-20)) != 0) {
			took = chan->load_timeout - timeout;

			s3c2410_dma_stats_timeout(chan->stats, took);

			switch (chan->load_state) {
			case S3C2410_DMALOAD_1LOADED:
				chan->load_state = S3C2410_DMALOAD_1RUNNING;
				break;

			default:
				printk(KERN_ERR "dma%d: unknown load_state in s3c2410_dma_waitforload() %d\n", chan->number, chan->load_state);
			}

			return 1;
		}
	}

	if (chan->stats != NULL) {
		chan->stats->timeout_failed++;
	}

	return 0;
}
/* s3c2410_dma_loadbuffer
 *
 * load a buffer, and update the channel state
 *
 * Programs the address and control registers for @buf and advances the
 * channel's load_state machine (NONE -> 1LOADED, 1RUNNING ->
 * 1LOADED_1RUNNING).
 */
static inline int
s3c2410_dma_loadbuffer(struct s3c2410_dma_chan *chan,
		       struct s3c2410_dma_buf *buf)
{
	unsigned long reload;

	pr_debug("s3c2410_chan_loadbuffer: loading buff %p (0x%08lx,0x%06x)\n",
		 buf, (unsigned long)buf->data, buf->size);

	/* NOTE(review): buf was already dereferenced by the pr_debug above;
	 * the NULL check guards only the code below.
	 */
	if (buf == NULL) {
		dmawarn("buffer is NULL\n");
		return -EINVAL;
	}

	/* check the state of the channel before we do anything */

	if (chan->load_state == S3C2410_DMALOAD_1LOADED) {
		dmawarn("load_state is S3C2410_DMALOAD_1LOADED\n");
	}

	if (chan->load_state == S3C2410_DMALOAD_1LOADED_1RUNNING) {
		dmawarn("state is S3C2410_DMALOAD_1LOADED_1RUNNING\n");
	}

	/* it would seem sensible if we are the last buffer to not bother
	 * with the auto-reload bit, so that the DMA engine will not try
	 * and load another transfer after this one has finished...
	 */
	if (chan->load_state == S3C2410_DMALOAD_NONE) {
		pr_debug("load_state is none, checking for noreload (next=%p)\n",
			 buf->next);
		reload = (buf->next == NULL) ? S3C2410_DCON_NORELOAD : 0;
	} else {
		//pr_debug("load_state is %d => autoreload\n", chan->load_state);
		reload = S3C2410_DCON_AUTORELOAD;
	}

	/* Sanity check: presumably verifies the physical address lies in
	 * SDRAM (0x3xxxxxxx on this SoC) - confirm against the memory map.
	 */
	if ((buf->data & 0xf0000000) != 0x30000000) {
		dmawarn("dmaload: buffer is %p\n", (void *)buf->data);
	}

	writel(buf->data, chan->addr_reg);

	dma_wrreg(chan, S3C2410_DMA_DCON,
		  chan->dcon | reload | (buf->size/chan->xfer_unit));

	chan->next = buf->next;

	/* update the state of the channel */

	switch (chan->load_state) {
	case S3C2410_DMALOAD_NONE:
		chan->load_state = S3C2410_DMALOAD_1LOADED;
		break;

	case S3C2410_DMALOAD_1RUNNING:
		chan->load_state = S3C2410_DMALOAD_1LOADED_1RUNNING;
		break;

	default:
		dmawarn("dmaload: unknown state %d in loadbuffer\n",
			chan->load_state);
		break;
	}

	return 0;
}
/* s3c2410_dma_call_op
 *
 * Invoke the channel's operation callback for @op, if one is registered.
 */
static void
s3c2410_dma_call_op(struct s3c2410_dma_chan *chan, enum s3c2410_chan_op op)
{
	if (chan->op_fn)
		(chan->op_fn)(chan, op);
}
/* s3c2410_dma_buffdone
 *
 * small wrapper to check if callback routine needs to be called, and
 * if so, call it
 */
static inline void
s3c2410_dma_buffdone(struct s3c2410_dma_chan *chan, struct s3c2410_dma_buf *buf,
		     enum s3c2410_dma_buffresult result)
{
#if 0
	pr_debug("callback_fn=%p, buf=%p, id=%p, size=%d, result=%d\n",
		 chan->callback_fn, buf, buf->id, buf->size, result);
#endif

	if (chan->callback_fn != NULL) {
		(chan->callback_fn)(chan, buf->id, buf->size, result);
	}
}
/* s3c2410_dma_start
 *
 * start a dma channel going
 *
 * Runs with interrupts disabled: loads a buffer if nothing is loaded,
 * enables the channel irq, triggers the engine, and opportunistically
 * chains a second buffer.  Returns 0 on success, -EINVAL if there is
 * nothing queued to transfer.
 */
static int s3c2410_dma_start(struct s3c2410_dma_chan *chan)
{
	unsigned long tmp;
	unsigned long flags;

	pr_debug("s3c2410_start_dma: channel=%d\n", chan->number);

	local_irq_save(flags);

	if (chan->state == S3C2410_DMA_RUNNING) {
		pr_debug("s3c2410_start_dma: already running (%d)\n", chan->state);
		local_irq_restore(flags);
		return 0;
	}

	chan->state = S3C2410_DMA_RUNNING;

	/* check wether there is anything to load, and if not, see
	 * if we can find anything to load
	 */

	if (chan->load_state == S3C2410_DMALOAD_NONE) {
		if (chan->next == NULL) {
			printk(KERN_ERR "dma%d: channel has nothing loaded\n",
			       chan->number);
			chan->state = S3C2410_DMA_IDLE;
			local_irq_restore(flags);
			return -EINVAL;
		}

		s3c2410_dma_loadbuffer(chan, chan->next);
	}

	dbg_showchan(chan);

	/* enable the channel */

	if (!chan->irq_enabled) {
		enable_irq(chan->irq);
		chan->irq_enabled = 1;
	}

	/* start the channel going */

	tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
	tmp &= ~S3C2410_DMASKTRIG_STOP;
	tmp |= S3C2410_DMASKTRIG_ON;
	dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);

	pr_debug("dma%d: %08lx to DMASKTRIG\n", chan->number, tmp);

#if 0
	/* the dma buffer loads should take care of clearing the AUTO
	 * reloading feature */
	tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
	tmp &= ~S3C2410_DCON_NORELOAD;
	dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
#endif

	s3c2410_dma_call_op(chan, S3C2410_DMAOP_START);

	dbg_showchan(chan);

	/* if we've only loaded one buffer onto the channel, then chec
	 * to see if we have another, and if so, try and load it so when
	 * the first buffer is finished, the new one will be loaded onto
	 * the channel */

	if (chan->next != NULL) {
		if (chan->load_state == S3C2410_DMALOAD_1LOADED) {

			if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
				pr_debug("%s: buff not yet loaded, no more todo\n",
					 __func__);
			} else {
				chan->load_state = S3C2410_DMALOAD_1RUNNING;
				s3c2410_dma_loadbuffer(chan, chan->next);
			}

		} else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
			s3c2410_dma_loadbuffer(chan, chan->next);
		}
	}


	local_irq_restore(flags);

	return 0;
}
/* s3c2410_dma_canload
 *
 * Return non-zero when the channel can accept another buffer: either
 * nothing is loaded, or the single loaded buffer is already running.
 */
static int
s3c2410_dma_canload(struct s3c2410_dma_chan *chan)
{
	switch (chan->load_state) {
	case S3C2410_DMALOAD_NONE:
	case S3C2410_DMALOAD_1RUNNING:
		return 1;

	default:
		return 0;
	}
}
/* s3c2410_dma_enqueue
 *
 * queue an given buffer for dma transfer.
 *
 * id         the device driver's id information for this buffer
 * data       the physical address of the buffer data
 * size       the size of the buffer in bytes
 *
 * If the channel is not running, then the flag S3C2410_DMAF_AUTOSTART
 * is checked, and if set, the channel is started. If this flag isn't set,
 * then an error will be returned.
 *
 * It is possible to queue more than one DMA buffer onto a channel at
 * once, and the code will deal with the re-loading of the next buffer
 * when necessary.
 */
int s3c2410_dma_enqueue(unsigned int channel, void *id,
			dma_addr_t data, int size)
{
	struct s3c2410_dma_chan *chan = lookup_dma_channel(channel);
	struct s3c2410_dma_buf *buf;
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	pr_debug("%s: id=%p, data=%08x, size=%d\n",
		 __func__, id, (unsigned int)data, size);

	/* GFP_ATOMIC: callers may hold spinlocks / run in irq context. */
	buf = kmem_cache_alloc(dma_kmem, GFP_ATOMIC);
	if (buf == NULL) {
		pr_debug("%s: out of memory (%ld alloc)\n",
			 __func__, (long)sizeof(*buf));
		return -ENOMEM;
	}

	//pr_debug("%s: new buffer %p\n", __func__, buf);
	//dbg_showchan(chan);

	buf->next  = NULL;
	buf->data  = buf->ptr = data;
	buf->size  = size;
	buf->id    = id;
	buf->magic = BUF_MAGIC;

	local_irq_save(flags);

	if (chan->curr == NULL) {
		/* we've got nothing loaded... */
		pr_debug("%s: buffer %p queued onto empty channel\n",
			 __func__, buf);

		chan->curr = buf;
		chan->end  = buf;
		chan->next = NULL;
	} else {
		pr_debug("dma%d: %s: buffer %p queued onto non-empty channel\n",
			 chan->number, __func__, buf);

		if (chan->end == NULL)
			pr_debug("dma%d: %s: %p not empty, and chan->end==NULL?\n",
				 chan->number, __func__, chan);

		chan->end->next = buf;
		chan->end = buf;
	}

	/* if necessary, update the next buffer field */
	if (chan->next == NULL)
		chan->next = buf;

	/* check to see if we can load a buffer */
	if (chan->state == S3C2410_DMA_RUNNING) {
		if (chan->load_state == S3C2410_DMALOAD_1LOADED && 1) {
			if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
				printk(KERN_ERR "dma%d: loadbuffer:"
				       "timeout loading buffer\n",
				       chan->number);
				dbg_showchan(chan);
				local_irq_restore(flags);
				return -EINVAL;
			}
		}

		/* Load as many queued buffers as the engine will take. */
		while (s3c2410_dma_canload(chan) && chan->next != NULL) {
			s3c2410_dma_loadbuffer(chan, chan->next);
		}
	} else if (chan->state == S3C2410_DMA_IDLE) {
		if (chan->flags & S3C2410_DMAF_AUTOSTART) {
			s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL,
					 S3C2410_DMAOP_START);
		}
	}

	local_irq_restore(flags);
	return 0;
}

EXPORT_SYMBOL(s3c2410_dma_enqueue);
static inline void
s3c2410_dma_freebuf(struct s3c2410_dma_buf *buf)
{
int magicok = (buf->magic == BUF_MAGIC);
buf->magic = -1;
if (magicok) {
kmem_cache_free(dma_kmem, buf);
} else {
printk("s3c2410_dma_freebuf: buff %p with bad magic\n", buf);
}
}
/* s3c2410_dma_lastxfer
 *
 * called when the system is out of buffers, to ensure that the channel
 * is prepared for shutdown.
 */
static inline void
s3c2410_dma_lastxfer(struct s3c2410_dma_chan *chan)
{
#if 0
	pr_debug("dma%d: s3c2410_dma_lastxfer: load_state %d\n",
		 chan->number, chan->load_state);
#endif

	switch (chan->load_state) {
	case S3C2410_DMALOAD_NONE:
		/* nothing loaded: fall through to disabling auto-reload */
		break;

	case S3C2410_DMALOAD_1LOADED:
		/* a buffer was handed to the hardware but not confirmed as
		 * running; wait for the controller to pick it up first */
		if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
			/* flag error? */
			printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
			       chan->number, __func__);
			return;
		}
		break;

	case S3C2410_DMALOAD_1LOADED_1RUNNING:
		/* I believe in this case we do not have anything to do
		 * until the next buffer comes along, and we turn off the
		 * reload */
		return;

	default:
		pr_debug("dma%d: lastxfer: unhandled load_state %d with no next\n",
			 chan->number, chan->load_state);
		return;
	}

	/* hopefully this'll shut the damned thing up after the transfer... */
	dma_wrreg(chan, S3C2410_DMA_DCON, chan->dcon | S3C2410_DCON_NORELOAD);
}
/* extra debug hook, currently compiled out */
#define dmadbg2(x...)

/* s3c2410_dma_irq
 *
 * per-channel interrupt handler: advances the channel's software load
 * state, completes the finished buffer (owner callback + free), then
 * either loads the next queued buffer or winds the channel down.
 */
static irqreturn_t
s3c2410_dma_irq(int irq, void *devpw)
{
	struct s3c2410_dma_chan *chan = (struct s3c2410_dma_chan *)devpw;
	struct s3c2410_dma_buf *buf;

	buf = chan->curr;

	dbg_showchan(chan);

	/* modify the channel state */

	switch (chan->load_state) {
	case S3C2410_DMALOAD_1RUNNING:
		/* TODO - if we are running only one buffer, we probably
		 * want to reload here, and then worry about the buffer
		 * callback */

		chan->load_state = S3C2410_DMALOAD_NONE;
		break;

	case S3C2410_DMALOAD_1LOADED:
		/* iirc, we should go back to NONE loaded here, we
		 * had a buffer, and it was never verified as being
		 * loaded.
		 */

		chan->load_state = S3C2410_DMALOAD_NONE;
		break;

	case S3C2410_DMALOAD_1LOADED_1RUNNING:
		/* we'll worry about checking to see if another buffer is
		 * ready after we've called back the owner. This should
		 * ensure we do not wait around too long for the DMA
		 * engine to start the next transfer
		 */

		chan->load_state = S3C2410_DMALOAD_1LOADED;
		break;

	case S3C2410_DMALOAD_NONE:
		printk(KERN_ERR "dma%d: IRQ with no loaded buffer?\n",
		       chan->number);
		break;

	default:
		printk(KERN_ERR "dma%d: IRQ in invalid load_state %d\n",
		       chan->number, chan->load_state);
		break;
	}

	if (buf != NULL) {
		/* update the chain to make sure that if we load any more
		 * buffers when we call the callback function, things should
		 * work properly */

		chan->curr = buf->next;
		buf->next  = NULL;

		if (buf->magic != BUF_MAGIC) {
			printk(KERN_ERR "dma%d: %s: buf %p incorrect magic\n",
			       chan->number, __func__, buf);
			return IRQ_HANDLED;
		}

		s3c2410_dma_buffdone(chan, buf, S3C2410_RES_OK);

		/* free resouces */
		s3c2410_dma_freebuf(buf);
	} else {
		/* no current buffer - nothing to complete */
	}

	/* only reload if the channel is still running... our buffer done
	 * routine may have altered the state by requesting the dma channel
	 * to stop or shutdown... */

	/* todo: check that when the channel is shut-down from inside this
	 * function, we cope with unsetting reload, etc */

	if (chan->next != NULL && chan->state != S3C2410_DMA_IDLE) {
		unsigned long flags;

		switch (chan->load_state) {
		case S3C2410_DMALOAD_1RUNNING:
			/* don't need to do anything for this state */
			break;

		case S3C2410_DMALOAD_NONE:
			/* can load buffer immediately */
			break;

		case S3C2410_DMALOAD_1LOADED:
			if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
				/* flag error? */
				printk(KERN_ERR "dma%d: timeout waiting for load (%s)\n",
				       chan->number, __func__);
				return IRQ_HANDLED;
			}

			break;

		case S3C2410_DMALOAD_1LOADED_1RUNNING:
			/* hardware still busy with two buffers; skip the
			 * reload and the lastxfer path entirely */
			goto no_load;

		default:
			printk(KERN_ERR "dma%d: unknown load_state in irq, %d\n",
			       chan->number, chan->load_state);
			return IRQ_HANDLED;
		}

		local_irq_save(flags);
		s3c2410_dma_loadbuffer(chan, chan->next);
		local_irq_restore(flags);
	} else {
		s3c2410_dma_lastxfer(chan);

		/* see if we can stop this channel.. */
		if (chan->load_state == S3C2410_DMALOAD_NONE) {
			pr_debug("dma%d: end of transfer, stopping channel (%ld)\n",
				 chan->number, jiffies);
			s3c2410_dma_ctrl(chan->number | DMACH_LOW_LEVEL,
					 S3C2410_DMAOP_STOP);
		}
	}

no_load:
	return IRQ_HANDLED;
}
static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel);
/* s3c2410_request_dma
 *
 * get control of an dma channel: maps the virtual channel number to a
 * free hardware channel, claims it, and (first time only) installs the
 * interrupt handler. Returns the low-level channel token on success or
 * a negative errno.
 */

int s3c2410_dma_request(unsigned int channel,
			struct s3c2410_dma_client *client,
			void *dev)
{
	struct s3c2410_dma_chan *chan;
	unsigned long flags;
	int err;

	pr_debug("dma%d: s3c2410_request_dma: client=%s, dev=%p\n",
		 channel, client->name, dev);

	local_irq_save(flags);

	chan = s3c2410_dma_map_channel(channel);
	if (chan == NULL) {
		local_irq_restore(flags);
		return -EBUSY;
	}

	dbg_showchan(chan);

	chan->client = client;
	chan->in_use = 1;

	if (!chan->irq_claimed) {
		pr_debug("dma%d: %s : requesting irq %d\n",
			 channel, __func__, chan->irq);

		/* mark claimed before dropping irqs so nothing else tries
		 * to claim this channel's irq concurrently */
		chan->irq_claimed = 1;
		local_irq_restore(flags);

		/* request_irq() may sleep, so irqs must be enabled here */
		err = request_irq(chan->irq, s3c2410_dma_irq, IRQF_DISABLED,
				  client->name, (void *)chan);

		local_irq_save(flags);

		if (err) {
			/* roll back the claim on failure */
			chan->in_use = 0;
			chan->irq_claimed = 0;
			local_irq_restore(flags);

			printk(KERN_ERR "%s: cannot get IRQ %d for DMA %d\n",
			       client->name, chan->irq, chan->number);
			return err;
		}

		chan->irq_enabled = 1;
	}

	local_irq_restore(flags);

	/* need to setup */

	pr_debug("%s: channel initialised, %p\n", __func__, chan);

	/* callers pass this token (DMACH_LOW_LEVEL set) back into the
	 * other dma_* calls to address the real hardware channel */
	return chan->number | DMACH_LOW_LEVEL;
}
EXPORT_SYMBOL(s3c2410_dma_request);
/* s3c2410_dma_free
 *
 * release the given channel back to the system, will stop and flush
 * any outstanding transfers, and ensure the channel is ready for the
 * next claimant.
 *
 * Note, although a warning is currently printed if the freeing client
 * info is not the same as the registrant's client info, the free is still
 * allowed to go through.
 */
int s3c2410_dma_free(unsigned int channel, struct s3c2410_dma_client *client)
{
	struct s3c2410_dma_chan *chan = lookup_dma_channel(channel);
	unsigned long flags;

	if (chan == NULL)
		return -EINVAL;

	local_irq_save(flags);

	if (chan->client != client) {
		printk(KERN_WARNING "dma%d: possible free from different client (channel %p, passed %p)\n",
		       channel, chan->client, client);
	}

	/* sort out stopping and freeing the channel */

	if (chan->state != S3C2410_DMA_IDLE) {
		pr_debug("%s: need to stop dma channel %p\n",
			 __func__, chan);

		/* possibly flush the channel */
		s3c2410_dma_ctrl(channel, S3C2410_DMAOP_STOP);
	}

	chan->client = NULL;
	chan->in_use = 0;

	/* NOTE(review): free_irq() can sleep and is called here with local
	 * irqs disabled - looks unsafe; confirm against the kernel version
	 * this driver targets */
	if (chan->irq_claimed)
		free_irq(chan->irq, (void *)chan);

	chan->irq_claimed = 0;

	/* only virtual-channel requests own a dma_chan_map slot */
	if (!(channel & DMACH_LOW_LEVEL))
		dma_chan_map[channel] = NULL;

	local_irq_restore(flags);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_free);
/* s3c2410_dma_dostop
 *
 * stop the hardware channel by setting the STOP bit in DMASKTRIG, then
 * mark the software state as idle with nothing loaded.
 */
static int s3c2410_dma_dostop(struct s3c2410_dma_chan *chan)
{
	unsigned long flags;
	unsigned long tmp;

	pr_debug("%s:\n", __func__);

	dbg_showchan(chan);

	local_irq_save(flags);

	/* notify the client's op hook, if any, before touching hardware */
	s3c2410_dma_call_op(chan, S3C2410_DMAOP_STOP);

	tmp = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);
	tmp |= S3C2410_DMASKTRIG_STOP;
	//tmp &= ~S3C2410_DMASKTRIG_ON;
	dma_wrreg(chan, S3C2410_DMA_DMASKTRIG, tmp);

#if 0
	/* should also clear interrupts, according to WinCE BSP */
	tmp = dma_rdreg(chan, S3C2410_DMA_DCON);
	tmp |= S3C2410_DCON_NORELOAD;
	dma_wrreg(chan, S3C2410_DMA_DCON, tmp);
#endif

	/* should stop do this, or should we wait for flush? */
	chan->state = S3C2410_DMA_IDLE;
	chan->load_state = S3C2410_DMALOAD_NONE;

	local_irq_restore(flags);

	return 0;
}
/* s3c2410_dma_waitforstop
 *
 * bounded busy-wait until the hardware clears the ON bit in DMASKTRIG,
 * i.e. the channel has actually stopped; logs (pr_debug) if it never does.
 */
static void s3c2410_dma_waitforstop(struct s3c2410_dma_chan *chan)
{
	unsigned long masktrig;
	unsigned int tries;

	for (tries = 0x10000; tries > 0; tries--) {
		masktrig = dma_rdreg(chan, S3C2410_DMA_DMASKTRIG);

		if ((masktrig & S3C2410_DMASKTRIG_ON) == 0)
			return;
	}

	pr_debug("dma%d: failed to stop?\n", chan->number);
}
/* s3c2410_dma_flush
 *
 * stop the channel, and remove all current and pending transfers,
 * completing each queued buffer with S3C2410_RES_ABORT.
 */
static int s3c2410_dma_flush(struct s3c2410_dma_chan *chan)
{
	struct s3c2410_dma_buf *pos, *tmp;
	unsigned long flags;

	pr_debug("%s: chan %p (%d)\n", __func__, chan, chan->number);

	dbg_showchan(chan);

	local_irq_save(flags);

	if (chan->state != S3C2410_DMA_IDLE) {
		pr_debug("%s: stopping channel...\n", __func__ );
		s3c2410_dma_ctrl(chan->number, S3C2410_DMAOP_STOP);
	}

	/* detach the whole queue first, then walk it aborting each buffer */
	pos = chan->curr;
	if (pos == NULL)
		pos = chan->next;

	chan->curr = chan->next = chan->end = NULL;

	for (; pos != NULL; pos = tmp) {
		tmp = pos->next;

		pr_debug("%s: free buffer %p, next %p\n",
			 __func__, pos, pos->next);

		s3c2410_dma_buffdone(chan, pos, S3C2410_RES_ABORT);
		s3c2410_dma_freebuf(pos);
	}

	dbg_showregs(chan);

	s3c2410_dma_waitforstop(chan);

	dbg_showregs(chan);

	local_irq_restore(flags);

	return 0;
}
/* s3c2410_dma_started
 *
 * called once a transfer is underway, to try to pre-load the next
 * queued buffer so back-to-back transfers do not stall.
 */
static int s3c2410_dma_started(struct s3c2410_dma_chan *chan)
{
	unsigned long flags;

	local_irq_save(flags);

	dbg_showchan(chan);

	/* if we've only loaded one buffer onto the channel, then check
	 * to see if we have another, and if so, try and load it so when
	 * the first buffer is finished, the new one will be loaded onto
	 * the channel */

	if (chan->next != NULL) {
		if (chan->load_state == S3C2410_DMALOAD_1LOADED) {

			if (s3c2410_dma_waitforload(chan, __LINE__) == 0) {
				pr_debug("%s: buff not yet loaded, no more todo\n",
					 __func__);
			} else {
				chan->load_state = S3C2410_DMALOAD_1RUNNING;
				s3c2410_dma_loadbuffer(chan, chan->next);
			}

		} else if (chan->load_state == S3C2410_DMALOAD_1RUNNING) {
			s3c2410_dma_loadbuffer(chan, chan->next);
		}
	}

	local_irq_restore(flags);

	return 0;
}
/* s3c2410_dma_ctrl
 *
 * dispatch a channel operation to the matching implementation.
 * Returns 0 on success, -EINVAL for an unknown channel, -ENOENT for
 * an unsupported or unknown operation.
 */
int
s3c2410_dma_ctrl(unsigned int channel, enum s3c2410_chan_op op)
{
	struct s3c2410_dma_chan *chan = lookup_dma_channel(channel);

	if (chan == NULL)
		return -EINVAL;

	if (op == S3C2410_DMAOP_START)
		return s3c2410_dma_start(chan);

	if (op == S3C2410_DMAOP_STOP)
		return s3c2410_dma_dostop(chan);

	/* pause/resume are not implemented on this hardware */
	if (op == S3C2410_DMAOP_PAUSE || op == S3C2410_DMAOP_RESUME)
		return -ENOENT;

	if (op == S3C2410_DMAOP_FLUSH)
		return s3c2410_dma_flush(chan);

	if (op == S3C2410_DMAOP_STARTED)
		return s3c2410_dma_started(chan);

	if (op == S3C2410_DMAOP_TIMEOUT)
		return 0;

	return -ENOENT;      /* unknown, don't bother */
}
EXPORT_SYMBOL(s3c2410_dma_ctrl);
/* DMA configuration for each channel
 *
 * DISRCC -> source of the DMA (AHB,APB)
 * DISRC -> source address of the DMA
 * DIDSTC -> destination of the DMA (AHB,APD)
 * DIDST -> destination address of the DMA
 */

/* s3c2410_dma_config
 *
 * xferunit: size of the transfer unit in bytes (1,2,4)
 * dcon: base value of the DCONx register
 *
 * returns 0 on success, -EINVAL for an unknown channel or bad unit size.
 */

int s3c2410_dma_config(unsigned int channel,
		       int xferunit,
		       int dcon)
{
	struct s3c2410_dma_chan *chan = lookup_dma_channel(channel);

	pr_debug("%s: chan=%d, xfer_unit=%d, dcon=%08x\n",
		 __func__, channel, xferunit, dcon);

	if (chan == NULL)
		return -EINVAL;

	pr_debug("%s: Initial dcon is %08x\n", __func__, dcon);

	/* merge in any platform-mandated DCON bits for this channel */
	dcon |= chan->dcon & dma_sel.dcon_mask;

	pr_debug("%s: New dcon is %08x\n", __func__, dcon);

	switch (xferunit) {
	case 1:
		dcon |= S3C2410_DCON_BYTE;
		break;

	case 2:
		dcon |= S3C2410_DCON_HALFWORD;
		break;

	case 4:
		dcon |= S3C2410_DCON_WORD;
		break;

	default:
		pr_debug("%s: bad transfer size %d\n", __func__, xferunit);
		return -EINVAL;
	}

	/* hardware-triggered transfers, interrupt on completion */
	dcon |= S3C2410_DCON_HWTRIG;
	dcon |= S3C2410_DCON_INTREQ;

	pr_debug("%s: dcon now %08x\n", __func__, dcon);

	chan->dcon = dcon;
	chan->xfer_unit = xferunit;

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_config);
/* s3c2410_dma_setflags
 *
 * replace the channel's behaviour flags (e.g. S3C2410_DMAF_AUTOSTART).
 */
int s3c2410_dma_setflags(unsigned int channel, unsigned int flags)
{
	struct s3c2410_dma_chan *chan;

	chan = lookup_dma_channel(channel);
	if (!chan)
		return -EINVAL;

	pr_debug("%s: chan=%p, flags=%08x\n", __func__, chan, flags);

	chan->flags = flags;
	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_setflags);
/* do we need to protect the settings of the fields from
 * irq?
 */

/* s3c2410_dma_set_opfn
 *
 * register the callback invoked when channel operations occur.
 */
int s3c2410_dma_set_opfn(unsigned int channel, s3c2410_dma_opfn_t rtn)
{
	struct s3c2410_dma_chan *chan;

	chan = lookup_dma_channel(channel);
	if (!chan)
		return -EINVAL;

	pr_debug("%s: chan=%p, op rtn=%p\n", __func__, chan, rtn);

	chan->op_fn = rtn;
	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_set_opfn);
/* s3c2410_dma_set_buffdone_fn
 *
 * register the callback invoked when a queued buffer completes.
 */
int s3c2410_dma_set_buffdone_fn(unsigned int channel, s3c2410_dma_cbfn_t rtn)
{
	struct s3c2410_dma_chan *chan;

	chan = lookup_dma_channel(channel);
	if (!chan)
		return -EINVAL;

	pr_debug("%s: chan=%p, callback rtn=%p\n", __func__, chan, rtn);

	chan->callback_fn = rtn;
	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_set_buffdone_fn);
/* s3c2410_dma_devconfig
 *
 * configure the dma source/destination hardware type and address
 *
 * source: S3C2410_DMASRC_HW: source is hardware
 * S3C2410_DMASRC_MEM: source is memory
 *
 * hwcfg: the value for xxxSTCn register,
 * bit 0: 0=increment pointer, 1=leave pointer
 * bit 1: 0=source is AHB, 1=source is APB
 *
 * devaddr: physical address of the source
 */
int s3c2410_dma_devconfig(int channel,
			  enum s3c2410_dmasrc source,
			  int hwcfg,
			  unsigned long devaddr)
{
	struct s3c2410_dma_chan *chan = lookup_dma_channel(channel);

	if (chan == NULL)
		return -EINVAL;

	pr_debug("%s: source=%d, hwcfg=%08x, devaddr=%08lx\n",
		 __func__, (int)source, hwcfg, devaddr);

	/* remember the settings so resume can re-apply them */
	chan->source = source;
	chan->dev_addr = devaddr;
	chan->hw_cfg = hwcfg;

	switch (source) {
	case S3C2410_DMASRC_HW:
		/* source is hardware: fixed device address in, memory
		 * address out (memory side set per-transfer via addr_reg) */
		pr_debug("%s: hw source, devaddr=%08lx, hwcfg=%d\n",
			 __func__, devaddr, hwcfg);
		dma_wrreg(chan, S3C2410_DMA_DISRCC, hwcfg & 3);
		dma_wrreg(chan, S3C2410_DMA_DISRC, devaddr);
		dma_wrreg(chan, S3C2410_DMA_DIDSTC, (0<<1) | (0<<0));

		chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DIDST);
		break;

	case S3C2410_DMASRC_MEM:
		/* source is memory: memory address in, fixed device
		 * address out */
		pr_debug("%s: mem source, devaddr=%08lx, hwcfg=%d\n",
			 __func__, devaddr, hwcfg);
		dma_wrreg(chan, S3C2410_DMA_DISRCC, (0<<1) | (0<<0));
		dma_wrreg(chan, S3C2410_DMA_DIDST, devaddr);
		dma_wrreg(chan, S3C2410_DMA_DIDSTC, hwcfg & 3);

		chan->addr_reg = dma_regaddr(chan, S3C2410_DMA_DISRC);
		break;

	default:
		printk(KERN_ERR "dma%d: invalid source type (%d)\n",
		       channel, source);
		return -EINVAL;
	}

	/* let the SoC-specific layer adjust for the chosen direction */
	if (dma_sel.direction != NULL)
		(dma_sel.direction)(chan, chan->map, source);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_devconfig);
/* s3c2410_dma_getposition
 *
 * returns the current transfer points for the dma source and destination
 * (either output pointer may be NULL if the caller is not interested).
 */
int s3c2410_dma_getposition(unsigned int channel, dma_addr_t *src, dma_addr_t *dst)
{
	struct s3c2410_dma_chan *chan;

	chan = lookup_dma_channel(channel);
	if (!chan)
		return -EINVAL;

	if (src)
		*src = dma_rdreg(chan, S3C2410_DMA_DCSRC);

	if (dst)
		*dst = dma_rdreg(chan, S3C2410_DMA_DCDST);

	return 0;
}
EXPORT_SYMBOL(s3c2410_dma_getposition);
/* map an embedded sys_device back to its containing channel structure */
static struct s3c2410_dma_chan *to_dma_chan(struct sys_device *dev)
{
	return container_of(dev, struct s3c2410_dma_chan, dev);
}
/* system device class */
#ifdef CONFIG_PM
/* s3c2410_dma_suspend
 *
 * PM suspend hook: force-stop any channel still running so the
 * hardware is quiescent across suspend.
 */
static int s3c2410_dma_suspend(struct sys_device *dev, pm_message_t state)
{
	struct s3c2410_dma_chan *cp = to_dma_chan(dev);

	printk(KERN_DEBUG "suspending dma channel %d\n", cp->number);

	if (dma_rdreg(cp, S3C2410_DMA_DMASKTRIG) & S3C2410_DMASKTRIG_ON) {
		/* the dma channel is still working, which is probably
		 * a bad thing to do over suspend/resume. We stop the
		 * channel and assume that the client is either going to
		 * retry after resume, or that it is broken.
		 */

		printk(KERN_INFO "dma: stopping channel %d due to suspend\n",
		       cp->number);

		s3c2410_dma_dostop(cp);
	}

	return 0;
}
/* s3c2410_dma_resume
 *
 * PM resume hook: re-program the hardware registers for any channel
 * that was claimed before suspend, using the settings cached in the
 * channel structure.
 */
static int s3c2410_dma_resume(struct sys_device *dev)
{
	struct s3c2410_dma_chan *cp = to_dma_chan(dev);
	unsigned int no = cp->number | DMACH_LOW_LEVEL;

	/* restore channel's hardware configuration */

	if (!cp->in_use)
		return 0;

	printk(KERN_INFO "dma%d: restoring configuration\n", cp->number);

	s3c2410_dma_config(no, cp->xfer_unit, cp->dcon);
	s3c2410_dma_devconfig(no, cp->source, cp->hw_cfg, cp->dev_addr);

	/* re-select the dma source for this channel */

	if (cp->map != NULL)
		dma_sel.select(cp, cp->map);

	return 0;
}
#else
#define s3c2410_dma_suspend NULL
#define s3c2410_dma_resume NULL
#endif /* CONFIG_PM */
/* sysdev class hooking per-channel suspend/resume into the PM core */
struct sysdev_class dma_sysclass = {
	.name		= "s3c24xx-dma",
	.suspend	= s3c2410_dma_suspend,
	.resume		= s3c2410_dma_resume,
};
/* kmem cache implementation */

/* constructor: zero each buffer descriptor as it enters the cache */
static void s3c2410_dma_cache_ctor(void *p)
{
	memset(p, 0, sizeof(struct s3c2410_dma_buf));
}
/* initialisation code */

/* register the dma sysdev class; runs at core_initcall time so it is
 * available before the per-channel sysdevs register at late_initcall. */
static int __init s3c24xx_dma_sysclass_init(void)
{
	int rc;

	rc = sysdev_class_register(&dma_sysclass);
	if (rc)
		printk(KERN_ERR "dma sysclass registration failed\n");

	return rc;
}
core_initcall(s3c24xx_dma_sysclass_init);
/* register one sysdev per channel so the PM hooks see each channel */
static int __init s3c24xx_dma_sysdev_register(void)
{
	struct s3c2410_dma_chan *cp;
	int chno;
	int ret;

	for (chno = 0, cp = s3c2410_chans; chno < dma_channels; chno++, cp++) {
		cp->dev.cls = &dma_sysclass;
		cp->dev.id  = chno;

		ret = sysdev_register(&cp->dev);
		if (ret != 0) {
			printk(KERN_ERR "error registering dev for dma %d\n",
			       chno);
			return ret;
		}
	}

	return 0;
}
late_initcall(s3c24xx_dma_sysdev_register);
/* s3c24xx_dma_init
 *
 * map the DMA register block, create the buffer-descriptor cache and
 * initialise per-channel state for @channels channels whose irqs start
 * at @irq and whose register banks are @stride bytes apart.
 *
 * Returns 0 on success or a negative errno.
 */
int __init s3c24xx_dma_init(unsigned int channels, unsigned int irq,
			    unsigned int stride)
{
	struct s3c2410_dma_chan *cp;
	int channel;
	int ret;

	printk(KERN_INFO "S3C24XX DMA Driver, (c) 2003-2004,2006 Simtec Electronics\n");

	dma_channels = channels;

	dma_base = ioremap(S3C24XX_PA_DMA, stride * channels);
	if (dma_base == NULL) {
		printk(KERN_ERR "dma failed to remap register block\n");
		return -ENOMEM;
	}

	dma_kmem = kmem_cache_create("dma_desc",
				     sizeof(struct s3c2410_dma_buf), 0,
				     SLAB_HWCACHE_ALIGN,
				     s3c2410_dma_cache_ctor);

	if (dma_kmem == NULL) {
		printk(KERN_ERR "dma failed to make kmem cache\n");
		ret = -ENOMEM;
		goto err_unmap;
	}

	for (channel = 0; channel < channels; channel++) {
		cp = &s3c2410_chans[channel];

		memset(cp, 0, sizeof(struct s3c2410_dma_chan));

		/* dma channel irqs are in order.. */
		cp->number = channel;
		cp->irq    = channel + irq;
		cp->regs   = dma_base + (channel * stride);

		/* point current stats somewhere */
		cp->stats  = &cp->stats_store;
		cp->stats_store.timeout_shortest = LONG_MAX;

		/* basic channel configuration */
		cp->load_timeout = 1<<18;

		printk(KERN_INFO "DMA channel %d at %p, irq %d\n",
		       cp->number, cp->regs, cp->irq);
	}

	return 0;

 err_unmap:
	/* this path is only reached when kmem_cache_create() failed, so
	 * dma_kmem is NULL; the old code called kmem_cache_destroy(NULL)
	 * here, which is not NULL-safe on kernels of this vintage */
	iounmap(dma_base);
	dma_base = NULL;
	return ret;
}
/* default init for the original S3C2410: 4 channels, irqs from IRQ_DMA0,
 * register banks 0x40 bytes apart */
int __init s3c2410_dma_init(void)
{
	return s3c24xx_dma_init(4, IRQ_DMA0, 0x40);
}
/* true (non-zero) if a channel-map entry carries the DMA_CH_VALID flag */
static inline int is_channel_valid(unsigned int channel)
{
	return (channel & DMA_CH_VALID);
}
static struct s3c24xx_dma_order *dma_order;
/* s3c2410_dma_map_channel()
*
* turn the virtual channel number into a real, and un-used hardware
* channel.
*
* first, try the dma ordering given to us by either the relevant
* dma code, or the board. Then just find the first usable free
* channel
*/
static struct s3c2410_dma_chan *s3c2410_dma_map_channel(int channel)
{
struct s3c24xx_dma_order_ch *ord = NULL;
struct s3c24xx_dma_map *ch_map;
struct s3c2410_dma_chan *dmach;
int ch;
if (dma_sel.map == NULL || channel > dma_sel.map_size)
return NULL;
ch_map = dma_sel.map + channel;
/* first, try the board mapping */
if (dma_order) {
ord = &dma_order->channels[channel];
for (ch = 0; ch < dma_channels; ch++) {
if (!is_channel_valid(ord->list[ch]))
continue;
if (s3c2410_chans[ord->list[ch]].in_use == 0) {
ch = ord->list[ch] & ~DMA_CH_VALID;
goto found;
}
}
if (ord->flags & DMA_CH_NEVER)
return NULL;
}
/* second, search the channel map for first free */
for (ch = 0; ch < dma_channels; ch++) {
if (!is_channel_valid(ch_map->channels[ch]))
continue;
if (s3c2410_chans[ch].in_use == 0) {
printk("mapped channel %d to %d\n", channel, ch);
break;
}
}
if (ch >= dma_channels)
return NULL;
/* update our channel mapping */
found:
dmach = &s3c2410_chans[ch];
dmach->map = ch_map;
dma_chan_map[channel] = dmach;
/* select the channel */
(dma_sel.select)(dmach, ch_map);
return dmach;
}
/* placeholder hook for validating a copied channel-map entry; currently
 * performs no checks and always reports success */
static int s3c24xx_dma_check_entry(struct s3c24xx_dma_map *map, int ch)
{
	return 0;
}
/* s3c24xx_dma_init_map
 *
 * install the SoC-specific channel selection data, taking a private
 * copy of its channel map so the caller's table need not stay around.
 */
int __init s3c24xx_dma_init_map(struct s3c24xx_dma_selection *sel)
{
	struct s3c24xx_dma_map *map_copy;
	size_t bytes = sizeof(*map_copy) * sel->map_size;
	int idx;

	map_copy = kmalloc(bytes, GFP_KERNEL);
	if (map_copy == NULL)
		return -ENOMEM;

	memcpy(map_copy, sel->map, bytes);
	memcpy(&dma_sel, sel, sizeof(*sel));

	/* point the installed selection at our private copy */
	dma_sel.map = map_copy;

	for (idx = 0; idx < sel->map_size; idx++)
		s3c24xx_dma_check_entry(map_copy + idx, idx);

	return 0;
}
/* s3c24xx_dma_order_set
 *
 * store the board's preferred channel ordering, re-using any block
 * allocated by an earlier call.
 */
int __init s3c24xx_dma_order_set(struct s3c24xx_dma_order *ord)
{
	struct s3c24xx_dma_order *copy;

	copy = dma_order;
	if (copy == NULL)
		copy = kmalloc(sizeof(struct s3c24xx_dma_order), GFP_KERNEL);

	if (copy == NULL) {
		printk(KERN_ERR "no memory to store dma channel order\n");
		return -ENOMEM;
	}

	dma_order = copy;
	memcpy(copy, ord, sizeof(struct s3c24xx_dma_order));
	return 0;
}
| gpl-2.0 |
nian0114/CherryS.AOSP-smdk4412 | drivers/infiniband/ulp/ipoib/ipoib_cm.c | 652 | 43530 | /*
* Copyright (c) 2006 Mellanox Technologies. All rights reserved
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <rdma/ib_cm.h>
#include <net/dst.h>
#include <net/icmp.h>
#include <linux/icmpv6.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "ipoib.h"
/* cap on connected-mode QPs per interface when no SRQ is available */
int ipoib_max_conn_qp = 128;

module_param_named(max_nonsrq_conn_qp, ipoib_max_conn_qp, int, 0444);
MODULE_PARM_DESC(max_nonsrq_conn_qp,
		 "Max number of connected-mode QPs per interface "
		 "(applied only if shared receive queue is not available)");

#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA
static int data_debug_level;

module_param_named(cm_data_debug_level, data_debug_level, int, 0644);
MODULE_PARM_DESC(cm_data_debug_level,
		 "Enable data path debug tracing for connected mode if > 0");
#endif

#define IPOIB_CM_IETF_ID 0x1000000000000000ULL

/* RX-connection staleness/reap intervals, in jiffies */
#define IPOIB_CM_RX_UPDATE_TIME (256 * HZ)
#define IPOIB_CM_RX_TIMEOUT     (2 * 256 * HZ)
#define IPOIB_CM_RX_DELAY       (3 * 256 * HZ)
#define IPOIB_CM_RX_UPDATE_MASK (0x3)

/* attribute used to push a QP into the error state */
static struct ib_qp_attr ipoib_cm_err_attr = {
	.qp_state = IB_QPS_ERR
};

#define IPOIB_CM_RX_DRAIN_WRID 0xffffffff

/* send WR posted on an errored QP to generate a final drain completion */
static struct ib_send_wr ipoib_cm_rx_drain_wr = {
	.wr_id = IPOIB_CM_RX_DRAIN_WRID,
	.opcode = IB_WR_SEND,
};
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
struct ib_cm_event *event);
/* unmap one receive buffer: slot 0 of @mapping is the linear head
 * buffer, slots 1..@frags are full-page fragments */
static void ipoib_cm_dma_unmap_rx(struct ipoib_dev_priv *priv, int frags,
				  u64 mapping[IPOIB_CM_RX_SG])
{
	int frag;

	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (frag = 0; frag < frags; ++frag)
		ib_dma_unmap_single(priv->ca, mapping[frag + 1], PAGE_SIZE, DMA_FROM_DEVICE);
}
/* post ring entry @id back onto the shared receive queue, re-using the
 * pre-built rx_wr/rx_sge templates in priv->cm; on failure the entry's
 * skb is unmapped, freed and cleared. */
static int ipoib_cm_post_receive_srq(struct net_device *dev, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	/* encode the ring index plus CM/RECV flags into the wr_id */
	priv->cm.rx_wr.wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < priv->cm.num_frags; ++i)
		priv->cm.rx_sge[i].addr = priv->cm.srq_ring[id].mapping[i];

	ret = ib_post_srq_recv(priv->cm.srq, &priv->cm.rx_wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post srq failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, priv->cm.num_frags - 1,
				      priv->cm.srq_ring[id].mapping);
		dev_kfree_skb_any(priv->cm.srq_ring[id].skb);
		priv->cm.srq_ring[id].skb = NULL;
	}

	return ret;
}
/* non-SRQ variant: post ring entry @id onto the per-connection QP of
 * @rx, using the caller-supplied wr/sge scratch (see
 * ipoib_cm_nonsrq_init_rx); on failure the entry's skb is released. */
static int ipoib_cm_post_receive_nonsrq(struct net_device *dev,
					struct ipoib_cm_rx *rx,
					struct ib_recv_wr *wr,
					struct ib_sge *sge, int id)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_recv_wr *bad_wr;
	int i, ret;

	wr->wr_id = id | IPOIB_OP_CM | IPOIB_OP_RECV;

	for (i = 0; i < IPOIB_CM_RX_SG; ++i)
		sge[i].addr = rx->rx_ring[id].mapping[i];

	ret = ib_post_recv(rx->qp, wr, &bad_wr);
	if (unlikely(ret)) {
		ipoib_warn(priv, "post recv failed for buf %d (%d)\n", id, ret);
		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx->rx_ring[id].mapping);
		dev_kfree_skb_any(rx->rx_ring[id].skb);
		rx->rx_ring[id].skb = NULL;
	}

	return ret;
}
/* allocate and DMA-map one receive skb: a linear head buffer plus
 * @frags page fragments, recording the DMA addresses in @mapping and
 * storing the skb in rx_ring[id]. Returns the skb or NULL on failure
 * (in which case everything allocated so far has been unwound). */
static struct sk_buff *ipoib_cm_alloc_rx_skb(struct net_device *dev,
					     struct ipoib_cm_rx_buf *rx_ring,
					     int id, int frags,
					     u64 mapping[IPOIB_CM_RX_SG])
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct sk_buff *skb;
	int i;

	skb = dev_alloc_skb(IPOIB_CM_HEAD_SIZE + 12);
	if (unlikely(!skb))
		return NULL;

	/*
	 * IPoIB adds a 4 byte header. So we need 12 more bytes to align the
	 * IP header to a multiple of 16.
	 */
	skb_reserve(skb, 12);

	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
				       DMA_FROM_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}

	for (i = 0; i < frags; i++) {
		struct page *page = alloc_page(GFP_ATOMIC);

		if (!page)
			goto partial_error;
		skb_fill_page_desc(skb, i, page, 0, PAGE_SIZE);

		mapping[i + 1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[i].page,
						 0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
			goto partial_error;
	}

	rx_ring[id].skb = skb;
	return skb;

partial_error:
	/* unmap the head, then the i fragments that were mapped before the
	 * failure (mapping[1..i]); freeing the skb releases the pages */
	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);

	for (; i > 0; --i)
		ib_dma_unmap_single(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);

	dev_kfree_skb_any(skb);
	return NULL;
}
/* release every populated entry of an RX ring (unmap + free skb), then
 * free the ring itself; entries whose skb is NULL are skipped */
static void ipoib_cm_free_rx_ring(struct net_device *dev,
				  struct ipoib_cm_rx_buf *rx_ring)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int idx;

	for (idx = 0; idx < ipoib_recvq_size; ++idx) {
		if (!rx_ring[idx].skb)
			continue;

		ipoib_cm_dma_unmap_rx(priv, IPOIB_CM_RX_SG - 1,
				      rx_ring[idx].mapping);
		dev_kfree_skb_any(rx_ring[idx].skb);
	}

	vfree(rx_ring);
}
/* kick off draining of the flushed RX connections: post a single drain
 * WR on the first flushed QP and move the whole flush list to the
 * drain list. Caller must hold priv->lock. */
static void ipoib_cm_start_rx_drain(struct ipoib_dev_priv *priv)
{
	struct ib_send_wr *bad_wr;
	struct ipoib_cm_rx *p;

	/* We only reserved 1 extra slot in CQ for drain WRs, so
	 * make sure we have at most 1 outstanding WR. */
	if (list_empty(&priv->cm.rx_flush_list) ||
	    !list_empty(&priv->cm.rx_drain_list))
		return;

	/*
	 * QPs on flush list are error state. This way, a "flush
	 * error" WC will be immediately generated for each WR we post.
	 */
	p = list_entry(priv->cm.rx_flush_list.next, typeof(*p), list);
	if (ib_post_send(p->qp, &ipoib_cm_rx_drain_wr, &bad_wr))
		ipoib_warn(priv, "failed to post drain wr\n");

	list_splice_init(&priv->cm.rx_flush_list, &priv->cm.rx_drain_list);
}
/* QP async-event handler: on IB_EVENT_QP_LAST_WQE_REACHED move the
 * connection onto the flush list and start the drain machinery */
static void ipoib_cm_rx_event_handler(struct ib_event *event, void *ctx)
{
	struct ipoib_cm_rx *rx = ctx;
	struct ipoib_dev_priv *priv = netdev_priv(rx->dev);
	unsigned long flags;

	if (event->event != IB_EVENT_QP_LAST_WQE_REACHED)
		return;

	spin_lock_irqsave(&priv->lock, flags);
	list_move(&rx->list, &priv->cm.rx_flush_list);
	rx->state = IPOIB_CM_RX_FLUSH;
	ipoib_cm_start_rx_drain(priv);
	spin_unlock_irqrestore(&priv->lock, flags);
}
/* create the RC QP for an incoming CM connection; receive resources
 * come from the SRQ when available, otherwise a private receive queue
 * is sized here. Returns the QP or an ERR_PTR. */
static struct ib_qp *ipoib_cm_create_rx_qp(struct net_device *dev,
					   struct ipoib_cm_rx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {
		.event_handler = ipoib_cm_rx_event_handler,
		.send_cq = priv->recv_cq, /* For drain WR */
		.recv_cq = priv->recv_cq,
		.srq = priv->cm.srq,
		.cap.max_send_wr = 1, /* For drain WR */
		.cap.max_send_sge = 1, /* FIXME: 0 Seems not to work */
		.sq_sig_type = IB_SIGNAL_ALL_WR,
		.qp_type = IB_QPT_RC,
		.qp_context = p,
	};

	if (!ipoib_cm_has_srq(dev)) {
		attr.cap.max_recv_wr  = ipoib_recvq_size;
		attr.cap.max_recv_sge = IPOIB_CM_RX_SG;
	}

	return ib_create_qp(priv->pd, &attr);
}
/* walk the receive QP through INIT -> RTR -> RTS using the attributes
 * supplied by the CM. INIT/RTR failures are fatal; RTS failures are
 * deliberately ignored (see the workaround comment below). */
static int ipoib_cm_modify_rx_qp(struct net_device *dev,
				 struct ib_cm_id *cm_id, struct ib_qp *qp,
				 unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	qp_attr.qp_state = IB_QPS_INIT;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for INIT: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to INIT: %d\n", ret);
		return ret;
	}
	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}
	/* use our locally chosen starting PSN for the receive queue */
	qp_attr.rq_psn = psn;
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	/*
	 * Current Mellanox HCA firmware won't generate completions
	 * with error for drain WRs unless the QP has been moved to
	 * RTS first. This work-around leaves a window where a QP has
	 * moved to error asynchronously, but this will eventually get
	 * fixed in firmware, so let's not error out if modify QP
	 * fails.
	 */
	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return 0;
	}
	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return 0;
	}

	return 0;
}
/* initialise a receive work request template and its scatter list:
 * SGE 0 covers the linear head buffer, the rest cover one page each */
static void ipoib_cm_init_rx_wr(struct net_device *dev,
				struct ib_recv_wr *wr,
				struct ib_sge *sge)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->cm.num_frags; ++i) {
		sge[i].lkey   = priv->mr->lkey;
		sge[i].length = i ? PAGE_SIZE : IPOIB_CM_HEAD_SIZE;
	}

	wr->next    = NULL;
	wr->sg_list = sge;
	wr->num_sge = priv->cm.num_frags;
}
/* set up the private receive queue for a non-SRQ connection: allocate
 * the ring, enforce the per-interface connection cap, then allocate
 * and post every receive buffer. Rejects the connection (REJ_NO_QP)
 * when the cap is hit. */
static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_id,
				   struct ipoib_cm_rx *rx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct {
		struct ib_recv_wr wr;
		struct ib_sge sge[IPOIB_CM_RX_SG];
	} *t;
	int ret;
	int i;

	rx->rx_ring = vzalloc(ipoib_recvq_size * sizeof *rx->rx_ring);
	if (!rx->rx_ring) {
		printk(KERN_WARNING "%s: failed to allocate CM non-SRQ ring (%d entries)\n",
		       priv->ca->name, ipoib_recvq_size);
		return -ENOMEM;
	}

	/* heap-allocated scratch WR+SGE template, freed before returning */
	t = kmalloc(sizeof *t, GFP_KERNEL);
	if (!t) {
		ret = -ENOMEM;
		goto err_free;
	}

	ipoib_cm_init_rx_wr(dev, &t->wr, t->sge);

	/* count this connection against the module-wide non-SRQ QP cap */
	spin_lock_irq(&priv->lock);

	if (priv->cm.nonsrq_conn_qp >= ipoib_max_conn_qp) {
		spin_unlock_irq(&priv->lock);
		ib_send_cm_rej(cm_id, IB_CM_REJ_NO_QP, NULL, 0, NULL, 0);
		ret = -EINVAL;
		goto err_free;
	} else
		++priv->cm.nonsrq_conn_qp;

	spin_unlock_irq(&priv->lock);

	for (i = 0; i < ipoib_recvq_size; ++i) {
		if (!ipoib_cm_alloc_rx_skb(dev, rx->rx_ring, i, IPOIB_CM_RX_SG - 1,
					   rx->rx_ring[i].mapping)) {
			ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
			ret = -ENOMEM;
			goto err_count;
		}
		ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
		if (ret) {
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq "
				   "failed for buf %d\n", i);
			ret = -EIO;
			goto err_count;
		}
	}

	rx->recv_count = ipoib_recvq_size;

	kfree(t);

	return 0;

err_count:
	/* undo the connection-count increment taken above */
	spin_lock_irq(&priv->lock);
	--priv->cm.nonsrq_conn_qp;
	spin_unlock_irq(&priv->lock);

err_free:
	kfree(t);
	ipoib_cm_free_rx_ring(dev, rx->rx_ring);

	return ret;
}
/* build and send the CM REP for an accepted connection, advertising
 * our datagram QPN and CM buffer size in the private data */
static int ipoib_cm_send_rep(struct net_device *dev, struct ib_cm_id *cm_id,
			     struct ib_qp *qp, struct ib_cm_req_event_param *req,
			     unsigned psn)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {};
	struct ib_cm_rep_param rep = {};

	data.qpn = cpu_to_be32(priv->qp->qp_num);
	data.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE);

	rep.private_data     = &data;
	rep.private_data_len = sizeof data;
	rep.flow_control     = 0;
	rep.rnr_retry_count  = req->rnr_retry_count;
	rep.srq              = ipoib_cm_has_srq(dev);
	rep.qp_num           = qp->qp_num;
	rep.starting_psn     = psn;

	return ib_send_cm_rep(cm_id, &rep);
}
/* handle an incoming CM REQ: allocate per-connection state, create and
 * bring up the receive QP, set up non-SRQ receive buffers if needed,
 * add the connection to the passive list and send the REP. */
static int ipoib_cm_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct net_device *dev = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned psn;
	int ret;

	ipoib_dbg(priv, "REQ arrived\n");
	p = kzalloc(sizeof *p, GFP_KERNEL);
	if (!p)
		return -ENOMEM;
	p->dev = dev;
	p->id = cm_id;
	cm_id->context = p;
	p->state = IPOIB_CM_RX_LIVE;
	p->jiffies = jiffies;
	INIT_LIST_HEAD(&p->list);

	p->qp = ipoib_cm_create_rx_qp(dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		goto err_qp;
	}

	/* random starting PSN for the receive queue */
	psn = random32() & 0xffffff;
	ret = ipoib_cm_modify_rx_qp(dev, cm_id, p->qp, psn);
	if (ret)
		goto err_modify;

	if (!ipoib_cm_has_srq(dev)) {
		ret = ipoib_cm_nonsrq_init_rx(dev, cm_id, p);
		if (ret)
			goto err_modify;
	}

	spin_lock_irq(&priv->lock);
	queue_delayed_work(ipoib_workqueue,
			   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	/* Add this entry to passive ids list head, but do not re-add it
	 * if IB_EVENT_QP_LAST_WQE_REACHED has moved it to flush list. */
	p->jiffies = jiffies;
	if (p->state == IPOIB_CM_RX_LIVE)
		list_move(&p->list, &priv->cm.passive_ids);
	spin_unlock_irq(&priv->lock);

	ret = ipoib_cm_send_rep(dev, cm_id, p->qp, &event->param.req_rcvd, psn);
	if (ret) {
		ipoib_warn(priv, "failed to send REP: %d\n", ret);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
	}
	return 0;

err_modify:
	ib_destroy_qp(p->qp);
err_qp:
	kfree(p);
	return ret;
}
/*
 * CM event handler for passive (receive-side) connections.
 *
 * A REQ sets up a brand-new connection; a DREQ is acknowledged with a
 * DREP and then handled like a REJ, i.e. the connection's QP is moved
 * to the error state so its buffers flush and the connection can be
 * reaped.  All other events are ignored.  Always returns 0 except for
 * REQ, where the req handler's status is propagated.
 *
 * Fix: the DREQ case contained a dead store (`p = cm_id->context;`)
 * that was immediately overwritten by the identical assignment in the
 * fall-through REJ case; it has been removed.
 */
static int ipoib_cm_rx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_rx *p;
	struct ipoib_dev_priv *priv;

	switch (event->event) {
	case IB_CM_REQ_RECEIVED:
		return ipoib_cm_req_handler(cm_id, event);
	case IB_CM_DREQ_RECEIVED:
		ib_send_cm_drep(cm_id, NULL, 0);
		/* Fall through */
	case IB_CM_REJ_RECEIVED:
		p = cm_id->context;
		priv = netdev_priv(p->dev);
		if (ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE))
			ipoib_warn(priv, "unable to move qp to error state\n");
		/* Fall through */
	default:
		return 0;
	}
}
/* Adjust length of skb with fragments to match received data
 *
 * Up to @hdr_space bytes of @length are accounted to the linear part
 * of @skb; the remainder is distributed over its page fragments,
 * PAGE_SIZE at a time.  Fragment pages that turn out not to hold any
 * data for this packet are handed over to @toskb (the freshly
 * allocated replacement skb) so they can be reused there.
 */
static void skb_put_frags(struct sk_buff *skb, unsigned int hdr_space,
			  unsigned int length, struct sk_buff *toskb)
{
	int i, num_frags;
	unsigned int size;

	/* put header into skb */
	size = min(length, hdr_space);
	skb->tail += size;
	skb->len += size;
	length -= size;

	num_frags = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < num_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		if (length == 0) {
			/* don't need this page */
			skb_fill_page_desc(toskb, i, frag->page, 0, PAGE_SIZE);
			--skb_shinfo(skb)->nr_frags;
		} else {
			size = min(length, (unsigned) PAGE_SIZE);

			/* claim (part of) this page for the packet */
			frag->size = size;
			skb->data_len += size;
			skb->truesize += size;
			skb->len += size;
			length -= size;
		}
	}
}
/*
 * Receive completion handler for connected-mode QPs.
 *
 * Handles three cases:
 *  - the special drain WR id posted while tearing connections down,
 *  - completions with an error status (counted as rx_dropped; for
 *    non-SRQ connections the last outstanding buffer queues the
 *    connection for reaping),
 *  - normal receives, which are passed up the stack after either a
 *    copy into a small skb (packets below IPOIB_CM_COPYBREAK) or a
 *    buffer swap with a freshly allocated ring entry.
 */
void ipoib_cm_handle_rx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx_buf *rx_ring;
	unsigned int wr_id = wc->wr_id & ~(IPOIB_OP_CM | IPOIB_OP_RECV);
	struct sk_buff *skb, *newskb;
	struct ipoib_cm_rx *p;
	unsigned long flags;
	u64 mapping[IPOIB_CM_RX_SG];
	int frags;
	int has_srq;
	struct sk_buff *small_skb;

	ipoib_dbg_data(priv, "cm recv completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_recvq_size)) {
		/* out-of-range ids are either the drain sentinel or a bug */
		if (wr_id == (IPOIB_CM_RX_DRAIN_WRID & ~(IPOIB_OP_CM | IPOIB_OP_RECV))) {
			spin_lock_irqsave(&priv->lock, flags);
			list_splice_init(&priv->cm.rx_drain_list, &priv->cm.rx_reap_list);
			ipoib_cm_start_rx_drain(priv);
			queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			spin_unlock_irqrestore(&priv->lock, flags);
		} else
			ipoib_warn(priv, "cm recv completion event with wrid %d (> %d)\n",
				   wr_id, ipoib_recvq_size);
		return;
	}

	p = wc->qp->qp_context;

	has_srq = ipoib_cm_has_srq(dev);
	/* with an SRQ all connections share one ring, otherwise each
	 * connection carries its own */
	rx_ring = has_srq ? priv->cm.srq_ring : p->rx_ring;

	skb = rx_ring[wr_id].skb;

	if (unlikely(wc->status != IB_WC_SUCCESS)) {
		ipoib_dbg(priv, "cm recv error "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);
		++dev->stats.rx_dropped;
		if (has_srq)
			goto repost;
		else {
			/* last outstanding buffer gone: queue the
			 * connection for reaping */
			if (!--p->recv_count) {
				spin_lock_irqsave(&priv->lock, flags);
				list_move(&p->list, &priv->cm.rx_reap_list);
				spin_unlock_irqrestore(&priv->lock, flags);
				queue_work(ipoib_workqueue, &priv->cm.rx_reap_task);
			}
			return;
		}
	}

	/* periodically refresh this connection's position in the LRU list */
	if (unlikely(!(wr_id & IPOIB_CM_RX_UPDATE_MASK))) {
		if (p && time_after_eq(jiffies, p->jiffies + IPOIB_CM_RX_UPDATE_TIME)) {
			spin_lock_irqsave(&priv->lock, flags);
			p->jiffies = jiffies;
			/* Move this entry to list head, but do not re-add it
			 * if it has been moved out of list. */
			if (p->state == IPOIB_CM_RX_LIVE)
				list_move(&p->list, &priv->cm.passive_ids);
			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	if (wc->byte_len < IPOIB_CM_COPYBREAK) {
		int dlen = wc->byte_len;

		/* short packet: copy it out and keep the ring buffer in
		 * place (12 bytes of headroom are reserved -- presumably
		 * for header alignment; confirm against the TX side) */
		small_skb = dev_alloc_skb(dlen + 12);
		if (small_skb) {
			skb_reserve(small_skb, 12);
			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
						   dlen, DMA_FROM_DEVICE);
			skb_copy_from_linear_data(skb, small_skb->data, dlen);
			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
						      dlen, DMA_FROM_DEVICE);
			skb_put(small_skb, dlen);
			skb = small_skb;
			goto copied;
		}
	}

	/* number of page fragments carrying payload beyond the head */
	frags = PAGE_ALIGN(wc->byte_len - min(wc->byte_len,
					      (unsigned)IPOIB_CM_HEAD_SIZE)) / PAGE_SIZE;

	newskb = ipoib_cm_alloc_rx_skb(dev, rx_ring, wr_id, frags, mapping);
	if (unlikely(!newskb)) {
		/*
		 * If we can't allocate a new RX buffer, dump
		 * this packet and reuse the old buffer.
		 */
		ipoib_dbg(priv, "failed to allocate receive buffer %d\n", wr_id);
		++dev->stats.rx_dropped;
		goto repost;
	}

	/* swap the filled buffer out of the ring for the fresh one */
	ipoib_cm_dma_unmap_rx(priv, frags, rx_ring[wr_id].mapping);
	memcpy(rx_ring[wr_id].mapping, mapping, (frags + 1) * sizeof *mapping);

	ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n",
		       wc->byte_len, wc->slid);

	skb_put_frags(skb, IPOIB_CM_HEAD_SIZE, wc->byte_len, newskb);

copied:
	/* strip the IPoIB encapsulation header and hand the skb upstack */
	skb->protocol = ((struct ipoib_header *) skb->data)->proto;
	skb_reset_mac_header(skb);
	skb_pull(skb, IPOIB_ENCAP_LEN);

	++dev->stats.rx_packets;
	dev->stats.rx_bytes += skb->len;

	skb->dev = dev;
	/* XXX get correct PACKET_ type here */
	skb->pkt_type = PACKET_HOST;
	netif_receive_skb(skb);

repost:
	if (has_srq) {
		if (unlikely(ipoib_cm_post_receive_srq(dev, wr_id)))
			ipoib_warn(priv, "ipoib_cm_post_receive_srq failed "
				   "for buf %d\n", wr_id);
	} else {
		if (unlikely(ipoib_cm_post_receive_nonsrq(dev, p,
							  &priv->cm.rx_wr,
							  priv->cm.rx_sge,
							  wr_id))) {
			--p->recv_count;
			ipoib_warn(priv, "ipoib_cm_post_receive_nonsrq failed "
				   "for buf %d\n", wr_id);
		}
	}
}
/*
 * Post a single-fragment send work request on the connection's QP.
 * The device-wide SGE and WR templates are reused; only the buffer
 * address/length and the CM-tagged wr_id change per packet.
 */
static inline int post_send(struct ipoib_dev_priv *priv,
			    struct ipoib_cm_tx *tx,
			    unsigned int wr_id,
			    u64 addr, int len)
{
	struct ib_send_wr *bad_send_wr;

	priv->tx_wr.wr_id    = wr_id | IPOIB_OP_CM;
	priv->tx_wr.num_sge  = 1;
	priv->tx_sge[0].addr   = addr;
	priv->tx_sge[0].length = len;

	return ib_post_send(tx->qp, &priv->tx_wr, &bad_send_wr);
}
/*
 * Transmit one skb over the connected-mode connection @tx.
 *
 * Packets longer than the connection MTU are dropped with PMTU
 * feedback via ipoib_cm_skb_too_long().  The skb is recorded in the
 * TX ring and DMA-mapped before posting; when posting fills the ring,
 * the net queue is stopped and the send CQ re-armed so the completion
 * handler can wake it again.
 */
void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx_buf *tx_req;
	u64 addr;
	int rc;

	if (unlikely(skb->len > tx->mtu)) {
		ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n",
			   skb->len, tx->mtu);
		++dev->stats.tx_dropped;
		++dev->stats.tx_errors;
		ipoib_cm_skb_too_long(dev, skb, tx->mtu - IPOIB_ENCAP_LEN);
		return;
	}

	ipoib_dbg_data(priv, "sending packet: head 0x%x length %d connection 0x%x\n",
		       tx->tx_head, skb->len, tx->qp->qp_num);

	/*
	 * We put the skb into the tx_ring _before_ we call post_send()
	 * because it's entirely possible that the completion handler will
	 * run before we execute anything after the post_send().  That
	 * means we have to make sure everything is properly recorded and
	 * our state is consistent before we call post_send().
	 */
	tx_req = &tx->tx_ring[tx->tx_head & (ipoib_sendq_size - 1)];
	tx_req->skb = skb;
	addr = ib_dma_map_single(priv->ca, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(ib_dma_mapping_error(priv->ca, addr))) {
		++dev->stats.tx_errors;
		dev_kfree_skb_any(skb);
		return;
	}

	tx_req->mapping = addr;

	rc = post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1),
		       addr, skb->len);
	if (unlikely(rc)) {
		ipoib_warn(priv, "post_send failed, error %d\n", rc);
		++dev->stats.tx_errors;
		ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE);
		dev_kfree_skb_any(skb);
	} else {
		dev->trans_start = jiffies;
		++tx->tx_head;

		if (++priv->tx_outstanding == ipoib_sendq_size) {
			ipoib_dbg(priv, "TX ring 0x%x full, stopping kernel net queue\n",
				  tx->qp->qp_num);
			netif_stop_queue(dev);
			rc = ib_req_notify_cq(priv->send_cq,
				IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
			if (rc < 0)
				ipoib_warn(priv, "request notify on send CQ failed\n");
			else if (rc)
				/* a completion was already pending: poll it now */
				ipoib_send_comp_handler(priv->send_cq, dev);
		}
	}
}
/*
 * Send completion handler for connected-mode QPs: unmap and free the
 * completed skb, advance the ring tail, wake the net queue if it was
 * stopped for lack of TX slots, and on a real (non-flush) error detach
 * the neighbour and queue the connection for reaping.
 */
void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx = wc->qp->qp_context;
	unsigned int wr_id = wc->wr_id & ~IPOIB_OP_CM;
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long flags;

	ipoib_dbg_data(priv, "cm send completion: id %d, status: %d\n",
		       wr_id, wc->status);

	if (unlikely(wr_id >= ipoib_sendq_size)) {
		ipoib_warn(priv, "cm send completion event with wrid %d (> %d)\n",
			   wr_id, ipoib_sendq_size);
		return;
	}

	tx_req = &tx->tx_ring[wr_id];

	ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE);

	/* FIXME: is this right? Shouldn't we only increment on success? */
	++dev->stats.tx_packets;
	dev->stats.tx_bytes += tx_req->skb->len;

	dev_kfree_skb_any(tx_req->skb);

	netif_tx_lock(dev);

	++tx->tx_tail;
	/* wake the queue once the ring has drained to half full */
	if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
	    netif_queue_stopped(dev) &&
	    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
		netif_wake_queue(dev);

	if (wc->status != IB_WC_SUCCESS &&
	    wc->status != IB_WC_WR_FLUSH_ERR) {
		struct ipoib_neigh *neigh;

		ipoib_dbg(priv, "failed cm send event "
			   "(status=%d, wrid=%d vend_err %x)\n",
			   wc->status, wr_id, wc->vendor_err);

		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		/* detach the neighbour so a fresh connection can be built */
		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);
			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		clear_bit(IPOIB_FLAG_OPER_UP, &tx->flags);

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	netif_tx_unlock(dev);
}
/*
 * Bring up the connected-mode listener for this interface: create a
 * CM ID and listen on the IPoIB service ID derived from our datagram
 * QPN.  Returns 0 on success or a negative errno; priv->cm.id is left
 * NULL on any failure.
 */
int ipoib_cm_dev_open(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr))
		return 0;

	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
	if (IS_ERR(priv->cm.id)) {
		printk(KERN_WARNING "%s: failed to create CM ID\n", priv->ca->name);
		ret = PTR_ERR(priv->cm.id);
		priv->cm.id = NULL;
		return ret;
	}

	ret = ib_cm_listen(priv->cm.id, cpu_to_be64(IPOIB_CM_IETF_ID | priv->qp->qp_num),
			   0, NULL);
	if (ret) {
		printk(KERN_WARNING "%s: failed to listen on ID 0x%llx\n", priv->ca->name,
		       IPOIB_CM_IETF_ID | priv->qp->qp_num);
		ib_destroy_cm_id(priv->cm.id);
		priv->cm.id = NULL;
		return ret;
	}

	return 0;
}
static void ipoib_cm_free_rx_reap_list(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ipoib_cm_rx *rx, *n;
LIST_HEAD(list);
spin_lock_irq(&priv->lock);
list_splice_init(&priv->cm.rx_reap_list, &list);
spin_unlock_irq(&priv->lock);
list_for_each_entry_safe(rx, n, &list, list) {
ib_destroy_cm_id(rx->id);
ib_destroy_qp(rx->qp);
if (!ipoib_cm_has_srq(dev)) {
ipoib_cm_free_rx_ring(priv->dev, rx->rx_ring);
spin_lock_irq(&priv->lock);
--priv->cm.nonsrq_conn_qp;
spin_unlock_irq(&priv->lock);
}
kfree(rx);
}
}
/*
 * Shut connected mode down: stop listening, move every live passive
 * connection into the error state, wait (up to 5 seconds) for the
 * receive queues to drain, then reap everything.  If the drain times
 * out the HW is assumed wedged and all connections are reaped anyway.
 */
void ipoib_cm_dev_stop(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_rx *p;
	unsigned long begin;
	int ret;

	if (!IPOIB_CM_SUPPORTED(dev->dev_addr) || !priv->cm.id)
		return;

	ib_destroy_cm_id(priv->cm.id);
	priv->cm.id = NULL;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		p = list_entry(priv->cm.passive_ids.next, typeof(*p), list);
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		/* ib_modify_qp() is called with the lock dropped */
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	/* Wait for all RX to be drained */
	begin = jiffies;

	while (!list_empty(&priv->cm.rx_error_list) ||
	       !list_empty(&priv->cm.rx_flush_list) ||
	       !list_empty(&priv->cm.rx_drain_list)) {
		if (time_after(jiffies, begin + 5 * HZ)) {
			ipoib_warn(priv, "RX drain timing out\n");

			/*
			 * assume the HW is wedged and just free up everything.
			 */
			list_splice_init(&priv->cm.rx_flush_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_error_list,
					 &priv->cm.rx_reap_list);
			list_splice_init(&priv->cm.rx_drain_list,
					 &priv->cm.rx_reap_list);
			break;
		}
		spin_unlock_irq(&priv->lock);
		msleep(1);
		/* poll the CQ ourselves so the drain makes progress */
		ipoib_drain_cq(dev);
		spin_lock_irq(&priv->lock);
	}

	spin_unlock_irq(&priv->lock);

	ipoib_cm_free_rx_reap_list(dev);

	cancel_delayed_work(&priv->cm.stale_task);
}
/*
 * Active-side handler for a received CM REP: validate the advertised
 * MTU, transition our RC QP through RTR to RTS, mark the connection
 * operational, flush any packets that were queued on the neighbour
 * while connecting, and confirm with an RTU.  Returns 0 on success or
 * a negative errno (the caller rejects the connection on failure).
 */
static int ipoib_cm_rep_handler(struct ib_cm_id *cm_id, struct ib_cm_event *event)
{
	struct ipoib_cm_tx *p = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_data *data = event->private_data;
	struct sk_buff_head skqueue;
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;
	struct sk_buff *skb;

	p->mtu = be32_to_cpu(data->mtu);

	/* the advertised MTU must leave room beyond the encap header */
	if (p->mtu <= IPOIB_ENCAP_LEN) {
		ipoib_warn(priv, "Rejecting connection: mtu %d <= %d\n",
			   p->mtu, IPOIB_ENCAP_LEN);
		return -EINVAL;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTR: %d\n", ret);
		return ret;
	}

	qp_attr.rq_psn = 0 /* FIXME */;
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR: %d\n", ret);
		return ret;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	ret = ib_cm_init_qp_attr(cm_id, &qp_attr, &qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to init QP attr for RTS: %d\n", ret);
		return ret;
	}
	ret = ib_modify_qp(p->qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS: %d\n", ret);
		return ret;
	}

	skb_queue_head_init(&skqueue);

	/* collect queued skbs under the lock, transmit them outside it */
	spin_lock_irq(&priv->lock);
	set_bit(IPOIB_FLAG_OPER_UP, &p->flags);
	if (p->neigh)
		while ((skb = __skb_dequeue(&p->neigh->queue)))
			__skb_queue_tail(&skqueue, skb);
	spin_unlock_irq(&priv->lock);

	while ((skb = __skb_dequeue(&skqueue))) {
		skb->dev = p->dev;
		if (dev_queue_xmit(skb))
			ipoib_warn(priv, "dev_queue_xmit failed "
				   "to requeue packet\n");
	}

	ret = ib_send_cm_rtu(cm_id, NULL, 0);
	if (ret) {
		ipoib_warn(priv, "failed to send RTU: %d\n", ret);
		return ret;
	}
	return 0;
}
/*
 * Allocate the RC QP for an active (transmit-side) connection.  Send
 * completions are reported on the receive CQ, and the device SRQ (if
 * any) is attached.
 */
static struct ib_qp *ipoib_cm_create_tx_qp(struct net_device *dev, struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_init_attr attr = {};

	attr.send_cq		= priv->recv_cq;
	attr.recv_cq		= priv->recv_cq;
	attr.srq		= priv->cm.srq;
	attr.cap.max_send_wr	= ipoib_sendq_size;
	attr.cap.max_send_sge	= 1;
	attr.sq_sig_type	= IB_SIGNAL_ALL_WR;
	attr.qp_type		= IB_QPT_RC;
	attr.qp_context		= tx;

	return ib_create_qp(priv->pd, &attr);
}
/*
 * Send a CM REQ to open an active connection to the peer whose
 * datagram QPN is @qpn, over the path described by @pathrec.  The
 * private data carries our own datagram QPN and CM buffer size.
 * Returns the status of ib_send_cm_req().
 */
static int ipoib_cm_send_req(struct net_device *dev,
			     struct ib_cm_id *id, struct ib_qp *qp,
			     u32 qpn,
			     struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_data data = {
		.qpn = cpu_to_be32(priv->qp->qp_num),
		.mtu = cpu_to_be32(IPOIB_CM_BUF_SIZE),
	};
	struct ib_cm_req_param req = {
		.primary_path		= pathrec,
		.alternate_path		= NULL,
		.service_id		= cpu_to_be64(IPOIB_CM_IETF_ID | qpn),
		.qp_num			= qp->qp_num,
		.qp_type		= qp->qp_type,
		.private_data		= &data,
		.private_data_len	= sizeof data,
		.flow_control		= 0,
		.starting_psn		= 0, /* FIXME */
		/*
		 * Pick some arbitrary defaults here; we could make these
		 * module parameters if anyone cared about setting them.
		 */
		.responder_resources	= 4,
		.remote_cm_response_timeout = 20,
		.local_cm_response_timeout  = 20,
		.retry_count		= 0, /* RFC draft warns against retries */
		.rnr_retry_count	= 0, /* RFC draft warns against retries */
		.max_cm_retries		= 15,
		.srq			= ipoib_cm_has_srq(dev),
	};

	return ib_send_cm_req(id, &req);
}
/*
 * Take a freshly created TX QP from RESET to INIT: look up the pkey
 * index for our partition, then set state, access flags, pkey index
 * and port in a single modify operation.  Returns 0 or a negative
 * errno.
 */
static int ipoib_cm_modify_tx_init(struct net_device *dev,
				   struct ib_cm_id *cm_id, struct ib_qp *qp)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ib_qp_attr qp_attr;
	int qp_attr_mask, ret;

	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
	if (ret) {
		ipoib_warn(priv, "pkey 0x%x not found: %d\n", priv->pkey, ret);
		return ret;
	}

	qp_attr_mask		= IB_QP_STATE | IB_QP_ACCESS_FLAGS | IB_QP_PKEY_INDEX | IB_QP_PORT;
	qp_attr.qp_state	= IB_QPS_INIT;
	qp_attr.qp_access_flags	= IB_ACCESS_LOCAL_WRITE;
	qp_attr.port_num	= priv->port;

	ret = ib_modify_qp(qp, &qp_attr, qp_attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx QP to INIT: %d\n", ret);
		return ret;
	}
	return 0;
}
/*
 * Build the transmit side of an active connection: allocate the TX
 * ring, create the RC QP and its CM ID, move the QP to INIT and send
 * the connection request.  On failure every partially created
 * resource is unwound (and the pointers NULLed so a later
 * ipoib_cm_tx_destroy() skips them) and a negative errno is returned.
 */
static int ipoib_cm_tx_init(struct ipoib_cm_tx *p, u32 qpn,
			    struct ib_sa_path_rec *pathrec)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	int ret;

	p->tx_ring = vzalloc(ipoib_sendq_size * sizeof *p->tx_ring);
	if (!p->tx_ring) {
		ipoib_warn(priv, "failed to allocate tx ring\n");
		ret = -ENOMEM;
		goto err_tx;
	}

	p->qp = ipoib_cm_create_tx_qp(p->dev, p);
	if (IS_ERR(p->qp)) {
		ret = PTR_ERR(p->qp);
		ipoib_warn(priv, "failed to allocate tx qp: %d\n", ret);
		goto err_qp;
	}

	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
	if (IS_ERR(p->id)) {
		ret = PTR_ERR(p->id);
		ipoib_warn(priv, "failed to create tx cm id: %d\n", ret);
		goto err_id;
	}

	ret = ipoib_cm_modify_tx_init(p->dev, p->id, p->qp);
	if (ret) {
		ipoib_warn(priv, "failed to modify tx qp to rtr: %d\n", ret);
		goto err_modify;
	}

	ret = ipoib_cm_send_req(p->dev, p->id, p->qp, qpn, pathrec);
	if (ret) {
		ipoib_warn(priv, "failed to send cm req: %d\n", ret);
		goto err_send_cm;
	}

	ipoib_dbg(priv, "Request connection 0x%x for gid %pI6 qpn 0x%x\n",
		  p->qp->qp_num, pathrec->dgid.raw, qpn);

	return 0;

err_send_cm:
err_modify:
	ib_destroy_cm_id(p->id);
err_id:
	p->id = NULL;
	ib_destroy_qp(p->qp);
err_qp:
	p->qp = NULL;
	vfree(p->tx_ring);
err_tx:
	return ret;
}
/*
 * Destroy an active connection: destroy the CM ID first, wait up to
 * 5 seconds for in-flight sends to complete, then forcibly unmap and
 * free whatever is still outstanding -- returning TX slots to the
 * device and waking the queue as needed -- and finally free the QP,
 * the ring and the connection itself.
 */
static void ipoib_cm_tx_destroy(struct ipoib_cm_tx *p)
{
	struct ipoib_dev_priv *priv = netdev_priv(p->dev);
	struct ipoib_cm_tx_buf *tx_req;
	unsigned long begin;

	ipoib_dbg(priv, "Destroy active connection 0x%x head 0x%x tail 0x%x\n",
		  p->qp ? p->qp->qp_num : 0, p->tx_head, p->tx_tail);

	if (p->id)
		ib_destroy_cm_id(p->id);

	if (p->tx_ring) {
		/* Wait for all sends to complete */
		begin = jiffies;
		while ((int) p->tx_tail - (int) p->tx_head < 0) {
			if (time_after(jiffies, begin + 5 * HZ)) {
				ipoib_warn(priv, "timing out; %d sends not completed\n",
					   p->tx_head - p->tx_tail);
				goto timeout;
			}

			msleep(1);
		}
	}

timeout:

	/* reclaim any sends whose completions never arrived */
	while ((int) p->tx_tail - (int) p->tx_head < 0) {
		tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)];
		ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len,
				    DMA_TO_DEVICE);
		dev_kfree_skb_any(tx_req->skb);
		++p->tx_tail;
		netif_tx_lock_bh(p->dev);
		/* same wake-up threshold as the completion handler */
		if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) &&
		    netif_queue_stopped(p->dev) &&
		    test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags))
			netif_wake_queue(p->dev);
		netif_tx_unlock_bh(p->dev);
	}

	if (p->qp)
		ib_destroy_qp(p->qp);

	vfree(p->tx_ring);
	kfree(p);
}
/*
 * CM event handler for active (transmit-side) connections.  A REP
 * completes connection setup (rejecting on failure); a DREQ gets an
 * immediate DREP; request errors, rejections and timewait exit detach
 * the neighbour and queue the connection for reaping.  Always
 * returns 0.
 */
static int ipoib_cm_tx_handler(struct ib_cm_id *cm_id,
			       struct ib_cm_event *event)
{
	struct ipoib_cm_tx *tx = cm_id->context;
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	unsigned long flags;
	int ret;

	switch (event->event) {
	case IB_CM_DREQ_RECEIVED:
		ipoib_dbg(priv, "DREQ received.\n");
		ib_send_cm_drep(cm_id, NULL, 0);
		break;
	case IB_CM_REP_RECEIVED:
		ipoib_dbg(priv, "REP received.\n");
		ret = ipoib_cm_rep_handler(cm_id, event);
		if (ret)
			ib_send_cm_rej(cm_id, IB_CM_REJ_CONSUMER_DEFINED,
				       NULL, 0, NULL, 0);
		break;
	case IB_CM_REQ_ERROR:
	case IB_CM_REJ_RECEIVED:
	case IB_CM_TIMEWAIT_EXIT:
		ipoib_dbg(priv, "CM error %d.\n", event->event);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
		neigh = tx->neigh;

		/* detach the neighbour so a fresh connection can be built */
		if (neigh) {
			neigh->cm = NULL;
			list_del(&neigh->list);
			if (neigh->ah)
				ipoib_put_ah(neigh->ah);
			ipoib_neigh_free(dev, neigh);
			tx->neigh = NULL;
		}

		if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags)) {
			list_move(&tx->list, &priv->cm.reap_list);
			queue_work(ipoib_workqueue, &priv->cm.reap_task);
		}

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		break;
	default:
		break;
	}

	return 0;
}
/*
 * Allocate and enqueue a new active connection for @neigh over @path.
 * The actual (sleeping) connection setup is deferred to the start_task
 * worker; this function uses GFP_ATOMIC and returns the new
 * ipoib_cm_tx, or NULL on allocation failure.
 */
struct ipoib_cm_tx *ipoib_cm_create_tx(struct net_device *dev, struct ipoib_path *path,
				       struct ipoib_neigh *neigh)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	struct ipoib_cm_tx *tx = kzalloc(sizeof *tx, GFP_ATOMIC);

	if (!tx)
		return NULL;

	tx->dev   = dev;
	tx->path  = path;
	tx->neigh = neigh;
	neigh->cm = tx;

	list_add(&tx->list, &priv->cm.start_list);
	set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
	queue_work(ipoib_workqueue, &priv->cm.start_task);
	return tx;
}
/*
 * Hand an active connection over to the reap worker for teardown.
 * A no-op if the connection was never initialized (or has already
 * been queued for reaping).
 */
void ipoib_cm_destroy_tx(struct ipoib_cm_tx *tx)
{
	struct ipoib_dev_priv *priv = netdev_priv(tx->dev);

	if (!test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &tx->flags))
		return;

	list_move(&tx->list, &priv->cm.reap_list);
	queue_work(ipoib_workqueue, &priv->cm.reap_task);
	ipoib_dbg(priv, "Reap connection for gid %pI6\n",
		  tx->neigh->dgid.raw);
	tx->neigh = NULL;
}
/*
 * Work handler: initiate every connection queued on cm.start_list.
 * The QPN and path record are copied out under the lock since
 * ipoib_cm_tx_init() must run unlocked; on failure the neighbour is
 * detached and the half-built connection freed.
 */
static void ipoib_cm_tx_start(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.start_task);
	struct net_device *dev = priv->dev;
	struct ipoib_neigh *neigh;
	struct ipoib_cm_tx *p;
	unsigned long flags;
	int ret;

	struct ib_sa_path_rec pathrec;
	u32 qpn;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.start_list)) {
		p = list_entry(priv->cm.start_list.next, typeof(*p), list);
		list_del_init(&p->list);
		neigh = p->neigh;
		qpn = IPOIB_QPN(neigh->neighbour->ha);
		memcpy(&pathrec, &p->path->pathrec, sizeof pathrec);

		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		ret = ipoib_cm_tx_init(p, qpn, &pathrec);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);

		if (ret) {
			/* re-read p->neigh: it may have been detached
			 * while the locks were dropped */
			neigh = p->neigh;
			if (neigh) {
				neigh->cm = NULL;
				list_del(&neigh->list);
				if (neigh->ah)
					ipoib_put_ah(neigh->ah);
				ipoib_neigh_free(dev, neigh);
			}
			list_del(&p->list);
			kfree(p);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
/*
 * Work handler: destroy every connection queued on cm.reap_list.
 * Both locks are dropped around ipoib_cm_tx_destroy(), which sleeps
 * while waiting for outstanding sends.
 */
static void ipoib_cm_tx_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.reap_task);
	struct net_device *dev = priv->dev;
	struct ipoib_cm_tx *p;
	unsigned long flags;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while (!list_empty(&priv->cm.reap_list)) {
		p = list_entry(priv->cm.reap_list.next, typeof(*p), list);
		list_del(&p->list);
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);
		ipoib_cm_tx_destroy(p);
		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
/*
 * Work handler: for every skb queued by ipoib_cm_skb_too_long(), send
 * the matching "fragmentation needed" (IPv4) or "packet too big"
 * (IPv6) ICMP error with the multicast MTU, then free the skb.  Locks
 * are dropped around the ICMP calls.
 */
static void ipoib_cm_skb_reap(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.skb_task);
	struct net_device *dev = priv->dev;
	struct sk_buff *skb;
	unsigned long flags;
	unsigned mtu = priv->mcast_mtu;

	netif_tx_lock_bh(dev);
	spin_lock_irqsave(&priv->lock, flags);

	while ((skb = skb_dequeue(&priv->cm.skb_queue))) {
		spin_unlock_irqrestore(&priv->lock, flags);
		netif_tx_unlock_bh(dev);

		if (skb->protocol == htons(ETH_P_IP))
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
#endif
		dev_kfree_skb_any(skb);

		netif_tx_lock_bh(dev);
		spin_lock_irqsave(&priv->lock, flags);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
	netif_tx_unlock_bh(dev);
}
/*
 * Queue an oversized skb for deferred ICMP feedback, update the
 * path MTU on its dst, and kick the skb_task worker if the queue
 * was empty before this skb.
 */
void ipoib_cm_skb_too_long(struct net_device *dev, struct sk_buff *skb,
			   unsigned int mtu)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int was_empty = skb_queue_empty(&priv->cm.skb_queue);

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	skb_queue_tail(&priv->cm.skb_queue, skb);
	if (was_empty)
		queue_work(ipoib_workqueue, &priv->cm.skb_task);
}
static void ipoib_cm_rx_reap(struct work_struct *work)
{
ipoib_cm_free_rx_reap_list(container_of(work, struct ipoib_dev_priv,
cm.rx_reap_task)->dev);
}
/*
 * Delayed work: expire idle passive connections.  passive_ids is kept
 * in LRU order, so scanning from the tail stops at the first entry
 * that is still fresh.  Expired connections move to the error list and
 * their QPs are put into the error state (with the lock dropped).
 * Reschedules itself while live connections remain.
 */
static void ipoib_cm_stale_task(struct work_struct *work)
{
	struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
						   cm.stale_task.work);
	struct ipoib_cm_rx *p;
	int ret;

	spin_lock_irq(&priv->lock);
	while (!list_empty(&priv->cm.passive_ids)) {
		/* List is sorted by LRU, start from tail,
		 * stop when we see a recently used entry */
		p = list_entry(priv->cm.passive_ids.prev, typeof(*p), list);
		if (time_before_eq(jiffies, p->jiffies + IPOIB_CM_RX_TIMEOUT))
			break;
		list_move(&p->list, &priv->cm.rx_error_list);
		p->state = IPOIB_CM_RX_ERROR;
		/* ib_modify_qp() is called with the lock dropped */
		spin_unlock_irq(&priv->lock);
		ret = ib_modify_qp(p->qp, &ipoib_cm_err_attr, IB_QP_STATE);
		if (ret)
			ipoib_warn(priv, "unable to move qp to error state: %d\n", ret);
		spin_lock_irq(&priv->lock);
	}

	if (!list_empty(&priv->cm.passive_ids))
		queue_delayed_work(ipoib_workqueue,
				   &priv->cm.stale_task, IPOIB_CM_RX_DELAY);
	spin_unlock_irq(&priv->lock);
}
/* sysfs: report whether the interface is in connected or datagram mode. */
static ssize_t show_mode(struct device *d, struct device_attribute *attr,
			 char *buf)
{
	struct ipoib_dev_priv *priv = netdev_priv(to_net_dev(d));

	if (!test_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags))
		return sprintf(buf, "datagram\n");

	return sprintf(buf, "connected\n");
}
/*
 * sysfs: switch the interface between connected and datagram mode.
 * Accepts exactly "connected\n" or "datagram\n".  Device features are
 * re-evaluated on either switch; entering connected mode additionally
 * clears IB_SEND_IP_CSUM from the TX work request, and entering
 * datagram mode clamps the MTU to the multicast MTU.  Paths are
 * flushed on a successful switch so connections restart.
 */
static ssize_t set_mode(struct device *d, struct device_attribute *attr,
			const char *buf, size_t count)
{
	struct net_device *dev = to_net_dev(d);
	struct ipoib_dev_priv *priv = netdev_priv(dev);

	/* back off instead of sleeping on the RTNL */
	if (!rtnl_trylock())
		return restart_syscall();

	/* flush paths if we switch modes so that connections are restarted */
	if (IPOIB_CM_SUPPORTED(dev->dev_addr) && !strcmp(buf, "connected\n")) {
		set_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		ipoib_warn(priv, "enabling connected mode "
			   "will cause multicast packet drops\n");
		netdev_update_features(dev);
		rtnl_unlock();
		priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM;

		ipoib_flush_paths(dev);
		return count;
	}

	if (!strcmp(buf, "datagram\n")) {
		clear_bit(IPOIB_FLAG_ADMIN_CM, &priv->flags);
		netdev_update_features(dev);
		dev_set_mtu(dev, min(priv->mcast_mtu, dev->mtu));
		rtnl_unlock();
		ipoib_flush_paths(dev);

		return count;
	}
	rtnl_unlock();

	return -EINVAL;
}
/* sysfs "mode" attribute: writable by root, readable by all. */
static DEVICE_ATTR(mode, S_IWUSR | S_IRUGO, show_mode, set_mode);
/* Create the per-netdev sysfs "mode" attribute (connected/datagram). */
int ipoib_cm_add_mode_attr(struct net_device *dev)
{
	return device_create_file(&dev->dev, &dev_attr_mode);
}
static void ipoib_cm_create_srq(struct net_device *dev, int max_sge)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_srq_init_attr srq_init_attr = {
.attr = {
.max_wr = ipoib_recvq_size,
.max_sge = max_sge
}
};
priv->cm.srq = ib_create_srq(priv->pd, &srq_init_attr);
if (IS_ERR(priv->cm.srq)) {
if (PTR_ERR(priv->cm.srq) != -ENOSYS)
printk(KERN_WARNING "%s: failed to allocate SRQ, error %ld\n",
priv->ca->name, PTR_ERR(priv->cm.srq));
priv->cm.srq = NULL;
return;
}
priv->cm.srq_ring = vzalloc(ipoib_recvq_size * sizeof *priv->cm.srq_ring);
if (!priv->cm.srq_ring) {
printk(KERN_WARNING "%s: failed to allocate CM SRQ ring (%d entries)\n",
priv->ca->name, ipoib_recvq_size);
ib_destroy_srq(priv->cm.srq);
priv->cm.srq = NULL;
return;
}
}
/*
 * One-time connected-mode initialization for a device: set up the
 * connection lists and work items, create the SRQ when the HCA
 * supports one (sizing max_cm_mtu from the usable SRQ SGE count,
 * otherwise falling back to IPOIB_CM_MTU with per-connection rings),
 * pre-post the SRQ receive buffers, and flag the hardware address as
 * RC-capable.  Returns 0 or a negative errno after cleaning up.
 */
int ipoib_cm_dev_init(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int i, ret;
	struct ib_device_attr attr;

	INIT_LIST_HEAD(&priv->cm.passive_ids);
	INIT_LIST_HEAD(&priv->cm.reap_list);
	INIT_LIST_HEAD(&priv->cm.start_list);
	INIT_LIST_HEAD(&priv->cm.rx_error_list);
	INIT_LIST_HEAD(&priv->cm.rx_flush_list);
	INIT_LIST_HEAD(&priv->cm.rx_drain_list);
	INIT_LIST_HEAD(&priv->cm.rx_reap_list);
	INIT_WORK(&priv->cm.start_task, ipoib_cm_tx_start);
	INIT_WORK(&priv->cm.reap_task, ipoib_cm_tx_reap);
	INIT_WORK(&priv->cm.skb_task, ipoib_cm_skb_reap);
	INIT_WORK(&priv->cm.rx_reap_task, ipoib_cm_rx_reap);
	INIT_DELAYED_WORK(&priv->cm.stale_task, ipoib_cm_stale_task);
	skb_queue_head_init(&priv->cm.skb_queue);

	ret = ib_query_device(priv->ca, &attr);
	if (ret) {
		printk(KERN_WARNING "ib_query_device() failed with %d\n", ret);
		return ret;
	}

	ipoib_dbg(priv, "max_srq_sge=%d\n", attr.max_srq_sge);

	/* never use more SGEs than our ring entries can describe */
	attr.max_srq_sge = min_t(int, IPOIB_CM_RX_SG, attr.max_srq_sge);
	ipoib_cm_create_srq(dev, attr.max_srq_sge);
	if (ipoib_cm_has_srq(dev)) {
		priv->cm.max_cm_mtu = attr.max_srq_sge * PAGE_SIZE - 0x10;
		priv->cm.num_frags = attr.max_srq_sge;
		ipoib_dbg(priv, "max_cm_mtu = 0x%x, num_frags=%d\n",
			  priv->cm.max_cm_mtu, priv->cm.num_frags);
	} else {
		priv->cm.max_cm_mtu = IPOIB_CM_MTU;
		priv->cm.num_frags = IPOIB_CM_RX_SG;
	}

	ipoib_cm_init_rx_wr(dev, &priv->cm.rx_wr, priv->cm.rx_sge);

	if (ipoib_cm_has_srq(dev)) {
		/* fill and post the shared receive ring up front */
		for (i = 0; i < ipoib_recvq_size; ++i) {
			if (!ipoib_cm_alloc_rx_skb(dev, priv->cm.srq_ring, i,
						   priv->cm.num_frags - 1,
						   priv->cm.srq_ring[i].mapping)) {
				ipoib_warn(priv, "failed to allocate "
					   "receive buffer %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -ENOMEM;
			}

			if (ipoib_cm_post_receive_srq(dev, i)) {
				ipoib_warn(priv, "ipoib_cm_post_receive_srq "
					   "failed for buf %d\n", i);
				ipoib_cm_dev_cleanup(dev);
				return -EIO;
			}
		}
	}

	priv->dev->dev_addr[0] = IPOIB_FLAGS_RC;
	return 0;
}
/*
 * Undo ipoib_cm_dev_init(): destroy the SRQ (if one was created) and
 * free its receive ring.  Safe to call when connected mode was never,
 * or only partially, initialized.
 */
void ipoib_cm_dev_cleanup(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int err;

	if (!priv->cm.srq)
		return;

	ipoib_dbg(priv, "Cleanup ipoib connected mode.\n");

	err = ib_destroy_srq(priv->cm.srq);
	if (err)
		ipoib_warn(priv, "ib_destroy_srq failed: %d\n", err);
	priv->cm.srq = NULL;

	if (!priv->cm.srq_ring)
		return;

	ipoib_cm_free_rx_ring(dev, priv->cm.srq_ring);
	priv->cm.srq_ring = NULL;
}
| gpl-2.0 |
sirinsoftware/ipipe6410 | drivers/usb/gadget/goku_udc.c | 908 | 47631 | /*
* Toshiba TC86C001 ("Goku-S") USB Device Controller driver
*
* Copyright (C) 2000-2002 Lineo
* by Stuart Lynne, Tom Rushworth, and Bruce Balden
* Copyright (C) 2002 Toshiba Corporation
* Copyright (C) 2003 MontaVista Software (source@mvista.com)
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
/*
* This device has ep0 and three semi-configurable bulk/interrupt endpoints.
*
* - Endpoint numbering is fixed: ep{1,2,3}-bulk
* - Gadget drivers can choose ep maxpacket (8/16/32/64)
* - Gadget drivers can choose direction (IN, OUT)
* - DMA works with ep1 (OUT transfers) and ep2 (IN transfers).
*/
// #define VERBOSE /* extra debug messages (success too) */
// #define USB_TRACE /* packet-level success messages */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/unaligned.h>
#include "goku_udc.h"
#define DRIVER_DESC "TC86C001 USB Device Controller"
#define DRIVER_VERSION "30-Oct 2003"
#define DMA_ADDR_INVALID (~(dma_addr_t)0)
static const char driver_name [] = "goku_udc";
static const char driver_desc [] = DRIVER_DESC;
MODULE_AUTHOR("source@mvista.com");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/*
* IN dma behaves ok under testing, though the IN-dma abort paths don't
* seem to behave quite as expected. Used by default.
*
* OUT dma documents design problems handling the common "short packet"
* transfer termination policy; it couldn't be enabled by default, even
* if the OUT-dma abort problems had a resolution.
*/
static unsigned use_dma = 1;
#if 0
//#include <linux/moduleparam.h>
/* "modprobe goku_udc use_dma=1" etc
* 0 to disable dma
* 1 to use IN dma only (normal operation)
* 2 to use IN and OUT dma
*/
module_param(use_dma, uint, S_IRUGO);
#endif
/*-------------------------------------------------------------------------*/
static void nuke(struct goku_ep *, int status);
/*
 * Issue an endpoint command to the controller's Command register and
 * wait 300 usec for it to take effect (delay inherited from the
 * original driver; the datasheet requirement isn't visible here).
 *
 * Fix: the register reference had been corrupted to "(R)s->Command"
 * by a bad HTML-entity decode of "&regs->Command"; restored so the
 * file compiles.
 */
static inline void
command(struct goku_udc_regs __iomem *regs, int command, unsigned epnum)
{
	writel(COMMAND_EP(epnum) | command, &regs->Command);
	udelay(300);
}
/*
 * Configure and enable one non-control endpoint: validate the descriptor,
 * program packet size / direction / toggle mode, pick dma-vs-pio and
 * single-vs-double buffering, then reset the endpoint hardware.
 * Returns 0 on success or a negative errno.
 * (Also repairs the "&regs" tokens that were mojibake-corrupted.)
 */
static int
goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct goku_udc	*dev;
	struct goku_ep	*ep;
	u32		mode;
	u16		max;
	unsigned long	flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !desc || ep->desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (ep == &dev->ep[0])
		return -EINVAL;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->num != usb_endpoint_num(desc))
		return -EINVAL;

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		break;
	default:
		return -EINVAL;
	}

	if ((readl(ep->reg_status) & EPxSTATUS_EP_MASK)
			!= EPxSTATUS_EP_INVALID)
		return -EBUSY;

	/* enabling the no-toggle interrupt mode would need an api hook */
	mode = 0;
	max = get_unaligned_le16(&desc->wMaxPacketSize);
	/* encode maxpacket as a power-of-two selector; the cascade is
	 * intentional: each larger size falls through to bump "mode".
	 */
	switch (max) {
	case 64:	mode++;		/* fall through */
	case 32:	mode++;		/* fall through */
	case 16:	mode++;		/* fall through */
	case 8:		mode <<= 3;
			break;
	default:
		return -EINVAL;
	}
	mode |= 2 << 1;		/* bulk, or intr-with-toggle */

	/* ep1/ep2 dma direction is chosen early; it works in the other
	 * direction, with pio.  be cautious with out-dma.
	 */
	ep->is_in = usb_endpoint_dir_in(desc);
	if (ep->is_in) {
		mode |= 1;
		ep->dma = (use_dma != 0) && (ep->num == UDC_MSTRD_ENDPOINT);
	} else {
		ep->dma = (use_dma == 2) && (ep->num == UDC_MSTWR_ENDPOINT);
		if (ep->dma)
			DBG(dev, "%s out-dma hides short packets\n",
				ep->ep.name);
	}

	spin_lock_irqsave(&ep->dev->lock, flags);

	/* ep1 and ep2 can do double buffering and/or dma */
	if (ep->num < 3) {
		struct goku_udc_regs __iomem	*regs = ep->dev->regs;
		u32				tmp;

		/* double buffer except (for now) with pio in */
		tmp = ((ep->dma || !ep->is_in)
				? 0x10	/* double buffered */
				: 0x11	/* single buffer */
			) << ep->num;
		tmp |= readl(&regs->EPxSingle);
		writel(tmp, &regs->EPxSingle);

		tmp = (ep->dma ? 0x10/*dma*/ : 0x11/*pio*/) << ep->num;
		tmp |= readl(&regs->EPxBCS);
		writel(tmp, &regs->EPxBCS);
	}

	writel(mode, ep->reg_mode);
	command(ep->dev->regs, COMMAND_RESET, ep->num);
	ep->ep.maxpacket = max;
	ep->stopped = 0;
	ep->desc = desc;
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	DBG(dev, "enable %s %s %s maxpacket %u\n", ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		max);

	return 0;
}
/* Return an endpoint to its power-on state: invalidate it in hardware,
 * mask its interrupts, clear buffering/dma configuration, and reset
 * the software bookkeeping.  @regs may be NULL (early init), in which
 * case only the software state is touched.
 * (Also repairs the mojibake-corrupted "&regs" tokens.)
 */
static void ep_reset(struct goku_udc_regs __iomem *regs, struct goku_ep *ep)
{
	struct goku_udc		*dev = ep->dev;

	if (regs) {
		command(regs, COMMAND_INVALID, ep->num);
		if (ep->num) {
			/* mask the dma-completion irqs matching this ep */
			if (ep->num == UDC_MSTWR_ENDPOINT)
				dev->int_enable &= ~(INT_MSTWREND
							|INT_MSTWRTMOUT);
			else if (ep->num == UDC_MSTRD_ENDPOINT)
				dev->int_enable &= ~INT_MSTRDEND;
			dev->int_enable &= ~INT_EPxDATASET (ep->num);
		} else
			dev->int_enable &= ~INT_EP0;
		writel(dev->int_enable, &regs->int_enable);
		readl(&regs->int_enable);
		if (ep->num < 3) {
			struct goku_udc_regs __iomem	*r = ep->dev->regs;
			u32				tmp;

			/* clear both buffering and dma/pio selection bits */
			tmp = readl(&r->EPxSingle);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxSingle);

			tmp = readl(&r->EPxBCS);
			tmp &= ~(0x11 << ep->num);
			writel(tmp, &r->EPxBCS);
		}
		/* reset dma in case we're still using it */
		if (ep->dma) {
			u32	master;

			master = readl(&regs->dma_master) & MST_RW_BITS;
			if (ep->num == UDC_MSTWR_ENDPOINT) {
				master &= ~MST_W_BITS;
				master |= MST_WR_RESET;
			} else {
				master &= ~MST_R_BITS;
				master |= MST_RD_RESET;
			}
			writel(master, &regs->dma_master);
		}
	}

	ep->ep.maxpacket = MAX_FIFO_SIZE;
	ep->desc = NULL;
	ep->stopped = 1;
	ep->irqs = 0;
	ep->dma = 0;
}
static int goku_ep_disable(struct usb_ep *_ep)
{
struct goku_ep *ep;
struct goku_udc *dev;
unsigned long flags;
ep = container_of(_ep, struct goku_ep, ep);
if (!_ep || !ep->desc)
return -ENODEV;
dev = ep->dev;
if (dev->ep0state == EP0_SUSPEND)
return -EBUSY;
VDBG(dev, "disable %s\n", _ep->name);
spin_lock_irqsave(&dev->lock, flags);
nuke(ep, -ESHUTDOWN);
ep_reset(dev->regs, ep);
spin_unlock_irqrestore(&dev->lock, flags);
return 0;
}
/*-------------------------------------------------------------------------*/
/* Allocate a driver-private request wrapper; the caller gets the
 * embedded struct usb_request.  The dma field starts out unmapped.
 */
static struct usb_request *
goku_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct goku_request *req;

	if (_ep == NULL)
		return NULL;

	req = kzalloc(sizeof *req, gfp_flags);
	if (req == NULL)
		return NULL;

	INIT_LIST_HEAD(&req->queue);
	req->req.dma = DMA_ADDR_INVALID;
	return &req->req;
}
static void
goku_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
struct goku_request *req;
if (!_ep || !_req)
return;
req = container_of(_req, struct goku_request, req);
WARN_ON(!list_empty(&req->queue));
kfree(req);
}
/*-------------------------------------------------------------------------*/
/*
 * Complete one request: unlink it, finalize its status, undo any dma
 * mapping this driver created, then invoke the gadget's completion
 * callback.  Called with dev->lock held; the lock is dropped around the
 * callback so the gadget driver may queue new requests from inside it.
 */
static void
done(struct goku_ep *ep, struct goku_request *req, int status)
{
	struct goku_udc		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	/* keep a status set earlier (e.g. by an abort path) */
	if (likely(req->req.status == -EINPROGRESS))
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (req->mapped) {
		/* goku_queue() mapped the buffer for us; undo that here */
		pci_unmap_single(dev->pdev, req->req.dma, req->req.length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		req->req.dma = DMA_ADDR_INVALID;
		req->mapped = 0;
	}

#ifndef USB_TRACE
	if (status && status != -ESHUTDOWN)
#endif
		VDBG(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	req->req.complete(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}
/*-------------------------------------------------------------------------*/
/* Copy up to @max bytes of the request's remaining payload into the
 * endpoint fifo, one byte per register write; returns the byte count
 * and advances req->req.actual accordingly.
 */
static inline int
write_packet(u32 __iomem *fifo, u8 *buf, struct goku_request *req, unsigned max)
{
	unsigned	len, i;

	len = min(req->req.length - req->req.actual, max);
	req->req.actual += len;
	for (i = len; likely(i != 0); i--)
		writel(*buf++, fifo);
	return len;
}
// return: 0 = still running, 1 = completed, negative = errno
static int write_fifo(struct goku_ep *ep, struct goku_request *req)
{
struct goku_udc *dev = ep->dev;
u32 tmp;
u8 *buf;
unsigned count;
int is_last;
tmp = readl(&dev->regs->DataSet);
buf = req->req.buf + req->req.actual;
prefetch(buf);
dev = ep->dev;
if (unlikely(ep->num == 0 && dev->ep0state != EP0_IN))
return -EL2HLT;
/* NOTE: just single-buffered PIO-IN for now. */
if (unlikely((tmp & DATASET_A(ep->num)) != 0))
return 0;
/* clear our "packet available" irq */
if (ep->num != 0)
writel(~INT_EPxDATASET(ep->num), &dev->regs->int_status);
count = write_packet(ep->reg_fifo, buf, req, ep->ep.maxpacket);
/* last packet often short (sometimes a zlp, especially on ep0) */
if (unlikely(count != ep->ep.maxpacket)) {
writel(~(1<<ep->num), &dev->regs->EOP);
if (ep->num == 0) {
dev->ep[0].stopped = 1;
dev->ep0state = EP0_STATUS;
}
is_last = 1;
} else {
if (likely(req->req.length != req->req.actual)
|| req->req.zero)
is_last = 0;
else
is_last = 1;
}
#if 0 /* printk seemed to trash is_last...*/
//#ifdef USB_TRACE
VDBG(dev, "wrote %s %u bytes%s IN %u left %p\n",
ep->ep.name, count, is_last ? "/last" : "",
req->req.length - req->req.actual, req);
#endif
/* requests complete when all IN data is in the FIFO,
* or sometimes later, if a zlp was needed.
*/
if (is_last) {
done(ep, req, 0);
return 1;
}
return 0;
}
/*
 * Drain OUT packets (pio) from the fifo into @req.  ep1/ep2 are double
 * buffered, so the loop may consume two packets per irq, and completion
 * of one request immediately restarts on the next queued one ("goto
 * top") to empty the second hardware buffer as soon as possible.
 * Returns 1 once a request completed, else 0, or negative errno.
 * (Also repairs the mojibake-corrupted "&regs" tokens.)
 */
static int read_fifo(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs;
	u32				size, set;
	u8				*buf;
	unsigned			bufferspace, is_short, dbuff;

	regs = ep->dev->regs;
top:
	buf = req->req.buf + req->req.actual;
	prefetchw(buf);

	if (unlikely(ep->num == 0 && ep->dev->ep0state != EP0_OUT))
		return -EL2HLT;

	dbuff = (ep->num == 1 || ep->num == 2);
	do {
		/* ack dataset irq matching the status we'll handle */
		if (ep->num != 0)
			writel(~INT_EPxDATASET(ep->num), &regs->int_status);

		set = readl(&regs->DataSet) & DATASET_AB(ep->num);
		size = readl(&regs->EPxSizeLA[ep->num]);
		bufferspace = req->req.length - req->req.actual;

		/* usually do nothing without an OUT packet */
		if (likely(ep->num != 0 || bufferspace != 0)) {
			if (unlikely(set == 0))
				break;
			/* use ep1/ep2 double-buffering for OUT */
			if (!(size & PACKET_ACTIVE))
				size = readl(&regs->EPxSizeLB[ep->num]);
			if (!(size & PACKET_ACTIVE))	/* "can't happen" */
				break;
			size &= DATASIZE;	/* EPxSizeH == 0 */

		/* ep0out no-out-data case for set_config, etc */
		} else
			size = 0;

		/* read all bytes from this packet */
		req->req.actual += size;
		is_short = (size < ep->ep.maxpacket);
#ifdef USB_TRACE
		VDBG(ep->dev, "read %s %u bytes%s OUT req %p %u/%u\n",
			ep->ep.name, size, is_short ? "/S" : "",
			req, req->req.actual, req->req.length);
#endif
		while (likely(size-- != 0)) {
			u8	byte = (u8) readl(ep->reg_fifo);

			if (unlikely(bufferspace == 0)) {
				/* this happens when the driver's buffer
				 * is smaller than what the host sent.
				 * discard the extra data in this packet.
				 */
				if (req->req.status != -EOVERFLOW)
					DBG(ep->dev, "%s overflow %u\n",
						ep->ep.name, size);
				req->req.status = -EOVERFLOW;
			} else {
				*buf++ = byte;
				bufferspace--;
			}
		}

		/* completion */
		if (unlikely(is_short || req->req.actual == req->req.length)) {
			if (unlikely(ep->num == 0)) {
				/* non-control endpoints now usable? */
				if (ep->dev->req_config)
					writel(ep->dev->configured
							? USBSTATE_CONFIGURED
							: 0,
						&regs->UsbState);
				/* ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				ep->stopped = 1;
				ep->dev->ep0state = EP0_STATUS;
			}
			done(ep, req, 0);

			/* empty the second buffer asap */
			if (dbuff && !list_empty(&ep->queue)) {
				req = list_entry(ep->queue.next,
						struct goku_request, queue);
				goto top;
			}
			return 1;
		}
	} while (dbuff);
	return 0;
}
/* Unmask the "dataset" (packet available) irq for one endpoint.
 * (Repairs the mojibake-corrupted "&regs" token.)
 */
static inline void
pio_irq_enable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable |= INT_EPxDATASET (epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}
/* Mask the "dataset" (packet available) irq for one endpoint.
 * (Repairs the mojibake-corrupted "&regs" token.)
 */
static inline void
pio_irq_disable(struct goku_udc *dev,
		struct goku_udc_regs __iomem *regs, int epnum)
{
	dev->int_enable &= ~INT_EPxDATASET (epnum);
	writel(dev->int_enable, &regs->int_enable);
	/* write may still be posted */
}
/* Kick the pio engine for the request at the head of the queue, in
 * whichever direction this endpoint runs; no-op on an empty queue.
 */
static inline void
pio_advance(struct goku_ep *ep)
{
	struct goku_request	*next;

	if (unlikely(list_empty (&ep->queue)))
		return;
	next = list_entry(ep->queue.next, struct goku_request, queue);
	if (ep->is_in)
		write_fifo(ep, next);
	else
		read_fifo(ep, next);
}
/*-------------------------------------------------------------------------*/
// return:  0 = q running, 1 = q stopped, negative = errno
/*
 * Program and start a master dma transfer for @req.  For IN, the EOPB
 * policy depends on whether the transfer must end with a short/zero
 * packet; for OUT, the nonstandard 3-SOF timeout stays disabled.
 *
 * Fixes: the IN branch assigned "master = ..." which clobbered the
 * write-channel bits that "master &= ~MST_R_BITS" had just preserved
 * (and was asymmetric with the OUT branch) — it now ORs the bits in.
 * Also repairs the mojibake-corrupted "&regs" tokens.
 */
static int start_dma(struct goku_ep *ep, struct goku_request *req)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;
	u32				start = req->req.dma;
	u32				end = start + req->req.length - 1;

	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* re-init the bits affecting IN dma; careful with zlps */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA)) {
			DBG (ep->dev, "start, IN active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->in_dma_end);
		writel(start, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		if (unlikely(req->req.length == 0))
			master |= MST_RD_ENA | MST_RD_EOPB;
		else if ((req->req.length % ep->ep.maxpacket) != 0
					|| req->req.zero)
			master |= MST_RD_ENA | MST_EOPB_ENA;
		else
			master |= MST_RD_ENA | MST_EOPB_DIS;

		ep->dev->int_enable |= INT_MSTRDEND;

	/* Goku DMA-OUT merges short packets, which plays poorly with
	 * protocols where short packets mark the transfer boundaries.
	 * The chip supports a nonstandard policy with INT_MSTWRTMOUT,
	 * ending transfers after 3 SOFs; we don't turn it on.
	 */
	} else {
		if (unlikely(master & MST_WR_ENA)) {
			DBG (ep->dev, "start, OUT active dma %03x!!\n",
				master);
//			return -EL2HLT;
		}
		writel(end, &regs->out_dma_end);
		writel(start, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_ENA | MST_TIMEOUT_DIS;
		ep->dev->int_enable |= INT_MSTWREND|INT_MSTWRTMOUT;
	}

	writel(master, &regs->dma_master);
	writel(ep->dev->int_enable, &regs->int_enable);
	return 0;
}
/*
 * Handle a dma-completion irq: record the transferred byte count from
 * the dma-current register, complete the head request, and start dma
 * on the next queued request (or mask the dma irqs if the queue is
 * empty).  (Repairs the mojibake-corrupted "&regs" tokens.)
 */
static void dma_advance(struct goku_udc *dev, struct goku_ep *ep)
{
	struct goku_request		*req;
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	u32				master;

	master = readl(&regs->dma_master);

	if (unlikely(list_empty(&ep->queue))) {
stop:
		if (ep->is_in)
			dev->int_enable &= ~INT_MSTRDEND;
		else
			dev->int_enable &= ~(INT_MSTWREND|INT_MSTWRTMOUT);
		writel(dev->int_enable, &regs->int_enable);
		return;
	}
	req = list_entry(ep->queue.next, struct goku_request, queue);

	/* normal hw dma completion (not abort) */
	if (likely(ep->is_in)) {
		if (unlikely(master & MST_RD_ENA))
			return;
		req->req.actual = readl(&regs->in_dma_current);
	} else {
		if (unlikely(master & MST_WR_ENA))
			return;

		/* hardware merges short packets, and also hides packet
		 * overruns.  a partial packet MAY be in the fifo here.
		 */
		req->req.actual = readl(&regs->out_dma_current);
	}
	/* dma-current holds the last address touched; convert to count */
	req->req.actual -= req->req.dma;
	req->req.actual++;

#ifdef USB_TRACE
	VDBG(dev, "done %s %s dma, %u/%u bytes, req %p\n",
		ep->ep.name, ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length, req);
#endif
	done(ep, req, 0);
	if (list_empty(&ep->queue))
		goto stop;
	req = list_entry(ep->queue.next, struct goku_request, queue);
	(void) start_dma(ep, req);
}
/*
 * Abort the dma transfer for the request at the head of @ep's queue:
 * quiesce the fifo, wind the dma engine down at its current position,
 * and record the transferred byte count plus @status in the request.
 * The request is NOT completed here; the caller does that.
 * (Repairs the mojibake-corrupted "&regs" tokens.)
 */
static void abort_dma(struct goku_ep *ep, int status)
{
	struct goku_udc_regs __iomem	*regs = ep->dev->regs;
	struct goku_request		*req;
	u32				curr, master;

	/* NAK future host requests, hoping the implicit delay lets the
	 * dma engine finish reading (or writing) its latest packet and
	 * empty the dma buffer (up to 16 bytes).
	 *
	 * This avoids needing to clean up a partial packet in the fifo;
	 * we can't do that for IN without side effects to HALT and TOGGLE.
	 */
	command(regs, COMMAND_FIFO_DISABLE, ep->num);
	req = list_entry(ep->queue.next, struct goku_request, queue);
	master = readl(&regs->dma_master) & MST_RW_BITS;

	/* FIXME using these resets isn't usably documented. this may
	 * not work unless it's followed by disabling the endpoint.
	 *
	 * FIXME the OUT reset path doesn't even behave consistently.
	 */
	if (ep->is_in) {
		if (unlikely((readl(&regs->dma_master) & MST_RD_ENA) == 0))
			goto finished;
		curr = readl(&regs->in_dma_current);

		/* collapse the window so the engine stops where it is */
		writel(curr, &regs->in_dma_end);
		writel(curr, &regs->in_dma_start);

		master &= ~MST_R_BITS;
		master |= MST_RD_RESET;
		writel(master, &regs->dma_master);
		if (readl(&regs->dma_master) & MST_RD_ENA)
			DBG(ep->dev, "IN dma active after reset!\n");

	} else {
		if (unlikely((readl(&regs->dma_master) & MST_WR_ENA) == 0))
			goto finished;
		curr = readl(&regs->out_dma_current);

		writel(curr, &regs->out_dma_end);
		writel(curr, &regs->out_dma_start);

		master &= ~MST_W_BITS;
		master |= MST_WR_RESET;
		writel(master, &regs->dma_master);
		if (readl(&regs->dma_master) & MST_WR_ENA)
			DBG(ep->dev, "OUT dma active after reset!\n");
	}
	req->req.actual = (curr - req->req.dma) + 1;
	req->req.status = status;

	VDBG(ep->dev, "%s %s %s %d/%d\n", __func__, ep->ep.name,
		ep->is_in ? "IN" : "OUT",
		req->req.actual, req->req.length);

	command(regs, COMMAND_FIFO_ENABLE, ep->num);

	return;

finished:
	/* dma already completed; no abort needed */
	command(regs, COMMAND_FIFO_ENABLE, ep->num);
	req->req.actual = req->req.length;
	req->req.status = 0;
}
/*-------------------------------------------------------------------------*/
/*
 * Queue a request on an endpoint.  If the queue was idle and the
 * endpoint isn't stopped, kickstart the transfer immediately (dma or
 * pio); otherwise the irq handlers advance the queue later.  Returns
 * 0 on success or a negative errno.
 */
static int
goku_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;
	int			status;

	/* always require a cpu-view buffer so pio works */
	req = container_of(_req, struct goku_request, req);
	if (unlikely(!_req || !_req->complete
			|| !_req->buf || !list_empty(&req->queue)))
		return -EINVAL;
	ep = container_of(_ep, struct goku_ep, ep);
	if (unlikely(!_ep || (!ep->desc && ep->num != 0)))
		return -EINVAL;
	dev = ep->dev;
	if (unlikely(!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN))
		return -ESHUTDOWN;

	/* can't touch registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	/* set up dma mapping in case the caller didn't; done() unmaps */
	if (ep->dma && _req->dma == DMA_ADDR_INVALID) {
		_req->dma = pci_map_single(dev->pdev, _req->buf, _req->length,
			ep->is_in ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
		req->mapped = 1;
	}

#ifdef USB_TRACE
	VDBG(dev, "%s queue req %p, len %u buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* for ep0 IN without premature status, zlp is required and
	 * writing EOP starts the status stage (OUT).
	 */
	if (unlikely(ep->num == 0 && ep->is_in))
		_req->zero = 1;

	/* kickstart this i/o queue? */
	status = 0;
	if (list_empty(&ep->queue) && likely(!ep->stopped)) {
		/* dma:  done after dma completion IRQ (or error)
		 * pio:  done after last fifo operation
		 */
		if (ep->dma)
			status = start_dma(ep, req);
		else
			status = (ep->is_in ? write_fifo : read_fifo)(ep, req);

		/* a positive return means the request already completed
		 * synchronously, so it must not be (re)queued below
		 */
		if (unlikely(status != 0)) {
			if (status > 0)
				status = 0;
			req = NULL;
		}

	} /* else pio or dma irq handler advances the queue. */

	if (likely(req != 0))
		list_add_tail(&req->queue, &ep->queue);

	/* pio OUT/IN still pending: make sure its dataset irq is on */
	if (likely(!list_empty(&ep->queue))
			&& likely(ep->num != 0)
			&& !ep->dma
			&& !(dev->int_enable & INT_EPxDATASET (ep->num)))
		pio_irq_enable(dev, dev->regs, ep->num);

	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return status;
}
/* dequeue ALL requests */
/* Flush every request queued on @ep, completing each with @status.
 * Aborts a dma transfer in flight first; leaves the endpoint stopped.
 */
static void nuke(struct goku_ep *ep, int status)
{
	struct goku_request	*victim;

	ep->stopped = 1;
	if (list_empty(&ep->queue))
		return;
	if (ep->dma)
		abort_dma(ep, status);
	for (;;) {
		if (list_empty(&ep->queue))
			break;
		victim = list_entry(ep->queue.next,
				struct goku_request, queue);
		done(ep, victim, status);
	}
}
/* dequeue JUST ONE request */
/*
 * Cancel @_req if it is still queued on @_ep, completing it with
 * -ECONNRESET.  Returns 0 on success, -EINVAL if the request isn't
 * queued here, -EOPNOTSUPP if it had already completed.
 */
static int goku_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct goku_request	*req;
	struct goku_ep		*ep;
	struct goku_udc		*dev;
	unsigned long		flags;

	ep = container_of(_ep, struct goku_ep, ep);
	if (!_ep || !_req || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver)
		return -ESHUTDOWN;

	/* we can't touch (dma) registers when suspended */
	if (dev->ep0state == EP0_SUSPEND)
		return -EBUSY;

	VDBG(dev, "%s %s %s %s %p\n", __func__, _ep->name,
		ep->is_in ? "IN" : "OUT",
		ep->dma ? "dma" : "pio",
		_req);

	spin_lock_irqsave(&dev->lock, flags);

	/* make sure it's actually queued on this endpoint */
	list_for_each_entry (req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	/* if the loop ran off the end, req points at the list head here */
	if (&req->req != _req) {
		spin_unlock_irqrestore (&dev->lock, flags);
		return -EINVAL;
	}

	/* an active dma head must be wound down before completion,
	 * then dma restarts for any follow-on request
	 */
	if (ep->dma && ep->queue.next == &req->queue && !ep->stopped) {
		abort_dma(ep, -ECONNRESET);
		done(ep, req, -ECONNRESET);
		dma_advance(dev, ep);
	} else if (!list_empty(&req->queue))
		done(ep, req, -ECONNRESET);
	else
		req = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	return req ? 0 : -EOPNOTSUPP;
}
/*-------------------------------------------------------------------------*/
/* Clear an endpoint halt: reset the data toggle, clear the stall, and
 * restart any transfer that was waiting behind the halt.
 */
static void goku_clear_halt(struct goku_ep *ep)
{
	// assert (ep->num !=0)
	VDBG(ep->dev, "%s clear halt\n", ep->ep.name);
	command(ep->dev->regs, COMMAND_SETDATA0, ep->num);
	command(ep->dev->regs, COMMAND_STALL_CLEAR, ep->num);

	if (!ep->stopped)
		return;
	ep->stopped = 0;

	if (!ep->dma) {
		pio_advance(ep);
		return;
	}
	if (list_empty(&ep->queue))
		return;
	(void) start_dma(ep, list_entry(ep->queue.next,
				struct goku_request, queue));
}
/*
 * Set or clear the halt (stall) feature on an endpoint.  Halting is
 * refused (-EAGAIN) while requests are queued, or for IN endpoints
 * that still have data sitting in a packet buffer.  ep0 supports only
 * setting the stall (protocol stall), never clearing it here.
 */
static int goku_set_halt(struct usb_ep *_ep, int value)
{
	struct goku_ep	*ep;
	unsigned long	flags;
	int		retval = 0;

	if (!_ep)
		return -ENODEV;
	ep = container_of (_ep, struct goku_ep, ep);

	if (ep->num == 0) {
		if (value) {
			ep->dev->ep0state = EP0_STALL;
			ep->dev->ep[0].stopped = 1;
		} else
			return -EINVAL;

	/* don't change EPxSTATUS_EP_INVALID to READY */
	} else if (!ep->desc) {
		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
		return -EINVAL;
	}

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value
			/* data in (either) packet buffer? */
			&& (readl(&ep->dev->regs->DataSet)
				& DATASET_AB(ep->num)))
		retval = -EAGAIN;
	else if (!value)
		goku_clear_halt(ep);
	else {
		ep->stopped = 1;
		VDBG(ep->dev, "%s set halt\n", ep->ep.name);
		command(ep->dev->regs, COMMAND_STALL, ep->num);
		/* flush the posted command write */
		readl(ep->reg_status);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return retval;
}
/* Report how many bytes are waiting in an OUT endpoint's fifo (sum of
 * both packet buffers); -EOPNOTSUPP for IN, where the hardware doesn't
 * report size sanely.  (Repairs the mojibake-corrupted "&regs" tokens.)
 */
static int goku_fifo_status(struct usb_ep *_ep)
{
	struct goku_ep			*ep;
	struct goku_udc_regs __iomem	*regs;
	u32				size;

	if (!_ep)
		return -ENODEV;
	ep = container_of(_ep, struct goku_ep, ep);

	/* size is only reported sanely for OUT */
	if (ep->is_in)
		return -EOPNOTSUPP;

	/* ignores 16-byte dma buffer; SizeH == 0 */
	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]) & DATASIZE;
	size += readl(&regs->EPxSizeLB[ep->num]) & DATASIZE;
	VDBG(ep->dev, "%s %s %u\n", __func__, ep->ep.name, size);
	return size;
}
/* Discard whatever is sitting in an endpoint fifo.
 * (Repairs the mojibake-corrupted "&regs" token.)
 */
static void goku_fifo_flush(struct usb_ep *_ep)
{
	struct goku_ep			*ep;
	struct goku_udc_regs __iomem	*regs;
	u32				size;

	if (!_ep)
		return;
	ep = container_of(_ep, struct goku_ep, ep);
	VDBG(ep->dev, "%s %s\n", __func__, ep->ep.name);

	/* don't change EPxSTATUS_EP_INVALID to READY */
	if (!ep->desc && ep->num != 0) {
		DBG(ep->dev, "%s %s inactive?\n", __func__, ep->ep.name);
		return;
	}

	regs = ep->dev->regs;
	size = readl(&regs->EPxSizeLA[ep->num]);
	size &= DATASIZE;

	/* Non-desirable behavior:  FIFO_CLEAR also clears the
	 * endpoint halt feature.  For OUT, we _could_ just read
	 * the bytes out (PIO, if !ep->dma); for in, no choice.
	 */
	if (size)
		command(regs, COMMAND_FIFO_CLEAR, ep->num);
}
/* per-endpoint operations exported to the gadget layer via struct usb_ep */
static struct usb_ep_ops goku_ep_ops = {
	.enable		= goku_ep_enable,
	.disable	= goku_ep_disable,

	.alloc_request	= goku_alloc_request,
	.free_request	= goku_free_request,

	.queue		= goku_queue,
	.dequeue	= goku_dequeue,

	.set_halt	= goku_set_halt,
	.fifo_status	= goku_fifo_status,
	.fifo_flush	= goku_fifo_flush,
};
/*-------------------------------------------------------------------------*/
/* frame counter isn't exposed by this driver */
static int goku_get_frame(struct usb_gadget *_gadget)
{
	return -EOPNOTSUPP;
}
/* device-level operations exported via struct usb_gadget */
static const struct usb_gadget_ops goku_ops = {
	.get_frame	= goku_get_frame,
	// no remote wakeup
	// not selfpowered
};
/*-------------------------------------------------------------------------*/
/* describe the configured use_dma policy, for banner/proc output */
static inline char *dmastr(void)
{
	switch (use_dma) {
	case 0:
		return "(dma disabled)";
	case 2:
		return "(dma IN and OUT)";
	default:
		return "(dma IN)";
	}
}
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
static const char proc_node_name [] = "driver/udc";
#define FOURBITS "%s%s%s%s"
#define EIGHTBITS FOURBITS FOURBITS
/*
 * Decode an interrupt mask (int_status or int_enable share the layout)
 * into the proc buffer, advancing *next/*size by the bytes written.
 */
static void
dump_intmask(const char *label, u32 mask, char **next, unsigned *size)
{
	int t;

	/* int_status is the same format ... */
	t = scnprintf(*next, *size,
		"%s %05X =" FOURBITS EIGHTBITS EIGHTBITS "\n",
		label, mask,
		(mask & INT_PWRDETECT) ? " power" : "",
		(mask & INT_SYSERROR) ? " sys" : "",
		(mask & INT_MSTRDEND) ? " in-dma" : "",
		(mask & INT_MSTWRTMOUT) ? " wrtmo" : "",

		(mask & INT_MSTWREND) ? " out-dma" : "",
		(mask & INT_MSTWRSET) ? " wrset" : "",
		(mask & INT_ERR) ? " err" : "",
		(mask & INT_SOF) ? " sof" : "",

		(mask & INT_EP3NAK) ? " ep3nak" : "",
		(mask & INT_EP2NAK) ? " ep2nak" : "",
		(mask & INT_EP1NAK) ? " ep1nak" : "",
		(mask & INT_EP3DATASET) ? " ep3" : "",

		(mask & INT_EP2DATASET) ? " ep2" : "",
		(mask & INT_EP1DATASET) ? " ep1" : "",
		(mask & INT_STATUSNAK) ? " ep0snak" : "",
		(mask & INT_STATUS) ? " ep0status" : "",

		(mask & INT_SETUP) ? " setup" : "",
		(mask & INT_ENDPOINT0) ? " ep0" : "",
		(mask & INT_USBRESET) ? " reset" : "",
		(mask & INT_SUSPEND) ? " suspend" : "");
	*size -= t;
	*next += t;
}
/*
 * /proc read handler: dump device status, interrupt masks, dma master
 * state, and the per-endpoint queues into the caller's buffer.  Only
 * serves a single read from offset 0 (*eof is set).
 * (Repairs the mojibake-corrupted "&regs" tokens throughout.)
 */
static int
udc_proc_read(char *buffer, char **start, off_t off, int count,
		int *eof, void *_dev)
{
	char				*buf = buffer;
	struct goku_udc			*dev = _dev;
	struct goku_udc_regs __iomem	*regs = dev->regs;
	char				*next = buf;
	unsigned			size = count;
	unsigned long			flags;
	int				i, t, is_usb_connected;
	u32				tmp;

	if (off != 0)
		return 0;

	local_irq_save(flags);

	/* basic device status */
	tmp = readl(&regs->power_detect);
	is_usb_connected = tmp & PW_DETECT;
	t = scnprintf(next, size,
		"%s - %s\n"
		"%s version: %s %s\n"
		"Gadget driver: %s\n"
		"Host %s, %s\n"
		"\n",
		pci_name(dev->pdev), driver_desc,
		driver_name, DRIVER_VERSION, dmastr(),
		dev->driver ? dev->driver->driver.name : "(none)",
		is_usb_connected
			? ((tmp & PW_PULLUP) ? "full speed" : "powered")
			: "disconnected",
		({char *state;
		switch(dev->ep0state){
		case EP0_DISCONNECT:	state = "ep0_disconnect"; break;
		case EP0_IDLE:		state = "ep0_idle"; break;
		case EP0_IN:		state = "ep0_in"; break;
		case EP0_OUT:		state = "ep0_out"; break;
		case EP0_STATUS:	state = "ep0_status"; break;
		case EP0_STALL:		state = "ep0_stall"; break;
		case EP0_SUSPEND:	state = "ep0_suspend"; break;
		default:		state = "ep0_?"; break;
		} state; })
		);
	size -= t;
	next += t;

	dump_intmask("int_status", readl(&regs->int_status), &next, &size);
	dump_intmask("int_enable", readl(&regs->int_enable), &next, &size);

	if (!is_usb_connected || !dev->driver || (tmp & PW_PULLUP) == 0)
		goto done;

	/* registers for (active) device and ep0 */
	t = scnprintf(next, size, "\nirqs %lu\ndataset %02x "
			"single.bcs %02x.%02x state %x addr %u\n",
			dev->irqs, readl(&regs->DataSet),
			readl(&regs->EPxSingle), readl(&regs->EPxBCS),
			readl(&regs->UsbState),
			readl(&regs->address));
	size -= t;
	next += t;

	tmp = readl(&regs->dma_master);
	t = scnprintf(next, size,
		"dma %03X =" EIGHTBITS "%s %s\n", tmp,
		(tmp & MST_EOPB_DIS) ? " eopb-" : "",
		(tmp & MST_EOPB_ENA) ? " eopb+" : "",
		(tmp & MST_TIMEOUT_DIS) ? " tmo-" : "",
		(tmp & MST_TIMEOUT_ENA) ? " tmo+" : "",

		(tmp & MST_RD_EOPB) ? " eopb" : "",
		(tmp & MST_RD_RESET) ? " in_reset" : "",
		(tmp & MST_WR_RESET) ? " out_reset" : "",
		(tmp & MST_RD_ENA) ? " IN" : "",

		(tmp & MST_WR_ENA) ? " OUT" : "",
		(tmp & MST_CONNECTION)
			? "ep1in/ep2out"
			: "ep1out/ep2in");
	size -= t;
	next += t;

	/* dump endpoint queues */
	for (i = 0; i < 4; i++) {
		struct goku_ep		*ep = &dev->ep [i];
		struct goku_request	*req;

		if (i && !ep->desc)
			continue;

		tmp = readl(ep->reg_status);
		t = scnprintf(next, size,
			"%s %s max %u %s, irqs %lu, "
			"status %02x (%s) " FOURBITS "\n",
			ep->ep.name,
			ep->is_in ? "in" : "out",
			ep->ep.maxpacket,
			ep->dma ? "dma" : "pio",
			ep->irqs,
			tmp, ({ char *s;
			switch (tmp & EPxSTATUS_EP_MASK) {
			case EPxSTATUS_EP_READY:
				s = "ready"; break;
			case EPxSTATUS_EP_DATAIN:
				s = "packet"; break;
			case EPxSTATUS_EP_FULL:
				s = "full"; break;
			case EPxSTATUS_EP_TX_ERR:	// host will retry
				s = "tx_err"; break;
			case EPxSTATUS_EP_RX_ERR:
				s = "rx_err"; break;
			case EPxSTATUS_EP_BUSY:		/* ep0 only */
				s = "busy"; break;
			case EPxSTATUS_EP_STALL:
				s = "stall"; break;
			case EPxSTATUS_EP_INVALID:	// these "can't happen"
				s = "invalid"; break;
			default:
				s = "?"; break;
			}; s; }),
			(tmp & EPxSTATUS_TOGGLE) ? "data1" : "data0",
			(tmp & EPxSTATUS_SUSPEND) ? " suspend" : "",
			(tmp & EPxSTATUS_FIFO_DISABLE) ? " disable" : "",
			(tmp & EPxSTATUS_STAGE_ERROR) ? " ep0stat" : ""
			);
		if (t <= 0 || t > size)
			goto done;
		size -= t;
		next += t;

		if (list_empty(&ep->queue)) {
			t = scnprintf(next, size, "\t(nothing queued)\n");
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
			continue;
		}
		list_for_each_entry(req, &ep->queue, queue) {
			/* the dma head shows live progress from hardware */
			if (ep->dma && req->queue.prev == &ep->queue) {
				if (i == UDC_MSTRD_ENDPOINT)
					tmp = readl(&regs->in_dma_current);
				else
					tmp = readl(&regs->out_dma_current);
				tmp -= req->req.dma;
				tmp++;
			} else
				tmp = req->req.actual;

			t = scnprintf(next, size,
				"\treq %p len %u/%u buf %p\n",
				&req->req, tmp, req->req.length,
				req->req.buf);
			if (t <= 0 || t > size)
				goto done;
			size -= t;
			next += t;
		}
	}

done:
	local_irq_restore(flags);
	*eof = 1;
	return count - size;
}
#endif /* CONFIG_USB_GADGET_DEBUG_FILES */
/*-------------------------------------------------------------------------*/
/*
 * (Re)initialize the software view of the gadget and all four
 * endpoints; ep0 is set up last as the control endpoint (no mode
 * register, MAX_EP0_SIZE, not on the gadget's ep_list).
 */
static void udc_reinit (struct goku_udc *dev)
{
	static char *names [] = { "ep0", "ep1-bulk", "ep2-bulk", "ep3-bulk" };

	unsigned i;

	INIT_LIST_HEAD (&dev->gadget.ep_list);
	dev->gadget.ep0 = &dev->ep [0].ep;
	dev->gadget.speed = USB_SPEED_UNKNOWN;
	dev->ep0state = EP0_DISCONNECT;
	dev->irqs = 0;

	for (i = 0; i < 4; i++) {
		struct goku_ep	*ep = &dev->ep[i];

		ep->num = i;
		ep->ep.name = names[i];
		ep->reg_fifo = &dev->regs->ep_fifo [i];
		ep->reg_status = &dev->regs->ep_status [i];
		ep->reg_mode = &dev->regs->ep_mode[i];

		ep->ep.ops = &goku_ep_ops;
		list_add_tail (&ep->ep.ep_list, &dev->gadget.ep_list);
		ep->dev = dev;
		INIT_LIST_HEAD (&ep->queue);

		/* regs == NULL: reset software state only */
		ep_reset(NULL, ep);
	}

	dev->ep[0].reg_mode = NULL;
	dev->ep[0].ep.maxpacket = MAX_EP0_SIZE;
	list_del_init (&dev->ep[0].ep.ep_list);
}
/* Put the controller into reset with interrupts masked and the D+
 * pullup released.  (Repairs the mojibake-corrupted "&regs" tokens.)
 */
static void udc_reset(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;

	writel(0, &regs->power_detect);
	writel(0, &regs->int_enable);
	readl(&regs->int_enable);
	dev->int_enable = 0;

	/* deassert reset, leave USB D+ at hi-Z (no pullup)
	 * don't let INT_PWRDETECT sequence begin
	 */
	udelay(250);
	writel(PW_RESETB, &regs->power_detect);
	readl(&regs->int_enable);
}
/* Bring the controller out of reset, program the hardware-handled
 * request modes, clear descriptors, raise the D+ pullup, and arm ep0.
 * (Repairs the mojibake-corrupted "&regs" tokens.)
 */
static void ep0_start(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem	*regs = dev->regs;
	unsigned			i;

	VDBG(dev, "%s\n", __func__);
	udc_reset(dev);
	udc_reinit (dev);
	//writel(MST_EOPB_ENA | MST_TIMEOUT_ENA, &regs->dma_master);

	/* hw handles set_address, set_feature, get_status; maybe more */
	writel(   G_REQMODE_SET_INTF | G_REQMODE_GET_INTF
		| G_REQMODE_SET_CONF | G_REQMODE_GET_CONF
		| G_REQMODE_GET_DESC
		| G_REQMODE_CLEAR_FEAT
		, &regs->reqmode);

	for (i = 0; i < 4; i++)
		dev->ep[i].irqs = 0;

	/* can't modify descriptors after writing UsbReady */
	for (i = 0; i < DESC_LEN; i++)
		writel(0, &regs->descriptors[i]);
	writel(0, &regs->UsbReady);

	/* expect ep0 requests when the host drops reset */
	writel(PW_RESETB | PW_PULLUP, &regs->power_detect);
	dev->int_enable = INT_DEVWIDE | INT_EP0;
	writel(dev->int_enable, &dev->regs->int_enable);
	readl(&regs->int_enable);
	dev->gadget.speed = USB_SPEED_FULL;
	dev->ep0state = EP0_IDLE;
}
/* Begin enumeration right away if VBUS is present; otherwise arm the
 * power-detect irq so ep0_start() runs when the host shows up.
 */
static void udc_enable(struct goku_udc *dev)
{
	/* start enumeration now, or after power detect irq */
	if (readl(&dev->regs->power_detect) & PW_DETECT) {
		ep0_start(dev);
		return;
	}

	DBG(dev, "%s\n", __func__);
	dev->int_enable = INT_PWRDETECT;
	writel(dev->int_enable, &dev->regs->int_enable);
}
/*-------------------------------------------------------------------------*/
/* keeping it simple:
* - one bus driver, initted first;
* - one function driver, initted second
*/
/* singleton controller instance, set up by the PCI probe path */
static struct goku_udc	*the_controller;
/* when a driver is successfully registered, it will receive
* control requests including set_configuration(), which enables
* non-control requests. then usb traffic follows until a
* disconnect is reported. then a host may connect again, or
* the driver might get unbound.
*/
/* Bind a gadget function driver to the controller, then enable host
 * detection and ep0.  Returns 0, or a negative errno if the arguments
 * are bad, no controller exists, one is already bound, or bind() fails.
 */
int usb_gadget_register_driver(struct usb_gadget_driver *driver)
{
	struct goku_udc	*dev = the_controller;
	int		retval;

	if (driver == NULL
			|| driver->speed < USB_SPEED_FULL
			|| driver->bind == NULL
			|| driver->disconnect == NULL
			|| driver->setup == NULL)
		return -EINVAL;
	if (dev == NULL)
		return -ENODEV;
	if (dev->driver != NULL)
		return -EBUSY;

	/* hook up the driver */
	driver->driver.bus = NULL;
	dev->driver = driver;
	dev->gadget.dev.driver = &driver->driver;
	retval = driver->bind(&dev->gadget);
	if (retval != 0) {
		/* roll the binding back on failure */
		DBG(dev, "bind to driver %s --> error %d\n",
				driver->driver.name, retval);
		dev->driver = NULL;
		dev->gadget.dev.driver = NULL;
		return retval;
	}

	/* then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	udc_enable(dev);

	DBG(dev, "registered gadget driver '%s'\n", driver->driver.name);
	return 0;
}
EXPORT_SYMBOL(usb_gadget_register_driver);
/* Quiesce the hardware and flush all endpoints, then (with the lock
 * dropped) tell @driver about the disconnect.  Re-enables the udc when
 * a function driver is still bound.  Called with dev->lock held.
 */
static void
stop_activity(struct goku_udc *dev, struct usb_gadget_driver *driver)
{
	unsigned ep_idx;

	DBG (dev, "%s\n", __func__);

	/* no disconnect callback unless we were actually connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* disconnect gadget driver after quiesceing hw and the driver */
	udc_reset (dev);
	for (ep_idx = 0; ep_idx < 4; ep_idx++)
		nuke(&dev->ep [ep_idx], -ESHUTDOWN);
	if (driver != NULL) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	if (dev->driver != NULL)
		udc_enable(dev);
}
/* Detach the currently bound gadget driver: quiesce the hardware and
 * queues, then run the driver's disconnect/unbind callbacks.
 */
int usb_gadget_unregister_driver(struct usb_gadget_driver *driver)
{
	unsigned long	flags;
	struct goku_udc	*dev = the_controller;

	if (dev == NULL)
		return -ENODEV;
	if (driver == NULL || driver != dev->driver || driver->unbind == NULL)
		return -EINVAL;

	spin_lock_irqsave(&dev->lock, flags);
	dev->driver = NULL;
	stop_activity(dev, driver);
	spin_unlock_irqrestore(&dev->lock, flags);

	driver->unbind(&dev->gadget);
	dev->gadget.dev.driver = NULL;

	DBG(dev, "unregistered driver '%s'\n", driver->driver.name);
	return 0;
}
EXPORT_SYMBOL(usb_gadget_unregister_driver);
/*-------------------------------------------------------------------------*/
/*
 * Decode the SETUP packet latched by the controller and either handle
 * it here (the CLEAR_FEATURE cases the hardware cannot) or delegate to
 * the gadget driver's setup() callback.
 *
 * Called from the irq handler with dev->lock held; the lock is dropped
 * around the driver callback.  On failure the control endpoint is
 * protocol-STALLed.
 *
 * Fix: every "&regs->" access had been mangled to "®s->" by an HTML
 * entity conversion (&reg -> U+00AE); restored throughout.
 */
static void ep0_setup(struct goku_udc *dev)
{
	struct goku_udc_regs __iomem *regs = dev->regs;
	struct usb_ctrlrequest ctrl;
	int tmp;

	/* read SETUP packet and enter DATA stage; the 16-bit fields are
	 * exposed as byte-wide high/low registers */
	ctrl.bRequestType = readl(&regs->bRequestType);
	ctrl.bRequest = readl(&regs->bRequest);
	ctrl.wValue  = cpu_to_le16((readl(&regs->wValueH)  << 8)
					| readl(&regs->wValueL));
	ctrl.wIndex  = cpu_to_le16((readl(&regs->wIndexH)  << 8)
					| readl(&regs->wIndexL));
	ctrl.wLength = cpu_to_le16((readl(&regs->wLengthH) << 8)
					| readl(&regs->wLengthL));
	writel(0, &regs->SetupRecv);

	nuke(&dev->ep[0], 0);
	dev->ep[0].stopped = 0;
	if (likely(ctrl.bRequestType & USB_DIR_IN)) {
		dev->ep[0].is_in = 1;
		dev->ep0state = EP0_IN;
		/* detect early status stages */
		writel(ICONTROL_STATUSNAK, &dev->regs->IntControl);
	} else {
		dev->ep[0].is_in = 0;
		dev->ep0state = EP0_OUT;

		/* NOTE: CLEAR_FEATURE is done in software so that we can
		 * synchronize transfer restarts after bulk IN stalls. data
		 * won't even enter the fifo until the halt is cleared.
		 */
		switch (ctrl.bRequest) {
		case USB_REQ_CLEAR_FEATURE:
			switch (ctrl.bRequestType) {
			case USB_RECIP_ENDPOINT:
				tmp = le16_to_cpu(ctrl.wIndex) & 0x0f;
				/* active endpoint */
				if (tmp > 3 || (!dev->ep[tmp].desc && tmp != 0))
					goto stall;
				/* direction bit must match the endpoint */
				if (ctrl.wIndex & cpu_to_le16(
						USB_DIR_IN)) {
					if (!dev->ep[tmp].is_in)
						goto stall;
				} else {
					if (dev->ep[tmp].is_in)
						goto stall;
				}
				if (ctrl.wValue != cpu_to_le16(
						USB_ENDPOINT_HALT))
					goto stall;
				if (tmp)
					goku_clear_halt(&dev->ep[tmp]);
succeed:
				/* start ep0out status stage */
				writel(~(1<<0), &regs->EOP);
				dev->ep[0].stopped = 1;
				dev->ep0state = EP0_STATUS;
				return;
			case USB_RECIP_DEVICE:
				/* device remote wakeup: always clear */
				if (ctrl.wValue != cpu_to_le16(1))
					goto stall;
				VDBG(dev, "clear dev remote wakeup\n");
				goto succeed;
			case USB_RECIP_INTERFACE:
				goto stall;
			default:		/* pass to gadget driver */
				break;
			}
			break;
		default:
			break;
		}
	}

#ifdef USB_TRACE
	VDBG(dev, "SETUP %02x.%02x v%04x i%04x l%04x\n",
		ctrl.bRequestType, ctrl.bRequest,
		le16_to_cpu(ctrl.wValue), le16_to_cpu(ctrl.wIndex),
		le16_to_cpu(ctrl.wLength));
#endif

	/* hw wants to know when we're configured (or not) */
	dev->req_config = (ctrl.bRequest == USB_REQ_SET_CONFIGURATION
				&& ctrl.bRequestType == USB_RECIP_DEVICE);
	if (unlikely(dev->req_config))
		dev->configured = (ctrl.wValue != cpu_to_le16(0));

	/* delegate everything to the gadget driver.
	 * it may respond after this irq handler returns.
	 */
	spin_unlock (&dev->lock);
	tmp = dev->driver->setup(&dev->gadget, &ctrl);
	spin_lock (&dev->lock);
	if (unlikely(tmp < 0)) {
stall:
#ifdef USB_TRACE
		VDBG(dev, "req %02x.%02x protocol STALL; err %d\n",
			ctrl.bRequestType, ctrl.bRequest, tmp);
#endif
		command(regs, COMMAND_STALL, 0);
		dev->ep[0].stopped = 1;
		dev->ep0state = EP0_STALL;
	}

	/* expect at least one data or status stage irq */
}
/*
 * Acknowledge one interrupt source: drop it from the local status
 * snapshot, write it back to the hardware status register (written as
 * the complement — presumably write-0-to-clear; confirm against the
 * Toshiba TC86C001 register docs), and record that we handled work.
 * Only usable inside goku_irq(), which declares stat/regs/handled.
 *
 * Fix: "&regs->" had been mangled to "®s->" (HTML-entity mojibake).
 */
#define ACK(irqbit) { \
	stat &= ~irqbit; \
	writel(~irqbit, &regs->int_status); \
	handled = 1; \
	}
static irqreturn_t goku_irq(int irq, void *_dev)
{
struct goku_udc *dev = _dev;
struct goku_udc_regs __iomem *regs = dev->regs;
struct goku_ep *ep;
u32 stat, handled = 0;
unsigned i, rescans = 5;
spin_lock(&dev->lock);
rescan:
stat = readl(®s->int_status) & dev->int_enable;
if (!stat)
goto done;
dev->irqs++;
/* device-wide irqs */
if (unlikely(stat & INT_DEVWIDE)) {
if (stat & INT_SYSERROR) {
ERROR(dev, "system error\n");
stop_activity(dev, dev->driver);
stat = 0;
handled = 1;
// FIXME have a neater way to prevent re-enumeration
dev->driver = NULL;
goto done;
}
if (stat & INT_PWRDETECT) {
writel(~stat, ®s->int_status);
if (readl(&dev->regs->power_detect) & PW_DETECT) {
VDBG(dev, "connect\n");
ep0_start(dev);
} else {
DBG(dev, "disconnect\n");
if (dev->gadget.speed == USB_SPEED_FULL)
stop_activity(dev, dev->driver);
dev->ep0state = EP0_DISCONNECT;
dev->int_enable = INT_DEVWIDE;
writel(dev->int_enable, &dev->regs->int_enable);
}
stat = 0;
handled = 1;
goto done;
}
if (stat & INT_SUSPEND) {
ACK(INT_SUSPEND);
if (readl(®s->ep_status[0]) & EPxSTATUS_SUSPEND) {
switch (dev->ep0state) {
case EP0_DISCONNECT:
case EP0_SUSPEND:
goto pm_next;
default:
break;
}
DBG(dev, "USB suspend\n");
dev->ep0state = EP0_SUSPEND;
if (dev->gadget.speed != USB_SPEED_UNKNOWN
&& dev->driver
&& dev->driver->suspend) {
spin_unlock(&dev->lock);
dev->driver->suspend(&dev->gadget);
spin_lock(&dev->lock);
}
} else {
if (dev->ep0state != EP0_SUSPEND) {
DBG(dev, "bogus USB resume %d\n",
dev->ep0state);
goto pm_next;
}
DBG(dev, "USB resume\n");
dev->ep0state = EP0_IDLE;
if (dev->gadget.speed != USB_SPEED_UNKNOWN
&& dev->driver
&& dev->driver->resume) {
spin_unlock(&dev->lock);
dev->driver->resume(&dev->gadget);
spin_lock(&dev->lock);
}
}
}
pm_next:
if (stat & INT_USBRESET) { /* hub reset done */
ACK(INT_USBRESET);
INFO(dev, "USB reset done, gadget %s\n",
dev->driver->driver.name);
}
// and INT_ERR on some endpoint's crc/bitstuff/... problem
}
/* progress ep0 setup, data, or status stages.
* no transition {EP0_STATUS, EP0_STALL} --> EP0_IDLE; saves irqs
*/
if (stat & INT_SETUP) {
ACK(INT_SETUP);
dev->ep[0].irqs++;
ep0_setup(dev);
}
if (stat & INT_STATUSNAK) {
ACK(INT_STATUSNAK|INT_ENDPOINT0);
if (dev->ep0state == EP0_IN) {
ep = &dev->ep[0];
ep->irqs++;
nuke(ep, 0);
writel(~(1<<0), ®s->EOP);
dev->ep0state = EP0_STATUS;
}
}
if (stat & INT_ENDPOINT0) {
ACK(INT_ENDPOINT0);
ep = &dev->ep[0];
ep->irqs++;
pio_advance(ep);
}
/* dma completion */
if (stat & INT_MSTRDEND) { /* IN */
ACK(INT_MSTRDEND);
ep = &dev->ep[UDC_MSTRD_ENDPOINT];
ep->irqs++;
dma_advance(dev, ep);
}
if (stat & INT_MSTWREND) { /* OUT */
ACK(INT_MSTWREND);
ep = &dev->ep[UDC_MSTWR_ENDPOINT];
ep->irqs++;
dma_advance(dev, ep);
}
if (stat & INT_MSTWRTMOUT) { /* OUT */
ACK(INT_MSTWRTMOUT);
ep = &dev->ep[UDC_MSTWR_ENDPOINT];
ep->irqs++;
ERROR(dev, "%s write timeout ?\n", ep->ep.name);
// reset dma? then dma_advance()
}
/* pio */
for (i = 1; i < 4; i++) {
u32 tmp = INT_EPxDATASET(i);
if (!(stat & tmp))
continue;
ep = &dev->ep[i];
pio_advance(ep);
if (list_empty (&ep->queue))
pio_irq_disable(dev, regs, i);
stat &= ~tmp;
handled = 1;
ep->irqs++;
}
if (rescans--)
goto rescan;
done:
(void)readl(®s->int_enable);
spin_unlock(&dev->lock);
if (stat)
DBG(dev, "unhandled irq status: %05x (%05x, %05x)\n", stat,
readl(®s->int_status), dev->int_enable);
return IRQ_RETVAL(handled);
}
#undef ACK
/*-------------------------------------------------------------------------*/

/* device release hook: the gadget's struct device owns the goku_udc
 * allocation, so the final put frees it here */
static void gadget_release(struct device *_dev)
{
	kfree(dev_get_drvdata(_dev));
}
/* tear down the binding between this driver and the pci device */
static void goku_remove(struct pci_dev *pdev)
{
struct goku_udc *dev = pci_get_drvdata(pdev);
DBG(dev, "%s\n", __func__);
/* any gadget driver must have been unregistered already */
BUG_ON(dev->driver);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
remove_proc_entry(proc_node_name, NULL);
#endif
/* release resources in reverse order of acquisition; each got_*/
/* enabled flag records how far goku_probe() got, so this also serves
 * as probe's error-path unwinder */
if (dev->regs)
udc_reset(dev);
if (dev->got_irq)
free_irq(pdev->irq, dev);
if (dev->regs)
iounmap(dev->regs);
if (dev->got_region)
release_mem_region(pci_resource_start (pdev, 0),
pci_resource_len (pdev, 0));
if (dev->enabled)
pci_disable_device(pdev);
/* NOTE(review): device_unregister() may drop the last reference and
 * run gadget_release() (which kfrees dev), making the accesses below
 * use-after-free — verify reference lifetime before relying on them */
device_unregister(&dev->gadget.dev);
pci_set_drvdata(pdev, NULL);
dev->regs = NULL;
the_controller = NULL;
INFO(dev, "unbind\n");
}
/* wrap this driver around the specified pci device, but
 * don't respond over USB until a gadget driver binds to us.
 *
 * Bring-up order: allocate state, enable the PCI device, claim and map
 * BAR0, reset/reinit the UDC, hook the (shared) irq, then register the
 * gadget device.  On any failure, goku_remove() unwinds whatever the
 * got_*/enabled flags say was acquired.
 */
static int goku_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
struct goku_udc *dev = NULL;
unsigned long resource, len;
void __iomem *base = NULL;
int retval;
/* if you want to support more than one controller in a system,
 * usb_gadget_driver_{register,unregister}() must change.
 */
if (the_controller) {
pr_warning("ignoring %s\n", pci_name(pdev));
return -EBUSY;
}
if (!pdev->irq) {
printk(KERN_ERR "Check PCI %s IRQ setup!\n", pci_name(pdev));
retval = -ENODEV;
goto done;
}
/* alloc, and start init */
dev = kzalloc (sizeof *dev, GFP_KERNEL);
if (dev == NULL){
pr_debug("enomem %s\n", pci_name(pdev));
retval = -ENOMEM;
goto done;
}
spin_lock_init(&dev->lock);
dev->pdev = pdev;
dev->gadget.ops = &goku_ops;
/* the "gadget" abstracts/virtualizes the controller */
dev_set_name(&dev->gadget.dev, "gadget");
dev->gadget.dev.parent = &pdev->dev;
dev->gadget.dev.dma_mask = pdev->dev.dma_mask;
dev->gadget.dev.release = gadget_release;
dev->gadget.name = driver_name;
/* now all the pci goodies ... */
retval = pci_enable_device(pdev);
if (retval < 0) {
DBG(dev, "can't enable, %d\n", retval);
goto done;
}
dev->enabled = 1;
/* BAR 0 holds the controller's register file */
resource = pci_resource_start(pdev, 0);
len = pci_resource_len(pdev, 0);
if (!request_mem_region(resource, len, driver_name)) {
DBG(dev, "controller already in use\n");
retval = -EBUSY;
goto done;
}
dev->got_region = 1;
base = ioremap_nocache(resource, len);
if (base == NULL) {
DBG(dev, "can't map memory\n");
retval = -EFAULT;
goto done;
}
dev->regs = (struct goku_udc_regs __iomem *) base;
/* NOTE(review): on the early error paths above, drvdata is still
 * unset when goku_remove() runs — confirm it copes with that */
pci_set_drvdata(pdev, dev);
INFO(dev, "%s\n", driver_desc);
INFO(dev, "version: " DRIVER_VERSION " %s\n", dmastr());
INFO(dev, "irq %d, pci mem %p\n", pdev->irq, base);
/* init to known state, then setup irqs */
udc_reset(dev);
udc_reinit (dev);
if (request_irq(pdev->irq, goku_irq, IRQF_SHARED/*|IRQF_SAMPLE_RANDOM*/,
driver_name, dev) != 0) {
DBG(dev, "request interrupt %d failed\n", pdev->irq);
retval = -EBUSY;
goto done;
}
dev->got_irq = 1;
if (use_dma)
pci_set_master(pdev);
#ifdef CONFIG_USB_GADGET_DEBUG_FILES
create_proc_read_entry(proc_node_name, 0, NULL, udc_proc_read, dev);
#endif
/* done */
the_controller = dev;
retval = device_register(&dev->gadget.dev);
if (retval == 0)
return 0;
done:
/* unwind partial initialization via the shared teardown path */
if (dev)
goku_remove (pdev);
return retval;
}
/*-------------------------------------------------------------------------*/

/* match the Toshiba 0x102f:0x0107 UDC by exact class + vendor/device id;
 * table is zero-terminated for the PCI core */
static const struct pci_device_id pci_ids[] = { {
.class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
.class_mask = ~0,
.vendor = 0x102f, /* Toshiba */
.device = 0x0107, /* this UDC */
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
}, { /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
/* glue the probe/remove pair into the PCI core */
static struct pci_driver goku_pci_driver = {
.name = (char *) driver_name,
.id_table = pci_ids,
.probe = goku_probe,
.remove = goku_remove,
/* FIXME add power management support */
};

/* module entry point: just hand the driver to the PCI core */
static int __init init (void)
{
return pci_register_driver (&goku_pci_driver);
}
module_init (init);

/* module exit: unregistering triggers goku_remove() for the bound device */
static void __exit cleanup (void)
{
pci_unregister_driver (&goku_pci_driver);
}
module_exit (cleanup);
| gpl-2.0 |
a1d3s/linux-bpi | drivers/net/wireless/brcm80211/brcmfmac/common.c | 908 | 5969 | /*
* Copyright (c) 2010 Broadcom Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <brcmu_wifi.h>
#include <brcmu_utils.h>
#include "core.h"
#include "bus.h"
#include "debug.h"
#include "fwil.h"
#include "fwil_types.h"
#include "tracepoint.h"
#include "common.h"
const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
#define BRCMF_DEFAULT_BCN_TIMEOUT 3
#define BRCMF_DEFAULT_SCAN_CHANNEL_TIME 40
#define BRCMF_DEFAULT_SCAN_UNASSOC_TIME 40
/* boost value for RSSI_DELTA in preferred join selection */
#define BRCMF_JOIN_PREF_RSSI_BOOST 8
/*
 * One-time firmware initialization performed on the primary interface:
 * read the MAC address and revision info, log the firmware version,
 * then push default settings (mpc, beacon timeout, roaming off, join
 * preferences, event mask, scan timings) before the bus preinit.
 * Returns 0 or the first fatal firmware-interface error.
 */
int brcmf_c_preinit_dcmds(struct brcmf_if *ifp)
{
s8 eventmask[BRCMF_EVENTING_MASK_LEN];
u8 buf[BRCMF_DCMD_SMLEN];
struct brcmf_join_pref_params join_pref_params[2];
struct brcmf_rev_info_le revinfo;
struct brcmf_rev_info *ri;
char *ptr;
s32 err;
/* retrieve mac address */
err = brcmf_fil_iovar_data_get(ifp, "cur_etheraddr", ifp->mac_addr,
sizeof(ifp->mac_addr));
if (err < 0) {
brcmf_err("Retreiving cur_etheraddr failed, %d\n", err);
goto done;
}
memcpy(ifp->drvr->mac, ifp->mac_addr, sizeof(ifp->drvr->mac));
/* revision info is informational: a failure here is logged but is
 * not fatal (err is recorded in ri->result below) */
err = brcmf_fil_cmd_data_get(ifp, BRCMF_C_GET_REVINFO,
&revinfo, sizeof(revinfo));
ri = &ifp->drvr->revinfo;
if (err < 0) {
brcmf_err("retrieving revision info failed, %d\n", err);
} else {
/* convert the little-endian wire struct to host order */
ri->vendorid = le32_to_cpu(revinfo.vendorid);
ri->deviceid = le32_to_cpu(revinfo.deviceid);
ri->radiorev = le32_to_cpu(revinfo.radiorev);
ri->chiprev = le32_to_cpu(revinfo.chiprev);
ri->corerev = le32_to_cpu(revinfo.corerev);
ri->boardid = le32_to_cpu(revinfo.boardid);
ri->boardvendor = le32_to_cpu(revinfo.boardvendor);
ri->boardrev = le32_to_cpu(revinfo.boardrev);
ri->driverrev = le32_to_cpu(revinfo.driverrev);
ri->ucoderev = le32_to_cpu(revinfo.ucoderev);
ri->bus = le32_to_cpu(revinfo.bus);
ri->chipnum = le32_to_cpu(revinfo.chipnum);
ri->phytype = le32_to_cpu(revinfo.phytype);
ri->phyrev = le32_to_cpu(revinfo.phyrev);
ri->anarev = le32_to_cpu(revinfo.anarev);
ri->chippkg = le32_to_cpu(revinfo.chippkg);
ri->nvramrev = le32_to_cpu(revinfo.nvramrev);
}
ri->result = err;
/* query for 'ver' to get version info from firmware */
memset(buf, 0, sizeof(buf));
strcpy(buf, "ver");
err = brcmf_fil_iovar_data_get(ifp, "ver", buf, sizeof(buf));
if (err < 0) {
brcmf_err("Retreiving version information failed, %d\n",
err);
goto done;
}
/* keep only the first line of the multi-line version string */
ptr = (char *)buf;
strsep(&ptr, "\n");
/* Print fw version info */
brcmf_err("Firmware version = %s\n", buf);
/* locate firmware version number for ethtool */
ptr = strrchr(buf, ' ') + 1;
strlcpy(ifp->drvr->fwver, ptr, sizeof(ifp->drvr->fwver));
/* set mpc (minimum power consumption) */
err = brcmf_fil_iovar_int_set(ifp, "mpc", 1);
if (err) {
brcmf_err("failed setting mpc\n");
goto done;
}
/*
 * Setup timeout if Beacons are lost and roam is off to report
 * link down
 */
err = brcmf_fil_iovar_int_set(ifp, "bcn_timeout",
BRCMF_DEFAULT_BCN_TIMEOUT);
if (err) {
brcmf_err("bcn_timeout error (%d)\n", err);
goto done;
}
/* Disable built-in roaming so an external supplicant can take
 * care of roaming
 */
err = brcmf_fil_iovar_int_set(ifp, "roam_off", 1);
if (err) {
brcmf_err("roam_off error (%d)\n", err);
goto done;
}
/* Setup join_pref to select target by RSSI(with boost on 5GHz) */
join_pref_params[0].type = BRCMF_JOIN_PREF_RSSI_DELTA;
join_pref_params[0].len = 2;
join_pref_params[0].rssi_gain = BRCMF_JOIN_PREF_RSSI_BOOST;
join_pref_params[0].band = WLC_BAND_5G;
join_pref_params[1].type = BRCMF_JOIN_PREF_RSSI;
join_pref_params[1].len = 2;
join_pref_params[1].rssi_gain = 0;
join_pref_params[1].band = 0;
err = brcmf_fil_iovar_data_set(ifp, "join_pref", join_pref_params,
sizeof(join_pref_params));
if (err)
brcmf_err("Set join_pref error (%d)\n", err);
/* Setup event_msgs, enable E_IF (read-modify-write of the mask) */
err = brcmf_fil_iovar_data_get(ifp, "event_msgs", eventmask,
BRCMF_EVENTING_MASK_LEN);
if (err) {
brcmf_err("Get event_msgs error (%d)\n", err);
goto done;
}
setbit(eventmask, BRCMF_E_IF);
err = brcmf_fil_iovar_data_set(ifp, "event_msgs", eventmask,
BRCMF_EVENTING_MASK_LEN);
if (err) {
brcmf_err("Set event_msgs error (%d)\n", err);
goto done;
}
/* Setup default scan channel time */
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_CHANNEL_TIME,
BRCMF_DEFAULT_SCAN_CHANNEL_TIME);
if (err) {
brcmf_err("BRCMF_C_SET_SCAN_CHANNEL_TIME error (%d)\n",
err);
goto done;
}
/* Setup default scan unassoc time */
err = brcmf_fil_cmd_int_set(ifp, BRCMF_C_SET_SCAN_UNASSOC_TIME,
BRCMF_DEFAULT_SCAN_UNASSOC_TIME);
if (err) {
brcmf_err("BRCMF_C_SET_SCAN_UNASSOC_TIME error (%d)\n",
err);
goto done;
}
/* do bus specific preinit here */
err = brcmf_bus_preinit(ifp->drvr->bus_if);
done:
return err;
}
#if defined(CONFIG_BRCM_TRACING) || defined(CONFIG_BRCMDBG)
/*
 * Backend for the brcmf_dbg() macros: formats once via %pV, prints to
 * the kernel log when @level is enabled in brcmf_msg_level, and always
 * feeds the brcmf_dbg tracepoint.
 */
void __brcmf_dbg(u32 level, const char *func, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
va_start(args, fmt);
vaf.va = &args;
if (brcmf_msg_level & level)
pr_debug("%s %pV", func, &vaf);
trace_brcmf_dbg(level, func, &vaf);
va_end(args);
}
#endif
| gpl-2.0 |
ajeet17181/fathom-kernel | arch/powerpc/kvm/e500_emulate.c | 1164 | 5089 | /*
* Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, <yu.liu@freescale.com>
*
* Description:
* This file is derived from arch/powerpc/kvm/44x_emulate.c,
* by Hollis Blanchard <hollisb@us.ibm.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*/
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_e500.h>
#include "booke.h"
#include "e500_tlb.h"
#define XOP_TLBIVAX 786
#define XOP_TLBSX 914
#define XOP_TLBRE 946
#define XOP_TLBWE 978
/*
 * Emulate an e500-specific instruction (the X-form TLB management ops).
 * Anything we don't recognize falls through to the generic BookE
 * emulator.  Returns an EMULATE_* code.
 */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                           unsigned int inst, int *advance)
{
	int emulated = EMULATE_FAIL;

	/* all TLB management ops live in primary opcode 31 */
	if (get_op(inst) == 31) {
		switch (get_xop(inst)) {
		case XOP_TLBRE:
			emulated = kvmppc_e500_emul_tlbre(vcpu);
			break;
		case XOP_TLBWE:
			emulated = kvmppc_e500_emul_tlbwe(vcpu);
			break;
		case XOP_TLBSX:
			emulated = kvmppc_e500_emul_tlbsx(vcpu, get_rb(inst));
			break;
		case XOP_TLBIVAX:
			emulated = kvmppc_e500_emul_tlbivax(vcpu,
							    get_ra(inst),
							    get_rb(inst));
			break;
		}
	}

	/* not ours: let the generic BookE layer try */
	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}
/*
 * Emulate a guest mtspr of an e500-specific SPR (PID*, MAS*, L1CSR*,
 * HID*, MMUCSR0, and the SPE/perfmon IVORs); all other SPRs fall
 * through to the generic BookE handler.  Returns an EMULATE_* code.
 */
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_PID:
/* PID0 also drives the shadow PID used for the host-side TLB */
vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
vcpu->arch.pid = spr_val;
break;
case SPRN_PID1:
vcpu_e500->pid[1] = spr_val; break;
case SPRN_PID2:
vcpu_e500->pid[2] = spr_val; break;
case SPRN_MAS0:
vcpu_e500->mas0 = spr_val; break;
case SPRN_MAS1:
vcpu_e500->mas1 = spr_val; break;
case SPRN_MAS2:
vcpu_e500->mas2 = spr_val; break;
case SPRN_MAS3:
vcpu_e500->mas3 = spr_val; break;
case SPRN_MAS4:
vcpu_e500->mas4 = spr_val; break;
case SPRN_MAS6:
vcpu_e500->mas6 = spr_val; break;
case SPRN_MAS7:
vcpu_e500->mas7 = spr_val; break;
case SPRN_L1CSR0:
vcpu_e500->l1csr0 = spr_val;
/* never latch the cache flash-invalidate/lock-clear trigger bits */
vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
break;
case SPRN_L1CSR1:
vcpu_e500->l1csr1 = spr_val; break;
case SPRN_HID0:
vcpu_e500->hid0 = spr_val; break;
case SPRN_HID1:
vcpu_e500->hid1 = spr_val; break;
case SPRN_MMUCSR0:
emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
spr_val);
break;
/* extra exceptions */
case SPRN_IVOR32:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
break;
case SPRN_IVOR33:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
break;
case SPRN_IVOR34:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
break;
case SPRN_IVOR35:
vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
break;
default:
emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
}
return emulated;
}
/*
 * Emulate a guest mfspr of an e500-specific SPR, writing the shadowed
 * value into GPR @rt.  MMUCSR0 always reads back 0 (its write bits are
 * self-clearing operations); MMUCFG is read from real hardware.
 * Unhandled SPRs fall through to the generic BookE handler.
 */
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_PID:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
case SPRN_PID1:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
case SPRN_PID2:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
case SPRN_MAS0:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
case SPRN_MAS1:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
case SPRN_MAS2:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
case SPRN_MAS3:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break;
case SPRN_MAS4:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
case SPRN_MAS6:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
case SPRN_MAS7:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break;
case SPRN_TLB0CFG:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
case SPRN_TLB1CFG:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break;
case SPRN_L1CSR0:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
case SPRN_L1CSR1:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
case SPRN_HID0:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
case SPRN_HID1:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
case SPRN_MMUCSR0:
kvmppc_set_gpr(vcpu, rt, 0); break;
case SPRN_MMUCFG:
kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;
/* extra exceptions */
case SPRN_IVOR32:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
break;
case SPRN_IVOR33:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
break;
case SPRN_IVOR34:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
break;
case SPRN_IVOR35:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
break;
default:
emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
}
return emulated;
}
| gpl-2.0 |
pocketbook-free/kernel_622 | arch/powerpc/kvm/e500_emulate.c | 1164 | 5089 | /*
* Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved.
*
* Author: Yu Liu, <yu.liu@freescale.com>
*
* Description:
* This file is derived from arch/powerpc/kvm/44x_emulate.c,
* by Hollis Blanchard <hollisb@us.ibm.com>.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*/
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_e500.h>
#include "booke.h"
#include "e500_tlb.h"
#define XOP_TLBIVAX 786
#define XOP_TLBSX 914
#define XOP_TLBRE 946
#define XOP_TLBWE 978
/*
 * Emulate an e500-specific instruction (the X-form TLB management ops).
 * Anything we don't recognize falls through to the generic BookE
 * emulator.  Returns an EMULATE_* code.
 */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                           unsigned int inst, int *advance)
{
	int emulated = EMULATE_FAIL;

	/* all TLB management ops live in primary opcode 31 */
	if (get_op(inst) == 31) {
		switch (get_xop(inst)) {
		case XOP_TLBRE:
			emulated = kvmppc_e500_emul_tlbre(vcpu);
			break;
		case XOP_TLBWE:
			emulated = kvmppc_e500_emul_tlbwe(vcpu);
			break;
		case XOP_TLBSX:
			emulated = kvmppc_e500_emul_tlbsx(vcpu, get_rb(inst));
			break;
		case XOP_TLBIVAX:
			emulated = kvmppc_e500_emul_tlbivax(vcpu,
							    get_ra(inst),
							    get_rb(inst));
			break;
		}
	}

	/* not ours: let the generic BookE layer try */
	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}
/*
 * Emulate a guest mtspr of an e500-specific SPR (PID*, MAS*, L1CSR*,
 * HID*, MMUCSR0, and the SPE/perfmon IVORs); all other SPRs fall
 * through to the generic BookE handler.  Returns an EMULATE_* code.
 */
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
ulong spr_val = kvmppc_get_gpr(vcpu, rs);
switch (sprn) {
case SPRN_PID:
/* PID0 also drives the shadow PID used for the host-side TLB */
vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
vcpu->arch.pid = spr_val;
break;
case SPRN_PID1:
vcpu_e500->pid[1] = spr_val; break;
case SPRN_PID2:
vcpu_e500->pid[2] = spr_val; break;
case SPRN_MAS0:
vcpu_e500->mas0 = spr_val; break;
case SPRN_MAS1:
vcpu_e500->mas1 = spr_val; break;
case SPRN_MAS2:
vcpu_e500->mas2 = spr_val; break;
case SPRN_MAS3:
vcpu_e500->mas3 = spr_val; break;
case SPRN_MAS4:
vcpu_e500->mas4 = spr_val; break;
case SPRN_MAS6:
vcpu_e500->mas6 = spr_val; break;
case SPRN_MAS7:
vcpu_e500->mas7 = spr_val; break;
case SPRN_L1CSR0:
vcpu_e500->l1csr0 = spr_val;
/* never latch the cache flash-invalidate/lock-clear trigger bits */
vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
break;
case SPRN_L1CSR1:
vcpu_e500->l1csr1 = spr_val; break;
case SPRN_HID0:
vcpu_e500->hid0 = spr_val; break;
case SPRN_HID1:
vcpu_e500->hid1 = spr_val; break;
case SPRN_MMUCSR0:
emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
spr_val);
break;
/* extra exceptions */
case SPRN_IVOR32:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
break;
case SPRN_IVOR33:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
break;
case SPRN_IVOR34:
vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
break;
case SPRN_IVOR35:
vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
break;
default:
emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
}
return emulated;
}
/*
 * Emulate a guest mfspr of an e500-specific SPR, writing the shadowed
 * value into GPR @rt.  MMUCSR0 always reads back 0 (its write bits are
 * self-clearing operations); MMUCFG is read from real hardware.
 * Unhandled SPRs fall through to the generic BookE handler.
 */
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
int emulated = EMULATE_DONE;
switch (sprn) {
case SPRN_PID:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break;
case SPRN_PID1:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break;
case SPRN_PID2:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
case SPRN_MAS0:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break;
case SPRN_MAS1:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break;
case SPRN_MAS2:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break;
case SPRN_MAS3:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break;
case SPRN_MAS4:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break;
case SPRN_MAS6:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break;
case SPRN_MAS7:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break;
case SPRN_TLB0CFG:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
case SPRN_TLB1CFG:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break;
case SPRN_L1CSR0:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break;
case SPRN_L1CSR1:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break;
case SPRN_HID0:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break;
case SPRN_HID1:
kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break;
case SPRN_MMUCSR0:
kvmppc_set_gpr(vcpu, rt, 0); break;
case SPRN_MMUCFG:
kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break;
/* extra exceptions */
case SPRN_IVOR32:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]);
break;
case SPRN_IVOR33:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]);
break;
case SPRN_IVOR34:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]);
break;
case SPRN_IVOR35:
kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]);
break;
default:
emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt);
}
return emulated;
}
| gpl-2.0 |
gmtorg/stm32_uclinux | arch/arm/mach-netx/nxdb500.c | 1676 | 4723 | /*
* arch/arm/mach-netx/nxdb500.c
*
* Copyright (c) 2005 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/mtd/plat-ram.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>
#include <linux/amba/clcd.h>
#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/netx-regs.h>
#include <mach/eth.h>
#include "generic.h"
#include "fb.h"
/* 240x320 QVGA TFT panel description for the AMBA CLCD controller
 * (timings in the embedded fb_videomode; -1 width/height = unknown
 * physical size in mm) */
static struct clcd_panel qvga = {
.mode = {
.name = "QVGA",
.refresh = 60,
.xres = 240,
.yres = 320,
.pixclock = 187617,
.left_margin = 6,
.right_margin = 26,
.upper_margin = 0,
.lower_margin = 6,
.hsync_len = 6,
.vsync_len = 1,
.sync = 0,
.vmode = FB_VMODE_NONINTERLACED,
},
.width = -1,
.height = -1,
.tim2 = 16,
.cntl = CNTL_LCDTFT | CNTL_BGR,
.bpp = 16,
.grayscale = 0,
};
/* force the panel's 5-bit green channel layout into @var before
 * running the generic CLCD mode check */
static inline int nxdb500_check(struct clcd_fb *fb, struct fb_var_screeninfo *var)
{
	var->green.msb_right = 0;
	var->green.length = 5;

	return clcdfb_check(fb, var);
}
/*
 * Board-specific CLCD setup: fix the green channel to 5 bits, unlock
 * the ASIC control registers, and drive PIO0 high as an output
 * (presumably display enable/backlight — confirm against board docs),
 * then fall through to the generic netX framebuffer setup.
 */
static int nxdb500_clcd_setup(struct clcd_fb *fb)
{
unsigned int val;
fb->fb.var.green.length = 5;
fb->fb.var.green.msb_right = 0;
/* enable asic control: reading the key register arms it, writing the
 * value back unlocks the protected register window */
val = readl(NETX_SYSTEM_IOC_ACCESS_KEY);
writel(val, NETX_SYSTEM_IOC_ACCESS_KEY);
writel(3, NETX_SYSTEM_IOC_CR);
/* set PIO0 high and switch it to output */
val = readl(NETX_PIO_OUTPIO);
writel(val | 1, NETX_PIO_OUTPIO);
val = readl(NETX_PIO_OEPIO);
writel(val | 1, NETX_PIO_OEPIO);
return netx_clcd_setup(fb);
}
/* AMBA CLCD board operations: generic netX helpers plus the two
 * board-specific hooks above */
static struct clcd_board clcd_data = {
.name = "netX",
.check = nxdb500_check,
.decode = clcdfb_decode,
.enable = netx_clcd_enable,
.setup = nxdb500_clcd_setup,
.mmap = netx_clcd_mmap,
.remove = netx_clcd_remove,
};
/*
 * The two on-chip ethernet units, distinguished by their xC unit
 * number via platform data.
 *
 * Fix: "&eth0_platform_data" / "&eth1_platform_data" had been mangled
 * to "ð0..." / "ð1..." by an HTML entity conversion (&eth -> U+00F0);
 * restored so the addresses are taken correctly.
 */
static struct netxeth_platform_data eth0_platform_data = {
	.xcno = 0,
};

static struct platform_device netx_eth0_device = {
	.name = "netx-eth",
	.id = 0,
	.num_resources = 0,
	.resource = NULL,
	.dev = {
		.platform_data = &eth0_platform_data,
	}
};

static struct netxeth_platform_data eth1_platform_data = {
	.xcno = 1,
};

static struct platform_device netx_eth1_device = {
	.name = "netx-eth",
	.id = 1,
	.num_resources = 0,
	.resource = NULL,
	.dev = {
		.platform_data = &eth1_platform_data,
	}
};
/* three on-chip UARTs: each gets a 64-byte register window and one irq */
static struct resource netx_uart0_resources[] = {
[0] = {
.start = 0x00100A00,
.end = 0x00100A3F,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = (NETX_IRQ_UART0),
.end = (NETX_IRQ_UART0),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device netx_uart0_device = {
.name = "netx-uart",
.id = 0,
.num_resources = ARRAY_SIZE(netx_uart0_resources),
.resource = netx_uart0_resources,
};
static struct resource netx_uart1_resources[] = {
[0] = {
.start = 0x00100A40,
.end = 0x00100A7F,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = (NETX_IRQ_UART1),
.end = (NETX_IRQ_UART1),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device netx_uart1_device = {
.name = "netx-uart",
.id = 1,
.num_resources = ARRAY_SIZE(netx_uart1_resources),
.resource = netx_uart1_resources,
};
static struct resource netx_uart2_resources[] = {
[0] = {
.start = 0x00100A80,
.end = 0x00100ABF,
.flags = IORESOURCE_MEM,
},
[1] = {
.start = (NETX_IRQ_UART2),
.end = (NETX_IRQ_UART2),
.flags = IORESOURCE_IRQ,
},
};
static struct platform_device netx_uart2_device = {
.name = "netx-uart",
.id = 2,
.num_resources = ARRAY_SIZE(netx_uart2_resources),
.resource = netx_uart2_resources,
};
/* all fixed platform devices on the nxdb500 board */
static struct platform_device *devices[] __initdata = {
&netx_eth0_device,
&netx_eth1_device,
&netx_uart0_device,
&netx_uart1_device,
&netx_uart2_device,
};

/* board init: bring up the framebuffer, then register the devices above */
static void __init nxdb500_init(void)
{
netx_fb_init(&clcd_data, &qvga);
platform_add_devices(devices, ARRAY_SIZE(devices));
}

/* machine descriptor: ties the generic netX map/irq/timer code to this board */
MACHINE_START(NXDB500, "Hilscher nxdb500")
.phys_io = 0x00100000,
.io_pg_offst = (io_p2v(0x00100000) >> 18) & 0xfffc,
.boot_params = 0x80000100,
.map_io = netx_map_io,
.init_irq = netx_init_irq,
.timer = &netx_timer,
.init_machine = nxdb500_init,
MACHINE_END
| gpl-2.0 |
Wonfee/android_kernel_asus_grouper | fs/quota/quota.c | 2188 | 9487 | /*
* Quota code necessary even when VFS quota support is not compiled
* into the kernel. The interesting stuff is over in dquot.c, here
* we have symbols for initial quotactl(2) handling, the sysctl(2)
* variables, etc - things needed even when quota support disabled.
*/
#include <linux/fs.h>
#include <linux/namei.h>
#include <linux/slab.h>
#include <asm/current.h>
#include <asm/uaccess.h>
#include <linux/kernel.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/buffer_head.h>
#include <linux/capability.h>
#include <linux/quotaops.h>
#include <linux/types.h>
#include <linux/writeback.h>
/*
 * Permission check for quotactl commands: read-only status queries are
 * open to everyone; a user may query quotas for their own uid (or a
 * group they belong to); everything else requires CAP_SYS_ADMIN.
 * Always finishes with the LSM security hook.  Returns 0 or -EPERM
 * (or the hook's error).
 */
static int check_quotactl_permission(struct super_block *sb, int type, int cmd,
qid_t id)
{
switch (cmd) {
/* these commands do not require any special privilegues */
case Q_GETFMT:
case Q_SYNC:
case Q_GETINFO:
case Q_XGETQSTAT:
case Q_XQUOTASYNC:
break;
/* allow to query information for dquots we "own" */
case Q_GETQUOTA:
case Q_XGETQUOTA:
if ((type == USRQUOTA && current_euid() == id) ||
(type == GRPQUOTA && in_egroup_p(id)))
break;
/* not ours: fall into the privileged path */
/*FALLTHROUGH*/
default:
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
}
return security_quotactl(cmd, type, id, sb);
}
/* iterate_supers() callback: sync quota state of one superblock,
 * if its filesystem implements the hook; *arg is the quota type */
static void quota_sync_one(struct super_block *sb, void *arg)
{
	int type = *(int *)arg;

	if (sb->s_qcop && sb->s_qcop->quota_sync)
		sb->s_qcop->quota_sync(sb, type, 1);
}
/* Q_SYNC with no device: sync quotas of @type on every mounted
 * filesystem, after validating the type and the security hook */
static int quota_sync_all(int type)
{
	int err;

	if (type >= MAXQUOTAS)
		return -EINVAL;

	err = security_quotactl(Q_SYNC, type, 0, NULL);
	if (err)
		return err;

	iterate_supers(quota_sync_one, &type);
	return 0;
}
/*
 * Q_QUOTAON: enable quota accounting.  Filesystems that keep quota in
 * hidden metadata (quota_on_meta) ignore @path; others get the quota
 * file path, whose lookup error (if any) is propagated here.
 */
static int quota_quotaon(struct super_block *sb, int type, int cmd, qid_t id,
struct path *path)
{
if (!sb->s_qcop->quota_on && !sb->s_qcop->quota_on_meta)
return -ENOSYS;
if (sb->s_qcop->quota_on_meta)
return sb->s_qcop->quota_on_meta(sb, type, id);
/* path lookup may have failed in the caller; surface that error */
if (IS_ERR(path))
return PTR_ERR(path);
return sb->s_qcop->quota_on(sb, type, id, path);
}
/*
 * Q_GETFMT: copy the active quota format id for @type to userspace.
 * dqptr_sem is held while reading so the format cannot be torn down
 * underneath us; it is dropped before the (possibly faulting)
 * copy_to_user().
 */
static int quota_getfmt(struct super_block *sb, int type, void __user *addr)
{
	__u32 fmt;

	down_read(&sb_dqopt(sb)->dqptr_sem);
	if (!sb_has_quota_active(sb, type)) {
		up_read(&sb_dqopt(sb)->dqptr_sem);
		return -ESRCH;
	}
	fmt = sb_dqopt(sb)->info[type].dqi_format->qf_fmt_id;
	up_read(&sb_dqopt(sb)->dqptr_sem);
	if (copy_to_user(addr, &fmt, sizeof(fmt)))
		return -EFAULT;
	return 0;
}
/*
 * Q_GETINFO: fetch grace times and flags for quota @type and copy the
 * result to userspace.
 */
static int quota_getinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo ii;
	int err;

	if (sb->s_qcop->get_info == NULL)
		return -ENOSYS;

	err = sb->s_qcop->get_info(sb, type, &ii);
	if (err)
		return err;

	if (copy_to_user(addr, &ii, sizeof(ii)) != 0)
		return -EFAULT;
	return 0;
}
/*
 * Q_SETINFO: read new grace times/flags from userspace and hand them to
 * the filesystem.  The copy happens first, so -EFAULT takes precedence
 * over -ENOSYS, matching the original behaviour.
 */
static int quota_setinfo(struct super_block *sb, int type, void __user *addr)
{
	struct if_dqinfo ii;

	if (copy_from_user(&ii, addr, sizeof(ii)) != 0)
		return -EFAULT;
	if (sb->s_qcop->set_info == NULL)
		return -ENOSYS;
	return sb->s_qcop->set_info(sb, type, &ii);
}
/* Translate an XFS-style fs_disk_quota into the generic if_dqblk. */
static void copy_to_if_dqblk(struct if_dqblk *dst, struct fs_disk_quota *src)
{
	/* inode limits and usage */
	dst->dqb_ihardlimit = src->d_ino_hardlimit;
	dst->dqb_isoftlimit = src->d_ino_softlimit;
	dst->dqb_curinodes = src->d_icount;
	/* block limits and usage */
	dst->dqb_bhardlimit = src->d_blk_hardlimit;
	dst->dqb_bsoftlimit = src->d_blk_softlimit;
	dst->dqb_curspace = src->d_bcount;
	/* grace times */
	dst->dqb_btime = src->d_btimer;
	dst->dqb_itime = src->d_itimer;
	/* everything above is valid */
	dst->dqb_valid = QIF_ALL;
}
/*
 * Q_GETQUOTA: fetch the dquot for (@type, @id), convert it to the
 * generic if_dqblk layout and copy it to userspace.
 */
static int quota_getquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct fs_disk_quota xdq;
	struct if_dqblk dq;
	int err;

	if (sb->s_qcop->get_dqblk == NULL)
		return -ENOSYS;

	err = sb->s_qcop->get_dqblk(sb, type, id, &xdq);
	if (err)
		return err;

	copy_to_if_dqblk(&dq, &xdq);
	if (copy_to_user(addr, &dq, sizeof(dq)) != 0)
		return -EFAULT;
	return 0;
}
static void copy_from_if_dqblk(struct fs_disk_quota *dst, struct if_dqblk *src)
{
dst->d_blk_hardlimit = src->dqb_bhardlimit;
dst->d_blk_softlimit = src->dqb_bsoftlimit;
dst->d_bcount = src->dqb_curspace;
dst->d_ino_hardlimit = src->dqb_ihardlimit;
dst->d_ino_softlimit = src->dqb_isoftlimit;
dst->d_icount = src->dqb_curinodes;
dst->d_btimer = src->dqb_btime;
dst->d_itimer = src->dqb_itime;
dst->d_fieldmask = 0;
if (src->dqb_valid & QIF_BLIMITS)
dst->d_fieldmask |= FS_DQ_BSOFT | FS_DQ_BHARD;
if (src->dqb_valid & QIF_SPACE)
dst->d_fieldmask |= FS_DQ_BCOUNT;
if (src->dqb_valid & QIF_ILIMITS)
dst->d_fieldmask |= FS_DQ_ISOFT | FS_DQ_IHARD;
if (src->dqb_valid & QIF_INODES)
dst->d_fieldmask |= FS_DQ_ICOUNT;
if (src->dqb_valid & QIF_BTIME)
dst->d_fieldmask |= FS_DQ_BTIMER;
if (src->dqb_valid & QIF_ITIME)
dst->d_fieldmask |= FS_DQ_ITIMER;
}
/*
 * Q_SETQUOTA: read new limits from userspace, convert them to the
 * fs_disk_quota layout and apply via ->set_dqblk.  The user copy is
 * done first, so -EFAULT takes precedence over -ENOSYS.
 */
static int quota_setquota(struct super_block *sb, int type, qid_t id,
			  void __user *addr)
{
	struct fs_disk_quota fdq;
	struct if_dqblk idq;

	if (copy_from_user(&idq, addr, sizeof(idq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	copy_from_if_dqblk(&fdq, &idq);
	return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
}
/*
 * Q_XQUOTAON/OFF/RM: read the XFS quota state flags from userspace and
 * pass them (plus the subcommand) to ->set_xstate.
 */
static int quota_setxstate(struct super_block *sb, int cmd, void __user *addr)
{
	__u32 state_flags;

	if (copy_from_user(&state_flags, addr, sizeof(state_flags)) != 0)
		return -EFAULT;
	if (sb->s_qcop->set_xstate == NULL)
		return -ENOSYS;
	return sb->s_qcop->set_xstate(sb, state_flags, cmd);
}
/*
 * Q_XGETQSTAT: fetch the XFS-style quota state and copy it to
 * userspace.
 */
static int quota_getxstate(struct super_block *sb, void __user *addr)
{
	struct fs_quota_stat state;
	int err;

	if (sb->s_qcop->get_xstate == NULL)
		return -ENOSYS;

	err = sb->s_qcop->get_xstate(sb, &state);
	if (err)
		return err;

	return copy_to_user(addr, &state, sizeof(state)) ? -EFAULT : 0;
}
/*
 * Q_XSETQLIM: read an fs_disk_quota from userspace and apply it
 * directly (no if_dqblk conversion — this is the XFS-native layout).
 */
static int quota_setxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota fdq;

	if (copy_from_user(&fdq, addr, sizeof(fdq)))
		return -EFAULT;
	if (!sb->s_qcop->set_dqblk)
		return -ENOSYS;
	return sb->s_qcop->set_dqblk(sb, type, id, &fdq);
}
/*
 * Q_XGETQUOTA: fetch the dquot for (@type, @id) in the XFS-native
 * fs_disk_quota layout and copy it to userspace.
 */
static int quota_getxquota(struct super_block *sb, int type, qid_t id,
			   void __user *addr)
{
	struct fs_disk_quota xdq;
	int err;

	if (sb->s_qcop->get_dqblk == NULL)
		return -ENOSYS;

	err = sb->s_qcop->get_dqblk(sb, type, id, &xdq);
	if (err)
		return err;

	if (copy_to_user(addr, &xdq, sizeof(xdq)) != 0)
		return -EFAULT;
	return 0;
}
/* Copy parameters and call proper function */
/*
 * Validate the request, run the permission check, then dispatch the
 * quotactl subcommand to the appropriate helper or ->s_qcop method.
 * @path is only meaningful for Q_QUOTAON (resolved by the caller).
 */
static int do_quotactl(struct super_block *sb, int type, int cmd, qid_t id,
		       void __user *addr, struct path *path)
{
	int ret;

	/* XFS (XQM) commands allow more quota types than VFS quota */
	if (type >= (XQM_COMMAND(cmd) ? XQM_MAXQUOTAS : MAXQUOTAS))
		return -EINVAL;
	if (!sb->s_qcop)
		return -ENOSYS;

	ret = check_quotactl_permission(sb, type, cmd, id);
	if (ret < 0)
		return ret;

	switch (cmd) {
	case Q_QUOTAON:
		return quota_quotaon(sb, type, cmd, id, path);
	case Q_QUOTAOFF:
		if (!sb->s_qcop->quota_off)
			return -ENOSYS;
		return sb->s_qcop->quota_off(sb, type);
	case Q_GETFMT:
		return quota_getfmt(sb, type, addr);
	case Q_GETINFO:
		return quota_getinfo(sb, type, addr);
	case Q_SETINFO:
		return quota_setinfo(sb, type, addr);
	case Q_GETQUOTA:
		return quota_getquota(sb, type, id, addr);
	case Q_SETQUOTA:
		return quota_setquota(sb, type, id, addr);
	case Q_SYNC:
		if (!sb->s_qcop->quota_sync)
			return -ENOSYS;
		return sb->s_qcop->quota_sync(sb, type, 1);
	case Q_XQUOTAON:
	case Q_XQUOTAOFF:
	case Q_XQUOTARM:
		return quota_setxstate(sb, cmd, addr);
	case Q_XGETQSTAT:
		return quota_getxstate(sb, addr);
	case Q_XSETQLIM:
		return quota_setxquota(sb, type, id, addr);
	case Q_XGETQUOTA:
		return quota_getxquota(sb, type, id, addr);
	case Q_XQUOTASYNC:
		/* caller already holds s_umount */
		if (sb->s_flags & MS_RDONLY)
			return -EROFS;
		writeback_inodes_sb(sb);
		return 0;
	default:
		return -EINVAL;
	}
}
/*
* look up a superblock on which quota ops will be performed
* - use the name of a block device to find the superblock thereon
*/
static struct super_block *quotactl_block(const char __user *special)
{
#ifdef CONFIG_BLOCK
	struct block_device *bdev;
	struct super_block *sb;
	char *tmp = getname(special);

	if (IS_ERR(tmp))
		return ERR_CAST(tmp);
	bdev = lookup_bdev(tmp);
	putname(tmp);		/* name only needed for the lookup */
	if (IS_ERR(bdev))
		return ERR_CAST(bdev);
	/* get_super() returns the sb with an active reference held;
	 * the caller must drop it with drop_super() */
	sb = get_super(bdev);
	bdput(bdev);
	if (!sb)
		return ERR_PTR(-ENODEV);

	return sb;
#else
	/* no block layer — there is nothing to look up */
	return ERR_PTR(-ENODEV);
#endif
}
/*
* This is the system call interface. This communicates with
* the user-level programs. Currently this only supports diskquota
* calls. Maybe we need to add the process quotas etc. in the future,
* but we probably should use rlimits for that.
*/
/*
 * quotactl(2) entry point.  @cmd packs the subcommand (high bits) and
 * the quota type (low bits); @special names the block device, @addr is
 * the subcommand-specific user buffer.
 */
SYSCALL_DEFINE4(quotactl, unsigned int, cmd, const char __user *, special,
		qid_t, id, void __user *, addr)
{
	uint cmds, type;
	struct super_block *sb = NULL;
	struct path path, *pathp = NULL;
	int ret;

	cmds = cmd >> SUBCMDSHIFT;
	type = cmd & SUBCMDMASK;

	/*
	 * As a special case Q_SYNC can be called without a specific device.
	 * It will iterate all superblocks that have quota enabled and call
	 * the sync action on each of them.
	 */
	if (!special) {
		if (cmds == Q_SYNC)
			return quota_sync_all(type);
		return -ENODEV;
	}

	/*
	 * Path for quotaon has to be resolved before grabbing superblock
	 * because that gets s_umount sem which is also possibly needed by path
	 * resolution (think about autofs) and thus deadlocks could arise.
	 */
	if (cmds == Q_QUOTAON) {
		ret = user_path_at(AT_FDCWD, addr, LOOKUP_FOLLOW|LOOKUP_AUTOMOUNT, &path);
		if (ret)
			pathp = ERR_PTR(ret);	/* error is consumed in quota_quotaon() */
		else
			pathp = &path;
	}

	sb = quotactl_block(special);
	if (IS_ERR(sb))
		return PTR_ERR(sb);

	ret = do_quotactl(sb, type, cmds, id, addr, pathp);

	drop_super(sb);
	if (pathp && !IS_ERR(pathp))
		path_put(pathp);
	return ret;
}
| gpl-2.0 |
173210/android_kernel_samsung_exynos4210jpn | arch/arm/mach-s3c64xx/cpufreq.c | 2444 | 6481 | /* linux/arch/arm/plat-s3c64xx/cpufreq.c
*
* Copyright 2009 Wolfson Microelectronics plc
*
* S3C64xx CPUfreq Support
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>
static struct clk *armclk;
static struct regulator *vddarm;
static unsigned long regulator_latency;
#ifdef CONFIG_CPU_S3C6410
/* Voltage window (in uV) required by a group of operating points. */
struct s3c64xx_dvfs {
	unsigned int vddarm_min;
	unsigned int vddarm_max;
};

static struct s3c64xx_dvfs s3c64xx_dvfs_table[] = {
	[0] = { 1000000, 1150000 },
	[1] = { 1050000, 1150000 },
	[2] = { 1100000, 1150000 },
	[3] = { 1200000, 1350000 },
};

/* .index selects the s3c64xx_dvfs_table entry for each frequency (kHz). */
static struct cpufreq_frequency_table s3c64xx_freq_table[] = {
	{ 0,  66000 },
	{ 0, 133000 },
	{ 1, 222000 },
	{ 1, 266000 },
	{ 2, 333000 },
	{ 2, 400000 },
	{ 2, 532000 },
	{ 2, 533000 },
	{ 3, 667000 },
	{ 0, CPUFREQ_TABLE_END },
};
#endif
/* cpufreq ->verify hook: clamp the policy against the frequency table.
 * Only CPU 0 exists on S3C64xx. */
static int s3c64xx_cpufreq_verify_speed(struct cpufreq_policy *policy)
{
	if (policy->cpu)
		return -EINVAL;

	return cpufreq_frequency_table_verify(policy, s3c64xx_freq_table);
}
/* cpufreq ->get hook: current ARM clock rate in kHz (0 for invalid CPU). */
static unsigned int s3c64xx_cpufreq_get_speed(unsigned int cpu)
{
	unsigned long rate_khz;

	if (cpu)
		return 0;

	rate_khz = clk_get_rate(armclk) / 1000;
	return rate_khz;
}
/*
 * cpufreq ->target hook.  Sequencing matters: when raising the
 * frequency the voltage is increased first; when lowering it the
 * voltage is dropped after the clock change.  On failure the clock is
 * rolled back and the POSTCHANGE notification is still sent so
 * notifiers stay balanced.
 */
static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
				      unsigned int target_freq,
				      unsigned int relation)
{
	int ret;
	unsigned int i;
	struct cpufreq_freqs freqs;
	struct s3c64xx_dvfs *dvfs;

	/* map the request onto a table entry */
	ret = cpufreq_frequency_table_target(policy, s3c64xx_freq_table,
					     target_freq, relation, &i);
	if (ret != 0)
		return ret;

	freqs.cpu = 0;
	freqs.old = clk_get_rate(armclk) / 1000;
	freqs.new = s3c64xx_freq_table[i].frequency;
	freqs.flags = 0;
	dvfs = &s3c64xx_dvfs_table[s3c64xx_freq_table[i].index];

	if (freqs.old == freqs.new)
		return 0;

	pr_debug("cpufreq: Transition %d-%dkHz\n", freqs.old, freqs.new);

	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);

#ifdef CONFIG_REGULATOR
	/* raise voltage before a frequency increase */
	if (vddarm && freqs.new > freqs.old) {
		ret = regulator_set_voltage(vddarm,
					    dvfs->vddarm_min,
					    dvfs->vddarm_max);
		if (ret != 0) {
			pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
			       freqs.new, ret);
			goto err;
		}
	}
#endif

	ret = clk_set_rate(armclk, freqs.new * 1000);
	if (ret < 0) {
		pr_err("cpufreq: Failed to set rate %dkHz: %d\n",
		       freqs.new, ret);
		goto err;
	}

#ifdef CONFIG_REGULATOR
	/* lower voltage after a frequency decrease */
	if (vddarm && freqs.new < freqs.old) {
		ret = regulator_set_voltage(vddarm,
					    dvfs->vddarm_min,
					    dvfs->vddarm_max);
		if (ret != 0) {
			pr_err("cpufreq: Failed to set VDDARM for %dkHz: %d\n",
			       freqs.new, ret);
			goto err_clk;
		}
	}
#endif

	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	pr_debug("cpufreq: Set actual frequency %lukHz\n",
		 clk_get_rate(armclk) / 1000);

	return 0;

err_clk:
	/* voltage drop failed: restore the previous clock rate */
	if (clk_set_rate(armclk, freqs.old * 1000) < 0)
		pr_err("Failed to restore original clock rate\n");
err:
	/* always balance the PRECHANGE notification */
	cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);

	return ret;
}
#ifdef CONFIG_REGULATOR
/*
 * Walk the frequency table and invalidate operating points whose
 * required VDDARM window cannot be produced by the regulator, then
 * estimate the voltage-change latency.
 *
 * Bug fix: the original code did `continue` on CPUFREQ_ENTRY_INVALID
 * entries without advancing @freq, which spins forever as soon as the
 * table contains an invalid entry.  The entry must be stepped over.
 */
static void __init s3c64xx_cpufreq_config_regulator(void)
{
	int count, v, i, found;
	struct cpufreq_frequency_table *freq;
	struct s3c64xx_dvfs *dvfs;

	/* count < 0 means we cannot enumerate voltages; the loop below
	 * is then skipped and only the latency estimate is set */
	count = regulator_count_voltages(vddarm);
	if (count < 0)
		pr_err("cpufreq: Unable to check supported voltages\n");

	freq = s3c64xx_freq_table;
	while (count > 0 && freq->frequency != CPUFREQ_TABLE_END) {
		if (freq->frequency == CPUFREQ_ENTRY_INVALID) {
			freq++;		/* fix: avoid infinite loop */
			continue;
		}

		dvfs = &s3c64xx_dvfs_table[freq->index];
		found = 0;

		/* at least one selectable voltage must fall inside the
		 * window this operating point requires */
		for (i = 0; i < count; i++) {
			v = regulator_list_voltage(vddarm, i);
			if (v >= dvfs->vddarm_min && v <= dvfs->vddarm_max)
				found = 1;
		}

		if (!found) {
			pr_debug("cpufreq: %dkHz unsupported by regulator\n",
				 freq->frequency);
			freq->frequency = CPUFREQ_ENTRY_INVALID;
		}

		freq++;
	}

	/* Guess based on having to do an I2C/SPI write; in future we
	 * will be able to query the regulator performance here. */
	regulator_latency = 1 * 1000 * 1000;
}
#endif
/*
 * cpufreq ->init hook: acquire the ARM clock (and, optionally, the
 * VDDARM regulator), prune table entries the hardware cannot deliver,
 * and register the frequency table with the cpufreq core.
 */
static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
{
	int ret;
	struct cpufreq_frequency_table *freq;

	if (policy->cpu != 0)
		return -EINVAL;

	if (s3c64xx_freq_table == NULL) {
		pr_err("cpufreq: No frequency information for this CPU\n");
		return -ENODEV;
	}

	armclk = clk_get(NULL, "armclk");
	if (IS_ERR(armclk)) {
		pr_err("cpufreq: Unable to obtain ARMCLK: %ld\n",
		       PTR_ERR(armclk));
		return PTR_ERR(armclk);
	}

#ifdef CONFIG_REGULATOR
	vddarm = regulator_get(NULL, "vddarm");
	if (IS_ERR(vddarm)) {
		/* non-fatal: fall back to frequency-only scaling */
		ret = PTR_ERR(vddarm);
		pr_err("cpufreq: Failed to obtain VDDARM: %d\n", ret);
		pr_err("cpufreq: Only frequency scaling available\n");
		vddarm = NULL;
	} else {
		s3c64xx_cpufreq_config_regulator();
	}
#endif

	freq = s3c64xx_freq_table;
	while (freq->frequency != CPUFREQ_TABLE_END) {
		unsigned long r;

		/* Check for frequencies we can generate */
		r = clk_round_rate(armclk, freq->frequency * 1000);
		r /= 1000;
		if (r != freq->frequency) {
			pr_debug("cpufreq: %dkHz unsupported by clock\n",
				 freq->frequency);
			freq->frequency = CPUFREQ_ENTRY_INVALID;
		}

		/* If we have no regulator then assume startup
		 * frequency is the maximum we can support. */
		if (!vddarm && freq->frequency > s3c64xx_cpufreq_get_speed(0))
			freq->frequency = CPUFREQ_ENTRY_INVALID;

		freq++;
	}

	policy->cur = clk_get_rate(armclk) / 1000;

	/* Datasheet says PLL stabalisation time (if we were to use
	 * the PLLs, which we don't currently) is ~300us worst case,
	 * but add some fudge.
	 */
	policy->cpuinfo.transition_latency = (500 * 1000) + regulator_latency;

	ret = cpufreq_frequency_table_cpuinfo(policy, s3c64xx_freq_table);
	if (ret != 0) {
		pr_err("cpufreq: Failed to configure frequency table: %d\n",
		       ret);
		/* regulator_put(NULL) is a safe no-op */
		regulator_put(vddarm);
		clk_put(armclk);
	}

	return ret;
}
/* cpufreq driver descriptor wiring the hooks defined above. */
static struct cpufreq_driver s3c64xx_cpufreq_driver = {
	.owner		= THIS_MODULE,
	.flags		= 0,
	.verify		= s3c64xx_cpufreq_verify_speed,
	.target		= s3c64xx_cpufreq_set_target,
	.get		= s3c64xx_cpufreq_get_speed,
	.init		= s3c64xx_cpufreq_driver_init,
	.name		= "s3c",
};
/* Module entry: register the driver with the cpufreq core. */
static int __init s3c64xx_cpufreq_init(void)
{
	return cpufreq_register_driver(&s3c64xx_cpufreq_driver);
}
module_init(s3c64xx_cpufreq_init);
| gpl-2.0 |
zhenyw/linux | drivers/video/console/fbcon_ccw.c | 3212 | 10890 | /*
* linux/drivers/video/console/fbcon_ccw.c -- Software Rotation - 270 degrees
*
* Copyright (C) 2005 Antonino Daplas <adaplas @pol.net>
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive for
* more details.
*/
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/fb.h>
#include <linux/vt_kern.h>
#include <linux/console.h>
#include <asm/types.h>
#include "fbcon.h"
#include "fbcon_rotate.h"
/*
* Rotation 270 degrees
*/
/*
 * Copy a (rotated) glyph from @src to @dst, applying the character
 * attributes: underline (set the last row, which after 270° rotation is
 * the last byte column), bold (OR each column into the previous one to
 * thicken strokes) and reverse (invert all bits).
 */
static void ccw_update_attr(u8 *dst, u8 *src, int attribute,
			    struct vc_data *vc)
{
	int i, j, offset = (vc->vc_font.height < 10) ? 1 : 2;
	int width = (vc->vc_font.height + 7) >> 3;
	int mod = vc->vc_font.height % 8;
	u8 c, msk = ~(0xff << offset), msk1 = 0;

	/* when height is not a byte multiple, shift the underline mask
	 * up past the padding bits */
	if (mod)
		msk <<= (8 - mod);

	/* underline thicker than the partial last byte spills one bit
	 * into the previous byte */
	if (offset > mod)
		msk1 |= 0x01;

	for (i = 0; i < vc->vc_font.width; i++) {
		for (j = 0; j < width; j++) {
			c = *src;

			if (attribute & FBCON_ATTRIBUTE_UNDERLINE) {
				if (j == width - 1)
					c |= msk;

				if (msk1 && j == width - 2)
					c |= msk1;
			}

			if (attribute & FBCON_ATTRIBUTE_BOLD && i)
				*(dst - width) |= c;

			if (attribute & FBCON_ATTRIBUTE_REVERSE)
				c = ~c;
			src++;
			*dst++ = c;
		}
	}
}
/*
 * Move a rectangle of characters.  Coordinates are given in the
 * unrotated (console) frame; for 270° rotation rows map to x and
 * columns map to (vyres - x) in the framebuffer.
 */
static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy,
		      int sx, int dy, int dx, int height, int width)
{
	struct fbcon_ops *ops = info->fbcon_par;
	struct fb_copyarea area;
	u32 vyres = GETVYRES(ops->p->scrollmode, info);

	area.sx = sy * vc->vc_font.height;
	area.sy = vyres - ((sx + width) * vc->vc_font.width);
	area.dx = dy * vc->vc_font.height;
	area.dy = vyres - ((dx + width) * vc->vc_font.width);
	area.width = height * vc->vc_font.height;
	area.height  = width * vc->vc_font.width;

	info->fbops->fb_copyarea(info, &area);
}
/*
 * Clear a rectangle of characters to the background colour, with the
 * same 270° coordinate mapping as ccw_bmove().
 */
static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy,
		      int sx, int height, int width)
{
	struct fbcon_ops *ops = info->fbcon_par;
	struct fb_fillrect region;
	/* background colour lives at a different bit position when a
	 * 512-glyph font steals one attribute bit */
	int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
	u32 vyres = GETVYRES(ops->p->scrollmode, info);

	region.color = attr_bgcol_ec(bgshift,vc,info);
	region.dx = sy * vc->vc_font.height;
	region.dy = vyres - ((sx + width) * vc->vc_font.width);
	region.height = width * vc->vc_font.width;
	region.width = height * vc->vc_font.height;
	region.rop = ROP_COPY;

	info->fbops->fb_fillrect(info, &region);
}
/*
 * Blit @cnt pre-rotated glyphs (walking the string backwards, since
 * after 270° rotation the last character lands first in memory) into a
 * single fb_image and hand it to the driver.  @buf is scratch space for
 * attribute processing; @dst is the pixmap destination.
 */
static inline void ccw_putcs_aligned(struct vc_data *vc, struct fb_info *info,
				     const u16 *s, u32 attr, u32 cnt,
				     u32 d_pitch, u32 s_pitch, u32 cellsize,
				     struct fb_image *image, u8 *buf, u8 *dst)
{
	struct fbcon_ops *ops = info->fbcon_par;
	u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
	u32 idx = (vc->vc_font.height + 7) >> 3;
	u8 *src;

	while (cnt--) {
		/* note: s is decremented — reverse order blit */
		src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize;

		if (attr) {
			ccw_update_attr(buf, src, attr, vc);
			src = buf;
		}

		if (likely(idx == 1))
			__fb_pad_aligned_buffer(dst, d_pitch, src, idx,
						vc->vc_font.width);
		else
			fb_pad_aligned_buffer(dst, d_pitch, src, idx,
					      vc->vc_font.width);

		dst += d_pitch * vc->vc_font.width;
	}

	info->fbops->fb_imageblit(info, image);
}
/*
 * Draw @count characters starting at console cell (@xx, @yy), in
 * chunks limited by the pixmap size.  The string is consumed from its
 * end (s += count - 1, then backwards) to match the rotated layout.
 */
static void ccw_putcs(struct vc_data *vc, struct fb_info *info,
		      const unsigned short *s, int count, int yy, int xx,
		      int fg, int bg)
{
	struct fb_image image;
	struct fbcon_ops *ops = info->fbcon_par;
	u32 width = (vc->vc_font.height + 7)/8;
	u32 cellsize = width * vc->vc_font.width;
	u32 maxcnt = info->pixmap.size/cellsize;
	u32 scan_align = info->pixmap.scan_align - 1;
	u32 buf_align = info->pixmap.buf_align - 1;
	u32 cnt, pitch, size;
	u32 attribute = get_attribute(info, scr_readw(s));
	u8 *dst, *buf = NULL;
	u32 vyres = GETVYRES(ops->p->scrollmode, info);

	if (!ops->fontbuffer)
		return;

	image.fg_color = fg;
	image.bg_color = bg;
	image.dx = yy * vc->vc_font.height;
	image.dy = vyres - ((xx + count) * vc->vc_font.width);
	image.width = vc->vc_font.height;
	image.depth = 1;

	/* scratch buffer only needed when attributes must be applied */
	if (attribute) {
		buf = kmalloc(cellsize, GFP_KERNEL);
		if (!buf)
			return;
	}

	s += count - 1;

	while (count) {
		if (count > maxcnt)
			cnt = maxcnt;
		else
			cnt = count;

		image.height = vc->vc_font.width * cnt;
		pitch = ((image.width + 7) >> 3) + scan_align;
		pitch &= ~scan_align;
		size = pitch * image.height + buf_align;
		size &= ~buf_align;
		dst = fb_get_buffer_offset(info, &info->pixmap, size);
		image.data = dst;

		ccw_putcs_aligned(vc, info, s, attribute, cnt, pitch,
				  width, cellsize, &image, buf, dst);

		image.dy += image.height;
		count -= cnt;
		s -= cnt;
	}

	/* buf is always NULL except when in monochrome mode, so in this case
	   it's a gain to check buf against NULL even though kfree() handles
	   NULL pointers just fine */
	if (unlikely(buf))
		kfree(buf);
}
/*
 * Blank the framebuffer margins that fall outside the character grid.
 * In the rotated frame the "right" console margin becomes a horizontal
 * band (rw) and the "bottom" margin a vertical band (bh).
 */
static void ccw_clear_margins(struct vc_data *vc, struct fb_info *info,
			      int bottom_only)
{
	unsigned int cw = vc->vc_font.width;
	unsigned int ch = vc->vc_font.height;
	unsigned int rw = info->var.yres - (vc->vc_cols*cw);
	unsigned int bh = info->var.xres - (vc->vc_rows*ch);
	unsigned int bs = vc->vc_rows*ch;
	struct fb_fillrect region;

	region.color = 0;
	region.rop = ROP_COPY;

	if (rw && !bottom_only) {
		region.dx = 0;
		region.dy = info->var.yoffset;
		region.height = rw;
		region.width = info->var.xres_virtual;
		info->fbops->fb_fillrect(info, &region);
	}

	if (bh) {
		region.dx = info->var.xoffset + bs;
		region.dy = 0;
		region.height = info->var.yres_virtual;
		region.width = bh;
		info->fbops->fb_fillrect(info, &region);
	}
}
/*
 * Draw, move or erase the console cursor in the 270°-rotated frame.
 * Builds an fb_cursor describing only the state that changed (image,
 * colormap, size, position, hotspot, shape mask) and hands it to the
 * driver's fb_cursor, falling back to soft_cursor() on failure.
 */
static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode,
		       int softback_lines, int fg, int bg)
{
	struct fb_cursor cursor;
	struct fbcon_ops *ops = info->fbcon_par;
	unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
	int w = (vc->vc_font.height + 7) >> 3, c;
	int y = real_y(ops->p, vc->vc_y);
	int attribute, use_sw = (vc->vc_cursor_type & 0x10);
	int err = 1, dx, dy;
	char *src;
	u32 vyres = GETVYRES(ops->p->scrollmode, info);

	if (!ops->fontbuffer)
		return;

	cursor.set = 0;

	/* cursor hidden while scrolled back past the visible rows */
	if (softback_lines) {
		if (y + softback_lines >= vc->vc_rows) {
			mode = CM_ERASE;
			ops->cursor_flash = 0;
			return;
		} else
			y += softback_lines;
	}

 	c = scr_readw((u16 *) vc->vc_pos);
	attribute = get_attribute(info, c);
	src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.width));

	if (ops->cursor_state.image.data != src ||
	    ops->cursor_reset) {
	    ops->cursor_state.image.data = src;
	    cursor.set |= FB_CUR_SETIMAGE;
	}

	/* apply character attributes to a private copy of the glyph */
	if (attribute) {
		u8 *dst;

		dst = kmalloc(w * vc->vc_font.width, GFP_ATOMIC);
		if (!dst)
			return;
		kfree(ops->cursor_data);
		ops->cursor_data = dst;
		ccw_update_attr(dst, src, attribute, vc);
		src = dst;
	}

	if (ops->cursor_state.image.fg_color != fg ||
	    ops->cursor_state.image.bg_color != bg ||
	    ops->cursor_reset) {
		ops->cursor_state.image.fg_color = fg;
		ops->cursor_state.image.bg_color = bg;
		cursor.set |= FB_CUR_SETCMAP;
	}

	/* width/height swap: the glyph is rotated 90° */
	if (ops->cursor_state.image.height != vc->vc_font.width ||
	    ops->cursor_state.image.width != vc->vc_font.height ||
	    ops->cursor_reset) {
		ops->cursor_state.image.height = vc->vc_font.width;
		ops->cursor_state.image.width = vc->vc_font.height;
		cursor.set |= FB_CUR_SETSIZE;
	}

	dx = y * vc->vc_font.height;
	dy = vyres - ((vc->vc_x + 1) * vc->vc_font.width);

	if (ops->cursor_state.image.dx != dx ||
	    ops->cursor_state.image.dy != dy ||
	    ops->cursor_reset) {
		ops->cursor_state.image.dx = dx;
		ops->cursor_state.image.dy = dy;
		cursor.set |= FB_CUR_SETPOS;
	}

	if (ops->cursor_state.hot.x || ops->cursor_state.hot.y ||
	    ops->cursor_reset) {
		/* NOTE(review): this clears cursor_state.hot.x and the
		 * local cursor.hot.y but never cursor_state.hot.y —
		 * looks like a long-standing quirk; confirm against
		 * upstream before changing */
		ops->cursor_state.hot.x = cursor.hot.y = 0;
		cursor.set |= FB_CUR_SETHOT;
	}

	/* (re)build the rotated cursor shape mask when size or shape
	 * changed */
	if (cursor.set & FB_CUR_SETSIZE ||
	    vc->vc_cursor_type != ops->p->cursor_shape ||
	    ops->cursor_state.mask == NULL ||
	    ops->cursor_reset) {
		char *tmp, *mask = kmalloc(w*vc->vc_font.width, GFP_ATOMIC);
		int cur_height, size, i = 0;
		int width = (vc->vc_font.width + 7)/8;

		if (!mask)
			return;

		tmp = kmalloc(width * vc->vc_font.height, GFP_ATOMIC);
		if (!tmp) {
			kfree(mask);
			return;
		}

		kfree(ops->cursor_state.mask);
		ops->cursor_state.mask = mask;

		ops->p->cursor_shape = vc->vc_cursor_type;
		cursor.set |= FB_CUR_SETSHAPE;

		switch (ops->p->cursor_shape & CUR_HWMASK) {
		case CUR_NONE:
			cur_height = 0;
			break;
		case CUR_UNDERLINE:
			cur_height = (vc->vc_font.height < 10) ? 1 : 2;
			break;
		case CUR_LOWER_THIRD:
			cur_height = vc->vc_font.height/3;
			break;
		case CUR_LOWER_HALF:
			cur_height = vc->vc_font.height >> 1;
			break;
		case CUR_TWO_THIRDS:
			cur_height = (vc->vc_font.height << 1)/3;
			break;
		case CUR_BLOCK:
		default:
			cur_height = vc->vc_font.height;
			break;
		}

		/* build the unrotated mask (zeros above, ones in the
		 * cursor band), then rotate it 270° into place */
		size = (vc->vc_font.height - cur_height) * width;
		while (size--)
			tmp[i++] = 0;
		size = cur_height * width;
		while (size--)
			tmp[i++] = 0xff;
		memset(mask, 0, w * vc->vc_font.width);
		rotate_ccw(tmp, mask, vc->vc_font.width, vc->vc_font.height);
		kfree(tmp);
	}

	switch (mode) {
	case CM_ERASE:
		ops->cursor_state.enable = 0;
		break;
	case CM_DRAW:
	case CM_MOVE:
	default:
		/* software-blink cursors are drawn elsewhere */
		ops->cursor_state.enable = (use_sw) ? 0 : 1;
		break;
	}

	cursor.image.data = src;
	cursor.image.fg_color = ops->cursor_state.image.fg_color;
	cursor.image.bg_color = ops->cursor_state.image.bg_color;
	cursor.image.dx = ops->cursor_state.image.dx;
	cursor.image.dy = ops->cursor_state.image.dy;
	cursor.image.height = ops->cursor_state.image.height;
	cursor.image.width = ops->cursor_state.image.width;
	cursor.hot.x = ops->cursor_state.hot.x;
	cursor.hot.y = ops->cursor_state.hot.y;
	cursor.mask = ops->cursor_state.mask;
	cursor.enable = ops->cursor_state.enable;
	cursor.image.depth = 1;
	cursor.rop = ROP_XOR;

	if (info->fbops->fb_cursor)
		err = info->fbops->fb_cursor(info, &cursor);

	if (err)
		soft_cursor(info, &cursor);

	ops->cursor_reset = 0;
}
/*
 * Pan the display: swap/flip the x/y offsets into the rotated frame
 * before calling fb_pan_display(), then resync the cached var with
 * whatever the driver accepted.
 */
static int ccw_update_start(struct fb_info *info)
{
	struct fbcon_ops *ops = info->fbcon_par;
	u32 yoffset;
	u32 vyres = GETVYRES(ops->p->scrollmode, info);
	int err;

	yoffset = (vyres - info->var.yres) - ops->var.xoffset;
	ops->var.xoffset = ops->var.yoffset;
	ops->var.yoffset = yoffset;
	err = fb_pan_display(info, &ops->var);
	ops->var.xoffset = info->var.xoffset;
	ops->var.yoffset = info->var.yoffset;
	ops->var.vmode = info->var.vmode;
	return err;
}
/* Install the 270°-rotation drawing operations into @ops. */
void fbcon_rotate_ccw(struct fbcon_ops *ops)
{
	ops->update_start = ccw_update_start;
	ops->cursor       = ccw_cursor;
	ops->clear_margins = ccw_clear_margins;
	ops->putcs        = ccw_putcs;
	ops->clear        = ccw_clear;
	ops->bmove        = ccw_bmove;
}
EXPORT_SYMBOL(fbcon_rotate_ccw);
MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("Console Rotation (270 degrees) Support");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Howpathetic/ShooterU_kernel | drivers/uwb/rsv.c | 3980 | 27535 | /*
* UWB reservation management.
*
* Copyright (C) 2008 Cambridge Silicon Radio Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version
* 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kernel.h>
#include <linux/uwb.h>
#include <linux/slab.h>
#include <linux/random.h>
#include "uwb-internal.h"
static void uwb_rsv_timer(unsigned long arg);

/* Fixed-width, human-readable names for each reservation state,
 * indexed by enum uwb_rsv_state ("o" = owner side, "t" = target side). */
static const char *rsv_states[] = {
	[UWB_RSV_STATE_NONE]                 = "none            ",
	[UWB_RSV_STATE_O_INITIATED]          = "o initiated     ",
	[UWB_RSV_STATE_O_PENDING]            = "o pending       ",
	[UWB_RSV_STATE_O_MODIFIED]           = "o modified      ",
	[UWB_RSV_STATE_O_ESTABLISHED]        = "o established   ",
	[UWB_RSV_STATE_O_TO_BE_MOVED]        = "o to be moved   ",
	[UWB_RSV_STATE_O_MOVE_EXPANDING]     = "o move expanding",
	[UWB_RSV_STATE_O_MOVE_COMBINING]     = "o move combining",
	[UWB_RSV_STATE_O_MOVE_REDUCING]      = "o move reducing ",
	[UWB_RSV_STATE_T_ACCEPTED]           = "t accepted      ",
	[UWB_RSV_STATE_T_CONFLICT]           = "t conflict      ",
	[UWB_RSV_STATE_T_PENDING]            = "t pending       ",
	[UWB_RSV_STATE_T_DENIED]             = "t denied        ",
	[UWB_RSV_STATE_T_RESIZED]            = "t resized       ",
	[UWB_RSV_STATE_T_EXPANDING_ACCEPTED] = "t expanding acc ",
	[UWB_RSV_STATE_T_EXPANDING_CONFLICT] = "t expanding conf",
	[UWB_RSV_STATE_T_EXPANDING_PENDING]  = "t expanding pend",
	[UWB_RSV_STATE_T_EXPANDING_DENIED]   = "t expanding den ",
};

/* Names for the DRP reservation types, indexed by enum uwb_drp_type. */
static const char *rsv_types[] = {
	[UWB_DRP_TYPE_ALIEN_BP] = "alien-bp",
	[UWB_DRP_TYPE_HARD]     = "hard",
	[UWB_DRP_TYPE_SOFT]     = "soft",
	[UWB_DRP_TYPE_PRIVATE]  = "private",
	[UWB_DRP_TYPE_PCA]      = "pca",
};
/*
 * Return true when the reservation's state requires two DRP IEs in the
 * beacon (the expanding move states carry both the current and the
 * companion allocation).  States not listed default to false via
 * designated-initializer zeroing.
 */
bool uwb_rsv_has_two_drp_ies(struct uwb_rsv *rsv)
{
	static const bool has_two_drp_ies[] = {
		[UWB_RSV_STATE_O_INITIATED]               = false,
		[UWB_RSV_STATE_O_PENDING]                 = false,
		[UWB_RSV_STATE_O_MODIFIED]                = false,
		[UWB_RSV_STATE_O_ESTABLISHED]             = false,
		[UWB_RSV_STATE_O_TO_BE_MOVED]             = false,
		[UWB_RSV_STATE_O_MOVE_COMBINING]          = false,
		[UWB_RSV_STATE_O_MOVE_REDUCING]           = false,
		[UWB_RSV_STATE_O_MOVE_EXPANDING]          = true,
		[UWB_RSV_STATE_T_ACCEPTED]                = false,
		[UWB_RSV_STATE_T_CONFLICT]                = false,
		[UWB_RSV_STATE_T_PENDING]                 = false,
		[UWB_RSV_STATE_T_DENIED]                  = false,
		[UWB_RSV_STATE_T_RESIZED]                 = false,
		[UWB_RSV_STATE_T_EXPANDING_ACCEPTED]      = true,
		[UWB_RSV_STATE_T_EXPANDING_CONFLICT]      = true,
		[UWB_RSV_STATE_T_EXPANDING_PENDING]       = true,
		[UWB_RSV_STATE_T_EXPANDING_DENIED]        = true,
	};

	/* NOTE(review): rsv->state is used unchecked as an index here;
	 * callers presumably guarantee it is a valid state — confirm */
	return has_two_drp_ies[rsv->state];
}
/**
* uwb_rsv_state_str - return a string for a reservation state
* @state: the reservation state.
*/
const char *uwb_rsv_state_str(enum uwb_rsv_state state)
{
if (state < UWB_RSV_STATE_NONE || state >= UWB_RSV_STATE_LAST)
return "unknown";
return rsv_states[state];
}
EXPORT_SYMBOL_GPL(uwb_rsv_state_str);
/**
* uwb_rsv_type_str - return a string for a reservation type
* @type: the reservation type
*/
const char *uwb_rsv_type_str(enum uwb_drp_type type)
{
if (type < UWB_DRP_TYPE_ALIEN_BP || type > UWB_DRP_TYPE_PCA)
return "invalid";
return rsv_types[type];
}
EXPORT_SYMBOL_GPL(uwb_rsv_type_str);
/*
 * Log a one-line debug summary of a reservation: owner address, target
 * address (device or bare DevAddr) and current state.
 */
void uwb_rsv_dump(char *text, struct uwb_rsv *rsv)
{
	struct device *dev = &rsv->rc->uwb_dev.dev;
	struct uwb_dev_addr devaddr;
	char owner[UWB_ADDR_STRSIZE], target[UWB_ADDR_STRSIZE];

	uwb_dev_addr_print(owner, sizeof(owner), &rsv->owner->dev_addr);
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		devaddr = rsv->target.dev->dev_addr;
	else
		devaddr = rsv->target.devaddr;
	uwb_dev_addr_print(target, sizeof(target), &devaddr);

	dev_dbg(dev, "rsv %s %s -> %s: %s\n",
		text, owner, target, uwb_rsv_state_str(rsv->state));
}
/* kref release callback: free the reservation when the last reference
 * is dropped (see uwb_rsv_put()). */
static void uwb_rsv_release(struct kref *kref)
{
	struct uwb_rsv *rsv = container_of(kref, struct uwb_rsv, kref);

	kfree(rsv);
}
/* Take a reference on a reservation. */
void uwb_rsv_get(struct uwb_rsv *rsv)
{
	kref_get(&rsv->kref);
}
/* Drop a reference; frees the reservation when it was the last one. */
void uwb_rsv_put(struct uwb_rsv *rsv)
{
	kref_put(&rsv->kref, uwb_rsv_release);
}
/*
* Get a free stream index for a reservation.
*
* If the target is a DevAddr (e.g., a WUSB cluster reservation) then
* the stream is allocated from a pool of per-RC stream indexes,
* otherwise a unique stream index for the target is selected.
*/
static int uwb_rsv_get_stream(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long *streams_bm;
	int stream;

	/* pick the bitmap the stream index is allocated from */
	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEV:
		streams_bm = rsv->target.dev->streams;
		break;
	case UWB_RSV_TARGET_DEVADDR:
		streams_bm = rc->uwb_dev.streams;
		break;
	default:
		return -EINVAL;
	}

	/* claim the first free index, or fail if all are in use */
	stream = find_first_zero_bit(streams_bm, UWB_NUM_STREAMS);
	if (stream >= UWB_NUM_STREAMS)
		return -EBUSY;

	rsv->stream = stream;
	set_bit(stream, streams_bm);

	dev_dbg(dev, "get stream %d\n", rsv->stream);

	return 0;
}
/* Return the reservation's stream index to the bitmap it was
 * allocated from (counterpart of uwb_rsv_get_stream()). */
static void uwb_rsv_put_stream(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned long *streams_bm;

	switch (rsv->target.type) {
	case UWB_RSV_TARGET_DEV:
		streams_bm = rsv->target.dev->streams;
		break;
	case UWB_RSV_TARGET_DEVADDR:
		streams_bm = rc->uwb_dev.streams;
		break;
	default:
		return;
	}

	clear_bit(rsv->stream, streams_bm);

	dev_dbg(dev, "put stream %d\n", rsv->stream);
}
/*
 * DRP backoff-window timer.  Fired when the current window has
 * elapsed: extra-MAS reservation is re-enabled, and after four expiries
 * the window is reset to its minimum and the backoff procedure ends.
 * Finally re-evaluate reservations waiting to be moved.
 */
void uwb_rsv_backoff_win_timer(unsigned long arg)
{
	struct uwb_drp_backoff_win *bow = (struct uwb_drp_backoff_win *)arg;
	struct uwb_rc *rc = container_of(bow, struct uwb_rc, bow);
	struct device *dev = &rc->uwb_dev.dev;

	bow->can_reserve_extra_mases = true;
	if (bow->total_expired <= 4) {
		bow->total_expired++;
	} else {
		/* after 4 backoff window has expired we can exit from
		 * the backoff procedure */
		bow->total_expired = 0;
		bow->window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
	}
	dev_dbg(dev, "backoff_win_timer total_expired=%d, n=%d\n: ", bow->total_expired, bow->n);

	/* try to relocate all the "to be moved" relocations */
	uwb_rsv_handle_drp_avail_change(rc);
}
/*
 * Double the DRP backoff window (binary exponential backoff, as used
 * on DRP conflicts), pick a random slot count n within the new window
 * and (re)arm the backoff timer for n superframes.  Extra-MAS
 * reservation is disabled until the timer fires.
 */
void uwb_rsv_backoff_win_increment(struct uwb_rc *rc)
{
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	unsigned timeout_us;

	dev_dbg(dev, "backoff_win_increment: window=%d\n", bow->window);

	bow->can_reserve_extra_mases = false;

	/* NOTE(review): the cap uses == rather than >=; this relies on
	 * window only ever changing by doubling from the minimum —
	 * confirm before touching */
	if((bow->window << 1) == UWB_DRP_BACKOFF_WIN_MAX)
		return;

	bow->window <<= 1;
	bow->n = random32() & (bow->window - 1);
	dev_dbg(dev, "new_window=%d, n=%d\n: ", bow->window, bow->n);

	/* reset the timer associated variables */
	timeout_us = bow->n * UWB_SUPERFRAME_LENGTH_US;
	bow->total_expired = 0;
	mod_timer(&bow->timer, jiffies + usecs_to_jiffies(timeout_us));
}
/*
 * (Re)arm the reservation's watchdog timer based on its state: normally
 * UWB_MAX_LOST_BEACONS superframes, shortened to 1 for in-progress
 * multicast negotiations and disabled entirely for established
 * multicast reservations (which get no responses to time out on).
 */
static void uwb_rsv_stroke_timer(struct uwb_rsv *rsv)
{
	int sframes = UWB_MAX_LOST_BEACONS;

	/*
	 * Multicast reservations can become established within 1
	 * super frame and should not be terminated if no response is
	 * received.
	 */
	if (rsv->is_multicast) {
		if (rsv->state == UWB_RSV_STATE_O_INITIATED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_EXPANDING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_COMBINING
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING)
			sframes = 1;
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED)
			sframes = 0;
	}

	if (sframes > 0) {
		/*
		 * Add an additional 2 superframes to account for the
		 * time to send the SET DRP IE command.
		 */
		unsigned timeout_us = (sframes + 2) * UWB_SUPERFRAME_LENGTH_US;
		mod_timer(&rsv->timer, jiffies + usecs_to_jiffies(timeout_us));
	} else
		del_timer(&rsv->timer);
}
/*
* Update a reservations state, and schedule an update of the
* transmitted DRP IEs.
*/
/*
 * Update a reservations state, and schedule an update of the
 * transmitted DRP IEs.
 */
static void uwb_rsv_state_update(struct uwb_rsv *rsv,
				 enum uwb_rsv_state new_state)
{
	rsv->state = new_state;
	rsv->ie_valid = false;	/* DRP IE must be regenerated */

	uwb_rsv_dump("SU", rsv);

	uwb_rsv_stroke_timer(rsv);
	uwb_rsv_sched_update(rsv->rc);
}
static void uwb_rsv_callback(struct uwb_rsv *rsv)
{
if (rsv->callback)
rsv->callback(rsv);
}
/*
 * Transition a reservation to @new_state, doing the DRP availability
 * bookkeeping each transition requires, and notify the PAL via the
 * callback on the transitions it cares about (NONE, ESTABLISHED,
 * T_ACCEPTED).  If the state is unchanged, only stroke the timeout
 * timer for the states that are kept alive by periodic DRP IEs.
 */
void uwb_rsv_set_state(struct uwb_rsv *rsv, enum uwb_rsv_state new_state)
{
	struct uwb_rsv_move *mv = &rsv->mv;

	if (rsv->state == new_state) {
		switch (rsv->state) {
		case UWB_RSV_STATE_O_ESTABLISHED:
		case UWB_RSV_STATE_O_MOVE_EXPANDING:
		case UWB_RSV_STATE_O_MOVE_COMBINING:
		case UWB_RSV_STATE_O_MOVE_REDUCING:
		case UWB_RSV_STATE_T_ACCEPTED:
		case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		case UWB_RSV_STATE_T_RESIZED:
		case UWB_RSV_STATE_NONE:
			uwb_rsv_stroke_timer(rsv);
			break;
		default:
			/* Expecting a state transition so leave timer
			   as-is. */
			break;
		}
		return;
	}

	uwb_rsv_dump("SC", rsv);

	switch (new_state) {
	case UWB_RSV_STATE_NONE:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_NONE);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_INITIATED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_INITIATED);
		break;
	case UWB_RSV_STATE_O_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_PENDING);
		break;
	case UWB_RSV_STATE_O_MODIFIED:
		/* in the companion there are the MASes to drop */
		bitmap_andnot(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MODIFIED);
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		/* a modify/reduce that just completed still holds the
		 * companion MAS pending: release it now */
		if (rsv->state == UWB_RSV_STATE_O_MODIFIED
		    || rsv->state == UWB_RSV_STATE_O_MOVE_REDUCING) {
			uwb_drp_avail_release(rsv->rc, &mv->companion_mas);
			rsv->needs_release_companion_mas = false;
		}
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_ESTABLISHED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		rsv->needs_release_companion_mas = true;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		/* fold the companion MAS into the reservation proper */
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		bitmap_or(rsv->mas.bm, rsv->mas.bm, mv->companion_mas.bm, UWB_NUM_MAS);
		rsv->mas.safe += mv->companion_mas.safe;
		rsv->mas.unsafe += mv->companion_mas.unsafe;
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		/* companion = MAS being given up (current minus final) */
		bitmap_andnot(mv->companion_mas.bm, rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
		rsv->needs_release_companion_mas = true;
		rsv->mas.safe = mv->final_mas.safe;
		rsv->mas.unsafe = mv->final_mas.unsafe;
		bitmap_copy(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS);
		bitmap_copy(rsv->mas.unsafe_bm, mv->final_mas.unsafe_bm, UWB_NUM_MAS);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
		break;
	case UWB_RSV_STATE_T_ACCEPTED:
	case UWB_RSV_STATE_T_RESIZED:
		rsv->needs_release_companion_mas = false;
		uwb_drp_avail_reserve(rsv->rc, &rsv->mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_rsv_callback(rsv);
		break;
	case UWB_RSV_STATE_T_DENIED:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_DENIED);
		break;
	case UWB_RSV_STATE_T_CONFLICT:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_CONFLICT);
		break;
	case UWB_RSV_STATE_T_PENDING:
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_PENDING);
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		rsv->needs_release_companion_mas = true;
		uwb_drp_avail_reserve(rsv->rc, &mv->companion_mas);
		uwb_rsv_state_update(rsv, UWB_RSV_STATE_T_EXPANDING_ACCEPTED);
		break;
	default:
		dev_err(&rsv->rc->uwb_dev.dev, "unhandled state: %s (%d)\n",
			uwb_rsv_state_str(new_state), new_state);
	}
}
/*
 * Work item run (in process context) when a reservation's timeout
 * timer fires.  Multicast owner reservations simply advance to their
 * next state since no responses are expected; expanding targets drop
 * only the companion MAS first; everything else is removed.
 */
static void uwb_rsv_handle_timeout_work(struct work_struct *work)
{
	struct uwb_rsv *rsv = container_of(work, struct uwb_rsv,
					   handle_timeout_work);
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);

	uwb_rsv_dump("TO", rsv);

	switch (rsv->state) {
	case UWB_RSV_STATE_O_INITIATED:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_EXPANDING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_COMBINING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_COMBINING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_REDUCING);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_MOVE_REDUCING:
		if (rsv->is_multicast) {
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_ESTABLISHED);
			goto unlock;
		}
		break;
	case UWB_RSV_STATE_O_ESTABLISHED:
		/* established multicast reservations never time out */
		if (rsv->is_multicast)
			goto unlock;
		break;
	case UWB_RSV_STATE_T_EXPANDING_ACCEPTED:
		/*
		 * The time out could be for the main or of the
		 * companion DRP, assume it's for the companion and
		 * drop that first.  A further time out is required to
		 * drop the main.
		 */
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_T_ACCEPTED);
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
		goto unlock;
	default:
		break;
	}

	uwb_rsv_remove(rsv);

unlock:
	mutex_unlock(&rc->rsvs_mutex);
}
/*
 * Allocate and minimally initialize a reservation: list heads,
 * refcount, timeout timer and the work item that handles timer expiry.
 * Returns NULL on allocation failure.
 */
static struct uwb_rsv *uwb_rsv_alloc(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv;

	rsv = kzalloc(sizeof(struct uwb_rsv), GFP_KERNEL);
	if (!rsv)
		return NULL;

	INIT_LIST_HEAD(&rsv->rc_node);
	INIT_LIST_HEAD(&rsv->pal_node);
	kref_init(&rsv->kref);
	/* timer expiry is deferred to the workqueue via uwb_rsv_timer() */
	init_timer(&rsv->timer);
	rsv->timer.function = uwb_rsv_timer;
	rsv->timer.data = (unsigned long)rsv;

	rsv->rc = rc;
	INIT_WORK(&rsv->handle_timeout_work, uwb_rsv_handle_timeout_work);

	return rsv;
}
/**
* uwb_rsv_create - allocate and initialize a UWB reservation structure
* @rc: the radio controller
* @cb: callback to use when the reservation completes or terminates
* @pal_priv: data private to the PAL to be passed in the callback
*
* The callback is called when the state of the reservation changes from:
*
* - pending to accepted
 * - pending to denied
* - accepted to terminated
* - pending to terminated
*/
struct uwb_rsv *uwb_rsv_create(struct uwb_rc *rc, uwb_rsv_cb_f cb, void *pal_priv)
{
	struct uwb_rsv *rsv = uwb_rsv_alloc(rc);

	/* NULL propagates straight to the caller on allocation failure */
	if (rsv != NULL) {
		rsv->callback = cb;
		rsv->pal_priv = pal_priv;
	}
	return rsv;
}
EXPORT_SYMBOL_GPL(uwb_rsv_create);
/*
 * Tear a reservation down: force it to NONE, release any pending
 * companion MAS and the reservation's own MAS, return the stream index
 * if we are the owner, drop the device references, unlink it from the
 * radio controller's list and drop the list's reference.
 */
void uwb_rsv_remove(struct uwb_rsv *rsv)
{
	uwb_rsv_dump("RM", rsv);

	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);

	if (rsv->needs_release_companion_mas)
		uwb_drp_avail_release(rsv->rc, &rsv->mv.companion_mas);
	uwb_drp_avail_release(rsv->rc, &rsv->mas);

	if (uwb_rsv_is_owner(rsv))
		uwb_rsv_put_stream(rsv);

	uwb_dev_put(rsv->owner);
	/* only DEV targets hold a device reference */
	if (rsv->target.type == UWB_RSV_TARGET_DEV)
		uwb_dev_put(rsv->target.dev);

	list_del_init(&rsv->rc_node);
	uwb_rsv_put(rsv);
}
/**
* uwb_rsv_destroy - free a UWB reservation structure
* @rsv: the reservation to free
*
* The reservation must already be terminated.
*/
void uwb_rsv_destroy(struct uwb_rsv *rsv)
{
	/* drops the caller's reference; memory is freed when the last
	 * reference (timer/work/list) goes away */
	uwb_rsv_put(rsv);
}
EXPORT_SYMBOL_GPL(uwb_rsv_destroy);
/**
 * uwb_rsv_establish - start a reservation establishment
* @rsv: the reservation
*
* The PAL should fill in @rsv's owner, target, type, max_mas,
* min_mas, max_interval and is_multicast fields. If the target is a
* uwb_dev it must be referenced.
*
* The reservation's callback will be called when the reservation is
* accepted, denied or times out.
*/
int uwb_rsv_establish(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_mas_bm available;
	int ret;

	mutex_lock(&rc->rsvs_mutex);

	/* a stream index is needed before MAS can be allocated */
	ret = uwb_rsv_get_stream(rsv);
	if (ret)
		goto out;

	rsv->tiebreaker = random32() & 1;
	/* get available mas bitmap */
	uwb_drp_available(rc, &available);

	ret = uwb_rsv_find_best_allocation(rsv, &available, &rsv->mas);
	if (ret == UWB_RSV_ALLOC_NOT_FOUND) {
		ret = -EBUSY;
		uwb_rsv_put_stream(rsv);
		goto out;
	}

	ret = uwb_drp_avail_reserve_pending(rc, &rsv->mas);
	if (ret != 0) {
		uwb_rsv_put_stream(rsv);
		goto out;
	}

	uwb_rsv_get(rsv);	/* reference held by rc->reservations */
	list_add_tail(&rsv->rc_node, &rc->reservations);
	rsv->owner = &rc->uwb_dev;
	uwb_dev_get(rsv->owner);
	uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_INITIATED);
out:
	mutex_unlock(&rc->rsvs_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(uwb_rsv_establish);
/**
* uwb_rsv_modify - modify an already established reservation
* @rsv: the reservation to modify
* @max_mas: new maximum MAS to reserve
* @min_mas: new minimum MAS to reserve
* @max_interval: new max_interval to use
*
* FIXME: implement this once there are PALs that use it.
*/
int uwb_rsv_modify(struct uwb_rsv *rsv, int max_mas, int min_mas, int max_interval)
{
	/* intentionally unimplemented; see the FIXME in the kernel-doc above */
	return -ENOSYS;
}
EXPORT_SYMBOL_GPL(uwb_rsv_modify);
/*
* move an already established reservation (rc->rsvs_mutex must to be
 * taken when this function is called)
*/
int uwb_rsv_try_move(struct uwb_rsv *rsv, struct uwb_mas_bm *available)
{
	struct uwb_rc *rc = rsv->rc;
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct device *dev = &rc->uwb_dev.dev;
	struct uwb_rsv_move *mv;
	int ret = 0;

	/* moves are suppressed while the DRP backoff window is active */
	if (bow->can_reserve_extra_mases == false)
		return -EBUSY;

	mv = &rsv->mv;

	if (uwb_rsv_find_best_allocation(rsv, available, &mv->final_mas) == UWB_RSV_ALLOC_FOUND) {

		if (!bitmap_equal(rsv->mas.bm, mv->final_mas.bm, UWB_NUM_MAS)) {
			/* We want to move the reservation */
			bitmap_andnot(mv->companion_mas.bm, mv->final_mas.bm, rsv->mas.bm, UWB_NUM_MAS);
			/* NOTE(review): the return value of
			 * uwb_drp_avail_reserve_pending() is ignored, so a
			 * failed pending reservation still starts the move —
			 * confirm this is intentional. */
			uwb_drp_avail_reserve_pending(rc, &mv->companion_mas);
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_O_MOVE_EXPANDING);
		}
	} else {
		dev_dbg(dev, "new allocation not found\n");
	}

	return ret;
}
/* It will try to move every reservation in state O_ESTABLISHED giving
* to the MAS allocator algorithm an availability that is the real one
* plus the allocation already established from the reservation. */
void uwb_rsv_handle_drp_avail_change(struct uwb_rc *rc)
{
	struct uwb_drp_backoff_win *bow = &rc->bow;
	struct uwb_rsv *rsv;
	struct uwb_mas_bm mas;

	/* no moves while the DRP backoff window is active */
	if (bow->can_reserve_extra_mases == false)
		return;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->state == UWB_RSV_STATE_O_ESTABLISHED ||
		    rsv->state == UWB_RSV_STATE_O_TO_BE_MOVED) {
			/* offer the allocator the global availability plus
			 * the MAS this reservation already holds */
			uwb_drp_available(rc, &mas);
			bitmap_or(mas.bm, mas.bm, rsv->mas.bm, UWB_NUM_MAS);
			uwb_rsv_try_move(rsv, &mas);
		}
	}
}
/**
* uwb_rsv_terminate - terminate an established reservation
* @rsv: the reservation to terminate
*
* A reservation is terminated by removing the DRP IE from the beacon,
* the other end will consider the reservation to be terminated when
* it does not see the DRP IE for at least mMaxLostBeacons.
*
* If applicable, the reference to the target uwb_dev will be released.
*/
void uwb_rsv_terminate(struct uwb_rsv *rsv)
{
	struct uwb_rc *rc = rsv->rc;

	mutex_lock(&rc->rsvs_mutex);
	/* NONE invalidates the DRP IE and fires the PAL callback */
	if (rsv->state != UWB_RSV_STATE_NONE)
		uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
	mutex_unlock(&rc->rsvs_mutex);
}
EXPORT_SYMBOL_GPL(uwb_rsv_terminate);
/**
* uwb_rsv_accept - accept a new reservation from a peer
* @rsv: the reservation
* @cb: call back for reservation changes
* @pal_priv: data to be passed in the above call back
*
* Reservation requests from peers are denied unless a PAL accepts it
* by calling this function.
*
 * The PAL must call uwb_rsv_destroy() for all accepted reservations before
* calling uwb_pal_unregister().
*/
void uwb_rsv_accept(struct uwb_rsv *rsv, uwb_rsv_cb_f cb, void *pal_priv)
{
	uwb_rsv_get(rsv);	/* reference held until uwb_rsv_destroy() */

	rsv->callback = cb;
	rsv->pal_priv = pal_priv;
	/* Set directly rather than via uwb_rsv_set_state(), so no DRP IE
	 * update or timer stroke happens here; presumably the caller
	 * context (uwb_rsv_new_target()) takes care of that — confirm. */
	rsv->state = UWB_RSV_STATE_T_ACCEPTED;
}
EXPORT_SYMBOL_GPL(uwb_rsv_accept);
/*
* Is a received DRP IE for this reservation?
*/
static bool uwb_rsv_match(struct uwb_rsv *rsv, struct uwb_dev *src,
struct uwb_ie_drp *drp_ie)
{
struct uwb_dev_addr *rsv_src;
int stream;
stream = uwb_ie_drp_stream_index(drp_ie);
if (rsv->stream != stream)
return false;
switch (rsv->target.type) {
case UWB_RSV_TARGET_DEVADDR:
return rsv->stream == stream;
case UWB_RSV_TARGET_DEV:
if (uwb_ie_drp_owner(drp_ie))
rsv_src = &rsv->owner->dev_addr;
else
rsv_src = &rsv->target.dev->dev_addr;
return uwb_dev_addr_cmp(&src->dev_addr, rsv_src) == 0;
}
return false;
}
/*
 * A peer has begun establishing a reservation with us as target:
 * allocate a local uwb_rsv mirroring the DRP IE, offer it to the
 * registered PALs, and accept or deny accordingly.
 */
static struct uwb_rsv *uwb_rsv_new_target(struct uwb_rc *rc,
					  struct uwb_dev *src,
					  struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;
	struct uwb_pal *pal;
	enum uwb_rsv_state state;

	rsv = uwb_rsv_alloc(rc);
	if (!rsv)
		return NULL;

	rsv->rc = rc;	/* already set by uwb_rsv_alloc(); harmless */
	rsv->owner = src;
	uwb_dev_get(rsv->owner);
	rsv->target.type = UWB_RSV_TARGET_DEV;
	rsv->target.dev = &rc->uwb_dev;
	uwb_dev_get(&rc->uwb_dev);
	rsv->type = uwb_ie_drp_type(drp_ie);
	rsv->stream = uwb_ie_drp_stream_index(drp_ie);
	uwb_drp_ie_to_bm(&rsv->mas, drp_ie);

	/*
	 * See if any PALs are interested in this reservation. If not,
	 * deny the request.
	 */
	rsv->state = UWB_RSV_STATE_T_DENIED;
	mutex_lock(&rc->uwb_dev.mutex);
	list_for_each_entry(pal, &rc->pals, node) {
		if (pal->new_rsv)
			pal->new_rsv(pal, rsv);
		if (rsv->state == UWB_RSV_STATE_T_ACCEPTED)
			break;
	}
	mutex_unlock(&rc->uwb_dev.mutex);

	list_add_tail(&rsv->rc_node, &rc->reservations);
	/* capture the PAL's decision, then run the real transition below */
	state = rsv->state;
	rsv->state = UWB_RSV_STATE_NONE;

	if (state == UWB_RSV_STATE_T_ACCEPTED
	    && uwb_drp_avail_reserve_pending(rc, &rsv->mas) == -EBUSY) {
		/* FIXME: do something sensible here */
	} else {
		uwb_rsv_set_state(rsv, state);
	}

	return rsv;
}
/**
* uwb_rsv_get_usable_mas - get the bitmap of the usable MAS of a reservations
* @rsv: the reservation.
* @mas: returns the available MAS.
*
* The usable MAS of a reservation may be less than the negotiated MAS
* if alien BPs are present.
*/
void uwb_rsv_get_usable_mas(struct uwb_rsv *rsv, struct uwb_mas_bm *mas)
{
	bitmap_zero(mas->bm, UWB_NUM_MAS);
	/* usable = negotiated MAS minus those blocked by alien BPs */
	bitmap_andnot(mas->bm, rsv->mas.bm, rsv->rc->cnflt_alien_bitmap.bm, UWB_NUM_MAS);
}
EXPORT_SYMBOL_GPL(uwb_rsv_get_usable_mas);
/**
* uwb_rsv_find - find a reservation for a received DRP IE.
* @rc: the radio controller
* @src: source of the DRP IE
* @drp_ie: the DRP IE
*
* If the reservation cannot be found and the DRP IE is from a peer
* attempting to establish a new reservation, create a new reservation
* and add it to the list.
*/
struct uwb_rsv *uwb_rsv_find(struct uwb_rc *rc, struct uwb_dev *src,
			     struct uwb_ie_drp *drp_ie)
{
	struct uwb_rsv *rsv;

	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (uwb_rsv_match(rsv, src, drp_ie))
			return rsv;
	}

	/* an owner DRP IE with no matching reservation is a new request
	 * targeted at us */
	if (uwb_ie_drp_owner(drp_ie))
		return uwb_rsv_new_target(rc, src, drp_ie);

	return NULL;
}
/*
* Go through all the reservations and check for timeouts and (if
* necessary) update their DRP IEs.
*
* FIXME: look at building the SET_DRP_IE command here rather than
* having to rescan the list in uwb_rc_send_all_drp_ie().
*/
/* Regenerate every stale DRP IE; returns true if any were updated. */
static bool uwb_rsv_update_all(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv, *t;
	bool ie_updated = false;

	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		if (!rsv->ie_valid) {
			uwb_drp_ie_update(rsv);
			ie_updated = true;
		}
	}

	return ie_updated;
}
/* Queue the DRP IE update work, delayed by one zone's worth of MAS. */
void uwb_rsv_queue_update(struct uwb_rc *rc)
{
	const unsigned long delay =
		usecs_to_jiffies(UWB_MAS_LENGTH_US * UWB_MAS_PER_ZONE);

	queue_delayed_work(rc->rsv_workq, &rc->rsv_update_work, delay);
}
/**
* uwb_rsv_sched_update - schedule an update of the DRP IEs
* @rc: the radio controller.
*
* To improve performance and ensure correctness with [ECMA-368] the
* number of SET-DRP-IE commands that are done are limited.
*
* DRP IEs update come from two sources: DRP events from the hardware
 * which all occur at the beginning of the superframe ('synchronous'
* events) and reservation establishment/termination requests from
* PALs or timers ('asynchronous' events).
*
* A delayed work ensures that all the synchronous events result in
* one SET-DRP-IE command.
*
* Additional logic (the set_drp_ie_pending and rsv_updated_postponed
 * flags) will prevent an asynchronous event starting a SET-DRP-IE
* command if one is currently awaiting a response.
*
 * FIXME: this does leave a window where an asynchronous event can delay
* the SET-DRP-IE for a synchronous event by one superframe.
*/
void uwb_rsv_sched_update(struct uwb_rc *rc)
{
	spin_lock_bh(&rc->rsvs_lock);
	if (!delayed_work_pending(&rc->rsv_update_work)) {
		if (rc->set_drp_ie_pending > 0) {
			/* a SET-DRP-IE is already in flight: just record
			 * that another update is wanted (presumably
			 * re-queued when the command completes — confirm
			 * against the DRP event handler) */
			rc->set_drp_ie_pending++;
			goto unlock;
		}
		uwb_rsv_queue_update(rc);
	}
unlock:
	spin_unlock_bh(&rc->rsvs_lock);
}
/*
* Update DRP IEs and, if necessary, the DRP Availability IE and send
* the updated IEs to the radio controller.
*/
static void uwb_rsv_update_work(struct work_struct *work)
{
	struct uwb_rc *rc = container_of(work, struct uwb_rc,
					 rsv_update_work.work);
	bool ie_updated;

	mutex_lock(&rc->rsvs_mutex);

	ie_updated = uwb_rsv_update_all(rc);

	/* refresh the DRP Availability IE if it went stale too */
	if (!rc->drp_avail.ie_valid) {
		uwb_drp_avail_ie_update(rc);
		ie_updated = true;
	}

	/* only issue the command when none is already awaiting a reply */
	if (ie_updated && (rc->set_drp_ie_pending == 0))
		uwb_rc_send_all_drp_ie(rc);

	mutex_unlock(&rc->rsvs_mutex);
}
/*
 * Notify every non-alien-BP reservation that the set of usable MAS may
 * have changed because of an alien beacon period.
 */
static void uwb_rsv_alien_bp_work(struct work_struct *work)
{
	struct uwb_rc *rc = container_of(work, struct uwb_rc,
					 rsv_alien_bp_work.work);
	struct uwb_rsv *rsv;

	mutex_lock(&rc->rsvs_mutex);
	list_for_each_entry(rsv, &rc->reservations, rc_node) {
		if (rsv->type != UWB_DRP_TYPE_ALIEN_BP) {
			/* Use the NULL-checking helper: not every
			 * reservation has a callback registered, and
			 * calling a NULL function pointer would oops. */
			uwb_rsv_callback(rsv);
		}
	}
	mutex_unlock(&rc->rsvs_mutex);
}
/* Timer callback: defer timeout handling to process context. */
static void uwb_rsv_timer(unsigned long arg)
{
	struct uwb_rsv *rsv = (struct uwb_rsv *)arg;

	queue_work(rsv->rc->rsv_workq, &rsv->handle_timeout_work);
}
/**
* uwb_rsv_remove_all - remove all reservations
* @rc: the radio controller
*
* A DRP IE update is not done.
*/
void uwb_rsv_remove_all(struct uwb_rc *rc)
{
	struct uwb_rsv *rsv, *t;

	mutex_lock(&rc->rsvs_mutex);
	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		if (rsv->state != UWB_RSV_STATE_NONE)
			uwb_rsv_set_state(rsv, UWB_RSV_STATE_NONE);
		del_timer_sync(&rsv->timer);
	}
	/* Cancel any postponed update. */
	rc->set_drp_ie_pending = 0;
	mutex_unlock(&rc->rsvs_mutex);
	/* flush with the mutex dropped: the timeout work takes it too */
	cancel_delayed_work_sync(&rc->rsv_update_work);
	flush_workqueue(rc->rsv_workq);

	mutex_lock(&rc->rsvs_mutex);
	list_for_each_entry_safe(rsv, t, &rc->reservations, rc_node) {
		uwb_rsv_remove(rsv);
	}
	mutex_unlock(&rc->rsvs_mutex);
}
/* One-time initialization of the radio controller's reservation state. */
void uwb_rsv_init(struct uwb_rc *rc)
{
	INIT_LIST_HEAD(&rc->reservations);
	INIT_LIST_HEAD(&rc->cnflt_alien_list);
	mutex_init(&rc->rsvs_mutex);
	spin_lock_init(&rc->rsvs_lock);
	INIT_DELAYED_WORK(&rc->rsv_update_work, uwb_rsv_update_work);
	INIT_DELAYED_WORK(&rc->rsv_alien_bp_work, uwb_rsv_alien_bp_work);
	rc->bow.can_reserve_extra_mases = true;
	rc->bow.total_expired = 0;
	/* half of MIN, so the first uwb_rsv_backoff_win_increment()
	 * doubles it to UWB_DRP_BACKOFF_WIN_MIN */
	rc->bow.window = UWB_DRP_BACKOFF_WIN_MIN >> 1;
	init_timer(&rc->bow.timer);
	rc->bow.timer.function = uwb_rsv_backoff_win_timer;
	rc->bow.timer.data = (unsigned long)&rc->bow;

	/* assumes rc->uwb_dev.streams was zero-initialized at allocation:
	 * complementing it marks every stream index free — TODO confirm */
	bitmap_complement(rc->uwb_dev.streams, rc->uwb_dev.streams, UWB_NUM_STREAMS);
}
/* Create the per-controller single-threaded reservation workqueue. */
int uwb_rsv_setup(struct uwb_rc *rc)
{
	char name[16];

	snprintf(name, sizeof(name), "%s_rsvd", dev_name(&rc->uwb_dev.dev));
	rc->rsv_workq = create_singlethread_workqueue(name);

	return rc->rsv_workq ? 0 : -ENOMEM;
}
/* Tear down all reservations and destroy the reservation workqueue. */
void uwb_rsv_cleanup(struct uwb_rc *rc)
{
	uwb_rsv_remove_all(rc);
	destroy_workqueue(rc->rsv_workq);
}
| gpl-2.0 |
bbelos/YP-G1_GB_Kernel | drivers/isdn/hisax/bkm_a8.c | 4236 | 11771 | /* $Id: bkm_a8.c,v 1.22.2.4 2004/01/15 14:02:34 keil Exp $
*
* low level stuff for Scitel Quadro (4*S0, passive)
*
* Author Roland Klabunde
* Copyright by Roland Klabunde <R.Klabunde@Berkom.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/init.h>
#include "hisax.h"
#include "isac.h"
#include "ipac.h"
#include "hscx.h"
#include "isdnl1.h"
#include <linux/pci.h>
#include "bkm_ax.h"
#define ATTEMPT_PCI_REMAPPING /* Required for PLX rev 1 */
static const char sct_quadro_revision[] = "$Revision: 1.22.2.4 $";
static const char *sct_quadro_subtypes[] =
{
"",
"#1",
"#2",
"#3",
"#4"
};
#define wordout(addr,val) outw(val,addr)
#define wordin(addr) inw(addr)
/* Read an indirect register: latch @off through the ALE port, then
 * read the low byte from the data port. */
static inline u_char
readreg(unsigned int ale, unsigned int adr, u_char off)
{
	wordout(ale, off);
	return wordin(adr) & 0xFF;
}
/* Read @size bytes from the FIFO at @off into @data. */
static inline void
readfifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	u_char *end = data + size;

	wordout(ale, off);
	while (data < end)
		*data++ = wordin(adr) & 0xFF;
}
/* Write @data to an indirect register: latch @off through the ALE
 * port, then write the data port. */
static inline void
writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
{
	wordout(ale, off);
	wordout(adr, data);
}
/* Write @size bytes from @data into the FIFO at @off. */
static inline void
writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	u_char *end = data + size;

	wordout(ale, off);
	while (data < end)
		wordout(adr, *data++);
}
/* Interface functions */
/* Read an ISAC register (ISAC registers sit at offset 0x80 in the IPAC). */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
	return (readreg(cs->hw.ax.base, cs->hw.ax.data_adr, offset | 0x80));
}
/* Write an ISAC register (offset 0x80 window of the IPAC). */
static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, offset | 0x80, value);
}
/* Read @size bytes from the ISAC FIFO (offset 0x80). */
static void
ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	readfifo(cs->hw.ax.base, cs->hw.ax.data_adr, 0x80, data, size);
}
/* Write @size bytes to the ISAC FIFO (offset 0x80). */
static void
WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	writefifo(cs->hw.ax.base, cs->hw.ax.data_adr, 0x80, data, size);
}
/* Read an HSCX register; the second HSCX (hscx != 0) is at offset 0x40. */
static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
	return (readreg(cs->hw.ax.base, cs->hw.ax.data_adr, offset + (hscx ? 0x40 : 0)));
}
/* Write an HSCX register; the second HSCX (hscx != 0) is at offset 0x40. */
static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, offset + (hscx ? 0x40 : 0), value);
}
/* Set the specific ipac to active */
/* Unmask (0xc0) or mask all (0xff) the selected IPAC's interrupts. */
static void
set_ipac_active(struct IsdnCardState *cs, u_int active)
{
	/* set irq mask */
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK,
		 active ? 0xc0 : 0xff);
}
/*
* fast interrupt HSCX stuff goes here
*/
#define READHSCX(cs, nr, reg) readreg(cs->hw.ax.base, \
cs->hw.ax.data_adr, reg + (nr ? 0x40 : 0))
#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.ax.base, \
cs->hw.ax.data_adr, reg + (nr ? 0x40 : 0), data)
#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.ax.base, \
cs->hw.ax.data_adr, (nr ? 0x40 : 0), ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.ax.base, \
cs->hw.ax.data_adr, (nr ? 0x40 : 0), ptr, cnt)
#include "hscx_irq.c"
/*
 * Shared interrupt handler for one IPAC of the Quadro.  Re-reads ISTA
 * and loops (at most 5 extra passes) while interrupts are still
 * pending, dispatching HSCX and ISAC events, then re-arms the mask.
 */
static irqreturn_t
bkm_interrupt_ipac(int intno, void *dev_id)
{
	struct IsdnCardState *cs = dev_id;
	u_char ista, val, icnt = 5;
	u_long flags;

	spin_lock_irqsave(&cs->lock, flags);
	ista = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ISTA);
	if (!(ista & 0x3f)) { /* not this IPAC */
		spin_unlock_irqrestore(&cs->lock, flags);
		return IRQ_NONE;
	}
      Start_IPAC:
	if (cs->debug & L1_DEB_IPAC)
		debugl1(cs, "IPAC ISTA %02X", ista);
	if (ista & 0x0f) {
		/* merge the IPAC's per-channel HSCX bits into the HSCX
		 * ISTA value before dispatching */
		val = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, HSCX_ISTA + 0x40);
		if (ista & 0x01)
			val |= 0x01;
		if (ista & 0x04)
			val |= 0x02;
		if (ista & 0x08)
			val |= 0x04;
		if (val) {
			hscx_int_main(cs, val);
		}
	}
	if (ista & 0x20) {
		val = 0xfe & readreg(cs->hw.ax.base, cs->hw.ax.data_adr, ISAC_ISTA | 0x80);
		if (val) {
			isac_interrupt(cs, val);
		}
	}
	if (ista & 0x10) {
		val = 0x01;
		isac_interrupt(cs, val);
	}
	ista = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ISTA);
	if ((ista & 0x3f) && icnt) {
		icnt--;
		goto Start_IPAC;
	}
	if (!icnt)
		printk(KERN_WARNING "HiSax: Scitel Quadro (%s) IRQ LOOP\n",
		       sct_quadro_subtypes[cs->subtyp]);
	/* mask everything, then re-enable the IPAC interrupt sources */
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK, 0xFF);
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK, 0xC0);
	spin_unlock_irqrestore(&cs->lock, flags);
	return IRQ_HANDLED;
}
/* Release the card's I/O regions; subdevice #1 also owns the PLX window. */
static void
release_io_sct_quadro(struct IsdnCardState *cs)
{
	release_region(cs->hw.ax.base & 0xffffffc0, 128);
	if (cs->subtyp == SCT_1)
		release_region(cs->hw.ax.plx_adr, 64);
}
/* Gate the board interrupt by setting/clearing bits 0x41 in the PLX
 * register at offset 0x4C (presumably the PLX9050 INTCSR — confirm
 * against the datasheet). */
static void
enable_bkm_int(struct IsdnCardState *cs, unsigned bEnable)
{
	if (cs->typ == ISDN_CTYPE_SCT_QUADRO) {
		if (bEnable)
			wordout(cs->hw.ax.plx_adr + 0x4C, (wordin(cs->hw.ax.plx_adr + 0x4C) | 0x41));
		else
			wordout(cs->hw.ax.plx_adr + 0x4C, (wordin(cs->hw.ax.plx_adr + 0x4C) & ~0x41));
	}
}
/* Pulse the soft-reset bit (bit 2 of PLX register 0x50); only the
 * first subdevice controls the reset line. */
static void
reset_bkm(struct IsdnCardState *cs)
{
	if (cs->subtyp == SCT_1) {
		/* assert the soft reset */
		wordout(cs->hw.ax.plx_adr + 0x50, (wordin(cs->hw.ax.plx_adr + 0x50) & ~4));
		mdelay(10);
		/* Remove the soft reset */
		wordout(cs->hw.ax.plx_adr + 0x50, (wordin(cs->hw.ax.plx_adr + 0x50) | 4));
		mdelay(10);
	}
}
/*
 * Card control entry point for reset/release/init/test requests from
 * the HiSax core.  Always returns 0.
 */
static int
BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
	u_long flags;

	switch (mt) {
	case CARD_RESET:
		spin_lock_irqsave(&cs->lock, flags);
		/* Disable ints */
		set_ipac_active(cs, 0);
		enable_bkm_int(cs, 0);
		reset_bkm(cs);
		spin_unlock_irqrestore(&cs->lock, flags);
		return (0);
	case CARD_RELEASE:
		/* Sanity */
		spin_lock_irqsave(&cs->lock, flags);
		set_ipac_active(cs, 0);
		enable_bkm_int(cs, 0);
		spin_unlock_irqrestore(&cs->lock, flags);
		release_io_sct_quadro(cs);
		return (0);
	case CARD_INIT:
		spin_lock_irqsave(&cs->lock, flags);
		cs->debug |= L1_DEB_IPAC;
		set_ipac_active(cs, 1);
		inithscxisac(cs, 3);
		/* Enable ints */
		enable_bkm_int(cs, 1);
		spin_unlock_irqrestore(&cs->lock, flags);
		return (0);
	case CARD_TEST:
		return (0);
	}
	return (0);
}
/*
 * Reserve an I/O port range for the card.
 *
 * @adr: first port of the range
 * @len: number of ports
 *
 * Returns 0 on success, 1 (with a warning) if the range is busy.
 */
static int __devinit
sct_alloc_io(u_int adr, u_int len)
{
	if (!request_region(adr, len, "scitel")) {
		/* request_region(adr, len) spans adr .. adr+len-1
		 * inclusive, so print the correct last port */
		printk(KERN_WARNING
		       "HiSax: Scitel port %#x-%#x already in use\n",
		       adr, adr + len - 1);
		return (1);
	}
	return (0);
}
static struct pci_dev *dev_a8 __devinitdata = NULL;
static u16 sub_vendor_id __devinitdata = 0;
static u16 sub_sys_id __devinitdata = 0;
static u_char pci_bus __devinitdata = 0;
static u_char pci_device_fn __devinitdata = 0;
static u_char pci_irq __devinitdata = 0;
/*
 * Probe/configure one of the four Scitel Quadro subdevices.  The PLX
 * PCI device is located only once (for subdevice #1); the subsequent
 * subdevices reuse the cached dev_a8/sub_vendor_id/sub_sys_id statics.
 * Returns 1 on success, 0 on any failure.
 */
int __devinit
setup_sct_quadro(struct IsdnCard *card)
{
	struct IsdnCardState *cs = card->cs;
	char tmp[64];
	u_int found = 0;
	u_int pci_ioaddr1, pci_ioaddr2, pci_ioaddr3, pci_ioaddr4, pci_ioaddr5;

	strcpy(tmp, sct_quadro_revision);
	printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
	if (cs->typ == ISDN_CTYPE_SCT_QUADRO) {
		cs->subtyp = SCT_1;	/* Preset */
	} else
		return (0);

	/* Identify subtype by para[0] */
	if (card->para[0] >= SCT_1 && card->para[0] <= SCT_4)
		cs->subtyp = card->para[0];
	else {
		printk(KERN_WARNING "HiSax: Scitel Quadro: Invalid "
		       "subcontroller in configuration, default to 1\n");
		return (0);
	}
	/* subdevices 2-4 require that subdevice 1 already found the card */
	if ((cs->subtyp != SCT_1) && ((sub_sys_id != PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO) ||
				      (sub_vendor_id != PCI_VENDOR_ID_BERKOM)))
		return (0);
	if (cs->subtyp == SCT_1) {
		while ((dev_a8 = hisax_find_pci_device(PCI_VENDOR_ID_PLX,
						       PCI_DEVICE_ID_PLX_9050, dev_a8))) {

			sub_vendor_id = dev_a8->subsystem_vendor;
			sub_sys_id = dev_a8->subsystem_device;
			if ((sub_sys_id == PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO) &&
			    (sub_vendor_id == PCI_VENDOR_ID_BERKOM)) {
				if (pci_enable_device(dev_a8))
					return(0);
				pci_ioaddr1 = pci_resource_start(dev_a8, 1);
				pci_irq = dev_a8->irq;
				pci_bus = dev_a8->bus->number;
				pci_device_fn = dev_a8->devfn;
				found = 1;
				break;
			}
		}
		if (!found) {
			printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
			       "Card not found\n",
			       sct_quadro_subtypes[cs->subtyp]);
			return (0);
		}
#ifdef ATTEMPT_PCI_REMAPPING
/* HACK: PLX revision 1 bug: PLX address bit 7 must not be set */
		if ((pci_ioaddr1 & 0x80) && (dev_a8->revision == 1)) {
			printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
			       "PLX rev 1, remapping required!\n",
			       sct_quadro_subtypes[cs->subtyp]);
			/* Restart PCI negotiation */
			pci_write_config_dword(dev_a8, PCI_BASE_ADDRESS_1, (u_int) - 1);
			/* Move up by 0x80 byte */
			pci_ioaddr1 += 0x80;
			pci_ioaddr1 &= PCI_BASE_ADDRESS_IO_MASK;
			pci_write_config_dword(dev_a8, PCI_BASE_ADDRESS_1, pci_ioaddr1);
			dev_a8->resource[ 1].start = pci_ioaddr1;
		}
#endif /* End HACK */
	}
	if (!pci_irq) {		/* IRQ range check ?? */
		printk(KERN_WARNING "HiSax: Scitel Quadro (%s): No IRQ\n",
		       sct_quadro_subtypes[cs->subtyp]);
		return (0);
	}
	/* re-read all five BARs from config space for every subdevice */
	pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_1, &pci_ioaddr1);
	pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_2, &pci_ioaddr2);
	pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_3, &pci_ioaddr3);
	pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_4, &pci_ioaddr4);
	pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_5, &pci_ioaddr5);
	if (!pci_ioaddr1 || !pci_ioaddr2 || !pci_ioaddr3 || !pci_ioaddr4 || !pci_ioaddr5) {
		printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
		       "No IO base address(es)\n",
		       sct_quadro_subtypes[cs->subtyp]);
		return (0);
	}
	pci_ioaddr1 &= PCI_BASE_ADDRESS_IO_MASK;
	pci_ioaddr2 &= PCI_BASE_ADDRESS_IO_MASK;
	pci_ioaddr3 &= PCI_BASE_ADDRESS_IO_MASK;
	pci_ioaddr4 &= PCI_BASE_ADDRESS_IO_MASK;
	pci_ioaddr5 &= PCI_BASE_ADDRESS_IO_MASK;
	/* Take over */
	cs->irq = pci_irq;
	cs->irq_flags |= IRQF_SHARED;
	/* pci_ioaddr1 is unique to all subdevices */
	/* pci_ioaddr2 is for the fourth subdevice only */
	/* pci_ioaddr3 is for the third subdevice only */
	/* pci_ioaddr4 is for the second subdevice only */
	/* pci_ioaddr5 is for the first subdevice only */
	cs->hw.ax.plx_adr = pci_ioaddr1;
	/* Enter all ipac_base addresses */
	switch(cs->subtyp) {
	case 1:
		cs->hw.ax.base = pci_ioaddr5 + 0x00;
		if (sct_alloc_io(pci_ioaddr1, 128))
			return(0);
		if (sct_alloc_io(pci_ioaddr5, 64))
			return(0);
		/* disable all IPAC */
		writereg(pci_ioaddr5, pci_ioaddr5 + 4,
			 IPAC_MASK, 0xFF);
		writereg(pci_ioaddr4 + 0x08, pci_ioaddr4 + 0x0c,
			 IPAC_MASK, 0xFF);
		writereg(pci_ioaddr3 + 0x10, pci_ioaddr3 + 0x14,
			 IPAC_MASK, 0xFF);
		writereg(pci_ioaddr2 + 0x20, pci_ioaddr2 + 0x24,
			 IPAC_MASK, 0xFF);
		break;
	case 2:
		cs->hw.ax.base = pci_ioaddr4 + 0x08;
		if (sct_alloc_io(pci_ioaddr4, 64))
			return(0);
		break;
	case 3:
		cs->hw.ax.base = pci_ioaddr3 + 0x10;
		if (sct_alloc_io(pci_ioaddr3, 64))
			return(0);
		break;
	case 4:
		cs->hw.ax.base = pci_ioaddr2 + 0x20;
		if (sct_alloc_io(pci_ioaddr2, 64))
			return(0);
		break;
	}
	/* For isac and hscx data path */
	cs->hw.ax.data_adr = cs->hw.ax.base + 4;

	printk(KERN_INFO "HiSax: Scitel Quadro (%s) configured at "
	       "0x%.4lX, 0x%.4lX, 0x%.4lX and IRQ %d\n",
	       sct_quadro_subtypes[cs->subtyp],
	       cs->hw.ax.plx_adr,
	       cs->hw.ax.base,
	       cs->hw.ax.data_adr,
	       cs->irq);

	test_and_set_bit(HW_IPAC, &cs->HW_Flags);

	/* hook up the register/FIFO accessors and handlers */
	cs->readisac = &ReadISAC;
	cs->writeisac = &WriteISAC;
	cs->readisacfifo = &ReadISACfifo;
	cs->writeisacfifo = &WriteISACfifo;

	cs->BC_Read_Reg = &ReadHSCX;
	cs->BC_Write_Reg = &WriteHSCX;
	cs->BC_Send_Data = &hscx_fill_fifo;
	cs->cardmsg = &BKM_card_msg;
	cs->irq_func = &bkm_interrupt_ipac;
	printk(KERN_INFO "HiSax: Scitel Quadro (%s): IPAC Version %d\n",
	       sct_quadro_subtypes[cs->subtyp],
	       readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ID));
	return (1);
}
| gpl-2.0 |
adeepv/android-kernel-zte-v9a | drivers/isdn/hisax/bkm_a8.c | 4236 | 11771 | /* $Id: bkm_a8.c,v 1.22.2.4 2004/01/15 14:02:34 keil Exp $
*
* low level stuff for Scitel Quadro (4*S0, passive)
*
* Author Roland Klabunde
* Copyright by Roland Klabunde <R.Klabunde@Berkom.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/init.h>
#include "hisax.h"
#include "isac.h"
#include "ipac.h"
#include "hscx.h"
#include "isdnl1.h"
#include <linux/pci.h>
#include "bkm_ax.h"
#define ATTEMPT_PCI_REMAPPING /* Required for PLX rev 1 */
static const char sct_quadro_revision[] = "$Revision: 1.22.2.4 $";
static const char *sct_quadro_subtypes[] =
{
"",
"#1",
"#2",
"#3",
"#4"
};
#define wordout(addr,val) outw(val,addr)
#define wordin(addr) inw(addr)
/* Read an indirect register: latch @off via the ALE port, read data port. */
static inline u_char
readreg(unsigned int ale, unsigned int adr, u_char off)
{
	register u_char ret;

	wordout(ale, off);
	ret = wordin(adr) & 0xFF;
	return (ret);
}
/* Read @size bytes from the FIFO at @off into @data. */
static inline void
readfifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	int i;

	wordout(ale, off);
	for (i = 0; i < size; i++)
		data[i] = wordin(adr) & 0xFF;
}
/* Write @data to an indirect register: latch @off, then write data port. */
static inline void
writereg(unsigned int ale, unsigned int adr, u_char off, u_char data)
{
	wordout(ale, off);
	wordout(adr, data);
}
/* Write @size bytes from @data into the FIFO at @off. */
static inline void
writefifo(unsigned int ale, unsigned int adr, u_char off, u_char * data, int size)
{
	int i;

	wordout(ale, off);
	for (i = 0; i < size; i++)
		wordout(adr, data[i]);
}
/* Interface functions */
/* Read an ISAC register (ISAC registers sit at offset 0x80 in the IPAC). */
static u_char
ReadISAC(struct IsdnCardState *cs, u_char offset)
{
	return (readreg(cs->hw.ax.base, cs->hw.ax.data_adr, offset | 0x80));
}
/* Write an ISAC register (offset 0x80 window of the IPAC). */
static void
WriteISAC(struct IsdnCardState *cs, u_char offset, u_char value)
{
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, offset | 0x80, value);
}
/* Read @size bytes from the ISAC FIFO (offset 0x80). */
static void
ReadISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	readfifo(cs->hw.ax.base, cs->hw.ax.data_adr, 0x80, data, size);
}
/* Write @size bytes to the ISAC FIFO (offset 0x80). */
static void
WriteISACfifo(struct IsdnCardState *cs, u_char * data, int size)
{
	writefifo(cs->hw.ax.base, cs->hw.ax.data_adr, 0x80, data, size);
}
/* Read an HSCX register; the second HSCX (hscx != 0) is at offset 0x40. */
static u_char
ReadHSCX(struct IsdnCardState *cs, int hscx, u_char offset)
{
	return (readreg(cs->hw.ax.base, cs->hw.ax.data_adr, offset + (hscx ? 0x40 : 0)));
}
/* Write an HSCX register; the second HSCX (hscx != 0) is at offset 0x40. */
static void
WriteHSCX(struct IsdnCardState *cs, int hscx, u_char offset, u_char value)
{
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, offset + (hscx ? 0x40 : 0), value);
}
/* Set the specific ipac to active */
/* Unmask (0xc0) or mask all (0xff) the selected IPAC's interrupts. */
static void
set_ipac_active(struct IsdnCardState *cs, u_int active)
{
	/* set irq mask */
	writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK,
		 active ? 0xc0 : 0xff);
}
/*
* fast interrupt HSCX stuff goes here
*/
#define READHSCX(cs, nr, reg) readreg(cs->hw.ax.base, \
cs->hw.ax.data_adr, reg + (nr ? 0x40 : 0))
#define WRITEHSCX(cs, nr, reg, data) writereg(cs->hw.ax.base, \
cs->hw.ax.data_adr, reg + (nr ? 0x40 : 0), data)
#define READHSCXFIFO(cs, nr, ptr, cnt) readfifo(cs->hw.ax.base, \
cs->hw.ax.data_adr, (nr ? 0x40 : 0), ptr, cnt)
#define WRITEHSCXFIFO(cs, nr, ptr, cnt) writefifo(cs->hw.ax.base, \
cs->hw.ax.data_adr, (nr ? 0x40 : 0), ptr, cnt)
#include "hscx_irq.c"
/*
 * Shared-IRQ handler for one IPAC on the Scitel Quadro.  Reads the IPAC
 * interrupt status and dispatches to the HSCX (B-channel) and ISAC
 * (D-channel) service routines, re-polling up to 5 times before bailing
 * out with an "IRQ LOOP" warning.
 */
static irqreturn_t
bkm_interrupt_ipac(int intno, void *dev_id)
{
struct IsdnCardState *cs = dev_id;
u_char ista, val, icnt = 5;
u_long flags;
spin_lock_irqsave(&cs->lock, flags);
ista = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ISTA);
if (!(ista & 0x3f)) { /* not this IPAC */
spin_unlock_irqrestore(&cs->lock, flags);
return IRQ_NONE;
}
Start_IPAC:
if (cs->debug & L1_DEB_IPAC)
debugl1(cs, "IPAC ISTA %02X", ista);
/* ISTA bits 0x0f: HSCX sources.  Read the second HSCX's status
 * (offset 0x40) and fold the IPAC summary bits into the bit layout
 * hscx_int_main() expects. */
if (ista & 0x0f) {
val = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, HSCX_ISTA + 0x40);
if (ista & 0x01)
val |= 0x01;
if (ista & 0x04)
val |= 0x02;
if (ista & 0x08)
val |= 0x04;
if (val) {
hscx_int_main(cs, val);
}
}
/* ISTA bit 0x20: ISAC interrupt (ISAC registers live at 0x80). */
if (ista & 0x20) {
val = 0xfe & readreg(cs->hw.ax.base, cs->hw.ax.data_adr, ISAC_ISTA | 0x80);
if (val) {
isac_interrupt(cs, val);
}
}
/* ISTA bit 0x10: delivered to the ISAC handler as status 0x01.
 * NOTE(review): exact source meaning not visible here -- confirm
 * against the IPAC data sheet. */
if (ista & 0x10) {
val = 0x01;
isac_interrupt(cs, val);
}
/* Re-read status and loop while sources remain, bounded by icnt. */
ista = readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ISTA);
if ((ista & 0x3f) && icnt) {
icnt--;
goto Start_IPAC;
}
if (!icnt)
printk(KERN_WARNING "HiSax: Scitel Quadro (%s) IRQ LOOP\n",
sct_quadro_subtypes[cs->subtyp]);
/* Mask everything, then restore the normal 0xC0 mask to rearm the
 * interrupt line (same sequence as set_ipac_active()). */
writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK, 0xFF);
writereg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_MASK, 0xC0);
spin_unlock_irqrestore(&cs->lock, flags);
return IRQ_HANDLED;
}
/*
 * Free the I/O regions claimed in setup_sct_quadro().  The IPAC window
 * is released at its 64-byte aligned base; only the first
 * subcontroller additionally owns the PLX register window.
 * NOTE(review): sizes do not match the requests in setup_sct_quadro()
 * (64 requested for subtypes 2-4 vs 128 released here; 128 requested
 * for the PLX window vs 64 released) -- verify before changing.
 */
static void
release_io_sct_quadro(struct IsdnCardState *cs)
{
release_region(cs->hw.ax.base & 0xffffffc0, 128);
if (cs->subtyp == SCT_1)
release_region(cs->hw.ax.plx_adr, 64);
}
static void
enable_bkm_int(struct IsdnCardState *cs, unsigned bEnable)
{
	unsigned int ctrl;

	if (cs->typ != ISDN_CTYPE_SCT_QUADRO)
		return;
	/* Read-modify-write the PLX register at offset 0x4C (presumably
	 * the PLX 9050 INTCSR): bits 0x41 gate the board interrupt. */
	ctrl = wordin(cs->hw.ax.plx_adr + 0x4C);
	if (bEnable)
		ctrl |= 0x41;
	else
		ctrl &= ~0x41;
	wordout(cs->hw.ax.plx_adr + 0x4C, ctrl);
}
/*
 * Pulse the board soft-reset via the PLX control register at offset
 * 0x50.  Only the first subcontroller (SCT_1) owns the PLX window, so
 * only it performs the reset.
 */
static void
reset_bkm(struct IsdnCardState *cs)
{
if (cs->subtyp == SCT_1) {
/* Assert reset (clear bit 2), let the hardware settle. */
wordout(cs->hw.ax.plx_adr + 0x50, (wordin(cs->hw.ax.plx_adr + 0x50) & ~4));
mdelay(10);
/* Remove the soft reset */
wordout(cs->hw.ax.plx_adr + 0x50, (wordin(cs->hw.ax.plx_adr + 0x50) | 4));
mdelay(10);
}
}
/*
 * Card control entry point dispatched by the HiSax core.
 * Handles CARD_RESET / CARD_RELEASE / CARD_INIT under the card
 * spinlock; CARD_TEST is a no-op.  Always returns 0.
 */
static int
BKM_card_msg(struct IsdnCardState *cs, int mt, void *arg)
{
u_long flags;
switch (mt) {
case CARD_RESET:
spin_lock_irqsave(&cs->lock, flags);
/* Disable ints */
set_ipac_active(cs, 0);
enable_bkm_int(cs, 0);
reset_bkm(cs);
spin_unlock_irqrestore(&cs->lock, flags);
return (0);
case CARD_RELEASE:
/* Sanity */
spin_lock_irqsave(&cs->lock, flags);
set_ipac_active(cs, 0);
enable_bkm_int(cs, 0);
spin_unlock_irqrestore(&cs->lock, flags);
/* Region release happens outside the spinlock. */
release_io_sct_quadro(cs);
return (0);
case CARD_INIT:
spin_lock_irqsave(&cs->lock, flags);
cs->debug |= L1_DEB_IPAC;
set_ipac_active(cs, 1);
inithscxisac(cs, 3);
/* Enable ints */
enable_bkm_int(cs, 1);
spin_unlock_irqrestore(&cs->lock, flags);
return (0);
case CARD_TEST:
return (0);
}
return (0);
}
/*
 * Reserve an I/O port region for the card.
 * Returns 0 on success, 1 if the region is already in use.
 */
static int __devinit
sct_alloc_io(u_int adr, u_int len)
{
	if (!request_region(adr, len, "scitel")) {
		/* BUG FIX: the old message printed adr+len, which is one
		 * past the last port actually reserved. */
		printk(KERN_WARNING
		       "HiSax: Scitel port %#x-%#x already in use\n",
		       adr, adr + len - 1);
		return (1);
	}
	return (0);
}
static struct pci_dev *dev_a8 __devinitdata = NULL;
static u16 sub_vendor_id __devinitdata = 0;
static u16 sub_sys_id __devinitdata = 0;
static u_char pci_bus __devinitdata = 0;
static u_char pci_device_fn __devinitdata = 0;
static u_char pci_irq __devinitdata = 0;
/*
 * Probe/setup for one of the four subcontrollers on a Scitel Quadro
 * (T-Berkom) PCI card.  card->para[0] selects the subcontroller
 * (SCT_1..SCT_4).  Only the first subcontroller scans the PCI bus; the
 * identity it finds is cached in the file-static variables (dev_a8,
 * sub_vendor_id, ...) and reused by the other three.
 * Returns 1 on success, 0 on any failure.
 */
int __devinit
setup_sct_quadro(struct IsdnCard *card)
{
struct IsdnCardState *cs = card->cs;
char tmp[64];
u_int found = 0;
u_int pci_ioaddr1, pci_ioaddr2, pci_ioaddr3, pci_ioaddr4, pci_ioaddr5;
strcpy(tmp, sct_quadro_revision);
printk(KERN_INFO "HiSax: T-Berkom driver Rev. %s\n", HiSax_getrev(tmp));
if (cs->typ == ISDN_CTYPE_SCT_QUADRO) {
cs->subtyp = SCT_1; /* Preset */
} else
return (0);
/* Identify subtype by para[0] */
if (card->para[0] >= SCT_1 && card->para[0] <= SCT_4)
cs->subtyp = card->para[0];
else {
printk(KERN_WARNING "HiSax: Scitel Quadro: Invalid "
"subcontroller in configuration, default to 1\n");
return (0);
}
/* Subcontrollers 2-4 depend on the PCI identity cached by the
 * earlier SCT_1 probe; bail out if that probe never succeeded. */
if ((cs->subtyp != SCT_1) && ((sub_sys_id != PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO) ||
(sub_vendor_id != PCI_VENDOR_ID_BERKOM)))
return (0);
if (cs->subtyp == SCT_1) {
/* Scan for a PLX 9050 bridge whose subsystem IDs mark it as a
 * Scitel Quadro board. */
while ((dev_a8 = hisax_find_pci_device(PCI_VENDOR_ID_PLX,
PCI_DEVICE_ID_PLX_9050, dev_a8))) {
sub_vendor_id = dev_a8->subsystem_vendor;
sub_sys_id = dev_a8->subsystem_device;
if ((sub_sys_id == PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO) &&
(sub_vendor_id == PCI_VENDOR_ID_BERKOM)) {
if (pci_enable_device(dev_a8))
return(0);
pci_ioaddr1 = pci_resource_start(dev_a8, 1);
pci_irq = dev_a8->irq;
pci_bus = dev_a8->bus->number;
pci_device_fn = dev_a8->devfn;
found = 1;
break;
}
}
if (!found) {
printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
"Card not found\n",
sct_quadro_subtypes[cs->subtyp]);
return (0);
}
#ifdef ATTEMPT_PCI_REMAPPING
/* HACK: PLX revision 1 bug: PLX address bit 7 must not be set */
if ((pci_ioaddr1 & 0x80) && (dev_a8->revision == 1)) {
printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
"PLX rev 1, remapping required!\n",
sct_quadro_subtypes[cs->subtyp]);
/* Restart PCI negotiation */
pci_write_config_dword(dev_a8, PCI_BASE_ADDRESS_1, (u_int) - 1);
/* Move up by 0x80 byte */
pci_ioaddr1 += 0x80;
pci_ioaddr1 &= PCI_BASE_ADDRESS_IO_MASK;
pci_write_config_dword(dev_a8, PCI_BASE_ADDRESS_1, pci_ioaddr1);
dev_a8->resource[ 1].start = pci_ioaddr1;
}
#endif /* End HACK */
}
if (!pci_irq) { /* IRQ range check ?? */
printk(KERN_WARNING "HiSax: Scitel Quadro (%s): No IRQ\n",
sct_quadro_subtypes[cs->subtyp]);
return (0);
}
/* Read all five I/O BARs; each subcontroller uses a different one
 * (see the comments below the irq assignment). */
pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_1, &pci_ioaddr1);
pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_2, &pci_ioaddr2);
pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_3, &pci_ioaddr3);
pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_4, &pci_ioaddr4);
pci_read_config_dword(dev_a8, PCI_BASE_ADDRESS_5, &pci_ioaddr5);
if (!pci_ioaddr1 || !pci_ioaddr2 || !pci_ioaddr3 || !pci_ioaddr4 || !pci_ioaddr5) {
printk(KERN_WARNING "HiSax: Scitel Quadro (%s): "
"No IO base address(es)\n",
sct_quadro_subtypes[cs->subtyp]);
return (0);
}
pci_ioaddr1 &= PCI_BASE_ADDRESS_IO_MASK;
pci_ioaddr2 &= PCI_BASE_ADDRESS_IO_MASK;
pci_ioaddr3 &= PCI_BASE_ADDRESS_IO_MASK;
pci_ioaddr4 &= PCI_BASE_ADDRESS_IO_MASK;
pci_ioaddr5 &= PCI_BASE_ADDRESS_IO_MASK;
/* Take over */
cs->irq = pci_irq;
cs->irq_flags |= IRQF_SHARED;
/* pci_ioaddr1 is unique to all subdevices */
/* pci_ioaddr2 is for the fourth subdevice only */
/* pci_ioaddr3 is for the third subdevice only */
/* pci_ioaddr4 is for the second subdevice only */
/* pci_ioaddr5 is for the first subdevice only */
cs->hw.ax.plx_adr = pci_ioaddr1;
/* Enter all ipac_base addresses */
switch(cs->subtyp) {
case 1:
/* First subcontroller: claim the shared PLX window too, and
 * mask all four IPACs off until each is initialized. */
cs->hw.ax.base = pci_ioaddr5 + 0x00;
if (sct_alloc_io(pci_ioaddr1, 128))
return(0);
if (sct_alloc_io(pci_ioaddr5, 64))
return(0);
/* disable all IPAC */
writereg(pci_ioaddr5, pci_ioaddr5 + 4,
IPAC_MASK, 0xFF);
writereg(pci_ioaddr4 + 0x08, pci_ioaddr4 + 0x0c,
IPAC_MASK, 0xFF);
writereg(pci_ioaddr3 + 0x10, pci_ioaddr3 + 0x14,
IPAC_MASK, 0xFF);
writereg(pci_ioaddr2 + 0x20, pci_ioaddr2 + 0x24,
IPAC_MASK, 0xFF);
break;
case 2:
cs->hw.ax.base = pci_ioaddr4 + 0x08;
if (sct_alloc_io(pci_ioaddr4, 64))
return(0);
break;
case 3:
cs->hw.ax.base = pci_ioaddr3 + 0x10;
if (sct_alloc_io(pci_ioaddr3, 64))
return(0);
break;
case 4:
cs->hw.ax.base = pci_ioaddr2 + 0x20;
if (sct_alloc_io(pci_ioaddr2, 64))
return(0);
break;
}
/* For isac and hscx data path */
cs->hw.ax.data_adr = cs->hw.ax.base + 4;
printk(KERN_INFO "HiSax: Scitel Quadro (%s) configured at "
"0x%.4lX, 0x%.4lX, 0x%.4lX and IRQ %d\n",
sct_quadro_subtypes[cs->subtyp],
cs->hw.ax.plx_adr,
cs->hw.ax.base,
cs->hw.ax.data_adr,
cs->irq);
/* Hook up the generic IPAC/ISAC/HSCX access and irq callbacks. */
test_and_set_bit(HW_IPAC, &cs->HW_Flags);
cs->readisac = &ReadISAC;
cs->writeisac = &WriteISAC;
cs->readisacfifo = &ReadISACfifo;
cs->writeisacfifo = &WriteISACfifo;
cs->BC_Read_Reg = &ReadHSCX;
cs->BC_Write_Reg = &WriteHSCX;
cs->BC_Send_Data = &hscx_fill_fifo;
cs->cardmsg = &BKM_card_msg;
cs->irq_func = &bkm_interrupt_ipac;
printk(KERN_INFO "HiSax: Scitel Quadro (%s): IPAC Version %d\n",
sct_quadro_subtypes[cs->subtyp],
readreg(cs->hw.ax.base, cs->hw.ax.data_adr, IPAC_ID));
return (1);
}
| gpl-2.0 |
cherojeong/vega-iron_kernel | drivers/media/video/mem2mem_testdev.c | 4236 | 23928 | /*
* A virtual v4l2-mem2mem example device.
*
* This is a virtual device driver for testing mem-to-mem videobuf framework.
* It simulates a device that uses memory buffers for both source and
* destination, processes the data and issues an "irq" (simulated by a timer).
* The device is capable of multi-instance, multi-buffer-per-transaction
* operation (via the mem2mem framework).
*
* Copyright (c) 2009-2010 Samsung Electronics Co., Ltd.
* Pawel Osciak, <pawel@osciak.com>
* Marek Szyprowski, <m.szyprowski@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the
* License, or (at your option) any later version
*/
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <media/v4l2-mem2mem.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <media/videobuf2-vmalloc.h>
#define MEM2MEM_TEST_MODULE_NAME "mem2mem-testdev"
MODULE_DESCRIPTION("Virtual device for mem2mem framework testing");
MODULE_AUTHOR("Pawel Osciak, <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("0.1.1");
#define MIN_W 32
#define MIN_H 32
#define MAX_W 640
#define MAX_H 480
#define DIM_ALIGN_MASK 0x08 /* 8-alignment for dimensions */
/* Flags that indicate a format can be used for capture/output */
#define MEM2MEM_CAPTURE (1 << 0)
#define MEM2MEM_OUTPUT (1 << 1)
#define MEM2MEM_NAME "m2m-testdev"
/* Per queue */
#define MEM2MEM_DEF_NUM_BUFS VIDEO_MAX_FRAME
/* In bytes, per queue */
#define MEM2MEM_VID_MEM_LIMIT (16 * 1024 * 1024)
/* Default transaction time in msec */
#define MEM2MEM_DEF_TRANSTIME 1000
/* Default number of buffers per transaction */
#define MEM2MEM_DEF_TRANSLEN 1
#define MEM2MEM_COLOR_STEP (0xff >> 4)
#define MEM2MEM_NUM_TILES 8
#define dprintk(dev, fmt, arg...) \
v4l2_dbg(1, 1, &dev->v4l2_dev, "%s: " fmt, __func__, ## arg)
/* Release callback for the statically allocated platform device; the
 * device is not heap-allocated, so there is nothing to free (an empty
 * release also keeps the device core from warning). */
void m2mtest_dev_release(struct device *dev)
{}
static struct platform_device m2mtest_pdev = {
.name = MEM2MEM_NAME,
.dev.release = m2mtest_dev_release,
};
/* Describes one pixel format the test device can handle. */
struct m2mtest_fmt {
char *name;
u32 fourcc;
int depth;
/* Types the format can be used for */
u32 types;
};
/* Supported format table; both entries are 16 bits per pixel. */
static struct m2mtest_fmt formats[] = {
{
.name = "RGB565 (BE)",
.fourcc = V4L2_PIX_FMT_RGB565X, /* rrrrrggg gggbbbbb */
.depth = 16,
/* Both capture and output format */
.types = MEM2MEM_CAPTURE | MEM2MEM_OUTPUT,
},
{
.name = "4:2:2, packed, YUYV",
.fourcc = V4L2_PIX_FMT_YUYV,
.depth = 16,
/* Output-only format */
.types = MEM2MEM_OUTPUT,
},
};
/* Per-queue, driver-specific private data */
struct m2mtest_q_data {
unsigned int width;
unsigned int height;
unsigned int sizeimage;
struct m2mtest_fmt *fmt;
};
/* Indices into the q_data[] array below. */
enum {
V4L2_M2M_SRC = 0,
V4L2_M2M_DST = 1,
};
/* Source and destination queue data */
/* NOTE(review): this state is file-global, i.e. shared by all open
 * instances of the device -- confirm that is intended for this test
 * driver before relying on per-instance formats. */
static struct m2mtest_q_data q_data[2];
static struct m2mtest_q_data *get_q_data(enum v4l2_buf_type type)
{
	/* OUTPUT feeds the simulated hardware, CAPTURE receives results;
	 * any other buffer type is a caller bug. */
	if (type == V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return &q_data[V4L2_M2M_SRC];
	if (type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		return &q_data[V4L2_M2M_DST];
	BUG();
	return NULL;
}
/* Custom control IDs in the driver-private range. */
#define V4L2_CID_TRANS_TIME_MSEC V4L2_CID_PRIVATE_BASE
#define V4L2_CID_TRANS_NUM_BUFS (V4L2_CID_PRIVATE_BASE + 1)
/* Descriptions of the two custom controls, served by
 * vidioc_queryctrl()/vidioc_g_ctrl()/vidioc_s_ctrl(). */
static struct v4l2_queryctrl m2mtest_ctrls[] = {
{
.id = V4L2_CID_TRANS_TIME_MSEC,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Transaction time (msec)",
.minimum = 1,
.maximum = 10000,
.step = 100,
.default_value = 1000,
.flags = 0,
}, {
.id = V4L2_CID_TRANS_NUM_BUFS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Buffers per transaction",
.minimum = 1,
.maximum = MEM2MEM_DEF_NUM_BUFS,
.step = 1,
.default_value = 1,
.flags = 0,
},
};
#define NUM_FORMATS ARRAY_SIZE(formats)
static struct m2mtest_fmt *find_format(struct v4l2_format *f)
{
	unsigned int i;

	/* Linear scan of the (tiny) format table for a fourcc match. */
	for (i = 0; i < NUM_FORMATS; i++) {
		if (formats[i].fourcc == f->fmt.pix.pixelformat)
			return &formats[i];
	}
	return NULL;
}
/* Per-physical-device state. */
struct m2mtest_dev {
struct v4l2_device v4l2_dev;
struct video_device *vfd;
atomic_t num_inst;
struct mutex dev_mutex;
/* Serializes buffer completion in device_isr(). */
spinlock_t irqlock;
/* Simulates the hardware interrupt; fires device_isr(). */
struct timer_list timer;
struct v4l2_m2m_dev *m2m_dev;
};
/* Per-open-file (per-instance) state. */
struct m2mtest_ctx {
struct m2mtest_dev *dev;
/* Processed buffers in this transaction */
u8 num_processed;
/* Transaction length (i.e. how many buffers per transaction) */
u32 translen;
/* Transaction time (i.e. simulated processing time) in milliseconds */
u32 transtime;
/* Abort requested by m2m */
int aborting;
struct v4l2_m2m_ctx *m2m_ctx;
};
static struct v4l2_queryctrl *get_ctrl(int id)
{
int i;
for (i = 0; i < ARRAY_SIZE(m2mtest_ctrls); ++i) {
if (id == m2mtest_ctrls[i].id)
return &m2mtest_ctrls[i];
}
return NULL;
}
static int device_process(struct m2mtest_ctx *ctx,
struct vb2_buffer *in_vb,
struct vb2_buffer *out_vb)
{
struct m2mtest_dev *dev = ctx->dev;
struct m2mtest_q_data *q_data;
u8 *p_in, *p_out;
int x, y, t, w;
int tile_w, bytes_left;
int width, height, bytesperline;
q_data = get_q_data(V4L2_BUF_TYPE_VIDEO_OUTPUT);
width = q_data->width;
height = q_data->height;
bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
p_in = vb2_plane_vaddr(in_vb, 0);
p_out = vb2_plane_vaddr(out_vb, 0);
if (!p_in || !p_out) {
v4l2_err(&dev->v4l2_dev,
"Acquiring kernel pointers to buffers failed\n");
return -EFAULT;
}
if (vb2_plane_size(in_vb, 0) > vb2_plane_size(out_vb, 0)) {
v4l2_err(&dev->v4l2_dev, "Output buffer is too small\n");
return -EINVAL;
}
tile_w = (width * (q_data[V4L2_M2M_DST].fmt->depth >> 3))
/ MEM2MEM_NUM_TILES;
bytes_left = bytesperline - tile_w * MEM2MEM_NUM_TILES;
w = 0;
for (y = 0; y < height; ++y) {
for (t = 0; t < MEM2MEM_NUM_TILES; ++t) {
if (w & 0x1) {
for (x = 0; x < tile_w; ++x)
*p_out++ = *p_in++ + MEM2MEM_COLOR_STEP;
} else {
for (x = 0; x < tile_w; ++x)
*p_out++ = *p_in++ - MEM2MEM_COLOR_STEP;
}
++w;
}
p_in += bytes_left;
p_out += bytes_left;
}
return 0;
}
/* Arm the timer that stands in for the hardware completion interrupt. */
static void schedule_irq(struct m2mtest_dev *dev, int msec_timeout)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(msec_timeout);

	dprintk(dev, "Scheduling a simulated irq\n");
	mod_timer(&dev->timer, deadline);
}
/*
* mem2mem callbacks
*/
/**
 * job_ready() - check whether an instance is ready to be scheduled to run
 */
static int job_ready(void *priv)
{
	struct m2mtest_ctx *ctx = priv;
	int src_ok = v4l2_m2m_num_src_bufs_ready(ctx->m2m_ctx) >= ctx->translen;
	int dst_ok = v4l2_m2m_num_dst_bufs_ready(ctx->m2m_ctx) >= ctx->translen;

	/* A full transaction's worth of buffers must be queued on both sides. */
	if (src_ok && dst_ok)
		return 1;
	dprintk(ctx->dev, "Not enough buffers available\n");
	return 0;
}
static void job_abort(void *priv)
{
	struct m2mtest_ctx *ctx = priv;

	/* The transaction is cancelled from the next simulated interrupt. */
	ctx->aborting = 1;
}
/* Framework lock/unlock callbacks: serialize on the per-device mutex. */
static void m2mtest_lock(void *priv)
{
	struct m2mtest_ctx *ctx = priv;

	mutex_lock(&ctx->dev->dev_mutex);
}
static void m2mtest_unlock(void *priv)
{
	struct m2mtest_ctx *ctx = priv;

	mutex_unlock(&ctx->dev->dev_mutex);
}
/* device_run() - prepares and starts the device
 *
 * This simulates all the immediate preparations required before starting
 * a device. This will be called by the framework when it decides to schedule
 * a particular instance.
 */
static void device_run(void *priv)
{
struct m2mtest_ctx *ctx = priv;
struct m2mtest_dev *dev = ctx->dev;
struct vb2_buffer *src_buf, *dst_buf;
src_buf = v4l2_m2m_next_src_buf(ctx->m2m_ctx);
dst_buf = v4l2_m2m_next_dst_buf(ctx->m2m_ctx);
/* NOTE(review): device_process() can fail (-EFAULT/-EINVAL) but its
 * return value is ignored; the buffers will still be completed as
 * DONE by the simulated irq. */
device_process(ctx, src_buf, dst_buf);
/* Run a timer, which simulates a hardware irq */
schedule_irq(dev, ctx->transtime);
}
/*
 * Timer callback standing in for the hardware interrupt.  Completes
 * one src/dst buffer pair; when the transaction length is reached (or
 * an abort was requested) the job is finished, otherwise the next
 * pair is processed immediately.
 */
static void device_isr(unsigned long priv)
{
struct m2mtest_dev *m2mtest_dev = (struct m2mtest_dev *)priv;
struct m2mtest_ctx *curr_ctx;
struct vb2_buffer *src_vb, *dst_vb;
unsigned long flags;
curr_ctx = v4l2_m2m_get_curr_priv(m2mtest_dev->m2m_dev);
if (NULL == curr_ctx) {
printk(KERN_ERR
"Instance released before the end of transaction\n");
return;
}
src_vb = v4l2_m2m_src_buf_remove(curr_ctx->m2m_ctx);
dst_vb = v4l2_m2m_dst_buf_remove(curr_ctx->m2m_ctx);
curr_ctx->num_processed++;
/* Buffer completion runs under irqlock, mirroring a real-irq driver. */
spin_lock_irqsave(&m2mtest_dev->irqlock, flags);
v4l2_m2m_buf_done(src_vb, VB2_BUF_STATE_DONE);
v4l2_m2m_buf_done(dst_vb, VB2_BUF_STATE_DONE);
spin_unlock_irqrestore(&m2mtest_dev->irqlock, flags);
if (curr_ctx->num_processed == curr_ctx->translen
|| curr_ctx->aborting) {
dprintk(curr_ctx->dev, "Finishing transaction\n");
curr_ctx->num_processed = 0;
v4l2_m2m_job_finish(m2mtest_dev->m2m_dev, curr_ctx->m2m_ctx);
} else {
device_run(curr_ctx);
}
}
/*
* video ioctls
*/
/* VIDIOC_QUERYCAP: report driver identity and capabilities. */
static int vidioc_querycap(struct file *file, void *priv,
struct v4l2_capability *cap)
{
/* strncpy with size-1 keeps the final byte untouched as the NUL
 * terminator -- NOTE(review): relies on the V4L2 core handing in a
 * zeroed struct; confirm if reused elsewhere. */
strncpy(cap->driver, MEM2MEM_NAME, sizeof(cap->driver) - 1);
strncpy(cap->card, MEM2MEM_NAME, sizeof(cap->card) - 1);
cap->bus_info[0] = 0;
cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_VIDEO_OUTPUT
| V4L2_CAP_STREAMING;
return 0;
}
/* Return the f->index'th format usable for the given queue type. */
static int enum_fmt(struct v4l2_fmtdesc *f, u32 type)
{
	unsigned int i, matches = 0;

	for (i = 0; i < NUM_FORMATS; i++) {
		if (!(formats[i].types & type))
			continue;
		if (matches == f->index) {
			/* index-th format of this type found */
			strncpy(f->description, formats[i].name,
				sizeof(f->description) - 1);
			f->pixelformat = formats[i].fourcc;
			return 0;
		}
		/* Correct type but haven't reached the index yet. */
		matches++;
	}
	return -EINVAL;
}
/* VIDIOC_ENUM_FMT for the capture queue. */
static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
return enum_fmt(f, MEM2MEM_CAPTURE);
}
/* VIDIOC_ENUM_FMT for the output queue. */
static int vidioc_enum_fmt_vid_out(struct file *file, void *priv,
struct v4l2_fmtdesc *f)
{
return enum_fmt(f, MEM2MEM_OUTPUT);
}
/* Fill *f from the queue state matching f->type. */
static int vidioc_g_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
{
	struct m2mtest_q_data *q_data;

	/* Reject buffer types this context has no queue for. */
	if (!v4l2_m2m_get_vq(ctx->m2m_ctx, f->type))
		return -EINVAL;
	q_data = get_q_data(f->type);
	f->fmt.pix.width = q_data->width;
	f->fmt.pix.height = q_data->height;
	f->fmt.pix.field = V4L2_FIELD_NONE;
	f->fmt.pix.pixelformat = q_data->fmt->fourcc;
	f->fmt.pix.bytesperline = (q_data->width * q_data->fmt->depth) >> 3;
	f->fmt.pix.sizeimage = q_data->sizeimage;
	return 0;
}
/* VIDIOC_G_FMT wrappers; priv is the struct m2mtest_ctx for this file. */
static int vidioc_g_fmt_vid_out(struct file *file, void *priv,
struct v4l2_format *f)
{
return vidioc_g_fmt(priv, f);
}
static int vidioc_g_fmt_vid_cap(struct file *file, void *priv,
struct v4l2_format *f)
{
return vidioc_g_fmt(priv, f);
}
/*
 * Adjust a requested format to device limits: progressive field order
 * only, dimensions clamped to [MIN_W..MAX_W] x [MIN_H..MAX_H], width
 * aligned down to DIM_ALIGN_MASK (8) pixels.
 */
static int vidioc_try_fmt(struct v4l2_format *f, struct m2mtest_fmt *fmt)
{
	enum v4l2_field field;

	field = f->fmt.pix.field;
	if (field == V4L2_FIELD_ANY)
		field = V4L2_FIELD_NONE;
	else if (V4L2_FIELD_NONE != field)
		return -EINVAL;
	/* V4L2 specification suggests the driver corrects the format struct
	 * if any of the dimensions is unsupported */
	f->fmt.pix.field = field;
	if (f->fmt.pix.height < MIN_H)
		f->fmt.pix.height = MIN_H;
	else if (f->fmt.pix.height > MAX_H)
		f->fmt.pix.height = MAX_H;
	if (f->fmt.pix.width < MIN_W)
		f->fmt.pix.width = MIN_W;
	else if (f->fmt.pix.width > MAX_W)
		f->fmt.pix.width = MAX_W;
	/*
	 * BUG FIX: the old code did `width &= ~DIM_ALIGN_MASK` with
	 * DIM_ALIGN_MASK == 0x08, which clears only bit 3 and does NOT round
	 * the width down to a multiple of 8 as the macro's comment intends
	 * (e.g. 41 became 33).  Round down to the alignment properly.
	 */
	f->fmt.pix.width -= f->fmt.pix.width % DIM_ALIGN_MASK;
	f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3;
	f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline;
	return 0;
}
/* VIDIOC_TRY_FMT (capture): the fourcc must be a capture-capable format. */
static int vidioc_try_fmt_vid_cap(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	struct m2mtest_ctx *ctx = priv;
	struct m2mtest_fmt *fmt = find_format(f);

	if (fmt && (fmt->types & MEM2MEM_CAPTURE))
		return vidioc_try_fmt(f, fmt);
	v4l2_err(&ctx->dev->v4l2_dev,
		 "Fourcc format (0x%08x) invalid.\n",
		 f->fmt.pix.pixelformat);
	return -EINVAL;
}
/* VIDIOC_TRY_FMT (output): the fourcc must be an output-capable format. */
static int vidioc_try_fmt_vid_out(struct file *file, void *priv,
				  struct v4l2_format *f)
{
	struct m2mtest_ctx *ctx = priv;
	struct m2mtest_fmt *fmt = find_format(f);

	if (fmt && (fmt->types & MEM2MEM_OUTPUT))
		return vidioc_try_fmt(f, fmt);
	v4l2_err(&ctx->dev->v4l2_dev,
		 "Fourcc format (0x%08x) invalid.\n",
		 f->fmt.pix.pixelformat);
	return -EINVAL;
}
/*
 * Apply a format to the queue state for f->type.  Callers run the
 * matching try_fmt first, which validates the fourcc --
 * NOTE(review): if called with an unvalidated format, find_format()
 * below can return NULL and q_data->fmt would be dereferenced; confirm
 * all call paths go through try_fmt.
 */
static int vidioc_s_fmt(struct m2mtest_ctx *ctx, struct v4l2_format *f)
{
struct m2mtest_q_data *q_data;
struct vb2_queue *vq;
vq = v4l2_m2m_get_vq(ctx->m2m_ctx, f->type);
if (!vq)
return -EINVAL;
q_data = get_q_data(f->type);
if (!q_data)
return -EINVAL;
/* The format cannot change while buffers are allocated on the queue. */
if (vb2_is_busy(vq)) {
v4l2_err(&ctx->dev->v4l2_dev, "%s queue busy\n", __func__);
return -EBUSY;
}
q_data->fmt = find_format(f);
q_data->width = f->fmt.pix.width;
q_data->height = f->fmt.pix.height;
q_data->sizeimage = q_data->width * q_data->height
* q_data->fmt->depth >> 3;
dprintk(ctx->dev,
"Setting format for type %d, wxh: %dx%d, fmt: %d\n",
f->type, q_data->width, q_data->height, q_data->fmt->fourcc);
return 0;
}
/* VIDIOC_S_FMT wrappers: validate via try_fmt, then commit. */
static int vidioc_s_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	int ret = vidioc_try_fmt_vid_cap(file, priv, f);

	return ret ? ret : vidioc_s_fmt(priv, f);
}
static int vidioc_s_fmt_vid_out(struct file *file, void *priv,
				struct v4l2_format *f)
{
	int ret = vidioc_try_fmt_vid_out(file, priv, f);

	return ret ? ret : vidioc_s_fmt(priv, f);
}
/* The buffer and streaming ioctls below all delegate directly to the
 * corresponding v4l2-mem2mem framework helpers. */
static int vidioc_reqbufs(struct file *file, void *priv,
struct v4l2_requestbuffers *reqbufs)
{
struct m2mtest_ctx *ctx = priv;
return v4l2_m2m_reqbufs(file, ctx->m2m_ctx, reqbufs);
}
static int vidioc_querybuf(struct file *file, void *priv,
struct v4l2_buffer *buf)
{
struct m2mtest_ctx *ctx = priv;
return v4l2_m2m_querybuf(file, ctx->m2m_ctx, buf);
}
static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
struct m2mtest_ctx *ctx = priv;
return v4l2_m2m_qbuf(file, ctx->m2m_ctx, buf);
}
static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{
struct m2mtest_ctx *ctx = priv;
return v4l2_m2m_dqbuf(file, ctx->m2m_ctx, buf);
}
static int vidioc_streamon(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct m2mtest_ctx *ctx = priv;
return v4l2_m2m_streamon(file, ctx->m2m_ctx, type);
}
static int vidioc_streamoff(struct file *file, void *priv,
enum v4l2_buf_type type)
{
struct m2mtest_ctx *ctx = priv;
return v4l2_m2m_streamoff(file, ctx->m2m_ctx, type);
}
/* VIDIOC_QUERYCTRL: copy the matching control description, if any. */
static int vidioc_queryctrl(struct file *file, void *priv,
			    struct v4l2_queryctrl *qc)
{
	struct v4l2_queryctrl *c = get_ctrl(qc->id);

	if (!c)
		return -EINVAL;
	*qc = *c;
	return 0;
}
/* VIDIOC_G_CTRL: read back one of the two custom per-instance controls. */
static int vidioc_g_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct m2mtest_ctx *ctx = priv;
switch (ctrl->id) {
case V4L2_CID_TRANS_TIME_MSEC:
ctrl->value = ctx->transtime;
break;
case V4L2_CID_TRANS_NUM_BUFS:
ctrl->value = ctx->translen;
break;
default:
v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
return -EINVAL;
}
return 0;
}
/* Validate a control value against its queryctrl min/max range. */
static int check_ctrl_val(struct m2mtest_ctx *ctx, struct v4l2_control *ctrl)
{
	struct v4l2_queryctrl *c = get_ctrl(ctrl->id);

	if (!c)
		return -EINVAL;
	if (ctrl->value >= c->minimum && ctrl->value <= c->maximum)
		return 0;
	v4l2_err(&ctx->dev->v4l2_dev, "Value out of range\n");
	return -ERANGE;
}
/* VIDIOC_S_CTRL: range-check, then store into the per-instance context. */
static int vidioc_s_ctrl(struct file *file, void *priv,
struct v4l2_control *ctrl)
{
struct m2mtest_ctx *ctx = priv;
int ret = 0;
ret = check_ctrl_val(ctx, ctrl);
if (ret != 0)
return ret;
switch (ctrl->id) {
case V4L2_CID_TRANS_TIME_MSEC:
ctx->transtime = ctrl->value;
break;
case V4L2_CID_TRANS_NUM_BUFS:
ctx->translen = ctrl->value;
break;
default:
v4l2_err(&ctx->dev->v4l2_dev, "Invalid control\n");
return -EINVAL;
}
return 0;
}
static const struct v4l2_ioctl_ops m2mtest_ioctl_ops = {
.vidioc_querycap = vidioc_querycap,
.vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap,
.vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap,
.vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap,
.vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap,
.vidioc_enum_fmt_vid_out = vidioc_enum_fmt_vid_out,
.vidioc_g_fmt_vid_out = vidioc_g_fmt_vid_out,
.vidioc_try_fmt_vid_out = vidioc_try_fmt_vid_out,
.vidioc_s_fmt_vid_out = vidioc_s_fmt_vid_out,
.vidioc_reqbufs = vidioc_reqbufs,
.vidioc_querybuf = vidioc_querybuf,
.vidioc_qbuf = vidioc_qbuf,
.vidioc_dqbuf = vidioc_dqbuf,
.vidioc_streamon = vidioc_streamon,
.vidioc_streamoff = vidioc_streamoff,
.vidioc_queryctrl = vidioc_queryctrl,
.vidioc_g_ctrl = vidioc_g_ctrl,
.vidioc_s_ctrl = vidioc_s_ctrl,
};
/*
* Queue operations
*/
/*
 * vb2 queue_setup: one plane of width*height*depth/8 bytes per buffer;
 * the buffer count is reduced until the whole queue fits inside
 * MEM2MEM_VID_MEM_LIMIT.
 */
static int m2mtest_queue_setup(struct vb2_queue *vq,
const struct v4l2_format *fmt,
unsigned int *nbuffers, unsigned int *nplanes,
unsigned int sizes[], void *alloc_ctxs[])
{
struct m2mtest_ctx *ctx = vb2_get_drv_priv(vq);
struct m2mtest_q_data *q_data;
unsigned int size, count = *nbuffers;
q_data = get_q_data(vq->type);
size = q_data->width * q_data->height * q_data->fmt->depth >> 3;
/* Shrink the request until total memory fits the per-queue limit. */
while (size * count > MEM2MEM_VID_MEM_LIMIT)
(count)--;
*nplanes = 1;
*nbuffers = count;
sizes[0] = size;
/*
* videobuf2-vmalloc allocator is context-less so no need to set
* alloc_ctxs array.
*/
dprintk(ctx->dev, "get %d buffer(s) of size %d each.\n", count, size);
return 0;
}
/*
 * vb2 buf_prepare: reject buffers smaller than the negotiated image
 * size, and declare the payload as exactly sizeimage bytes.
 */
static int m2mtest_buf_prepare(struct vb2_buffer *vb)
{
struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
struct m2mtest_q_data *q_data;
dprintk(ctx->dev, "type: %d\n", vb->vb2_queue->type);
q_data = get_q_data(vb->vb2_queue->type);
if (vb2_plane_size(vb, 0) < q_data->sizeimage) {
dprintk(ctx->dev, "%s data will not fit into plane (%lu < %lu)\n",
__func__, vb2_plane_size(vb, 0), (long)q_data->sizeimage);
return -EINVAL;
}
vb2_set_plane_payload(vb, 0, q_data->sizeimage);
return 0;
}
/* vb2 buf_queue: hand the buffer to the mem2mem framework's ready list. */
static void m2mtest_buf_queue(struct vb2_buffer *vb)
{
struct m2mtest_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
v4l2_m2m_buf_queue(ctx->m2m_ctx, vb);
}
/* Drop the device mutex around blocking waits inside vb2 ... */
static void m2mtest_wait_prepare(struct vb2_queue *q)
{
struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
m2mtest_unlock(ctx);
}
/* ... and re-take it when the wait completes. */
static void m2mtest_wait_finish(struct vb2_queue *q)
{
struct m2mtest_ctx *ctx = vb2_get_drv_priv(q);
m2mtest_lock(ctx);
}
static struct vb2_ops m2mtest_qops = {
.queue_setup = m2mtest_queue_setup,
.buf_prepare = m2mtest_buf_prepare,
.buf_queue = m2mtest_buf_queue,
.wait_prepare = m2mtest_wait_prepare,
.wait_finish = m2mtest_wait_finish,
};
/*
 * Initialize the source (OUTPUT) and destination (CAPTURE) vb2 queues
 * for a new context.  Both queues are MMAP-only, use the vmalloc
 * allocator and share m2mtest_qops.
 */
static int queue_init(void *priv, struct vb2_queue *src_vq, struct vb2_queue *dst_vq)
{
struct m2mtest_ctx *ctx = priv;
int ret;
memset(src_vq, 0, sizeof(*src_vq));
src_vq->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
src_vq->io_modes = VB2_MMAP;
src_vq->drv_priv = ctx;
src_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
src_vq->ops = &m2mtest_qops;
src_vq->mem_ops = &vb2_vmalloc_memops;
ret = vb2_queue_init(src_vq);
if (ret)
return ret;
memset(dst_vq, 0, sizeof(*dst_vq));
dst_vq->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
dst_vq->io_modes = VB2_MMAP;
dst_vq->drv_priv = ctx;
dst_vq->buf_struct_size = sizeof(struct v4l2_m2m_buffer);
dst_vq->ops = &m2mtest_qops;
dst_vq->mem_ops = &vb2_vmalloc_memops;
return vb2_queue_init(dst_vq);
}
/*
* File operations
*/
/*
 * Open a new instance: allocate a context with the default transaction
 * parameters and register it with the mem2mem framework.  The context
 * is freed again if m2m registration fails.
 */
static int m2mtest_open(struct file *file)
{
struct m2mtest_dev *dev = video_drvdata(file);
struct m2mtest_ctx *ctx = NULL;
ctx = kzalloc(sizeof *ctx, GFP_KERNEL);
if (!ctx)
return -ENOMEM;
file->private_data = ctx;
ctx->dev = dev;
ctx->translen = MEM2MEM_DEF_TRANSLEN;
ctx->transtime = MEM2MEM_DEF_TRANSTIME;
ctx->num_processed = 0;
ctx->m2m_ctx = v4l2_m2m_ctx_init(dev->m2m_dev, ctx, &queue_init);
if (IS_ERR(ctx->m2m_ctx)) {
int ret = PTR_ERR(ctx->m2m_ctx);
kfree(ctx);
return ret;
}
atomic_inc(&dev->num_inst);
dprintk(dev, "Created instance %p, m2m_ctx: %p\n", ctx, ctx->m2m_ctx);
return 0;
}
/* Tear down one instance and drop the instance count. */
static int m2mtest_release(struct file *file)
{
struct m2mtest_dev *dev = video_drvdata(file);
struct m2mtest_ctx *ctx = file->private_data;
dprintk(dev, "Releasing instance %p\n", ctx);
v4l2_m2m_ctx_release(ctx->m2m_ctx);
kfree(ctx);
atomic_dec(&dev->num_inst);
return 0;
}
/* Delegate poll to the mem2mem framework. */
static unsigned int m2mtest_poll(struct file *file,
struct poll_table_struct *wait)
{
struct m2mtest_ctx *ctx = file->private_data;
return v4l2_m2m_poll(file, ctx->m2m_ctx, wait);
}
/* Delegate mmap to the mem2mem framework. */
static int m2mtest_mmap(struct file *file, struct vm_area_struct *vma)
{
struct m2mtest_ctx *ctx = file->private_data;
return v4l2_m2m_mmap(file, ctx->m2m_ctx, vma);
}
static const struct v4l2_file_operations m2mtest_fops = {
.owner = THIS_MODULE,
.open = m2mtest_open,
.release = m2mtest_release,
.poll = m2mtest_poll,
.unlocked_ioctl = video_ioctl2,
.mmap = m2mtest_mmap,
};
static struct video_device m2mtest_videodev = {
.name = MEM2MEM_NAME,
.fops = &m2mtest_fops,
.ioctl_ops = &m2mtest_ioctl_ops,
.minor = -1,
.release = video_device_release,
};
static struct v4l2_m2m_ops m2m_ops = {
.device_run = device_run,
.job_ready = job_ready,
.job_abort = job_abort,
.lock = m2mtest_lock,
.unlock = m2mtest_unlock,
};
/*
 * Probe: allocate device state, register the v4l2 device and video
 * node, arm the simulated-irq timer and initialize the mem2mem
 * framework.  Unwinds via the goto chain on failure.
 */
static int m2mtest_probe(struct platform_device *pdev)
{
	struct m2mtest_dev *dev;
	struct video_device *vfd;
	int ret;

	dev = kzalloc(sizeof *dev, GFP_KERNEL);
	if (!dev)
		return -ENOMEM;
	spin_lock_init(&dev->irqlock);
	ret = v4l2_device_register(&pdev->dev, &dev->v4l2_dev);
	if (ret)
		goto free_dev;
	atomic_set(&dev->num_inst, 0);
	mutex_init(&dev->dev_mutex);
	vfd = video_device_alloc();
	if (!vfd) {
		v4l2_err(&dev->v4l2_dev, "Failed to allocate video device\n");
		ret = -ENOMEM;
		goto unreg_dev;
	}
	*vfd = m2mtest_videodev;
	vfd->lock = &dev->dev_mutex;
	ret = video_register_device(vfd, VFL_TYPE_GRABBER, 0);
	if (ret) {
		v4l2_err(&dev->v4l2_dev, "Failed to register video device\n");
		goto rel_vdev;
	}
	video_set_drvdata(vfd, dev);
	snprintf(vfd->name, sizeof(vfd->name), "%s", m2mtest_videodev.name);
	dev->vfd = vfd;
	v4l2_info(&dev->v4l2_dev, MEM2MEM_TEST_MODULE_NAME
		  "Device registered as /dev/video%d\n", vfd->num);
	setup_timer(&dev->timer, device_isr, (long)dev);
	platform_set_drvdata(pdev, dev);
	dev->m2m_dev = v4l2_m2m_init(&m2m_ops);
	if (IS_ERR(dev->m2m_dev)) {
		v4l2_err(&dev->v4l2_dev, "Failed to init mem2mem device\n");
		ret = PTR_ERR(dev->m2m_dev);
		goto err_m2m;
	}
	q_data[V4L2_M2M_SRC].fmt = &formats[0];
	q_data[V4L2_M2M_DST].fmt = &formats[0];
	return 0;

	/*
	 * BUG FIX: an unreachable v4l2_m2m_release() call that sat between
	 * the success return and the err_m2m label (dead code, no label
	 * before it) has been removed.
	 * NOTE(review): err_m2m unregisters the video device and then also
	 * falls through to video_device_release() -- verify against the
	 * .release callback set in m2mtest_videodev before reordering.
	 */
err_m2m:
	video_unregister_device(dev->vfd);
rel_vdev:
	video_device_release(vfd);
unreg_dev:
	v4l2_device_unregister(&dev->v4l2_dev);
free_dev:
	kfree(dev);
	return ret;
}
/* Remove: undo everything m2mtest_probe() set up, in reverse order. */
static int m2mtest_remove(struct platform_device *pdev)
{
	struct m2mtest_dev *dev = platform_get_drvdata(pdev);

	v4l2_info(&dev->v4l2_dev, "Removing " MEM2MEM_TEST_MODULE_NAME);
	v4l2_m2m_release(dev->m2m_dev);
	del_timer_sync(&dev->timer);
	video_unregister_device(dev->vfd);
	v4l2_device_unregister(&dev->v4l2_dev);
	kfree(dev);
	return 0;
}
static struct platform_driver m2mtest_pdrv = {
.probe = m2mtest_probe,
.remove = m2mtest_remove,
.driver = {
.name = MEM2MEM_NAME,
.owner = THIS_MODULE,
},
};
/* Module unload: unregister the driver first, then the virtual device. */
static void __exit m2mtest_exit(void)
{
platform_driver_unregister(&m2mtest_pdrv);
platform_device_unregister(&m2mtest_pdev);
}
/*
 * Module load: register the virtual platform device, then the driver.
 * The device is unregistered again if driver registration fails.
 */
static int __init m2mtest_init(void)
{
	int ret;

	ret = platform_device_register(&m2mtest_pdev);
	if (ret)
		return ret;
	ret = platform_driver_register(&m2mtest_pdrv);
	if (ret)
		platform_device_unregister(&m2mtest_pdev);
	/* BUG FIX: the old code returned 0 even when
	 * platform_driver_register() failed, reporting success for a
	 * module with no driver bound.  Propagate the error instead. */
	return ret;
}
module_init(m2mtest_init);
module_exit(m2mtest_exit);
| gpl-2.0 |
s0be/kernel_htc_msm7227 | drivers/isdn/hardware/avm/b1pci.c | 4236 | 10897 | /* $Id: b1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
*
* Module for AVM B1 PCI-card.
*
* Copyright 1999 by Carsten Paeth <calle@calle.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/capi.h>
#include <asm/io.h>
#include <linux/init.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capilli.h>
#include "avmcard.h"
/* ------------------------------------------------------------- */
static char *revision = "$Revision: 1.1.2.2 $";
/* ------------------------------------------------------------- */
/* PCI IDs this driver binds to: any AVM B1 PCI card, all sub-IDs. */
static struct pci_device_id b1pci_pci_tbl[] = {
	{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_B1, PCI_ANY_ID, PCI_ANY_ID },
	{ }				/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, b1pci_pci_tbl);
MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM B1 PCI card");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");
/* ------------------------------------------------------------- */
/* Format a one-line /proc summary for a plain B1 PCI controller
 * ("<cardname> <driver-version> 0x<port> <irq> r<revision>") into
 * cinfo->infobuf and return it.  Returns "" if no driver data is
 * attached.  Every card field is guarded because the card may have
 * been detached already. */
static char *b1pci_procinfo(struct capi_ctr *ctrl)
{
	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);

	if (!cinfo)
		return "";
	sprintf(cinfo->infobuf, "%s %s 0x%x %d r%d",
		cinfo->cardname[0] ? cinfo->cardname : "-",
		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
		cinfo->card ? cinfo->card->port : 0x0,
		cinfo->card ? cinfo->card->irq : 0,
		cinfo->card ? cinfo->card->revision : 0
		);
	return cinfo->infobuf;
}
/* ------------------------------------------------------------- */
/*
 * Probe and register a plain (non-DMA) B1 PCI card.
 *
 * Sequence: allocate card state, claim the I/O region, reset and
 * detect the hardware, grab the IRQ, then attach the CAPI controller.
 * On any failure the goto-chain unwinds exactly the resources acquired
 * so far.  Returns 0 on success or a negative errno.
 */
static int b1pci_probe(struct capicardparams *p, struct pci_dev *pdev)
{
	avmcard *card;
	avmctrl_info *cinfo;
	int retval;

	card = b1_alloc_card(1);
	if (!card) {
		printk(KERN_WARNING "b1pci: no memory.\n");
		retval = -ENOMEM;
		goto err;
	}

	cinfo = card->ctrlinfo;
	sprintf(card->name, "b1pci-%x", p->port);
	card->port = p->port;
	card->irq = p->irq;
	card->cardtype = avm_b1pci;

	if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
		printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
		       card->port, card->port + AVMB1_PORTLEN);
		retval = -EBUSY;
		goto err_free;
	}
	/* Reset before and after detection so the card starts clean. */
	b1_reset(card->port);
	retval = b1_detect(card->port, card->cardtype);
	if (retval) {
		printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
		       card->port, retval);
		retval = -ENODEV;
		goto err_release_region;
	}
	b1_reset(card->port);
	b1_getrevision(card);

	retval = request_irq(card->irq, b1_interrupt, IRQF_SHARED, card->name, card);
	if (retval) {
		printk(KERN_ERR "b1pci: unable to get IRQ %d.\n", card->irq);
		retval = -EBUSY;
		goto err_release_region;
	}

	/* Wire up the CAPI controller ops for the non-DMA B1 core. */
	cinfo->capi_ctrl.driver_name   = "b1pci";
	cinfo->capi_ctrl.driverdata    = cinfo;
	cinfo->capi_ctrl.register_appl = b1_register_appl;
	cinfo->capi_ctrl.release_appl  = b1_release_appl;
	cinfo->capi_ctrl.send_message  = b1_send_message;
	cinfo->capi_ctrl.load_firmware = b1_load_firmware;
	cinfo->capi_ctrl.reset_ctr     = b1_reset_ctr;
	cinfo->capi_ctrl.procinfo      = b1pci_procinfo;
	cinfo->capi_ctrl.proc_fops = &b1ctl_proc_fops;
	strcpy(cinfo->capi_ctrl.name, card->name);
	cinfo->capi_ctrl.owner = THIS_MODULE;

	retval = attach_capi_ctr(&cinfo->capi_ctrl);
	if (retval) {
		printk(KERN_ERR "b1pci: attach controller failed.\n");
		goto err_free_irq;
	}

	/* V4 hardware handled here means the V4 DMA driver is disabled. */
	if (card->revision >= 4) {
		printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, revision %d (no dma)\n",
		       card->port, card->irq, card->revision);
	} else {
		printk(KERN_INFO "b1pci: AVM B1 PCI at i/o %#x, irq %d, revision %d\n",
		       card->port, card->irq, card->revision);
	}

	pci_set_drvdata(pdev, card);
	return 0;

err_free_irq:
	free_irq(card->irq, card);
err_release_region:
	release_region(card->port, AVMB1_PORTLEN);
err_free:
	b1_free_card(card);
err:
	return retval;
}
/* Tear down a plain B1 PCI card: quiesce the hardware (double reset),
 * detach the CAPI controller, then release IRQ, I/O region and the
 * card state — reverse of b1pci_probe(). */
static void b1pci_remove(struct pci_dev *pdev)
{
	avmcard *card = pci_get_drvdata(pdev);
	avmctrl_info *cinfo = card->ctrlinfo;
	unsigned int port = card->port;

	b1_reset(port);
	b1_reset(port);

	detach_capi_ctr(&cinfo->capi_ctrl);
	free_irq(card->irq, card);
	release_region(card->port, AVMB1_PORTLEN);
	b1_free_card(card);
}
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
/* ------------------------------------------------------------- */
/* /proc summary for the V4 (DMA) variant; same as b1pci_procinfo()
 * plus the memory-mapped base address. */
static char *b1pciv4_procinfo(struct capi_ctr *ctrl)
{
	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);

	if (!cinfo)
		return "";
	sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx r%d",
		cinfo->cardname[0] ? cinfo->cardname : "-",
		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
		cinfo->card ? cinfo->card->port : 0x0,
		cinfo->card ? cinfo->card->irq : 0,
		cinfo->card ? cinfo->card->membase : 0,
		cinfo->card ? cinfo->card->revision : 0
		);
	return cinfo->infobuf;
}
/* ------------------------------------------------------------- */
/*
 * Probe and register a B1 PCI V4 card (DMA capable, memory mapped).
 *
 * Like b1pci_probe() but additionally allocates DMA buffers and maps
 * the card's MMIO window, and uses the b1dma_* controller ops.  The
 * goto-chain unwinds acquired resources in reverse order on failure.
 * Returns 0 on success or a negative errno.
 */
static int b1pciv4_probe(struct capicardparams *p, struct pci_dev *pdev)
{
	avmcard *card;
	avmctrl_info *cinfo;
	int retval;

	card = b1_alloc_card(1);
	if (!card) {
		printk(KERN_WARNING "b1pci: no memory.\n");
		retval = -ENOMEM;
		goto err;
	}

	/* 2048+128 bytes each for the send and receive DMA buffers. */
	card->dma = avmcard_dma_alloc("b1pci", pdev, 2048+128, 2048+128);
	if (!card->dma) {
		printk(KERN_WARNING "b1pci: dma alloc.\n");
		retval = -ENOMEM;
		goto err_free;
	}

	cinfo = card->ctrlinfo;
	sprintf(card->name, "b1pciv4-%x", p->port);
	card->port = p->port;
	card->irq = p->irq;
	card->membase = p->membase;
	card->cardtype = avm_b1pci;

	if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
		printk(KERN_WARNING "b1pci: ports 0x%03x-0x%03x in use.\n",
		       card->port, card->port + AVMB1_PORTLEN);
		retval = -EBUSY;
		goto err_free_dma;
	}

	/* Only 64 bytes of register window are needed. */
	card->mbase = ioremap(card->membase, 64);
	if (!card->mbase) {
		printk(KERN_NOTICE "b1pci: can't remap memory at 0x%lx\n",
		       card->membase);
		retval = -ENOMEM;
		goto err_release_region;
	}

	b1dma_reset(card);

	retval = b1pciv4_detect(card);
	if (retval) {
		printk(KERN_NOTICE "b1pci: NO card at 0x%x (%d)\n",
		       card->port, retval);
		retval = -ENODEV;
		goto err_unmap;
	}
	b1dma_reset(card);
	b1_getrevision(card);

	retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
	if (retval) {
		printk(KERN_ERR "b1pci: unable to get IRQ %d.\n",
		       card->irq);
		retval = -EBUSY;
		goto err_unmap;
	}

	/* Wire up the CAPI controller ops for the DMA (b1dma) core. */
	cinfo->capi_ctrl.owner         = THIS_MODULE;
	cinfo->capi_ctrl.driver_name   = "b1pciv4";
	cinfo->capi_ctrl.driverdata    = cinfo;
	cinfo->capi_ctrl.register_appl = b1dma_register_appl;
	cinfo->capi_ctrl.release_appl  = b1dma_release_appl;
	cinfo->capi_ctrl.send_message  = b1dma_send_message;
	cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
	cinfo->capi_ctrl.reset_ctr     = b1dma_reset_ctr;
	cinfo->capi_ctrl.procinfo      = b1pciv4_procinfo;
	cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
	strcpy(cinfo->capi_ctrl.name, card->name);

	retval = attach_capi_ctr(&cinfo->capi_ctrl);
	if (retval) {
		printk(KERN_ERR "b1pci: attach controller failed.\n");
		goto err_free_irq;
	}
	card->cardnr = cinfo->capi_ctrl.cnr;

	printk(KERN_INFO "b1pci: AVM B1 PCI V4 at i/o %#x, irq %d, mem %#lx, revision %d (dma)\n",
	       card->port, card->irq, card->membase, card->revision);

	pci_set_drvdata(pdev, card);
	return 0;

err_free_irq:
	free_irq(card->irq, card);
err_unmap:
	iounmap(card->mbase);
err_release_region:
	release_region(card->port, AVMB1_PORTLEN);
err_free_dma:
	avmcard_dma_free(card->dma);
err_free:
	b1_free_card(card);
err:
	return retval;
}
/* Tear down a B1 PCI V4 card — reverse of b1pciv4_probe(): quiesce
 * the hardware, detach CAPI, then free IRQ, MMIO mapping, I/O region,
 * DMA buffers and card state. */
static void b1pciv4_remove(struct pci_dev *pdev)
{
	avmcard *card = pci_get_drvdata(pdev);
	avmctrl_info *cinfo = card->ctrlinfo;

	b1dma_reset(card);

	detach_capi_ctr(&cinfo->capi_ctrl);
	free_irq(card->irq, card);
	iounmap(card->mbase);
	release_region(card->port, AVMB1_PORTLEN);
	avmcard_dma_free(card->dma);
	b1_free_card(card);
}
#endif /* CONFIG_ISDN_DRV_AVMB1_B1PCIV4 */
/*
 * PCI probe entry point.  Enables the device, then distinguishes a
 * B1 PCI V4 (BAR 2 present: memory mapped, DMA capable) from the
 * older plain B1 PCI (I/O ports in BAR 1 only) and dispatches to the
 * matching card-specific probe routine.
 *
 * Note: the three b1pci*_probe() call sites previously read
 * "¶m" — a mangled HTML entity for "&param" — which cannot
 * compile; restored to take the address of the local parameter block.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit b1pci_pci_probe(struct pci_dev *pdev,
				     const struct pci_device_id *ent)
{
	struct capicardparams param;
	int retval;

	if (pci_enable_device(pdev) < 0) {
		printk(KERN_ERR "b1pci: failed to enable AVM-B1\n");
		return -ENODEV;
	}
	param.irq = pdev->irq;

	if (pci_resource_start(pdev, 2)) { /* B1 PCI V4 */
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
		pci_set_master(pdev);
#endif
		param.membase = pci_resource_start(pdev, 0);
		param.port = pci_resource_start(pdev, 2);

		printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 V4 at i/o %#x, irq %d, mem %#x\n",
		       param.port, param.irq, param.membase);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
		retval = b1pciv4_probe(&param, pdev);
#else
		/* V4 support compiled out: drive the card in non-DMA mode. */
		retval = b1pci_probe(&param, pdev);
#endif
		if (retval != 0) {
			printk(KERN_ERR "b1pci: no AVM-B1 V4 at i/o %#x, irq %d, mem %#x detected\n",
			       param.port, param.irq, param.membase);
		}
	} else {
		param.membase = 0;
		param.port = pci_resource_start(pdev, 1);
		printk(KERN_INFO "b1pci: PCI BIOS reports AVM-B1 at i/o %#x, irq %d\n",
		       param.port, param.irq);
		retval = b1pci_probe(&param, pdev);
		if (retval != 0) {
			printk(KERN_ERR "b1pci: no AVM-B1 at i/o %#x, irq %d detected\n",
			       param.port, param.irq);
		}
	}
	return retval;
}
/* PCI remove entry point: dispatch to the V4 teardown when the card
 * was probed with DMA (card->dma set), otherwise to the plain one. */
static void __devexit b1pci_pci_remove(struct pci_dev *pdev)
{
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
	avmcard *card = pci_get_drvdata(pdev);

	if (card->dma)
		b1pciv4_remove(pdev);
	else
		b1pci_remove(pdev);
#else
	b1pci_remove(pdev);
#endif
}
/* PCI driver registration for both card variants. */
static struct pci_driver b1pci_pci_driver = {
	.name		= "b1pci",
	.id_table	= b1pci_pci_tbl,
	.probe		= b1pci_pci_probe,
	.remove		= __devexit_p(b1pci_pci_remove),
};

/* CAPI driver entries shown to capifs; .revision is overwritten with
 * the real revision string in b1pci_init(). */
static struct capi_driver capi_driver_b1pci = {
	.name		= "b1pci",
	.revision	= "1.0",
};

#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
static struct capi_driver capi_driver_b1pciv4 = {
	.name		= "b1pciv4",
	.revision	= "1.0",
};
#endif
/*
 * Module init: extract the revision number out of the CVS "$Revision$"
 * keyword string, register the PCI driver, and on success register the
 * CAPI driver entries with the parsed revision.
 * Returns 0 on success or the pci_register_driver() error.
 */
static int __init b1pci_init(void)
{
	char *p;
	char rev[32];
	int err;

	/* "$Revision: 1.2.3 $" -> "1.2.3"; fall back to "1.0". */
	if ((p = strchr(revision, ':')) != NULL && p[1]) {
		strlcpy(rev, p + 2, 32);
		if ((p = strchr(rev, '$')) != NULL && p > rev)
			*(p-1) = 0;	/* strip the trailing " $" */
	} else
		strcpy(rev, "1.0");

	err = pci_register_driver(&b1pci_pci_driver);
	if (!err) {
		strlcpy(capi_driver_b1pci.revision, rev, 32);
		register_capi_driver(&capi_driver_b1pci);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
		strlcpy(capi_driver_b1pciv4.revision, rev, 32);
		register_capi_driver(&capi_driver_b1pciv4);
#endif
		printk(KERN_INFO "b1pci: revision %s\n", rev);
	}
	return err;
}
/* Module exit: unregister CAPI drivers first, then the PCI driver
 * (which triggers remove for any bound cards). */
static void __exit b1pci_exit(void)
{
	unregister_capi_driver(&capi_driver_b1pci);
#ifdef CONFIG_ISDN_DRV_AVMB1_B1PCIV4
	unregister_capi_driver(&capi_driver_b1pciv4);
#endif
	pci_unregister_driver(&b1pci_pci_driver);
}
module_init(b1pci_init);
module_exit(b1pci_exit);
| gpl-2.0 |
rmcc/commtiva-kernel-z71 | drivers/staging/octeon/cvmx-helper-npi.c | 4748 | 3413 | /***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Functions for NPI initialization, configuration,
* and monitoring.
*/
#include <asm/octeon/octeon.h>
#include "cvmx-config.h"
#include "cvmx-helper.h"
#include "cvmx-pip-defs.h"
/**
* Probe a NPI interface and determine the number of ports
* connected to it. The NPI interface should still be down
* after this call.
*
* @interface: Interface to probe
*
* Returns Number of ports on the interface. Zero to disable.
*/
/* See the kernel-doc above: report the number of NPI packet-engine
 * ports for the running Octeon model.  Always 0 when no PKO queues
 * are configured for PCI.  CN56XX/CN52XX pass 1 silicon lacks the
 * packet engines, hence the PASS1 exclusions. */
int __cvmx_helper_npi_probe(int interface)
{
#if CVMX_PKO_QUEUES_PER_PORT_PCI > 0
	if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX))
		return 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN56XX)
		 && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X))
		/* The packet engines didn't exist before pass 2 */
		return 4;
	else if (OCTEON_IS_MODEL(OCTEON_CN52XX)
		 && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X))
		/* The packet engines didn't exist before pass 2 */
		return 4;
#if 0
	/*
	 * Technically CN30XX, CN31XX, and CN50XX contain packet
	 * engines, but nobody ever uses them. Since this is the case,
	 * we disable them here.
	 */
	else if (OCTEON_IS_MODEL(OCTEON_CN31XX)
		 || OCTEON_IS_MODEL(OCTEON_CN50XX))
		return 2;
	else if (OCTEON_IS_MODEL(OCTEON_CN30XX))
		return 1;
#endif
#endif
	return 0;
}
/**
* Bringup and enable a NPI interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
/* See the kernel-doc above: bring up the NPI interface.  The only work
 * needed is disabling PIP min/max frame-length checks on newer chips so
 * runt (<64 byte) and jumbo frames are not flagged as errors.  Always
 * returns 0. */
int __cvmx_helper_npi_enable(int interface)
{
	/*
	 * On CN50XX, CN52XX, and CN56XX we need to disable length
	 * checking so packet < 64 bytes and jumbo frames don't get
	 * errors.
	 */
	if (!OCTEON_IS_MODEL(OCTEON_CN3XXX) &&
	    !OCTEON_IS_MODEL(OCTEON_CN58XX)) {
		int num_ports = cvmx_helper_ports_on_interface(interface);
		int port;
		for (port = 0; port < num_ports; port++) {
			union cvmx_pip_prt_cfgx port_cfg;
			int ipd_port =
			    cvmx_helper_get_ipd_port(interface, port);
			/* Read-modify-write the per-port PIP config CSR. */
			port_cfg.u64 =
			    cvmx_read_csr(CVMX_PIP_PRT_CFGX(ipd_port));
			port_cfg.s.maxerr_en = 0;
			port_cfg.s.minerr_en = 0;
			cvmx_write_csr(CVMX_PIP_PRT_CFGX(ipd_port),
				       port_cfg.u64);
		}
	}

	/* Enables are controlled by the remote host, so nothing to do here */
	return 0;
}
| gpl-2.0 |
Eliminater74/cm_android_kernel_g3_patched | arch/arm/mach-pxa/himalaya.c | 5004 | 4182 | /*
* linux/arch/arm/mach-pxa/himalaya.c
*
* Hardware definitions for the HTC Himalaya
*
* Based on 2.6.21-hh20's himalaya.c and himalaya_lcd.c
*
* Copyright (c) 2008 Zbynek Michl <Zbynek.Michl@seznam.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/fb.h>
#include <linux/platform_device.h>
#include <video/w100fb.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/pxa25x.h>
#include "generic.h"
/* ---------------------- Himalaya LCD definitions -------------------- */
/* ATI Imageon w100 general LCD controller register values shared by
 * all board revisions. */
static struct w100_gen_regs himalaya_lcd_regs = {
	.lcd_format =        0x00000003,
	.lcdd_cntl1 =        0x00000000,
	.lcdd_cntl2 =        0x0003ffff,
	.genlcd_cntl1 =      0x00fff003,
	.genlcd_cntl2 =      0x00000003,
	.genlcd_cntl3 =      0x000102aa,
};

/* 240x320 panel timings for boardid 0x4. */
static struct w100_mode himalaya4_lcd_mode = {
	.xres 		= 240,
	.yres 		= 320,
	.left_margin 	= 0,
	.right_margin 	= 31,
	.upper_margin 	= 15,
	.lower_margin 	= 0,
	.crtc_ss	= 0x80150014,
	.crtc_ls	= 0xa0fb00f7,
	.crtc_gs	= 0xc0080007,
	.crtc_vpos_gs	= 0x00080007,
	.crtc_rev	= 0x0000000a,
	.crtc_dclk	= 0x81700030,
	.crtc_gclk	= 0x8015010f,
	.crtc_goe	= 0x00000000,
	.pll_freq 	= 80,
	.pixclk_divider = 15,
	.pixclk_divider_rotated = 15,
	.pixclk_src     = CLK_SRC_PLL,
	.sysclk_divider = 0,
	.sysclk_src     = CLK_SRC_PLL,
};

/* 240x320 panel timings for boardid 0x6 (different margins and clocks). */
static struct w100_mode himalaya6_lcd_mode = {
	.xres 		= 240,
	.yres 		= 320,
	.left_margin 	= 9,
	.right_margin 	= 8,
	.upper_margin 	= 5,
	.lower_margin 	= 4,
	.crtc_ss	= 0x80150014,
	.crtc_ls	= 0xa0fb00f7,
	.crtc_gs	= 0xc0080007,
	.crtc_vpos_gs	= 0x00080007,
	.crtc_rev	= 0x0000000a,
	.crtc_dclk	= 0xa1700030,
	.crtc_gclk	= 0x8015010f,
	.crtc_goe	= 0x00000000,
	.pll_freq 	= 95,
	.pixclk_divider = 0xb,
	.pixclk_divider_rotated = 4,
	.pixclk_src     = CLK_SRC_PLL,
	.sysclk_divider = 1,
	.sysclk_src     = CLK_SRC_PLL,
};

/* w100 GPIO setup (direction/output-enable masks). */
static struct w100_gpio_regs himalaya_w100_gpio_info = {
	.init_data1 = 0xffff0000,	/* GPIO_DATA */
	.gpio_dir1  = 0x00000000,	/* GPIO_CNTL1 */
	.gpio_oe1   = 0x003c0000,	/* GPIO_CNTL2 */
	.init_data2 = 0x00000000,	/* GPIO_DATA2 */
	.gpio_dir2  = 0x00000000,	/* GPIO_CNTL3 */
	.gpio_oe2   = 0x00000000,	/* GPIO_CNTL4 */
};

/* w100fb platform data; .modelist is filled in by himalaya_lcd_init()
 * according to the board revision. */
static struct w100fb_mach_info himalaya_fb_info = {
	.num_modes  = 1,
	.regs       = &himalaya_lcd_regs,
	.gpio       = &himalaya_w100_gpio_info,
	.xtal_freq  = 16000000,
};

/* Physical address window of the w100 chip. */
static struct resource himalaya_fb_resources[] = {
	[0] = {
		.start	= 0x08000000,
		.end	= 0x08ffffff,
		.flags	= IORESOURCE_MEM,
	},
};

/* Framebuffer platform device bound to the w100fb driver. */
static struct platform_device himalaya_fb_device = {
	.name           = "w100fb",
	.id             = -1,
	.dev            = {
		.platform_data  = &himalaya_fb_info,
	},
	.num_resources  = ARRAY_SIZE(himalaya_fb_resources),
	.resource       = himalaya_fb_resources,
};
/* ----------------------------------------------------------------------- */
/* All board platform devices registered by himalaya_init(). */
static struct platform_device *devices[] __initdata = {
	&himalaya_fb_device,
};
/*
 * Select the LCD mode table matching the board revision and hook it
 * into the w100fb platform data.  The board id is hardcoded to 0x4
 * because real detection would need ASIC3 support; unknown ids fall
 * back to the 0x4 timings with a warning.
 */
static void __init himalaya_lcd_init(void)
{
	int boardid = 0x4; /* hardcoded (detection needs ASIC3 functions) */

	printk(KERN_INFO "himalaya LCD Driver init. boardid=%d\n",
	       boardid);

	if (boardid == 0x4) {
		himalaya_fb_info.modelist = &himalaya4_lcd_mode;
	} else if (boardid == 0x6) {
		himalaya_fb_info.modelist = &himalaya6_lcd_mode;
	} else {
		printk(KERN_INFO "himalaya lcd_init: unknown boardid=%d. Using 0x4\n",
		       boardid);
		himalaya_fb_info.modelist = &himalaya4_lcd_mode;
	}
}
/* Machine init: set up the PXA UARTs (no platform data needed), pick
 * the LCD mode, then register the board devices. */
static void __init himalaya_init(void)
{
	pxa_set_ffuart_info(NULL);
	pxa_set_btuart_info(NULL);
	pxa_set_stuart_info(NULL);
	himalaya_lcd_init();
	platform_add_devices(devices, ARRAY_SIZE(devices));
}
/* ARM machine descriptor for the HTC Himalaya (PXA25x based). */
MACHINE_START(HIMALAYA, "HTC Himalaya")
	.atag_offset = 0x100,
	.map_io = pxa25x_map_io,
	.nr_irqs = PXA_NR_IRQS,
	.init_irq = pxa25x_init_irq,
	.handle_irq = pxa25x_handle_irq,
	.init_machine = himalaya_init,
	.timer = &pxa_timer,
	.restart = pxa_restart,
MACHINE_END
| gpl-2.0 |
TeamNuclear/android_kernel_oneplus_msm8974 | fs/yaffs2/yaffs_mtdif1.c | 5004 | 9863 | /*
* YAFFS: Yet another FFS. A NAND-flash specific file system.
*
* Copyright (C) 2002-2010 Aleph One Ltd.
* for Toby Churchill Ltd and Brightstar Engineering
*
* Created by Charles Manning <charles@aleph1.co.uk>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/*
* This module provides the interface between yaffs_nand.c and the
* MTD API. This version is used when the MTD interface supports the
* 'mtd_oob_ops' style calls to read_oob and write_oob, circa 2.6.17,
* and we have small-page NAND device.
*
* These functions are invoked via function pointers in yaffs_nand.c.
* This replaces functionality provided by functions in yaffs_mtdif.c
* and the yaffs_tags compatability functions in yaffs_tagscompat.c that are
* called in yaffs_mtdif.c when the function pointers are NULL.
* We assume the MTD layer is performing ECC (use_nand_ecc is true).
*/
#include "yportenv.h"
#include "yaffs_trace.h"
#include "yaffs_guts.h"
#include "yaffs_packedtags1.h"
#include "yaffs_tagscompat.h" /* for yaffs_calc_tags_ecc */
#include "yaffs_linux.h"
#include "linux/kernel.h"
#include "linux/version.h"
#include "linux/types.h"
#include "linux/mtd/mtd.h"
#ifndef CONFIG_YAFFS_9BYTE_TAGS
# define YTAG1_SIZE 8
#else
# define YTAG1_SIZE 9
#endif
/* Write a chunk (page) of data to NAND.
*
* Caller always provides ExtendedTags data which are converted to a more
* compact (packed) form for storage in NAND. A mini-ECC runs over the
* contents of the tags meta-data; used to valid the tags when read.
*
* - Pack ExtendedTags to packed_tags1 form
* - Compute mini-ECC for packed_tags1
* - Write data and packed tags to NAND.
*
* Note: Due to the use of the packed_tags1 meta-data which does not include
* a full sequence number (as found in the larger packed_tags2 form) it is
* necessary for Yaffs to re-write a chunk/page (just once) to mark it as
* discarded and dirty. This is not ideal: newer NAND parts are supposed
* to be written just once. When Yaffs performs this operation, this
* function is called with a NULL data pointer -- calling MTD write_oob
* without data is valid usage (2.6.17).
*
* Any underlying MTD error results in YAFFS_FAIL.
* Returns YAFFS_OK or YAFFS_FAIL.
*/
/* See the block comment above for the full contract.  Packs the
 * extended tags into the small packed_tags1 form (plus mini-ECC) and
 * writes data+OOB in one MTD autoplaced-OOB operation.
 * Returns YAFFS_OK or YAFFS_FAIL. */
int nandmtd1_write_chunk_tags(struct yaffs_dev *dev,
			      int nand_chunk, const u8 * data,
			      const struct yaffs_ext_tags *etags)
{
	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
	int chunk_bytes = dev->data_bytes_per_chunk;
	loff_t addr = ((loff_t) nand_chunk) * chunk_bytes;
	struct mtd_oob_ops ops;
	struct yaffs_packed_tags1 pt1;
	int retval;

	/* we assume that packed_tags1 and struct yaffs_tags are compatible */
	compile_time_assertion(sizeof(struct yaffs_packed_tags1) == 12);
	compile_time_assertion(sizeof(struct yaffs_tags) == 8);

	yaffs_pack_tags1(&pt1, etags);
	yaffs_calc_tags_ecc((struct yaffs_tags *)&pt1);

	/* When deleting a chunk, the upper layer provides only skeletal
	 * etags, one with is_deleted set. However, we need to update the
	 * tags, not erase them completely. So we use the NAND write property
	 * that only zeroed-bits stick and set tag bytes to all-ones and
	 * zero just the (not) deleted bit.
	 */
#ifndef CONFIG_YAFFS_9BYTE_TAGS
	if (etags->is_deleted) {
		memset(&pt1, 0xff, 8);
		/* clear delete status bit to indicate deleted */
		pt1.deleted = 0;
	}
#else
	/* 9-byte variant: a separate page-status byte carries deletion. */
	((u8 *) & pt1)[8] = 0xff;
	if (etags->is_deleted) {
		memset(&pt1, 0xff, 8);
		/* zero page_status byte to indicate deleted */
		((u8 *) & pt1)[8] = 0;
	}
#endif

	memset(&ops, 0, sizeof(ops));
	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = (data) ? chunk_bytes : 0;	/* OOB-only write when data is NULL */
	ops.ooblen = YTAG1_SIZE;
	ops.datbuf = (u8 *) data;
	ops.oobbuf = (u8 *) & pt1;

	retval = mtd_write_oob(mtd, addr, &ops);
	if (retval) {
		yaffs_trace(YAFFS_TRACE_MTD,
			"write_oob failed, chunk %d, mtd error %d",
			nand_chunk, retval);
	}
	return retval ? YAFFS_FAIL : YAFFS_OK;
}
/* Return with empty ExtendedTags but add ecc_result.
*/
/* Reset *etags (if provided) to an all-zero "empty" state, record the
 * ECC outcome in it, and pass the caller-supplied return code through
 * unchanged — a convenience for the early-exit paths in the readers. */
static int rettags(struct yaffs_ext_tags *etags, int ecc_result, int retval)
{
	if (!etags)
		return retval;

	memset(etags, 0, sizeof(*etags));
	etags->ecc_result = ecc_result;
	return retval;
}
/* Read a chunk (page) from NAND.
*
* Caller expects ExtendedTags data to be usable even on error; that is,
* all members except ecc_result and block_bad are zeroed.
*
* - Check ECC results for data (if applicable)
* - Check for blank/erased block (return empty ExtendedTags if blank)
* - Check the packed_tags1 mini-ECC (correct if necessary/possible)
* - Convert packed_tags1 to ExtendedTags
* - Update ecc_result and block_bad members to refect state.
*
* Returns YAFFS_OK or YAFFS_FAIL.
*/
/* See the block comment above for the full contract.  Reads data+OOB,
 * classifies the MTD ECC outcome, detects blank chunks, verifies and
 * possibly repairs the tags mini-ECC, and unpacks the tags.
 * Returns YAFFS_OK or YAFFS_FAIL; etags is always usable on return. */
int nandmtd1_read_chunk_tags(struct yaffs_dev *dev,
			     int nand_chunk, u8 * data,
			     struct yaffs_ext_tags *etags)
{
	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
	int chunk_bytes = dev->data_bytes_per_chunk;
	loff_t addr = ((loff_t) nand_chunk) * chunk_bytes;
	int eccres = YAFFS_ECC_RESULT_NO_ERROR;
	struct mtd_oob_ops ops;
	struct yaffs_packed_tags1 pt1;
	int retval;
	int deleted;

	memset(&ops, 0, sizeof(ops));
	ops.mode = MTD_OPS_AUTO_OOB;
	ops.len = (data) ? chunk_bytes : 0;	/* OOB-only read when data is NULL */
	ops.ooblen = YTAG1_SIZE;
	ops.datbuf = data;
	ops.oobbuf = (u8 *) & pt1;

	/* Read page and oob using MTD.
	 * Check status and determine ECC result.
	 */
	retval = mtd_read_oob(mtd, addr, &ops);
	if (retval) {
		yaffs_trace(YAFFS_TRACE_MTD,
			"read_oob failed, chunk %d, mtd error %d",
			nand_chunk, retval);
	}

	switch (retval) {
	case 0:
		/* no error */
		break;

	case -EUCLEAN:
		/* MTD's ECC fixed the data */
		eccres = YAFFS_ECC_RESULT_FIXED;
		dev->n_ecc_fixed++;
		break;

	case -EBADMSG:
		/* MTD's ECC could not fix the data */
		dev->n_ecc_unfixed++;
		/* fall into... */
	default:
		/* Unknown MTD error: report unfixed ECC plus bad-block state. */
		rettags(etags, YAFFS_ECC_RESULT_UNFIXED, 0);
		etags->block_bad = mtd_block_isbad(mtd, addr);
		return YAFFS_FAIL;
	}

	/* Check for a blank/erased chunk.
	 */
	if (yaffs_check_ff((u8 *) & pt1, 8)) {
		/* when blank, upper layers want ecc_result to be <= NO_ERROR */
		return rettags(etags, YAFFS_ECC_RESULT_NO_ERROR, YAFFS_OK);
	}
#ifndef CONFIG_YAFFS_9BYTE_TAGS
	/* Read deleted status (bit) then return it to it's non-deleted
	 * state before performing tags mini-ECC check. pt1.deleted is
	 * inverted.
	 */
	deleted = !pt1.deleted;
	pt1.deleted = 1;
#else
	/* Majority vote over the page-status byte bits. */
	deleted = (yaffs_count_bits(((u8 *) & pt1)[8]) < 7);
#endif

	/* Check the packed tags mini-ECC and correct if necessary/possible.
	 */
	retval = yaffs_check_tags_ecc((struct yaffs_tags *)&pt1);
	switch (retval) {
	case 0:
		/* no tags error, use MTD result */
		break;
	case 1:
		/* recovered tags-ECC error */
		dev->n_tags_ecc_fixed++;
		if (eccres == YAFFS_ECC_RESULT_NO_ERROR)
			eccres = YAFFS_ECC_RESULT_FIXED;
		break;
	default:
		/* unrecovered tags-ECC error */
		dev->n_tags_ecc_unfixed++;
		return rettags(etags, YAFFS_ECC_RESULT_UNFIXED, YAFFS_FAIL);
	}

	/* Unpack the tags to extended form and set ECC result.
	 * [set should_be_ff just to keep yaffs_unpack_tags1 happy]
	 */
	pt1.should_be_ff = 0xFFFFFFFF;
	yaffs_unpack_tags1(etags, &pt1);
	etags->ecc_result = eccres;

	/* Set deleted state */
	etags->is_deleted = deleted;
	return YAFFS_OK;
}
/* Mark a block bad.
*
* This is a persistant state.
* Use of this function should be rare.
*
* Returns YAFFS_OK or YAFFS_FAIL.
*/
/* See the block comment above: persistently mark NAND block block_no
 * bad via MTD.  Returns YAFFS_OK or YAFFS_FAIL. */
int nandmtd1_mark_block_bad(struct yaffs_dev *dev, int block_no)
{
	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
	int blocksize = dev->param.chunks_per_block * dev->data_bytes_per_chunk;
	int retval;

	yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
		"marking block %d bad", block_no);

	/* Convert block number to a byte offset for the MTD API. */
	retval = mtd_block_markbad(mtd, (loff_t) blocksize * block_no);
	return (retval) ? YAFFS_FAIL : YAFFS_OK;
}
/* Check any MTD prerequists.
*
* Returns YAFFS_OK or YAFFS_FAIL.
*/
/* Verify the MTD device exposes enough free OOB bytes to hold our
 * packed tags.  Returns YAFFS_OK or YAFFS_FAIL. */
static int nandmtd1_test_prerequists(struct mtd_info *mtd)
{
	/* 2.6.18 has mtd->ecclayout->oobavail */
	/* 2.6.21 has mtd->ecclayout->oobavail and mtd->oobavail */
	int oobavail = mtd->ecclayout->oobavail;

	if (oobavail < YTAG1_SIZE) {
		yaffs_trace(YAFFS_TRACE_ERROR,
			"mtd device has only %d bytes for tags, need %d",
			oobavail, YTAG1_SIZE);
		return YAFFS_FAIL;
	}
	return YAFFS_OK;
}
/* Query for the current state of a specific block.
*
* Examine the tags of the first chunk of the block and return the state:
* - YAFFS_BLOCK_STATE_DEAD, the block is marked bad
* - YAFFS_BLOCK_STATE_NEEDS_SCANNING, the block is in use
* - YAFFS_BLOCK_STATE_EMPTY, the block is clean
*
* Always returns YAFFS_OK.
*/
/* See the block comment above: classify a block by inspecting the tags
 * of its first chunk.  Writes the state and (when in use) the sequence
 * number through the out-pointers.  Returns YAFFS_OK, or YAFFS_FAIL
 * only if the MTD OOB layout cannot hold our tags. */
int nandmtd1_query_block(struct yaffs_dev *dev, int block_no,
			 enum yaffs_block_state *state_ptr, u32 * seq_ptr)
{
	struct mtd_info *mtd = yaffs_dev_to_mtd(dev);
	int chunk_num = block_no * dev->param.chunks_per_block;
	loff_t addr = (loff_t) chunk_num * dev->data_bytes_per_chunk;
	struct yaffs_ext_tags etags;
	int state = YAFFS_BLOCK_STATE_DEAD;
	int seqnum = 0;
	int retval;

	/* We don't yet have a good place to test for MTD config prerequists.
	 * Do it here as we are called during the initial scan.
	 */
	if (nandmtd1_test_prerequists(mtd) != YAFFS_OK)
		return YAFFS_FAIL;

	/* Read tags of the first chunk only (no data buffer). */
	retval = nandmtd1_read_chunk_tags(dev, chunk_num, NULL, &etags);
	etags.block_bad = mtd_block_isbad(mtd, addr);
	if (etags.block_bad) {
		yaffs_trace(YAFFS_TRACE_BAD_BLOCKS,
			"block %d is marked bad", block_no);
		state = YAFFS_BLOCK_STATE_DEAD;
	} else if (etags.ecc_result != YAFFS_ECC_RESULT_NO_ERROR) {
		/* bad tags, need to look more closely */
		state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
	} else if (etags.chunk_used) {
		state = YAFFS_BLOCK_STATE_NEEDS_SCANNING;
		seqnum = etags.seq_number;
	} else {
		state = YAFFS_BLOCK_STATE_EMPTY;
	}

	*state_ptr = state;
	*seq_ptr = seqnum;

	/* query always succeeds */
	return YAFFS_OK;
}
| gpl-2.0 |
VanirAOSP/kernel_google_msm | drivers/media/video/pvrusb2/pvrusb2-devattr.c | 5004 | 17061 | /*
*
*
* Copyright (C) 2007 Mike Isely <isely@pobox.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
This source file should encompass ALL per-device type information for the
driver. To define a new device, add elements to the pvr2_device_table and
pvr2_device_desc structures.
*/
#include "pvrusb2-devattr.h"
#include <linux/usb.h>
#include <linux/module.h>
/* This is needed in order to pull in tuner type ids... */
#include <linux/i2c.h>
#include <media/tuner.h>
#ifdef CONFIG_VIDEO_PVRUSB2_DVB
#include "pvrusb2-hdw-internal.h"
#include "lgdt330x.h"
#include "s5h1409.h"
#include "s5h1411.h"
#include "tda10048.h"
#include "tda18271.h"
#include "tda8290.h"
#include "tuner-simple.h"
#endif
/*------------------------------------------------------------------------*/
/* Hauppauge PVR-USB2 Model 29xxx */
/* I2C client modules present on the 29xxx board. */
static const struct pvr2_device_client_desc pvr2_cli_29xxx[] = {
	{ .module_id = PVR2_CLIENT_ID_SAA7115 },
	{ .module_id = PVR2_CLIENT_ID_MSP3400 },
	{ .module_id = PVR2_CLIENT_ID_TUNER },
	{ .module_id = PVR2_CLIENT_ID_DEMOD },
};

/* FX2 microcontroller firmware candidates for the 29xxx. */
static const char *pvr2_fw1_names_29xxx[] = {
		"v4l-pvrusb2-29xxx-01.fw",
};

/* Device attributes for the Hauppauge WinTV PVR USB2 Model 29xxx. */
static const struct pvr2_device_desc pvr2_device_29xxx = {
		.description = "WinTV PVR USB2 Model 29xxx",
		.shortname = "29xxx",
		.client_table.lst = pvr2_cli_29xxx,
		.client_table.cnt = ARRAY_SIZE(pvr2_cli_29xxx),
		.fx2_firmware.lst = pvr2_fw1_names_29xxx,
		.fx2_firmware.cnt = ARRAY_SIZE(pvr2_fw1_names_29xxx),
		.flag_has_hauppauge_rom = !0,
		.flag_has_analogtuner = !0,
		.flag_has_fmradio = !0,
		.flag_has_composite = !0,
		.flag_has_svideo = !0,
		.signal_routing_scheme = PVR2_ROUTING_SCHEME_HAUPPAUGE,
		.led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
		.ir_scheme = PVR2_IR_SCHEME_29XXX,
};

/* I2C client modules present on the 24xxx board. */
static const struct pvr2_device_client_desc pvr2_cli_24xxx[] = {
	{ .module_id = PVR2_CLIENT_ID_CX25840 },
	{ .module_id = PVR2_CLIENT_ID_TUNER },
	{ .module_id = PVR2_CLIENT_ID_WM8775 },
	{ .module_id = PVR2_CLIENT_ID_DEMOD },
};

/* FX2 microcontroller firmware candidates for the 24xxx. */
static const char *pvr2_fw1_names_24xxx[] = {
		"v4l-pvrusb2-24xxx-01.fw",
};

/* Device attributes for the Hauppauge WinTV PVR USB2 Model 24xxx. */
static const struct pvr2_device_desc pvr2_device_24xxx = {
		.description = "WinTV PVR USB2 Model 24xxx",
		.shortname = "24xxx",
		.client_table.lst = pvr2_cli_24xxx,
		.client_table.cnt = ARRAY_SIZE(pvr2_cli_24xxx),
		.fx2_firmware.lst = pvr2_fw1_names_24xxx,
		.fx2_firmware.cnt = ARRAY_SIZE(pvr2_fw1_names_24xxx),
		.flag_has_cx25840 = !0,
		.flag_has_wm8775 = !0,
		.flag_has_hauppauge_rom = !0,
		.flag_has_analogtuner = !0,
		.flag_has_fmradio = !0,
		.flag_has_composite = !0,
		.flag_has_svideo = !0,
		.signal_routing_scheme = PVR2_ROUTING_SCHEME_HAUPPAUGE,
		.led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
		.ir_scheme = PVR2_IR_SCHEME_24XXX,
};

/* I2C client modules shared by both GOTVIEW boards. */
static const struct pvr2_device_client_desc pvr2_cli_gotview_2[] = {
	{ .module_id = PVR2_CLIENT_ID_CX25840 },
	{ .module_id = PVR2_CLIENT_ID_TUNER },
	{ .module_id = PVR2_CLIENT_ID_DEMOD },
};

/* Device attributes for the GOTVIEW USB2.0 DVD2. */
static const struct pvr2_device_desc pvr2_device_gotview_2 = {
		.description = "Gotview USB 2.0 DVD 2",
		.shortname = "gv2",
		.client_table.lst = pvr2_cli_gotview_2,
		.client_table.cnt = ARRAY_SIZE(pvr2_cli_gotview_2),
		.flag_has_cx25840 = !0,
		.default_tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
		.flag_has_analogtuner = !0,
		.flag_has_fmradio = !0,
		.flag_has_composite = !0,
		.flag_has_svideo = !0,
		.signal_routing_scheme = PVR2_ROUTING_SCHEME_GOTVIEW,
};

/* Device attributes for the GOTVIEW USB2.0 DVD Deluxe.
 * (same module list as gotview_2; no FM radio) */
static const struct pvr2_device_desc pvr2_device_gotview_2d = {
		.description = "Gotview USB 2.0 DVD Deluxe",
		.shortname = "gv2d",
		.client_table.lst = pvr2_cli_gotview_2,
		.client_table.cnt = ARRAY_SIZE(pvr2_cli_gotview_2),
		.flag_has_cx25840 = !0,
		.default_tuner_type = TUNER_PHILIPS_FM1216ME_MK3,
		.flag_has_analogtuner = !0,
		.flag_has_composite = !0,
		.flag_has_svideo = !0,
		.signal_routing_scheme = PVR2_ROUTING_SCHEME_GOTVIEW,
};

/* I2C client modules present on the Terratec Grabster AV400. */
static const struct pvr2_device_client_desc pvr2_cli_av400[] = {
	{ .module_id = PVR2_CLIENT_ID_CX25840 },
};

/* Device attributes for the Terratec Grabster AV400 (no tuner,
 * composite/s-video capture only; still marked experimental). */
static const struct pvr2_device_desc pvr2_device_av400 = {
		.description = "Terratec Grabster AV400",
		.shortname = "av400",
		.flag_is_experimental = 1,
		.client_table.lst = pvr2_cli_av400,
		.client_table.cnt = ARRAY_SIZE(pvr2_cli_av400),
		.flag_has_cx25840 = !0,
		.flag_has_analogtuner = 0,
		.flag_has_composite = !0,
		.flag_has_svideo = !0,
		.signal_routing_scheme = PVR2_ROUTING_SCHEME_AV400,
};
/*------------------------------------------------------------------------*/
/* OnAir Creator */
#ifdef CONFIG_VIDEO_PVRUSB2_DVB
/* LGDT3303 demodulator configuration (OnAir Creator). */
static struct lgdt330x_config pvr2_lgdt3303_config = {
	.demod_address          = 0x0e,
	.demod_chip             = LGDT3303,
	.clock_polarity_flip    = 1,
};
/*
 * Attach the LGDT3303 demodulator frontend for the OnAir Creator.
 * Returns 0 on success, -EIO if the demod could not be attached.
 */
static int pvr2_lgdt3303_attach(struct pvr2_dvb_adapter *adap)
{
	struct i2c_adapter *i2c = &adap->channel.hdw->i2c_adap;

	adap->fe = dvb_attach(lgdt330x_attach, &pvr2_lgdt3303_config, i2c);
	return adap->fe ? 0 : -EIO;
}
/*
 * Attach the LG TDVS-H06xF tuner to the already-attached frontend.
 * Always reports success; a failed dvb_attach() simply leaves the
 * frontend without a tuner.
 */
static int pvr2_lgh06xf_attach(struct pvr2_dvb_adapter *adap)
{
	dvb_attach(simple_tuner_attach, adap->fe,
		   &adap->channel.hdw->i2c_adap, 0x61,
		   TUNER_LG_TDVS_H06XF);
	return 0;
}
/* DVB attach hooks for the OnAir Creator. */
static const struct pvr2_dvb_props pvr2_onair_creator_fe_props = {
	.frontend_attach = pvr2_lgdt3303_attach,
	.tuner_attach = pvr2_lgh06xf_attach,
};
#endif
/* i2c client modules loaded for the OnAir Creator. */
static const struct pvr2_device_client_desc pvr2_cli_onair_creator[] = {
	{ .module_id = PVR2_CLIENT_ID_SAA7115 },
	{ .module_id = PVR2_CLIENT_ID_CS53L32A },
	{ .module_id = PVR2_CLIENT_ID_TUNER },
};
/* Device description for the OnAir Creator hybrid (analog+ATSC) tuner. */
static const struct pvr2_device_desc pvr2_device_onair_creator = {
	.description = "OnAir Creator Hybrid USB tuner",
	.shortname = "oac",
	.client_table.lst = pvr2_cli_onair_creator,
	.client_table.cnt = ARRAY_SIZE(pvr2_cli_onair_creator),
	.default_tuner_type = TUNER_LG_TDVS_H06XF,
	.flag_has_analogtuner = !0,
	.flag_has_composite = !0,
	.flag_has_svideo = !0,
	.flag_digital_requires_cx23416 = !0,
	.signal_routing_scheme = PVR2_ROUTING_SCHEME_ONAIR,
	.digital_control_scheme = PVR2_DIGITAL_SCHEME_ONAIR,
	.default_std_mask = V4L2_STD_NTSC_M,
#ifdef CONFIG_VIDEO_PVRUSB2_DVB
	.dvb_props = &pvr2_onair_creator_fe_props,
#endif
};
/*------------------------------------------------------------------------*/
/* OnAir USB 2.0 */
#ifdef CONFIG_VIDEO_PVRUSB2_DVB
/* Demodulator configuration for the OnAir USB2's LGDT3302. */
static struct lgdt330x_config pvr2_lgdt3302_config = {
	.demod_address = 0x0e,	/* i2c address of the demodulator */
	.demod_chip = LGDT3302,
};
/*
 * Attach the LGDT3302 demodulator frontend for the OnAir USB2.
 * Returns 0 on success, -EIO if the demod could not be attached.
 */
static int pvr2_lgdt3302_attach(struct pvr2_dvb_adapter *adap)
{
	struct i2c_adapter *i2c = &adap->channel.hdw->i2c_adap;

	adap->fe = dvb_attach(lgdt330x_attach, &pvr2_lgdt3302_config, i2c);
	return adap->fe ? 0 : -EIO;
}
/*
 * Attach the Philips FCV1236D tuner to the already-attached frontend.
 * Always reports success; a failed dvb_attach() simply leaves the
 * frontend without a tuner.
 */
static int pvr2_fcv1236d_attach(struct pvr2_dvb_adapter *adap)
{
	dvb_attach(simple_tuner_attach, adap->fe,
		   &adap->channel.hdw->i2c_adap, 0x61,
		   TUNER_PHILIPS_FCV1236D);
	return 0;
}
/* DVB attach hooks for the OnAir USB2. */
static const struct pvr2_dvb_props pvr2_onair_usb2_fe_props = {
	.frontend_attach = pvr2_lgdt3302_attach,
	.tuner_attach = pvr2_fcv1236d_attach,
};
#endif
/* i2c client modules loaded for the OnAir USB2. */
static const struct pvr2_device_client_desc pvr2_cli_onair_usb2[] = {
	{ .module_id = PVR2_CLIENT_ID_SAA7115 },
	{ .module_id = PVR2_CLIENT_ID_CS53L32A },
	{ .module_id = PVR2_CLIENT_ID_TUNER },
};
/* Device description for the OnAir USB2 hybrid (analog+ATSC) tuner. */
static const struct pvr2_device_desc pvr2_device_onair_usb2 = {
	.description = "OnAir USB2 Hybrid USB tuner",
	.shortname = "oa2",
	.client_table.lst = pvr2_cli_onair_usb2,
	.client_table.cnt = ARRAY_SIZE(pvr2_cli_onair_usb2),
	.default_tuner_type = TUNER_PHILIPS_FCV1236D,
	.flag_has_analogtuner = !0,
	.flag_has_composite = !0,
	.flag_has_svideo = !0,
	.flag_digital_requires_cx23416 = !0,
	.signal_routing_scheme = PVR2_ROUTING_SCHEME_ONAIR,
	.digital_control_scheme = PVR2_DIGITAL_SCHEME_ONAIR,
	.default_std_mask = V4L2_STD_NTSC_M,
#ifdef CONFIG_VIDEO_PVRUSB2_DVB
	.dvb_props = &pvr2_onair_usb2_fe_props,
#endif
};
/*------------------------------------------------------------------------*/
/* Hauppauge PVR-USB2 Model 73xxx */
#ifdef CONFIG_VIDEO_PVRUSB2_DVB
/* TDA10048 DVB-T demodulator configuration for the HVR-1900 (73xxx). */
static struct tda10048_config hauppauge_tda10048_config = {
	.demod_address = 0x10 >> 1,	/* 7-bit form of 8-bit address 0x10 */
	.output_mode = TDA10048_PARALLEL_OUTPUT,
	.fwbulkwritelen = TDA10048_BULKWRITE_50,
	.inversion = TDA10048_INVERSION_ON,
	/* IF frequencies per DVB-T channel bandwidth */
	.dtv6_if_freq_khz = TDA10048_IF_3300,
	.dtv7_if_freq_khz = TDA10048_IF_3800,
	.dtv8_if_freq_khz = TDA10048_IF_4300,
	.clk_freq_khz = TDA10048_CLK_16000,
	.disable_gate_access = 1,
};
/* Skip tuner probing; the tda18271 behind the tda8295 is set up below. */
static struct tda829x_config tda829x_no_probe = {
	.probe_tuner = TDA829X_DONT_PROBE,
};
/* Per-standard IF/AGC settings matching the tda10048 IFs above. */
static struct tda18271_std_map hauppauge_tda18271_dvbt_std_map = {
	.dvbt_6 = { .if_freq = 3300, .agc_mode = 3, .std = 4,
		    .if_lvl = 1, .rfagc_top = 0x37, },
	.dvbt_7 = { .if_freq = 3800, .agc_mode = 3, .std = 5,
		    .if_lvl = 1, .rfagc_top = 0x37, },
	.dvbt_8 = { .if_freq = 4300, .agc_mode = 3, .std = 6,
		    .if_lvl = 1, .rfagc_top = 0x37, },
};
static struct tda18271_config hauppauge_tda18271_dvb_config = {
	.std_map = &hauppauge_tda18271_dvbt_std_map,
	.gate = TDA18271_GATE_ANALOG,
	.output_opt = TDA18271_OUTPUT_LT_OFF,
};
/*
 * Attach the TDA10048 DVB-T demodulator frontend (73xxx devices).
 * Returns 0 on success, -EIO if the demod could not be attached.
 */
static int pvr2_tda10048_attach(struct pvr2_dvb_adapter *adap)
{
	struct i2c_adapter *i2c = &adap->channel.hdw->i2c_adap;

	adap->fe = dvb_attach(tda10048_attach, &hauppauge_tda10048_config,
			      i2c);
	return adap->fe ? 0 : -EIO;
}
/*
 * Attach the tda8295 analog demod (via tda829x) followed by the
 * tda18271 tuner for 73xxx devices.  Attach order matters: the
 * tda829x must be in place before the tuner.  Always returns 0.
 */
static int pvr2_73xxx_tda18271_8295_attach(struct pvr2_dvb_adapter *adap)
{
	dvb_attach(tda829x_attach, adap->fe,
		   &adap->channel.hdw->i2c_adap, 0x42,
		   &tda829x_no_probe);
	dvb_attach(tda18271_attach, adap->fe, 0x60,
		   &adap->channel.hdw->i2c_adap,
		   &hauppauge_tda18271_dvb_config);
	return 0;
}
/* DVB attach hooks for 73xxx devices. */
static const struct pvr2_dvb_props pvr2_73xxx_dvb_props = {
	.frontend_attach = pvr2_tda10048_attach,
	.tuner_attach = pvr2_73xxx_tda18271_8295_attach,
};
#endif
/* i2c client modules for 73xxx (also reused by the 75xxx entries below). */
static const struct pvr2_device_client_desc pvr2_cli_73xxx[] = {
	{ .module_id = PVR2_CLIENT_ID_CX25840 },
	{ .module_id = PVR2_CLIENT_ID_TUNER,
	  .i2c_address_list = "\x42"},
};
/* FX2 firmware image(s) to load for 73xxx. */
static const char *pvr2_fw1_names_73xxx[] = {
		"v4l-pvrusb2-73xxx-01.fw",
};
static const struct pvr2_device_desc pvr2_device_73xxx = {
	.description = "WinTV HVR-1900 Model 73xxx",
	.shortname = "73xxx",
	.client_table.lst = pvr2_cli_73xxx,
	.client_table.cnt = ARRAY_SIZE(pvr2_cli_73xxx),
	.fx2_firmware.lst = pvr2_fw1_names_73xxx,
	.fx2_firmware.cnt = ARRAY_SIZE(pvr2_fw1_names_73xxx),
	.flag_has_cx25840 = !0,
	.flag_has_hauppauge_rom = !0,	/* tuner info read from eeprom */
	.flag_has_analogtuner = !0,
	.flag_has_composite = !0,
	.flag_has_svideo = !0,
	.flag_fx2_16kb = !0,		/* 16KB FX2 firmware image */
	.signal_routing_scheme = PVR2_ROUTING_SCHEME_HAUPPAUGE,
	.digital_control_scheme = PVR2_DIGITAL_SCHEME_HAUPPAUGE,
	.led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
	.ir_scheme = PVR2_IR_SCHEME_ZILOG,
#ifdef CONFIG_VIDEO_PVRUSB2_DVB
	.dvb_props = &pvr2_73xxx_dvb_props,
#endif
};
/*------------------------------------------------------------------------*/
/* Hauppauge PVR-USB2 Model 75xxx */
#ifdef CONFIG_VIDEO_PVRUSB2_DVB
/* S5H1409 ATSC/QAM demodulator configuration (750xx devices). */
static struct s5h1409_config pvr2_s5h1409_config = {
	.demod_address = 0x32 >> 1,	/* 7-bit form of 8-bit address 0x32 */
	.output_mode = S5H1409_PARALLEL_OUTPUT,
	.gpio = S5H1409_GPIO_OFF,
	.qam_if = 4000,
	.inversion = S5H1409_INVERSION_ON,
	.status_mode = S5H1409_DEMODLOCKING,
};
/* S5H1411 ATSC/QAM demodulator configuration (751xx devices). */
static struct s5h1411_config pvr2_s5h1411_config = {
	.output_mode = S5H1411_PARALLEL_OUTPUT,
	.gpio = S5H1411_GPIO_OFF,
	.vsb_if = S5H1411_IF_44000,
	.qam_if = S5H1411_IF_4000,
	.inversion = S5H1411_INVERSION_ON,
	.status_mode = S5H1411_DEMODLOCKING,
};
/* Per-standard IF/AGC settings for the tda18271 on 75xxx devices. */
static struct tda18271_std_map hauppauge_tda18271_std_map = {
	.atsc_6 = { .if_freq = 5380, .agc_mode = 3, .std = 3,
		    .if_lvl = 6, .rfagc_top = 0x37, },
	.qam_6 = { .if_freq = 4000, .agc_mode = 3, .std = 0,
		   .if_lvl = 6, .rfagc_top = 0x37, },
};
static struct tda18271_config hauppauge_tda18271_config = {
	.std_map = &hauppauge_tda18271_std_map,
	.gate = TDA18271_GATE_ANALOG,
	.output_opt = TDA18271_OUTPUT_LT_OFF,
};
/*
 * Attach the S5H1409 demodulator frontend (750xx devices).
 * Returns 0 on success, -EIO if the demod could not be attached.
 */
static int pvr2_s5h1409_attach(struct pvr2_dvb_adapter *adap)
{
	struct i2c_adapter *i2c = &adap->channel.hdw->i2c_adap;

	adap->fe = dvb_attach(s5h1409_attach, &pvr2_s5h1409_config, i2c);
	return adap->fe ? 0 : -EIO;
}
/*
 * Attach the S5H1411 demodulator frontend (751xx devices).
 * Returns 0 on success, -EIO if the demod could not be attached.
 */
static int pvr2_s5h1411_attach(struct pvr2_dvb_adapter *adap)
{
	struct i2c_adapter *i2c = &adap->channel.hdw->i2c_adap;

	adap->fe = dvb_attach(s5h1411_attach, &pvr2_s5h1411_config, i2c);
	return adap->fe ? 0 : -EIO;
}
/*
 * Attach the tda8295 analog demod (via tda829x) followed by the
 * tda18271 tuner for 75xxx devices.  Always returns 0.
 */
static int pvr2_tda18271_8295_attach(struct pvr2_dvb_adapter *adap)
{
	dvb_attach(tda829x_attach, adap->fe,
		   &adap->channel.hdw->i2c_adap, 0x42,
		   &tda829x_no_probe);
	dvb_attach(tda18271_attach, adap->fe, 0x60,
		   &adap->channel.hdw->i2c_adap,
		   &hauppauge_tda18271_config);
	return 0;
}
/* DVB attach hooks: 750xx uses the s5h1409, 751xx the s5h1411. */
static const struct pvr2_dvb_props pvr2_750xx_dvb_props = {
	.frontend_attach = pvr2_s5h1409_attach,
	.tuner_attach = pvr2_tda18271_8295_attach,
};
static const struct pvr2_dvb_props pvr2_751xx_dvb_props = {
	.frontend_attach = pvr2_s5h1411_attach,
	.tuner_attach = pvr2_tda18271_8295_attach,
};
/* 75xxx shares the 73xxx FX2 firmware image. */
static const char *pvr2_fw1_names_75xxx[] = {
		"v4l-pvrusb2-73xxx-01.fw",
};
/* HVR-1950 model 750xx: identical to 751xx except for the demod chip. */
static const struct pvr2_device_desc pvr2_device_750xx = {
	.description = "WinTV HVR-1950 Model 750xx",
	.shortname = "750xx",
	.client_table.lst = pvr2_cli_73xxx,
	.client_table.cnt = ARRAY_SIZE(pvr2_cli_73xxx),
	.fx2_firmware.lst = pvr2_fw1_names_75xxx,
	.fx2_firmware.cnt = ARRAY_SIZE(pvr2_fw1_names_75xxx),
	.flag_has_cx25840 = !0,
	.flag_has_hauppauge_rom = !0,
	.flag_has_analogtuner = !0,
	.flag_has_composite = !0,
	.flag_has_svideo = !0,
	.flag_fx2_16kb = !0,
	.signal_routing_scheme = PVR2_ROUTING_SCHEME_HAUPPAUGE,
	.digital_control_scheme = PVR2_DIGITAL_SCHEME_HAUPPAUGE,
	.default_std_mask = V4L2_STD_NTSC_M,
	.led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
	.ir_scheme = PVR2_IR_SCHEME_ZILOG,
#ifdef CONFIG_VIDEO_PVRUSB2_DVB
	.dvb_props = &pvr2_750xx_dvb_props,
#endif
};
/* HVR-1950 model 751xx: same as 750xx but with the s5h1411 demod. */
static const struct pvr2_device_desc pvr2_device_751xx = {
	.description = "WinTV HVR-1950 Model 751xx",
	.shortname = "751xx",
	.client_table.lst = pvr2_cli_73xxx,
	.client_table.cnt = ARRAY_SIZE(pvr2_cli_73xxx),
	.fx2_firmware.lst = pvr2_fw1_names_75xxx,
	.fx2_firmware.cnt = ARRAY_SIZE(pvr2_fw1_names_75xxx),
	.flag_has_cx25840 = !0,
	.flag_has_hauppauge_rom = !0,
	.flag_has_analogtuner = !0,
	.flag_has_composite = !0,
	.flag_has_svideo = !0,
	.flag_fx2_16kb = !0,
	.signal_routing_scheme = PVR2_ROUTING_SCHEME_HAUPPAUGE,
	.digital_control_scheme = PVR2_DIGITAL_SCHEME_HAUPPAUGE,
	.default_std_mask = V4L2_STD_NTSC_M,
	.led_scheme = PVR2_LED_SCHEME_HAUPPAUGE,
	.ir_scheme = PVR2_IR_SCHEME_ZILOG,
#ifdef CONFIG_VIDEO_PVRUSB2_DVB
	.dvb_props = &pvr2_751xx_dvb_props,
#endif
};
/*------------------------------------------------------------------------*/
/*
 * USB vendor/product IDs handled by this driver.  driver_info points
 * at the pvr2_device_desc describing each device.
 */
struct usb_device_id pvr2_device_table[] = {
	{ USB_DEVICE(0x2040, 0x2900),
	  .driver_info = (kernel_ulong_t)&pvr2_device_29xxx},
	{ USB_DEVICE(0x2040, 0x2950), /* Logically identical to 2900 */
	  .driver_info = (kernel_ulong_t)&pvr2_device_29xxx},
	{ USB_DEVICE(0x2040, 0x2400),
	  .driver_info = (kernel_ulong_t)&pvr2_device_24xxx},
	{ USB_DEVICE(0x1164, 0x0622),
	  .driver_info = (kernel_ulong_t)&pvr2_device_gotview_2},
	{ USB_DEVICE(0x1164, 0x0602),
	  .driver_info = (kernel_ulong_t)&pvr2_device_gotview_2d},
	{ USB_DEVICE(0x11ba, 0x1003),
	  .driver_info = (kernel_ulong_t)&pvr2_device_onair_creator},
	{ USB_DEVICE(0x11ba, 0x1001),
	  .driver_info = (kernel_ulong_t)&pvr2_device_onair_usb2},
	{ USB_DEVICE(0x2040, 0x7300),
	  .driver_info = (kernel_ulong_t)&pvr2_device_73xxx},
	{ USB_DEVICE(0x2040, 0x7500),
	  .driver_info = (kernel_ulong_t)&pvr2_device_750xx},
	{ USB_DEVICE(0x2040, 0x7501),
	  .driver_info = (kernel_ulong_t)&pvr2_device_751xx},
	{ USB_DEVICE(0x0ccd, 0x0039),
	  .driver_info = (kernel_ulong_t)&pvr2_device_av400},
	{ }	/* terminator */
};
MODULE_DEVICE_TABLE(usb, pvr2_device_table);
/*
Stuff for Emacs to see, in order to encourage consistent editing style:
*** Local Variables: ***
*** mode: c ***
*** fill-column: 75 ***
*** tab-width: 8 ***
*** c-basic-offset: 8 ***
*** End: ***
*/
| gpl-2.0 |
atl4ntis/kernel_msm | drivers/gpu/drm/nouveau/nvc0_pm.c | 5260 | 9847 | /*
* Copyright 2011 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_pm.h"
static u32 read_div(struct drm_device *, int, u32, u32);
static u32 read_pll(struct drm_device *, u32);
/*
 * Report the frequency of whichever reference VCO feeds the divider
 * whose source register is @dsrc: bit 8 of that register selects
 * between the PLLs at 0xe800 and 0xe820.
 */
static u32
read_vco(struct drm_device *dev, u32 dsrc)
{
	u32 src = nv_rd32(dev, dsrc);

	return read_pll(dev, (src & 0x00000100) ? 0x00e820 : 0x00e800);
}
/*
 * Decode a PLL's current output frequency (kHz) from its CTRL/COEF
 * register pair at @pll.  Returns 0 when the PLL is disabled or the
 * offset is not one of the known PLL blocks.
 */
static u32
read_pll(struct drm_device *dev, u32 pll)
{
	u32 ctrl = nv_rd32(dev, pll + 0);
	u32 coef = nv_rd32(dev, pll + 4);
	u32 P = (coef & 0x003f0000) >> 16;	/* post divider */
	u32 N = (coef & 0x0000ff00) >> 8;	/* feedback multiplier */
	u32 M = (coef & 0x000000ff) >> 0;	/* reference divider */
	u32 sclk, doff;

	if (!(ctrl & 0x00000001))	/* PLL disabled */
		return 0;

	/* determine the reference clock feeding this PLL block */
	switch (pll & 0xfff000) {
	case 0x00e000:
		sclk = 27000;	/* crystal; P not applicable here */
		P = 1;
		break;
	case 0x137000:
		doff = (pll - 0x137000) / 0x20;
		sclk = read_div(dev, doff, 0x137120, 0x137140);
		break;
	case 0x132000:
		/* memory PLLs are chained: 0x132000 is fed by 0x132020 */
		switch (pll) {
		case 0x132000:
			sclk = read_pll(dev, 0x132020);
			break;
		case 0x132020:
			sclk = read_div(dev, 0, 0x137320, 0x137330);
			break;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return sclk * N / M / P;
}
/*
 * Report the output frequency (kHz) of divider @doff, given its
 * source-select register base @dsrc and divider-control base @dctl.
 * Sources 0/2 are fixed references; source 3 is the VCO, optionally
 * post-divided when bit 31 of the control register is set.
 */
static u32
read_div(struct drm_device *dev, int doff, u32 dsrc, u32 dctl)
{
	u32 src = nv_rd32(dev, dsrc + (doff * 4));
	u32 ctl = nv_rd32(dev, dctl + (doff * 4));
	u32 vco;

	switch (src & 0x00000003) {
	case 0:
		/* crystal: strap bits select 108MHz, otherwise 27MHz */
		return ((src & 0x00030000) == 0x00030000) ? 108000 : 27000;
	case 2:
		return 100000;
	case 3:
		vco = read_vco(dev, dsrc + (doff * 4));
		if (ctl & 0x80000000)
			return (vco * 2) / ((ctl & 0x0000003f) + 2);
		return vco;
	default:
		return 0;
	}
}
/*
 * Report the current memory clock (kHz).  Bit 0 of 0x1373f0 selects
 * between the bypass divider and the memory PLL.
 */
static u32
read_mem(struct drm_device *dev)
{
	if (nv_rd32(dev, 0x1373f0) & 0x00000001)
		return read_div(dev, 0, 0x137300, 0x137310);
	return read_pll(dev, 0x132000);
}
/*
 * Report the current frequency (kHz) of engine clock @clk.  Bit @clk
 * of 0x137100 selects PLL mode vs divider mode; clocks >= 7 share
 * the PLL at 0x1370e0 (see the clock-tree comment further down).
 */
static u32
read_clk(struct drm_device *dev, int clk)
{
	u32 sctl = nv_rd32(dev, 0x137250 + (clk * 4));
	u32 ssel = nv_rd32(dev, 0x137100);
	u32 sclk, sdiv;

	if (ssel & (1 << clk)) {
		/* PLL mode */
		if (clk < 7)
			sclk = read_pll(dev, 0x137000 + (clk * 0x20));
		else
			sclk = read_pll(dev, 0x1370e0);
		sdiv = ((sctl & 0x00003f00) >> 8) + 2;
	} else {
		/* divider-only mode */
		sclk = read_div(dev, clk, 0x137160, 0x1371d0);
		sdiv = ((sctl & 0x0000003f) >> 0) + 2;
	}

	if (sctl & 0x80000000)	/* final divider enabled */
		return (sclk * 2) / sdiv;

	return sclk;
}
/*
 * Fill @perflvl with the clock frequencies currently programmed in
 * hardware.  The core clock is not read directly; it is derived as
 * half the shader clock.  Always returns 0.
 */
int
nvc0_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	perflvl->shader = read_clk(dev, 0x00);
	perflvl->core   = perflvl->shader / 2;
	perflvl->memory = read_mem(dev);
	perflvl->rop    = read_clk(dev, 0x01);
	perflvl->hub07  = read_clk(dev, 0x02);
	perflvl->hub06  = read_clk(dev, 0x07);
	perflvl->hub01  = read_clk(dev, 0x08);
	perflvl->copy   = read_clk(dev, 0x09);
	perflvl->daemon = read_clk(dev, 0x0c);
	perflvl->vdec   = read_clk(dev, 0x0e);
	return 0;
}
/* Precomputed register values for one engine clock domain. */
struct nvc0_pm_clock {
	u32 freq;	/* resulting frequency (kHz) */
	u32 ssel;	/* PLL/divider select bit for 0x137100 */
	u32 mdiv;	/* final divider value for 0x137250 */
	u32 dsrc;	/* source select for 0x137160 */
	u32 ddiv;	/* pre-divider value for 0x1371d0 */
	u32 coef;	/* PLL coefficients, packed (P<<16)|(N<<8)|M */
};

/* One entry per possible engine clock domain. */
struct nvc0_pm_state {
	struct nvc0_pm_clock eng[16];
};
/*
 * Pick the divider that brings reference @ref (kHz) closest to (but
 * not below half of) @freq.  Stores the register encoding (div - 2)
 * in *ddiv and returns the divided frequency.  @dev and @clk are
 * unused but kept for signature symmetry with the other calc_*().
 */
static u32
calc_div(struct drm_device *dev, int clk, u32 ref, u32 freq, u32 *ddiv)
{
	u32 div = (ref * 2) / freq;

	if (div > 65)
		div = 65;	/* hardware maximum */
	else if (div < 2)
		div = 2;	/* hardware minimum */

	*ddiv = div - 2;
	return (ref * 2) / div;
}
/*
 * Choose a clock source for @freq (kHz): one of the fixed reference
 * frequencies when @freq matches exactly, otherwise the VCO with a
 * calculated pre-divider.  Writes the source select into *dsrc and
 * the divider (if any) into *ddiv; returns the achievable frequency.
 */
static u32
calc_src(struct drm_device *dev, int clk, u32 freq, u32 *dsrc, u32 *ddiv)
{
	u32 sclk;

	/* use one of the fixed frequencies if possible */
	*ddiv = 0x00000000;
	switch (freq) {
	case  27000:
	case 108000:
		*dsrc = 0x00000000;
		if (freq == 108000)
			*dsrc |= 0x00030000;
		return freq;
	case 100000:
		*dsrc = 0x00000002;
		return freq;
	default:
		*dsrc = 0x00000003;
		break;
	}

	/* otherwise, calculate the closest divider */
	sclk = read_vco(dev, clk);
	if (clk < 7)	/* clocks >= 7 have no per-clock pre-divider */
		sclk = calc_div(dev, clk, sclk, freq, ddiv);
	return sclk;
}
/*
 * Compute PLL coefficients for engine clock @clk to reach @freq (kHz).
 * On success, stores the packed (P<<16)|(N<<8)|M value in *coef and
 * returns the frequency actually achieved; returns 0 on any failure.
 */
static u32
calc_pll(struct drm_device *dev, int clk, u32 freq, u32 *coef)
{
	struct pll_lims limits;
	int N, M, P, ret;

	ret = get_pll_limits(dev, 0x137000 + (clk * 0x20), &limits);
	if (ret)
		return 0;

	/* reference clock comes from this PLL's input divider */
	limits.refclk = read_div(dev, clk, 0x137120, 0x137140);
	if (!limits.refclk)
		return 0;

	ret = nva3_calc_pll(dev, &limits, freq, &N, NULL, &M, &P);
	if (ret <= 0)
		return 0;

	*coef = (P << 16) | (N << 8) | M;
	return ret;
}
/* A (likely rather simplified and incomplete) view of the clock tree
*
* Key:
*
* S: source select
* D: divider
* P: pll
* F: switch
*
* Engine clocks:
*
* 137250(D) ---- 137100(F0) ---- 137160(S)/1371d0(D) ------------------- ref
* (F1) ---- 1370X0(P) ---- 137120(S)/137140(D) ---- ref
*
* Not all registers exist for all clocks. For example: clocks >= 8 don't
* have their own PLL (all tied to clock 7's PLL when in PLL mode), nor do
* they have the divider at 1371d0, though the source selection at 137160
* still exists. You must use the divider at 137250 for these instead.
*
* Memory clock:
*
* TBD, read_mem() above is likely very wrong...
*
*/
/*
 * Work out how to program engine clock @clk for @freq (kHz), filling
 * @info with the register values.  Two candidate paths are evaluated
 * -- divider-only, and (for domains that have a PLL, per the mask
 * 0x00004387) the PLL path -- and whichever lands closer to the
 * target wins.  Always returns 0; freq == 0 means "domain unused".
 */
static int
calc_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info, u32 freq)
{
	u32 src0, div0, div1D, div1P = 0;
	u32 clk0, clk1 = 0;

	/* invalid clock domain */
	if (!freq)
		return 0;

	/* first possible path, using only dividers */
	clk0 = calc_src(dev, clk, freq, &src0, &div0);
	clk0 = calc_div(dev, clk, clk0, freq, &div1D);

	/* see if we can get any closer using PLLs */
	if (clk0 != freq && (0x00004387 & (1 << clk))) {
		if (clk < 7)
			clk1 = calc_pll(dev, clk, freq, &info->coef);
		else
			/* clocks >= 7 share clock 7's PLL; can't retune it */
			clk1 = read_pll(dev, 0x1370e0);
		clk1 = calc_div(dev, clk, clk1, freq, &div1P);
	}

	/* select the method which gets closest to target freq */
	if (abs((int)freq - clk0) <= abs((int)freq - clk1)) {
		info->dsrc = src0;
		if (div0) {
			info->ddiv |= 0x80000000;	/* divider enable */
			info->ddiv |= div0 << 8;
			info->ddiv |= div0;
		}
		if (div1D) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1D;
		}
		info->ssel = 0;		/* divider mode */
		info->freq = clk0;
	} else {
		if (div1P) {
			info->mdiv |= 0x80000000;
			info->mdiv |= div1P << 8;
		}
		info->ssel = (1 << clk);	/* PLL mode */
		info->freq = clk1;
	}
	return 0;
}
/*
 * Precompute register values for every clock domain in @perflvl.
 * Returns a kzalloc'd nvc0_pm_state (to be consumed and freed by
 * nvc0_pm_clocks_set()), or an ERR_PTR on failure.
 */
void *
nvc0_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvc0_pm_state *info;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	/* NFI why this is still in the performance table, the ROPCs appear
	 * to get their clock from clock 2 ("hub07", actually hub05 on this
	 * chip, but, anyway...) as well.  nvatiming confirms hub05 and ROP
	 * are always the same freq with the binary driver even when the
	 * performance table says they should differ.
	 */
	if (dev_priv->chipset == 0xd9)
		perflvl->rop = 0;

	if ((ret = calc_clk(dev, 0x00, &info->eng[0x00], perflvl->shader)) ||
	    (ret = calc_clk(dev, 0x01, &info->eng[0x01], perflvl->rop)) ||
	    (ret = calc_clk(dev, 0x02, &info->eng[0x02], perflvl->hub07)) ||
	    (ret = calc_clk(dev, 0x07, &info->eng[0x07], perflvl->hub06)) ||
	    (ret = calc_clk(dev, 0x08, &info->eng[0x08], perflvl->hub01)) ||
	    (ret = calc_clk(dev, 0x09, &info->eng[0x09], perflvl->copy)) ||
	    (ret = calc_clk(dev, 0x0c, &info->eng[0x0c], perflvl->daemon)) ||
	    (ret = calc_clk(dev, 0x0e, &info->eng[0x0e], perflvl->vdec))) {
		kfree(info);
		return ERR_PTR(ret);
	}

	return info;
}
/*
 * Commit the precomputed values in @info to engine clock @clk.  The
 * clock is first dropped to non-PLL mode so the PLL (if any) can be
 * safely reprogrammed, then switched to its final source and divider.
 */
static void
prog_clk(struct drm_device *dev, int clk, struct nvc0_pm_clock *info)
{
	/* program dividers at 137160/1371d0 first */
	if (clk < 7 && !info->ssel) {
		nv_mask(dev, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
		nv_wr32(dev, 0x137160 + (clk * 0x04), info->dsrc);
	}

	/* switch clock to non-pll mode */
	nv_mask(dev, 0x137100, (1 << clk), 0x00000000);
	nv_wait(dev, 0x137100, (1 << clk), 0x00000000);

	/* reprogram pll */
	if (clk < 7) {
		/* make sure it's disabled first... */
		u32 base = 0x137000 + (clk * 0x20);
		u32 ctrl = nv_rd32(dev, base + 0x00);
		if (ctrl & 0x00000001) {
			nv_mask(dev, base + 0x00, 0x00000004, 0x00000000);
			nv_mask(dev, base + 0x00, 0x00000001, 0x00000000);
		}
		/* program it to new values, if necessary */
		if (info->ssel) {
			nv_wr32(dev, base + 0x04, info->coef);
			nv_mask(dev, base + 0x00, 0x00000001, 0x00000001);
			/* wait for PLL lock before enabling output */
			nv_wait(dev, base + 0x00, 0x00020000, 0x00020000);
			nv_mask(dev, base + 0x00, 0x00020004, 0x00000004);
		}
	}

	/* select pll/non-pll mode, and program final clock divider */
	nv_mask(dev, 0x137100, (1 << clk), info->ssel);
	nv_wait(dev, 0x137100, (1 << clk), info->ssel);
	nv_mask(dev, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
}
/*
 * Apply a state previously prepared by nvc0_pm_clocks_pre() and
 * release it.  Domains with freq == 0 were not requested and are
 * left untouched.  Always returns 0.
 */
int
nvc0_pm_clocks_set(struct drm_device *dev, void *data)
{
	struct nvc0_pm_state *info = data;
	int i;

	for (i = 0; i < 16; i++) {
		if (info->eng[i].freq)
			prog_clk(dev, i, &info->eng[i]);
	}

	kfree(info);
	return 0;
}
| gpl-2.0 |
InfinitiveOS-Devices/android_kernel_cyanogen_msm8916 | net/netfilter/ipset/ip_set_getport.c | 7308 | 3479 | /* Copyright (C) 2003-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
/* Get Layer-4 data from the packets */
#include <linux/ip.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/icmpv6.h>
#include <linux/sctp.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/netfilter/ipset/ip_set_getport.h>
#include <linux/export.h>
/* We must handle non-linear skbs */
/*
 * Extract the layer-4 "port" of a packet into *port and the protocol
 * number into *proto.  For ICMP/ICMPv6 the type/code pair is packed
 * into the port slot instead.  Returns false only when a recognised
 * transport header could not be read (truncated packet); protocols
 * with no port concept still return true with *port untouched.
 */
static bool
get_port(const struct sk_buff *skb, int protocol, unsigned int protooff,
	 bool src, __be16 *port, u8 *proto)
{
	switch (protocol) {
	case IPPROTO_TCP: {
		struct tcphdr _tcph;
		const struct tcphdr *th;

		th = skb_header_pointer(skb, protooff, sizeof(_tcph), &_tcph);
		if (th == NULL)
			/* No choice either */
			return false;

		*port = src ? th->source : th->dest;
		break;
	}
	case IPPROTO_SCTP: {
		sctp_sctphdr_t _sh;
		const sctp_sctphdr_t *sh;

		sh = skb_header_pointer(skb, protooff, sizeof(_sh), &_sh);
		if (sh == NULL)
			/* No choice either */
			return false;

		*port = src ? sh->source : sh->dest;
		break;
	}
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE: {
		struct udphdr _udph;
		const struct udphdr *uh;

		uh = skb_header_pointer(skb, protooff, sizeof(_udph), &_udph);
		if (uh == NULL)
			/* No choice either */
			return false;

		*port = src ? uh->source : uh->dest;
		break;
	}
	case IPPROTO_ICMP: {
		struct icmphdr _ich;
		const struct icmphdr *ic;

		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
		if (ic == NULL)
			return false;

		/* type in the high byte, code in the low byte */
		*port = (__force __be16)htons((ic->type << 8) | ic->code);
		break;
	}
	case IPPROTO_ICMPV6: {
		struct icmp6hdr _ich;
		const struct icmp6hdr *ic;

		ic = skb_header_pointer(skb, protooff, sizeof(_ich), &_ich);
		if (ic == NULL)
			return false;

		*port = (__force __be16)
			htons((ic->icmp6_type << 8) | ic->icmp6_code);
		break;
	}
	default:
		break;
	}
	*proto = protocol;

	return true;
}
/*
 * Locate the transport header of an IPv4 skb and fetch its port into
 * *port / protocol into *proto.  Non-first fragments carry no
 * transport header and are rejected.
 */
bool
ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
		    __be16 *port, u8 *proto)
{
	const struct iphdr *iph = ip_hdr(skb);
	unsigned int protooff = ip_hdrlen(skb);
	int protocol = iph->protocol;

	/* See comments at tcp_match in ip_tables.c */
	if (protocol <= 0 || (ntohs(iph->frag_off) & IP_OFFSET))
		return false;

	return get_port(skb, protocol, protooff, src, port, proto);
}
EXPORT_SYMBOL_GPL(ip_set_get_ip4_port);
#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
/*
 * IPv6 counterpart of ip_set_get_ip4_port(): skip any extension
 * headers to find the transport header, then fetch the port.
 */
bool
ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
		    __be16 *port, u8 *proto)
{
	int protoff;
	u8 nexthdr;
	__be16 frag_off;

	nexthdr = ipv6_hdr(skb)->nexthdr;
	protoff = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr), &nexthdr,
				   &frag_off);
	if (protoff < 0)	/* malformed extension header chain */
		return false;

	return get_port(skb, nexthdr, protoff, src, port, proto);
}
EXPORT_SYMBOL_GPL(ip_set_get_ip6_port);
#endif
/*
 * Family-dispatching port fetch: succeed only for IPv4/IPv6 packets
 * whose transport protocol is TCP or UDP; *port receives the source
 * or destination port depending on @src.
 */
bool
ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src, __be16 *port)
{
	u8 proto;
	bool found;

	if (pf == NFPROTO_IPV4)
		found = ip_set_get_ip4_port(skb, src, port, &proto);
	else if (pf == NFPROTO_IPV6)
		found = ip_set_get_ip6_port(skb, src, port, &proto);
	else
		return false;

	return found && (proto == IPPROTO_TCP || proto == IPPROTO_UDP);
}
EXPORT_SYMBOL_GPL(ip_set_get_ip_port);
| gpl-2.0 |
Tommy-Geenexus/sony_sources | drivers/staging/rtl8712/rtl871x_pwrctrl.c | 7820 | 7121 | /******************************************************************************
* rtl871x_pwrctrl.c
*
* Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved.
* Linux device driver for RTL8192SU
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
* Contact information:
* WLAN FAE <wlanfae@realtek.com>
* Larry Finger <Larry.Finger@lwfinger.net>
*
******************************************************************************/
#define _RTL871X_PWRCTRL_C_
#include "osdep_service.h"
#include "drv_types.h"
#include "osdep_intf.h"
#define RTL8712_SDIO_LOCAL_BASE 0X10100000
#define SDIO_HCPWM (RTL8712_SDIO_LOCAL_BASE + 0x0081)
/*
 * Write a new requested power state (RPWM) to the firmware register.
 * Bit 7 is a toggle that alternates on each write (tog += 0x80) so
 * the firmware can distinguish consecutive requests.
 */
void r8712_set_rpwm(struct _adapter *padapter, u8 val8)
{
	u8 rpwm;
	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;

	/* skip only if the state is unchanged and no retry is pending */
	if (pwrpriv->rpwm == val8) {
		if (pwrpriv->rpwm_retry == 0)
			return;
	}
	if ((padapter->bDriverStopped == true) ||
	    (padapter->bSurpriseRemoved == true))
		return;
	rpwm = val8 | pwrpriv->tog;
	switch (val8) {
	case PS_STATE_S1:
		pwrpriv->cpwm = val8;
		break;
	case PS_STATE_S2:/* only for USB normal powersave mode use,
			  * temp mark some code. */
	case PS_STATE_S3:
	case PS_STATE_S4:
		pwrpriv->cpwm = val8;
		break;
	default:
		break;
	}
	pwrpriv->rpwm_retry = 0;
	pwrpriv->rpwm = val8;
	r8712_write8(padapter, 0x1025FE58, rpwm);	/* RPWM register */
	pwrpriv->tog += 0x80;
}
/*
 * Request a new power-save mode.  If either the mode or the smart-ps
 * setting changed, record whether we are leaving the active state and
 * defer the actual transition to the SetPSMode work item.
 */
void r8712_set_ps_mode(struct _adapter *padapter, uint ps_mode, uint smart_ps)
{
	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;

	if (ps_mode > PM_Card_Disable)
		return;
	/* smart_ps is meaningless while the driver stays fully active */
	if (ps_mode == PS_MODE_ACTIVE)
		smart_ps = 0;
	if (pwrpriv->pwr_mode == ps_mode && pwrpriv->smart_ps == smart_ps)
		return;
	pwrpriv->bSleep = (pwrpriv->pwr_mode == PS_MODE_ACTIVE);
	pwrpriv->pwr_mode = ps_mode;
	pwrpriv->smart_ps = smart_ps;
	_set_workitem(&pwrpriv->SetPSModeWorkItem);
}
/*
 * Caller: ISR handler.
 *
 * Called when a CPWM (current power mode) interrupt arrives.  Updates
 * the driver's cached cpwm from the firmware report and, once the
 * firmware is at S2 or above, wakes the command thread so it can
 * resume issuing commands.
 */
void r8712_cpwm_int_hdl(struct _adapter *padapter,
			struct reportpwrstate_parm *preportpwrstate)
{
	struct pwrctrl_priv *pwrpriv = &(padapter->pwrctrlpriv);
	struct cmd_priv *pcmdpriv = &(padapter->cmdpriv);

	/* bit 7 is a toggle; an unchanged toggle means a stale report */
	if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
		return;
	_cancel_timer_ex(&padapter->pwrctrlpriv.rpwm_check_timer);
	_enter_pwrlock(&pwrpriv->lock);
	pwrpriv->cpwm = (preportpwrstate->state) & 0xf;
	if (pwrpriv->cpwm >= PS_STATE_S2) {
		if (pwrpriv->alives & CMD_ALIVE)
			up(&(pcmdpriv->cmd_queue_sema));
	}
	pwrpriv->cpwm_tog = (preportpwrstate->state) & 0x80;
	up(&pwrpriv->lock);
}
/* Mark subsystem @tag as needing the firmware awake. */
static inline void register_task_alive(struct pwrctrl_priv *pwrctrl, uint tag)
{
	pwrctrl->alives |= tag;
}
/* Clear subsystem @tag from the set that needs the firmware awake. */
static inline void unregister_task_alive(struct pwrctrl_priv *pwrctrl, uint tag)
{
	/*
	 * Clear the bits unconditionally.  The previous code XOR-ed the
	 * tag in only when at least one of its bits was set, which would
	 * *set* any clear bits of a multi-bit tag; masking them off is
	 * both simpler and correct for every tag value.
	 */
	pwrctrl->alives &= ~tag;
}
/*
 * Timer-driven check: if the firmware has not acknowledged the last
 * requested power state (cpwm != rpwm), schedule the rpwm work item
 * to retry.  No-op once the driver is stopping or the device is gone.
 */
static void _rpwm_check_handler (struct _adapter *padapter)
{
	struct pwrctrl_priv *pwrpriv = &padapter->pwrctrlpriv;

	if (padapter->bDriverStopped == true ||
	    padapter->bSurpriseRemoved == true)
		return;
	if (pwrpriv->cpwm != pwrpriv->rpwm)
		_set_workitem(&(pwrpriv->rpwm_workitem));
}
/*
 * Deferred handler for r8712_set_ps_mode().  Only the wake-up half is
 * handled here: when we are not entering sleep (bSleep clear) and the
 * new mode is active, request full power (S4).  Nothing is done for
 * the sleep direction in this callback.
 */
static void SetPSModeWorkItemCallback(struct work_struct *work)
{
	struct pwrctrl_priv *pwrpriv = container_of(work,
				       struct pwrctrl_priv, SetPSModeWorkItem);
	struct _adapter *padapter = container_of(pwrpriv,
				    struct _adapter, pwrctrlpriv);

	if (!pwrpriv->bSleep) {
		_enter_pwrlock(&pwrpriv->lock);
		if (pwrpriv->pwr_mode == PS_MODE_ACTIVE)
			r8712_set_rpwm(padapter, PS_STATE_S4);
		up(&pwrpriv->lock);
	}
}
/*
 * Deferred retry for an unacknowledged power-state request: if the
 * firmware's reported state (cpwm) still differs from what we asked
 * for (rpwm), re-issue the request in retry mode.
 */
static void rpwm_workitem_callback(struct work_struct *work)
{
	struct pwrctrl_priv *pwrpriv = container_of(work,
				       struct pwrctrl_priv, rpwm_workitem);
	struct _adapter *padapter = container_of(pwrpriv,
				    struct _adapter, pwrctrlpriv);

	if (pwrpriv->cpwm != pwrpriv->rpwm) {
		_enter_pwrlock(&pwrpriv->lock);
		/*
		 * The HCPWM read's value was never used (the old code
		 * stored it in a dead local); keep the read itself in
		 * case the access has a hardware side effect -- TODO
		 * confirm whether it can be dropped entirely.
		 */
		r8712_read8(padapter, SDIO_HCPWM);
		pwrpriv->rpwm_retry = 1;
		r8712_set_rpwm(padapter, pwrpriv->rpwm);
		up(&pwrpriv->lock);
	}
}
/* Timer callback thunk: unwrap the adapter and run the real check. */
static void rpwm_check_handler (void *FunctionContext)
{
	struct _adapter *adapter = (struct _adapter *)FunctionContext;

	_rpwm_check_handler(adapter);
}
/*
 * One-time power-control initialisation: zero all state, assume the
 * firmware starts fully awake (S4), clear the RPWM register so driver
 * and firmware agree, and hook up the work items and check timer.
 */
void r8712_init_pwrctrl_priv(struct _adapter *padapter)
{
	struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;

	memset((unsigned char *)pwrctrlpriv, 0, sizeof(struct pwrctrl_priv));
	sema_init(&pwrctrlpriv->lock, 1);
	pwrctrlpriv->cpwm = PS_STATE_S4;
	pwrctrlpriv->pwr_mode = PS_MODE_ACTIVE;
	pwrctrlpriv->smart_ps = 0;
	pwrctrlpriv->tog = 0x80;
	/* clear RPWM to ensure driver and fw back to initial state. */
	r8712_write8(padapter, 0x1025FE58, 0);
	_init_workitem(&(pwrctrlpriv->SetPSModeWorkItem),
		       SetPSModeWorkItemCallback, padapter);
	_init_workitem(&(pwrctrlpriv->rpwm_workitem),
		       rpwm_workitem_callback, padapter);
	_init_timer(&(pwrctrlpriv->rpwm_check_timer),
		    padapter->pnetdev, rpwm_check_handler, (u8 *)padapter);
}
/*
 * Caller: r8712_cmd_thread
 *
 * Check if the fw_pwrstate is okay for issuing cmd.
 * If not (cpwm is less than S2), the sub-routine requests that the
 * firmware be raised to at least S3 and reports failure for now.
 *
 * Calling Context: Passive
 *
 * Return Value:
 *	_SUCCESS: r8712_cmd_thread can issue cmds to firmware afterwards.
 *	_FAIL: r8712_cmd_thread can not do anything.
 */
sint r8712_register_cmd_alive(struct _adapter *padapter)
{
	uint res = _SUCCESS;
	struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;

	_enter_pwrlock(&pwrctrl->lock);
	register_task_alive(pwrctrl, CMD_ALIVE);
	if (pwrctrl->cpwm < PS_STATE_S2) {
		/* not awake enough yet; ask for S3 and retry later */
		r8712_set_rpwm(padapter, PS_STATE_S3);
		res = _FAIL;
	}
	up(&pwrctrl->lock);
	return res;
}
/*
 * Caller: ISR
 *
 * If ISR's txdone and there are no more pkts for TX, the driver calls
 * this to let the firmware power down again: once no subsystem is
 * registered alive and we are not mid-link, drop back to S0.
 */
void r8712_unregister_cmd_alive(struct _adapter *padapter)
{
	struct pwrctrl_priv *pwrctrl = &padapter->pwrctrlpriv;

	_enter_pwrlock(&pwrctrl->lock);
	unregister_task_alive(pwrctrl, CMD_ALIVE);
	if ((pwrctrl->cpwm > PS_STATE_S2) &&
	    (pwrctrl->pwr_mode > PS_MODE_ACTIVE)) {
		if ((pwrctrl->alives == 0) &&
		    (check_fwstate(&padapter->mlmepriv,
				   _FW_UNDER_LINKING) != true)) {
			r8712_set_rpwm(padapter, PS_STATE_S0);
		}
	}
	up(&pwrctrl->lock);
}
| gpl-2.0 |
sktjdgns1189/android_kernel_samsung_aries_KOR | drivers/input/keyboard/lkkbd.c | 9100 | 18529 | /*
* Copyright (C) 2004 by Jan-Benedict Glaw <jbglaw@lug-owl.de>
*/
/*
* LK keyboard driver for Linux, based on sunkbd.c (C) by Vojtech Pavlik
*/
/*
* DEC LK201 and LK401 keyboard driver for Linux (primary for DECstations
* and VAXstations, but can also be used on any standard RS232 with an
* adaptor).
*
* DISCLAIMER: This works for _me_. If you break anything by using the
* information given below, I will _not_ be liable!
*
* RJ10 pinout: To DE9: Or DB25:
* 1 - RxD <----> Pin 3 (TxD) <-> Pin 2 (TxD)
* 2 - GND <----> Pin 5 (GND) <-> Pin 7 (GND)
* 4 - TxD <----> Pin 2 (RxD) <-> Pin 3 (RxD)
* 3 - +12V (from HDD drive connector), DON'T connect to DE9 or DB25!!!
*
* Pin numbers for DE9 and DB25 are noted on the plug (quite small:). For
* RJ10, it's like this:
*
* __=__ Hold the plug in front of you, cable downwards,
* /___/| nose is hidden behind the plug. Now, pin 1 is at
* |1234|| the left side, pin 4 at the right and 2 and 3 are
* |IIII|| in between, of course:)
* | ||
* |____|/
* || So the adaptor consists of three connected cables
* || for data transmission (RxD and TxD) and signal ground.
* Additionally, you have to get +12V from somewhere.
* Most easily, you'll get that from a floppy or HDD power connector.
* It's the yellow cable there (black is ground and red is +5V).
*
* The keyboard and all the commands it understands are documented in
* "VCB02 Video Subsystem - Technical Manual", EK-104AA-TM-001. This
* document is LK201 specific, but LK401 is mostly compatible. It comes
* up in LK201 mode and doesn't report any of the additional keys it
* has. These need to be switched on with the LK_CMD_ENABLE_LK401
* command. You'll find this document (scanned .pdf file) on MANX,
* a search engine specific to DEC documentation. Try
* http://www.vt100.net/manx/details?pn=EK-104AA-TM-001;id=21;cp=1
*/
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/input.h>
#include <linux/serio.h>
#include <linux/workqueue.h>
#define DRIVER_DESC "LK keyboard driver"
MODULE_AUTHOR("Jan-Benedict Glaw <jbglaw@lug-owl.de>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
/*
* Known parameters:
* bell_volume
* keyclick_volume
* ctrlclick_volume
*
* Please notice that there's not yet an API to set these at runtime.
*/
static int bell_volume = 100; /* % */
module_param(bell_volume, int, 0);
MODULE_PARM_DESC(bell_volume, "Bell volume (in %). default is 100%");
static int keyclick_volume = 100; /* % */
module_param(keyclick_volume, int, 0);
MODULE_PARM_DESC(keyclick_volume, "Keyclick volume (in %), default is 100%");
static int ctrlclick_volume = 100; /* % */
module_param(ctrlclick_volume, int, 0);
MODULE_PARM_DESC(ctrlclick_volume, "Ctrlclick volume (in %), default is 100%");
static int lk201_compose_is_alt;
module_param(lk201_compose_is_alt, int, 0);
MODULE_PARM_DESC(lk201_compose_is_alt,
"If set non-zero, LK201' Compose key will act as an Alt key");
#undef LKKBD_DEBUG
#ifdef LKKBD_DEBUG
#define DBG(x...) printk(x)
#else
#define DBG(x...) do {} while (0)
#endif
/* LED control */
#define LK_LED_WAIT 0x81
#define LK_LED_COMPOSE 0x82
#define LK_LED_SHIFTLOCK 0x84
#define LK_LED_SCROLLLOCK 0x88
#define LK_CMD_LED_ON 0x13
#define LK_CMD_LED_OFF 0x11
/* Mode control */
#define LK_MODE_DOWN 0x80
#define LK_MODE_AUTODOWN 0x82
#define LK_MODE_UPDOWN 0x86
#define LK_CMD_SET_MODE(mode, div) ((mode) | ((div) << 3))
/* Misc commands */
#define LK_CMD_ENABLE_KEYCLICK 0x1b
#define LK_CMD_DISABLE_KEYCLICK 0x99
#define LK_CMD_DISABLE_BELL 0xa1
#define LK_CMD_SOUND_BELL 0xa7
#define LK_CMD_ENABLE_BELL 0x23
#define LK_CMD_DISABLE_CTRCLICK 0xb9
#define LK_CMD_ENABLE_CTRCLICK 0xbb
#define LK_CMD_SET_DEFAULTS 0xd3
#define LK_CMD_POWERCYCLE_RESET 0xfd
#define LK_CMD_ENABLE_LK401 0xe9
#define LK_CMD_REQUEST_ID 0xab
/* Misc responses from keyboard */
#define LK_STUCK_KEY 0x3d
#define LK_SELFTEST_FAILED 0x3e
#define LK_ALL_KEYS_UP 0xb3
#define LK_METRONOME 0xb4
#define LK_OUTPUT_ERROR 0xb5
#define LK_INPUT_ERROR 0xb6
#define LK_KBD_LOCKED 0xb7
#define LK_KBD_TEST_MODE_ACK 0xb8
#define LK_PREFIX_KEY_DOWN 0xb9
#define LK_MODE_CHANGE_ACK 0xba
#define LK_RESPONSE_RESERVED 0xbb
#define LK_NUM_KEYCODES 256
#define LK_NUM_IGNORE_BYTES 6
static unsigned short lkkbd_keycode[LK_NUM_KEYCODES] = {
[0x56] = KEY_F1,
[0x57] = KEY_F2,
[0x58] = KEY_F3,
[0x59] = KEY_F4,
[0x5a] = KEY_F5,
[0x64] = KEY_F6,
[0x65] = KEY_F7,
[0x66] = KEY_F8,
[0x67] = KEY_F9,
[0x68] = KEY_F10,
[0x71] = KEY_F11,
[0x72] = KEY_F12,
[0x73] = KEY_F13,
[0x74] = KEY_F14,
[0x7c] = KEY_F15,
[0x7d] = KEY_F16,
[0x80] = KEY_F17,
[0x81] = KEY_F18,
[0x82] = KEY_F19,
[0x83] = KEY_F20,
[0x8a] = KEY_FIND,
[0x8b] = KEY_INSERT,
[0x8c] = KEY_DELETE,
[0x8d] = KEY_SELECT,
[0x8e] = KEY_PAGEUP,
[0x8f] = KEY_PAGEDOWN,
[0x92] = KEY_KP0,
[0x94] = KEY_KPDOT,
[0x95] = KEY_KPENTER,
[0x96] = KEY_KP1,
[0x97] = KEY_KP2,
[0x98] = KEY_KP3,
[0x99] = KEY_KP4,
[0x9a] = KEY_KP5,
[0x9b] = KEY_KP6,
[0x9c] = KEY_KPCOMMA,
[0x9d] = KEY_KP7,
[0x9e] = KEY_KP8,
[0x9f] = KEY_KP9,
[0xa0] = KEY_KPMINUS,
[0xa1] = KEY_PROG1,
[0xa2] = KEY_PROG2,
[0xa3] = KEY_PROG3,
[0xa4] = KEY_PROG4,
[0xa7] = KEY_LEFT,
[0xa8] = KEY_RIGHT,
[0xa9] = KEY_DOWN,
[0xaa] = KEY_UP,
[0xab] = KEY_RIGHTSHIFT,
[0xac] = KEY_LEFTALT,
[0xad] = KEY_COMPOSE, /* Right Compose, that is. */
[0xae] = KEY_LEFTSHIFT, /* Same as KEY_RIGHTSHIFT on LK201 */
[0xaf] = KEY_LEFTCTRL,
[0xb0] = KEY_CAPSLOCK,
[0xb1] = KEY_COMPOSE, /* Left Compose, that is. */
[0xb2] = KEY_RIGHTALT,
[0xbc] = KEY_BACKSPACE,
[0xbd] = KEY_ENTER,
[0xbe] = KEY_TAB,
[0xbf] = KEY_ESC,
[0xc0] = KEY_1,
[0xc1] = KEY_Q,
[0xc2] = KEY_A,
[0xc3] = KEY_Z,
[0xc5] = KEY_2,
[0xc6] = KEY_W,
[0xc7] = KEY_S,
[0xc8] = KEY_X,
[0xc9] = KEY_102ND,
[0xcb] = KEY_3,
[0xcc] = KEY_E,
[0xcd] = KEY_D,
[0xce] = KEY_C,
[0xd0] = KEY_4,
[0xd1] = KEY_R,
[0xd2] = KEY_F,
[0xd3] = KEY_V,
[0xd4] = KEY_SPACE,
[0xd6] = KEY_5,
[0xd7] = KEY_T,
[0xd8] = KEY_G,
[0xd9] = KEY_B,
[0xdb] = KEY_6,
[0xdc] = KEY_Y,
[0xdd] = KEY_H,
[0xde] = KEY_N,
[0xe0] = KEY_7,
[0xe1] = KEY_U,
[0xe2] = KEY_J,
[0xe3] = KEY_M,
[0xe5] = KEY_8,
[0xe6] = KEY_I,
[0xe7] = KEY_K,
[0xe8] = KEY_COMMA,
[0xea] = KEY_9,
[0xeb] = KEY_O,
[0xec] = KEY_L,
[0xed] = KEY_DOT,
[0xef] = KEY_0,
[0xf0] = KEY_P,
[0xf2] = KEY_SEMICOLON,
[0xf3] = KEY_SLASH,
[0xf5] = KEY_EQUAL,
[0xf6] = KEY_RIGHTBRACE,
[0xf7] = KEY_BACKSLASH,
[0xf9] = KEY_MINUS,
[0xfa] = KEY_LEFTBRACE,
[0xfb] = KEY_APOSTROPHE,
};
#define CHECK_LED(LK, VAR_ON, VAR_OFF, LED, BITS) do { \
if (test_bit(LED, (LK)->dev->led)) \
VAR_ON |= BITS; \
else \
VAR_OFF |= BITS; \
} while (0)
/*
* Per-keyboard data
*/
struct lkkbd {
unsigned short keycode[LK_NUM_KEYCODES];
int ignore_bytes;
unsigned char id[LK_NUM_IGNORE_BYTES];
struct input_dev *dev;
struct serio *serio;
struct work_struct tq;
char name[64];
char phys[32];
char type;
int bell_volume;
int keyclick_volume;
int ctrlclick_volume;
};
#ifdef LKKBD_DEBUG
/*
* Responses from the keyboard and mapping back to their names.
*/
static struct {
unsigned char value;
unsigned char *name;
} lk_response[] = {
#define RESPONSE(x) { .value = (x), .name = #x, }
RESPONSE(LK_STUCK_KEY),
RESPONSE(LK_SELFTEST_FAILED),
RESPONSE(LK_ALL_KEYS_UP),
RESPONSE(LK_METRONOME),
RESPONSE(LK_OUTPUT_ERROR),
RESPONSE(LK_INPUT_ERROR),
RESPONSE(LK_KBD_LOCKED),
RESPONSE(LK_KBD_TEST_MODE_ACK),
RESPONSE(LK_PREFIX_KEY_DOWN),
RESPONSE(LK_MODE_CHANGE_ACK),
RESPONSE(LK_RESPONSE_RESERVED),
#undef RESPONSE
};
/*
 * response_name() - map a raw keyboard response byte to its symbolic
 * name for debug output.  Linear scan of the lk_response[] table;
 * returns "<unknown>" for bytes not listed there.  Debug builds only.
 */
static unsigned char *response_name(unsigned char value)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(lk_response); i++)
		if (lk_response[i].value == value)
			return lk_response[i].name;

	return "<unknown>";
}
#endif /* LKKBD_DEBUG */
/*
* Calculate volume parameter byte for a given volume.
*/
/*
 * Calculate volume parameter byte for a given volume.
 *
 * The keyboard understands eight loudness steps, 7 (quietest) down to
 * 0 (loudest), in 12.5% increments; bit 7 must be set in the parameter
 * byte.  Input is clamped to the 0..100% range.
 */
static unsigned char volume_to_hw(int volume_percent)
{
	/* Lower bound (in %) for each hardware level 0..6; below all of
	 * them the level is 7. */
	static const int level_floor[] = { 88, 75, 63, 50, 38, 25, 13 };
	unsigned char level;

	if (volume_percent < 0)
		volume_percent = 0;
	if (volume_percent > 100)
		volume_percent = 100;

	for (level = 0; level < 7; level++)
		if (volume_percent >= level_floor[level])
			break;

	return level | 0x80;
}
/*
 * lkkbd_detection_done() - evaluate the LK_NUM_IGNORE_BYTES-long ID
 * block collected in lk->id[] after the keyboard announced itself.
 * Identifies the model from id[4], reports self-test status from
 * id[2] (and the stuck-key scancode from id[3]), and resets the left
 * Compose keycode, optionally remapping it to Alt on an LK201 when
 * the lk201_compose_is_alt module parameter is set.
 */
static void lkkbd_detection_done(struct lkkbd *lk)
{
	int i;

	/*
	 * Reset setting for Compose key. Let Compose be KEY_COMPOSE.
	 */
	lk->keycode[0xb1] = KEY_COMPOSE;

	/*
	 * Print keyboard name and modify Compose=Alt on user's request.
	 */
	switch (lk->id[4]) {
	case 1:	/* LK201 */
		strlcpy(lk->name, "DEC LK201 keyboard", sizeof(lk->name));

		if (lk201_compose_is_alt)
			lk->keycode[0xb1] = KEY_LEFTALT;
		break;

	case 2:	/* LK401 */
		strlcpy(lk->name, "DEC LK401 keyboard", sizeof(lk->name));
		break;

	default:
		strlcpy(lk->name, "Unknown DEC keyboard", sizeof(lk->name));
		printk(KERN_ERR
			"lkkbd: keyboard on %s is unknown, please report to "
			"Jan-Benedict Glaw <jbglaw@lug-owl.de>\n", lk->phys);
		printk(KERN_ERR "lkkbd: keyboard ID'ed as:");
		for (i = 0; i < LK_NUM_IGNORE_BYTES; i++)
			printk(" 0x%02x", lk->id[i]);
		printk("\n");
		break;
	}

	printk(KERN_INFO "lkkbd: keyboard on %s identified as: %s\n",
		lk->phys, lk->name);

	/*
	 * Report errors during keyboard boot-up.
	 */
	switch (lk->id[2]) {
	case 0x00:
		/* All okay */
		break;

	case LK_STUCK_KEY:
		printk(KERN_ERR "lkkbd: Stuck key on keyboard at %s\n",
			lk->phys);
		break;

	case LK_SELFTEST_FAILED:
		printk(KERN_ERR
			"lkkbd: Selftest failed on keyboard at %s, "
			"keyboard may not work properly\n", lk->phys);
		break;

	default:
		printk(KERN_ERR
			"lkkbd: Unknown error %02x on keyboard at %s\n",
			lk->id[2], lk->phys);
		break;
	}

	/*
	 * Try to hint user if there's a stuck key.
	 */
	if (lk->id[2] == LK_STUCK_KEY && lk->id[3] != 0)
		printk(KERN_ERR
			"Scancode of stuck key is 0x%02x, keycode is 0x%04x\n",
			lk->id[3], lk->keycode[lk->id[3]]);
}
/*
* lkkbd_interrupt() is called by the low level driver when a character
* is received.
*/
/*
 * lkkbd_interrupt() is called by the low level driver when a character
 * is received.
 *
 * While lk->ignore_bytes is non-zero, incoming bytes are captured into
 * lk->id[] (filled front-to-back via the post-decrement) instead of
 * being interpreted; once the last ID byte arrives,
 * lkkbd_detection_done() evaluates the block.  Otherwise the byte is
 * either a control response or a key scancode: key bytes toggle the
 * key's state (the keyboard sends one byte per press and per release
 * in up/down mode, so the new state is the inverse of the current one).
 */
static irqreturn_t lkkbd_interrupt(struct serio *serio,
				   unsigned char data, unsigned int flags)
{
	struct lkkbd *lk = serio_get_drvdata(serio);
	struct input_dev *input_dev = lk->dev;
	unsigned int keycode;
	int i;

	DBG(KERN_INFO "Got byte 0x%02x\n", data);

	if (lk->ignore_bytes > 0) {
		DBG(KERN_INFO "Ignoring a byte on %s\n", lk->name);
		lk->id[LK_NUM_IGNORE_BYTES - lk->ignore_bytes--] = data;

		if (lk->ignore_bytes == 0)
			lkkbd_detection_done(lk);

		return IRQ_HANDLED;
	}

	switch (data) {
	case LK_ALL_KEYS_UP:
		/* Keyboard says nothing is held down: release everything. */
		for (i = 0; i < ARRAY_SIZE(lkkbd_keycode); i++)
			input_report_key(input_dev, lk->keycode[i], 0);
		input_sync(input_dev);
		break;

	case 0x01:
		/* First byte of the power-up ID block: collect the rest
		 * and schedule re-initialization from process context. */
		DBG(KERN_INFO "Got 0x01, scheduling re-initialization\n");
		lk->ignore_bytes = LK_NUM_IGNORE_BYTES;
		lk->id[LK_NUM_IGNORE_BYTES - lk->ignore_bytes--] = data;
		schedule_work(&lk->tq);
		break;

	case LK_METRONOME:
	case LK_OUTPUT_ERROR:
	case LK_INPUT_ERROR:
	case LK_KBD_LOCKED:
	case LK_KBD_TEST_MODE_ACK:
	case LK_PREFIX_KEY_DOWN:
	case LK_MODE_CHANGE_ACK:
	case LK_RESPONSE_RESERVED:
		DBG(KERN_INFO "Got %s and don't know how to handle...\n",
			response_name(data));
		break;

	default:
		keycode = lk->keycode[data];
		if (keycode != KEY_RESERVED) {
			input_report_key(input_dev, keycode,
					 !test_bit(keycode, input_dev->key));
			input_sync(input_dev);
		} else {
			printk(KERN_WARNING
				"%s: Unknown key with scancode 0x%02x on %s.\n",
				__FILE__, data, lk->name);
		}
	}

	return IRQ_HANDLED;
}
/*
 * lkkbd_toggle_leds() - push the current input-core LED state down to
 * the keyboard.  CHECK_LED() sorts each LED's hardware bit into either
 * the on-mask or the off-mask; each non-empty mask is sent as a
 * two-byte command (LED command followed by the bitmask parameter).
 */
static void lkkbd_toggle_leds(struct lkkbd *lk)
{
	struct serio *serio = lk->serio;
	unsigned char leds_on = 0;
	unsigned char leds_off = 0;

	CHECK_LED(lk, leds_on, leds_off, LED_CAPSL, LK_LED_SHIFTLOCK);
	CHECK_LED(lk, leds_on, leds_off, LED_COMPOSE, LK_LED_COMPOSE);
	CHECK_LED(lk, leds_on, leds_off, LED_SCROLLL, LK_LED_SCROLLLOCK);
	CHECK_LED(lk, leds_on, leds_off, LED_SLEEP, LK_LED_WAIT);
	if (leds_on != 0) {
		serio_write(serio, LK_CMD_LED_ON);
		serio_write(serio, leds_on);
	}
	if (leds_off != 0) {
		serio_write(serio, LK_CMD_LED_OFF);
		serio_write(serio, leds_off);
	}
}
/*
 * lkkbd_toggle_keyclick() - enable or disable the audible key click
 * (and the separate Ctrl-key click).  When enabling, each enable
 * command is followed by a hardware volume parameter derived from the
 * per-keyboard volume settings.
 */
static void lkkbd_toggle_keyclick(struct lkkbd *lk, bool on)
{
	struct serio *serio = lk->serio;

	if (on) {
		DBG("%s: Activating key clicks\n", __func__);
		serio_write(serio, LK_CMD_ENABLE_KEYCLICK);
		serio_write(serio, volume_to_hw(lk->keyclick_volume));
		serio_write(serio, LK_CMD_ENABLE_CTRCLICK);
		serio_write(serio, volume_to_hw(lk->ctrlclick_volume));
	} else {
		DBG("%s: Deactivating key clicks\n", __func__);
		serio_write(serio, LK_CMD_DISABLE_KEYCLICK);
		serio_write(serio, LK_CMD_DISABLE_CTRCLICK);
	}
}
/*
* lkkbd_event() handles events from the input module.
*/
/*
 * lkkbd_event() handles events from the input module.
 *
 * Supports EV_LED (re-sync all LEDs) and EV_SND (key click on/off and
 * ringing the bell).  Returns 0 for handled events and -1 otherwise.
 */
static int lkkbd_event(struct input_dev *dev,
			unsigned int type, unsigned int code, int value)
{
	struct lkkbd *lk = input_get_drvdata(dev);

	switch (type) {
	case EV_LED:
		lkkbd_toggle_leds(lk);
		return 0;

	case EV_SND:
		switch (code) {
		case SND_CLICK:
			lkkbd_toggle_keyclick(lk, value);
			return 0;

		case SND_BELL:
			if (value != 0)
				serio_write(lk->serio, LK_CMD_SOUND_BELL);

			return 0;
		}

		break;

	default:
		printk(KERN_ERR "%s(): Got unknown type %d, code %d, value %d\n",
			__func__, type, code, value);
	}

	return -1;
}
/*
* lkkbd_reinit() sets leds and beeps to a state the computer remembers they
* were in.
*/
/*
 * lkkbd_reinit() sets leds and beeps to a state the computer remembers they
 * were in.
 *
 * Runs from the workqueue (scheduled by lkkbd_interrupt() on keyboard
 * power-up): requests the ID block, restores defaults, LEDs, LK401
 * extended mode, up/down reporting for all 14 key divisions, and the
 * bell/keyclick settings.
 */
static void lkkbd_reinit(struct work_struct *work)
{
	struct lkkbd *lk = container_of(work, struct lkkbd, tq);
	int division;

	/* Ask for ID */
	serio_write(lk->serio, LK_CMD_REQUEST_ID);

	/* Reset parameters */
	serio_write(lk->serio, LK_CMD_SET_DEFAULTS);

	/* Set LEDs */
	lkkbd_toggle_leds(lk);

	/*
	 * Try to activate extended LK401 mode. This command will
	 * only work with a LK401 keyboard and grants access to
	 * LAlt, RAlt, RCompose and RShift.
	 */
	serio_write(lk->serio, LK_CMD_ENABLE_LK401);

	/* Set all keys to UPDOWN mode */
	for (division = 1; division <= 14; division++)
		serio_write(lk->serio,
			    LK_CMD_SET_MODE(LK_MODE_UPDOWN, division));

	/* Enable bell and set volume */
	serio_write(lk->serio, LK_CMD_ENABLE_BELL);
	serio_write(lk->serio, volume_to_hw(lk->bell_volume));

	/* Enable/disable keyclick (and possibly set volume) */
	lkkbd_toggle_keyclick(lk, test_bit(SND_CLICK, lk->dev->snd));

	/* Sound the bell if needed */
	if (test_bit(SND_BELL, lk->dev->snd))
		serio_write(lk->serio, LK_CMD_SOUND_BELL);
}
/*
* lkkbd_connect() probes for a LK keyboard and fills the necessary structures.
*/
/*
 * lkkbd_connect() probes for a LK keyboard and fills the necessary structures.
 *
 * Allocates the per-keyboard state and the input device, wires up the
 * capability bits and keymap, opens the serio port, registers the input
 * device, and finally power-cycles the keyboard (which makes it send
 * its ID block, triggering lkkbd_reinit via the interrupt handler).
 * Returns 0 on success or a negative errno; the fail labels unwind in
 * reverse order of acquisition (input_free_device(NULL)/kfree(NULL)
 * are safe no-ops).
 */
static int lkkbd_connect(struct serio *serio, struct serio_driver *drv)
{
	struct lkkbd *lk;
	struct input_dev *input_dev;
	int i;
	int err;

	lk = kzalloc(sizeof(struct lkkbd), GFP_KERNEL);
	input_dev = input_allocate_device();
	if (!lk || !input_dev) {
		err = -ENOMEM;
		goto fail1;
	}

	lk->serio = serio;
	lk->dev = input_dev;
	INIT_WORK(&lk->tq, lkkbd_reinit);
	lk->bell_volume = bell_volume;
	lk->keyclick_volume = keyclick_volume;
	lk->ctrlclick_volume = ctrlclick_volume;
	memcpy(lk->keycode, lkkbd_keycode, sizeof(lk->keycode));

	strlcpy(lk->name, "DEC LK keyboard", sizeof(lk->name));
	snprintf(lk->phys, sizeof(lk->phys), "%s/input0", serio->phys);

	input_dev->name = lk->name;
	input_dev->phys = lk->phys;
	input_dev->id.bustype = BUS_RS232;
	input_dev->id.vendor = SERIO_LKKBD;
	input_dev->id.product = 0;
	input_dev->id.version = 0x0100;
	input_dev->dev.parent = &serio->dev;
	input_dev->event = lkkbd_event;

	input_set_drvdata(input_dev, lk);

	__set_bit(EV_KEY, input_dev->evbit);
	__set_bit(EV_LED, input_dev->evbit);
	__set_bit(EV_SND, input_dev->evbit);
	__set_bit(EV_REP, input_dev->evbit);
	__set_bit(LED_CAPSL, input_dev->ledbit);
	__set_bit(LED_SLEEP, input_dev->ledbit);
	__set_bit(LED_COMPOSE, input_dev->ledbit);
	__set_bit(LED_SCROLLL, input_dev->ledbit);
	__set_bit(SND_BELL, input_dev->sndbit);
	__set_bit(SND_CLICK, input_dev->sndbit);

	input_dev->keycode = lk->keycode;
	input_dev->keycodesize = sizeof(lk->keycode[0]);
	input_dev->keycodemax = ARRAY_SIZE(lk->keycode);

	/* Advertise every keycode in the map; KEY_RESERVED (0) is the
	 * "no key" placeholder and must not be advertised. */
	for (i = 0; i < LK_NUM_KEYCODES; i++)
		__set_bit(lk->keycode[i], input_dev->keybit);
	__clear_bit(KEY_RESERVED, input_dev->keybit);

	serio_set_drvdata(serio, lk);

	err = serio_open(serio, drv);
	if (err)
		goto fail2;

	err = input_register_device(lk->dev);
	if (err)
		goto fail3;

	serio_write(lk->serio, LK_CMD_POWERCYCLE_RESET);

	return 0;

 fail3:	serio_close(serio);
 fail2:	serio_set_drvdata(serio, NULL);
 fail1:	input_free_device(input_dev);
	kfree(lk);
	return err;
}
/*
* lkkbd_disconnect() unregisters and closes behind us.
*/
/*
 * lkkbd_disconnect() unregisters and closes behind us.
 *
 * The extra input_get_device()/input_put_device() pair keeps the input
 * device alive across input_unregister_device() until we are done
 * touching it.
 */
static void lkkbd_disconnect(struct serio *serio)
{
	struct lkkbd *lk = serio_get_drvdata(serio);

	input_get_device(lk->dev);
	input_unregister_device(lk->dev);
	serio_close(serio);
	serio_set_drvdata(serio, NULL);
	input_put_device(lk->dev);
	kfree(lk);
}
/* Serio IDs we bind to: any RS232 port speaking the LKKBD protocol. */
static struct serio_device_id lkkbd_serio_ids[] = {
	{
		.type	= SERIO_RS232,
		.proto	= SERIO_LKKBD,
		.id	= SERIO_ANY,
		.extra	= SERIO_ANY,
	},
	{ 0 }
};

MODULE_DEVICE_TABLE(serio, lkkbd_serio_ids);

/* Serio driver glue: connect/disconnect lifecycle plus the per-byte
 * receive callback. */
static struct serio_driver lkkbd_drv = {
	.driver		= {
		.name	= "lkkbd",
	},
	.description	= DRIVER_DESC,
	.id_table	= lkkbd_serio_ids,
	.connect	= lkkbd_connect,
	.disconnect	= lkkbd_disconnect,
	.interrupt	= lkkbd_interrupt,
};
/*
* The functions for insering/removing us as a module.
*/
/* Module entry point: register the serio driver with the serio core. */
static int __init lkkbd_init(void)
{
	return serio_register_driver(&lkkbd_drv);
}

/* Module exit point: unregister; serio will call lkkbd_disconnect()
 * for any bound keyboards. */
static void __exit lkkbd_exit(void)
{
	serio_unregister_driver(&lkkbd_drv);
}

module_init(lkkbd_init);
module_exit(lkkbd_exit);
| gpl-2.0 |
quadcores/linux | block/partitions/osf.c | 12940 | 1925 | /*
* fs/partitions/osf.c
*
* Code extracted from drivers/block/genhd.c
*
* Copyright (C) 1991-1998 Linus Torvalds
* Re-organised Feb 1998 Russell King
*/
#include "check.h"
#include "osf.h"
#define MAX_OSF_PARTITIONS 18
/*
 * osf_partition - parse a BSD/OSF disklabel from the first sector.
 *
 * The disklabel lives at byte offset 64 in sector 0; both magic
 * fields must match DISKLABELMAGIC and the on-disk partition count is
 * bounded by MAX_OSF_PARTITIONS so untrusted label data can never
 * index past d_partitions[].
 *
 * Returns 1 if a valid label was found and its non-empty partitions
 * were registered, 0 if the sector carries no (or a bogus) OSF label,
 * and -1 if the sector could not be read.
 *
 * Fix vs. previous revision: "read_part_sector(state, 0, §)" was a
 * mojibake of the HTML entity "&sect;" — the intended argument is
 * "&sect" (the address of the local Sector), restored here.
 */
int osf_partition(struct parsed_partitions *state)
{
	int i;
	int slot = 1;
	unsigned int npartitions;
	Sector sect;
	unsigned char *data;
	struct disklabel {
		__le32 d_magic;
		__le16 d_type,d_subtype;
		u8 d_typename[16];
		u8 d_packname[16];
		__le32 d_secsize;
		__le32 d_nsectors;
		__le32 d_ntracks;
		__le32 d_ncylinders;
		__le32 d_secpercyl;
		__le32 d_secprtunit;
		__le16 d_sparespertrack;
		__le16 d_sparespercyl;
		__le32 d_acylinders;
		__le16 d_rpm, d_interleave, d_trackskew, d_cylskew;
		__le32 d_headswitch, d_trkseek, d_flags;
		__le32 d_drivedata[5];
		__le32 d_spare[5];
		__le32 d_magic2;
		__le16 d_checksum;
		__le16 d_npartitions;
		__le32 d_bbsize, d_sbsize;
		struct d_partition {
			__le32 p_size;
			__le32 p_offset;
			__le32 p_fsize;
			u8  p_fstype;
			u8  p_frag;
			__le16 p_cpg;
		} d_partitions[MAX_OSF_PARTITIONS];
	} * label;
	struct d_partition * partition;

	data = read_part_sector(state, 0, &sect);
	if (!data)
		return -1;

	label = (struct disklabel *) (data+64);
	partition = label->d_partitions;
	if (le32_to_cpu(label->d_magic) != DISKLABELMAGIC) {
		put_dev_sector(sect);
		return 0;
	}
	if (le32_to_cpu(label->d_magic2) != DISKLABELMAGIC) {
		put_dev_sector(sect);
		return 0;
	}
	/* Clamp untrusted on-disk count to the table we declared. */
	npartitions = le16_to_cpu(label->d_npartitions);
	if (npartitions > MAX_OSF_PARTITIONS) {
		put_dev_sector(sect);
		return 0;
	}
	for (i = 0 ; i < npartitions; i++, partition++) {
		if (slot == state->limit)
			break;
		/* Zero-length entries are unused slots; skip them but
		 * keep the slot numbering aligned with the label. */
		if (le32_to_cpu(partition->p_size))
			put_partition(state, slot,
				le32_to_cpu(partition->p_offset),
				le32_to_cpu(partition->p_size));
		slot++;
	}
	strlcat(state->pp_buf, "\n", PAGE_SIZE);
	put_dev_sector(sect);
	return 1;
}
| gpl-2.0 |
str90/RK3188_tablet_kernel_sources | drivers/media/video/gc0309_for_td8801.c | 141 | 77404 | /*
o* Driver for MT9M001 CMOS Image Sensor from Micron
*
* Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/videodev2.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/log2.h>
#include <linux/platform_device.h>
#include <linux/delay.h>
#include <linux/circ_buf.h>
#include <linux/miscdevice.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>
#include <media/soc_camera.h>
#include <plat/rk_camera.h>
static int debug;
module_param(debug, int, S_IRUGO|S_IWUSR);
#define dprintk(level, fmt, arg...) do { \
if (debug >= level) \
printk(KERN_WARNING fmt , ## arg); } while (0)
#define SENSOR_TR(format, ...) printk(KERN_ERR format, ## __VA_ARGS__)
#define SENSOR_DG(format, ...) dprintk(1, format, ## __VA_ARGS__)
#define _CONS(a,b) a##b
#define CONS(a,b) _CONS(a,b)
#define __STR(x) #x
#define _STR(x) __STR(x)
#define STR(x) _STR(x)
#define MIN(x,y) ((x<y) ? x: y)
#define MAX(x,y) ((x>y) ? x: y)
/* Sensor Driver Configuration */
#define SENSOR_NAME RK29_CAM_SENSOR_GC0309
#define SENSOR_V4L2_IDENT V4L2_IDENT_GC0309
#define SENSOR_ID 0xa0
#define SENSOR_MIN_WIDTH 176
#define SENSOR_MIN_HEIGHT 144
#define SENSOR_MAX_WIDTH 640
#define SENSOR_MAX_HEIGHT 480
#define SENSOR_INIT_WIDTH 640 /* Sensor pixel size for sensor_init_data array */
#define SENSOR_INIT_HEIGHT 480
#define SENSOR_INIT_WINSEQADR sensor_vga
#define SENSOR_INIT_PIXFMT V4L2_MBUS_FMT_YUYV8_2X8
#define CONFIG_SENSOR_WhiteBalance 1
#define CONFIG_SENSOR_Brightness 0
#define CONFIG_SENSOR_Contrast 0
#define CONFIG_SENSOR_Saturation 0
#define CONFIG_SENSOR_Effect 1
#define CONFIG_SENSOR_Scene 1
#define CONFIG_SENSOR_DigitalZoom 0
#define CONFIG_SENSOR_Focus 0
#define CONFIG_SENSOR_Exposure 0
#define CONFIG_SENSOR_Flash 0
#define CONFIG_SENSOR_Mirror 0
#define CONFIG_SENSOR_Flip 0
#define CONFIG_SENSOR_I2C_SPEED 250000 /* Hz */
/* Sensor write register continues by preempt_disable/preempt_enable for current process not be scheduled */
#define CONFIG_SENSOR_I2C_NOSCHED 0
#define CONFIG_SENSOR_I2C_RDWRCHK 0
#define SENSOR_BUS_PARAM (SOCAM_MASTER | SOCAM_PCLK_SAMPLE_RISING|\
SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_HIGH |\
SOCAM_DATA_ACTIVE_HIGH | SOCAM_DATAWIDTH_8 |SOCAM_MCLK_24MHZ)
#define COLOR_TEMPERATURE_CLOUDY_DN 6500
#define COLOR_TEMPERATURE_CLOUDY_UP 8000
#define COLOR_TEMPERATURE_CLEARDAY_DN 5000
#define COLOR_TEMPERATURE_CLEARDAY_UP 6500
#define COLOR_TEMPERATURE_OFFICE_DN 3500
#define COLOR_TEMPERATURE_OFFICE_UP 5000
#define COLOR_TEMPERATURE_HOME_DN 2500
#define COLOR_TEMPERATURE_HOME_UP 3500
#define SENSOR_NAME_STRING(a) STR(CONS(SENSOR_NAME, a))
#define SENSOR_NAME_VARFUN(a) CONS(SENSOR_NAME, a)
#define SENSOR_AF_IS_ERR (0x00<<0)
#define SENSOR_AF_IS_OK (0x01<<0)
#define SENSOR_INIT_IS_ERR (0x00<<28)
#define SENSOR_INIT_IS_OK (0x01<<28)
struct reginfo
{
u8 reg;
u8 val;
};
/* init SVGA preview */
static struct reginfo sensor_init_data[] =
{
/*init registers code.*/
#if 1
{0xfe,0x80}, // soft reset
{0x1a,0x16},
{0xd2,0x10}, // close AEC
{0x22,0x55}, // close AWB
{0x5a,0x56},
{0x5b,0x40},
{0x5c,0x4a},
{0x22,0x57},
{0x01,0xfa},
{0x02,0x70},
{0x0f,0x01},
{0xe2,0x00},
{0xe3,0x64},
{0x03,0x01},
{0x04,0x2c},
{0x05,0x00},
{0x06,0x00},
{0x07,0x00},
{0x08,0x00},
{0x09,0x01},
{0x0a,0xe8},
{0x0b,0x02},
{0x0c,0x88},
{0x0d,0x02},
{0x0e,0x02},
{0x10,0x22},
{0x11,0x0d},
{0x12,0x2a},
{0x13,0x00},
{0x14,0x13},
{0x15,0x0a},
{0x16,0x05},
{0x17,0x01},
{0x1b,0x03},
{0x1c,0xc1},
{0x1d,0x08},
{0x1e,0x20},
{0x1f,0x16},
{0x20,0xff},
{0x21,0xf8},
{0x24,0xa0},
{0x25,0x0f},
//output sync_mode
{0x26,0x03},
{0x2f,0x01},
/////////////////////////////////////////////////////////////////////
/////////////////////////// grab_t ////////////////////////////////
/////////////////////////////////////////////////////////////////////
{0x30,0xf7},
{0x31,0x40},
{0x32,0x00},
{0x39,0x04},
{0x3a,0x20},
{0x3b,0x20},
{0x3c,0x02},
{0x3d,0x02},
{0x3e,0x02},
{0x3f,0x02},
//gain
{0x50,0x24},
{0x53,0x82},
{0x54,0x80},
{0x55,0x80},
{0x56,0x82},
/////////////////////////////////////////////////////////////////////
/////////////////////////// LSC_t ////////////////////////////////
/////////////////////////////////////////////////////////////////////
{0x8b,0x20},
{0x8c,0x20},
{0x8d,0x20},
{0x8e,0x10},
{0x8f,0x10},
{0x90,0x10},
{0x91,0x3c},
{0x92,0x50},
{0x5d,0x12},
{0x5e,0x1a},
{0x5f,0x24},
/////////////////////////////////////////////////////////////////////
/////////////////////////// DNDD_t ///////////////////////////////
/////////////////////////////////////////////////////////////////////
{0x60,0x07},
{0x61,0x0e},
{0x62,0x0c},
{0x64,0x03},
{0x66,0xe8},
{0x67,0x86},
{0x68,0xa2},
/////////////////////////////////////////////////////////////////////
/////////////////////////// asde_t ///////////////////////////////
/////////////////////////////////////////////////////////////////////
{0x69,0x20},
{0x6a,0x0f},
{0x6b,0x00},
{0x6c,0x53},
{0x6d,0x83},
{0x6e,0xac},
{0x6f,0xac},
{0x70,0x15},
{0x71,0x33},
/////////////////////////////////////////////////////////////////////
/////////////////////////// eeintp_t///////////////////////////////
/////////////////////////////////////////////////////////////////////
{0x72,0xdc},
{0x73,0x80},
//for high resolution in light scene
{0x74,0x02},
{0x75,0x3f},
{0x76,0x02},
{0x77,0x54},
{0x78,0x88},
{0x79,0x81},
{0x7a,0x81},
{0x7b,0x22},
{0x7c,0xff},
/////////////////////////////////////////////////////////////////////
///////////////////////////CC_t///////////////////////////////
/////////////////////////////////////////////////////////////////////
{0x93,0x45},
{0x94,0x00},
{0x95,0x00},
{0x96,0x00},
{0x97,0x45},
{0x98,0xf0},
{0x9c,0x00},
{0x9d,0x03},
{0x9e,0x00},
/////////////////////////////////////////////////////////////////////
///////////////////////////YCP_t///////////////////////////////
/////////////////////////////////////////////////////////////////////
{0xb1,0x40},
{0xb2,0x40},
{0xb8,0x20},
{0xbe,0x36},
{0xbf,0x00},
/////////////////////////////////////////////////////////////////////
///////////////////////////AEC_t///////////////////////////////
/////////////////////////////////////////////////////////////////////
{0xd0,0xc9},
{0xd1,0x10},
// {0xd2,0x90},
{0xd3,0x80},
{0xd5,0xf2},
{0xd6,0x16},
{0xdb,0x92},
{0xdc,0xa5},
{0xdf,0x23},
{0xd9,0x00},
{0xda,0x00},
{0xe0,0x09},
{0xec,0x20},
{0xed,0x04},
{0xee,0xa0},
{0xef,0x40},
//Y_gamma
{0xc0,0x00},
{0xc1,0x0B},
{0xc2,0x15},
{0xc3,0x27},
{0xc4,0x39},
{0xc5,0x49},
{0xc6,0x5A},
{0xc7,0x6A},
{0xc8,0x89},
{0xc9,0xA8},
{0xca,0xC6},
{0xcb,0xE3},
{0xcc,0xFF},
/////////////////////////////////////////////////////////////////
/////////////////////////// ABS_t ///////////////////////////////
/////////////////////////////////////////////////////////////////
{0xf0,0x02},
{0xf1,0x01},
{0xf2,0x00},
{0xf3,0x30},
/////////////////////////////////////////////////////////////////
/////////////////////////// Measure Window ///////////////////////
/////////////////////////////////////////////////////////////////
{0xf7,0x04},
{0xf8,0x02},
{0xf9,0x9f},
{0xfa,0x78},
#else
{0xfe,0x80}, // soft reset
{0x1a,0x16},
{0xd2,0x10}, // close AEC
{0x22,0x55}, // close AWB
{0x5a,0x56},
{0x5b,0x40},
{0x5c,0x4a},
{0x22,0x57},
{0x01,0xfa},
{0x02,0x70},
{0x0f,0x01},
{0xe2,0x00},
{0xe3,0x64},
{0x03,0x01},
{0x04,0x2c},
{0x05,0x00},
{0x06,0x00},
{0x07,0x00},
{0x08,0x00},
{0x09,0x01},
{0x0a,0xe8},
{0x0b,0x02},
{0x0c,0x88},
{0x0d,0x02},
{0x0e,0x02},
{0x10,0x22},
{0x11,0x0d},
{0x12,0x2a},
{0x13,0x00},
{0x15,0x0a},
{0x16,0x05},
{0x17,0x01},
{0x1b,0x03},
{0x1c,0xc1},
{0x1d,0x08},
{0x1e,0x20},
{0x1f,0x16},
{0x20,0xff},
{0x21,0xf8},
{0x24,0xa2},
{0x25,0x0f},
//output sync_mode
{0x26,0x03},
{0x2f,0x01},
/////////////////////////////////////////////////////////////////////
/////////////////////////// grab_t ////////////////////////////////
/////////////////////////////////////////////////////////////////////
{0x30,0xf7},
{0x31,0x40},
{0x32,0x00},
{0x39,0x04},
{0x3a,0x20},
{0x3b,0x20},
{0x3c,0x02},
{0x3d,0x02},
{0x3e,0x02},
{0x3f,0x02},
//gain
{0x50,0x24},
{0x53,0x82},
{0x54,0x80},
{0x55,0x80},
{0x56,0x82},
/////////////////////////////////////////////////////////////////////
/////////////////////////// LSC_t ////////////////////////////////
/////////////////////////////////////////////////////////////////////
{0x8b,0x20},
{0x8c,0x20},
{0x8d,0x20},
{0x8e,0x10},
{0x8f,0x10},
{0x90,0x10},
{0x91,0x3c},
{0x92,0x50},
{0x5d,0x12},
{0x5e,0x1a},
{0x5f,0x24},
/////////////////////////////////////////////////////////////////////
/////////////////////////// DNDD_t ///////////////////////////////
/////////////////////////////////////////////////////////////////////
{0x60,0x07},
{0x61,0x0e},
{0x62,0x0c},
{0x64,0x03},
{0x66,0xe8},
{0x67,0x86},
{0x68,0xa2},
/////////////////////////////////////////////////////////////////////
/////////////////////////// asde_t ///////////////////////////////
/////////////////////////////////////////////////////////////////////
{0x69,0x20},
{0x6a,0x0f},
{0x6b,0x00},
{0x6c,0x53},
{0x6d,0x83},
{0x6e,0xac},
{0x6f,0xac},
{0x70,0x15},
{0x71,0x33},
/////////////////////////////////////////////////////////////////////
/////////////////////////// eeintp_t///////////////////////////////
#endif
{0x23,0x00},
{0x2d,0x0a}, // 0x08
{0x20,0xff},
{0xd2,0x90},
{0x73,0x00},
{0x77,0x54},
{0xb3,0x40},
{0xb4,0x80},
{0xba,0x00},
{0xbb,0x00},
{0x00,0x00}
};
/* 640X480 VGA */
static struct reginfo sensor_vga[] =
{
//{0x45 , 0x0f}, //output enable
{0x15 , 0x0a}, //output enable
{0x0,0x0}
};
/* 352X288 CIF */
/*
 * NOTE: each reginfo table in this driver is terminated by a
 * {0x00, 0x00} sentinel (cf. sensor_vga above).  These tables were
 * previously empty ("{}"), which is a GNU extension and — worse —
 * leaves no sentinel, so any register-writing loop that walked them
 * would run past the end of the array.
 */
static struct reginfo sensor_cif[] =
{
	{0x00, 0x00}
};

/* 320*240 QVGA */
static struct reginfo sensor_qvga[] =
{
	{0x00, 0x00}
};

/* 176X144 QCIF*/
static struct reginfo sensor_qcif[] =
{
	{0x00, 0x00}
};
static struct reginfo sensor_ClrFmt_YUYV[]=
{
{0x24,0xa2},
{0x00, 0x00}
};
static struct reginfo sensor_ClrFmt_UYVY[]=
{
{0x24,0xa0},
{0x00, 0x00}
};
#if CONFIG_SENSOR_WhiteBalance
static struct reginfo sensor_WhiteB_Auto[]=
{
{0x5a,0x56}, //for AWB can adjust back
{0x5b,0x40},
{0x5c,0x4a},
{0x22,0x57}, // Enable AWB
{0x00, 0x00}
};
/* Cloudy Colour Temperature : 6500K - 8000K */
static struct reginfo sensor_WhiteB_Cloudy[]=
{
{0x22,0x55}, // Disable AWB
{0x5a,0x8c}, //WB_manual_gain
{0x5b,0x50},
{0x5c,0x40},
{0x00, 0x00}
};
/* ClearDay Colour Temperature : 5000K - 6500K */
static struct reginfo sensor_WhiteB_ClearDay[]=
{
//Sunny
{0x22,0x55},
{0x5a,0x74},
{0x5b,0x52},
{0x5c,0x40},
{0x00, 0x00}
};
/* Office Colour Temperature : 3500K - 5000K */
static struct reginfo sensor_WhiteB_Incandescent[]=
{
//Incandescent
{0x22,0x55},
{0x5a,0x48},
{0x5b,0x40},
{0x5c,0x5c},
{0x00, 0x00}
};
/* Home Colour Temperature : 2500K - 3500K */
static struct reginfo sensor_WhiteB_Fluorescent[]=
{
//fluorescent
{0x22,0x55},
{0x5a,0x40},
{0x5b,0x42},
{0x5c,0x50},
{0x00, 0x00}
};
static struct reginfo *sensor_WhiteBalanceSeqe[] = {sensor_WhiteB_Auto, sensor_WhiteB_Incandescent,sensor_WhiteB_Fluorescent,
sensor_WhiteB_ClearDay, sensor_WhiteB_Cloudy,NULL,
};
#endif
#if CONFIG_SENSOR_Brightness
static struct reginfo sensor_Brightness0[]=
{
// Brightness -2
{0x00, 0x00}
};
static struct reginfo sensor_Brightness1[]=
{
// Brightness -1
{0x00, 0x00}
};
static struct reginfo sensor_Brightness2[]=
{
// Brightness 0
{0x00, 0x00}
};
static struct reginfo sensor_Brightness3[]=
{
// Brightness +1
{0x00, 0x00}
};
static struct reginfo sensor_Brightness4[]=
{
// Brightness +2
{0x00, 0x00}
};
static struct reginfo sensor_Brightness5[]=
{
// Brightness +3
{0x00, 0x00}
};
static struct reginfo *sensor_BrightnessSeqe[] = {sensor_Brightness0, sensor_Brightness1, sensor_Brightness2, sensor_Brightness3,
sensor_Brightness4, sensor_Brightness5,NULL,
};
#endif
#if CONFIG_SENSOR_Effect
static struct reginfo sensor_Effect_Normal[] =
{
{0x23,0x00},
{0x2d,0x0a}, // 0x08
{0x20,0xff},
{0xd2,0x90},
{0x73,0x00},
{0x77,0x54},
{0xb3,0x40},
{0xb4,0x80},
{0xba,0x00},
{0xbb,0x00},
{0x00, 0x00}
};
static struct reginfo sensor_Effect_WandB[] =
{
{0x23,0x02},
{0x2d,0x0a},
{0x20,0xbf},
{0xd2,0x10},
{0x73,0x01},
{0x51,0x40},
{0x52,0x40},
{0xb3,0x60},
{0xb4,0x40},
{0xba,0x00},
{0xbb,0x00},
{0x00, 0x00}
};
static struct reginfo sensor_Effect_Sepia[] =
{
{0x23,0x02},
{0x2d,0x0a},
{0x20,0xff},
{0xd2,0x90},
{0x73,0x00},
{0xb3,0x40},
{0xb4,0x80},
{0xba,0xd0},
{0xbb,0x28},
{0x00, 0x00}
};
static struct reginfo sensor_Effect_Negative[] =
{
//Negative
{0x23,0x03},
{0x2d,0x0a},
{0x20,0xff},
{0xd2,0x90},
{0x73,0x00},
{0xb3,0x40},
{0xb4,0x80},
{0xba,0x00},
{0xbb,0x00},
{0x00, 0x00}
};
static struct reginfo sensor_Effect_Bluish[] =
{
// Bluish
{0x23,0x02},
{0x2d,0x0a},
{0x20,0xff},
{0xd2,0x90},
{0x73,0x00},
{0xb3,0x40},
{0xb4,0x80},
{0xba,0x50},
{0xbb,0xe0},
{0x00, 0x00}
};
static struct reginfo sensor_Effect_Green[] =
{
// Greenish
{0x23,0x02},
{0x2d,0x0a},
{0x20,0xff},
{0xd2,0x90},
{0x77,0x88},
{0xb3,0x40},
{0xb4,0x80},
{0xba,0xc0},
{0xbb,0xc0},
{0x00, 0x00}
};
static struct reginfo sensor_Effect_Grayscale[]=
{
{0x23,0x02},
{0x2d,0x0a},
{0x20,0xff},
{0xd2,0x90},
{0x73,0x00},
{0xb3,0x40},
{0xb4,0x80},
{0xba,0x00},
{0xbb,0x00},
{0x00, 0x00}
};
static struct reginfo *sensor_EffectSeqe[] = {sensor_Effect_Normal, sensor_Effect_WandB, sensor_Effect_Negative,sensor_Effect_Sepia,
sensor_Effect_Bluish, sensor_Effect_Green,sensor_Effect_Grayscale,NULL,
};
#endif
#if CONFIG_SENSOR_Exposure
static struct reginfo sensor_Exposure0[]=
{
	//-3
	/* Fix: every sibling Exposure table ends with the {0x00, 0x00}
	 * sentinel that terminates register walks; this one lacked it. */
	{0x00, 0x00}
};
static struct reginfo sensor_Exposure1[]=
{
//-2
{0x00, 0x00}
};
static struct reginfo sensor_Exposure2[]=
{
//-0.3EV
{0x00, 0x00}
};
static struct reginfo sensor_Exposure3[]=
{
//default
{0x00, 0x00}
};
static struct reginfo sensor_Exposure4[]=
{
// 1
{0x00, 0x00}
};
static struct reginfo sensor_Exposure5[]=
{
// 2
{0x00, 0x00}
};
static struct reginfo sensor_Exposure6[]=
{
// 3
{0x00, 0x00}
};
static struct reginfo *sensor_ExposureSeqe[] = {sensor_Exposure0, sensor_Exposure1, sensor_Exposure2, sensor_Exposure3,
sensor_Exposure4, sensor_Exposure5,sensor_Exposure6,NULL,
};
#endif
#if CONFIG_SENSOR_Saturation
static struct reginfo sensor_Saturation0[]=
{
{0x00, 0x00}
};
static struct reginfo sensor_Saturation1[]=
{
{0x00, 0x00}
};
static struct reginfo sensor_Saturation2[]=
{
{0x00, 0x00}
};
static struct reginfo *sensor_SaturationSeqe[] = {sensor_Saturation0, sensor_Saturation1, sensor_Saturation2, NULL,};
#endif
#if CONFIG_SENSOR_Contrast
/* Contrast level tables (-3 .. +3); placeholders holding only the sentinel. */
static struct reginfo sensor_Contrast0[]=
{
//Contrast -3
{0x00, 0x00}
};
static struct reginfo sensor_Contrast1[]=
{
//Contrast -2
{0x00, 0x00}
};
static struct reginfo sensor_Contrast2[]=
{
// Contrast -1
{0x00, 0x00}
};
static struct reginfo sensor_Contrast3[]=
{
//Contrast 0
{0x00, 0x00}
};
static struct reginfo sensor_Contrast4[]=
{
//Contrast +1
{0x00, 0x00}
};
static struct reginfo sensor_Contrast5[]=
{
//Contrast +2
{0x00, 0x00}
};
static struct reginfo sensor_Contrast6[]=
{
//Contrast +3
{0x00, 0x00}
};
/* Contrast tables indexed by (value - qctrl->minimum); NULL-terminated. */
static struct reginfo *sensor_ContrastSeqe[] = {sensor_Contrast0, sensor_Contrast1, sensor_Contrast2, sensor_Contrast3,
sensor_Contrast4, sensor_Contrast5, sensor_Contrast6, NULL,
};
#endif
#if CONFIG_SENSOR_Mirror
/* Horizontal-mirror tables (placeholders: sentinel only). Index 0 = off, 1 = on. */
static struct reginfo sensor_MirrorOn[]=
{
{0x00, 0x00}
};
static struct reginfo sensor_MirrorOff[]=
{
{0x00, 0x00}
};
static struct reginfo *sensor_MirrorSeqe[] = {sensor_MirrorOff, sensor_MirrorOn,NULL,};
#endif
#if CONFIG_SENSOR_Flip
/* Vertical-flip tables (placeholders: sentinel only). Index 0 = off, 1 = on. */
static struct reginfo sensor_FlipOn[]=
{
{0x00, 0x00}
};
static struct reginfo sensor_FlipOff[]=
{
{0x00, 0x00}
};
static struct reginfo *sensor_FlipSeqe[] = {sensor_FlipOff, sensor_FlipOn,NULL,};
#endif
#if CONFIG_SENSOR_Scene
/* Scene "auto" register sequence. The active branch restores normal
 * frame-rate/banding settings after night mode. */
static struct reginfo sensor_SceneAuto[] =
{
#if 0 /* ddl@rock-chips.com : */
{0x3014, 0x04},
{0x3015, 0x00},
{0x302e, 0x00},
{0x302d, 0x00},
{0x00, 0x00}
#else
{0xec ,0x20},
{0x20 ,0x7f}, // close cc
{0x3c ,0x02},
{0x3d ,0x02},
{0x3e ,0x02},
{0x3f ,0x02},
{0x00, 0x00}
#endif
};
/* Scene "night" register sequence: lowers the minimum frame rate for
 * low-light exposure. Fix: the #else branch had no {0x00, 0x00} sentinel,
 * so if the #if were ever flipped to 0, sensor_write_array() (which scans
 * until reg == 0) would read past the end of the table. */
static struct reginfo sensor_SceneNight[] =
{
#if 1
//30fps ~ 5fps night mode for 60/50Hz light environment, 24Mhz clock input,36Mzh pclk
{0xec ,0x30},
{0x20 ,0x5f}, // close cc
{0x3c ,0x08},
{0x3d ,0x08},
{0x3e ,0x08},
{0x3f ,0x08},
{0x00, 0x00}
#else
//15fps ~ 5fps night mode for 60/50Hz light environment, 24Mhz clock input,18Mhz pclk
{0x300e, 0x34},
{0x3011, 0x01},
{0x302c, 0x00},
{0x3071, 0x00},
{0x3070, 0x5d},
{0x301c, 0x05},
{0x3073, 0x00},
{0x3072, 0x4d},
{0x301d, 0x07},
{0x3014, 0x0c},
{0x3015, 0x50},
{0x302e, 0x00},
{0x302d, 0x00},
{0x00, 0x00} /* end-of-table sentinel (was missing) */
#endif
};
static struct reginfo *sensor_SceneSeqe[] = {sensor_SceneAuto, sensor_SceneNight,NULL,}; /* indexed by the V4L2_CID_SCENE menu value */
#endif
#if CONFIG_SENSOR_DigitalZoom
/* Digital-zoom step tables (placeholders: sentinel only, no-op when written). */
static struct reginfo sensor_Zoom0[] =
{
{0x0, 0x0},
};
static struct reginfo sensor_Zoom1[] =
{
{0x0, 0x0},
};
static struct reginfo sensor_Zoom2[] =
{
{0x0, 0x0},
};
static struct reginfo sensor_Zoom3[] =
{
{0x0, 0x0},
};
static struct reginfo *sensor_ZoomSeqe[] = {sensor_Zoom0, sensor_Zoom1, sensor_Zoom2, sensor_Zoom3, NULL,};
#endif
/* Menu item names for the menu-type controls below; .index values must line
 * up with the corresponding *Seqe[] register-table arrays. */
static const struct v4l2_querymenu sensor_menus[] =
{
#if CONFIG_SENSOR_WhiteBalance
{ .id = V4L2_CID_DO_WHITE_BALANCE, .index = 0, .name = "auto", .reserved = 0, }, { .id = V4L2_CID_DO_WHITE_BALANCE, .index = 1, .name = "incandescent", .reserved = 0,},
{ .id = V4L2_CID_DO_WHITE_BALANCE, .index = 2, .name = "fluorescent", .reserved = 0,}, { .id = V4L2_CID_DO_WHITE_BALANCE, .index = 3, .name = "daylight", .reserved = 0,},
{ .id = V4L2_CID_DO_WHITE_BALANCE, .index = 4, .name = "cloudy-daylight", .reserved = 0,},
#endif
#if CONFIG_SENSOR_Effect
{ .id = V4L2_CID_EFFECT, .index = 0, .name = "none", .reserved = 0, }, { .id = V4L2_CID_EFFECT, .index = 1, .name = "mono", .reserved = 0,},
{ .id = V4L2_CID_EFFECT, .index = 2, .name = "negative", .reserved = 0,}, { .id = V4L2_CID_EFFECT, .index = 3, .name = "sepia", .reserved = 0,},
{ .id = V4L2_CID_EFFECT, .index = 4, .name = "posterize", .reserved = 0,} ,{ .id = V4L2_CID_EFFECT, .index = 5, .name = "aqua", .reserved = 0,},
{ .id = V4L2_CID_EFFECT, .index = 6, .name = "grayscale", .reserved = 0,},
#endif
#if CONFIG_SENSOR_Scene
{ .id = V4L2_CID_SCENE, .index = 0, .name = "auto", .reserved = 0,} ,{ .id = V4L2_CID_SCENE, .index = 1, .name = "night", .reserved = 0,},
#endif
#if CONFIG_SENSOR_Flash
{ .id = V4L2_CID_FLASH, .index = 0, .name = "off", .reserved = 0, }, { .id = V4L2_CID_FLASH, .index = 1, .name = "auto", .reserved = 0,},
{ .id = V4L2_CID_FLASH, .index = 2, .name = "on", .reserved = 0,}, { .id = V4L2_CID_FLASH, .index = 3, .name = "torch", .reserved = 0,},
#endif
};
/* V4L2 control descriptors exported through soc_camera_ops. The
 * minimum/maximum ranges must match the index ranges of the *Seqe[]
 * register-table arrays that the sensor_set_* handlers dereference. */
static const struct v4l2_queryctrl sensor_controls[] =
{
#if CONFIG_SENSOR_WhiteBalance
{
.id = V4L2_CID_DO_WHITE_BALANCE,
.type = V4L2_CTRL_TYPE_MENU,
.name = "White Balance Control",
.minimum = 0,
.maximum = 4,
.step = 1,
.default_value = 0,
},
#endif
#if CONFIG_SENSOR_Brightness
{
.id = V4L2_CID_BRIGHTNESS,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Brightness Control",
.minimum = -3,
.maximum = 2,
.step = 1,
.default_value = 0,
},
#endif
#if CONFIG_SENSOR_Effect
{
.id = V4L2_CID_EFFECT,
.type = V4L2_CTRL_TYPE_MENU,
.name = "Effect Control",
.minimum = 0,
.maximum = 6,
.step = 1,
.default_value = 0,
},
#endif
#if CONFIG_SENSOR_Exposure
{
.id = V4L2_CID_EXPOSURE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Exposure Control",
.minimum = 0,
.maximum = 6,
.step = 1,
.default_value = 0,
},
#endif
#if CONFIG_SENSOR_Saturation
{
.id = V4L2_CID_SATURATION,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Saturation Control",
.minimum = 0,
.maximum = 2,
.step = 1,
.default_value = 0,
},
#endif
#if CONFIG_SENSOR_Contrast
{
.id = V4L2_CID_CONTRAST,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Contrast Control",
.minimum = -3,
.maximum = 3,
.step = 1,
.default_value = 0,
},
#endif
#if CONFIG_SENSOR_Mirror
{
.id = V4L2_CID_HFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Mirror Control",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 1,
},
#endif
#if CONFIG_SENSOR_Flip
{
.id = V4L2_CID_VFLIP,
.type = V4L2_CTRL_TYPE_BOOLEAN,
.name = "Flip Control",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 1,
},
#endif
#if CONFIG_SENSOR_Scene
{
.id = V4L2_CID_SCENE,
.type = V4L2_CTRL_TYPE_MENU,
.name = "Scene Control",
.minimum = 0,
.maximum = 1,
.step = 1,
.default_value = 0,
},
#endif
#if CONFIG_SENSOR_DigitalZoom
{
.id = V4L2_CID_ZOOM_RELATIVE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "DigitalZoom Control",
.minimum = -1,
.maximum = 1,
.step = 1,
.default_value = 0,
}, {
.id = V4L2_CID_ZOOM_ABSOLUTE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "DigitalZoom Control",
.minimum = 0,
.maximum = 3,
.step = 1,
.default_value = 0,
},
#endif
#if CONFIG_SENSOR_Focus
{
.id = V4L2_CID_FOCUS_RELATIVE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Focus Control",
.minimum = -1,
.maximum = 1,
.step = 1,
.default_value = 0,
}, {
.id = V4L2_CID_FOCUS_ABSOLUTE,
.type = V4L2_CTRL_TYPE_INTEGER,
.name = "Focus Control",
.minimum = 0,
.maximum = 255,
.step = 1,
.default_value = 125,
},
#endif
#if CONFIG_SENSOR_Flash
{
.id = V4L2_CID_FLASH,
.type = V4L2_CTRL_TYPE_MENU,
.name = "Flash Control",
.minimum = 0,
.maximum = 3,
.step = 1,
.default_value = 0,
},
#endif
};
/* Forward declarations (needed because sensor_ops below references some of
 * these, and sensor_init() calls sensor_deactivate() defined later). */
static int sensor_probe(struct i2c_client *client, const struct i2c_device_id *did);
static int sensor_video_probe(struct soc_camera_device *icd, struct i2c_client *client);
static int sensor_g_control(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
static int sensor_s_control(struct v4l2_subdev *sd, struct v4l2_control *ctrl);
static int sensor_g_ext_controls(struct v4l2_subdev *sd, struct v4l2_ext_controls *ext_ctrl);
static int sensor_s_ext_controls(struct v4l2_subdev *sd, struct v4l2_ext_controls *ext_ctrl);
static int sensor_suspend(struct soc_camera_device *icd, pm_message_t pm_msg);
static int sensor_resume(struct soc_camera_device *icd);
static int sensor_set_bus_param(struct soc_camera_device *icd,unsigned long flags);
static unsigned long sensor_query_bus_param(struct soc_camera_device *icd);
static int sensor_set_effect(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value);
static int sensor_set_whiteBalance(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value);
static int sensor_deactivate(struct i2c_client *client);
/* soc_camera host interface: PM hooks, bus parameters, and the control/menu
 * tables defined above. */
static struct soc_camera_ops sensor_ops =
{
.suspend = sensor_suspend,
.resume = sensor_resume,
.set_bus_param = sensor_set_bus_param,
.query_bus_param = sensor_query_bus_param,
.controls = sensor_controls,
.menus = sensor_menus,
.num_controls = ARRAY_SIZE(sensor_controls),
.num_menus = ARRAY_SIZE(sensor_menus),
};
/* only one fixed colorspace per pixelcode */
struct sensor_datafmt {
enum v4l2_mbus_pixelcode code; /* media-bus pixel format */
enum v4l2_colorspace colorspace; /* colorspace paired with that format */
};
/* Find a data format by a pixel code in an array */
static const struct sensor_datafmt *sensor_find_datafmt(
enum v4l2_mbus_pixelcode code, const struct sensor_datafmt *fmt,
int n)
{
int i;
for (i = 0; i < n; i++)
if (fmt[i].code == code)
return fmt + i;
return NULL;
}
/* Media-bus formats this sensor can output (searched by sensor_find_datafmt). */
static const struct sensor_datafmt sensor_colour_fmts[] = {
{V4L2_MBUS_FMT_UYVY8_2X8, V4L2_COLORSPACE_JPEG},
{V4L2_MBUS_FMT_YUYV8_2X8, V4L2_COLORSPACE_JPEG}
};
/* Cached per-device control state, kept in sync with values written to the
 * chip so settings can be re-applied after resolution/mode switches. */
typedef struct sensor_info_priv_s
{
int whiteBalance;
int brightness;
int contrast;
int saturation;
int effect;
int scene;
int digitalzoom;
int focus;
int flash;
int exposure;
bool snap2preview; /* last switch was capture -> preview */
bool video2preview; /* last switch was video -> preview */
unsigned char mirror; /* HFLIP */
unsigned char flip; /* VFLIP */
unsigned int winseqe_cur_addr; /* pointer to the active window register table, stored as int */
struct sensor_datafmt fmt; /* currently programmed bus format */
unsigned int funmodule_state; /* SENSOR_INIT_IS_OK etc. status bits */
} sensor_info_priv_t;
/* Driver instance: v4l2 subdev plus cached state and board glue. */
struct sensor
{
struct v4l2_subdev subdev;
struct i2c_client *client;
sensor_info_priv_t info_priv;
int model; /* V4L2_IDENT_OV* codes from v4l2-chip-ident.h */
#if CONFIG_SENSOR_I2C_NOSCHED
atomic_t tasklock_cnt; /* nesting depth of the preempt-disabled i2c section */
#endif
struct rk29camera_platform_data *sensor_io_request;
struct rk29camera_gpio_res *sensor_gpio_res;
};
/* Recover the driver instance from the i2c client's subdev clientdata. */
static struct sensor* to_sensor(const struct i2c_client *client)
{
return container_of(i2c_get_clientdata(client), struct sensor, subdev);
}
/*
 * Serialize i2c register sequences against scheduling when
 * CONFIG_SENSOR_I2C_NOSCHED is set: lock==1 waits (up to 3 x 35ms) for the
 * i2c bus semaphore to be free, then disables preemption; lock==0 undoes
 * one level. Nesting is tracked in sensor->tasklock_cnt so preemption is
 * only re-enabled when the outermost unlock happens.
 * Returns 0 on success, -1 if the bus stayed busy. Without the config
 * option this is a no-op returning 0.
 */
static int sensor_task_lock(struct i2c_client *client, int lock)
{
#if CONFIG_SENSOR_I2C_NOSCHED
int cnt = 3;
struct sensor *sensor = to_sensor(client);
if (lock) {
if (atomic_read(&sensor->tasklock_cnt) == 0) {
/* bus_lock.count < 1 means another task holds the adapter semaphore */
while ((atomic_read(&client->adapter->bus_lock.count) < 1) && (cnt>0)) {
SENSOR_TR("\n %s will obtain i2c in atomic, but i2c bus is locked! Wait...\n",SENSOR_NAME_STRING());
msleep(35);
cnt--;
}
if ((atomic_read(&client->adapter->bus_lock.count) < 1) && (cnt<=0)) {
SENSOR_TR("\n %s obtain i2c fail in atomic!!\n",SENSOR_NAME_STRING());
goto sensor_task_lock_err;
}
preempt_disable();
}
atomic_add(1, &sensor->tasklock_cnt);
} else {
if (atomic_read(&sensor->tasklock_cnt) > 0) {
atomic_sub(1, &sensor->tasklock_cnt);
if (atomic_read(&sensor->tasklock_cnt) == 0)
preempt_enable();
}
}
return 0;
sensor_task_lock_err:
return -1;
#else
return 0;
#endif
}
#if 0
/* sensor register */
/* sensor register */
/* Disabled (#if 0) simpler register accessors using the rockchip
 * i2c_master_reg8_* helpers; the active implementations below use raw
 * i2c_transfer() with retries instead. Kept byte-identical for reference. */
static int sensor_read(struct i2c_client *client, u8 reg, u8 *val)
{
int ret = 0;
ret = i2c_master_reg8_recv(client, reg, val, 1, CONFIG_SENSOR_I2C_SPEED);
return (ret > 0)? 0 : ret;
}
static int sensor_write(struct i2c_client *client, u8 reg, u8 val)
{
int ret = 0;
ret = i2c_master_reg8_send(client, reg, &val, 1, CONFIG_SENSOR_I2C_SPEED);
return (ret > 0)? 0 : ret;
}
#else
/*
 * Write one 8-bit register over i2c, retrying up to 3 times on failure.
 * Returns 0 on success, or the negative i2c_transfer() error.
 * scl_rate / read_type are rockchip-specific i2c_msg extensions.
 */
static int sensor_write(struct i2c_client *client, u8 reg, u8 val)
{
int err,cnt;
u8 buf[2];
struct i2c_msg msg[1];
buf[0] = reg;
buf[1] = val;
/* reg 0xfe needs extra settling time before it is written (soft-reset/page
 * register on this chip family — see sensor_init's 0xfe,0x80 reset write) */
if (reg == 0xfe)
mdelay(10);
msg->addr = client->addr;
msg->flags = client->flags;
msg->buf = buf;
msg->len = sizeof(buf);
msg->scl_rate = CONFIG_SENSOR_I2C_SPEED; /* ddl@rock-chips.com : 100kHz */
msg->read_type = 0; /* fpga i2c:0==I2C_NORMAL : direct use number not enum for don't want include spi_fpga.h */
cnt = 3;
err = -EAGAIN;
while ((cnt-- > 0) && (err < 0)) { /* ddl@rock-chips.com : Transfer again if transent is failed */
err = i2c_transfer(client->adapter, msg, 1);
if (err >= 0) {
return 0;
} else {
SENSOR_TR("\n %s write reg(0x%x, val:0x%x) failed, try to write again!\n",SENSOR_NAME_STRING(),reg, val);
udelay(10);
}
}
return err;
}
/* sensor register read */
/*
 * Read one 8-bit register over i2c (write register address, then read one
 * byte), retrying up to 3 times. Returns 0 with *val filled on success, or
 * the negative i2c_transfer() error.
 * NOTE(review): the failure log prints *val before it has been written, so
 * it may show an uninitialized caller byte — cosmetic only.
 */
static int sensor_read(struct i2c_client *client, u8 reg, u8 *val)
{
int err,cnt;
u8 buf[1];
struct i2c_msg msg[2];
buf[0] = reg ;
/* msg[0]: address phase (no STOP), msg[1]: one-byte read */
msg[0].addr = client->addr;
msg[0].flags = client->flags;
msg[0].buf = buf;
msg[0].len = sizeof(buf);
msg[0].scl_rate = CONFIG_SENSOR_I2C_SPEED; /* ddl@rock-chips.com : 100kHz */
msg[0].read_type = 2; /* fpga i2c:0==I2C_NO_STOP : direct use number not enum for don't want include spi_fpga.h */
msg[1].addr = client->addr;
msg[1].flags = client->flags|I2C_M_RD;
msg[1].buf = buf;
msg[1].len = 1;
msg[1].scl_rate = CONFIG_SENSOR_I2C_SPEED; /* ddl@rock-chips.com : 100kHz */
msg[1].read_type = 2; /* fpga i2c:0==I2C_NO_STOP : direct use number not enum for don't want include spi_fpga.h */
cnt = 3;
err = -EAGAIN;
while ((cnt-- > 0) && (err < 0)) { /* ddl@rock-chips.com : Transfer again if transent is failed */
err = i2c_transfer(client->adapter, msg, 2);
if (err >= 0) {
*val = buf[0];
return 0;
} else {
SENSOR_TR("\n %s read reg(0x%x val:0x%x) failed, try to read again! \n",SENSOR_NAME_STRING(),reg, *val);
udelay(10);
}
}
return err;
}
#endif
/* write a array of registers */
/*
 * Write a {reg, val} table to the chip until the {0x00, 0x00} sentinel,
 * holding the NOSCHED task lock for the whole sequence. Returns 0 on
 * success, a negative error otherwise.
 * NOTE(review): cnt starts at 0, so the (cnt-- > 0) restart branch can
 * never be taken and any write failure aborts immediately despite the
 * "Write array again" message — confirm whether cnt was meant to be > 0.
 */
static int sensor_write_array(struct i2c_client *client, struct reginfo *regarray)
{
int err = 0, cnt;
int i = 0;
#if CONFIG_SENSOR_I2C_RDWRCHK
int j = 0;
char valchk;
#endif
cnt = 0;
if (sensor_task_lock(client, 1) < 0)
goto sensor_write_array_end;
while (regarray[i].reg != 0)
{
err = sensor_write(client, regarray[i].reg, regarray[i].val);
if (err < 0)
{
if (cnt-- > 0) {
SENSOR_TR("%s..write failed current reg:0x%x, Write array again !\n", SENSOR_NAME_STRING(),regarray[i].reg);
i = 0; /* restart the whole table from the top */
continue;
} else {
SENSOR_TR("%s..write array failed!!!\n", SENSOR_NAME_STRING());
err = -EPERM;
goto sensor_write_array_end;
}
} else {
#if CONFIG_SENSOR_I2C_RDWRCHK
//mdelay(5);
/* read back and compare to catch silently dropped writes */
sensor_read(client, regarray[i].reg, &valchk);
if (valchk != regarray[i].val)
SENSOR_TR("%s Reg:0x%x write(0x%x, 0x%x) fail\n",SENSOR_NAME_STRING(), regarray[i].reg, regarray[i].val, valchk);
#endif
}
i++;
}
sensor_write_array_end:
sensor_task_lock(client,0);
return err;
}
#if CONFIG_SENSOR_I2C_RDWRCHK
/* Read back every entry of a register table and log any value that does
 * not match what the table says should be there. Always returns 0. */
static int sensor_readchk_array(struct i2c_client *client, struct reginfo *regarray)
{
	int cnt;
	int i;
	char valchk = 0;

	cnt = 0;
	for (i = 0; regarray[i].reg != 0; i++) {
		sensor_read(client, regarray[i].reg, &valchk);
		if (valchk != regarray[i].val)
			SENSOR_TR("%s Reg:0x%x read(0x%x, 0x%x) error\n",SENSOR_NAME_STRING(), regarray[i].reg, regarray[i].val, valchk);
	}
	return 0;
}
#endif
/*
 * Dispatch board-level power/flash requests to the platform callbacks.
 * Sensor_PowerDown: toggles the powerdown GPIO via icl->powerdown(); after
 * powering up (on == 0) it also pulses reset. Returns -ENODEV if the GPIO
 * request failed. Sensor_Flash: forwards to the platform sensor_ioctrl hook.
 * Returns 0 otherwise (unknown commands are only logged).
 */
static int sensor_ioctrl(struct soc_camera_device *icd,enum rk29sensor_power_cmd cmd, int on)
{
struct soc_camera_link *icl = to_soc_camera_link(icd);
int ret = 0;
SENSOR_DG("%s %s cmd(%d) on(%d)\n",SENSOR_NAME_STRING(),__FUNCTION__,cmd,on);
switch (cmd)
{
case Sensor_PowerDown:
{
if (icl->powerdown) {
ret = icl->powerdown(icd->pdev, on);
if (ret == RK29_CAM_IO_SUCCESS) {
if (on == 0) {
/* just powered up: give the chip 2ms, then pulse reset */
mdelay(2);
if (icl->reset)
icl->reset(icd->pdev);
}
} else if (ret == RK29_CAM_EIO_REQUESTFAIL) {
ret = -ENODEV;
goto sensor_power_end;
}
}
break;
}
case Sensor_Flash:
{
struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
struct sensor *sensor = to_sensor(client);
if (sensor->sensor_io_request && sensor->sensor_io_request->sensor_ioctrl) {
sensor->sensor_io_request->sensor_ioctrl(icd->pdev,Cam_Flash, on);
}
break;
}
default:
{
SENSOR_TR("%s %s cmd(0x%x) is unknown!",SENSOR_NAME_STRING(),__FUNCTION__,cmd);
break;
}
}
sensor_power_end:
return ret;
}
/*
 * v4l2_subdev core init: power the sensor up, soft-reset it, verify the
 * chip ID, download the full init register table, and seed the cached
 * control state from the control defaults. On any failure the sensor is
 * powered back down via sensor_deactivate() and a negative errno returned.
 */
static int sensor_init(struct v4l2_subdev *sd, u32 val)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct soc_camera_device *icd = client->dev.platform_data;
struct sensor *sensor = to_sensor(client);
const struct v4l2_queryctrl *qctrl;
const struct sensor_datafmt *fmt;
char value;
int ret;
if (sensor_ioctrl(icd, Sensor_PowerDown, 0) < 0) {
ret = -ENODEV;
goto sensor_INIT_ERR;
}
/* soft reset */
if (sensor_task_lock(client,1)<0)
goto sensor_INIT_ERR;
ret = sensor_write(client, 0xfe, 0x80);
if (ret != 0)
{
SENSOR_TR("%s soft reset sensor failed\n",SENSOR_NAME_STRING());
ret = -ENODEV;
goto sensor_INIT_ERR;
}
mdelay(5); /* wait 5 milliseconds after reset (mdelay is ms, not us) */
/* check if it is an sensor sensor */
ret = sensor_read(client, 0x00, &value);
if (ret != 0) {
SENSOR_TR("read chip id high byte failed\n");
ret = -ENODEV;
goto sensor_INIT_ERR;
}
if (value == SENSOR_ID) {
sensor->model = SENSOR_V4L2_IDENT;
} else {
SENSOR_TR("error: %s mismatched pid = 0x%x\n", SENSOR_NAME_STRING(), value);
ret = -ENODEV;
goto sensor_INIT_ERR;
}
/* download the full power-on register table */
ret = sensor_write_array(client, sensor_init_data);
if (ret != 0)
{
SENSOR_TR("error: %s initial failed\n",SENSOR_NAME_STRING());
goto sensor_INIT_ERR;
}
sensor_task_lock(client,0);
sensor->info_priv.winseqe_cur_addr = (int)SENSOR_INIT_WINSEQADR;
fmt = sensor_find_datafmt(SENSOR_INIT_PIXFMT,sensor_colour_fmts, ARRAY_SIZE(sensor_colour_fmts));
if (!fmt) {
SENSOR_TR("error: %s initial array colour fmts is not support!!",SENSOR_NAME_STRING());
ret = -EINVAL;
goto sensor_INIT_ERR;
}
sensor->info_priv.fmt = *fmt;
/* sensor sensor information for initialization */
/* seed cached control state from each control's default_value */
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_DO_WHITE_BALANCE);
if (qctrl)
sensor->info_priv.whiteBalance = qctrl->default_value;
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_BRIGHTNESS);
if (qctrl)
sensor->info_priv.brightness = qctrl->default_value;
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_EFFECT);
if (qctrl)
sensor->info_priv.effect = qctrl->default_value;
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_EXPOSURE);
if (qctrl)
sensor->info_priv.exposure = qctrl->default_value;
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_SATURATION);
if (qctrl)
sensor->info_priv.saturation = qctrl->default_value;
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_CONTRAST);
if (qctrl)
sensor->info_priv.contrast = qctrl->default_value;
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_HFLIP);
if (qctrl)
sensor->info_priv.mirror = qctrl->default_value;
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_VFLIP);
if (qctrl)
sensor->info_priv.flip = qctrl->default_value;
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_SCENE);
if (qctrl)
sensor->info_priv.scene = qctrl->default_value;
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_ZOOM_ABSOLUTE);
if (qctrl)
sensor->info_priv.digitalzoom = qctrl->default_value;
/* ddl@rock-chips.com : if sensor support auto focus and flash, programer must run focus and flash code */
#if CONFIG_SENSOR_Focus
//sensor_set_focus();
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_FOCUS_ABSOLUTE);
if (qctrl)
sensor->info_priv.focus = qctrl->default_value;
#endif
#if CONFIG_SENSOR_Flash
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_FLASH);
if (qctrl)
sensor->info_priv.flash = qctrl->default_value;
#endif
SENSOR_DG("\n%s..%s.. icd->width = %d.. icd->height %d\n",SENSOR_NAME_STRING(),((val == 0)?__FUNCTION__:"sensor_reinit"),icd->user_width,icd->user_height);
sensor->info_priv.funmodule_state |= SENSOR_INIT_IS_OK;
return 0;
sensor_INIT_ERR:
/* undo: clear init flag, drop the task lock if held, power down */
sensor->info_priv.funmodule_state &= ~SENSOR_INIT_IS_OK;
sensor_task_lock(client,0);
sensor_deactivate(client);
return ret;
}
/*
 * Power the sensor down (assert the powerdown GPIO) and reset the cached
 * soc_camera geometry to the init size so the next open starts from a
 * known configuration. Always returns 0.
 */
static int sensor_deactivate(struct i2c_client *client)
{
struct soc_camera_device *icd = client->dev.platform_data;
struct sensor *sensor = to_sensor(client);
SENSOR_DG("\n%s..%s.. Enter\n",SENSOR_NAME_STRING(),__FUNCTION__);
/* ddl@rock-chips.com : all sensor output pin must change to input for other sensor */
sensor_ioctrl(icd, Sensor_PowerDown, 1);
msleep(100);
/* ddl@rock-chips.com : sensor config init width , because next open sensor quickly(soc_camera_open -> Try to configure with default parameters) */
icd->user_width = SENSOR_INIT_WIDTH;
icd->user_height = SENSOR_INIT_HEIGHT;
sensor->info_priv.funmodule_state &= ~SENSOR_INIT_IS_OK;
return 0;
}
/* Registers written before suspend; currently empty (sentinel only). */
static struct reginfo sensor_power_down_sequence[]=
{
{0x00,0x00}
};
/*
 * soc_camera suspend hook: only PM_EVENT_SUSPEND is supported. Writes the
 * power-down register sequence, then asserts the powerdown GPIO.
 * Returns 0 on success, the register-write error, or -EINVAL.
 */
static int sensor_suspend(struct soc_camera_device *icd, pm_message_t pm_msg)
{
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
	int ret;

	if (pm_msg.event != PM_EVENT_SUSPEND) {
		SENSOR_TR("\n %s cann't suppout Suspend..\n",SENSOR_NAME_STRING());
		return -EINVAL;
	}
	SENSOR_DG("\n %s Enter Suspend.. \n", SENSOR_NAME_STRING());
	ret = sensor_write_array(client, sensor_power_down_sequence) ;
	if (ret != 0) {
		SENSOR_TR("\n %s..%s WriteReg Fail.. \n", SENSOR_NAME_STRING(),__FUNCTION__);
		return ret;
	}
	if (sensor_ioctrl(icd, Sensor_PowerDown, 1) < 0) {
		SENSOR_TR("\n %s suspend fail for turn on power!\n", SENSOR_NAME_STRING());
		return -EINVAL;
	}
	return 0;
}
/*
 * soc_camera resume hook: de-assert the powerdown GPIO.
 * Returns 0 on success, -EINVAL if powering up failed.
 */
static int sensor_resume(struct soc_camera_device *icd)
{
	if (sensor_ioctrl(icd, Sensor_PowerDown, 0) < 0) {
		SENSOR_TR("\n %s resume fail for turn on power!\n", SENSOR_NAME_STRING());
		return -EINVAL;
	}
	SENSOR_DG("\n %s Enter Resume.. \n", SENSOR_NAME_STRING());
	return 0;
}
/* Bus parameters are fixed for this sensor; nothing to configure. */
static int sensor_set_bus_param(struct soc_camera_device *icd,
unsigned long flags)
{
return 0;
}
/* Report the sensor's fixed bus capabilities, filtered through any
 * board-level inversions described by the soc_camera_link. */
static unsigned long sensor_query_bus_param(struct soc_camera_device *icd)
{
struct soc_camera_link *icl = to_soc_camera_link(icd);
unsigned long flags = SENSOR_BUS_PARAM;
return soc_camera_apply_sensor_flags(icl, flags);
}
/* Report the currently programmed frame size and bus format. */
static int sensor_g_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct soc_camera_device *icd = client->dev.platform_data;
struct sensor *sensor = to_sensor(client);
mf->width = icd->user_width;
mf->height = icd->user_height;
mf->code = sensor->info_priv.fmt.code;
mf->colorspace = sensor->info_priv.fmt.colorspace;
mf->field = V4L2_FIELD_NONE;
return 0;
}
/* True when the requested frame size is one of the still-capture
 * resolutions (exact match on both width and height). */
static bool sensor_fmt_capturechk(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
	static const struct { unsigned int w, h; } capture_sizes[] = {
		{1024, 768}, {1280, 1024}, {1600, 1200}, {2048, 1536}, {2592, 1944},
	};
	bool ret = false;
	unsigned int i;

	for (i = 0; i < sizeof(capture_sizes)/sizeof(capture_sizes[0]); i++) {
		if ((mf->width == capture_sizes[i].w) && (mf->height == capture_sizes[i].h)) {
			ret = true;
			break;
		}
	}
	if (ret == true)
		SENSOR_DG("%s %dx%d is capture format\n", __FUNCTION__, mf->width, mf->height);
	return ret;
}
/* True when the requested frame size is one of the video (HD) resolutions. */
static bool sensor_fmt_videochk(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
	static const struct { unsigned int w, h; } video_sizes[] = {
		{1280, 720}, {1920, 1080},
	};
	bool ret = false;
	unsigned int i;

	for (i = 0; i < sizeof(video_sizes)/sizeof(video_sizes[0]); i++) {
		if ((mf->width == video_sizes[i].w) && (mf->height == video_sizes[i].h)) {
			ret = true;
			break;
		}
	}
	if (ret == true)
		SENSOR_DG("%s %dx%d is video format\n", __FUNCTION__, mf->width, mf->height);
	return ret;
}
/*
 * v4l2_subdev s_mbus_fmt: program bus format (YUYV/UYVY order) and frame
 * size. The size is snapped to the nearest supported window table
 * (QCIF/QVGA/CIF/VGA, else the init size) and only written when it differs
 * from the current one. On a window change, flash is driven for capture
 * vs. preview, and cached effect/white-balance settings are re-applied
 * around capture<->preview and video<->preview transitions.
 * On success mf->width/height are updated to the size actually set.
 */
static int sensor_s_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
const struct sensor_datafmt *fmt;
struct sensor *sensor = to_sensor(client);
const struct v4l2_queryctrl *qctrl;
struct soc_camera_device *icd = client->dev.platform_data;
struct reginfo *winseqe_set_addr=NULL;
int ret=0, set_w,set_h;
fmt = sensor_find_datafmt(mf->code, sensor_colour_fmts,
ARRAY_SIZE(sensor_colour_fmts));
if (!fmt) {
ret = -EINVAL;
goto sensor_s_fmt_end;
}
/* switch YUV byte order only when the requested code differs */
if (sensor->info_priv.fmt.code != mf->code) {
switch (mf->code)
{
case V4L2_MBUS_FMT_YUYV8_2X8:
{
winseqe_set_addr = sensor_ClrFmt_YUYV;
break;
}
case V4L2_MBUS_FMT_UYVY8_2X8:
{
winseqe_set_addr = sensor_ClrFmt_UYVY;
break;
}
default:
break;
}
if (winseqe_set_addr != NULL) {
sensor_write_array(client, winseqe_set_addr);
sensor->info_priv.fmt.code = mf->code;
sensor->info_priv.fmt.colorspace= mf->colorspace;
SENSOR_DG("%s v4l2_mbus_code:%d set success!\n", SENSOR_NAME_STRING(),mf->code);
} else {
SENSOR_TR("%s v4l2_mbus_code:%d is invalidate!\n", SENSOR_NAME_STRING(),mf->code);
}
}
/* snap the requested size up to the smallest window table that covers it;
 * a table whose first reg is 0 is treated as "not supported" */
set_w = mf->width;
set_h = mf->height;
if (((set_w <= 176) && (set_h <= 144)) && sensor_qcif[0].reg)
{
winseqe_set_addr = sensor_qcif;
set_w = 176;
set_h = 144;
}
else if (((set_w <= 320) && (set_h <= 240)) && sensor_qvga[0].reg)
{
winseqe_set_addr = sensor_qvga;
set_w = 320;
set_h = 240;
}
else if (((set_w <= 352) && (set_h<= 288)) && sensor_cif[0].reg)
{
winseqe_set_addr = sensor_cif;
set_w = 352;
set_h = 288;
}
else if (((set_w <= 640) && (set_h <= 480)) && sensor_vga[0].reg)
{
winseqe_set_addr = sensor_vga;
set_w = 640;
set_h = 480;
}
else
{
winseqe_set_addr = SENSOR_INIT_WINSEQADR; /* ddl@rock-chips.com : Sensor output smallest size if isn't support app */
set_w = SENSOR_INIT_WIDTH;
set_h = SENSOR_INIT_HEIGHT;
SENSOR_TR("\n %s..%s Format is Invalidate. pix->width = %d.. pix->height = %d\n",SENSOR_NAME_STRING(),__FUNCTION__,mf->width,mf->height);
}
/* only reprogram when the window table actually changes
 * (pointer compared as int; assumes 32-bit pointers — rk29 platform) */
if ((int)winseqe_set_addr != sensor->info_priv.winseqe_cur_addr) {
#if CONFIG_SENSOR_Flash
if (sensor_fmt_capturechk(sd,mf) == true) { /* ddl@rock-chips.com : Capture */
if ((sensor->info_priv.flash == 1) || (sensor->info_priv.flash == 2)) {
sensor_ioctrl(icd, Sensor_Flash, Flash_On);
SENSOR_DG("%s flash on in capture!\n", SENSOR_NAME_STRING());
}
} else { /* ddl@rock-chips.com : Video */
if ((sensor->info_priv.flash == 1) || (sensor->info_priv.flash == 2)) {
sensor_ioctrl(icd, Sensor_Flash, Flash_Off);
SENSOR_DG("%s flash off in preivew!\n", SENSOR_NAME_STRING());
}
}
#endif
ret |= sensor_write_array(client, winseqe_set_addr);
if (ret != 0) {
SENSOR_TR("%s set format capability failed\n", SENSOR_NAME_STRING());
#if CONFIG_SENSOR_Flash
if (sensor_fmt_capturechk(sd,mf) == true) {
if ((sensor->info_priv.flash == 1) || (sensor->info_priv.flash == 2)) {
sensor_ioctrl(icd, Sensor_Flash, Flash_Off);
SENSOR_TR("%s Capture format set fail, flash off !\n", SENSOR_NAME_STRING());
}
}
#endif
goto sensor_s_fmt_end;
}
sensor->info_priv.winseqe_cur_addr = (int)winseqe_set_addr;
/* re-apply cached controls across capture/video/preview transitions,
 * since the window tables can clobber them */
if (sensor_fmt_capturechk(sd,mf) == true) { /* ddl@rock-chips.com : Capture */
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_EFFECT);
sensor_set_effect(icd, qctrl,sensor->info_priv.effect);
if (sensor->info_priv.whiteBalance != 0) {
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_DO_WHITE_BALANCE);
sensor_set_whiteBalance(icd, qctrl,sensor->info_priv.whiteBalance);
}
sensor->info_priv.snap2preview = true;
} else if (sensor_fmt_videochk(sd,mf) == true) { /* ddl@rock-chips.com : Video */
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_EFFECT);
sensor_set_effect(icd, qctrl,sensor->info_priv.effect);
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_DO_WHITE_BALANCE);
sensor_set_whiteBalance(icd, qctrl,sensor->info_priv.whiteBalance);
sensor->info_priv.video2preview = true;
} else if ((sensor->info_priv.snap2preview == true) || (sensor->info_priv.video2preview == true)) {
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_EFFECT);
sensor_set_effect(icd, qctrl,sensor->info_priv.effect);
qctrl = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_DO_WHITE_BALANCE);
sensor_set_whiteBalance(icd, qctrl,sensor->info_priv.whiteBalance);
sensor->info_priv.video2preview = false;
sensor->info_priv.snap2preview = false;
}
SENSOR_DG("\n%s..%s.. icd->width = %d.. icd->height %d\n",SENSOR_NAME_STRING(),__FUNCTION__,set_w,set_h);
} else {
SENSOR_DG("\n %s .. Current Format is validate. icd->width = %d.. icd->height %d\n",SENSOR_NAME_STRING(),set_w,set_h);
}
mf->width = set_w;
mf->height = set_h;
sensor_s_fmt_end:
return ret;
}
/*
 * v4l2_subdev try_mbus_fmt: adjust the requested format to what the sensor
 * would actually deliver, without touching hardware. Unsupported pixel
 * codes fall back to the current format; width/height are clamped to the
 * sensor limits and then snapped to the same window sizes sensor_s_fmt uses.
 */
static int sensor_try_fmt(struct v4l2_subdev *sd, struct v4l2_mbus_framefmt *mf)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
struct sensor *sensor = to_sensor(client);
const struct sensor_datafmt *fmt;
int ret = 0,set_w,set_h;
fmt = sensor_find_datafmt(mf->code, sensor_colour_fmts,
ARRAY_SIZE(sensor_colour_fmts));
if (fmt == NULL) {
/* unsupported code: answer with the currently active format */
fmt = &sensor->info_priv.fmt;
mf->code = fmt->code;
}
/* clamp to the sensor's absolute limits */
if (mf->height > SENSOR_MAX_HEIGHT)
mf->height = SENSOR_MAX_HEIGHT;
else if (mf->height < SENSOR_MIN_HEIGHT)
mf->height = SENSOR_MIN_HEIGHT;
if (mf->width > SENSOR_MAX_WIDTH)
mf->width = SENSOR_MAX_WIDTH;
else if (mf->width < SENSOR_MIN_WIDTH)
mf->width = SENSOR_MIN_WIDTH;
/* snap to the smallest supported window covering the request */
set_w = mf->width;
set_h = mf->height;
if (((set_w <= 176) && (set_h <= 144)) && sensor_qcif[0].reg)
{
set_w = 176;
set_h = 144;
}
else if (((set_w <= 320) && (set_h <= 240)) && sensor_qvga[0].reg)
{
set_w = 320;
set_h = 240;
}
else if (((set_w <= 352) && (set_h<= 288)) && sensor_cif[0].reg)
{
set_w = 352;
set_h = 288;
}
else if (((set_w <= 640) && (set_h <= 480)) && sensor_vga[0].reg)
{
set_w = 640;
set_h = 480;
}
else
{
set_w = SENSOR_INIT_WIDTH;
set_h = SENSOR_INIT_HEIGHT;
}
mf->width = set_w;
mf->height = set_h;
mf->colorspace = fmt->colorspace;
return ret;
}
/* v4l2 chip-ident query: only i2c-address matching is supported; the
 * address must be this client's. */
static int sensor_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *id)
{
struct i2c_client *client = v4l2_get_subdevdata(sd);
if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
return -EINVAL;
if (id->match.addr != client->addr)
return -ENODEV;
id->ident = SENSOR_V4L2_IDENT; /* ddl@rock-chips.com : Return OV2655 identifier */
id->revision = 0;
return 0;
}
#if CONFIG_SENSOR_Brightness
/* Apply a brightness level: write the register table selected by
 * (value - minimum). Returns 0 on success, -EINVAL on a bad value or a
 * failed i2c write. */
static int sensor_set_brightness(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value)
{
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));

	if ((value >= qctrl->minimum) && (value <= qctrl->maximum) &&
	    (sensor_BrightnessSeqe[value - qctrl->minimum] != NULL)) {
		if (sensor_write_array(client, sensor_BrightnessSeqe[value - qctrl->minimum]) != 0) {
			SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__);
			return -EINVAL;
		}
		SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value);
		return 0;
	}
	SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value);
	return -EINVAL;
}
#endif
#if CONFIG_SENSOR_Effect
/* Apply a color effect: write the register table selected by
 * (value - minimum). Returns 0 on success, -EINVAL on a bad value or a
 * failed i2c write. */
static int sensor_set_effect(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value)
{
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));

	if ((value >= qctrl->minimum) && (value <= qctrl->maximum) &&
	    (sensor_EffectSeqe[value - qctrl->minimum] != NULL)) {
		if (sensor_write_array(client, sensor_EffectSeqe[value - qctrl->minimum]) != 0) {
			SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__);
			return -EINVAL;
		}
		SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value);
		return 0;
	}
	SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value);
	return -EINVAL;
}
#endif
#if CONFIG_SENSOR_Exposure
/* Apply an exposure level: write the register table selected by
 * (value - minimum). Returns 0 on success, -EINVAL on a bad value or a
 * failed i2c write. */
static int sensor_set_exposure(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value)
{
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));

	if ((value >= qctrl->minimum) && (value <= qctrl->maximum) &&
	    (sensor_ExposureSeqe[value - qctrl->minimum] != NULL)) {
		if (sensor_write_array(client, sensor_ExposureSeqe[value - qctrl->minimum]) != 0) {
			SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__);
			return -EINVAL;
		}
		SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value);
		return 0;
	}
	SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value);
	return -EINVAL;
}
#endif
#if CONFIG_SENSOR_Saturation
/* Apply a saturation level: write the register table selected by
 * (value - minimum). Returns 0 on success, -EINVAL on a bad value or a
 * failed i2c write. */
static int sensor_set_saturation(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value)
{
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));

	if ((value >= qctrl->minimum) && (value <= qctrl->maximum) &&
	    (sensor_SaturationSeqe[value - qctrl->minimum] != NULL)) {
		if (sensor_write_array(client, sensor_SaturationSeqe[value - qctrl->minimum]) != 0) {
			SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__);
			return -EINVAL;
		}
		SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value);
		return 0;
	}
	SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value);
	return -EINVAL;
}
#endif
#if CONFIG_SENSOR_Contrast
/* Apply a contrast level: write the register table selected by
 * (value - minimum). Returns 0 on success, -EINVAL on a bad value or a
 * failed i2c write. */
static int sensor_set_contrast(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value)
{
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));

	if ((value >= qctrl->minimum) && (value <= qctrl->maximum) &&
	    (sensor_ContrastSeqe[value - qctrl->minimum] != NULL)) {
		if (sensor_write_array(client, sensor_ContrastSeqe[value - qctrl->minimum]) != 0) {
			SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__);
			return -EINVAL;
		}
		SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value);
		return 0;
	}
	SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value);
	return -EINVAL;
}
#endif
#if CONFIG_SENSOR_Mirror
/* Apply horizontal mirror on/off: write the register table selected by
 * (value - minimum). Returns 0 on success, -EINVAL on a bad value or a
 * failed i2c write. */
static int sensor_set_mirror(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value)
{
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));

	if ((value >= qctrl->minimum) && (value <= qctrl->maximum) &&
	    (sensor_MirrorSeqe[value - qctrl->minimum] != NULL)) {
		if (sensor_write_array(client, sensor_MirrorSeqe[value - qctrl->minimum]) != 0) {
			SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__);
			return -EINVAL;
		}
		SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value);
		return 0;
	}
	SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value);
	return -EINVAL;
}
#endif
#if CONFIG_SENSOR_Flip
/*
 * Enable/disable vertical flip via a canned register sequence.
 * @value: (value - qctrl->minimum) indexes sensor_FlipSeqe[].
 * Returns 0 on success; -EINVAL on out-of-range value, missing sequence,
 * or I2C write failure.
 */
static int sensor_set_flip(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value)
{
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));

	if ((value >= qctrl->minimum) && (value <= qctrl->maximum))
	{
		if (sensor_FlipSeqe[value - qctrl->minimum] != NULL)
		{
			if (sensor_write_array(client, sensor_FlipSeqe[value - qctrl->minimum]) != 0)
			{
				SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__);
				return -EINVAL;
			}
			SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value);
			return 0;
		}
	}
	SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value);
	return -EINVAL;
}
#endif
#if CONFIG_SENSOR_Scene
/*
 * Select a scene mode via a canned register sequence.
 * @value: (value - qctrl->minimum) indexes sensor_SceneSeqe[].
 * Returns 0 on success; -EINVAL on out-of-range value, missing sequence,
 * or I2C write failure.
 */
static int sensor_set_scene(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value)
{
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));

	if ((value >= qctrl->minimum) && (value <= qctrl->maximum))
	{
		if (sensor_SceneSeqe[value - qctrl->minimum] != NULL)
		{
			if (sensor_write_array(client, sensor_SceneSeqe[value - qctrl->minimum]) != 0)
			{
				SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__);
				return -EINVAL;
			}
			SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value);
			return 0;
		}
	}
	SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value);
	return -EINVAL;
}
#endif
#if CONFIG_SENSOR_WhiteBalance
/*
 * Select a white-balance preset via a canned register sequence.
 * @value: (value - qctrl->minimum) indexes sensor_WhiteBalanceSeqe[].
 * Returns 0 on success; -EINVAL on out-of-range value, missing sequence,
 * or I2C write failure.
 */
static int sensor_set_whiteBalance(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value)
{
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));

	if ((value >= qctrl->minimum) && (value <= qctrl->maximum))
	{
		if (sensor_WhiteBalanceSeqe[value - qctrl->minimum] != NULL)
		{
			if (sensor_write_array(client, sensor_WhiteBalanceSeqe[value - qctrl->minimum]) != 0)
			{
				SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__);
				return -EINVAL;
			}
			SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value);
			return 0;
		}
	}
	SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value);
	return -EINVAL;
}
#endif
#if CONFIG_SENSOR_DigitalZoom
/*
 * Apply a relative digital-zoom step to the sensor.
 * @icd:   soc-camera device providing the I2C control client
 * @qctrl: descriptor of the requesting control (unused here; the absolute
 *         zoom control V4L2_CID_ZOOM_ABSOLUTE supplies the valid range)
 * @value: in: requested zoom delta; out: the delta actually applied after
 *         clamping, so callers can update their cached absolute position.
 * Returns 0 on success; -EINVAL if the absolute-zoom control is missing,
 * the position is already at a limit, the sequence slot is empty, or the
 * I2C write fails.
 */
static int sensor_set_digitalzoom(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int *value)
{
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
	struct sensor *sensor = to_sensor(client);
	const struct v4l2_queryctrl *qctrl_info;
	int digitalzoom_cur, digitalzoom_total;

	/*
	 * BUG FIX: the original test was inverted -- it returned -EINVAL when
	 * the absolute-zoom control WAS found, then dereferenced NULL below.
	 */
	qctrl_info = soc_camera_find_qctrl(&sensor_ops, V4L2_CID_ZOOM_ABSOLUTE);
	if (qctrl_info == NULL)
		return -EINVAL;

	digitalzoom_cur = sensor->info_priv.digitalzoom;
	digitalzoom_total = qctrl_info->maximum;

	/*
	 * BUG FIX: 'value' is a pointer to the requested step; the original
	 * compared, reassigned and printed the pointer itself instead of
	 * dereferencing it, so the step was never validated or clamped.
	 */
	if ((*value > 0) && (digitalzoom_cur >= digitalzoom_total))
	{
		SENSOR_TR("%s digitalzoom is maximum - %x\n", SENSOR_NAME_STRING(), digitalzoom_cur);
		return -EINVAL;
	}

	if ((*value < 0) && (digitalzoom_cur <= qctrl_info->minimum))
	{
		SENSOR_TR("%s digitalzoom is minimum - %x\n", SENSOR_NAME_STRING(), digitalzoom_cur);
		return -EINVAL;
	}

	/* Clamp the step so the resulting position stays inside [0, total]. */
	if ((*value > 0) && ((digitalzoom_cur + *value) > digitalzoom_total))
	{
		*value = digitalzoom_total - digitalzoom_cur;
	}

	if ((*value < 0) && ((digitalzoom_cur + *value) < 0))
	{
		*value = 0 - digitalzoom_cur;
	}

	digitalzoom_cur += *value;

	if (sensor_ZoomSeqe[digitalzoom_cur] != NULL)
	{
		if (sensor_write_array(client, sensor_ZoomSeqe[digitalzoom_cur]) != 0)
		{
			SENSOR_TR("%s..%s WriteReg Fail.. \n",SENSOR_NAME_STRING(), __FUNCTION__);
			return -EINVAL;
		}
		SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, *value);
		return 0;
	}

	return -EINVAL;
}
#endif
#if CONFIG_SENSOR_Flash
/*
 * Control the flash GPIO through sensor_ioctrl().
 * Only value 3 (torch) turns the flash on here; every other in-range value
 * turns it off (flash-on/auto are presumably driven at capture time
 * elsewhere -- TODO confirm against the capture path).
 * Returns 0 on success, -EINVAL for out-of-range values.
 */
static int sensor_set_flash(struct soc_camera_device *icd, const struct v4l2_queryctrl *qctrl, int value)
{
	if ((value >= qctrl->minimum) && (value <= qctrl->maximum)) {
		if (value == 3) { /* ddl@rock-chips.com: torch */
			sensor_ioctrl(icd, Sensor_Flash, Flash_Torch); /* Flash On */
		} else {
			sensor_ioctrl(icd, Sensor_Flash, Flash_Off);
		}
		SENSOR_DG("%s..%s : %x\n",SENSOR_NAME_STRING(),__FUNCTION__, value);
		return 0;
	}
	SENSOR_TR("\n %s..%s valure = %d is invalidate.. \n",SENSOR_NAME_STRING(),__FUNCTION__,value);
	return -EINVAL;
}
#endif
/*
 * VIDIOC_G_CTRL backend: report the cached value of a standard V4L2
 * control from sensor->info_priv. No hardware access is performed.
 * Returns -EINVAL only when the id is not registered in sensor_ops;
 * unknown-but-registered ids fall through and return 0 unmodified.
 */
static int sensor_g_control(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct sensor *sensor = to_sensor(client);
	const struct v4l2_queryctrl *qctrl;

	qctrl = soc_camera_find_qctrl(&sensor_ops, ctrl->id);
	if (!qctrl)
	{
		SENSOR_TR("\n %s ioctrl id = %d is invalidate \n", SENSOR_NAME_STRING(), ctrl->id);
		return -EINVAL;
	}

	switch (ctrl->id)
	{
	case V4L2_CID_BRIGHTNESS:
	{
		ctrl->value = sensor->info_priv.brightness;
		break;
	}
	case V4L2_CID_SATURATION:
	{
		ctrl->value = sensor->info_priv.saturation;
		break;
	}
	case V4L2_CID_CONTRAST:
	{
		ctrl->value = sensor->info_priv.contrast;
		break;
	}
	case V4L2_CID_DO_WHITE_BALANCE:
	{
		ctrl->value = sensor->info_priv.whiteBalance;
		break;
	}
	case V4L2_CID_EXPOSURE:
	{
		ctrl->value = sensor->info_priv.exposure;
		break;
	}
	case V4L2_CID_HFLIP:
	{
		ctrl->value = sensor->info_priv.mirror;
		break;
	}
	case V4L2_CID_VFLIP:
	{
		ctrl->value = sensor->info_priv.flip;
		break;
	}
	default :
		break;
	}
	return 0;
}
/*
 * VIDIOC_S_CTRL backend: apply a standard V4L2 control to the hardware.
 * Each control is written only when the requested value differs from the
 * cached one in sensor->info_priv; the cache is updated on success so
 * repeated identical requests skip the I2C traffic.
 * Returns 0 on success (unknown-but-registered ids are silently accepted),
 * -EINVAL if the id is not in sensor_ops or the hardware write fails.
 */
static int sensor_s_control(struct v4l2_subdev *sd, struct v4l2_control *ctrl)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct sensor *sensor = to_sensor(client);
	struct soc_camera_device *icd = client->dev.platform_data;
	const struct v4l2_queryctrl *qctrl;

	qctrl = soc_camera_find_qctrl(&sensor_ops, ctrl->id);
	if (!qctrl)
	{
		SENSOR_TR("\n %s ioctrl id = %d is invalidate \n", SENSOR_NAME_STRING(), ctrl->id);
		return -EINVAL;
	}

	switch (ctrl->id)
	{
#if CONFIG_SENSOR_Brightness
	case V4L2_CID_BRIGHTNESS:
	{
		if (ctrl->value != sensor->info_priv.brightness)
		{
			if (sensor_set_brightness(icd, qctrl,ctrl->value) != 0)
			{
				return -EINVAL;
			}
			sensor->info_priv.brightness = ctrl->value;
		}
		break;
	}
#endif
#if CONFIG_SENSOR_Exposure
	case V4L2_CID_EXPOSURE:
	{
		if (ctrl->value != sensor->info_priv.exposure)
		{
			if (sensor_set_exposure(icd, qctrl,ctrl->value) != 0)
			{
				return -EINVAL;
			}
			sensor->info_priv.exposure = ctrl->value;
		}
		break;
	}
#endif
#if CONFIG_SENSOR_Saturation
	case V4L2_CID_SATURATION:
	{
		if (ctrl->value != sensor->info_priv.saturation)
		{
			if (sensor_set_saturation(icd, qctrl,ctrl->value) != 0)
			{
				return -EINVAL;
			}
			sensor->info_priv.saturation = ctrl->value;
		}
		break;
	}
#endif
#if CONFIG_SENSOR_Contrast
	case V4L2_CID_CONTRAST:
	{
		if (ctrl->value != sensor->info_priv.contrast)
		{
			if (sensor_set_contrast(icd, qctrl,ctrl->value) != 0)
			{
				return -EINVAL;
			}
			sensor->info_priv.contrast = ctrl->value;
		}
		break;
	}
#endif
#if CONFIG_SENSOR_WhiteBalance
	case V4L2_CID_DO_WHITE_BALANCE:
	{
		if (ctrl->value != sensor->info_priv.whiteBalance)
		{
			if (sensor_set_whiteBalance(icd, qctrl,ctrl->value) != 0)
			{
				return -EINVAL;
			}
			sensor->info_priv.whiteBalance = ctrl->value;
		}
		break;
	}
#endif
#if CONFIG_SENSOR_Mirror
	case V4L2_CID_HFLIP:
	{
		if (ctrl->value != sensor->info_priv.mirror)
		{
			if (sensor_set_mirror(icd, qctrl,ctrl->value) != 0)
				return -EINVAL;
			sensor->info_priv.mirror = ctrl->value;
		}
		break;
	}
#endif
#if CONFIG_SENSOR_Flip
	case V4L2_CID_VFLIP:
	{
		if (ctrl->value != sensor->info_priv.flip)
		{
			if (sensor_set_flip(icd, qctrl,ctrl->value) != 0)
				return -EINVAL;
			sensor->info_priv.flip = ctrl->value;
		}
		break;
	}
#endif
	default:
		break;
	}
	return 0;
}
/*
 * Read one extended/private control from the cached state in info_priv.
 * Relative zoom/focus have no readable state and return -EINVAL; other
 * registered-but-unhandled ids fall through and return 0 unmodified.
 */
static int sensor_g_ext_control(struct soc_camera_device *icd , struct v4l2_ext_control *ext_ctrl)
{
	const struct v4l2_queryctrl *qctrl;
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
	struct sensor *sensor = to_sensor(client);

	qctrl = soc_camera_find_qctrl(&sensor_ops, ext_ctrl->id);
	if (!qctrl)
	{
		SENSOR_TR("\n %s ioctrl id = %d is invalidate \n", SENSOR_NAME_STRING(), ext_ctrl->id);
		return -EINVAL;
	}

	switch (ext_ctrl->id)
	{
	case V4L2_CID_SCENE:
	{
		ext_ctrl->value = sensor->info_priv.scene;
		break;
	}
	case V4L2_CID_EFFECT:
	{
		ext_ctrl->value = sensor->info_priv.effect;
		break;
	}
	case V4L2_CID_ZOOM_ABSOLUTE:
	{
		ext_ctrl->value = sensor->info_priv.digitalzoom;
		break;
	}
	case V4L2_CID_ZOOM_RELATIVE:
	{
		return -EINVAL;
	}
	case V4L2_CID_FOCUS_ABSOLUTE:
	{
		ext_ctrl->value = sensor->info_priv.focus;
		break;
	}
	case V4L2_CID_FOCUS_RELATIVE:
	{
		return -EINVAL;
	}
	case V4L2_CID_FLASH:
	{
		ext_ctrl->value = sensor->info_priv.flash;
		break;
	}
	default :
		break;
	}
	return 0;
}
/*
 * Apply one extended/private control. Absolute zoom/focus are converted to
 * a relative offset from the cached position; the cache is updated only
 * after a successful hardware write. Focus here updates the cache only
 * (no hardware write is visible in this block).
 * Returns 0 on success, -EINVAL on range check or hardware failure.
 */
static int sensor_s_ext_control(struct soc_camera_device *icd, struct v4l2_ext_control *ext_ctrl)
{
	const struct v4l2_queryctrl *qctrl;
	struct i2c_client *client = to_i2c_client(to_soc_camera_control(icd));
	struct sensor *sensor = to_sensor(client);
	int val_offset;

	qctrl = soc_camera_find_qctrl(&sensor_ops, ext_ctrl->id);
	if (!qctrl)
	{
		SENSOR_TR("\n %s ioctrl id = %d is invalidate \n", SENSOR_NAME_STRING(), ext_ctrl->id);
		return -EINVAL;
	}

	val_offset = 0;
	switch (ext_ctrl->id)
	{
#if CONFIG_SENSOR_Scene
	case V4L2_CID_SCENE:
	{
		if (ext_ctrl->value != sensor->info_priv.scene)
		{
			if (sensor_set_scene(icd, qctrl,ext_ctrl->value) != 0)
				return -EINVAL;
			sensor->info_priv.scene = ext_ctrl->value;
		}
		break;
	}
#endif
#if CONFIG_SENSOR_Effect
	case V4L2_CID_EFFECT:
	{
		if (ext_ctrl->value != sensor->info_priv.effect)
		{
			if (sensor_set_effect(icd, qctrl,ext_ctrl->value) != 0)
				return -EINVAL;
			sensor->info_priv.effect= ext_ctrl->value;
		}
		break;
	}
#endif
#if CONFIG_SENSOR_DigitalZoom
	case V4L2_CID_ZOOM_ABSOLUTE:
	{
		if ((ext_ctrl->value < qctrl->minimum) || (ext_ctrl->value > qctrl->maximum))
			return -EINVAL;
		if (ext_ctrl->value != sensor->info_priv.digitalzoom)
		{
			/* Convert absolute request to a step; the helper may clamp it. */
			val_offset = ext_ctrl->value -sensor->info_priv.digitalzoom;
			if (sensor_set_digitalzoom(icd, qctrl,&val_offset) != 0)
				return -EINVAL;
			sensor->info_priv.digitalzoom += val_offset;
			SENSOR_DG("%s digitalzoom is %x\n",SENSOR_NAME_STRING(), sensor->info_priv.digitalzoom);
		}
		break;
	}
	case V4L2_CID_ZOOM_RELATIVE:
	{
		if (ext_ctrl->value)
		{
			if (sensor_set_digitalzoom(icd, qctrl,&ext_ctrl->value) != 0)
				return -EINVAL;
			sensor->info_priv.digitalzoom += ext_ctrl->value;
			SENSOR_DG("%s digitalzoom is %x\n", SENSOR_NAME_STRING(), sensor->info_priv.digitalzoom);
		}
		break;
	}
#endif
#if CONFIG_SENSOR_Focus
	case V4L2_CID_FOCUS_ABSOLUTE:
	{
		if ((ext_ctrl->value < qctrl->minimum) || (ext_ctrl->value > qctrl->maximum))
			return -EINVAL;
		if (ext_ctrl->value != sensor->info_priv.focus)
		{
			val_offset = ext_ctrl->value -sensor->info_priv.focus;
			sensor->info_priv.focus += val_offset;
		}
		break;
	}
	case V4L2_CID_FOCUS_RELATIVE:
	{
		if (ext_ctrl->value)
		{
			sensor->info_priv.focus += ext_ctrl->value;
			SENSOR_DG("%s focus is %x\n", SENSOR_NAME_STRING(), sensor->info_priv.focus);
		}
		break;
	}
#endif
#if CONFIG_SENSOR_Flash
	case V4L2_CID_FLASH:
	{
		if (sensor_set_flash(icd, qctrl,ext_ctrl->value) != 0)
			return -EINVAL;
		sensor->info_priv.flash = ext_ctrl->value;
		SENSOR_DG("%s flash is %x\n",SENSOR_NAME_STRING(), sensor->info_priv.flash);
		break;
	}
#endif
	default:
		break;
	}
	return 0;
}
/*
 * VIDIOC_G_EXT_CTRLS backend: fetch every control in the request.
 * On a single failure error_idx holds the failing index; on multiple
 * failures it is set to the request count. Returns 0 or -EINVAL.
 */
static int sensor_g_ext_controls(struct v4l2_subdev *sd, struct v4l2_ext_controls *ext_ctrl)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct soc_camera_device *icd = client->dev.platform_data;
	int idx;
	int failures = 0;
	int last_bad = -1;

	for (idx = 0; idx < ext_ctrl->count; idx++) {
		if (sensor_g_ext_control(icd, &ext_ctrl->controls[idx]) == 0)
			continue;
		failures++;
		last_bad = idx;
	}

	if (failures == 0)
		return 0;

	ext_ctrl->error_idx = (failures > 1) ? ext_ctrl->count : last_bad;
	return -EINVAL;
}
/*
 * VIDIOC_S_EXT_CTRLS backend: apply every control in the request.
 * error_idx reports the single failing index, or the request count when
 * more than one control failed. Returns 0 or -EINVAL.
 */
static int sensor_s_ext_controls(struct v4l2_subdev *sd, struct v4l2_ext_controls *ext_ctrl)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct soc_camera_device *icd = client->dev.platform_data;
	int i, error_cnt=0, error_idx=-1;

	for (i=0; i<ext_ctrl->count; i++) {
		if (sensor_s_ext_control(icd, &ext_ctrl->controls[i]) != 0) {
			error_cnt++;
			error_idx = i;
		}
	}

	/* More than one failure: report the count instead of an index. */
	if (error_cnt > 1)
		error_idx = ext_ctrl->count;

	if (error_idx != -1) {
		ext_ctrl->error_idx = error_idx;
		return -EINVAL;
	} else {
		return 0;
	}
}
/* Interface active, can use i2c. If it fails, it can indeed mean, that
 * this wasn't our capture interface, so, we wait for the right one */
/*
 * Second-stage probe: power the chip, soft-reset it, and verify its chip
 * id (register 0x00) against SENSOR_ID. Returns 0 when the expected part
 * is found, -ENODEV otherwise.
 */
static int sensor_video_probe(struct soc_camera_device *icd,
		struct i2c_client *client)
{
	char value;
	int ret;
	struct sensor *sensor = to_sensor(client);

	/* We must have a parent by now. And it cannot be a wrong one.
	 * So this entire test is completely redundant. */
	if (!icd->dev.parent ||
	    to_soc_camera_host(icd->dev.parent)->nr != icd->iface)
		return -ENODEV;

	if (sensor_ioctrl(icd, Sensor_PowerDown, 0) < 0) {
		ret = -ENODEV;
		goto sensor_video_probe_err;
	}

	/* soft reset */
	ret = sensor_write(client, 0xfe, 0x80);
	if (ret != 0)
	{
		SENSOR_TR("soft reset %s failed\n",SENSOR_NAME_STRING());
		/* NOTE(review): returns directly instead of using the error
		 * label like the other paths; same effect, inconsistent style. */
		return -ENODEV;
	}
	mdelay(5); /* wait 5 ms after soft reset (mdelay is milliseconds,
		    * not microseconds as the original comment claimed) */

	/* check if it is an sensor sensor */
	ret = sensor_read(client, 0x00, &value);
	if (ret != 0) {
		SENSOR_TR("read chip id high byte failed\n");
		ret = -ENODEV;
		goto sensor_video_probe_err;
	}

	if (value == SENSOR_ID) {
		sensor->model = SENSOR_V4L2_IDENT;
		SENSOR_TR("chip id:0x%x\n",value);
	} else {
		SENSOR_TR("error: %s mismatched pid = 0x%x\n", SENSOR_NAME_STRING(), value);
		ret = -ENODEV;
		goto sensor_video_probe_err;
	}

	return 0;

sensor_video_probe_err:
	return ret;
}
/*
 * Private subdev ioctl handler for the rk29 camera host.
 * RK29_CAM_SUBDEV_DEACTIVATE powers the sensor down;
 * RK29_CAM_SUBDEV_IOREQUEST stores the platform data pointer and resolves
 * this sensor's GPIO resources (flash) from it.
 */
static long sensor_ioctl(struct v4l2_subdev *sd, unsigned int cmd, void *arg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct soc_camera_device *icd = client->dev.platform_data;
	struct sensor *sensor = to_sensor(client);
	int ret = 0;
#if CONFIG_SENSOR_Flash
	int i;
#endif

	SENSOR_DG("\n%s..%s..cmd:%x \n",SENSOR_NAME_STRING(),__FUNCTION__,cmd);
	switch (cmd)
	{
	case RK29_CAM_SUBDEV_DEACTIVATE:
	{
		sensor_deactivate(client);
		break;
	}
	case RK29_CAM_SUBDEV_IOREQUEST:
	{
		sensor->sensor_io_request = (struct rk29camera_platform_data*)arg;
		if (sensor->sensor_io_request != NULL) {
			sensor->sensor_gpio_res = NULL;
#if CONFIG_SENSOR_Flash
			/* Find the gpio resource entry registered under this
			 * platform device's name. */
			for (i=0; i<RK29_CAM_SUPPORT_NUMS;i++) {
				if (sensor->sensor_io_request->gpio_res[i].dev_name &&
				    (strcmp(sensor->sensor_io_request->gpio_res[i].dev_name, dev_name(icd->pdev)) == 0)) {
					sensor->sensor_gpio_res = (struct rk29camera_gpio_res*)&sensor->sensor_io_request->gpio_res[i];
				}
			}
#endif
			/* NOTE(review): with CONFIG_SENSOR_Flash == 0 the scan
			 * above is compiled out, so sensor_gpio_res stays NULL
			 * and this branch always fails -- confirm intended. */
			if (sensor->sensor_gpio_res == NULL) {
				SENSOR_TR("%s %s obtain gpio resource failed when RK29_CAM_SUBDEV_IOREQUEST \n",SENSOR_NAME_STRING(),__FUNCTION__);
				ret = -EINVAL;
				goto sensor_ioctl_end;
			}
		} else {
			SENSOR_TR("%s %s RK29_CAM_SUBDEV_IOREQUEST fail\n",SENSOR_NAME_STRING(),__FUNCTION__);
			ret = -EINVAL;
			goto sensor_ioctl_end;
		}
		/* ddl@rock-chips.com : if gpio_flash havn't been set in board-xxx.c, sensor driver must notify is not support flash control
		   for this project */
#if CONFIG_SENSOR_Flash
		if (sensor->sensor_gpio_res) {
			if (sensor->sensor_gpio_res->gpio_flash == INVALID_GPIO) {
				/* Hide the flash control from userspace by
				 * zeroing its queryctrl entry. */
				for (i = 0; i < icd->ops->num_controls; i++) {
					if (V4L2_CID_FLASH == icd->ops->controls[i].id) {
						memset((char*)&icd->ops->controls[i],0x00,sizeof(struct v4l2_queryctrl));
					}
				}
				sensor->info_priv.flash = 0xff;
				SENSOR_DG("%s flash gpio is invalidate!\n",SENSOR_NAME_STRING());
			}
		}
#endif
		break;
	}
	default:
	{
		SENSOR_TR("%s %s cmd(0x%x) is unknown !\n",SENSOR_NAME_STRING(),__FUNCTION__,cmd);
		break;
	}
	}

sensor_ioctl_end:
	return ret;
}
/*
 * Enumerate the media-bus pixel formats the sensor can output.
 * Returns -EINVAL once 'index' runs past sensor_colour_fmts[].
 */
static int sensor_enum_fmt(struct v4l2_subdev *sd, unsigned int index,
		enum v4l2_mbus_pixelcode *code)
{
	if (index >= ARRAY_SIZE(sensor_colour_fmts))
		return -EINVAL;

	*code = sensor_colour_fmts[index].code;
	return 0;
}
/* v4l2-subdev core operations: controls, chip ident and the private ioctl. */
static struct v4l2_subdev_core_ops sensor_subdev_core_ops = {
	.init		= sensor_init,
	.g_ctrl		= sensor_g_control,
	.s_ctrl		= sensor_s_control,
	.g_ext_ctrls	= sensor_g_ext_controls,
	.s_ext_ctrls	= sensor_s_ext_controls,
	.g_chip_ident	= sensor_g_chip_ident,
	.ioctl		= sensor_ioctl,
};

/* v4l2-subdev video operations: format negotiation and enumeration. */
static struct v4l2_subdev_video_ops sensor_subdev_video_ops = {
	.s_mbus_fmt	= sensor_s_fmt,
	.g_mbus_fmt	= sensor_g_fmt,
	.try_mbus_fmt	= sensor_try_fmt,
	.enum_mbus_fmt	= sensor_enum_fmt,
};

/* Combined subdev ops table registered in sensor_probe(). */
static struct v4l2_subdev_ops sensor_subdev_ops = {
	.core	= &sensor_subdev_core_ops,
	.video	= &sensor_subdev_video_ops,
};
/*
 * I2C probe: validate soc-camera wiring and adapter capabilities,
 * allocate the driver state, register the v4l2 subdev, then run the
 * hardware probe. On hardware-probe failure all registration is undone
 * and the state is freed.
 */
static int sensor_probe(struct i2c_client *client,
		const struct i2c_device_id *did)
{
	struct sensor *sensor;
	struct soc_camera_device *icd = client->dev.platform_data;
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
	struct soc_camera_link *icl;
	int ret;

	SENSOR_DG("\n%s..%s..%d..\n",__FUNCTION__,__FILE__,__LINE__);
	if (!icd) {
		dev_err(&client->dev, "%s: missing soc-camera data!\n",SENSOR_NAME_STRING());
		return -EINVAL;
	}

	icl = to_soc_camera_link(icd);
	if (!icl) {
		dev_err(&client->dev, "%s driver needs platform data\n", SENSOR_NAME_STRING());
		return -EINVAL;
	}

	if (!i2c_check_functionality(adapter, I2C_FUNC_I2C)) {
		dev_warn(&adapter->dev,
			 "I2C-Adapter doesn't support I2C_FUNC_I2C\n");
		return -EIO;
	}

	sensor = kzalloc(sizeof(struct sensor), GFP_KERNEL);
	if (!sensor)
		return -ENOMEM;

	v4l2_i2c_subdev_init(&sensor->subdev, client, &sensor_subdev_ops);

	/* Second stage probe - when a capture adapter is there */
	icd->ops		= &sensor_ops;
	sensor->info_priv.fmt = sensor_colour_fmts[0];
#if CONFIG_SENSOR_I2C_NOSCHED
	atomic_set(&sensor->tasklock_cnt,0);
#endif

	ret = sensor_video_probe(icd, client);
	if (ret < 0) {
		/* Hardware not found: roll back everything set up above. */
		icd->ops = NULL;
		i2c_set_clientdata(client, NULL);
		kfree(sensor);
		sensor = NULL;
	}
	SENSOR_DG("\n%s..%s..%d ret = %x \n",__FUNCTION__,__FILE__,__LINE__,ret);
	return ret;
}
/*
 * I2C remove: detach the soc-camera ops, clear client data and free the
 * driver state allocated in sensor_probe().
 */
static int sensor_remove(struct i2c_client *client)
{
	struct sensor *sensor = to_sensor(client);
	struct soc_camera_device *icd = client->dev.platform_data;

	icd->ops = NULL;
	i2c_set_clientdata(client, NULL);
	client->driver = NULL;
	kfree(sensor);
	sensor = NULL;
	return 0;
}
/* I2C device id table: a single entry matching the sensor's name. */
static const struct i2c_device_id sensor_id[] = {
	{SENSOR_NAME_STRING(), 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, sensor_id);

static struct i2c_driver sensor_i2c_driver = {
	.driver = {
		.name = SENSOR_NAME_STRING(),
	},
	.probe		= sensor_probe,
	.remove		= sensor_remove,
	.id_table	= sensor_id,
};

static int __init sensor_mod_init(void)
{
	SENSOR_DG("\n%s..%s.. \n",__FUNCTION__,SENSOR_NAME_STRING());
	return i2c_add_driver(&sensor_i2c_driver);
}

static void __exit sensor_mod_exit(void)
{
	i2c_del_driver(&sensor_i2c_driver);
}

/* Registered via device_initcall_sync rather than module_init, presumably
 * to order registration against the camera host -- TODO confirm. */
device_initcall_sync(sensor_mod_init);
module_exit(sensor_mod_exit);

MODULE_DESCRIPTION(SENSOR_NAME_STRING(Camera sensor driver));
MODULE_AUTHOR("ddl <kernel@rock-chips>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
CSE3320/kernel-code | .backup_do_not_remove/drivers/power/supply/max8925_power.c | 141 | 15238 | // SPDX-License-Identifier: GPL-2.0-only
/*
* Battery driver for Maxim MAX8925
*
* Copyright (c) 2009-2010 Marvell International Ltd.
* Haojian Zhuang <haojian.zhuang@marvell.com>
*/
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/mfd/max8925.h>
/* registers in GPM */
#define MAX8925_OUT5VEN 0x54
#define MAX8925_OUT3VEN 0x58
#define MAX8925_CHG_CNTL1 0x7c
/* bits definition */
#define MAX8925_CHG_STAT_VSYSLOW (1 << 0)
#define MAX8925_CHG_STAT_MODE_MASK (3 << 2)
#define MAX8925_CHG_STAT_EN_MASK (1 << 4)
#define MAX8925_CHG_MBDET (1 << 1)
#define MAX8925_CHG_AC_RANGE_MASK (3 << 6)
/* registers in ADC */
#define MAX8925_ADC_RES_CNFG1 0x06
#define MAX8925_ADC_AVG_CNFG1 0x07
#define MAX8925_ADC_ACQ_CNFG1 0x08
#define MAX8925_ADC_ACQ_CNFG2 0x09
/* 2 bytes registers in below. MSB is 1st, LSB is 2nd. */
#define MAX8925_ADC_AUX2 0x62
#define MAX8925_ADC_VCHG 0x64
#define MAX8925_ADC_VBBATT 0x66
#define MAX8925_ADC_VMBATT 0x68
#define MAX8925_ADC_ISNS 0x6a
#define MAX8925_ADC_THM 0x6c
#define MAX8925_ADC_TDIE 0x6e
#define MAX8925_CMD_AUX2 0xc8
#define MAX8925_CMD_VCHG 0xd0
#define MAX8925_CMD_VBBATT 0xd8
#define MAX8925_CMD_VMBATT 0xe0
#define MAX8925_CMD_ISNS 0xe8
#define MAX8925_CMD_THM 0xf0
#define MAX8925_CMD_TDIE 0xf8
/* Indexes of the ADC channels handled by start_measure(). */
enum {
	MEASURE_AUX2,
	MEASURE_VCHG,
	MEASURE_VBBATT,
	MEASURE_VMBATT,
	MEASURE_ISNS,
	MEASURE_THM,
	MEASURE_TDIE,
	MEASURE_MAX,
};
/* Driver state shared by the AC, USB and battery supplies. */
struct max8925_power_info {
	struct max8925_chip	*chip;
	struct i2c_client	*gpm;	/* GPM register page client */
	struct i2c_client	*adc;	/* ADC register page client */

	struct power_supply	*ac;
	struct power_supply	*usb;
	struct power_supply	*battery;
	int			irq_base;
	unsigned		ac_online:1;
	unsigned		usb_online:1;
	unsigned		bat_online:1;
	unsigned		chg_mode:2;
	unsigned		batt_detect:1;	/* detecting MB by ID pin */
	unsigned		topoff_threshold:2;
	unsigned		fast_charge:3;
	unsigned		no_temp_support:1;
	unsigned		no_insert_detect:1;
	int (*set_charger) (int);	/* optional board hook toggled with the charger */
};
/*
 * Enable or disable the charger: toggles bit 7 of CHG_CNTL1 (clear =
 * enabled, set = disabled) and mirrors the state through the optional
 * board-level set_charger() hook. Always returns 0.
 */
static int __set_charger(struct max8925_power_info *info, int enable)
{
	struct max8925_chip *chip = info->chip;

	if (enable) {
		/* enable charger in platform */
		if (info->set_charger)
			info->set_charger(1);
		/* enable charger */
		max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 1 << 7, 0);
	} else {
		/* disable charge */
		max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 1 << 7, 1 << 7);
		if (info->set_charger)
			info->set_charger(0);
	}
	dev_dbg(chip->dev, "%s\n", (enable) ? "Enable charger"
		: "Disable charger");
	return 0;
}
/*
 * Threaded handler for every charger IRQ requested via REQUEST_IRQ().
 * Tracks adapter presence and starts/stops charging on insertion,
 * removal, completion, faults and resets.
 */
static irqreturn_t max8925_charger_handler(int irq, void *data)
{
	struct max8925_power_info *info = (struct max8925_power_info *)data;
	struct max8925_chip *chip = info->chip;

	switch (irq - chip->irq_base) {
	case MAX8925_IRQ_VCHG_DC_R:
		info->ac_online = 1;
		__set_charger(info, 1);
		dev_dbg(chip->dev, "Adapter inserted\n");
		break;
	case MAX8925_IRQ_VCHG_DC_F:
		info->ac_online = 0;
		__set_charger(info, 0);
		dev_dbg(chip->dev, "Adapter removed\n");
		break;
	case MAX8925_IRQ_VCHG_THM_OK_F:
		/* Battery is not ready yet */
		dev_dbg(chip->dev, "Battery temperature is out of range\n");
		/* Fall through: out-of-range temperature also stops charging */
	case MAX8925_IRQ_VCHG_DC_OVP:
		dev_dbg(chip->dev, "Error detection\n");
		__set_charger(info, 0);
		break;
	case MAX8925_IRQ_VCHG_THM_OK_R:
		/* Battery is ready now */
		dev_dbg(chip->dev, "Battery temperature is in range\n");
		break;
	case MAX8925_IRQ_VCHG_SYSLOW_R:
		/* VSYS is low */
		dev_info(chip->dev, "Sys power is too low\n");
		break;
	case MAX8925_IRQ_VCHG_SYSLOW_F:
		dev_dbg(chip->dev, "Sys power is above low threshold\n");
		break;
	case MAX8925_IRQ_VCHG_DONE:
		__set_charger(info, 0);
		dev_dbg(chip->dev, "Charging is done\n");
		break;
	case MAX8925_IRQ_VCHG_TOPOFF:
		dev_dbg(chip->dev, "Charging in top-off mode\n");
		break;
	case MAX8925_IRQ_VCHG_TMR_FAULT:
		__set_charger(info, 0);
		dev_dbg(chip->dev, "Safe timer is expired\n");
		break;
	case MAX8925_IRQ_VCHG_RST:
		__set_charger(info, 0);
		dev_dbg(chip->dev, "Charger is reset\n");
		break;
	}
	return IRQ_HANDLED;
}
/*
 * Trigger one ADC conversion and read back the result.
 * Writes the conversion command, then reads the two result bytes and
 * right-shifts by 4, i.e. returns the raw 12-bit ADC value (callers apply
 * the channel-specific scaling). Returns -EINVAL for unsupported channels.
 */
static int start_measure(struct max8925_power_info *info, int type)
{
	unsigned char buf[2] = {0, 0};
	int meas_cmd;
	int meas_reg = 0, ret;

	switch (type) {
	case MEASURE_VCHG:
		meas_cmd = MAX8925_CMD_VCHG;
		meas_reg = MAX8925_ADC_VCHG;
		break;
	case MEASURE_VBBATT:
		meas_cmd = MAX8925_CMD_VBBATT;
		meas_reg = MAX8925_ADC_VBBATT;
		break;
	case MEASURE_VMBATT:
		meas_cmd = MAX8925_CMD_VMBATT;
		meas_reg = MAX8925_ADC_VMBATT;
		break;
	case MEASURE_ISNS:
		meas_cmd = MAX8925_CMD_ISNS;
		meas_reg = MAX8925_ADC_ISNS;
		break;
	default:
		return -EINVAL;
	}

	max8925_reg_write(info->adc, meas_cmd, 0);
	max8925_bulk_read(info->adc, meas_reg, 2, buf);
	/* MSB first, LSB second; result is left-justified in 16 bits. */
	ret = ((buf[0]<<8) | buf[1]) >> 4;

	return ret;
}
/*
 * get_property for the AC supply: online flag from cached state, voltage
 * from a live ADC reading (raw value * 2000 -> uV).
 */
static int max8925_ac_get_prop(struct power_supply *psy,
			       enum power_supply_property psp,
			       union power_supply_propval *val)
{
	struct max8925_power_info *info = dev_get_drvdata(psy->dev.parent);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = info->ac_online;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		if (info->ac_online) {
			ret = start_measure(info, MEASURE_VCHG);
			if (ret >= 0) {
				val->intval = ret * 2000; /* unit is uV */
				goto out;
			}
		}
		ret = -ENODATA;
		break;
	default:
		ret = -ENODEV;
		break;
	}
out:
	return ret;
}
/* Properties exposed by the AC supply. */
static enum power_supply_property max8925_ac_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
};
/*
 * get_property for the USB supply: same scheme as the AC supply -- online
 * flag from cached state, voltage from a live VCHG ADC reading (uV).
 */
static int max8925_usb_get_prop(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	struct max8925_power_info *info = dev_get_drvdata(psy->dev.parent);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = info->usb_online;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		if (info->usb_online) {
			ret = start_measure(info, MEASURE_VCHG);
			if (ret >= 0) {
				val->intval = ret * 2000; /* unit is uV */
				goto out;
			}
		}
		ret = -ENODATA;
		break;
	default:
		ret = -ENODEV;
		break;
	}
out:
	return ret;
}
/* Properties exposed by the USB supply. */
static enum power_supply_property max8925_usb_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
};
/*
 * get_property for the battery: voltage and current come from live ADC
 * readings, charge type and status from the CHG_STATUS register. All
 * battery-dependent properties return -ENODATA when no battery is present.
 */
static int max8925_bat_get_prop(struct power_supply *psy,
				enum power_supply_property psp,
				union power_supply_propval *val)
{
	struct max8925_power_info *info = dev_get_drvdata(psy->dev.parent);
	int ret = 0;

	switch (psp) {
	case POWER_SUPPLY_PROP_ONLINE:
		val->intval = info->bat_online;
		break;
	case POWER_SUPPLY_PROP_VOLTAGE_NOW:
		if (info->bat_online) {
			ret = start_measure(info, MEASURE_VMBATT);
			if (ret >= 0) {
				val->intval = ret * 2000; /* unit is uV */
				ret = 0;
				break;
			}
		}
		ret = -ENODATA;
		break;
	case POWER_SUPPLY_PROP_CURRENT_NOW:
		if (info->bat_online) {
			ret = start_measure(info, MEASURE_ISNS);
			if (ret >= 0) {
				/* assume r_sns is 0.02 */
				ret = ((ret * 6250) - 3125) /* uA */;
				val->intval = 0;
				/* Negative (discharge) readings are reported as 0. */
				if (ret > 0)
					val->intval = ret; /* unit is mA */
				ret = 0;
				break;
			}
		}
		ret = -ENODATA;
		break;
	case POWER_SUPPLY_PROP_CHARGE_TYPE:
		if (!info->bat_online) {
			ret = -ENODATA;
			break;
		}
		/* Charger mode field: bits [3:2] of CHG_STATUS. */
		ret = max8925_reg_read(info->gpm, MAX8925_CHG_STATUS);
		ret = (ret & MAX8925_CHG_STAT_MODE_MASK) >> 2;
		switch (ret) {
		case 1:
			val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST;
			break;
		case 0:
		case 2:
			val->intval = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
			break;
		case 3:
			val->intval = POWER_SUPPLY_CHARGE_TYPE_NONE;
			break;
		}
		ret = 0;
		break;
	case POWER_SUPPLY_PROP_STATUS:
		if (!info->bat_online) {
			ret = -ENODATA;
			break;
		}
		ret = max8925_reg_read(info->gpm, MAX8925_CHG_STATUS);
		if (info->usb_online || info->ac_online) {
			val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING;
			if (ret & MAX8925_CHG_STAT_EN_MASK)
				val->intval = POWER_SUPPLY_STATUS_CHARGING;
		} else
			val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
		ret = 0;
		break;
	default:
		ret = -ENODEV;
		break;
	}
	return ret;
}
/* Properties exposed by the battery supply. */
static enum power_supply_property max8925_battery_props[] = {
	POWER_SUPPLY_PROP_ONLINE,
	POWER_SUPPLY_PROP_VOLTAGE_NOW,
	POWER_SUPPLY_PROP_CURRENT_NOW,
	POWER_SUPPLY_PROP_CHARGE_TYPE,
	POWER_SUPPLY_PROP_STATUS,
};
/* Static descriptors for the three supplies registered in probe(). */
static const struct power_supply_desc ac_desc = {
	.name		= "max8925-ac",
	.type		= POWER_SUPPLY_TYPE_MAINS,
	.properties	= max8925_ac_props,
	.num_properties	= ARRAY_SIZE(max8925_ac_props),
	.get_property	= max8925_ac_get_prop,
};

static const struct power_supply_desc usb_desc = {
	.name		= "max8925-usb",
	.type		= POWER_SUPPLY_TYPE_USB,
	.properties	= max8925_usb_props,
	.num_properties	= ARRAY_SIZE(max8925_usb_props),
	.get_property	= max8925_usb_get_prop,
};

static const struct power_supply_desc battery_desc = {
	.name		= "max8925-battery",
	.type		= POWER_SUPPLY_TYPE_BATTERY,
	.properties	= max8925_battery_props,
	.num_properties	= ARRAY_SIZE(max8925_battery_props),
	.get_property	= max8925_bat_get_prop,
};
/*
 * Request one charger interrupt as a threaded oneshot IRQ routed to
 * max8925_charger_handler(). Failures are only logged; 'ret' is
 * overwritten by the next invocation.
 */
#define REQUEST_IRQ(_irq, _name)					\
do {									\
	ret = request_threaded_irq(chip->irq_base + _irq, NULL,		\
				   max8925_charger_handler,		\
				   IRQF_ONESHOT, _name, info);		\
	if (ret)							\
		dev_err(chip->dev, "Failed to request IRQ #%d: %d\n",	\
			_irq, ret);					\
} while (0)
/*
 * Request all charger IRQs, derive the initial AC/battery presence from
 * the ADC and CHG_STATUS (boot-time interrupts may have been missed),
 * then program the charge-control register. Always returns 0.
 */
static int max8925_init_charger(struct max8925_chip *chip,
				struct max8925_power_info *info)
{
	int ret;

	REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_OVP, "ac-ovp");
	if (!info->no_insert_detect) {
		REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_F, "ac-remove");
		REQUEST_IRQ(MAX8925_IRQ_VCHG_DC_R, "ac-insert");
	}
	if (!info->no_temp_support) {
		REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_R, "batt-temp-in-range");
		REQUEST_IRQ(MAX8925_IRQ_VCHG_THM_OK_F, "batt-temp-out-range");
	}
	REQUEST_IRQ(MAX8925_IRQ_VCHG_SYSLOW_F, "vsys-high");
	REQUEST_IRQ(MAX8925_IRQ_VCHG_SYSLOW_R, "vsys-low");
	REQUEST_IRQ(MAX8925_IRQ_VCHG_RST, "charger-reset");
	REQUEST_IRQ(MAX8925_IRQ_VCHG_DONE, "charger-done");
	REQUEST_IRQ(MAX8925_IRQ_VCHG_TOPOFF, "charger-topoff");
	REQUEST_IRQ(MAX8925_IRQ_VCHG_TMR_FAULT, "charger-timer-expire");

	info->usb_online = 0;
	info->bat_online = 0;

	/* check for power - can miss interrupt at boot time */
	if (start_measure(info, MEASURE_VCHG) * 2000 > 500000)
		info->ac_online = 1;
	else
		info->ac_online = 0;

	ret = max8925_reg_read(info->gpm, MAX8925_CHG_STATUS);
	if (ret >= 0) {
		/*
		 * If battery detection is enabled, ID pin of battery is
		 * connected to MBDET pin of MAX8925. It could be used to
		 * detect battery presence.
		 * Otherwise, we have to assume that battery is always on.
		 */
		if (info->batt_detect)
			info->bat_online = (ret & MAX8925_CHG_MBDET) ? 0 : 1;
		else
			info->bat_online = 1;
		if (ret & MAX8925_CHG_AC_RANGE_MASK)
			info->ac_online = 1;
		else
			info->ac_online = 0;
	}

	/* disable charge */
	max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 1 << 7, 1 << 7);
	/* set charging current in charge topoff mode */
	max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 3 << 5,
			 info->topoff_threshold << 5);
	/* set charing current in fast charge mode */
	max8925_set_bits(info->gpm, MAX8925_CHG_CNTL1, 7, info->fast_charge);

	return 0;
}
/*
 * Release every charger interrupt claimed by max8925_init_charger().
 * The charger IRQ numbers form a contiguous range from DC_OVP up to
 * TMR_FAULT above the chip's irq_base. Always returns 0.
 */
static int max8925_deinit_charger(struct max8925_power_info *info)
{
	struct max8925_chip *chip = info->chip;
	int first_irq = chip->irq_base + MAX8925_IRQ_VCHG_DC_OVP;
	int last_irq = chip->irq_base + MAX8925_IRQ_VCHG_TMR_FAULT;
	int irq;

	for (irq = first_irq; irq <= last_irq; irq++)
		free_irq(irq, info);

	return 0;
}
#ifdef CONFIG_OF
/*
 * Build the power-supply platform data from the "charger" child of the
 * parent MFD's device-tree node. Falls back to the board's platform_data
 * pointer when the parent has no OF node. Returns NULL when the charger
 * node is missing or allocation fails.
 */
static struct max8925_power_pdata *
max8925_power_dt_init(struct platform_device *pdev)
{
	struct device_node *nproot = pdev->dev.parent->of_node;
	struct device_node *np;
	/*
	 * ROBUSTNESS FIX: of_property_read_u32() leaves its output argument
	 * untouched on failure, so every tunable must be pre-initialized --
	 * previously a property missing from the device tree propagated an
	 * uninitialized stack value into pdata.
	 */
	int batt_detect = 0;
	int topoff_threshold = 0;
	int fast_charge = 0;
	int no_temp_support = 0;
	int no_insert_detect = 0;
	struct max8925_power_pdata *pdata;

	if (!nproot)
		return pdev->dev.platform_data;

	np = of_get_child_by_name(nproot, "charger");
	if (!np) {
		dev_err(&pdev->dev, "failed to find charger node\n");
		return NULL;
	}

	pdata = devm_kzalloc(&pdev->dev,
			sizeof(struct max8925_power_pdata),
			GFP_KERNEL);
	if (!pdata)
		goto ret;

	of_property_read_u32(np, "topoff-threshold", &topoff_threshold);
	of_property_read_u32(np, "batt-detect", &batt_detect);
	of_property_read_u32(np, "fast-charge", &fast_charge);
	of_property_read_u32(np, "no-insert-detect", &no_insert_detect);
	of_property_read_u32(np, "no-temp-support", &no_temp_support);

	pdata->batt_detect = batt_detect;
	pdata->fast_charge = fast_charge;
	pdata->topoff_threshold = topoff_threshold;
	pdata->no_insert_detect = no_insert_detect;
	pdata->no_temp_support = no_temp_support;

ret:
	of_node_put(np);
	return pdata;
}
#else
/* Without OF support the board-supplied platform data is used directly. */
static struct max8925_power_pdata *
max8925_power_dt_init(struct platform_device *pdev)
{
	return pdev->dev.platform_data;
}
#endif
/*
 * Platform probe: resolve platform data (DT or board file), allocate the
 * shared driver state, register the AC, USB and battery supplies (the
 * supplied-to configuration applies to AC and USB only), copy the charger
 * tunables and initialize the charger hardware.
 */
static int max8925_power_probe(struct platform_device *pdev)
{
	struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent);
	struct power_supply_config psy_cfg = {}; /* Only for ac and usb */
	struct max8925_power_pdata *pdata = NULL;
	struct max8925_power_info *info;
	int ret;

	pdata = max8925_power_dt_init(pdev);
	if (!pdata) {
		dev_err(&pdev->dev, "platform data isn't assigned to "
			"power supply\n");
		return -EINVAL;
	}

	info = devm_kzalloc(&pdev->dev, sizeof(struct max8925_power_info),
			GFP_KERNEL);
	if (!info)
		return -ENOMEM;
	info->chip = chip;
	info->gpm = chip->i2c;
	info->adc = chip->adc;
	platform_set_drvdata(pdev, info);

	psy_cfg.supplied_to = pdata->supplied_to;
	psy_cfg.num_supplicants = pdata->num_supplicants;

	info->ac = power_supply_register(&pdev->dev, &ac_desc, &psy_cfg);
	if (IS_ERR(info->ac)) {
		ret = PTR_ERR(info->ac);
		goto out;
	}
	info->ac->dev.parent = &pdev->dev;

	info->usb = power_supply_register(&pdev->dev, &usb_desc, &psy_cfg);
	if (IS_ERR(info->usb)) {
		ret = PTR_ERR(info->usb);
		goto out_unregister_ac;
	}
	info->usb->dev.parent = &pdev->dev;

	info->battery = power_supply_register(&pdev->dev, &battery_desc, NULL);
	if (IS_ERR(info->battery)) {
		ret = PTR_ERR(info->battery);
		goto out_unregister_usb;
	}
	info->battery->dev.parent = &pdev->dev;

	info->batt_detect = pdata->batt_detect;
	info->topoff_threshold = pdata->topoff_threshold;
	info->fast_charge = pdata->fast_charge;
	info->set_charger = pdata->set_charger;
	info->no_temp_support = pdata->no_temp_support;
	info->no_insert_detect = pdata->no_insert_detect;

	max8925_init_charger(chip, info);
	return 0;
out_unregister_usb:
	power_supply_unregister(info->usb);
out_unregister_ac:
	power_supply_unregister(info->ac);
out:
	return ret;
}
/*
 * Platform remove: unregister the three supplies and release the charger
 * IRQs. The info structure itself is devm-managed.
 */
static int max8925_power_remove(struct platform_device *pdev)
{
	struct max8925_power_info *info = platform_get_drvdata(pdev);

	if (info) {
		power_supply_unregister(info->ac);
		power_supply_unregister(info->usb);
		power_supply_unregister(info->battery);
		max8925_deinit_charger(info);
	}
	return 0;
}
/* Platform-driver glue: binds to the "max8925-power" platform device
 * created by the MAX8925 MFD core. */
static struct platform_driver max8925_power_driver = {
	.probe = max8925_power_probe,
	.remove = max8925_power_remove,
	.driver = {
		.name = "max8925-power",
	},
};
module_platform_driver(max8925_power_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Power supply driver for MAX8925");
MODULE_ALIAS("platform:max8925-power");
| gpl-2.0 |
sureandrew/trinitycore | dep/acelite/ace/IOStream_T.cpp | 653 | 6334 | // $Id: IOStream_T.cpp 80826 2008-03-04 14:51:23Z wotte $
#ifndef ACE_IOSTREAM_T_CPP
#define ACE_IOSTREAM_T_CPP
#include "ace/IOStream_T.h"
#include "ace/OS_Memory.h"
#if !defined (ACE_LACKS_PRAGMA_ONCE)
# pragma once
#endif /* ACE_LACKS_PRAGMA_ONCE */
#if !defined (ACE_LACKS_ACE_IOSTREAM)
#if !defined (__ACE_INLINE__)
#include "ace/IOStream_T.inl"
#endif /* !__ACE_INLINE__ */
ACE_BEGIN_VERSIONED_NAMESPACE_DECL
// We will be given a STREAM by the iostream object which creates us.
// See the ACE_IOStream template for how that works. Like other
// streambuf objects, we can be input-only, output-only or both.
// Construct a streambuf bound to @a peer.  Only buffered I/O is
// supported: unbuffered and line-buffered modes are explicitly
// disabled below because underflow/overflow have no support for them.
template <class STREAM>
ACE_Streambuf_T<STREAM>::ACE_Streambuf_T (STREAM *peer,
                                          u_int streambuf_size,
                                          int io_mode)
  : ACE_Streambuf (streambuf_size, io_mode),
    peer_ (peer)
{
  // A streambuf allows for unbuffered IO where every character is
  // read as requested and written as provided.  To me, this seems
  // terribly inefficient for socket-type operations, so I've disabled
  // it.  All of the work would be done by the underflow/overflow
  // functions anyway and I haven't implemented anything there to
  // support unbuffered IO.
#if !defined (ACE_LACKS_UNBUFFERED_STREAMBUF)
  this->unbuffered (0);
#endif /* ! ACE_LACKS_UNBUFFERED_STREAMBUF */

  // Linebuffered is similar to unbuffered.  Again, I don't have any
  // need for this and I don't see the advantage.  I believe this
  // would have to be supported by underflow/overflow to be effective.
#if !defined (ACE_LACKS_LINEBUFFERED_STREAMBUF)
  this->linebuffered (0);
#endif /* ! ACE_LACKS_LINEBUFFERED_STREAMBUF */
}
// Blocking write: forward the entire buffer to the peer via send_n().
template <class STREAM> ssize_t
ACE_Streambuf_T<STREAM>::send (char *buf, ssize_t len)
{
  return peer_->send_n (buf,len);
}
// Convenience overload: timed read with flags == 0, delegating to the
// four-argument recv() below.
template <class STREAM> ssize_t
ACE_Streambuf_T<STREAM>::recv (char *buf,
                               ssize_t len,
                               ACE_Time_Value *tv)
{
  return this->recv (buf, len, 0, tv);
}
// Timed read.  Clears the timeout flag and errno before delegating to
// the peer's recv(); if the call failed with ETIME the timeout_ flag
// is latched so callers can distinguish a timeout from other errors.
template <class STREAM> ssize_t
ACE_Streambuf_T<STREAM>::recv (char *buf,
                               ssize_t len,
                               int flags,
                               ACE_Time_Value * tv)
{
  this->timeout_ = 0;
  errno = ESUCCESS;
  ssize_t rval = peer_->recv (buf, len, flags, tv);
  if (errno == ETIME)
    this->timeout_ = 1;
  return rval;
}
// Timed "receive exactly n" variant: identical ETIME bookkeeping to
// recv() above, but delegates to the peer's recv_n().
template <class STREAM> ssize_t
ACE_Streambuf_T<STREAM>::recv_n (char *buf,
                                 ssize_t len,
                                 int flags,
                                 ACE_Time_Value *tv)
{
  this->timeout_ = 0;
  errno = ESUCCESS;
  ssize_t rval = peer_->recv_n (buf, len, flags, tv);
  if (errno == ETIME)
    this->timeout_ = 1;
  return rval;
}
// Return the peer's I/O handle, or 0 when no peer is attached.
template <class STREAM> ACE_HANDLE
ACE_Streambuf_T<STREAM>::get_handle (void)
{
  if (peer_ == 0)
    return 0;

  return peer_->get_handle ();
}
// The typical constructor.  This will initialize your STREAM and then
// setup the iostream baseclass to use a custom streambuf based on
// STREAM.
template <class STREAM>
ACE_IOStream<STREAM>::ACE_IOStream (STREAM &stream,
                                    u_int streambuf_size)
  : iostream (0),
    STREAM (stream)
{
  // ACE_NEW returns (without throwing) on allocation failure, leaving
  // streambuf_ unset.
  ACE_NEW (streambuf_,
           ACE_Streambuf_T<STREAM> ((STREAM *) this,
                                    streambuf_size));
  iostream::init (this->streambuf_);
}
// As above, but default-constructs the STREAM base instead of copying
// a caller-supplied one.
template <class STREAM>
ACE_IOStream<STREAM>::ACE_IOStream (u_int streambuf_size)
  : iostream (0)
{
  ACE_NEW (this->streambuf_,
           ACE_Streambuf_T<STREAM> ((STREAM *) this,
                                    streambuf_size));
  iostream::init (this->streambuf_);
}
// We have to get rid of the streambuf_ ourselves since we gave it to
// the iostream base via init() — the base does not own it.
template <class STREAM>
ACE_IOStream<STREAM>::~ACE_IOStream (void)
{
  delete this->streambuf_;
}
// The only ambiguity in the multiple inheritance is the close ()
// function; resolve it explicitly in favor of the STREAM base.
template <class STREAM> int
ACE_IOStream<STREAM>::close (void)
{
  return STREAM::close ();
}
// Install *tv as the new receive timeout on the streambuf and hand the
// previously installed timeout back through the same pointer.
template <class STREAM> ACE_IOStream<STREAM> &
ACE_IOStream<STREAM>::operator>> (ACE_Time_Value *&tv)
{
  ACE_Time_Value *old_tv = this->streambuf_->recv_timeout (tv);
  tv = old_tv;
  return *this;
}
#if defined (ACE_HAS_STRING_CLASS)
// A simple string operator. The base iostream has 'em for char* but
// that isn't always the best thing for a String. If we don't provide
// our own here, we may not get what we want.
// Whitespace-delimited string extraction: the first get() seeds the
// string, the loop appends until EOF or whitespace, and the
// terminating whitespace character is consumed but not stored.
template <class STREAM> ACE_IOStream<STREAM> &
ACE_IOStream<STREAM>::operator>> (ACE_IOStream_String &v)
{
  if (ipfx0 ())
    {
      char c;
      this->get (c);

      for (v = c;
           this->get (c) && !isspace (c);
           v += c)
        continue;
    }

  isfx ();

  return *this;
}
// Insert the string character by character.  The Win32/MSVC branch
// assumes a CString-style GetLength(); elsewhere length() is used.
template <class STREAM> ACE_IOStream<STREAM> &
ACE_IOStream<STREAM>::operator<< (ACE_IOStream_String &v)
{
  if (opfx ())
    {
#if defined (ACE_WIN32) && defined (_MSC_VER)
      for (int i = 0; i < v.GetLength (); ++i)
#else
      for (u_int i = 0; i < (u_int) v.length (); ++i)
#endif /* ACE_WIN32 && defined (_MSC_VER) */
        this->put (v[i]);
    }

  osfx ();

  return *this;
}
// A more clever put operator for strings that knows how to deal with
// quoted strings containing back-quoted quotes.
template <class STREAM> STREAM &
operator>> (STREAM &stream,
            ACE_Quoted_String &str)
{
  char c;

  if (!(stream >> c))      // eat space up to the first char
    //  stream.set (ios::eofbit|ios::failbit);
    return stream;         // NB: this return IS the if-body; the
                           // commented-out set() line above is inert.

  str = "";                // Initialize the string

  // if we don't have a quote, append until we see space
  if (c != '"')
    for (str = c; stream.get (c) && !isspace (c); str += c)
      continue;
  else
    // Quoted form: append up to the closing quote; a back-quoted
    // quote (\") yields a bare quote, while a backslash before any
    // other character is kept literally along with that character.
    for (; stream.get (c) && c != '"'; str += c)
      if (c == '\\')
        {
          stream.get (c);
          if (c != '"')
            str += '\\';
        }

  return stream;
}
template <class STREAM> STREAM &
operator<< (STREAM &stream,
ACE_Quoted_String &str)
{
stream.put ('"');
for (u_int i = 0; i < str.length (); ++i)
{
if (str[i] == '"')
stream.put ('\\');
stream.put (str[i]);
}
stream.put ('"');
return stream;
}
ACE_END_VERSIONED_NAMESPACE_DECL
#endif /* ACE_HAS_STRING_CLASS */
#endif /* ACE_LACKS_ACE_IOSTREAM */
#endif /* ACE_IOSTREAM_T_CPP */
| gpl-2.0 |
curbthepain/revkernel_s5 | drivers/scsi/nsp32.c | 1165 | 91234 | /*
* NinjaSCSI-32Bi Cardbus, NinjaSCSI-32UDE PCI/CardBus SCSI driver
* Copyright (C) 2001, 2002, 2003
* YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>
* GOTO Masanori <gotom@debian.or.jp>, <gotom@debian.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*
* Revision History:
* 1.0: Initial Release.
* 1.1: Add /proc SDTR status.
* Remove obsolete error handler nsp32_reset.
* Some clean up.
* 1.2: PowerPC (big endian) support.
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/ioport.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/ctype.h>
#include <linux/dma-mapping.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_ioctl.h>
#include "nsp32.h"
/***********************************************************************
* Module parameters
*/
static int trans_mode = 0; /* default: BIOS */
module_param (trans_mode, int, 0);
MODULE_PARM_DESC(trans_mode, "transfer mode (0: BIOS(default) 1: Async 2: Ultra20M");
#define ASYNC_MODE 1
#define ULTRA20M_MODE 2
static bool auto_param = 0; /* default: ON */
module_param (auto_param, bool, 0);
MODULE_PARM_DESC(auto_param, "AutoParameter mode (0: ON(default) 1: OFF)");
static bool disc_priv = 1; /* default: OFF */
module_param (disc_priv, bool, 0);
MODULE_PARM_DESC(disc_priv, "disconnection privilege mode (0: ON 1: OFF(default))");
MODULE_AUTHOR("YOKOTA Hiroshi <yokota@netlab.is.tsukuba.ac.jp>, GOTO Masanori <gotom@debian.or.jp>");
MODULE_DESCRIPTION("Workbit NinjaSCSI-32Bi/UDE CardBus/PCI SCSI host bus adapter module");
MODULE_LICENSE("GPL");
static const char *nsp32_release_version = "1.2";
/****************************************************************************
* Supported hardware
*/
static struct pci_device_id nsp32_pci_table[] __devinitdata = {
{
.vendor = PCI_VENDOR_ID_IODATA,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_CBSC_II,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_IODATA,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_KME,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_KME,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_WBT,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_WORKBIT,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_WORKBIT_STANDARD,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_PCI_WORKBIT,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32BI_LOGITEC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_LOGITEC,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_PCI_LOGITEC,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_PCI_MELCO,
},
{
.vendor = PCI_VENDOR_ID_WORKBIT,
.device = PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO_II,
.subvendor = PCI_ANY_ID,
.subdevice = PCI_ANY_ID,
.driver_data = MODEL_PCI_MELCO,
},
{0,0,},
};
MODULE_DEVICE_TABLE(pci, nsp32_pci_table);
static nsp32_hw_data nsp32_data_base; /* probe <-> detect glue */
/*
* Period/AckWidth speed conversion table
*
* Note: This period/ackwidth speed table must be in descending order.
*/
static nsp32_sync_table nsp32_sync_table_40M[] = {
/* {PNo, AW, SP, EP, SREQ smpl} Speed(MB/s) Period AckWidth */
{0x1, 0, 0x0c, 0x0c, SMPL_40M}, /* 20.0 : 50ns, 25ns */
{0x2, 0, 0x0d, 0x18, SMPL_40M}, /* 13.3 : 75ns, 25ns */
{0x3, 1, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */
{0x4, 1, 0x1a, 0x1f, SMPL_20M}, /* 8.0 : 125ns, 50ns */
{0x5, 2, 0x20, 0x25, SMPL_20M}, /* 6.7 : 150ns, 75ns */
{0x6, 2, 0x26, 0x31, SMPL_20M}, /* 5.7 : 175ns, 75ns */
{0x7, 3, 0x32, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */
{0x8, 3, 0x33, 0x38, SMPL_10M}, /* 4.4 : 225ns, 100ns */
{0x9, 3, 0x39, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */
};
static nsp32_sync_table nsp32_sync_table_20M[] = {
{0x1, 0, 0x19, 0x19, SMPL_40M}, /* 10.0 : 100ns, 50ns */
{0x2, 0, 0x1a, 0x25, SMPL_20M}, /* 6.7 : 150ns, 50ns */
{0x3, 1, 0x26, 0x32, SMPL_20M}, /* 5.0 : 200ns, 100ns */
{0x4, 1, 0x33, 0x3e, SMPL_10M}, /* 4.0 : 250ns, 100ns */
{0x5, 2, 0x3f, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 150ns */
{0x6, 2, 0x4c, 0x57, SMPL_10M}, /* 2.8 : 350ns, 150ns */
{0x7, 3, 0x58, 0x64, SMPL_10M}, /* 2.5 : 400ns, 200ns */
{0x8, 3, 0x65, 0x70, SMPL_10M}, /* 2.2 : 450ns, 200ns */
{0x9, 3, 0x71, 0x7d, SMPL_10M}, /* 2.0 : 500ns, 200ns */
};
static nsp32_sync_table nsp32_sync_table_pci[] = {
{0x1, 0, 0x0c, 0x0f, SMPL_40M}, /* 16.6 : 60ns, 30ns */
{0x2, 0, 0x10, 0x16, SMPL_40M}, /* 11.1 : 90ns, 30ns */
{0x3, 1, 0x17, 0x1e, SMPL_20M}, /* 8.3 : 120ns, 60ns */
{0x4, 1, 0x1f, 0x25, SMPL_20M}, /* 6.7 : 150ns, 60ns */
{0x5, 2, 0x26, 0x2d, SMPL_20M}, /* 5.6 : 180ns, 90ns */
{0x6, 2, 0x2e, 0x34, SMPL_10M}, /* 4.8 : 210ns, 90ns */
{0x7, 3, 0x35, 0x3c, SMPL_10M}, /* 4.2 : 240ns, 120ns */
{0x8, 3, 0x3d, 0x43, SMPL_10M}, /* 3.7 : 270ns, 120ns */
{0x9, 3, 0x44, 0x4b, SMPL_10M}, /* 3.3 : 300ns, 120ns */
};
/*
* function declaration
*/
/* module entry point */
static int __devinit nsp32_probe (struct pci_dev *, const struct pci_device_id *);
static void __devexit nsp32_remove(struct pci_dev *);
static int __init init_nsp32 (void);
static void __exit exit_nsp32 (void);
/* struct struct scsi_host_template */
static int nsp32_proc_info (struct Scsi_Host *, char *, char **, off_t, int, int);
static int nsp32_detect (struct pci_dev *pdev);
static int nsp32_queuecommand(struct Scsi_Host *, struct scsi_cmnd *);
static const char *nsp32_info (struct Scsi_Host *);
static int nsp32_release (struct Scsi_Host *);
/* SCSI error handler */
static int nsp32_eh_abort (struct scsi_cmnd *);
static int nsp32_eh_bus_reset (struct scsi_cmnd *);
static int nsp32_eh_host_reset(struct scsi_cmnd *);
/* generate SCSI message */
static void nsp32_build_identify(struct scsi_cmnd *);
static void nsp32_build_nop (struct scsi_cmnd *);
static void nsp32_build_reject (struct scsi_cmnd *);
static void nsp32_build_sdtr (struct scsi_cmnd *, unsigned char, unsigned char);
/* SCSI message handler */
static int nsp32_busfree_occur(struct scsi_cmnd *, unsigned short);
static void nsp32_msgout_occur (struct scsi_cmnd *);
static void nsp32_msgin_occur (struct scsi_cmnd *, unsigned long, unsigned short);
static int nsp32_setup_sg_table (struct scsi_cmnd *);
static int nsp32_selection_autopara(struct scsi_cmnd *);
static int nsp32_selection_autoscsi(struct scsi_cmnd *);
static void nsp32_scsi_done (struct scsi_cmnd *);
static int nsp32_arbitration (struct scsi_cmnd *, unsigned int);
static int nsp32_reselection (struct scsi_cmnd *, unsigned char);
static void nsp32_adjust_busfree (struct scsi_cmnd *, unsigned int);
static void nsp32_restart_autoscsi (struct scsi_cmnd *, unsigned short);
/* SCSI SDTR */
static void nsp32_analyze_sdtr (struct scsi_cmnd *);
static int nsp32_search_period_entry(nsp32_hw_data *, nsp32_target *, unsigned char);
static void nsp32_set_async (nsp32_hw_data *, nsp32_target *);
static void nsp32_set_max_sync (nsp32_hw_data *, nsp32_target *, unsigned char *, unsigned char *);
static void nsp32_set_sync_entry (nsp32_hw_data *, nsp32_target *, int, unsigned char);
/* SCSI bus status handler */
static void nsp32_wait_req (nsp32_hw_data *, int);
static void nsp32_wait_sack (nsp32_hw_data *, int);
static void nsp32_sack_assert (nsp32_hw_data *);
static void nsp32_sack_negate (nsp32_hw_data *);
static void nsp32_do_bus_reset(nsp32_hw_data *);
/* hardware interrupt handler */
static irqreturn_t do_nsp32_isr(int, void *);
/* initialize hardware */
static int nsp32hw_init(nsp32_hw_data *);
/* EEPROM handler */
static int nsp32_getprom_param (nsp32_hw_data *);
static int nsp32_getprom_at24 (nsp32_hw_data *);
static int nsp32_getprom_c16 (nsp32_hw_data *);
static void nsp32_prom_start (nsp32_hw_data *);
static void nsp32_prom_stop (nsp32_hw_data *);
static int nsp32_prom_read (nsp32_hw_data *, int);
static int nsp32_prom_read_bit (nsp32_hw_data *);
static void nsp32_prom_write_bit(nsp32_hw_data *, int);
static void nsp32_prom_set (nsp32_hw_data *, int, int);
static int nsp32_prom_get (nsp32_hw_data *, int);
/* debug/warning/info message */
static void nsp32_message (const char *, int, char *, char *, ...);
#ifdef NSP32_DEBUG
static void nsp32_dmessage(const char *, int, int, char *, ...);
#endif
/*
* max_sectors is currently limited up to 128.
*/
static struct scsi_host_template nsp32_template = {
.proc_name = "nsp32",
.name = "Workbit NinjaSCSI-32Bi/UDE",
.proc_info = nsp32_proc_info,
.info = nsp32_info,
.queuecommand = nsp32_queuecommand,
.can_queue = 1,
.sg_tablesize = NSP32_SG_SIZE,
.max_sectors = 128,
.cmd_per_lun = 1,
.this_id = NSP32_HOST_SCSIID,
.use_clustering = DISABLE_CLUSTERING,
.eh_abort_handler = nsp32_eh_abort,
.eh_bus_reset_handler = nsp32_eh_bus_reset,
.eh_host_reset_handler = nsp32_eh_host_reset,
/* .highmem_io = 1, */
};
#include "nsp32_io.h"
/***********************************************************************
* debug, error print
*/
#ifndef NSP32_DEBUG
# define NSP32_DEBUG_MASK 0x000000
# define nsp32_msg(type, args...) nsp32_message ("", 0, (type), args)
# define nsp32_dbg(mask, args...) /* */
#else
# define NSP32_DEBUG_MASK 0xffffff
# define nsp32_msg(type, args...) \
nsp32_message (__func__, __LINE__, (type), args)
# define nsp32_dbg(mask, args...) \
nsp32_dmessage(__func__, __LINE__, (mask), args)
#endif
#define NSP32_DEBUG_QUEUECOMMAND BIT(0)
#define NSP32_DEBUG_REGISTER BIT(1)
#define NSP32_DEBUG_AUTOSCSI BIT(2)
#define NSP32_DEBUG_INTR BIT(3)
#define NSP32_DEBUG_SGLIST BIT(4)
#define NSP32_DEBUG_BUSFREE BIT(5)
#define NSP32_DEBUG_CDB_CONTENTS BIT(6)
#define NSP32_DEBUG_RESELECTION BIT(7)
#define NSP32_DEBUG_MSGINOCCUR BIT(8)
#define NSP32_DEBUG_EEPROM BIT(9)
#define NSP32_DEBUG_MSGOUTOCCUR BIT(10)
#define NSP32_DEBUG_BUSRESET BIT(11)
#define NSP32_DEBUG_RESTART BIT(12)
#define NSP32_DEBUG_SYNC BIT(13)
#define NSP32_DEBUG_WAIT BIT(14)
#define NSP32_DEBUG_TARGETFLAG BIT(15)
#define NSP32_DEBUG_PROC BIT(16)
#define NSP32_DEBUG_INIT BIT(17)
#define NSP32_SPECIAL_PRINT_REGISTER BIT(20)
#define NSP32_DEBUG_BUF_LEN 100
/*
 * Format and print a driver message at the given printk level ("type",
 * e.g. KERN_WARNING).  The text is built into a fixed-size stack
 * buffer; vsnprintf() guarantees truncation rather than overflow.
 * "func" and "line" are used only in NSP32_DEBUG builds.
 */
static void nsp32_message(const char *func, int line, char *type, char *fmt, ...)
{
	va_list args;
	char buf[NSP32_DEBUG_BUF_LEN];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

#ifndef NSP32_DEBUG
	printk("%snsp32: %s\n", type, buf);
#else
	printk("%snsp32: %s (%d): %s\n", type, func, line, buf);
#endif
}
#ifdef NSP32_DEBUG
/*
 * Debug-build message helper: prints only when "mask" intersects
 * NSP32_DEBUG_MASK, tagging the output with the mask, the calling
 * function, and the line number.
 */
static void nsp32_dmessage(const char *func, int line, int mask, char *fmt, ...)
{
	va_list args;
	char buf[NSP32_DEBUG_BUF_LEN];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);

	if (mask & NSP32_DEBUG_MASK) {
		printk("nsp32-debug: 0x%x %s (%d): %s\n", mask, func, line, buf);
	}
}
#endif
#ifdef NSP32_DEBUG
# include "nsp32_debug.c"
#else
# define show_command(arg) /* */
# define show_busphase(arg) /* */
# define show_autophase(arg) /* */
#endif
/*
 * IDENTIFY Message: append an IDENTIFY byte for this command's LUN to
 * the message-out buffer.  DiscPriv (disconnection privilege) is
 * currently always FALSE — the disc_priv module parameter is read but
 * the mode override is still commented out (see the XXX below).
 */
static void nsp32_build_identify(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int pos = data->msgout_len;
	int mode = FALSE;

	/* XXX: Auto DiscPriv detection is progressing... */
	if (disc_priv == 0) {
		/* mode = TRUE; */
	}

	data->msgoutbuf[pos] = IDENTIFY(mode, SCpnt->device->lun); pos++;

	data->msgout_len = pos;
}
/*
 * SDTR Message Routine: append a SCSI-2 SDTR (synchronous data
 * transfer request) extended message to the message-out buffer.
 * Layout: EXTENDED_MESSAGE, length, SDTR code, period, offset.
 */
static void nsp32_build_sdtr(struct scsi_cmnd *SCpnt,
			     unsigned char period,
			     unsigned char offset)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	int idx = data->msgout_len;
	const unsigned char sdtr_msg[5] = {
		EXTENDED_MESSAGE, EXTENDED_SDTR_LEN, EXTENDED_SDTR,
		period, offset,
	};
	int i;

	for (i = 0; i < 5; i++)
		data->msgoutbuf[idx++] = sdtr_msg[i];

	data->msgout_len = idx;
}
/*
 * No Operation Message: queued only when the message-out buffer is
 * still empty; otherwise a warning is logged and nothing changes.
 */
static void nsp32_build_nop(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;

	if (data->msgout_len != 0) {
		nsp32_msg(KERN_WARNING,
			  "Some messages are already contained!");
		return;
	}

	data->msgoutbuf[0] = NOP;
	data->msgout_len   = 1;
}
/*
 * Reject Message: append a single MESSAGE_REJECT byte to the
 * message-out buffer.
 */
static void nsp32_build_reject(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;

	data->msgoutbuf[data->msgout_len++] = MESSAGE_REJECT;
}
/*
 * timer
 */
#if 0
/*
 * Dead code: program the card's one-shot timer.  Kept for reference;
 * note it predates the current SCSI API (SCpnt->host is now reached
 * via SCpnt->device->host), so it would need updating if re-enabled.
 */
static void nsp32_start_timer(struct scsi_cmnd *SCpnt, int time)
{
	unsigned int base = SCpnt->host->io_port;

	nsp32_dbg(NSP32_DEBUG_INTR, "timer=%d", time);

	/* warn (debug builds only) if the value exceeds the counter width */
	if (time & (~TIMER_CNT_MASK)) {
		nsp32_dbg(NSP32_DEBUG_INTR, "timer set overflow");
	}

	nsp32_write2(base, TIMER_SET, time & TIMER_CNT_MASK);
}
#endif
/*
 * Set the SCSI command and other parameters into the ASIC's
 * AutoParameter block and start the selection phase.  Returns TRUE
 * when arbitration was won, FALSE on any failure (bus busy, empty
 * message-out buffer, arbitration lost or timed out); on failure
 * SCpnt->result already carries the DID_* status.
 */
static int nsp32_selection_autopara(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int base = SCpnt->device->host->io_port;
	unsigned int host_id = SCpnt->device->host->this_id;
	unsigned char target = scmd_id(SCpnt);
	nsp32_autoparam *param = data->autoparam;
	unsigned char phase;
	int i, ret;
	unsigned int msgout;
	u16_le s;

	nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");

	/*
	 * check bus free
	 */
	phase = nsp32_read1(base, SCSI_BUS_MONITOR);
	if (phase != BUSMON_BUS_FREE) {
		nsp32_msg(KERN_WARNING, "bus busy");
		show_busphase(phase & BUSMON_PHASE_MASK);
		SCpnt->result = DID_BUS_BUSY << 16;
		return FALSE;
	}

	/*
	 * message out
	 *
	 * Note: If the range of msgout_len is 1 - 3, fill scsi_msgout.
	 *       over 3 messages needs another routine.
	 */
	if (data->msgout_len == 0) {
		nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!");
		SCpnt->result = DID_ERROR << 16;
		return FALSE;
	} else if (data->msgout_len > 0 && data->msgout_len <= 3) {
		msgout = 0;
		for (i = 0; i < data->msgout_len; i++) {
			/*
			 * the sending order of the message is:
			 *  MCNT 3: MSG#0 -> MSG#1 -> MSG#2
			 *  MCNT 2: MSG#1 -> MSG#2
			 *  MCNT 1: MSG#2
			 */
			msgout >>= 8;
			msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
		}
		msgout |= MV_VALID;	/* MV valid */
		msgout |= (unsigned int)data->msgout_len; /* len */
	} else {
		/* data->msgout_len > 3 */
		msgout = 0;
	}

	// nsp_dbg(NSP32_DEBUG_AUTOSCSI, "sel time out=0x%x\n", nsp32_read2(base, SEL_TIME_OUT));
	// nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);

	/*
	 * setup asic parameter
	 */
	memset(param, 0, sizeof(nsp32_autoparam));

	/* cdb: the ASIC's parameter block spaces CDB bytes 4 apart */
	for (i = 0; i < SCpnt->cmd_len; i++) {
		param->cdb[4 * i] = SCpnt->cmnd[i];
	}

	/* outgoing messages */
	param->msgout = cpu_to_le32(msgout);

	/* syncreg, ackwidth, target id, SREQ sampling rate */
	param->syncreg    = data->cur_target->syncreg;
	param->ackwidth   = data->cur_target->ackwidth;
	param->target_id  = BIT(host_id) | BIT(target);
	param->sample_reg = data->cur_target->sample_reg;

	// nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "sample rate=0x%x\n", data->cur_target->sample_reg);

	/* command control */
	param->command_control = cpu_to_le16(CLEAR_CDB_FIFO_POINTER |
					     AUTOSCSI_START |
					     AUTO_MSGIN_00_OR_04 |
					     AUTO_MSGIN_02 |
					     AUTO_ATN );

	/* transfer control */
	s = 0;
	switch (data->trans_method) {
	case NSP32_TRANSFER_BUSMASTER:
		s |= BM_START;
		break;
	case NSP32_TRANSFER_MMIO:
		s |= CB_MMIO_MODE;
		break;
	case NSP32_TRANSFER_PIO:
		s |= CB_IO_MODE;
		break;
	default:
		nsp32_msg(KERN_ERR, "unknown trans_method");
		break;
	}
	/*
	 * OR-ed BLIEND_MODE, FIFO intr is decreased, instead of PCI bus waits.
	 * For bus master transfer, it's taken off.
	 */
	s |= (TRANSFER_GO | ALL_COUNTER_CLR);
	param->transfer_control = cpu_to_le16(s);

	/* sg table addr */
	param->sgt_pointer = cpu_to_le32(data->cur_lunt->sglun_paddr);

	/*
	 * transfer parameter to ASIC
	 */
	nsp32_write4(base, SGT_ADR, data->auto_paddr);
	nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER |
					    AUTO_PARAMETER );

	/*
	 * Check arbitration
	 */
	ret = nsp32_arbitration(SCpnt, base);

	return ret;
}
/*
 * Selection with AUTO SCSI (without AUTO PARAMETER): program each
 * register by hand (CDB FIFO, msgout register, timeouts, sync
 * parameters, SG table address) and kick off AutoSCSI.  Returns the
 * nsp32_arbitration() result (TRUE on arbitration win); on the early
 * error paths it returns 1 with SCpnt->result set.  IRQs are masked
 * for the whole programming sequence and re-enabled on every exit.
 *
 * Fix: the bus-busy test used to compare the masked status bits with
 * 1 ("(phase & BUSMON_BSY) == 1"), which can only ever match if the
 * mask happens to be bit 0, so a busy bus was effectively never
 * detected.  The bits are now tested for non-zero.
 */
static int nsp32_selection_autoscsi(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int base = SCpnt->device->host->io_port;
	unsigned int host_id = SCpnt->device->host->this_id;
	unsigned char target = scmd_id(SCpnt);
	unsigned char phase;
	int status;
	unsigned short command = 0;
	unsigned int msgout = 0;
	unsigned short execph;
	int i;

	nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "in");

	/*
	 * IRQ disable
	 */
	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);

	/*
	 * check bus line: BSY or SEL asserted means the bus is in use
	 */
	phase = nsp32_read1(base, SCSI_BUS_MONITOR);
	if ((phase & BUSMON_BSY) || (phase & BUSMON_SEL)) {
		nsp32_msg(KERN_WARNING, "bus busy");
		SCpnt->result = DID_BUS_BUSY << 16;
		status = 1;
		goto out;
	}

	/*
	 * clear execph (value intentionally unused; presumably the read
	 * itself clears the register — TODO confirm against the data sheet)
	 */
	execph = nsp32_read2(base, SCSI_EXECUTE_PHASE);

	/*
	 * clear FIFO counter to set CDBs
	 */
	nsp32_write2(base, COMMAND_CONTROL, CLEAR_CDB_FIFO_POINTER);

	/*
	 * set CDB0 - CDB15
	 */
	for (i = 0; i < SCpnt->cmd_len; i++) {
		nsp32_write1(base, COMMAND_DATA, SCpnt->cmnd[i]);
	}
	nsp32_dbg(NSP32_DEBUG_CDB_CONTENTS, "CDB[0]=[0x%x]", SCpnt->cmnd[0]);

	/*
	 * set SCSIOUT LATCH(initiator)/TARGET(target) (OR-ed) ID
	 */
	nsp32_write1(base, SCSI_OUT_LATCH_TARGET_ID, BIT(host_id) | BIT(target));

	/*
	 * set SCSI MSGOUT REG
	 *
	 * Note: If the range of msgout_len is 1 - 3, fill scsi_msgout.
	 *       over 3 messages needs another routine.
	 */
	if (data->msgout_len == 0) {
		nsp32_msg(KERN_ERR, "SCSI MsgOut without any message!");
		SCpnt->result = DID_ERROR << 16;
		status = 1;
		goto out;
	} else if (data->msgout_len > 0 && data->msgout_len <= 3) {
		msgout = 0;
		for (i = 0; i < data->msgout_len; i++) {
			/*
			 * the sending order of the message is:
			 *  MCNT 3: MSG#0 -> MSG#1 -> MSG#2
			 *  MCNT 2: MSG#1 -> MSG#2
			 *  MCNT 1: MSG#2
			 */
			msgout >>= 8;
			msgout |= ((unsigned int)(data->msgoutbuf[i]) << 24);
		}
		msgout |= MV_VALID;	/* MV valid */
		msgout |= (unsigned int)data->msgout_len; /* len */
		nsp32_write4(base, SCSI_MSG_OUT, msgout);
	} else {
		/* data->msgout_len > 3 */
		nsp32_write4(base, SCSI_MSG_OUT, 0);
	}

	/*
	 * set selection timeout(= 250ms)
	 */
	nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);

	/*
	 * set SREQ hazard killer sampling rate
	 *
	 * TODO: sample_rate (BASE+0F) is 0 when internal clock = 40MHz.
	 *       check other internal clock!
	 */
	nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg);

	/*
	 * clear Arbit
	 */
	nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR);

	/*
	 * set SYNCREG
	 * Don't set BM_START_ADR before setting this register.
	 */
	nsp32_write1(base, SYNC_REG, data->cur_target->syncreg);

	/*
	 * set ACKWIDTH
	 */
	nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth);

	nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
		  "syncreg=0x%x, ackwidth=0x%x, sgtpaddr=0x%x, id=0x%x",
		  nsp32_read1(base, SYNC_REG), nsp32_read1(base, ACK_WIDTH),
		  nsp32_read4(base, SGT_ADR), nsp32_read1(base, SCSI_OUT_LATCH_TARGET_ID));
	nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "msgout_len=%d, msgout=0x%x",
		  data->msgout_len, msgout);

	/*
	 * set SGT ADDR (physical address)
	 */
	nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr);

	/*
	 * set TRANSFER CONTROL REG
	 */
	command = 0;
	command |= (TRANSFER_GO | ALL_COUNTER_CLR);
	if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
		if (scsi_bufflen(SCpnt) > 0) {
			command |= BM_START;
		}
	} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
		command |= CB_MMIO_MODE;
	} else if (data->trans_method & NSP32_TRANSFER_PIO) {
		command |= CB_IO_MODE;
	}
	nsp32_write2(base, TRANSFER_CONTROL, command);

	/*
	 * start AUTO SCSI, kick off arbitration
	 */
	command = (CLEAR_CDB_FIFO_POINTER |
		   AUTOSCSI_START |
		   AUTO_MSGIN_00_OR_04 |
		   AUTO_MSGIN_02 |
		   AUTO_ATN );
	nsp32_write2(base, COMMAND_CONTROL, command);

	/*
	 * Check arbitration
	 */
	status = nsp32_arbitration(SCpnt, base);

 out:
	/*
	 * IRQ enable
	 */
	nsp32_write2(base, IRQ_CONTROL, 0);

	return status;
}
/*
* Arbitration Status Check
*
* Note: Arbitration counter is waited during ARBIT_GO is not lifting.
* Using udelay(1) consumes CPU time and system time, but
* arbitration delay time is defined minimal 2.4us in SCSI
* specification, thus udelay works as coarse grained wait timer.
*/
static int nsp32_arbitration(struct scsi_cmnd *SCpnt, unsigned int base)
{
	unsigned char arbit;
	int status = TRUE;
	int time = 0;

	/* Poll until the chip reports a win or a loss, or the coarse
	 * iteration-count timeout expires (see header comment above). */
	do {
		arbit = nsp32_read1(base, ARBIT_STATUS);
		time++;
	} while ((arbit & (ARBIT_WIN | ARBIT_FAIL)) == 0 &&
		 (time <= ARBIT_TIMEOUT_TIME));

	nsp32_dbg(NSP32_DEBUG_AUTOSCSI,
		  "arbit: 0x%x, delay time: %d", arbit, time);

	if (arbit & ARBIT_WIN) {
		/* Arbitration succeeded */
		SCpnt->result = DID_OK << 16;
		nsp32_index_write1(base, EXT_PORT, LED_ON); /* PCI LED on */
	} else if (arbit & ARBIT_FAIL) {
		/* Arbitration failed */
		SCpnt->result = DID_BUS_BUSY << 16;
		status = FALSE;
	} else {
		/*
		 * unknown error or ARBIT_GO timeout,
		 * something lock up! guess no connection.
		 */
		nsp32_dbg(NSP32_DEBUG_AUTOSCSI, "arbit timeout");
		SCpnt->result = DID_NO_CONNECT << 16;
		status = FALSE;
	}

	/*
	 * clear Arbit
	 */
	nsp32_write1(base, SET_ARBIT, ARBIT_CLEAR);

	return status;
}
/*
* reselection
*
* Note: This reselection routine is called from msgin_occur,
* reselection target id&lun must be already set.
* SCSI-2 says IDENTIFY implies RESTORE_POINTER operation.
*/
static int nsp32_reselection(struct scsi_cmnd *SCpnt, unsigned char newlun)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int host_id = SCpnt->device->host->this_id;
	unsigned int base = SCpnt->device->host->io_port;
	unsigned char tmpid, newid;

	nsp32_dbg(NSP32_DEBUG_RESELECTION, "enter");

	/*
	 * calculate reselected SCSI ID: mask off our own ID bit, then
	 * take the position of the lowest remaining set bit.
	 */
	tmpid = nsp32_read1(base, RESELECT_ID);
	tmpid &= (~BIT(host_id));
	newid = 0;
	while (tmpid) {
		if (tmpid & 1) {
			break;
		}
		tmpid >>= 1;
		newid++;
	}

	/*
	 * If reselected New ID:LUN is not existed
	 * or current nexus is not existed, unexpected
	 * reselection is occurred. Send reject message.
	 */
	if (newid >= ARRAY_SIZE(data->lunt) || newlun >= ARRAY_SIZE(data->lunt[0])) {
		nsp32_msg(KERN_WARNING, "unknown id/lun");
		return FALSE;
	} else if(data->lunt[newid][newlun].SCpnt == NULL) {
		nsp32_msg(KERN_WARNING, "no SCSI command is processing");
		return FALSE;
	}

	/* switch the driver's current nexus to the reselecting ID:LUN */
	data->cur_id = newid;
	data->cur_lun = newlun;
	data->cur_target = &(data->target[newid]);
	data->cur_lunt = &(data->lunt[newid][newlun]);

	/* reset SACK/SavedACK counter (or ALL clear?) */
	nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);

	return TRUE;
}
/*
* nsp32_setup_sg_table - build scatter gather list for transfer data
* with bus master.
*
* Note: NinjaSCSI-32Bi/UDE bus master can not transfer over 64KB at a time.
*/
/*
 * Map the command's scatterlist for DMA and mirror it into the ASIC's
 * nsp32_sgtable, marking the final entry with SGTEND.  Returns TRUE on
 * success (including the zero-segment case), FALSE on failure.
 *
 * Fix: the log format specifiers used %lx for le32_to_cpu() values,
 * which are u32 — a printf argument-type mismatch (undefined behavior
 * on 64-bit builds).  They now use %x.
 */
static int nsp32_setup_sg_table(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	struct scatterlist *sg;
	nsp32_sgtable *sgt = data->cur_lunt->sglun->sgt;
	int num, i;
	u32_le l;

	if (sgt == NULL) {
		nsp32_dbg(NSP32_DEBUG_SGLIST, "SGT == null");
		return FALSE;
	}

	num = scsi_dma_map(SCpnt);
	if (!num)
		return TRUE;	/* nothing to transfer */
	else if (num < 0)
		return FALSE;	/* mapping failed */
	else {
		scsi_for_each_sg(SCpnt, sg, num, i) {
			/*
			 * Build nsp32_sglist, substitute sg dma addresses.
			 */
			sgt[i].addr = cpu_to_le32(sg_dma_address(sg));
			sgt[i].len  = cpu_to_le32(sg_dma_len(sg));

			/* hardware limit: 64KB per segment
			 * NOTE(review): this error path returns without
			 * scsi_dma_unmap() — verify whether the caller
			 * cleans up the mapping. */
			if (le32_to_cpu(sgt[i].len) > 0x10000) {
				nsp32_msg(KERN_ERR,
					  "can't transfer over 64KB at a time, size=0x%x", le32_to_cpu(sgt[i].len));
				return FALSE;
			}
			nsp32_dbg(NSP32_DEBUG_SGLIST,
				  "num 0x%x : addr 0x%x len 0x%x",
				  i,
				  le32_to_cpu(sgt[i].addr),
				  le32_to_cpu(sgt[i].len ));
		}

		/* set end mark */
		l = le32_to_cpu(sgt[num-1].len);
		sgt[num-1].len = cpu_to_le32(l | SGTEND);
	}

	return TRUE;
}
/*
 * Queue one SCSI command (lock-held variant, wrapped by DEF_SCSI_QCMD).
 * Rejects the command immediately via done() when another command is
 * already in flight (can_queue == 1), when the target ID equals the
 * initiator ID, or when the LUN is out of range.  Otherwise it builds
 * the SG table and outgoing messages (IDENTIFY, optionally SDTR) and
 * starts selection through either AutoParameter or AutoSCSI mode.
 * Always returns 0.
 */
static int nsp32_queuecommand_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	nsp32_target *target;
	nsp32_lunt *cur_lunt;
	int ret;

	nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
		  "enter. target: 0x%x LUN: 0x%x cmnd: 0x%x cmndlen: 0x%x "
		  "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
		  SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
		  scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));

	/* only one command may be in flight at a time */
	if (data->CurrentSC != NULL) {
		nsp32_msg(KERN_ERR, "Currentsc != NULL. Cancel this command request");
		data->CurrentSC = NULL;
		SCpnt->result = DID_NO_CONNECT << 16;
		done(SCpnt);
		return 0;
	}

	/* check target ID is not same as this initiator ID */
	if (scmd_id(SCpnt) == SCpnt->device->host->this_id) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "terget==host???");
		SCpnt->result = DID_BAD_TARGET << 16;
		done(SCpnt);
		return 0;
	}

	/* check target LUN is allowable value */
	if (SCpnt->device->lun >= MAX_LUN) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "no more lun");
		SCpnt->result = DID_BAD_TARGET << 16;
		done(SCpnt);
		return 0;
	}

	show_command(SCpnt);

	/* claim the single command slot and seed the scratch state */
	SCpnt->scsi_done = done;
	data->CurrentSC = SCpnt;
	SCpnt->SCp.Status = CHECK_CONDITION;
	SCpnt->SCp.Message = 0;
	scsi_set_resid(SCpnt, scsi_bufflen(SCpnt));

	SCpnt->SCp.ptr = (char *)scsi_sglist(SCpnt);
	SCpnt->SCp.this_residual = scsi_bufflen(SCpnt);
	SCpnt->SCp.buffer = NULL;
	SCpnt->SCp.buffers_residual = 0;

	/* initialize data */
	data->msgout_len = 0;
	data->msgin_len = 0;
	cur_lunt = &(data->lunt[SCpnt->device->id][SCpnt->device->lun]);
	cur_lunt->SCpnt = SCpnt;
	cur_lunt->save_datp = 0;
	cur_lunt->msgin03 = FALSE;
	data->cur_lunt = cur_lunt;
	data->cur_id = SCpnt->device->id;
	data->cur_lun = SCpnt->device->lun;

	ret = nsp32_setup_sg_table(SCpnt);
	if (ret == FALSE) {
		nsp32_msg(KERN_ERR, "SGT fail");
		SCpnt->result = DID_ERROR << 16;
		nsp32_scsi_done(SCpnt);
		return 0;
	}

	/* Build IDENTIFY */
	nsp32_build_identify(SCpnt);

	/*
	 * If target is the first time to transfer after the reset
	 * (target don't have SDTR_DONE and SDTR_INITIATOR), sync
	 * message SDTR is needed to do synchronous transfer.
	 */
	target = &data->target[scmd_id(SCpnt)];
	data->cur_target = target;

	if (!(target->sync_flag & (SDTR_DONE | SDTR_INITIATOR | SDTR_TARGET))) {
		unsigned char period, offset;

		if (trans_mode != ASYNC_MODE) {
			nsp32_set_max_sync(data, target, &period, &offset);
			nsp32_build_sdtr(SCpnt, period, offset);
			target->sync_flag |= SDTR_INITIATOR;
		} else {
			nsp32_set_async(data, target);
			target->sync_flag |= SDTR_DONE;
		}

		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "SDTR: entry: %d start_period: 0x%x offset: 0x%x\n",
			  target->limit_entry, period, offset);
	} else if (target->sync_flag & SDTR_INITIATOR) {
		/*
		 * It was negotiating SDTR with target, sending from the
		 * initiator, but there are no chance to remove this flag.
		 * Set async because we don't get proper negotiation.
		 */
		nsp32_set_async(data, target);
		target->sync_flag &= ~SDTR_INITIATOR;
		target->sync_flag |= SDTR_DONE;

		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "SDTR_INITIATOR: fall back to async");
	} else if (target->sync_flag & SDTR_TARGET) {
		/*
		 * It was negotiating SDTR with target, sending from target,
		 * but there are no chance to remove this flag. Set async
		 * because we don't get proper negotiation.
		 */
		nsp32_set_async(data, target);
		target->sync_flag &= ~SDTR_TARGET;
		target->sync_flag |= SDTR_DONE;

		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
			  "Unknown SDTR from target is reached, fall back to async.");
	}

	nsp32_dbg(NSP32_DEBUG_TARGETFLAG,
		  "target: %d sync_flag: 0x%x syncreg: 0x%x ackwidth: 0x%x",
		  SCpnt->device->id, target->sync_flag, target->syncreg,
		  target->ackwidth);

	/* Selection */
	if (auto_param == 0) {
		ret = nsp32_selection_autopara(SCpnt);
	} else {
		ret = nsp32_selection_autoscsi(SCpnt);
	}

	if (ret != TRUE) {
		nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND, "selection fail");
		nsp32_scsi_done(SCpnt);
	}

	return 0;
}
/*
 * Instantiate the midlayer queuecommand wrapper: DEF_SCSI_QCMD generates
 * nsp32_queuecommand(), which takes the host lock and then invokes the
 * _lck variant defined above.
 */
static DEF_SCSI_QCMD(nsp32_queuecommand)
/*
 * nsp32hw_init - bring the NinjaSCSI-32 ASIC into a known initial state.
 *
 * Sequence: fix up latency/cache config, mask and drain pending IRQs,
 * program FIFO thresholds for the selected transfer method, set clock
 * divider and bus-master cycle, initialize MISC_WR, check/enable SCSI
 * termination power, stop timers, reset sync parameters and finally
 * program the IRQ select mask.  Always returns TRUE.
 *
 * Note: the register write order below follows the chip specification;
 * do not reorder.
 */
static int nsp32hw_init(nsp32_hw_data *data)
{
	unsigned int   base = data->BaseAddress;
	unsigned short irq_stat;
	unsigned long  lc_reg;
	unsigned char  power;

	/* If the latency-timer field of CFG_LATE_CACHE is zero, set a default. */
	lc_reg = nsp32_index_read4(base, CFG_LATE_CACHE);
	if ((lc_reg & 0xff00) == 0) {
		lc_reg |= (0x20 << 8);
		nsp32_index_write2(base, CFG_LATE_CACHE, lc_reg & 0xffff);
	}

	/* Mask all IRQs and stop any transfer/sequencer activity. */
	nsp32_write2(base, IRQ_CONTROL,        IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_write2(base, TRANSFER_CONTROL,   0);
	nsp32_write4(base, BM_CNT,             0);
	nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);

	/* Drain any interrupt still pending in the chip before enabling. */
	do {
		irq_stat = nsp32_read2(base, IRQ_STATUS);
		nsp32_dbg(NSP32_DEBUG_INIT, "irq_stat 0x%x", irq_stat);
	} while (irq_stat & IRQSTATUS_ANY_IRQ);

	/*
	 * Fill FIFO_FULL_SHLD, FIFO_EMPTY_SHLD. Below parameter is
	 * designated by specification.
	 */
	if ((data->trans_method & NSP32_TRANSFER_PIO) ||
	    (data->trans_method & NSP32_TRANSFER_MMIO)) {
		nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT,  0x40);
		nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x40);
	} else if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
		nsp32_index_write1(base, FIFO_FULL_SHLD_COUNT,  0x10);
		nsp32_index_write1(base, FIFO_EMPTY_SHLD_COUNT, 0x60);
	} else {
		nsp32_dbg(NSP32_DEBUG_INIT, "unknown transfer mode");
	}

	nsp32_dbg(NSP32_DEBUG_INIT, "full 0x%x emp 0x%x",
		  nsp32_index_read1(base, FIFO_FULL_SHLD_COUNT),
		  nsp32_index_read1(base, FIFO_EMPTY_SHLD_COUNT));

	nsp32_index_write1(base, CLOCK_DIV, data->clock);
	nsp32_index_write1(base, BM_CYCLE,  MEMRD_CMD1 | SGT_AUTO_PARA_MEMED_CMD);
	nsp32_write1(base, PARITY_CONTROL, 0);	/* parity check is disable */

	/*
	 * initialize MISC_WRRD register
	 *
	 * Note: Designated parameters is obeyed as following:
	 *	MISC_SCSI_DIRECTION_DETECTOR_SELECT: It must be set.
	 *	MISC_MASTER_TERMINATION_SELECT:      It must be set.
	 *	MISC_BMREQ_NEGATE_TIMING_SEL:        It should be set.
	 *	MISC_AUTOSEL_TIMING_SEL:             It should be set.
	 *	MISC_BMSTOP_CHANGE2_NONDATA_PHASE:   It should be set.
	 *	MISC_DELAYED_BMSTART:                It's selected for safety.
	 *
	 * Note: If MISC_BMSTOP_CHANGE2_NONDATA_PHASE is set, then
	 *	we have to set TRANSFERCONTROL_BM_START as 0 and set
	 *	appropriate value before restarting bus master transfer.
	 */
	nsp32_index_write2(base, MISC_WR,
			   (SCSI_DIRECTION_DETECTOR_SELECT |
			    DELAYED_BMSTART                |
			    MASTER_TERMINATION_SELECT      |
			    BMREQ_NEGATE_TIMING_SEL        |
			    AUTOSEL_TIMING_SEL             |
			    BMSTOP_CHANGE2_NONDATA_PHASE));

	/* If termination power is not sensed, turn it on. */
	nsp32_index_write1(base, TERM_PWR_CONTROL, 0);
	power = nsp32_index_read1(base, TERM_PWR_CONTROL);
	if (!(power & SENSE)) {
		nsp32_msg(KERN_INFO, "term power on");
		nsp32_index_write1(base, TERM_PWR_CONTROL, BPWR);
	}

	nsp32_write2(base, TIMER_SET, TIMER_STOP);
	nsp32_write2(base, TIMER_SET, TIMER_STOP); /* Required 2 times */

	/* Reset synchronous transfer parameters to async defaults. */
	nsp32_write1(base, SYNC_REG,     0);
	nsp32_write1(base, ACK_WIDTH,    0);
	nsp32_write2(base, SEL_TIME_OUT, SEL_TIMEOUT_TIME);

	/*
	 * enable to select designated IRQ (except for
	 * IRQSELECT_SERR, IRQSELECT_PERR, IRQSELECT_BMCNTERR)
	 */
	nsp32_index_write2(base, IRQ_SELECT, IRQSELECT_TIMER_IRQ         |
					     IRQSELECT_SCSIRESET_IRQ     |
					     IRQSELECT_FIFO_SHLD_IRQ     |
					     IRQSELECT_RESELECT_IRQ      |
					     IRQSELECT_PHASE_CHANGE_IRQ  |
					     IRQSELECT_AUTO_SCSI_SEQ_IRQ |
					  // IRQSELECT_BMCNTERR_IRQ      |
					     IRQSELECT_TARGET_ABORT_IRQ  |
					     IRQSELECT_MASTER_ABORT_IRQ );
	/* Unmask interrupts. */
	nsp32_write2(base, IRQ_CONTROL, 0);

	/* PCI LED off */
	nsp32_index_write1(base, EXT_PORT_DDR, LED_OFF);
	nsp32_index_write1(base, EXT_PORT,     LED_OFF);

	return TRUE;
}
/*
 * do_nsp32_isr - interrupt routine.
 *
 * Dispatches on IRQ_STATUS under the host lock: card-removal detection,
 * timer expiry, external SCSI bus reset, AutoSCSI sequencer completion
 * (selection timeout / msg-out / data / msg-in / bus free / status /
 * illegal phase), FIFO threshold, phase change, PCI and BMCNTERR events.
 * Interrupts are masked on entry to the handled path and unmasked at
 * "out:".  Returns IRQ_HANDLED only when the chip actually raised an IRQ
 * (the line may be shared).
 */
static irqreturn_t do_nsp32_isr(int irq, void *dev_id)
{
	nsp32_hw_data    *data  = dev_id;
	unsigned int      base  = data->BaseAddress;
	struct scsi_cmnd *SCpnt = data->CurrentSC;
	unsigned short    auto_stat, irq_stat, trans_stat;
	unsigned char     busmon, busphase;
	unsigned long     flags;
	int               ret;
	int               handled = 0;
	struct Scsi_Host *host    = data->Host;

	spin_lock_irqsave(host->host_lock, flags);

	/*
	 * IRQ check, then enable IRQ mask
	 */
	irq_stat = nsp32_read2(base, IRQ_STATUS);
	nsp32_dbg(NSP32_DEBUG_INTR,
		  "enter IRQ: %d, IRQstatus: 0x%x", irq, irq_stat);
	/* is this interrupt comes from Ninja asic? */
	if ((irq_stat & IRQSTATUS_ANY_IRQ) == 0) {
		nsp32_dbg(NSP32_DEBUG_INTR, "shared interrupt: irq other 0x%x", irq_stat);
		goto out2;
	}
	handled = 1;
	/* Mask further chip IRQs while we process this one. */
	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);

	busmon = nsp32_read1(base, SCSI_BUS_MONITOR);
	busphase = busmon & BUSMON_PHASE_MASK;

	trans_stat = nsp32_read2(base, TRANSFER_STATUS);
	/* All-ones from both registers means the card was pulled (hot unplug). */
	if ((irq_stat == 0xffff) && (trans_stat == 0xffff)) {
		nsp32_msg(KERN_INFO, "card disconnect");
		if (data->CurrentSC != NULL) {
			nsp32_msg(KERN_INFO, "clean up current SCSI command");
			SCpnt->result = DID_BAD_TARGET << 16;
			nsp32_scsi_done(SCpnt);
		}
		goto out;
	}

	/* Timer IRQ */
	if (irq_stat & IRQSTATUS_TIMER_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "timer stop");
		nsp32_write2(base, TIMER_SET, TIMER_STOP);
		goto out;
	}

	/* SCSI reset */
	if (irq_stat & IRQSTATUS_SCSIRESET_IRQ) {
		nsp32_msg(KERN_INFO, "detected someone do bus reset");
		nsp32_do_bus_reset(data);
		if (SCpnt != NULL) {
			SCpnt->result = DID_RESET << 16;
			nsp32_scsi_done(SCpnt);
		}
		goto out;
	}

	/* Everything below needs a command in flight. */
	if (SCpnt == NULL) {
		nsp32_msg(KERN_WARNING, "SCpnt==NULL this can't be happened");
		nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
		goto out;
	}

	/*
	 * AutoSCSI Interrupt.
	 * Note: This interrupt is occurred when AutoSCSI is finished. Then
	 * check SCSIEXECUTEPHASE, and do appropriate action. Each phases are
	 * recorded when AutoSCSI sequencer has been processed.
	 */
	if(irq_stat & IRQSTATUS_AUTOSCSI_IRQ) {
		/* getting SCSI executed phase */
		auto_stat = nsp32_read2(base, SCSI_EXECUTE_PHASE);
		nsp32_write2(base, SCSI_EXECUTE_PHASE, 0);

		/* Selection Timeout, go busfree phase. */
		if (auto_stat & SELECTION_TIMEOUT) {
			nsp32_dbg(NSP32_DEBUG_INTR,
				  "selection timeout occurred");

			SCpnt->result = DID_TIME_OUT << 16;
			nsp32_scsi_done(SCpnt);
			goto out;
		}

		if (auto_stat & MSGOUT_PHASE) {
			/*
			 * MsgOut phase was processed.
			 * If MSG_IN_OCCUER is not set, then MsgOut phase is
			 * completed. Thus, msgout_len must reset.  Otherwise,
			 * nothing to do here. If MSG_OUT_OCCUER is occurred,
			 * then we will encounter the condition and check.
			 */
			if (!(auto_stat & MSG_IN_OCCUER) &&
			     (data->msgout_len <= 3)) {
				/*
				 * !MSG_IN_OCCUER && msgout_len <=3
				 *   ---> AutoSCSI with MSGOUTreg is processed.
				 */
				data->msgout_len = 0;
			};

			nsp32_dbg(NSP32_DEBUG_INTR, "MsgOut phase processed");
		}

		/* NOTE(review): PIO read-out on leftover FIFO data is
		 * disabled; this only logs. Confirm intent before enabling. */
		if ((auto_stat & DATA_IN_PHASE) &&
		    (scsi_get_resid(SCpnt) > 0) &&
		    ((nsp32_read2(base, FIFO_REST_CNT) & FIFO_REST_MASK) != 0)) {
			printk( "auto+fifo\n");
			//nsp32_pio_read(SCpnt);
		}

		if (auto_stat & (DATA_IN_PHASE | DATA_OUT_PHASE)) {
			/* DATA_IN_PHASE/DATA_OUT_PHASE was processed. */
			nsp32_dbg(NSP32_DEBUG_INTR,
				  "Data in/out phase processed");

			/* read BMCNT, SGT pointer addr */
			nsp32_dbg(NSP32_DEBUG_INTR, "BMCNT=0x%lx",
				    nsp32_read4(base, BM_CNT));
			nsp32_dbg(NSP32_DEBUG_INTR, "addr=0x%lx",
				    nsp32_read4(base, SGT_ADR));
			nsp32_dbg(NSP32_DEBUG_INTR, "SACK=0x%lx",
				    nsp32_read4(base, SACK_CNT));
			nsp32_dbg(NSP32_DEBUG_INTR, "SSACK=0x%lx",
				    nsp32_read4(base, SAVED_SACK_CNT));

			scsi_set_resid(SCpnt, 0); /* all data transferred! */
		}

		/*
		 * MsgIn Occur
		 */
		if (auto_stat & MSG_IN_OCCUER) {
			nsp32_msgin_occur(SCpnt, irq_stat, auto_stat);
		}

		/*
		 * MsgOut Occur
		 */
		if (auto_stat & MSG_OUT_OCCUER) {
			nsp32_msgout_occur(SCpnt);
		}

		/*
		 * Bus Free Occur
		 */
		if (auto_stat & BUS_FREE_OCCUER) {
			ret = nsp32_busfree_occur(SCpnt, auto_stat);
			if (ret == TRUE) {
				goto out;
			}
		}

		if (auto_stat & STATUS_PHASE) {
			/*
			 * Read CSB and substitute CSB for SCpnt->result
			 * to save status phase stutas byte.
			 * scsi error handler checks host_byte (DID_*:
			 * low level driver to indicate status), then checks
			 * status_byte (SCSI status byte).
			 */
			SCpnt->result = (int)nsp32_read1(base, SCSI_CSB_IN);
		}

		if (auto_stat & ILLEGAL_PHASE) {
			/* Illegal phase is detected. SACK is not back. */
			nsp32_msg(KERN_WARNING,
				  "AUTO SCSI ILLEGAL PHASE OCCUR!!!!");

			/* TODO: currently we don't have any action... bus reset? */

			/*
			 * To send back SACK, assert, wait, and negate.
			 */
			nsp32_sack_assert(data);
			nsp32_wait_req(data, NEGATE);
			nsp32_sack_negate(data);
		}

		if (auto_stat & COMMAND_PHASE) {
			/* nothing to do */
			nsp32_dbg(NSP32_DEBUG_INTR, "Command phase processed");
		}

		if (auto_stat & AUTOSCSI_BUSY) {
			/* AutoSCSI is running */
		}

		show_autophase(auto_stat);
	}

	/* FIFO_SHLD_IRQ */
	if (irq_stat & IRQSTATUS_FIFO_SHLD_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "FIFO IRQ");

		switch(busphase) {
		case BUSPHASE_DATA_OUT:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/write");

			//nsp32_pio_write(SCpnt);

			break;

		case BUSPHASE_DATA_IN:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/read");

			//nsp32_pio_read(SCpnt);

			break;

		case BUSPHASE_STATUS:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/status");

			SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);

			break;
		default:
			nsp32_dbg(NSP32_DEBUG_INTR, "fifo/other phase");
			nsp32_dbg(NSP32_DEBUG_INTR, "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
			show_busphase(busphase);
			break;
		}

		goto out;
	}

	/* Phase Change IRQ */
	if (irq_stat & IRQSTATUS_PHASE_CHANGE_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "phase change IRQ");

		switch(busphase) {
		case BUSPHASE_MESSAGE_IN:
			nsp32_dbg(NSP32_DEBUG_INTR, "phase chg/msg in");
			nsp32_msgin_occur(SCpnt, irq_stat, 0);
			break;
		default:
			nsp32_msg(KERN_WARNING, "phase chg/other phase?");
			nsp32_msg(KERN_WARNING, "irq_stat=0x%x trans_stat=0x%x\n",
				  irq_stat, trans_stat);
			show_busphase(busphase);
			break;
		}
		goto out;
	}

	/* PCI_IRQ */
	if (irq_stat & IRQSTATUS_PCI_IRQ) {
		nsp32_dbg(NSP32_DEBUG_INTR, "PCI IRQ occurred");
		/* Do nothing */
	}

	/* BMCNTERR_IRQ */
	if (irq_stat & IRQSTATUS_BMCNTERR_IRQ) {
		nsp32_msg(KERN_ERR, "Received unexpected BMCNTERR IRQ! ");
		/*
		 * TODO: To be implemented improving bus master
		 * transfer reliability when BMCNTERR is occurred in
		 * AutoSCSI phase described in specification.
		 */
	}

#if 0
	nsp32_dbg(NSP32_DEBUG_INTR,
		  "irq_stat=0x%x trans_stat=0x%x", irq_stat, trans_stat);
	show_busphase(busphase);
#endif

 out:
	/* disable IRQ mask */
	nsp32_write2(base, IRQ_CONTROL, 0);

 out2:
	spin_unlock_irqrestore(host->host_lock, flags);

	nsp32_dbg(NSP32_DEBUG_INTR, "exit");

	return IRQ_RETVAL(handled);
}
/*
 * SPRINTF - append formatted output to the /proc buffer, advancing `pos`
 * only while `length` has not been exhausted.  Relies on the local
 * variables `pos`, `buffer` and `length` of nsp32_proc_info() below.
 *
 * NOTE(review): the size passed to snprintf is length - (pos - buffer) + 1,
 * so the terminating NUL may land one byte past `length`; confirm the
 * caller's buffer is at least length + 1 bytes.
 */
#undef SPRINTF
#define SPRINTF(args...) \
	do { \
		if(length > (pos - buffer)) { \
			pos += snprintf(pos, length - (pos - buffer) + 1, ## args); \
			nsp32_dbg(NSP32_DEBUG_PROC, "buffer=0x%p pos=0x%p length=%d %d\n", buffer, pos, length,  length - (pos - buffer));\
		} \
	} while(0)
/*
 * nsp32_proc_info - legacy /proc/scsi read handler.
 *
 * Formats adapter status (driver version, IRQ, IO/MMIO ranges, chip
 * revision, OEM/model, current command) plus per-target SDTR state into
 * `buffer` via the SPRINTF macro, then returns the slice requested by
 * `offset`/`length`.  Writes (inout == TRUE) are rejected with -EINVAL.
 */
static int nsp32_proc_info(struct Scsi_Host *host, char *buffer, char **start,
			   off_t offset, int length, int inout)
{
	char          *pos = buffer;
	int            thislength;
	unsigned long  flags;
	nsp32_hw_data *data;
	int            hostno;
	unsigned int   base;
	unsigned char  mode_reg;
	int            id, speed;
	long           model;

	/* Write is not supported, just return. */
	if (inout == TRUE) {
		return -EINVAL;
	}

	hostno = host->host_no;
	data = (nsp32_hw_data *)host->hostdata;
	base = host->io_port;

	SPRINTF("NinjaSCSI-32 status\n\n");
	SPRINTF("Driver version:        %s, $Revision: 1.33 $\n", nsp32_release_version);
	SPRINTF("SCSI host No.:         %d\n",		hostno);
	SPRINTF("IRQ:                   %d\n",		host->irq);
	SPRINTF("IO:                    0x%lx-0x%lx\n", host->io_port, host->io_port + host->n_io_port - 1);
	SPRINTF("MMIO(virtual address): 0x%lx-0x%lx\n",	host->base, host->base + data->MmioLength - 1);
	SPRINTF("sg_tablesize:          %d\n",		host->sg_tablesize);
	SPRINTF("Chip revision:         0x%x\n",	(nsp32_read2(base, INDEX_REG) >> 8) & 0xff);

	mode_reg = nsp32_index_read1(base, CHIP_MODE);
	model    = data->pci_devid->driver_data;

#ifdef CONFIG_PM
	SPRINTF("Power Management:      %s\n",          (mode_reg & OPTF) ? "yes" : "no");
#endif
	SPRINTF("OEM:                   %ld, %s\n",     (mode_reg & (OEM0|OEM1)), nsp32_model[model]);

	/* CurrentSC is updated from interrupt context; read it locked. */
	spin_lock_irqsave(&(data->Lock), flags);
	SPRINTF("CurrentSC:             0x%p\n\n",      data->CurrentSC);
	spin_unlock_irqrestore(&(data->Lock), flags);

	/* One line per possible target ID with its negotiated SDTR state. */
	SPRINTF("SDTR status\n");
	for (id = 0; id < ARRAY_SIZE(data->target); id++) {

		SPRINTF("id %d: ", id);

		if (id == host->this_id) {
			SPRINTF("----- NinjaSCSI-32 host adapter\n");
			continue;
		}

		if (data->target[id].sync_flag == SDTR_DONE) {
			if (data->target[id].period == 0            &&
			    data->target[id].offset == ASYNC_OFFSET ) {
				SPRINTF("async");
			} else {
				SPRINTF(" sync");
			}
		} else {
			SPRINTF(" none");
		}

		if (data->target[id].period != 0) {

			speed = 1000000 / (data->target[id].period * 4);

			SPRINTF(" transfer %d.%dMB/s, offset %d",
				speed / 1000,
				speed % 1000,
				data->target[id].offset
				);
		}
		SPRINTF("\n");
	}

	/* Return the portion of the formatted text requested by the caller. */
	thislength = pos - (buffer + offset);

	if(thislength < 0) {
		*start = NULL;
                return 0;
	}


	thislength = min(thislength, length);
	*start = buffer + offset;

	return thislength;
}
#undef SPRINTF
/*
 * nsp32_scsi_done - finish the command currently tracked by the host data.
 *
 * Unmaps DMA, quiesces the bus-master engine, hands the command back to
 * the SCSI midlayer via its completion callback, and clears all
 * per-command bookkeeping.  The caller must have stored the final
 * DID_* status in SCpnt->result before calling this.
 */
static void nsp32_scsi_done(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *hw   = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   port = SCpnt->device->host->io_port;

	scsi_dma_unmap(SCpnt);

	/* Stop any bus-master activity (clears TRANSFERCONTROL_BM_START). */
	nsp32_write2(port, TRANSFER_CONTROL, 0);
	nsp32_write4(port, BM_CNT,           0);

	/* Report completion to the SCSI midlayer. */
	(*SCpnt->scsi_done)(SCpnt);

	/* Drop every reference to the just-completed command. */
	hw->cur_lunt->SCpnt = NULL;
	hw->cur_lunt        = NULL;
	hw->cur_target      = NULL;
	hw->CurrentSC       = NULL;
}
/*
* Bus Free Occur
*
* Current Phase is BUSFREE. AutoSCSI is automatically execute BUSFREE phase
* with ACK reply when below condition is matched:
* MsgIn 00: Command Complete.
* MsgIn 02: Save Data Pointer.
* MsgIn 04: Diconnect.
* In other case, unexpected BUSFREE is detected.
*/
static int nsp32_busfree_occur(struct scsi_cmnd *SCpnt, unsigned short execph)
{
nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
unsigned int base = SCpnt->device->host->io_port;
nsp32_dbg(NSP32_DEBUG_BUSFREE, "enter execph=0x%x", execph);
show_autophase(execph);
nsp32_write4(base, BM_CNT, 0);
nsp32_write2(base, TRANSFER_CONTROL, 0);
/*
* MsgIn 02: Save Data Pointer
*
* VALID:
* Save Data Pointer is received. Adjust pointer.
*
* NO-VALID:
* SCSI-3 says if Save Data Pointer is not received, then we restart
* processing and we can't adjust any SCSI data pointer in next data
* phase.
*/
if (execph & MSGIN_02_VALID) {
nsp32_dbg(NSP32_DEBUG_BUSFREE, "MsgIn02_Valid");
/*
* Check sack_cnt/saved_sack_cnt, then adjust sg table if
* needed.
*/
if (!(execph & MSGIN_00_VALID) &&
((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE))) {
unsigned int sacklen, s_sacklen;
/*
* Read SACK count and SAVEDSACK count, then compare.
*/
sacklen = nsp32_read4(base, SACK_CNT );
s_sacklen = nsp32_read4(base, SAVED_SACK_CNT);
/*
* If SAVEDSACKCNT == 0, it means SavedDataPointer is
* come after data transferring.
*/
if (s_sacklen > 0) {
/*
* Comparing between sack and savedsack to
* check the condition of AutoMsgIn03.
*
* If they are same, set msgin03 == TRUE,
* COMMANDCONTROL_AUTO_MSGIN_03 is enabled at
* reselection. On the other hand, if they
* aren't same, set msgin03 == FALSE, and
* COMMANDCONTROL_AUTO_MSGIN_03 is disabled at
* reselection.
*/
if (sacklen != s_sacklen) {
data->cur_lunt->msgin03 = FALSE;
} else {
data->cur_lunt->msgin03 = TRUE;
}
nsp32_adjust_busfree(SCpnt, s_sacklen);
}
}
/* This value has not substitude with valid value yet... */
//data->cur_lunt->save_datp = data->cur_datp;
} else {
/*
* no processing.
*/
}
if (execph & MSGIN_03_VALID) {
/* MsgIn03 was valid to be processed. No need processing. */
}
/*
* target SDTR check
*/
if (data->cur_target->sync_flag & SDTR_INITIATOR) {
/*
* SDTR negotiation pulled by the initiator has not
* finished yet. Fall back to ASYNC mode.
*/
nsp32_set_async(data, data->cur_target);
data->cur_target->sync_flag &= ~SDTR_INITIATOR;
data->cur_target->sync_flag |= SDTR_DONE;
} else if (data->cur_target->sync_flag & SDTR_TARGET) {
/*
* SDTR negotiation pulled by the target has been
* negotiating.
*/
if (execph & (MSGIN_00_VALID | MSGIN_04_VALID)) {
/*
* If valid message is received, then
* negotiation is succeeded.
*/
} else {
/*
* On the contrary, if unexpected bus free is
* occurred, then negotiation is failed. Fall
* back to ASYNC mode.
*/
nsp32_set_async(data, data->cur_target);
}
data->cur_target->sync_flag &= ~SDTR_TARGET;
data->cur_target->sync_flag |= SDTR_DONE;
}
/*
* It is always ensured by SCSI standard that initiator
* switches into Bus Free Phase after
* receiving message 00 (Command Complete), 04 (Disconnect).
* It's the reason that processing here is valid.
*/
if (execph & MSGIN_00_VALID) {
/* MsgIn 00: Command Complete */
nsp32_dbg(NSP32_DEBUG_BUSFREE, "command complete");
SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
SCpnt->SCp.Message = 0;
nsp32_dbg(NSP32_DEBUG_BUSFREE,
"normal end stat=0x%x resid=0x%x\n",
SCpnt->SCp.Status, scsi_get_resid(SCpnt));
SCpnt->result = (DID_OK << 16) |
(SCpnt->SCp.Message << 8) |
(SCpnt->SCp.Status << 0);
nsp32_scsi_done(SCpnt);
/* All operation is done */
return TRUE;
} else if (execph & MSGIN_04_VALID) {
/* MsgIn 04: Disconnect */
SCpnt->SCp.Status = nsp32_read1(base, SCSI_CSB_IN);
SCpnt->SCp.Message = 4;
nsp32_dbg(NSP32_DEBUG_BUSFREE, "disconnect");
return TRUE;
} else {
/* Unexpected bus free */
nsp32_msg(KERN_WARNING, "unexpected bus free occurred");
/* DID_ERROR? */
//SCpnt->result = (DID_OK << 16) | (SCpnt->SCp.Message << 8) | (SCpnt->SCp.Status << 0);
SCpnt->result = DID_ERROR << 16;
nsp32_scsi_done(SCpnt);
return TRUE;
}
return FALSE;
}
/*
 * nsp32_adjust_busfree - adjust the SG table after a Save Data Pointer.
 *
 * Note: this driver adjusts the SG table using the SCSI ACK counter
 *       instead of the BMCNT counter!
 */
static void nsp32_adjust_busfree(struct scsi_cmnd *SCpnt, unsigned int s_sacklen)
{
	nsp32_hw_data *data    = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	nsp32_sgtable *sgt     = data->cur_lunt->sglun->sgt;
	int            first   = data->cur_entry;
	int            num_seg = data->cur_lunt->sg_num;
	int            seg;
	unsigned int   sent, rest;
	u32_le         seg_len, seg_addr;

	nsp32_dbg(NSP32_DEBUG_SGLIST, "old resid=0x%x", scsi_get_resid(SCpnt));

	/* Align the saved SACK count to the 4-byte start-address boundary. */
	s_sacklen -= le32_to_cpu(sgt[first].addr) & 3;

	/*
	 * Walk the SG entries from the current one, accumulating each
	 * segment length, until the running total of bytes intended to be
	 * sent passes the saved ACK count.
	 */
	sent = 0;
	for (seg = first; seg < num_seg; seg++) {
		sent += (le32_to_cpu(sgt[seg].len) & ~SGTEND);
		if (sent > s_sacklen) {
			break;
		}
	}

	if (seg == num_seg) {
		/* Every SG entry was consumed: just shrink the residual. */
		if (scsi_get_resid(SCpnt) < sent) {
			nsp32_msg(KERN_ERR, "resid underflow");
		}
		scsi_set_resid(SCpnt, scsi_get_resid(SCpnt) - sent);
		nsp32_dbg(NSP32_DEBUG_SGLIST, "new resid=0x%x",
			  scsi_get_resid(SCpnt));
		return;
	}

	/*
	 * sent == s_sacklen can only occur at the head of an SG element,
	 * in which case `rest` below is still computed correctly.
	 */

	/* Bytes of the current entry that remain to be transferred. */
	rest = sent - s_sacklen;

	/* Rewrite the partially-transferred entry to its remaining tail. */
	seg_len  = le32_to_cpu(sgt[seg].len);
	seg_addr = le32_to_cpu(sgt[seg].addr);
	seg_addr += (seg_len - rest);
	sgt[seg].addr = cpu_to_le32(seg_addr);
	sgt[seg].len  = cpu_to_le32(rest);

	/* Resume from the adjusted entry. */
	data->cur_entry = seg;
}
/*
 * nsp32_msgout_occur - send message-out bytes by hand.
 *
 * NinjaSCSI-32Bi/UDE automatically processes up to 3 messages in the
 * message-out phase; with more than 3 the HBA raises an interrupt and we
 * must clock the bytes out ourselves with REQ/ACK handshaking.  On the
 * last byte the AutoSCSI restart is programmed first so that ATN is
 * negated automatically.
 *
 * Note: a former local `new_sgtp` (SGT restart address) was computed here
 * but never written to any register; the dead computation was removed.
 */
static void nsp32_msgout_occur(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;
	int i;

	nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
		  "enter: msgout_len: 0x%x", data->msgout_len);

	/*
	 * If the MsgOut phase occurred without any message queued,
	 * send No_Operation (SCSI-2).
	 */
	if (data->msgout_len == 0) {
		nsp32_build_nop(SCpnt);
	}

	/*
	 * send messages
	 */
	for (i = 0; i < data->msgout_len; i++) {
		nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR,
			  "%d : 0x%x", i, data->msgoutbuf[i]);

		/*
		 * Check REQ is asserted.
		 */
		nsp32_wait_req(data, ASSERT);

		if (i == (data->msgout_len - 1)) {
			/*
			 * For the last message byte, program the AutoSCSI
			 * restart before sending back the ACK; the restart
			 * automatically negates the ATN signal.
			 */
			nsp32_write2(base, COMMAND_CONTROL,
				     (CLEAR_CDB_FIFO_POINTER |
				      AUTO_COMMAND_PHASE     |
				      AUTOSCSI_RESTART       |
				      AUTO_MSGIN_00_OR_04    |
				      AUTO_MSGIN_02          ));
		}
		/*
		 * Write the byte with SACK, then wait until SACK is
		 * automatically negated.
		 */
		nsp32_write1(base, SCSI_DATA_WITH_ACK, data->msgoutbuf[i]);
		nsp32_wait_sack(data, NEGATE);

		nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "bus: 0x%x\n",
			  nsp32_read1(base, SCSI_BUS_MONITOR));
	}

	data->msgout_len = 0;

	nsp32_dbg(NSP32_DEBUG_MSGOUTOCCUR, "exit");
}
/*
 * Restart AutoSCSI
 *
 * Note: restarting AutoSCSI requires programming, in order:
 *	SYNC_REG, ACK_WIDTH, SGT_ADR, TRANSFER_CONTROL
 * and finally COMMAND_CONTROL with the restart bits OR-ed into `command`.
 */
static void nsp32_restart_autoscsi(struct scsi_cmnd *SCpnt, unsigned short command)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = data->BaseAddress;
	unsigned short transfer = 0;

	nsp32_dbg(NSP32_DEBUG_RESTART, "enter");

	/* Should never trigger: both are set while a command is active. */
	if (data->cur_target == NULL || data->cur_lunt == NULL) {
		nsp32_msg(KERN_ERR, "Target or Lun is invalid");
	}

	/*
	 * set SYNC_REG
	 * Don't set BM_START_ADR before setting this register.
	 */
	nsp32_write1(base, SYNC_REG, data->cur_target->syncreg);

	/*
	 * set ACKWIDTH
	 */
	nsp32_write1(base, ACK_WIDTH, data->cur_target->ackwidth);

	/*
	 * set SREQ hazard killer sampling rate
	 */
	nsp32_write1(base, SREQ_SMPL_RATE, data->cur_target->sample_reg);

	/*
	 * set SGT ADDR (physical address)
	 */
	nsp32_write4(base, SGT_ADR, data->cur_lunt->sglun_paddr);

	/*
	 * set TRANSFER CONTROL REG
	 */
	transfer = 0;
	transfer |= (TRANSFER_GO | ALL_COUNTER_CLR);
	if (data->trans_method & NSP32_TRANSFER_BUSMASTER) {
		/* Only kick the bus master when there is data to move. */
		if (scsi_bufflen(SCpnt) > 0) {
			transfer |= BM_START;
		}
	} else if (data->trans_method & NSP32_TRANSFER_MMIO) {
		transfer |= CB_MMIO_MODE;
	} else if (data->trans_method & NSP32_TRANSFER_PIO) {
		transfer |= CB_IO_MODE;
	}
	nsp32_write2(base, TRANSFER_CONTROL, transfer);

	/*
	 * restart AutoSCSI
	 *
	 * TODO: COMMANDCONTROL_AUTO_COMMAND_PHASE is needed ?
	 */
	command |= (CLEAR_CDB_FIFO_POINTER |
		    AUTO_COMMAND_PHASE     |
		    AUTOSCSI_RESTART       );
	nsp32_write2(base, COMMAND_CONTROL, command);

	nsp32_dbg(NSP32_DEBUG_RESTART, "exit");
}
/*
 * nsp32_msgin_occur - handle a message-in byte the AutoSCSI sequencer
 * could not process automatically.
 *
 * Reads one byte from the SCSI data bus, accumulates multi-byte messages
 * in data->msginbuf, and acts on the complete message: reselection
 * IDENTIFY, pointer save/restore, SDTR negotiation, message reject, etc.
 * Afterwards AutoSCSI is restarted and the REQ/ACK handshake completed.
 * Unsupported messages are answered with Message Reject via the `reject`
 * path.
 */
static void nsp32_msgin_occur(struct scsi_cmnd *SCpnt,
			      unsigned long     irq_status,
			      unsigned short    execph)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;
	unsigned char  msg;
	unsigned char  msgtype;
	unsigned char  newlun;
	unsigned short command  = 0;
	int            msgclear = TRUE;	/* TRUE once a complete message was consumed */
	long           new_sgtp;
	int            ret;

	/*
	 * read first message
	 *    Use SCSIDATA_W_ACK instead of SCSIDATAIN, because the procedure
	 *    of Message-In have to be processed before sending back SCSI ACK.
	 *
	 * NOTE(review): the code below actually reads SCSI_DATA_IN and then
	 * asserts ACK separately via nsp32_sack_assert(); confirm the comment
	 * above against the register usage.
	 */
	msg = nsp32_read1(base, SCSI_DATA_IN);
	data->msginbuf[(unsigned char)data->msgin_len] = msg;
	msgtype = data->msginbuf[0];
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR,
		  "enter: msglen: 0x%x msgin: 0x%x msgtype: 0x%x",
		  data->msgin_len, msg, msgtype);

	/*
	 * TODO: We need checking whether bus phase is message in?
	 */

	/*
	 * assert SCSI ACK
	 */
	nsp32_sack_assert(data);

	/*
	 * processing IDENTIFY (bit 7 set => IDENTIFY message)
	 */
	if (msgtype & 0x80) {
		if (!(irq_status & IRQSTATUS_RESELECT_OCCUER)) {
			/* Invalid (non reselect) phase */
			goto reject;
		}

		newlun = msgtype & 0x1f; /* TODO: SPI-3 compliant? */
		ret = nsp32_reselection(SCpnt, newlun);
		if (ret == TRUE) {
			goto restart;
		} else {
			goto reject;
		}
	}

	/*
	 * processing messages except for IDENTIFY
	 *
	 * TODO: Messages are all SCSI-2 terminology. SCSI-3 compliance is TODO.
	 */
	switch (msgtype) {
	/*
	 * 1-byte message
	 */
	case COMMAND_COMPLETE:
	case DISCONNECT:
		/*
		 * These messages should not be occurred.
		 * They should be processed on AutoSCSI sequencer.
		 */
		nsp32_msg(KERN_WARNING,
			   "unexpected message of AutoSCSI MsgIn: 0x%x", msg);
		break;

	case RESTORE_POINTERS:
		/*
		 * AutoMsgIn03 is disabled, and HBA gets this message.
		 */

		if ((execph & DATA_IN_PHASE) || (execph & DATA_OUT_PHASE)) {
			unsigned int s_sacklen;

			s_sacklen = nsp32_read4(base, SAVED_SACK_CNT);
			if ((execph & MSGIN_02_VALID) && (s_sacklen > 0)) {
				nsp32_adjust_busfree(SCpnt, s_sacklen);
			} else {
				/* No need to rewrite SGT */
			}
		}
		data->cur_lunt->msgin03 = FALSE;

		/* Update with the new value */

		/* reset SACK/SavedACK counter (or ALL clear?) */
		nsp32_write4(base, CLR_COUNTER, CLRCOUNTER_ALLMASK);

		/*
		 * set new sg pointer
		 */
		new_sgtp = data->cur_lunt->sglun_paddr +
			(data->cur_lunt->cur_entry * sizeof(nsp32_sgtable));
		nsp32_write4(base, SGT_ADR, new_sgtp);

		break;

	case SAVE_POINTERS:
		/*
		 * These messages should not be occurred.
		 * They should be processed on AutoSCSI sequencer.
		 */
		nsp32_msg (KERN_WARNING,
			   "unexpected message of AutoSCSI MsgIn: SAVE_POINTERS");

		break;

	case MESSAGE_REJECT:
		/* If previous message_out is sending SDTR, and get
		   message_reject from target, SDTR negotiation is failed */
		if (data->cur_target->sync_flag &
				(SDTR_INITIATOR | SDTR_TARGET)) {
			/*
			 * Current target is negotiating SDTR, but it's
			 * failed.  Fall back to async transfer mode, and set
			 * SDTR_DONE.
			 */
			nsp32_set_async(data, data->cur_target);
			data->cur_target->sync_flag &= ~SDTR_INITIATOR;
			data->cur_target->sync_flag |= SDTR_DONE;
		}
		break;

	case LINKED_CMD_COMPLETE:
	case LINKED_FLG_CMD_COMPLETE:
		/* queue tag is not supported currently */
		nsp32_msg (KERN_WARNING,
			   "unsupported message: 0x%x", msgtype);
		break;

	case INITIATE_RECOVERY:
		/* staring ECA (Extended Contingent Allegiance) state. */
		/* This message is declined in SPI2 or later. */

		goto reject;

	/*
	 * 2-byte message
	 */
	case SIMPLE_QUEUE_TAG:
	case 0x23:
		/*
		 * 0x23: Ignore_Wide_Residue is not declared in scsi.h.
		 * No support is needed.
		 */
		if (data->msgin_len >= 1) {
			goto reject;
		}

		/* current position is 1-byte of 2 byte */
		msgclear = FALSE;

		break;

	/*
	 * extended message
	 */
	case EXTENDED_MESSAGE:
		if (data->msgin_len < 1) {
			/*
			 * Current position does not reach 2-byte
			 * (2-byte is extended message length).
			 */
			msgclear = FALSE;
			break;
		}

		if ((data->msginbuf[1] + 1) > data->msgin_len) {
			/*
			 * Current extended message has msginbuf[1] + 2
			 * (msgin_len starts counting from 0, so buf[1] + 1).
			 * If current message position is not finished,
			 * continue receiving message.
			 */
			msgclear = FALSE;
			break;
		}

		/*
		 * Reach here means regular length of each type of
		 * extended messages.
		 */
		switch (data->msginbuf[2]) {
		case EXTENDED_MODIFY_DATA_POINTER:
			/* TODO */
			goto reject; /* not implemented yet */
			break;

		case EXTENDED_SDTR:
			/*
			 * Exchange this message between initiator and target.
			 */
			if (data->msgin_len != EXTENDED_SDTR_LEN + 1) {
				/*
				 * received inappropriate message.
				 */
				goto reject;
				break;
			}

			nsp32_analyze_sdtr(SCpnt);

			break;

		case EXTENDED_EXTENDED_IDENTIFY:
			/* SCSI-I only, not supported. */
			goto reject; /* not implemented yet */

			break;

		case EXTENDED_WDTR:
			goto reject; /* not implemented yet */

			break;

		default:
			goto reject;
		}
		break;

	default:
		goto reject;
	}

 restart:
	if (msgclear == TRUE) {
		data->msgin_len = 0;

		/*
		 * If restarting AutoSCSI, but there are some message to out
		 * (msgout_len > 0), set AutoATN, and set SCSIMSGOUT as 0
		 * (MV_VALID = 0). When commandcontrol is written with
		 * AutoSCSI restart, at the same time MsgOutOccur should be
		 * happened (however, such situation is really possible...?).
		 */
		if (data->msgout_len > 0) {
			nsp32_write4(base, SCSI_MSG_OUT, 0);
			command |= AUTO_ATN;
		}

		/*
		 * restart AutoSCSI
		 * If it's failed, COMMANDCONTROL_AUTO_COMMAND_PHASE is needed.
		 */
		command |= (AUTO_MSGIN_00_OR_04 | AUTO_MSGIN_02);

		/*
		 * If current msgin03 is TRUE, then flag on.
		 */
		if (data->cur_lunt->msgin03 == TRUE) {
			command |= AUTO_MSGIN_03;
		}
		data->cur_lunt->msgin03 = FALSE;
	} else {
		/* Message not complete yet: keep accumulating bytes. */
		data->msgin_len++;
	}

	/*
	 * restart AutoSCSI
	 */
	nsp32_restart_autoscsi(SCpnt, command);

	/*
	 * wait SCSI REQ negate for REQ-ACK handshake
	 */
	nsp32_wait_req(data, NEGATE);

	/*
	 * negate SCSI ACK
	 */
	nsp32_sack_negate(data);

	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit");

	return;

 reject:
	nsp32_msg(KERN_WARNING,
		  "invalid or unsupported MessageIn, rejected. "
		  "current msg: 0x%x (len: 0x%x), processing msg: 0x%x",
		  msg, data->msgin_len, msgtype);
	nsp32_build_reject(SCpnt);
	data->msgin_len = 0;

	goto restart;
}
/*
 * nsp32_analyze_sdtr - act on a received SDTR (Synchronous Data Transfer
 * Request) message in data->msginbuf (period in byte 3, offset in byte 4).
 *
 * Two cases:
 *  - We initiated SDTR (SDTR_INITIATOR set): validate the target's reply
 *    and program the negotiated sync parameters, or fall back to async /
 *    reject on bad values.
 *  - The target initiated SDTR: clamp period/offset to what the adapter
 *    supports and send back a responding SDTR.
 */
static void nsp32_analyze_sdtr(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data   *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	nsp32_target     *target     = data->cur_target;
	nsp32_sync_table *synct;
	unsigned char     get_period = data->msginbuf[3];
	unsigned char     get_offset = data->msginbuf[4];
	int               entry;
	int               syncnum;

	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "enter");

	synct   = data->synct;
	syncnum = data->syncnum;

	/*
	 * If this initiator sent the SDTR message, then target responds SDTR,
	 * initiator SYNCREG, ACKWIDTH from SDTR parameter.
	 * Messages are not appropriate, then send back reject message.
	 * If initiator did not send the SDTR, but target sends SDTR,
	 * initiator calculator the appropriate parameter and send back SDTR.
	 */
	if (target->sync_flag & SDTR_INITIATOR) {
		/*
		 * Initiator sent SDTR, the target responds and
		 * send back negotiation SDTR.
		 */
		nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target responds SDTR");

		target->sync_flag &= ~SDTR_INITIATOR;
		target->sync_flag |= SDTR_DONE;

		/*
		 * offset:
		 */
		if (get_offset > SYNC_OFFSET) {
			/*
			 * Negotiation is failed, the target send back
			 * unexpected offset value.
			 */
			goto reject;
		}

		if (get_offset == ASYNC_OFFSET) {
			/*
			 * Negotiation is succeeded, the target want
			 * to fall back into asynchronous transfer mode.
			 */
			goto async;
		}

		/*
		 * period:
		 *    Check whether sync period is too short. If too short,
		 *    fall back to async mode. If it's ok, then investigate
		 *    the received sync period. If sync period is acceptable
		 *    between sync table start_period and end_period, then
		 *    set this I_T nexus as sent offset and period.
		 *    If it's not acceptable, send back reject and fall back
		 *    to async mode.
		 */
		if (get_period < data->synct[0].period_num) {
			/*
			 * Negotiation is failed, the target send back
			 * unexpected period value.
			 */
			goto reject;
		}

		entry = nsp32_search_period_entry(data, target, get_period);

		if (entry < 0) {
			/*
			 * Target want to use long period which is not
			 * acceptable NinjaSCSI-32Bi/UDE.
			 */
			goto reject;
		}

		/*
		 * Set new sync table and offset in this I_T nexus.
		 */
		nsp32_set_sync_entry(data, target, entry, get_offset);
	} else {
		/* Target send SDTR to initiator. */
		nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "target send SDTR");

		target->sync_flag |= SDTR_INITIATOR;

		/* offset: clamp to the adapter's maximum. */
		if (get_offset > SYNC_OFFSET) {
			/* send back as SYNC_OFFSET */
			get_offset = SYNC_OFFSET;
		}

		/* period: clamp to the fastest supported period. */
		if (get_period < data->synct[0].period_num) {
			get_period = data->synct[0].period_num;
		}

		entry = nsp32_search_period_entry(data, target, get_period);

		if (get_offset == ASYNC_OFFSET || entry < 0) {
			nsp32_set_async(data, target);
			nsp32_build_sdtr(SCpnt, 0, ASYNC_OFFSET);
		} else {
			nsp32_set_sync_entry(data, target, entry, get_offset);
			nsp32_build_sdtr(SCpnt, get_period, get_offset);
		}
	}

	target->period = get_period;
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit");
	return;

 reject:
	/*
	 * If the current message is unacceptable, send back to the target
	 * with reject message.
	 */
	nsp32_build_reject(SCpnt);

 async:
	nsp32_set_async(data, target);	/* set as ASYNC transfer mode */

	target->period = 0;
	nsp32_dbg(NSP32_DEBUG_MSGINOCCUR, "exit: set async");
	return;
}
/*
* Search config entry number matched in sync_table from given
* target and speed period value. If failed to search, return negative value.
*/
static int nsp32_search_period_entry(nsp32_hw_data *data,
				     nsp32_target *target,
				     unsigned char period)
{
	int idx;

	/* Sanity-check the per-target lower bound before scanning. */
	if (target->limit_entry >= data->syncnum) {
		nsp32_msg(KERN_ERR, "limit_entry exceeds syncnum!");
		target->limit_entry = 0;
	}

	/*
	 * Scan the sync table, starting from this target's speed limit,
	 * for an entry whose [start_period, end_period] window contains
	 * the requested period.
	 */
	for (idx = target->limit_entry; idx < data->syncnum; idx++) {
		if (period >= data->synct[idx].start_period &&
		    period <= data->synct[idx].end_period) {
			return idx;	/* matching entry found */
		}
	}

	/* period lies outside every table entry: report failure */
	return -1;
}
/*
* target <-> initiator use ASYNC transfer
*/
static void nsp32_set_async(nsp32_hw_data *data, nsp32_target *target)
{
	/* period_num of the target's speed-limit entry feeds the sync reg */
	unsigned char limit_period =
		data->synct[target->limit_entry].period_num;

	/* Program this target for plain asynchronous transfers. */
	target->offset     = ASYNC_OFFSET;
	target->period     = 0;
	target->syncreg    = TO_SYNCREG(limit_period, ASYNC_OFFSET);
	target->ackwidth   = 0;
	target->sample_reg = 0;

	nsp32_dbg(NSP32_DEBUG_SYNC, "set async");
}
/*
* target <-> initiator use maximum SYNC transfer
*/
static void nsp32_set_max_sync(nsp32_hw_data *data,
			       nsp32_target *target,
			       unsigned char *period,
			       unsigned char *offset)
{
	const int limit = target->limit_entry;

	/*
	 * Hand back (via *period / *offset) and program the fastest
	 * synchronous parameters this target's limit entry allows.
	 */
	*period = data->synct[limit].start_period;
	*offset = SYNC_OFFSET;

	target->syncreg    = TO_SYNCREG(data->synct[limit].period_num, *offset);
	target->ackwidth   = data->synct[limit].ackwidth;
	target->offset     = *offset;
	target->sample_reg = 0;			/* disable SREQ sampling */
}
/*
* target <-> initiator use entry number speed
*/
static void nsp32_set_sync_entry(nsp32_hw_data *data,
				 nsp32_target *target,
				 int entry,
				 unsigned char offset)
{
	unsigned char period, ackwidth, sample_rate;

	/*
	 * Program this I_T nexus with the timing parameters of the
	 * selected sync-table entry and the negotiated REQ/ACK offset.
	 * (The original "offset = offset;" self-assignment was a no-op
	 * and has been dropped.)
	 */
	period      = data->synct[entry].period_num;
	ackwidth    = data->synct[entry].ackwidth;
	sample_rate = data->synct[entry].sample_rate;

	target->syncreg    = TO_SYNCREG(period, offset);
	target->ackwidth   = ackwidth;
	target->offset     = offset;
	/* enable SREQ sampling at this entry's rate */
	target->sample_reg = sample_rate | SAMPLING_ENABLE;

	nsp32_dbg(NSP32_DEBUG_SYNC, "set sync");
}
/*
* It waits until SCSI REQ becomes assertion or negation state.
*
* Note: If nsp32_msgin_occur is called, we asserts SCSI ACK. Then
* connected target responds SCSI REQ negation. We have to wait
* SCSI REQ becomes negation in order to negate SCSI ACK signal for
* REQ-ACK handshake.
*/
static void nsp32_wait_req(nsp32_hw_data *data, int state)
{
	unsigned int  base = data->BaseAddress;
	unsigned char monitor, expect;
	int           elapsed;

	if (state != ASSERT && state != NEGATE) {
		nsp32_msg(KERN_ERR, "unknown state designation");
	}
	/* REQ is BIT(5) */
	expect = (state == ASSERT) ? BUSMON_REQ : 0;

	/* Poll the bus monitor until REQ reaches the desired level. */
	for (elapsed = 0; elapsed < REQSACK_TIMEOUT_TIME; elapsed++) {
		monitor = nsp32_read1(base, SCSI_BUS_MONITOR);
		if ((monitor & BUSMON_REQ) == expect) {
			nsp32_dbg(NSP32_DEBUG_WAIT,
				  "wait_time: %d", elapsed);
			return;
		}
		udelay(1);
	}

	nsp32_msg(KERN_WARNING, "wait REQ timeout, req_bit: 0x%x", expect);
}
/*
* It waits until SCSI SACK becomes assertion or negation state.
*/
static void nsp32_wait_sack(nsp32_hw_data *data, int state)
{
	unsigned int  base = data->BaseAddress;
	unsigned char monitor, expect;
	int           elapsed;

	if (state != ASSERT && state != NEGATE) {
		nsp32_msg(KERN_ERR, "unknown state designation");
	}
	/* ACK is BIT(4) */
	expect = (state == ASSERT) ? BUSMON_ACK : 0;

	/* Poll the bus monitor until ACK reaches the desired level. */
	for (elapsed = 0; elapsed < REQSACK_TIMEOUT_TIME; elapsed++) {
		monitor = nsp32_read1(base, SCSI_BUS_MONITOR);
		if ((monitor & BUSMON_ACK) == expect) {
			nsp32_dbg(NSP32_DEBUG_WAIT,
				  "wait_time: %d", elapsed);
			return;
		}
		udelay(1);
	}

	nsp32_msg(KERN_WARNING, "wait SACK timeout, ack_bit: 0x%x", expect);
}
/*
* assert SCSI ACK
*
* Note: SCSI ACK assertion needs with ACKENB=1, AUTODIRECTION=1.
*/
static void nsp32_sack_assert(nsp32_hw_data *data)
{
	unsigned int base = data->BaseAddress;

	/* Read-modify-write: drive ACK with ACKENB and AUTODIRECTION set. */
	nsp32_write1(base, SCSI_BUS_CONTROL,
		     nsp32_read1(base, SCSI_BUS_CONTROL) |
		     (BUSCTL_ACK | AUTODIRECTION | ACKENB));
}
/*
* negate SCSI ACK
*/
static void nsp32_sack_negate(nsp32_hw_data *data)
{
	unsigned int base = data->BaseAddress;

	/* Read-modify-write: drop only the ACK bit, leave the rest alone. */
	nsp32_write1(base, SCSI_BUS_CONTROL,
		     nsp32_read1(base, SCSI_BUS_CONTROL) & ~BUSCTL_ACK);
}
/*
* Note: n_io_port is defined as 0x7f because I/O register port is
* assigned as:
* 0x800-0x8ff: memory mapped I/O port
* 0x900-0xbff: (map same 0x800-0x8ff I/O port image repeatedly)
* 0xc00-0xfff: CardBus status registers
*/
/*
 * Allocate and register the SCSI host: copy the probe-time template
 * into hostdata, select the sync table for the board clock, allocate
 * autoparam and scatter-gather DMA buffers, initialize LUN/target
 * tables and the HBA hardware, reset the SCSI bus, then attach the
 * host to the SCSI midlayer.
 *
 * Returns 0 on success, 1 on failure (cleanup via the goto chain).
 * NOTE(review): failure returns 1, not a negative errno — the caller
 * propagates this as the PCI probe return value; confirm intent.
 */
static int nsp32_detect(struct pci_dev *pdev)
{
	struct Scsi_Host *host;	/* registered host structure */
	struct resource *res;
	nsp32_hw_data *data;
	int ret;
	int i, j;

	nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");

	/*
	 * register this HBA as SCSI device
	 */
	host = scsi_host_alloc(&nsp32_template, sizeof(nsp32_hw_data));
	if (host == NULL) {
		nsp32_msg (KERN_ERR, "failed to scsi register");
		goto err;
	}

	/*
	 * set nsp32_hw_data
	 */
	data = (nsp32_hw_data *)host->hostdata;

	/* copy probe-time template (irq/io/mmio already filled in) */
	memcpy(data, &nsp32_data_base, sizeof(nsp32_hw_data));

	host->irq       = data->IrqNumber;
	host->io_port   = data->BaseAddress;
	host->unique_id = data->BaseAddress;
	host->n_io_port = data->NumAddress;
	host->base      = (unsigned long)data->MmioAddress;

	data->Host = host;
	spin_lock_init(&(data->Lock));

	data->cur_lunt   = NULL;
	data->cur_target = NULL;

	/*
	 * Bus master transfer mode is supported currently.
	 */
	data->trans_method = NSP32_TRANSFER_BUSMASTER;

	/*
	 * Set clock div, CLOCK_4 (HBA has own external clock, and
	 * dividing * 100ns/4).
	 * Currently CLOCK_4 has only tested, not for CLOCK_2/PCICLK yet.
	 */
	data->clock = CLOCK_4;

	/*
	 * Select appropriate nsp32_sync_table and set I_CLOCKDIV.
	 */
	switch (data->clock) {
	case CLOCK_4:
		/* If data->clock is CLOCK_4, then select 40M sync table. */
		data->synct   = nsp32_sync_table_40M;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M);
		break;
	case CLOCK_2:
		/* If data->clock is CLOCK_2, then select 20M sync table. */
		data->synct   = nsp32_sync_table_20M;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_20M);
		break;
	case PCICLK:
		/* If data->clock is PCICLK, then select pci sync table. */
		data->synct   = nsp32_sync_table_pci;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_pci);
		break;
	default:
		nsp32_msg(KERN_WARNING,
			  "Invalid clock div is selected, set CLOCK_4.");
		/* Use default value CLOCK_4 */
		data->clock   = CLOCK_4;
		data->synct   = nsp32_sync_table_40M;
		data->syncnum = ARRAY_SIZE(nsp32_sync_table_40M);
	}

	/*
	 * setup nsp32_lunt
	 */

	/*
	 * setup DMA
	 */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		nsp32_msg (KERN_ERR, "failed to set PCI DMA mask");
		goto scsi_unregister;
	}

	/*
	 * allocate autoparam DMA resource.
	 */
	data->autoparam = pci_alloc_consistent(pdev, sizeof(nsp32_autoparam), &(data->auto_paddr));
	if (data->autoparam == NULL) {
		nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
		goto scsi_unregister;
	}

	/*
	 * allocate scatter-gather DMA resource.
	 */
	data->sg_list = pci_alloc_consistent(pdev, NSP32_SG_TABLE_SIZE,
					     &(data->sg_paddr));
	if (data->sg_list == NULL) {
		nsp32_msg(KERN_ERR, "failed to allocate DMA memory");
		goto free_autoparam;
	}

	/* point every LUN-table entry at its slice of the SG list */
	for (i = 0; i < ARRAY_SIZE(data->lunt); i++) {
		for (j = 0; j < ARRAY_SIZE(data->lunt[0]); j++) {
			int offset = i * ARRAY_SIZE(data->lunt[0]) + j;
			nsp32_lunt tmp = {
				.SCpnt       = NULL,
				.save_datp   = 0,
				.msgin03     = FALSE,
				.sg_num      = 0,
				.cur_entry   = 0,
				.sglun       = &(data->sg_list[offset]),
				.sglun_paddr = data->sg_paddr + (offset * sizeof(nsp32_sglun)),
			};
			data->lunt[i][j] = tmp;
		}
	}

	/*
	 * setup target
	 */
	for (i = 0; i < ARRAY_SIZE(data->target); i++) {
		nsp32_target *target = &(data->target[i]);

		target->limit_entry = 0;
		target->sync_flag   = 0;
		nsp32_set_async(data, target);
	}

	/*
	 * EEPROM check
	 */
	ret = nsp32_getprom_param(data);
	if (ret == FALSE) {
		data->resettime = 3;	/* default 3 */
	}

	/*
	 * setup HBA
	 */
	nsp32hw_init(data);

	snprintf(data->info_str, sizeof(data->info_str),
		 "NinjaSCSI-32Bi/UDE: irq %d, io 0x%lx+0x%x",
		 host->irq, host->io_port, host->n_io_port);

	/*
	 * SCSI bus reset
	 *
	 * Note: It's important to reset SCSI bus in initialization phase.
	 * NinjaSCSI-32Bi/UDE HBA EEPROM seems to exchange SDTR when
	 * system is coming up, so SCSI devices connected to HBA is set as
	 * un-asynchronous mode. It brings the merit that this HBA is
	 * ready to start synchronous transfer without any preparation,
	 * but we are difficult to control transfer speed. In addition,
	 * it prevents device transfer speed from effecting EEPROM start-up
	 * SDTR. NinjaSCSI-32Bi/UDE has the feature if EEPROM is set as
	 * Auto Mode, then FAST-10M is selected when SCSI devices are
	 * connected same or more than 4 devices. It should be avoided
	 * depending on this specification. Thus, resetting the SCSI bus
	 * restores all connected SCSI devices to asynchronous mode, then
	 * this driver set SDTR safely later, and we can control all SCSI
	 * device transfer mode.
	 */
	nsp32_do_bus_reset(data);

	ret = request_irq(host->irq, do_nsp32_isr, IRQF_SHARED, "nsp32", data);
	if (ret < 0) {
		nsp32_msg(KERN_ERR, "Unable to allocate IRQ for NinjaSCSI32 "
			  "SCSI PCI controller. Interrupt: %d", host->irq);
		goto free_sg_list;
	}

	/*
	 * PCI IO register
	 */
	res = request_region(host->io_port, host->n_io_port, "nsp32");
	if (res == NULL) {
		nsp32_msg(KERN_ERR,
			  "I/O region 0x%lx+0x%lx is already used",
			  data->BaseAddress, data->NumAddress);
		goto free_irq;
	}

	ret = scsi_add_host(host, &pdev->dev);
	if (ret) {
		nsp32_msg(KERN_ERR, "failed to add scsi host");
		goto free_region;
	}
	scsi_scan_host(host);
	pci_set_drvdata(pdev, host);
	return 0;

	/* unwind in strict reverse order of acquisition */
 free_region:
	release_region(host->io_port, host->n_io_port);

 free_irq:
	free_irq(host->irq, data);

 free_sg_list:
	pci_free_consistent(pdev, NSP32_SG_TABLE_SIZE,
			    data->sg_list, data->sg_paddr);

 free_autoparam:
	pci_free_consistent(pdev, sizeof(nsp32_autoparam),
			    data->autoparam, data->auto_paddr);

 scsi_unregister:
	scsi_host_put(host);

 err:
	return 1;
}
static int nsp32_release(struct Scsi_Host *host)
{
	nsp32_hw_data *data = (nsp32_hw_data *)host->hostdata;

	/* Tear down host resources; each is freed only if it was set up. */
	if (data->autoparam)
		pci_free_consistent(data->Pci, sizeof(nsp32_autoparam),
				    data->autoparam, data->auto_paddr);

	if (data->sg_list)
		pci_free_consistent(data->Pci, NSP32_SG_TABLE_SIZE,
				    data->sg_list, data->sg_paddr);

	if (host->irq)
		free_irq(host->irq, data);

	if (host->io_port && host->n_io_port)
		release_region(host->io_port, host->n_io_port);

	if (data->MmioAddress)
		iounmap(data->MmioAddress);

	return 0;
}
static const char *nsp32_info(struct Scsi_Host *shpnt)
{
	/* Report the identification string built at detect time. */
	return ((nsp32_hw_data *)shpnt->hostdata)->info_str;
}
/****************************************************************************
* error handler
*/
/*
 * Error handler: abort the currently connected command.  Stops any
 * in-flight transfer, resets SDTR negotiation state for the current
 * target and completes the command with DID_ABORT.
 */
static int nsp32_eh_abort(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;

	nsp32_msg(KERN_WARNING, "abort");

	/*
	 * Nothing to abort unless a command is currently connected.
	 * Also guard cur_lunt itself: the midlayer may invoke abort
	 * before any nexus has been established, in which case the
	 * original unconditional dereference would oops.
	 */
	if (data->cur_lunt == NULL || data->cur_lunt->SCpnt == NULL) {
		nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort failed");
		return FAILED;
	}

	if (data->cur_target != NULL &&
	    (data->cur_target->sync_flag & (SDTR_INITIATOR | SDTR_TARGET))) {
		/* reset SDTR negotiation */
		data->cur_target->sync_flag = 0;
		nsp32_set_async(data, data->cur_target);
	}

	/* stop the transfer and clear the busmaster counter */
	nsp32_write2(base, TRANSFER_CONTROL, 0);
	nsp32_write2(base, BM_CNT, 0);

	SCpnt->result = DID_ABORT << 16;
	nsp32_scsi_done(SCpnt);

	nsp32_dbg(NSP32_DEBUG_BUSRESET, "abort success");
	return SUCCESS;
}
/*
 * Error handler: reset the SCSI bus.  Interrupts are masked around
 * the reset and the host lock is held for the duration.
 */
static int nsp32_eh_bus_reset(struct scsi_cmnd *SCpnt)
{
	nsp32_hw_data *data = (nsp32_hw_data *)SCpnt->device->host->hostdata;
	unsigned int   base = SCpnt->device->host->io_port;

	spin_lock_irq(SCpnt->device->host->host_lock);

	nsp32_msg(KERN_INFO, "Bus Reset");
	/*
	 * Fix: pointers must be printed with %p; the previous "0x%x"
	 * conversion was a format-specifier mismatch (undefined
	 * behaviour, and truncates on 64-bit).
	 */
	nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=%p", SCpnt);

	/* mask all interrupts while the bus is being reset */
	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_do_bus_reset(data);
	nsp32_write2(base, IRQ_CONTROL, 0);

	spin_unlock_irq(SCpnt->device->host->host_lock);
	return SUCCESS;	/* SCSI bus reset is succeeded at any time. */
}
/*
 * Reset the SCSI bus: stop all transfers, force every target back to
 * asynchronous mode, pulse SCSI RST, then drain stale interrupt status.
 * The write/delay ordering below is a hardware sequence — do not reorder.
 */
static void nsp32_do_bus_reset(nsp32_hw_data *data)
{
	unsigned int   base = data->BaseAddress;
	unsigned short intrdat;
	int i;

	nsp32_dbg(NSP32_DEBUG_BUSRESET, "in");

	/*
	 * stop all transfer
	 * clear TRANSFERCONTROL_BM_START
	 * clear counter
	 */
	nsp32_write2(base, TRANSFER_CONTROL, 0);
	nsp32_write4(base, BM_CNT,           0);
	nsp32_write4(base, CLR_COUNTER,      CLRCOUNTER_ALLMASK);

	/*
	 * fall back to asynchronous transfer mode
	 * initialize SDTR negotiation flag
	 */
	for (i = 0; i < ARRAY_SIZE(data->target); i++) {
		nsp32_target *target = &data->target[i];

		target->sync_flag = 0;
		nsp32_set_async(data, target);
	}

	/*
	 * reset SCSI bus: assert RST, hold, then release
	 */
	nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST);
	/*
	 * NOTE(review): RESET_HOLD_TIME is divided by 1000 for mdelay();
	 * if RESET_HOLD_TIME is expressed in microseconds and is below
	 * 1000 this becomes mdelay(0) — confirm the intended hold time.
	 */
	mdelay(RESET_HOLD_TIME / 1000);
	nsp32_write1(base, SCSI_BUS_CONTROL, 0);

	/* drain any interrupt status raised by the reset itself */
	for(i = 0; i < 5; i++) {
		intrdat = nsp32_read2(base, IRQ_STATUS);	/* dummy read */
		nsp32_dbg(NSP32_DEBUG_BUSRESET, "irq:1: 0x%x", intrdat);
	}

	data->CurrentSC = NULL;
}
/*
 * Error handler: full host reset — re-initialize the HBA hardware and
 * reset the SCSI bus under the host lock.
 */
static int nsp32_eh_host_reset(struct scsi_cmnd *SCpnt)
{
	struct Scsi_Host *host = SCpnt->device->host;
	unsigned int      base = SCpnt->device->host->io_port;
	nsp32_hw_data    *data = (nsp32_hw_data *)host->hostdata;

	nsp32_msg(KERN_INFO, "Host Reset");
	/*
	 * Fix: pointers must be printed with %p; the previous "0x%x"
	 * conversion was a format-specifier mismatch (undefined
	 * behaviour, and truncates on 64-bit).
	 */
	nsp32_dbg(NSP32_DEBUG_BUSRESET, "SCpnt=%p", SCpnt);

	spin_lock_irq(SCpnt->device->host->host_lock);

	nsp32hw_init(data);
	/* mask all interrupts while the bus is being reset */
	nsp32_write2(base, IRQ_CONTROL, IRQ_CONTROL_ALL_IRQ_MASK);
	nsp32_do_bus_reset(data);
	nsp32_write2(base, IRQ_CONTROL, 0);

	spin_unlock_irq(SCpnt->device->host->host_lock);
	return SUCCESS;	/* Host reset is succeeded at any time. */
}
/**************************************************************************
* EEPROM handler
*/
/*
* getting EEPROM parameter
*/
static int nsp32_getprom_param(nsp32_hw_data *data)
{
	int vendor = data->pci_devid->vendor;
	int device = data->pci_devid->device;
	int ret, val, i;

	/*
	 * Validate the EEPROM signature bytes (0x55 at 0x7e, 0xaa at
	 * 0x7f) before trusting any of its contents.
	 */
	ret = nsp32_prom_read(data, 0x7e);
	if (ret != 0x55) {
		nsp32_msg(KERN_INFO, "No EEPROM detected: 0x%x", ret);
		return FALSE;
	}
	ret = nsp32_prom_read(data, 0x7f);
	if (ret != 0xaa) {
		nsp32_msg(KERN_INFO, "Invalid number: 0x%x", ret);
		return FALSE;
	}

	/* Dispatch on the EEPROM type wired to this board variant. */
	if (vendor != PCI_VENDOR_ID_WORKBIT) {
		nsp32_msg(KERN_WARNING, "Unknown EEPROM");
		ret = FALSE;
	} else {
		switch (device) {
		case PCI_DEVICE_ID_WORKBIT_STANDARD:
			ret = nsp32_getprom_c16(data);
			break;
		case PCI_DEVICE_ID_NINJASCSI_32BIB_LOGITEC:
		case PCI_DEVICE_ID_NINJASCSI_32UDE_MELCO:
			ret = nsp32_getprom_at24(data);
			break;
		default:
			nsp32_msg(KERN_WARNING, "Unknown EEPROM");
			ret = FALSE;
			break;
		}
	}

	/* for debug : SPROM data full checking */
	for (i = 0; i <= 0x1f; i++) {
		val = nsp32_prom_read(data, i);
		nsp32_dbg(NSP32_DEBUG_EEPROM,
			  "rom address 0x%x : 0x%x", i, val);
	}

	return ret;
}
/*
* AT24C01A (Logitec: LHA-600S), AT24C02 (Melco Buffalo: IFC-USLP) data map:
*
* ROMADDR
* 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: ASYNC, 0x0c: Ultra-20M, 0x19: Fast-10M
* 0x07 : HBA Synchronous Transfer Period
* Value 0: AutoSync, 1: Manual Setting
* 0x08 - 0x0f : Not Used? (0x0)
* 0x10 : Bus Termination
* Value 0: Auto[ON], 1: ON, 2: OFF
* 0x11 : Not Used? (0)
* 0x12 : Bus Reset Delay Time (0x03)
* 0x13 : Bootable CD Support
* Value 0: Disable, 1: Enable
* 0x14 : Device Scan
* Bit 7 6 5 4 3 2 1 0
* | <----------------->
* | SCSI ID: Value 0: Skip, 1: YES
* |-> Value 0: ALL scan, Value 1: Manual
* 0x15 - 0x1b : Not Used? (0)
* 0x1c : Constant? (0x01) (clock div?)
* 0x1d - 0x7c : Not Used (0xff)
* 0x7d : Not Used? (0xff)
* 0x7e : Constant (0x55), Validity signature
* 0x7f : Constant (0xaa), Validity signature
*/
/*
 * Read AT24C01A/AT24C02-style EEPROM contents (see the data map above)
 * and program each target's speed-limit entry accordingly.
 * Always returns TRUE.
 */
static int nsp32_getprom_at24(nsp32_hw_data *data)
{
	int ret, i;
	int auto_sync;		/* TRUE: auto sync, FALSE: manual (per EEPROM byte 0x07) */
	nsp32_target *target;
	int entry;

	/*
	 * Reset time which is designated by EEPROM.
	 *
	 * TODO: Not used yet.
	 */
	data->resettime = nsp32_prom_read(data, 0x12);

	/*
	 * HBA Synchronous Transfer Period
	 *
	 * Note: auto_sync = 0: auto, 1: manual. Ninja SCSI HBA spec says
	 * that if auto_sync is 0 (auto), and connected SCSI devices are
	 * same or lower than 3, then transfer speed is set as ULTRA-20M.
	 * On the contrary if connected SCSI devices are same or higher
	 * than 4, then transfer speed is set as FAST-10M.
	 *
	 * I break this rule. The number of connected SCSI devices are
	 * only ignored. If auto_sync is 0 (auto), then transfer speed is
	 * forced as ULTRA-20M.
	 */
	ret = nsp32_prom_read(data, 0x07);
	switch (ret) {
	case 0:
		auto_sync = TRUE;
		break;
	case 1:
		auto_sync = FALSE;
		break;
	default:
		/*
		 * NOTE(review): the warning text says "fall back to
		 * manual mode" but auto_sync is set to TRUE, which the
		 * comment above defines as *auto* mode — confirm which
		 * of the two (message or assignment) is intended.
		 */
		nsp32_msg(KERN_WARNING,
			  "Unsupported Auto Sync mode. Fall back to manual mode.");
		auto_sync = TRUE;
	}

	/* module parameter may force ULTRA-20M (auto) mode */
	if (trans_mode == ULTRA20M_MODE) {
		auto_sync = TRUE;
	}

	/*
	 * each device Synchronous Transfer Period
	 */
	for (i = 0; i < NSP32_HOST_SCSIID; i++) {
		target = &data->target[i];
		if (auto_sync == TRUE) {
			target->limit_entry = 0;	/* set as ULTRA20M */
		} else {
			ret   = nsp32_prom_read(data, i);
			entry = nsp32_search_period_entry(data, target, ret);
			if (entry < 0) {
				/* search failed... set maximum speed */
				entry = 0;
			}
			target->limit_entry = entry;
		}
	}

	return TRUE;
}
/*
* C16 110 (I-O Data: SC-NBD) data map:
*
* ROMADDR
* 0x00 - 0x06 : Device Synchronous Transfer Period (SCSI ID 0 - 6)
* Value 0x0: 20MB/S, 0x1: 10MB/S, 0x2: 5MB/S, 0x3: ASYNC
* 0x07 : 0 (HBA Synchronous Transfer Period: Auto Sync)
* 0x08 - 0x0f : Not Used? (0x0)
* 0x10 : Transfer Mode
* Value 0: PIO, 1: Busmater
* 0x11 : Bus Reset Delay Time (0x00-0x20)
* 0x12 : Bus Termination
* Value 0: Disable, 1: Enable
* 0x13 - 0x19 : Disconnection
* Value 0: Disable, 1: Enable
* 0x1a - 0x7c : Not Used? (0)
* 0x7d : Not Used? (0xf8)
* 0x7e : Constant (0x55), Validity signature
* 0x7f : Constant (0xaa), Validity signature
*/
static int nsp32_getprom_c16(nsp32_hw_data *data)
{
	/* EEPROM speed code -> synchronous period value (see map above) */
	static const unsigned char c16_period[] = {
		0x0c,	/* 0: 20MB/s */
		0x19,	/* 1: 10MB/s */
		0x32,	/* 2:  5MB/s */
		0x00,	/* 3: ASYNC  */
	};
	nsp32_target *target;
	int ret, i, entry, val;

	/*
	 * Reset time which is designated by EEPROM.
	 *
	 * TODO: Not used yet.
	 */
	data->resettime = nsp32_prom_read(data, 0x11);

	/*
	 * each device Synchronous Transfer Period
	 */
	for (i = 0; i < NSP32_HOST_SCSIID; i++) {
		target = &data->target[i];
		ret = nsp32_prom_read(data, i);
		if (ret >= 0 && ret < (int)ARRAY_SIZE(c16_period)) {
			val = c16_period[ret];
		} else {
			val = 0x0c;	/* default 20MB/s */
		}

		entry = nsp32_search_period_entry(data, target, val);
		if (entry < 0 || trans_mode == ULTRA20M_MODE) {
			/* search failed... set maximum speed */
			entry = 0;
		}
		target->limit_entry = entry;
	}

	return TRUE;
}
/*
* Atmel AT24C01A (drived in 5V) serial EEPROM routines
*/
/*
 * Clock out the AT24-style device-select byte (1 0 1 0 A2 A1 A0 R/W,
 * all address pins wired to GND) followed by the acknowledge clock
 * cycle.  rw is 0 for write, 1 for read.  Extracted to remove the
 * duplicated bit sequence in nsp32_prom_read().
 */
static void nsp32_prom_device_select(nsp32_hw_data *data, int rw)
{
	int i;
	int byte = 0xa0 | (rw & 1);	/* 1010 000x */

	for (i = 7; i >= 0; i--) {
		nsp32_prom_write_bit(data, (byte >> i) & 1);
	}
	/* ack clock cycle */
	nsp32_prom_write_bit(data, 0);
}

/*
 * Read one byte at romaddr from the serial EEPROM using the standard
 * AT24 "dummy write then current-address read" sequence.
 */
static int nsp32_prom_read(nsp32_hw_data *data, int romaddr)
{
	int i, val;

	/* start condition */
	nsp32_prom_start(data);

	/* device address, R/W: W for dummy write, then ack */
	nsp32_prom_device_select(data, 0);

	/* word address */
	for (i = 7; i >= 0; i--) {
		nsp32_prom_write_bit(data, ((romaddr >> i) & 1));
	}

	/* ack */
	nsp32_prom_write_bit(data, 0);

	/* repeated start condition */
	nsp32_prom_start(data);

	/* device address, R/W: R, then ack */
	nsp32_prom_device_select(data, 1);

	/* data bits, MSB first */
	val = 0;
	for (i = 7; i >= 0; i--) {
		val += (nsp32_prom_read_bit(data) << i);
	}

	/* no ack */
	nsp32_prom_write_bit(data, 1);

	/* stop condition */
	nsp32_prom_stop(data);

	return val;
}
static void nsp32_prom_set(nsp32_hw_data *data, int bit, int val)
{
	int base = data->BaseAddress;
	int reg  = nsp32_index_read1(base, SERIAL_ROM_CTL);

	/* Set or clear the requested serial-ROM control line. */
	if (val)
		reg |= bit;
	else
		reg &= ~bit;

	nsp32_index_write1(base, SERIAL_ROM_CTL, reg);

	udelay(10);	/* settle time after each line change */
}
static int nsp32_prom_get(nsp32_hw_data *data, int bit)
{
	int base = data->BaseAddress;
	int ret;

	/* only the SDA line can be sampled */
	if (bit != SDA) {
		nsp32_msg(KERN_ERR, "return value is not appropriate");
		return 0;
	}

	ret = (nsp32_index_read1(base, SERIAL_ROM_CTL) & bit) ? 1 : 0;

	udelay(10);	/* settle time after sampling */

	return ret;
}
/* Generate an I2C START condition on the bit-banged EEPROM lines. */
static void nsp32_prom_start (nsp32_hw_data *data)
{
	/* start condition */
	nsp32_prom_set(data, SCL, 1);
	nsp32_prom_set(data, SDA, 1);
	nsp32_prom_set(data, ENA, 1);	/* output mode */
	nsp32_prom_set(data, SDA, 0);	/* keeping SCL=1 and transiting
					 * SDA 1->0 is start condition */
	nsp32_prom_set(data, SCL, 0);
}
/* Generate an I2C STOP condition (SDA 0->1 while SCL is high). */
static void nsp32_prom_stop (nsp32_hw_data *data)
{
	/* stop condition */
	nsp32_prom_set(data, SCL, 1);
	nsp32_prom_set(data, SDA, 0);
	nsp32_prom_set(data, ENA, 1);	/* output mode */
	nsp32_prom_set(data, SDA, 1);
	nsp32_prom_set(data, SCL, 0);
}
/* Clock one data bit out to the EEPROM (SDA valid while SCL pulses). */
static void nsp32_prom_write_bit(nsp32_hw_data *data, int val)
{
	/* write */
	nsp32_prom_set(data, SDA, val);
	nsp32_prom_set(data, SCL, 1 );
	nsp32_prom_set(data, SCL, 0 );
}
/* Clock one data bit in from the EEPROM (sampled while SCL is high). */
static int nsp32_prom_read_bit(nsp32_hw_data *data)
{
	int val;

	/* read */
	nsp32_prom_set(data, ENA, 0);	/* input mode */
	nsp32_prom_set(data, SCL, 1);

	val = nsp32_prom_get(data, SDA);

	nsp32_prom_set(data, SCL, 0);
	nsp32_prom_set(data, ENA, 1);	/* output mode */

	return val;
}
/**************************************************************************
* Power Management
*/
#ifdef CONFIG_PM
/* Device suspended */
/* PM suspend: save PCI state, disable the device and power it down. */
static int nsp32_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);

	/*
	 * Fix: pm_message_t is a struct; passing it straight through
	 * varargs with %ld was a format-specifier mismatch (undefined
	 * behaviour).  Log its .event field with %d instead.
	 */
	nsp32_msg(KERN_INFO, "pci-suspend: pdev=0x%p, state=%d, slot=%s, host=0x%p", pdev, state.event, pci_name(pdev), host);

	pci_save_state     (pdev);
	pci_disable_device (pdev);
	pci_set_power_state(pdev, pci_choose_state(pdev, state));

	return 0;
}
/* Device woken up */
/*
 * PM resume: power the device back up, restore PCI config space,
 * verify the chip responds, then re-initialize the HBA and reset the
 * SCSI bus.  The power/restore ordering is mandated by the PCI core.
 */
static int nsp32_resume(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	nsp32_hw_data    *data = (nsp32_hw_data *)host->hostdata;
	unsigned short    reg;

	nsp32_msg(KERN_INFO, "pci-resume: pdev=0x%p, slot=%s, host=0x%p", pdev, pci_name(pdev), host);

	pci_set_power_state(pdev, PCI_D0);
	pci_enable_wake    (pdev, PCI_D0, 0);
	pci_restore_state  (pdev);

	/* probe the index register: all-ones means the device is gone */
	reg = nsp32_read2(data->BaseAddress, INDEX_REG);

	nsp32_msg(KERN_INFO, "io=0x%x reg=0x%x", data->BaseAddress, reg);

	if (reg == 0xffff) {
		nsp32_msg(KERN_INFO, "missing device. abort resume.");
		return 0;
	}

	nsp32hw_init      (data);
	nsp32_do_bus_reset(data);

	nsp32_msg(KERN_INFO, "resume success");

	return 0;
}
#endif
/************************************************************************
* PCI/Cardbus probe/remove routine
*/
/*
 * PCI probe entry: enable the device, record its IRQ and BAR resources
 * into the global template, then run nsp32_detect() to allocate and
 * register the SCSI host.
 */
static int __devinit nsp32_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int ret;
	nsp32_hw_data *data = &nsp32_data_base;

	nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");

	ret = pci_enable_device(pdev);
	if (ret) {
		nsp32_msg(KERN_ERR, "failed to enable pci device");
		return ret;
	}

	data->Pci         = pdev;
	data->pci_devid   = id;
	data->IrqNumber   = pdev->irq;
	data->BaseAddress = pci_resource_start(pdev, 0);
	data->NumAddress  = pci_resource_len  (pdev, 0);
	/*
	 * NOTE(review): pci_ioremap_bar() may return NULL; the result
	 * is not checked here — confirm behaviour on mapping failure.
	 */
	data->MmioAddress = pci_ioremap_bar(pdev, 1);
	data->MmioLength  = pci_resource_len  (pdev, 1);

	pci_set_master(pdev);

	/*
	 * NOTE(review): nsp32_detect() returns 1 on failure, so a
	 * failed probe returns a positive value and leaves the PCI
	 * device enabled and the BAR mapped — verify against the PCI
	 * core's expectation of a negative errno.
	 */
	ret = nsp32_detect(pdev);

	nsp32_msg(KERN_INFO, "irq: %i mmio: %p+0x%lx slot: %s model: %s",
		  pdev->irq,
		  data->MmioAddress, data->MmioLength,
		  pci_name(pdev),
		  nsp32_model[id->driver_data]);

	nsp32_dbg(NSP32_DEBUG_REGISTER, "exit %d", ret);

	return ret;
}
/*
 * PCI remove: detach the host from the SCSI midlayer, release all HBA
 * resources, then drop the final host reference.
 */
static void __devexit nsp32_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);

	nsp32_dbg(NSP32_DEBUG_REGISTER, "enter");

	scsi_remove_host(host);

	nsp32_release(host);

	scsi_host_put(host);
}
/* PCI driver glue: probe/remove plus optional power-management hooks. */
static struct pci_driver nsp32_driver = {
	.name		= "nsp32",
	.id_table	= nsp32_pci_table,
	.probe		= nsp32_probe,
	.remove		= __devexit_p(nsp32_remove),
#ifdef CONFIG_PM
	.suspend	= nsp32_suspend,
	.resume		= nsp32_resume,
#endif
};
/*********************************************************************
* Moule entry point
*/
/* Module load: register the PCI driver with the core. */
static int __init init_nsp32(void) {
	nsp32_msg(KERN_INFO, "loading...");
	return pci_register_driver(&nsp32_driver);
}

/* Module unload: unregister the PCI driver. */
static void __exit exit_nsp32(void) {
	nsp32_msg(KERN_INFO, "unloading...");
	pci_unregister_driver(&nsp32_driver);
}

module_init(init_nsp32);
module_exit(exit_nsp32);
/* end */
| gpl-2.0 |
mali1/NST-kernel | arch/arm/mach-at91/at91rm9200_devices.c | 1677 | 32118 | /*
* arch/arm/mach-at91/at91rm9200_devices.c
*
* Copyright (C) 2005 Thibaut VARENE <varenet@parisc-linux.org>
* Copyright (C) 2005 David Brownell
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include <linux/i2c-gpio.h>
#include <mach/board.h>
#include <mach/gpio.h>
#include <mach/at91rm9200.h>
#include <mach/at91rm9200_mc.h>
#include "generic.h"
/* --------------------------------------------------------------------
* USB Host
* -------------------------------------------------------------------- */
#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
/* DMA mask and board-supplied configuration for the OHCI USB host. */
static u64 ohci_dmamask = DMA_BIT_MASK(32);
static struct at91_usbh_data usbh_data;

/* Register window and interrupt of the USB host port (UHP). */
static struct resource usbh_resources[] = {
	[0] = {
		.start	= AT91RM9200_UHP_BASE,
		.end	= AT91RM9200_UHP_BASE + SZ_1M - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_UHP,
		.end	= AT91RM9200_ID_UHP,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91rm9200_usbh_device = {
	.name		= "at91_ohci",
	.id		= -1,
	.dev		= {
				.dma_mask		= &ohci_dmamask,
				.coherent_dma_mask	= DMA_BIT_MASK(32),
				.platform_data		= &usbh_data,
	},
	.resource	= usbh_resources,
	.num_resources	= ARRAY_SIZE(usbh_resources),
};

/*
 * Register the OHCI USB host controller with the supplied board data.
 * Does nothing when no board data is provided.
 */
void __init at91_add_device_usbh(struct at91_usbh_data *data)
{
	if (!data)
		return;

	/* copy the board data, then register the platform device */
	usbh_data = *data;
	platform_device_register(&at91rm9200_usbh_device);
}
#else
void __init at91_add_device_usbh(struct at91_usbh_data *data) {}
#endif
/* --------------------------------------------------------------------
* USB Device (Gadget)
* -------------------------------------------------------------------- */
#ifdef CONFIG_USB_GADGET_AT91
/* Board-supplied configuration for the USB device (gadget) port. */
static struct at91_udc_data udc_data;

/* Register window and interrupt of the USB device port (UDP). */
static struct resource udc_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_UDP,
		.end	= AT91RM9200_BASE_UDP + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_UDP,
		.end	= AT91RM9200_ID_UDP,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91rm9200_udc_device = {
	.name		= "at91_udc",
	.id		= -1,
	.dev		= {
				.platform_data	= &udc_data,
	},
	.resource	= udc_resources,
	.num_resources	= ARRAY_SIZE(udc_resources),
};

/*
 * Configure the optional VBUS-detect and D+ pull-up GPIOs and register
 * the USB device controller with the supplied board data.
 */
void __init at91_add_device_udc(struct at91_udc_data *data)
{
	if (!data)
		return;

	if (data->vbus_pin) {
		/* VBUS sense input, deglitched */
		at91_set_gpio_input(data->vbus_pin, 0);
		at91_set_deglitch(data->vbus_pin, 1);
	}
	if (data->pullup_pin)
		/* pull-up control output, initially off */
		at91_set_gpio_output(data->pullup_pin, 0);

	udc_data = *data;
	platform_device_register(&at91rm9200_udc_device);
}
#else
void __init at91_add_device_udc(struct at91_udc_data *data) {}
#endif
/* --------------------------------------------------------------------
* Ethernet
* -------------------------------------------------------------------- */
#if defined(CONFIG_ARM_AT91_ETHER) || defined(CONFIG_ARM_AT91_ETHER_MODULE)
/* DMA mask and board-supplied configuration for the EMAC. */
static u64 eth_dmamask = DMA_BIT_MASK(32);
static struct at91_eth_data eth_data;

/* EMAC register window (virtual base address) and interrupt. */
static struct resource eth_resources[] = {
	[0] = {
		.start	= AT91_VA_BASE_EMAC,
		.end	= AT91_VA_BASE_EMAC + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_EMAC,
		.end	= AT91RM9200_ID_EMAC,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91rm9200_eth_device = {
	.name		= "at91_ether",
	.id		= -1,
	.dev		= {
				.dma_mask		= &eth_dmamask,
				.coherent_dma_mask	= DMA_BIT_MASK(32),
				.platform_data		= &eth_data,
	},
	.resource	= eth_resources,
	.num_resources	= ARRAY_SIZE(eth_resources),
};

/*
 * Mux the EMAC pins (shared MII/RMII set, plus the MII-only extras),
 * set up the optional PHY interrupt GPIO, and register the Ethernet
 * controller with the supplied board data.
 */
void __init at91_add_device_eth(struct at91_eth_data *data)
{
	if (!data)
		return;

	if (data->phy_irq_pin) {
		/* PHY interrupt input, deglitched */
		at91_set_gpio_input(data->phy_irq_pin, 0);
		at91_set_deglitch(data->phy_irq_pin, 1);
	}

	/* Pins used for MII and RMII */
	at91_set_A_periph(AT91_PIN_PA16, 0);	/* EMDIO */
	at91_set_A_periph(AT91_PIN_PA15, 0);	/* EMDC */
	at91_set_A_periph(AT91_PIN_PA14, 0);	/* ERXER */
	at91_set_A_periph(AT91_PIN_PA13, 0);	/* ERX1 */
	at91_set_A_periph(AT91_PIN_PA12, 0);	/* ERX0 */
	at91_set_A_periph(AT91_PIN_PA11, 0);	/* ECRS_ECRSDV */
	at91_set_A_periph(AT91_PIN_PA10, 0);	/* ETX1 */
	at91_set_A_periph(AT91_PIN_PA9, 0);	/* ETX0 */
	at91_set_A_periph(AT91_PIN_PA8, 0);	/* ETXEN */
	at91_set_A_periph(AT91_PIN_PA7, 0);	/* ETXCK_EREFCK */

	if (!data->is_rmii) {
		/* additional pins needed only in full MII mode */
		at91_set_B_periph(AT91_PIN_PB19, 0);	/* ERXCK */
		at91_set_B_periph(AT91_PIN_PB18, 0);	/* ECOL */
		at91_set_B_periph(AT91_PIN_PB17, 0);	/* ERXDV */
		at91_set_B_periph(AT91_PIN_PB16, 0);	/* ERX3 */
		at91_set_B_periph(AT91_PIN_PB15, 0);	/* ERX2 */
		at91_set_B_periph(AT91_PIN_PB14, 0);	/* ETXER */
		at91_set_B_periph(AT91_PIN_PB13, 0);	/* ETX3 */
		at91_set_B_periph(AT91_PIN_PB12, 0);	/* ETX2 */
	}

	eth_data = *data;
	platform_device_register(&at91rm9200_eth_device);
}
#else
void __init at91_add_device_eth(struct at91_eth_data *data) {}
#endif
/* --------------------------------------------------------------------
* Compact Flash / PCMCIA
* -------------------------------------------------------------------- */
#if defined(CONFIG_AT91_CF) || defined(CONFIG_AT91_CF_MODULE)
/* Board-supplied configuration for the CompactFlash socket. */
static struct at91_cf_data cf_data;

/* CF is wired on EBI chip-select 4. */
#define CF_BASE		AT91_CHIPSELECT_4

static struct resource cf_resources[] = {
	[0] = {
		.start	= CF_BASE,
		/* ties up CS4, CS5 and CS6 */
		.end	= CF_BASE + (0x30000000 - 1),
		.flags	= IORESOURCE_MEM | IORESOURCE_MEM_8AND16BIT,
	},
};

static struct platform_device at91rm9200_cf_device = {
	.name		= "at91_cf",
	.id		= -1,
	.dev		= {
				.platform_data	= &cf_data,
	},
	.resource	= cf_resources,
	.num_resources	= ARRAY_SIZE(cf_resources),
};

/*
 * Claim EBI chip-selects 4-6 for CompactFlash, program the static
 * memory controller timings, configure the control/detect GPIOs and
 * register the CF socket driver.
 */
void __init at91_add_device_cf(struct at91_cf_data *data)
{
	unsigned int csa;

	if (!data)
		return;

	data->chipselect = 4;		/* can only use EBI ChipSelect 4 */

	/* CF takes over CS4, CS5, CS6 */
	csa = at91_sys_read(AT91_EBI_CSA);
	at91_sys_write(AT91_EBI_CSA, csa | AT91_EBI_CS4A_SMC_COMPACTFLASH);

	/*
	 * Static memory controller timing adjustments.
	 * REVISIT: these timings are in terms of MCK cycles, so
	 * when MCK changes (cpufreq etc) so must these values...
	 */
	at91_sys_write(AT91_SMC_CSR(4),
				AT91_SMC_ACSS_STD
				| AT91_SMC_DBW_16
				| AT91_SMC_BAT
				| AT91_SMC_WSEN
				| AT91_SMC_NWS_(32)	/* wait states */
				| AT91_SMC_RWSETUP_(6)	/* setup time */
				| AT91_SMC_RWHOLD_(4)	/* hold time */
	);

	/* input/irq */
	if (data->irq_pin) {
		at91_set_gpio_input(data->irq_pin, 1);
		at91_set_deglitch(data->irq_pin, 1);
	}
	at91_set_gpio_input(data->det_pin, 1);
	at91_set_deglitch(data->det_pin, 1);

	/* outputs, initially off */
	if (data->vcc_pin)
		at91_set_gpio_output(data->vcc_pin, 0);
	at91_set_gpio_output(data->rst_pin, 0);

	/* force poweron defaults for these pins ... */
	at91_set_A_periph(AT91_PIN_PC9, 0);	/* A25/CFRNW */
	at91_set_A_periph(AT91_PIN_PC10, 0);	/* NCS4/CFCS */
	at91_set_A_periph(AT91_PIN_PC11, 0);	/* NCS5/CFCE1 */
	at91_set_A_periph(AT91_PIN_PC12, 0);	/* NCS6/CFCE2 */

	/* nWAIT is _not_ a default setting */
	at91_set_A_periph(AT91_PIN_PC6, 1);	/* nWAIT */

	cf_data = *data;
	platform_device_register(&at91rm9200_cf_device);
}
#else
void __init at91_add_device_cf(struct at91_cf_data *data) {}
#endif
/* --------------------------------------------------------------------
* MMC / SD
* -------------------------------------------------------------------- */
#if defined(CONFIG_MMC_AT91) || defined(CONFIG_MMC_AT91_MODULE)
/* DMA mask and board-supplied configuration for the MCI controller. */
static u64 mmc_dmamask = DMA_BIT_MASK(32);
static struct at91_mmc_data mmc_data;

/* MCI register window and interrupt. */
static struct resource mmc_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_MCI,
		.end	= AT91RM9200_BASE_MCI + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_MCI,
		.end	= AT91RM9200_ID_MCI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91rm9200_mmc_device = {
	.name		= "at91_mci",
	.id		= -1,
	.dev		= {
				.dma_mask		= &mmc_dmamask,
				.coherent_dma_mask	= DMA_BIT_MASK(32),
				.platform_data		= &mmc_data,
	},
	.resource	= mmc_resources,
	.num_resources	= ARRAY_SIZE(mmc_resources),
};

/*
 * Configure the card-detect/write-protect/power GPIOs, mux the MCI
 * pins for the selected slot (A or B, 1- or 4-wire), and register the
 * MMC/SD controller with the supplied board data.
 */
void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data)
{
	if (!data)
		return;

	/* input/irq */
	if (data->det_pin) {
		at91_set_gpio_input(data->det_pin, 1);
		at91_set_deglitch(data->det_pin, 1);
	}
	if (data->wp_pin)
		at91_set_gpio_input(data->wp_pin, 1);
	if (data->vcc_pin)
		at91_set_gpio_output(data->vcc_pin, 0);

	/* CLK */
	at91_set_A_periph(AT91_PIN_PA27, 0);

	if (data->slot_b) {
		/* CMD */
		at91_set_B_periph(AT91_PIN_PA8, 1);

		/* DAT0, maybe DAT1..DAT3 */
		at91_set_B_periph(AT91_PIN_PA9, 1);
		if (data->wire4) {
			at91_set_B_periph(AT91_PIN_PA10, 1);
			at91_set_B_periph(AT91_PIN_PA11, 1);
			at91_set_B_periph(AT91_PIN_PA12, 1);
		}
	} else {
		/* CMD */
		at91_set_A_periph(AT91_PIN_PA28, 1);

		/* DAT0, maybe DAT1..DAT3 */
		at91_set_A_periph(AT91_PIN_PA29, 1);
		if (data->wire4) {
			at91_set_B_periph(AT91_PIN_PB3, 1);
			at91_set_B_periph(AT91_PIN_PB4, 1);
			at91_set_B_periph(AT91_PIN_PB5, 1);
		}
	}

	mmc_data = *data;
	platform_device_register(&at91rm9200_mmc_device);
}
#else
void __init at91_add_device_mmc(short mmc_id, struct at91_mmc_data *data) {}
#endif
/* --------------------------------------------------------------------
 *  NAND / SmartMedia
 * -------------------------------------------------------------------- */

#if defined(CONFIG_MTD_NAND_ATMEL) || defined(CONFIG_MTD_NAND_ATMEL_MODULE)
static struct atmel_nand_data nand_data;	/* copy of the board's description */

#define NAND_BASE	AT91_CHIPSELECT_3	/* NAND hangs off EBI chip-select 3 */

static struct resource nand_resources[] = {
	{
		.start	= NAND_BASE,
		.end	= NAND_BASE + SZ_256M - 1,
		.flags	= IORESOURCE_MEM,
	}
};

static struct platform_device at91rm9200_nand_device = {
	.name		= "atmel_nand",
	.id		= -1,
	.dev		= {
				.platform_data	= &nand_data,
	},
	.resource	= nand_resources,
	.num_resources	= ARRAY_SIZE(nand_resources),
};

/*
 * at91_add_device_nand() - register the NAND/SmartMedia controller.
 * @data:	board description (enable/ready-busy/detect GPIOs, bus width)
 *
 * Routes EBI chip-select 3 to the SMC SmartMedia logic, programs the SMC
 * bus timing, claims the optional GPIOs and registers the platform device.
 * No-op when @data is NULL.
 */
void __init at91_add_device_nand(struct atmel_nand_data *data)
{
	unsigned int csa;

	if (!data)
		return;

	/* enable the address range of CS3 */
	csa = at91_sys_read(AT91_EBI_CSA);
	at91_sys_write(AT91_EBI_CSA, csa | AT91_EBI_CS3A_SMC_SMARTMEDIA);

	/* set the bus interface characteristics */
	at91_sys_write(AT91_SMC_CSR(3), AT91_SMC_ACSS_STD | AT91_SMC_DBW_8 | AT91_SMC_WSEN
		| AT91_SMC_NWS_(5)
		| AT91_SMC_TDF_(1)
		| AT91_SMC_RWSETUP_(0)	/* tDS Data Set up Time 30 - ns */
		| AT91_SMC_RWHOLD_(1)	/* tDH Data Hold Time 20 - ns */
	);

	/* enable pin */
	if (data->enable_pin)
		at91_set_gpio_output(data->enable_pin, 1);

	/* ready/busy pin */
	if (data->rdy_pin)
		at91_set_gpio_input(data->rdy_pin, 1);

	/* card detect pin */
	if (data->det_pin)
		at91_set_gpio_input(data->det_pin, 1);

	at91_set_A_periph(AT91_PIN_PC1, 0);		/* SMOE */
	at91_set_A_periph(AT91_PIN_PC3, 0);		/* SMWE */

	nand_data = *data;
	platform_device_register(&at91rm9200_nand_device);
}
#else
void __init at91_add_device_nand(struct atmel_nand_data *data) {}
#endif
/* --------------------------------------------------------------------
 *  TWI (i2c)
 * -------------------------------------------------------------------- */

/*
 * Prefer the GPIO code since the TWI controller isn't robust
 * (gets overruns and underruns under load) and can only issue
 * repeated STARTs in one scenario (the driver doesn't yet handle them).
 */
#if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE)

/* bit-banged I2C on the same pins the TWI block would use */
static struct i2c_gpio_platform_data pdata = {
	.sda_pin		= AT91_PIN_PA25,
	.sda_is_open_drain	= 1,
	.scl_pin		= AT91_PIN_PA26,
	.scl_is_open_drain	= 1,
	.udelay			= 2,		/* ~100 kHz */
};

static struct platform_device at91rm9200_twi_device = {
	.name			= "i2c-gpio",
	.id			= -1,
	.dev.platform_data	= &pdata,
};

/*
 * at91_add_device_i2c() - register an I2C bus (GPIO bit-bang flavour) and
 * the board's I2C devices.
 */
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
{
	at91_set_GPIO_periph(AT91_PIN_PA25, 1);		/* TWD (SDA) */
	at91_set_multi_drive(AT91_PIN_PA25, 1);		/* open-drain */

	at91_set_GPIO_periph(AT91_PIN_PA26, 1);		/* TWCK (SCL) */
	at91_set_multi_drive(AT91_PIN_PA26, 1);		/* open-drain */

	i2c_register_board_info(0, devices, nr_devices);
	platform_device_register(&at91rm9200_twi_device);
}

#elif defined(CONFIG_I2C_AT91) || defined(CONFIG_I2C_AT91_MODULE)

static struct resource twi_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_TWI,
		.end	= AT91RM9200_BASE_TWI + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_TWI,
		.end	= AT91RM9200_ID_TWI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91rm9200_twi_device = {
	.name		= "at91_i2c",
	.id		= -1,
	.resource	= twi_resources,
	.num_resources	= ARRAY_SIZE(twi_resources),
};

/*
 * at91_add_device_i2c() - register the hardware TWI controller and the
 * board's I2C devices.
 */
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices)
{
	/* pins used for TWI interface */
	at91_set_A_periph(AT91_PIN_PA25, 0);		/* TWD */
	at91_set_multi_drive(AT91_PIN_PA25, 1);

	at91_set_A_periph(AT91_PIN_PA26, 0);		/* TWCK */
	at91_set_multi_drive(AT91_PIN_PA26, 1);

	i2c_register_board_info(0, devices, nr_devices);
	platform_device_register(&at91rm9200_twi_device);
}
#else
void __init at91_add_device_i2c(struct i2c_board_info *devices, int nr_devices) {}
#endif
/* --------------------------------------------------------------------
 *  SPI
 * -------------------------------------------------------------------- */

#if defined(CONFIG_SPI_ATMEL) || defined(CONFIG_SPI_ATMEL_MODULE)
static u64 spi_dmamask = DMA_BIT_MASK(32);

static struct resource spi_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_SPI,
		.end	= AT91RM9200_BASE_SPI + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_SPI,
		.end	= AT91RM9200_ID_SPI,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91rm9200_spi_device = {
	.name		= "atmel_spi",
	.id		= 0,
	.dev		= {
				.dma_mask		= &spi_dmamask,
				.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= spi_resources,
	.num_resources	= ARRAY_SIZE(spi_resources),
};

/* default chip-select pins, used when a board doesn't override them */
static const unsigned spi_standard_cs[4] = { AT91_PIN_PA3, AT91_PIN_PA4, AT91_PIN_PA5, AT91_PIN_PA6 };

/*
 * at91_add_device_spi() - mux the SPI signals, set up one chip-select per
 * device (GPIO-driven except CS0, see errata note below), then register
 * the board's SPI devices and the controller.
 */
void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices)
{
	int i;
	unsigned long cs_pin;

	at91_set_A_periph(AT91_PIN_PA0, 0);	/* MISO */
	at91_set_A_periph(AT91_PIN_PA1, 0);	/* MOSI */
	at91_set_A_periph(AT91_PIN_PA2, 0);	/* SPCK */

	/* Enable SPI chip-selects */
	for (i = 0; i < nr_devices; i++) {
		if (devices[i].controller_data)
			cs_pin = (unsigned long) devices[i].controller_data;
		else
			cs_pin = spi_standard_cs[devices[i].chip_select];

		if (devices[i].chip_select == 0)	/* for CS0 errata */
			at91_set_A_periph(cs_pin, 0);
		else
			at91_set_gpio_output(cs_pin, 1);	/* deselected (active low) */

		/* pass chip-select pin to driver */
		devices[i].controller_data = (void *) cs_pin;
	}

	spi_register_board_info(devices, nr_devices);
	platform_device_register(&at91rm9200_spi_device);
}
#else
void __init at91_add_device_spi(struct spi_board_info *devices, int nr_devices) {}
#endif
/* --------------------------------------------------------------------
 *  Timer/Counter blocks
 * -------------------------------------------------------------------- */

#ifdef CONFIG_ATMEL_TCLIB

/* TC block 0: channels 0..2, one IRQ resource per channel */
static struct resource tcb0_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_TCB0,
		.end	= AT91RM9200_BASE_TCB0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_TC0,
		.end	= AT91RM9200_ID_TC0,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AT91RM9200_ID_TC1,
		.end	= AT91RM9200_ID_TC1,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		.start	= AT91RM9200_ID_TC2,
		.end	= AT91RM9200_ID_TC2,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91rm9200_tcb0_device = {
	.name		= "atmel_tcb",
	.id		= 0,
	.resource	= tcb0_resources,
	.num_resources	= ARRAY_SIZE(tcb0_resources),
};

/* TC block 1: channels 3..5 */
static struct resource tcb1_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_TCB1,
		.end	= AT91RM9200_BASE_TCB1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_TC3,
		.end	= AT91RM9200_ID_TC3,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AT91RM9200_ID_TC4,
		.end	= AT91RM9200_ID_TC4,
		.flags	= IORESOURCE_IRQ,
	},
	[3] = {
		.start	= AT91RM9200_ID_TC5,
		.end	= AT91RM9200_ID_TC5,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91rm9200_tcb1_device = {
	.name		= "atmel_tcb",
	.id		= 1,
	.resource	= tcb1_resources,
	.num_resources	= ARRAY_SIZE(tcb1_resources),
};

/*
 * at91_add_device_tc() - register both timer/counter blocks, binding each
 * per-channel clock to its block's device under the names tclib expects.
 */
static void __init at91_add_device_tc(void)
{
	/* this chip has a separate clock and irq for each TC channel */
	at91_clock_associate("tc0_clk", &at91rm9200_tcb0_device.dev, "t0_clk");
	at91_clock_associate("tc1_clk", &at91rm9200_tcb0_device.dev, "t1_clk");
	at91_clock_associate("tc2_clk", &at91rm9200_tcb0_device.dev, "t2_clk");
	platform_device_register(&at91rm9200_tcb0_device);

	at91_clock_associate("tc3_clk", &at91rm9200_tcb1_device.dev, "t0_clk");
	at91_clock_associate("tc4_clk", &at91rm9200_tcb1_device.dev, "t1_clk");
	at91_clock_associate("tc5_clk", &at91rm9200_tcb1_device.dev, "t2_clk");
	platform_device_register(&at91rm9200_tcb1_device);
}
#else
static void __init at91_add_device_tc(void) { }
#endif
/* --------------------------------------------------------------------
 *  RTC
 * -------------------------------------------------------------------- */

#if defined(CONFIG_RTC_DRV_AT91RM9200) || defined(CONFIG_RTC_DRV_AT91RM9200_MODULE)
/* on-chip RTC lives in the system peripheral block: no MEM/IRQ resources */
static struct platform_device at91rm9200_rtc_device = {
	.name		= "at91_rtc",
	.id		= -1,
	.num_resources	= 0,
};

/* Register the on-chip RTC; called unconditionally from the initcall below. */
static void __init at91_add_device_rtc(void)
{
	platform_device_register(&at91rm9200_rtc_device);
}
#else
static void __init at91_add_device_rtc(void) {}
#endif
/* --------------------------------------------------------------------
 *  Watchdog
 * -------------------------------------------------------------------- */

#if defined(CONFIG_AT91RM9200_WATCHDOG) || defined(CONFIG_AT91RM9200_WATCHDOG_MODULE)
/* watchdog registers live in the system peripheral block: no resources */
static struct platform_device at91rm9200_wdt_device = {
	.name		= "at91_wdt",
	.id		= -1,
	.num_resources	= 0,
};

/* Register the on-chip watchdog; called unconditionally from the initcall below. */
static void __init at91_add_device_watchdog(void)
{
	platform_device_register(&at91rm9200_wdt_device);
}
#else
static void __init at91_add_device_watchdog(void) {}
#endif
/* --------------------------------------------------------------------
 *  SSC -- Synchronous Serial Controller
 * -------------------------------------------------------------------- */

#if defined(CONFIG_ATMEL_SSC) || defined(CONFIG_ATMEL_SSC_MODULE)
static u64 ssc0_dmamask = DMA_BIT_MASK(32);

static struct resource ssc0_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_SSC0,
		.end	= AT91RM9200_BASE_SSC0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_SSC0,
		.end	= AT91RM9200_ID_SSC0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91rm9200_ssc0_device = {
	.name	= "ssc",
	.id	= 0,
	.dev	= {
		.dma_mask		= &ssc0_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= ssc0_resources,
	.num_resources	= ARRAY_SIZE(ssc0_resources),
};

/* Mux only the requested SSC0 signals (frame/clock/data) onto PB0..PB5. */
static inline void configure_ssc0_pins(unsigned pins)
{
	if (pins & ATMEL_SSC_TF)
		at91_set_A_periph(AT91_PIN_PB0, 1);
	if (pins & ATMEL_SSC_TK)
		at91_set_A_periph(AT91_PIN_PB1, 1);
	if (pins & ATMEL_SSC_TD)
		at91_set_A_periph(AT91_PIN_PB2, 1);
	if (pins & ATMEL_SSC_RD)
		at91_set_A_periph(AT91_PIN_PB3, 1);
	if (pins & ATMEL_SSC_RK)
		at91_set_A_periph(AT91_PIN_PB4, 1);
	if (pins & ATMEL_SSC_RF)
		at91_set_A_periph(AT91_PIN_PB5, 1);
}

static u64 ssc1_dmamask = DMA_BIT_MASK(32);

static struct resource ssc1_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_SSC1,
		.end	= AT91RM9200_BASE_SSC1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_SSC1,
		.end	= AT91RM9200_ID_SSC1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91rm9200_ssc1_device = {
	.name	= "ssc",
	.id	= 1,
	.dev	= {
		.dma_mask		= &ssc1_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= ssc1_resources,
	.num_resources	= ARRAY_SIZE(ssc1_resources),
};

/* Mux only the requested SSC1 signals onto PB6..PB11. */
static inline void configure_ssc1_pins(unsigned pins)
{
	if (pins & ATMEL_SSC_TF)
		at91_set_A_periph(AT91_PIN_PB6, 1);
	if (pins & ATMEL_SSC_TK)
		at91_set_A_periph(AT91_PIN_PB7, 1);
	if (pins & ATMEL_SSC_TD)
		at91_set_A_periph(AT91_PIN_PB8, 1);
	if (pins & ATMEL_SSC_RD)
		at91_set_A_periph(AT91_PIN_PB9, 1);
	if (pins & ATMEL_SSC_RK)
		at91_set_A_periph(AT91_PIN_PB10, 1);
	if (pins & ATMEL_SSC_RF)
		at91_set_A_periph(AT91_PIN_PB11, 1);
}

static u64 ssc2_dmamask = DMA_BIT_MASK(32);

static struct resource ssc2_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_SSC2,
		.end	= AT91RM9200_BASE_SSC2 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_SSC2,
		.end	= AT91RM9200_ID_SSC2,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device at91rm9200_ssc2_device = {
	.name	= "ssc",
	.id	= 2,
	.dev	= {
		.dma_mask		= &ssc2_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
	},
	.resource	= ssc2_resources,
	.num_resources	= ARRAY_SIZE(ssc2_resources),
};

/* Mux only the requested SSC2 signals onto PB12..PB17. */
static inline void configure_ssc2_pins(unsigned pins)
{
	if (pins & ATMEL_SSC_TF)
		at91_set_A_periph(AT91_PIN_PB12, 1);
	if (pins & ATMEL_SSC_TK)
		at91_set_A_periph(AT91_PIN_PB13, 1);
	if (pins & ATMEL_SSC_TD)
		at91_set_A_periph(AT91_PIN_PB14, 1);
	if (pins & ATMEL_SSC_RD)
		at91_set_A_periph(AT91_PIN_PB15, 1);
	if (pins & ATMEL_SSC_RK)
		at91_set_A_periph(AT91_PIN_PB16, 1);
	if (pins & ATMEL_SSC_RF)
		at91_set_A_periph(AT91_PIN_PB17, 1);
}

/*
 * SSC controllers are accessed through library code, instead of any
 * kind of all-singing/all-dancing driver.  For example one could be
 * used by a particular I2S audio codec's driver, while another one
 * on the same system might be used by a custom data capture driver.
 */

/*
 * at91_add_device_ssc() - register one SSC controller.
 * @id:		peripheral id (AT91RM9200_ID_SSC0/1/2); others are ignored
 * @pins:	bitmask of ATMEL_SSC_* signals to mux out to pins
 */
void __init at91_add_device_ssc(unsigned id, unsigned pins)
{
	struct platform_device *pdev;

	/*
	 * NOTE: caller is responsible for passing information matching
	 * "pins" to whatever will be using each particular controller.
	 */
	switch (id) {
	case AT91RM9200_ID_SSC0:
		pdev = &at91rm9200_ssc0_device;
		configure_ssc0_pins(pins);
		at91_clock_associate("ssc0_clk", &pdev->dev, "ssc");
		break;
	case AT91RM9200_ID_SSC1:
		pdev = &at91rm9200_ssc1_device;
		configure_ssc1_pins(pins);
		at91_clock_associate("ssc1_clk", &pdev->dev, "ssc");
		break;
	case AT91RM9200_ID_SSC2:
		pdev = &at91rm9200_ssc2_device;
		configure_ssc2_pins(pins);
		at91_clock_associate("ssc2_clk", &pdev->dev, "ssc");
		break;
	default:
		return;
	}

	platform_device_register(pdev);
}

#else
void __init at91_add_device_ssc(unsigned id, unsigned pins) {}
#endif
/* --------------------------------------------------------------------
 *  UART
 * -------------------------------------------------------------------- */

#if defined(CONFIG_SERIAL_ATMEL)

/* DBGU: the debug unit inside the system peripheral block (shared IRQ) */
static struct resource dbgu_resources[] = {
	[0] = {
		.start	= AT91_VA_BASE_SYS + AT91_DBGU,
		.end	= AT91_VA_BASE_SYS + AT91_DBGU + SZ_512 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91_ID_SYS,
		.end	= AT91_ID_SYS,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data dbgu_data = {
	.use_dma_tx	= 0,
	.use_dma_rx	= 0,		/* DBGU not capable of receive DMA */
	.regs		= (void __iomem *)(AT91_VA_BASE_SYS + AT91_DBGU),
};

static u64 dbgu_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91rm9200_dbgu_device = {
	.name		= "atmel_usart",
	.id		= 0,
	.dev		= {
				.dma_mask		= &dbgu_dmamask,
				.coherent_dma_mask	= DMA_BIT_MASK(32),
				.platform_data		= &dbgu_data,
	},
	.resource	= dbgu_resources,
	.num_resources	= ARRAY_SIZE(dbgu_resources),
};

/* Mux the two DBGU pins (RX/TX only; the DBGU has no modem signals). */
static inline void configure_dbgu_pins(void)
{
	at91_set_A_periph(AT91_PIN_PA30, 0);		/* DRXD */
	at91_set_A_periph(AT91_PIN_PA31, 1);		/* DTXD */
}
static struct resource uart0_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_US0,
		.end	= AT91RM9200_BASE_US0 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_US0,
		.end	= AT91RM9200_ID_US0,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data uart0_data = {
	.use_dma_tx	= 1,
	.use_dma_rx	= 1,
};

static u64 uart0_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91rm9200_uart0_device = {
	.name		= "atmel_usart",
	.id		= 1,
	.dev		= {
				.dma_mask		= &uart0_dmamask,
				.coherent_dma_mask	= DMA_BIT_MASK(32),
				.platform_data		= &uart0_data,
	},
	.resource	= uart0_resources,
	.num_resources	= ARRAY_SIZE(uart0_resources),
};

/* Mux USART0's RX/TX and, if requested in @pins, its CTS/RTS lines. */
static inline void configure_usart0_pins(unsigned pins)
{
	at91_set_A_periph(AT91_PIN_PA17, 1);		/* TXD0 */
	at91_set_A_periph(AT91_PIN_PA18, 0);		/* RXD0 */

	if (pins & ATMEL_UART_CTS)
		at91_set_A_periph(AT91_PIN_PA20, 0);	/* CTS0 */

	if (pins & ATMEL_UART_RTS) {
		/*
		 * AT91RM9200 Errata #39 - RTS0 is not internally connected to PA21.
		 *  We need to drive the pin manually.  Default is off (RTS is active low).
		 */
		at91_set_gpio_output(AT91_PIN_PA21, 1);
	}
}
static struct resource uart1_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_US1,
		.end	= AT91RM9200_BASE_US1 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_US1,
		.end	= AT91RM9200_ID_US1,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data uart1_data = {
	.use_dma_tx	= 1,
	.use_dma_rx	= 1,
};

static u64 uart1_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91rm9200_uart1_device = {
	.name		= "atmel_usart",
	.id		= 2,
	.dev		= {
				.dma_mask		= &uart1_dmamask,
				.coherent_dma_mask	= DMA_BIT_MASK(32),
				.platform_data		= &uart1_data,
	},
	.resource	= uart1_resources,
	.num_resources	= ARRAY_SIZE(uart1_resources),
};

/*
 * Mux USART1's RX/TX and any requested modem-control signals.  USART1 is
 * the only one with the full set (RI/DTR/DCD/CTS/DSR/RTS).
 */
static inline void configure_usart1_pins(unsigned pins)
{
	at91_set_A_periph(AT91_PIN_PB20, 1);		/* TXD1 */
	at91_set_A_periph(AT91_PIN_PB21, 0);		/* RXD1 */

	if (pins & ATMEL_UART_RI)
		at91_set_A_periph(AT91_PIN_PB18, 0);	/* RI1 */
	if (pins & ATMEL_UART_DTR)
		at91_set_A_periph(AT91_PIN_PB19, 0);	/* DTR1 */
	if (pins & ATMEL_UART_DCD)
		at91_set_A_periph(AT91_PIN_PB23, 0);	/* DCD1 */
	if (pins & ATMEL_UART_CTS)
		at91_set_A_periph(AT91_PIN_PB24, 0);	/* CTS1 */
	if (pins & ATMEL_UART_DSR)
		at91_set_A_periph(AT91_PIN_PB25, 0);	/* DSR1 */
	if (pins & ATMEL_UART_RTS)
		at91_set_A_periph(AT91_PIN_PB26, 0);	/* RTS1 */
}
static struct resource uart2_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_US2,
		.end	= AT91RM9200_BASE_US2 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_US2,
		.end	= AT91RM9200_ID_US2,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data uart2_data = {
	.use_dma_tx	= 1,
	.use_dma_rx	= 1,
};

static u64 uart2_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91rm9200_uart2_device = {
	.name		= "atmel_usart",
	.id		= 3,
	.dev		= {
				.dma_mask		= &uart2_dmamask,
				.coherent_dma_mask	= DMA_BIT_MASK(32),
				.platform_data		= &uart2_data,
	},
	.resource	= uart2_resources,
	.num_resources	= ARRAY_SIZE(uart2_resources),
};

/*
 * Mux USART2's RX/TX and optional CTS/RTS.  The handshake lines share
 * PA30/PA31 (peripheral B) with the DBGU pins, so a board uses one or
 * the other.
 */
static inline void configure_usart2_pins(unsigned pins)
{
	at91_set_A_periph(AT91_PIN_PA22, 0);		/* RXD2 */
	at91_set_A_periph(AT91_PIN_PA23, 1);		/* TXD2 */

	if (pins & ATMEL_UART_CTS)
		at91_set_B_periph(AT91_PIN_PA30, 0);	/* CTS2 */
	if (pins & ATMEL_UART_RTS)
		at91_set_B_periph(AT91_PIN_PA31, 0);	/* RTS2 */
}
static struct resource uart3_resources[] = {
	[0] = {
		.start	= AT91RM9200_BASE_US3,
		.end	= AT91RM9200_BASE_US3 + SZ_16K - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AT91RM9200_ID_US3,
		.end	= AT91RM9200_ID_US3,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct atmel_uart_data uart3_data = {
	.use_dma_tx	= 1,
	.use_dma_rx	= 1,
};

static u64 uart3_dmamask = DMA_BIT_MASK(32);

static struct platform_device at91rm9200_uart3_device = {
	.name		= "atmel_usart",
	.id		= 4,
	.dev		= {
				.dma_mask		= &uart3_dmamask,
				.coherent_dma_mask	= DMA_BIT_MASK(32),
				.platform_data		= &uart3_data,
	},
	.resource	= uart3_resources,
	.num_resources	= ARRAY_SIZE(uart3_resources),
};

/* Mux USART3's RX/TX (peripheral B) and optional CTS/RTS. */
static inline void configure_usart3_pins(unsigned pins)
{
	at91_set_B_periph(AT91_PIN_PA5, 1);		/* TXD3 */
	at91_set_B_periph(AT91_PIN_PA6, 0);		/* RXD3 */

	if (pins & ATMEL_UART_CTS)
		at91_set_B_periph(AT91_PIN_PB1, 0);	/* CTS3 */
	if (pins & ATMEL_UART_RTS)
		at91_set_B_periph(AT91_PIN_PB0, 0);	/* RTS3 */
}
static struct platform_device *__initdata at91_uarts[ATMEL_MAX_UART];	/* the UARTs to use */
struct platform_device *atmel_default_console_device;	/* the serial console device */

/*
 * at91_init_serial() - legacy (deprecated) bulk UART setup.
 * @config:	tty_map[i] selects the port for ttyS<i>: 0..3 = USART0..3,
 *		4 = DBGU; console_tty selects which ttyS is the console.
 *
 * For each mapped port: mux its pins (USART0/1 get the full handshake set,
 * USART2/3 get RX/TX only), record it in at91_uarts[], bind its clock, and
 * renumber its device id to the tty number.  New boards should use
 * at91_register_uart()/at91_set_serial_console() instead.
 */
void __init __deprecated at91_init_serial(struct at91_uart_config *config)
{
	int i;

	/* Fill in list of supported UARTs */
	for (i = 0; i < config->nr_tty; i++) {
		switch (config->tty_map[i]) {
		case 0:
			configure_usart0_pins(ATMEL_UART_CTS | ATMEL_UART_RTS);
			at91_uarts[i] = &at91rm9200_uart0_device;
			at91_clock_associate("usart0_clk", &at91rm9200_uart0_device.dev, "usart");
			break;
		case 1:
			configure_usart1_pins(ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DSR | ATMEL_UART_DTR | ATMEL_UART_DCD | ATMEL_UART_RI);
			at91_uarts[i] = &at91rm9200_uart1_device;
			at91_clock_associate("usart1_clk", &at91rm9200_uart1_device.dev, "usart");
			break;
		case 2:
			configure_usart2_pins(0);
			at91_uarts[i] = &at91rm9200_uart2_device;
			at91_clock_associate("usart2_clk", &at91rm9200_uart2_device.dev, "usart");
			break;
		case 3:
			configure_usart3_pins(0);
			at91_uarts[i] = &at91rm9200_uart3_device;
			at91_clock_associate("usart3_clk", &at91rm9200_uart3_device.dev, "usart");
			break;
		case 4:
			configure_dbgu_pins();
			at91_uarts[i] = &at91rm9200_dbgu_device;
			at91_clock_associate("mck", &at91rm9200_dbgu_device.dev, "usart");
			break;
		default:
			continue;	/* unknown port number: leave slot empty */
		}
		at91_uarts[i]->id = i;		/* update ID number to mapped ID */
	}

	/* Set serial console device */
	if (config->console_tty < ATMEL_MAX_UART)
		atmel_default_console_device = at91_uarts[config->console_tty];
	if (!atmel_default_console_device)
		printk(KERN_INFO "AT91: No default serial console defined.\n");
}
/*
 * at91_register_uart() - select one UART for later registration.
 * @id:		0 for the DBGU, else the AT91RM9200_ID_USx peripheral id;
 *		anything else is silently ignored
 * @portnr:	tty number this port should appear as (also becomes the
 *		platform-device id)
 * @pins:	ATMEL_UART_* handshake lines to mux in addition to RX/TX
 *
 * The device itself is registered later by at91_add_device_serial().
 */
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins)
{
	struct platform_device *pdev;

	switch (id) {
		case 0:		/* DBGU */
			pdev = &at91rm9200_dbgu_device;
			configure_dbgu_pins();
			at91_clock_associate("mck", &pdev->dev, "usart");
			break;
		case AT91RM9200_ID_US0:
			pdev = &at91rm9200_uart0_device;
			configure_usart0_pins(pins);
			at91_clock_associate("usart0_clk", &pdev->dev, "usart");
			break;
		case AT91RM9200_ID_US1:
			pdev = &at91rm9200_uart1_device;
			configure_usart1_pins(pins);
			at91_clock_associate("usart1_clk", &pdev->dev, "usart");
			break;
		case AT91RM9200_ID_US2:
			pdev = &at91rm9200_uart2_device;
			configure_usart2_pins(pins);
			at91_clock_associate("usart2_clk", &pdev->dev, "usart");
			break;
		case AT91RM9200_ID_US3:
			pdev = &at91rm9200_uart3_device;
			configure_usart3_pins(pins);
			at91_clock_associate("usart3_clk", &pdev->dev, "usart");
			break;
		default:
			return;
	}
	pdev->id = portnr;		/* update to mapped ID */

	if (portnr < ATMEL_MAX_UART)
		at91_uarts[portnr] = pdev;
}
/*
 * Select which previously-registered port (by tty number) becomes the
 * default serial console.  Out-of-range numbers are ignored.
 */
void __init at91_set_serial_console(unsigned portnr)
{
	if (portnr >= ATMEL_MAX_UART)
		return;
	atmel_default_console_device = at91_uarts[portnr];
}
/*
 * at91_add_device_serial() - register every UART previously selected via
 * at91_register_uart()/at91_init_serial(), and warn if no console was set.
 */
void __init at91_add_device_serial(void)
{
	int i;

	for (i = 0; i < ATMEL_MAX_UART; i++) {
		if (at91_uarts[i])
			platform_device_register(at91_uarts[i]);
	}

	if (!atmel_default_console_device)
		printk(KERN_INFO "AT91: No default serial console defined.\n");
}
#else
void __init __deprecated at91_init_serial(struct at91_uart_config *config) {}
void __init at91_register_uart(unsigned id, unsigned portnr, unsigned pins) {}
void __init at91_set_serial_console(unsigned portnr) {}
void __init at91_add_device_serial(void) {}
#endif
/* -------------------------------------------------------------------- */

/*
 * These devices are always present and don't need any board-specific
 * setup.
 */
static int __init at91_add_standard_devices(void)
{
	at91_add_device_rtc();
	at91_add_device_watchdog();
	at91_add_device_tc();
	return 0;
}

arch_initcall(at91_add_standard_devices);
| gpl-2.0 |
ohporter/linux-am33x | drivers/staging/iio/imu/adis16400_trigger.c | 2701 | 1734 | #include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mutex.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/sysfs.h>
#include <linux/list.h>
#include <linux/spi/spi.h>
#include "../iio.h"
#include "../sysfs.h"
#include "../trigger.h"
#include "adis16400.h"
/**
 * adis16400_data_rdy_trigger_set_state() - enable/disable the data-ready
 * interrupt on the device behind this trigger.
 * @trig:	trigger whose private_data is the owning iio_dev
 * @state:	true to enable the DATA_RDY interrupt, false to disable it
 **/
static int adis16400_data_rdy_trigger_set_state(struct iio_trigger *trig,
						bool state)
{
	struct iio_dev *indio_dev = trig->private_data;

	dev_dbg(&indio_dev->dev, "%s (%d)\n", __func__, state);
	return adis16400_set_irq(indio_dev, state);
}
/*
 * adis16400_probe_trigger() - allocate and register the data-ready trigger.
 * @indio_dev:	device whose SPI irq drives the trigger
 *
 * Allocates a trigger named "<chip>-dev<N>", hooks the device's irq to the
 * generic data-ready poll handler, registers the trigger, and only then
 * installs it as the device's default trigger.
 *
 * Fix: the original assigned indio_dev->trig *before* checking the result
 * of iio_trigger_register(), so a registration failure left indio_dev->trig
 * pointing at the trigger that was immediately freed on the error path.
 * The default trigger is now published only after registration succeeds.
 *
 * Returns 0 on success or a negative errno.
 */
int adis16400_probe_trigger(struct iio_dev *indio_dev)
{
	int ret;
	struct adis16400_state *st = iio_priv(indio_dev);

	st->trig = iio_allocate_trigger("%s-dev%d",
					spi_get_device_id(st->us)->name,
					indio_dev->id);
	if (st->trig == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}

	ret = request_irq(st->us->irq,
			  &iio_trigger_generic_data_rdy_poll,
			  IRQF_TRIGGER_RISING,
			  "adis16400",
			  st->trig);
	if (ret)
		goto error_free_trig;

	st->trig->dev.parent = &st->us->dev;
	st->trig->owner = THIS_MODULE;
	st->trig->private_data = indio_dev;
	st->trig->set_trigger_state = &adis16400_data_rdy_trigger_set_state;
	ret = iio_trigger_register(st->trig);
	if (ret)
		goto error_free_irq;

	/* select default trigger */
	indio_dev->trig = st->trig;

	return 0;

error_free_irq:
	free_irq(st->us->irq, st->trig);
error_free_trig:
	iio_free_trigger(st->trig);
error_ret:
	return ret;
}
/*
 * adis16400_remove_trigger() - undo adis16400_probe_trigger() in reverse
 * order: unregister the trigger, release its irq, then free it.
 */
void adis16400_remove_trigger(struct iio_dev *indio_dev)
{
	struct adis16400_state *st = iio_priv(indio_dev);

	iio_trigger_unregister(st->trig);
	free_irq(st->us->irq, st->trig);
	iio_free_trigger(st->trig);
}
| gpl-2.0 |
Andrew-Gazizov/linux-beagle-npi | drivers/staging/iio/adc/ad7606_core.c | 2701 | 14507 | /*
* AD7606 SPI ADC driver
*
* Copyright 2011 Analog Devices Inc.
*
* Licensed under the GPL-2.
*/
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/regulator/consumer.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include "../iio.h"
#include "../sysfs.h"
#include "../ring_generic.h"
#include "adc.h"
#include "ad7606.h"
/*
 * Pulse the RESET line high for at least 100 ns to reset the converter.
 * Returns 0 on success, or -ENODEV when the board wired up no reset GPIO.
 */
int ad7606_reset(struct ad7606_state *st)
{
	if (!st->have_reset)
		return -ENODEV;

	gpio_set_value(st->pdata->gpio_reset, 1);
	ndelay(100);			/* t_reset >= 100ns */
	gpio_set_value(st->pdata->gpio_reset, 0);
	return 0;
}
/*
 * ad7606_scan_direct() - single polled conversion of all channels.
 * @indio_dev:	the IIO device
 * @ch:		index of the channel whose sample to return
 *
 * Raises CONVST to start a conversion, sleeps until the BUSY interrupt
 * handler sets st->done, then reads the whole channel block over the bus.
 * When a FRSTDATA GPIO is available, the first channel is read alone and
 * FRSTDATA is checked to verify the read really started at channel 0
 * (-EIO and a device reset otherwise).  CONVST is lowered again on every
 * exit path.
 *
 * Returns the raw sample for @ch, or a negative errno.
 */
static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned ch)
{
	struct ad7606_state *st = iio_priv(indio_dev);
	int ret;

	st->done = false;
	gpio_set_value(st->pdata->gpio_convst, 1);

	ret = wait_event_interruptible(st->wq_data_avail, st->done);
	if (ret)
		goto error_ret;

	if (st->have_frstdata) {
		ret = st->bops->read_block(st->dev, 1, st->data);
		if (ret)
			goto error_ret;
		if (!gpio_get_value(st->pdata->gpio_frstdata)) {
			/* This should never happen */
			ad7606_reset(st);
			ret = -EIO;
			goto error_ret;
		}
		ret = st->bops->read_block(st->dev,
			st->chip_info->num_channels - 1, &st->data[1]);
		if (ret)
			goto error_ret;
	} else {
		ret = st->bops->read_block(st->dev,
			st->chip_info->num_channels, st->data);
		if (ret)
			goto error_ret;
	}

	ret = st->data[ch];

error_ret:
	gpio_set_value(st->pdata->gpio_convst, 0);

	return ret;
}
/*
 * ad7606_read_raw() - IIO read callback.
 *
 * m == 0: raw channel value, taken from the ring buffer when buffered
 * capture is running, otherwise via a direct polled conversion; returned
 * sign-extended through the (short) cast.
 * m == SCALE: volts-per-LSB derived from the configured input range and
 * the channel's resolution, as integer + micro parts.
 */
static int ad7606_read_raw(struct iio_dev *indio_dev,
			   struct iio_chan_spec const *chan,
			   int *val,
			   int *val2,
			   long m)
{
	int ret;
	struct ad7606_state *st = iio_priv(indio_dev);
	unsigned int scale_uv;

	switch (m) {
	case 0:
		mutex_lock(&indio_dev->mlock);
		if (iio_ring_enabled(indio_dev))
			ret = ad7606_scan_from_ring(indio_dev, chan->address);
		else
			ret = ad7606_scan_direct(indio_dev, chan->address);
		mutex_unlock(&indio_dev->mlock);

		if (ret < 0)
			return ret;
		*val = (short) ret;
		return IIO_VAL_INT;
	case (1 << IIO_CHAN_INFO_SCALE_SHARED):
		/* range is in mV; *2 for the bipolar span, >> bits for LSB size */
		scale_uv = (st->range * 1000 * 2)
			>> st->chip_info->channels[0].scan_type.realbits;
		*val =  scale_uv / 1000;
		*val2 = (scale_uv % 1000) * 1000;
		return IIO_VAL_INT_PLUS_MICRO;
	}
	return -EINVAL;
}
/* sysfs "range" show hook: print the configured input range in mV. */
static ssize_t ad7606_show_range(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct iio_dev *dev_info = dev_get_drvdata(dev);
	struct ad7606_state *st = iio_priv(dev_info);

	return sprintf(buf, "%u\n", st->range);
}
/*
 * sysfs "range" store hook: accept 5000 or 10000 (mV), drive the RANGE
 * GPIO accordingly (high selects the 10 V span) and cache the value.
 */
static ssize_t ad7606_store_range(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad7606_state *st = iio_priv(indio_dev);
	unsigned long lval;

	if (strict_strtoul(buf, 10, &lval))
		return -EINVAL;
	if (!(lval == 5000 || lval == 10000)) {
		dev_err(dev, "range is not supported\n");
		return -EINVAL;
	}
	mutex_lock(&indio_dev->mlock);
	gpio_set_value(st->pdata->gpio_range, lval == 10000);
	st->range = lval;
	mutex_unlock(&indio_dev->mlock);

	return count;
}

static IIO_DEVICE_ATTR(range, S_IRUGO | S_IWUSR, \
		       ad7606_show_range, ad7606_store_range, 0);
static IIO_CONST_ATTR(range_available, "5000 10000");	/* mV */
/* sysfs "oversampling_ratio" show hook: print the cached ratio. */
static ssize_t ad7606_show_oversampling_ratio(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad7606_state *st = iio_priv(indio_dev);

	return sprintf(buf, "%u\n", st->oversampling);
}
/*
 * Map a requested oversampling ratio to its index, which is also the
 * 3-bit code driven onto the OS[2:0] pins.
 * Returns 0..6, or -EINVAL if the ratio is unsupported.
 *
 * The lookup table is now static const instead of an automatic array,
 * so it lives in rodata rather than being rebuilt on the stack for
 * every sysfs write.
 */
static int ad7606_oversampling_get_index(unsigned val)
{
	static const unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64};
	int i;

	for (i = 0; i < ARRAY_SIZE(supported); i++)
		if (val == supported[i])
			return i;

	return -EINVAL;
}
/*
 * sysfs "oversampling_ratio" store hook: validate the requested ratio,
 * drive its 3-bit index onto the OS0/OS1/OS2 pins and cache the value.
 *
 * Fix: bit 2 of the index was written to gpio_os1 instead of gpio_os2,
 * so the OS2 pin was never driven and the second write clobbered OS1 —
 * ratios needing bit 2 (16/32/64) configured the wrong mode.
 */
static ssize_t ad7606_store_oversampling_ratio(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad7606_state *st = iio_priv(indio_dev);
	unsigned long lval;
	int ret;

	if (strict_strtoul(buf, 10, &lval))
		return -EINVAL;

	ret = ad7606_oversampling_get_index(lval);
	if (ret < 0) {
		dev_err(dev, "oversampling %lu is not supported\n", lval);
		return ret;
	}

	mutex_lock(&indio_dev->mlock);
	gpio_set_value(st->pdata->gpio_os0, (ret >> 0) & 1);
	gpio_set_value(st->pdata->gpio_os1, (ret >> 1) & 1);
	gpio_set_value(st->pdata->gpio_os2, (ret >> 2) & 1);	/* was gpio_os1 */
	st->oversampling = lval;
	mutex_unlock(&indio_dev->mlock);

	return count;
}
static IIO_DEVICE_ATTR(oversampling_ratio, S_IRUGO | S_IWUSR,
		       ad7606_show_oversampling_ratio,
		       ad7606_store_oversampling_ratio, 0);
static IIO_CONST_ATTR(oversampling_ratio_available, "0 2 4 8 16 32 64");

/* all attributes; per-board visibility is filtered in ad7606_attr_is_visible() */
static struct attribute *ad7606_attributes[] = {
	&iio_dev_attr_range.dev_attr.attr,
	&iio_const_attr_range_available.dev_attr.attr,
	&iio_dev_attr_oversampling_ratio.dev_attr.attr,
	&iio_const_attr_oversampling_ratio_available.dev_attr.attr,
	NULL,
};
/*
 * Hide the oversampling attributes when the OS pins weren't wired up, and
 * the range attributes when the RANGE pin wasn't, by forcing their sysfs
 * mode to 0.
 */
static mode_t ad7606_attr_is_visible(struct kobject *kobj,
				     struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct ad7606_state *st = iio_priv(indio_dev);

	mode_t mode = attr->mode;

	if (!st->have_os &&
		(attr == &iio_dev_attr_oversampling_ratio.dev_attr.attr ||
		attr ==
		&iio_const_attr_oversampling_ratio_available.dev_attr.attr))
		mode = 0;
	else if (!st->have_range &&
		(attr == &iio_dev_attr_range.dev_attr.attr ||
		attr == &iio_const_attr_range_available.dev_attr.attr))
		mode = 0;

	return mode;
}

static const struct attribute_group ad7606_attribute_group = {
	.attrs = ad7606_attributes,
	.is_visible = ad7606_attr_is_visible,
};
static struct iio_chan_spec ad7606_8_channels[] = {
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
1, 1, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
2, 2, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
3, 3, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 4, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
4, 4, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 5, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
5, 5, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 6, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
6, 6, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 7, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
7, 7, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(8),
};
static struct iio_chan_spec ad7606_6_channels[] = {
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
1, 1, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
2, 2, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
3, 3, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 4, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
4, 4, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 5, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
5, 5, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(6),
};
static struct iio_chan_spec ad7606_4_channels[] = {
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 0, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
0, 0, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 1, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
1, 1, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 2, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
2, 2, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN(IIO_IN, 0, 1, 0, NULL, 3, 0,
(1 << IIO_CHAN_INFO_SCALE_SHARED),
3, 3, IIO_ST('s', 16, 16, 0), 0),
IIO_CHAN_SOFT_TIMESTAMP(4),
};
/*
 * Per-variant data, indexed by the ID_AD7606_* enum passed to
 * ad7606_probe(). All variants use the 2.5 V internal reference;
 * they differ only in channel count.
 */
static const struct ad7606_chip_info ad7606_chip_info_tbl[] = {
	/*
	 * More devices added in future
	 */
	[ID_AD7606_8] = {
		.name = "ad7606",
		.int_vref_mv = 2500,
		.channels = ad7606_8_channels,
		.num_channels = 8,
	},
	[ID_AD7606_6] = {
		.name = "ad7606-6",
		.int_vref_mv = 2500,
		.channels = ad7606_6_channels,
		.num_channels = 6,
	},
	[ID_AD7606_4] = {
		.name = "ad7606-4",
		.int_vref_mv = 2500,
		.channels = ad7606_4_channels,
		.num_channels = 4,
	},
};
/*
 * ad7606_request_gpios() - claim the GPIOs wired to the converter.
 *
 * Only CONVST is mandatory; its failure aborts the probe. Every other
 * line (oversampling OS0..OS2, RESET, RANGE, STBY, FRSTDATA) is
 * optional: a failed request is silently tolerated and the matching
 * st->have_* flag records what was actually acquired, so that
 * ad7606_free_gpios() releases exactly that set.
 *
 * Returns 0 on success, or the gpio_request_one() error for CONVST.
 */
static int ad7606_request_gpios(struct ad7606_state *st)
{
	struct gpio os_gpios[3] = {
		{
			.gpio = st->pdata->gpio_os0,
			.flags = GPIOF_DIR_OUT | ((st->oversampling & 1) ?
				 GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
			.label = "AD7606_OS0",
		}, {
			.gpio = st->pdata->gpio_os1,
			.flags = GPIOF_DIR_OUT | ((st->oversampling & 2) ?
				 GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
			.label = "AD7606_OS1",
		}, {
			.gpio = st->pdata->gpio_os2,
			.flags = GPIOF_DIR_OUT | ((st->oversampling & 4) ?
				 GPIOF_INIT_HIGH : GPIOF_INIT_LOW),
			.label = "AD7606_OS2",
		},
	};
	int ret;

	/* CONVST is the only line the driver cannot work without. */
	ret = gpio_request_one(st->pdata->gpio_convst, GPIOF_OUT_INIT_LOW,
			       "AD7606_CONVST");
	if (ret) {
		dev_err(st->dev, "failed to request GPIO CONVST\n");
		return ret;
	}

	/* Oversampling pins are requested (and later freed) as a group. */
	if (!gpio_request_array(os_gpios, ARRAY_SIZE(os_gpios)))
		st->have_os = true;

	if (!gpio_request_one(st->pdata->gpio_reset, GPIOF_OUT_INIT_LOW,
			      "AD7606_RESET"))
		st->have_reset = true;

	/* RANGE output reflects the configured input span at request time. */
	if (!gpio_request_one(st->pdata->gpio_range, GPIOF_DIR_OUT |
			      ((st->range == 10000) ? GPIOF_INIT_HIGH :
			       GPIOF_INIT_LOW), "AD7606_RANGE"))
		st->have_range = true;

	/* STBY is driven high so the part starts out of standby. */
	if (!gpio_request_one(st->pdata->gpio_stby, GPIOF_OUT_INIT_HIGH,
			      "AD7606_STBY"))
		st->have_stby = true;

	if (gpio_is_valid(st->pdata->gpio_frstdata) &&
	    !gpio_request_one(st->pdata->gpio_frstdata, GPIOF_IN,
			      "AD7606_FRSTDATA"))
		st->have_frstdata = true;

	return 0;
}
/*
 * ad7606_free_gpios() - release every GPIO acquired by
 * ad7606_request_gpios(). The st->have_* flags gate each optional
 * line; CONVST was mandatory, so it is freed unconditionally.
 */
static void ad7606_free_gpios(struct ad7606_state *st)
{
	if (st->have_range)
		gpio_free(st->pdata->gpio_range);

	if (st->have_stby)
		gpio_free(st->pdata->gpio_stby);

	/* The three oversampling pins were requested as a group. */
	if (st->have_os) {
		gpio_free(st->pdata->gpio_os0);
		gpio_free(st->pdata->gpio_os1);
		gpio_free(st->pdata->gpio_os2);
	}

	if (st->have_reset)
		gpio_free(st->pdata->gpio_reset);

	if (st->have_frstdata)
		gpio_free(st->pdata->gpio_frstdata);

	gpio_free(st->pdata->gpio_convst);
}
/**
 * ad7606_interrupt() - BUSY-line interrupt handler
 * @irq:	interrupt number (unused here)
 * @dev_id:	the struct iio_dev registered via request_irq()
 *
 * Runs when the converter signals that sample data is ready. In ring
 * (buffered) mode the actual register read is deferred to process
 * context through st->poll_work; in direct mode the sleeper waiting on
 * st->wq_data_avail is woken instead.
 *
 * Return: IRQ_HANDLED in all cases.
 */
static irqreturn_t ad7606_interrupt(int irq, void *dev_id)
{
	struct iio_dev *indio_dev = dev_id;
	struct ad7606_state *st = iio_priv(indio_dev);

	if (iio_ring_enabled(indio_dev)) {
		/* Don't queue the work item again while it is pending. */
		if (!work_pending(&st->poll_work))
			schedule_work(&st->poll_work);
	} else {
		st->done = true;
		wake_up_interruptible(&st->wq_data_avail);
	}
	return IRQ_HANDLED;
}
/* IIO core hooks: sysfs attributes and the raw-read path. */
static const struct iio_info ad7606_info = {
	.driver_module = THIS_MODULE,
	.read_raw = &ad7606_read_raw,
	.attrs = &ad7606_attribute_group,
};
/**
 * ad7606_probe() - bus-independent part of the AD7606 probe
 * @base_address: MMIO base for parallel-bus variants (may be unused by SPI)
 * @id: index into ad7606_chip_info_tbl selecting the chip variant
 * @bops: bus-specific read operations supplied by the SPI/parallel glue
 *
 * Allocates the IIO device, applies platform data (range, oversampling),
 * enables the regulator if present, claims GPIOs and the data-ready IRQ,
 * sets up the ring buffer and registers with the IIO core.
 *
 * Returns the iio_dev on success or an ERR_PTR(); the error labels
 * unwind in strict reverse order of acquisition.
 */
struct iio_dev *ad7606_probe(struct device *dev, int irq,
			     void __iomem *base_address,
			     unsigned id,
			     const struct ad7606_bus_ops *bops)
{
	struct ad7606_platform_data *pdata = dev->platform_data;
	struct ad7606_state *st;
	int ret, regdone = 0;
	struct iio_dev *indio_dev = iio_allocate_device(sizeof(*st));

	if (indio_dev == NULL) {
		ret = -ENOMEM;
		goto error_ret;
	}
	st = iio_priv(indio_dev);
	st->dev = dev;
	st->id = id;
	st->irq = irq;
	st->bops = bops;
	st->base_address = base_address;
	/* Only the two datasheet spans are valid; anything else -> 5 V. */
	st->range = pdata->default_range == 10000 ? 10000 : 5000;

	ret = ad7606_oversampling_get_index(pdata->default_os);
	if (ret < 0) {
		/* Unsupported ratio: warn and fall back to no oversampling. */
		dev_warn(dev, "oversampling %d is not supported\n",
			 pdata->default_os);
		st->oversampling = 0;
	} else {
		st->oversampling = pdata->default_os;
	}

	/* The supply is optional; all later paths re-check IS_ERR(). */
	st->reg = regulator_get(dev, "vcc");
	if (!IS_ERR(st->reg)) {
		ret = regulator_enable(st->reg);
		if (ret)
			goto error_put_reg;
	}

	st->pdata = pdata;
	st->chip_info = &ad7606_chip_info_tbl[id];

	indio_dev->dev.parent = dev;
	indio_dev->info = &ad7606_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->name = st->chip_info->name;
	indio_dev->channels = st->chip_info->channels;
	indio_dev->num_channels = st->chip_info->num_channels;

	init_waitqueue_head(&st->wq_data_avail);

	ret = ad7606_request_gpios(st);
	if (ret)
		goto error_disable_reg;

	/* RESET GPIO is optional, so a failed reset is only a warning. */
	ret = ad7606_reset(st);
	if (ret)
		dev_warn(st->dev, "failed to RESET: no RESET GPIO specified\n");

	ret = request_irq(st->irq, ad7606_interrupt,
			  IRQF_TRIGGER_FALLING, st->chip_info->name, indio_dev);
	if (ret)
		goto error_free_gpios;

	ret = ad7606_register_ring_funcs_and_init(indio_dev);
	if (ret)
		goto error_free_irq;

	ret = iio_device_register(indio_dev);
	if (ret)
		goto error_free_irq;
	/* After this point the device must be unregistered, not freed. */
	regdone = 1;

	ret = iio_ring_buffer_register_ex(indio_dev->ring, 0,
					  indio_dev->channels,
					  indio_dev->num_channels);
	if (ret)
		goto error_cleanup_ring;

	return indio_dev;

error_cleanup_ring:
	ad7606_ring_cleanup(indio_dev);
error_free_irq:
	free_irq(st->irq, indio_dev);
error_free_gpios:
	ad7606_free_gpios(st);
error_disable_reg:
	if (!IS_ERR(st->reg))
		regulator_disable(st->reg);
error_put_reg:
	if (!IS_ERR(st->reg))
		regulator_put(st->reg);
	if (regdone)
		iio_device_unregister(indio_dev);
	else
		iio_free_device(indio_dev);
error_ret:
	return ERR_PTR(ret);
}
/*
 * ad7606_remove() - tear down everything ad7606_probe() set up,
 * in reverse order of acquisition: ring buffer, IRQ, regulator,
 * GPIOs, then the IIO registration itself.
 */
int ad7606_remove(struct iio_dev *indio_dev)
{
	struct ad7606_state *st = iio_priv(indio_dev);

	iio_ring_buffer_unregister(indio_dev->ring);
	ad7606_ring_cleanup(indio_dev);

	free_irq(st->irq, indio_dev);
	/* Regulator was optional in probe; mirror the IS_ERR() check. */
	if (!IS_ERR(st->reg)) {
		regulator_disable(st->reg);
		regulator_put(st->reg);
	}

	ad7606_free_gpios(st);
	iio_device_unregister(indio_dev);

	return 0;
}
/*
 * ad7606_suspend() - put the converter into standby.
 *
 * Possible only when the STBY GPIO was acquired; RANGE is driven high
 * first when available, then STBY is pulled low to enter standby.
 */
void ad7606_suspend(struct iio_dev *indio_dev)
{
	struct ad7606_state *st = iio_priv(indio_dev);

	if (!st->have_stby)
		return;

	if (st->have_range)
		gpio_set_value(st->pdata->gpio_range, 1);
	gpio_set_value(st->pdata->gpio_stby, 0);
}
/*
 * ad7606_resume() - wake the converter from standby.
 *
 * Restores the configured input range on the RANGE pin when available,
 * releases STBY, and issues a reset so the device returns to a known
 * state. A no-op when the STBY GPIO was never acquired.
 */
void ad7606_resume(struct iio_dev *indio_dev)
{
	struct ad7606_state *st = iio_priv(indio_dev);

	if (!st->have_stby)
		return;

	if (st->have_range)
		gpio_set_value(st->pdata->gpio_range,
			       st->range == 10000);
	gpio_set_value(st->pdata->gpio_stby, 1);
	ad7606_reset(st);
}
MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
MODULE_DESCRIPTION("Analog Devices AD7606 ADC");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
faux123/flounder | arch/x86/ia32/sys_ia32.c | 3213 | 7056 | /*
* sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Based on
* sys_sparc32
*
* Copyright (C) 2000 VA Linux Co
* Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
* Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
* Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 2000 Hewlett-Packard Co.
* Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 2000,2001,2002 Andi Kleen, SuSE Labs (x86-64 port)
*
* These routines maintain argument size conversion between 32bit and 64bit
* environment. In 2.5 most of this should be moved to a generic directory.
*
* This file assumes that there is a hole at the end of user address space.
*
* Some of the functions are LE specific currently. These are
* hopefully all marked. This should be fixed.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/times.h>
#include <linux/utsname.h>
#include <linux/mm.h>
#include <linux/uio.h>
#include <linux/poll.h>
#include <linux/personality.h>
#include <linux/stat.h>
#include <linux/rwsem.h>
#include <linux/compat.h>
#include <linux/vfs.h>
#include <linux/ptrace.h>
#include <linux/highuid.h>
#include <linux/sysctl.h>
#include <linux/slab.h>
#include <asm/mman.h>
#include <asm/types.h>
#include <asm/uaccess.h>
#include <linux/atomic.h>
#include <asm/vgtod.h>
#include <asm/sys_ia32.h>
/* Zero-extend a 32-bit user-supplied value to unsigned long. */
#define AA(__x) ((unsigned long)(__x))
/* 32-bit truncate64: the 64-bit length arrives split into two halves. */
asmlinkage long sys32_truncate64(const char __user *filename,
				 unsigned long offset_low,
				 unsigned long offset_high)
{
	loff_t length = ((loff_t) offset_high << 32) | offset_low;

	return sys_truncate(filename, length);
}
asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
unsigned long offset_high)
{
return sys_ftruncate(fd, ((loff_t) offset_high << 32) | offset_low);
}
/*
 * Another set for IA32/LFS -- x86_64 struct stat is different due to
 * support for 64bit inode numbers.
 */
static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
{
	typeof(ubuf->st_uid) uid = 0;
	typeof(ubuf->st_gid) gid = 0;

	/* Translate kernel uid/gid into the caller's user namespace. */
	SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
	SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
	/*
	 * One access_ok() up front, then unchecked __put_user() per field;
	 * any single failing store collapses the chain to -EFAULT.
	 */
	if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
	    __put_user(huge_encode_dev(stat->dev), &ubuf->st_dev) ||
	    __put_user(stat->ino, &ubuf->__st_ino) ||
	    __put_user(stat->ino, &ubuf->st_ino) ||
	    __put_user(stat->mode, &ubuf->st_mode) ||
	    __put_user(stat->nlink, &ubuf->st_nlink) ||
	    __put_user(uid, &ubuf->st_uid) ||
	    __put_user(gid, &ubuf->st_gid) ||
	    __put_user(huge_encode_dev(stat->rdev), &ubuf->st_rdev) ||
	    __put_user(stat->size, &ubuf->st_size) ||
	    __put_user(stat->atime.tv_sec, &ubuf->st_atime) ||
	    __put_user(stat->atime.tv_nsec, &ubuf->st_atime_nsec) ||
	    __put_user(stat->mtime.tv_sec, &ubuf->st_mtime) ||
	    __put_user(stat->mtime.tv_nsec, &ubuf->st_mtime_nsec) ||
	    __put_user(stat->ctime.tv_sec, &ubuf->st_ctime) ||
	    __put_user(stat->ctime.tv_nsec, &ubuf->st_ctime_nsec) ||
	    __put_user(stat->blksize, &ubuf->st_blksize) ||
	    __put_user(stat->blocks, &ubuf->st_blocks))
		return -EFAULT;
	return 0;
}
/* stat64(2) for 32-bit tasks: vfs_stat() plus 32-bit layout conversion. */
asmlinkage long sys32_stat64(const char __user *filename,
			     struct stat64 __user *statbuf)
{
	struct kstat stat;
	int ret = vfs_stat(filename, &stat);

	if (ret)
		return ret;
	return cp_stat64(statbuf, &stat);
}
/* lstat64(2) for 32-bit tasks: like sys32_stat64 but without following symlinks. */
asmlinkage long sys32_lstat64(const char __user *filename,
			      struct stat64 __user *statbuf)
{
	struct kstat stat;
	int ret = vfs_lstat(filename, &stat);

	if (ret)
		return ret;
	return cp_stat64(statbuf, &stat);
}
/* fstat64(2) for 32-bit tasks: stat by descriptor, then convert layout. */
asmlinkage long sys32_fstat64(unsigned int fd, struct stat64 __user *statbuf)
{
	struct kstat stat;
	int ret = vfs_fstat(fd, &stat);

	if (ret)
		return ret;
	return cp_stat64(statbuf, &stat);
}
/* fstatat(2) for 32-bit tasks: dirfd-relative stat plus layout conversion. */
asmlinkage long sys32_fstatat(unsigned int dfd, const char __user *filename,
			      struct stat64 __user *statbuf, int flag)
{
	struct kstat stat;
	int error = vfs_fstatat(dfd, filename, &stat, flag);

	if (!error)
		error = cp_stat64(statbuf, &stat);
	return error;
}
/*
 * Linux/i386 didn't use to be able to handle more than
 * 4 system call parameters, so these system calls used a memory
 * block for parameter passing..
 */
/* Layout of the old_mmap argument block as laid out by 32-bit userspace. */
struct mmap_arg_struct32 {
	unsigned int addr;
	unsigned int len;
	unsigned int prot;
	unsigned int flags;
	unsigned int fd;
	unsigned int offset;	/* byte offset, must be page aligned */
};
/* old_mmap(2) for 32-bit tasks: arguments arrive in a user memory block. */
asmlinkage long sys32_mmap(struct mmap_arg_struct32 __user *arg)
{
	struct mmap_arg_struct32 a;
	unsigned long pgoff;

	if (copy_from_user(&a, arg, sizeof(a)))
		return -EFAULT;

	/* The legacy ABI passes a byte offset; it must be page aligned. */
	if (a.offset & ~PAGE_MASK)
		return -EINVAL;
	pgoff = a.offset >> PAGE_SHIFT;

	return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, pgoff);
}
/* waitpid(2) for 32-bit tasks: wait4 without a rusage buffer. */
asmlinkage long sys32_waitpid(compat_pid_t pid, unsigned int __user *stat_addr,
			      int options)
{
	return compat_sys_wait4(pid, stat_addr, options, NULL);
}
/*
 * warning: this and sys32_pwrite assume little endian -- poslo carries
 * the low 32 bits and poshi the high 32 bits of the file position.
 */
asmlinkage long sys32_pread(unsigned int fd, char __user *ubuf, u32 count,
			    u32 poslo, u32 poshi)
{
	loff_t pos = ((loff_t)AA(poshi) << 32) | AA(poslo);

	return sys_pread64(fd, ubuf, count, pos);
}
/* pwrite64 counterpart; same little-endian split-position convention. */
asmlinkage long sys32_pwrite(unsigned int fd, const char __user *ubuf,
			     u32 count, u32 poslo, u32 poshi)
{
	loff_t pos = ((loff_t)AA(poshi) << 32) | AA(poslo);

	return sys_pwrite64(fd, ubuf, count, pos);
}
/*
 * Some system calls that need sign extended arguments. This could be
 * done by a generic wrapper.
 */
long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
			__u32 len_low, __u32 len_high, int advice)
{
	u64 offset = ((u64)offset_high << 32) | offset_low;
	u64 len = ((u64)len_high << 32) | len_low;

	return sys_fadvise64_64(fd, offset, len, advice);
}
/*
 * Stub for vm86, which does not exist on 64-bit kernels. Logs one
 * warning per command name and always fails with -ENOSYS.
 *
 * NOTE(review): lastcomm is unlocked static state, so concurrent
 * callers can race; worst case is a duplicated log line.
 */
long sys32_vm86_warning(void)
{
	struct task_struct *me = current;
	static char lastcomm[sizeof(me->comm)];

	/* Only print when a different program trips this than last time. */
	if (strncmp(lastcomm, me->comm, sizeof(lastcomm))) {
		compat_printk(KERN_INFO
			      "%s: vm86 mode not supported on 64 bit kernel\n",
			      me->comm);
		strncpy(lastcomm, me->comm, sizeof(lastcomm));
	}
	return -ENOSYS;
}
/* readahead(2) for 32-bit tasks: reassemble the split 64-bit offset. */
asmlinkage ssize_t sys32_readahead(int fd, unsigned off_lo, unsigned off_hi,
				   size_t count)
{
	loff_t offset = ((u64)off_hi << 32) | off_lo;

	return sys_readahead(fd, offset, count);
}
asmlinkage long sys32_sync_file_range(int fd, unsigned off_low, unsigned off_hi,
unsigned n_low, unsigned n_hi, int flags)
{
return sys_sync_file_range(fd,
((u64)off_hi << 32) | off_low,
((u64)n_hi << 32) | n_low, flags);
}
/* fadvise64(2) for 32-bit tasks: forwarded to the 64/64 variant. */
asmlinkage long sys32_fadvise64(int fd, unsigned offset_lo, unsigned offset_hi,
				size_t len, int advice)
{
	loff_t offset = ((u64)offset_hi << 32) | offset_lo;

	return sys_fadvise64_64(fd, offset, len, advice);
}
asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_lo,
unsigned offset_hi, unsigned len_lo,
unsigned len_hi)
{
return sys_fallocate(fd, mode, ((u64)offset_hi << 32) | offset_lo,
((u64)len_hi << 32) | len_lo);
}
| gpl-2.0 |
ML-Design/ta-kernel | drivers/staging/octeon/cvmx-helper-sgmii.c | 4749 | 17037 | /***********************license start***************
* Author: Cavium Networks
*
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
* Copyright (c) 2003-2008 Cavium Networks
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
* published by the Free Software Foundation.
*
* This file is distributed in the hope that it will be useful, but
* AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
* NONINFRINGEMENT. See the GNU General Public License for more
* details.
*
* You should have received a copy of the GNU General Public License
* along with this file; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
* or visit http://www.gnu.org/licenses/.
*
* This file may also be available under a different license from Cavium.
* Contact Cavium Networks for more information
***********************license end**************************************/
/*
* Functions for SGMII initialization, configuration,
* and monitoring.
*/
#include <asm/octeon/octeon.h>
#include "cvmx-config.h"
#include "cvmx-mdio.h"
#include "cvmx-helper.h"
#include "cvmx-helper-board.h"
#include "cvmx-gmxx-defs.h"
#include "cvmx-pcsx-defs.h"
void __cvmx_interrupt_gmxx_enable(int interface);
void __cvmx_interrupt_pcsx_intx_en_reg_enable(int index, int block);
void __cvmx_interrupt_pcsxx_int_en_reg_enable(int index);
/**
 * Perform initialization required only once for an SGMII port.
 *
 * @interface: Interface to init
 * @index: Index of prot on the interface
 *
 * Returns Zero on success, negative on failure
 */
static int __cvmx_helper_sgmii_hardware_init_one_time(int interface, int index)
{
	const uint64_t clock_mhz = cvmx_sysinfo_get()->cpu_clock_hz / 1000000;
	union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
	union cvmx_pcsx_linkx_timer_count_reg pcsx_linkx_timer_count_reg;
	union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;

	/* Disable GMX */
	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	gmxx_prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);

	/*
	 * Write PCS*_LINK*_TIMER_COUNT_REG[COUNT] with the
	 * appropriate value. 1000BASE-X specifies a 10ms
	 * interval. SGMII specifies a 1.6ms interval.
	 */
	pcs_misc_ctl_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
	pcsx_linkx_timer_count_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface));
	/* COUNT is in units of 1024 core-clock cycles, hence the >> 10. */
	if (pcs_misc_ctl_reg.s.mode) {
		/* 1000BASE-X */
		pcsx_linkx_timer_count_reg.s.count =
		    (10000ull * clock_mhz) >> 10;
	} else {
		/* SGMII */
		pcsx_linkx_timer_count_reg.s.count =
		    (1600ull * clock_mhz) >> 10;
	}
	cvmx_write_csr(CVMX_PCSX_LINKX_TIMER_COUNT_REG(index, interface),
		       pcsx_linkx_timer_count_reg.u64);

	/*
	 * Write the advertisement register to be used as the
	 * tx_Config_Reg<D15:D0> of the autonegotiation. In
	 * 1000BASE-X mode, tx_Config_Reg<D15:D0> is PCS*_AN*_ADV_REG.
	 * In SGMII PHY mode, tx_Config_Reg<D15:D0> is
	 * PCS*_SGM*_AN_ADV_REG. In SGMII MAC mode,
	 * tx_Config_Reg<D15:D0> is the fixed value 0x4001, so this
	 * step can be skipped.
	 */
	if (pcs_misc_ctl_reg.s.mode) {
		/* 1000BASE-X: advertise full and half duplex, pause both ways */
		union cvmx_pcsx_anx_adv_reg pcsx_anx_adv_reg;

		pcsx_anx_adv_reg.u64 =
		    cvmx_read_csr(CVMX_PCSX_ANX_ADV_REG(index, interface));
		pcsx_anx_adv_reg.s.rem_flt = 0;
		pcsx_anx_adv_reg.s.pause = 3;
		pcsx_anx_adv_reg.s.hfd = 1;
		pcsx_anx_adv_reg.s.fd = 1;
		cvmx_write_csr(CVMX_PCSX_ANX_ADV_REG(index, interface),
			       pcsx_anx_adv_reg.u64);
	} else {
		union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;

		pcsx_miscx_ctl_reg.u64 =
		    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
		if (pcsx_miscx_ctl_reg.s.mac_phy) {
			/* PHY Mode: advertise link up, full duplex, 1Gbps */
			union cvmx_pcsx_sgmx_an_adv_reg pcsx_sgmx_an_adv_reg;

			pcsx_sgmx_an_adv_reg.u64 =
			    cvmx_read_csr(CVMX_PCSX_SGMX_AN_ADV_REG
					  (index, interface));
			pcsx_sgmx_an_adv_reg.s.link = 1;
			pcsx_sgmx_an_adv_reg.s.dup = 1;
			pcsx_sgmx_an_adv_reg.s.speed = 2;
			cvmx_write_csr(CVMX_PCSX_SGMX_AN_ADV_REG
				       (index, interface),
				       pcsx_sgmx_an_adv_reg.u64);
		} else {
			/* MAC Mode - Nothing to do */
		}
	}
	return 0;
}
/**
 * Initialize the SERTES link for the first time or after a loss
 * of link.
 *
 * @interface: Interface to init
 * @index: Index of prot on the interface
 *
 * Returns Zero on success, negative on failure
 */
static int __cvmx_helper_sgmii_hardware_init_link(int interface, int index)
{
	union cvmx_pcsx_mrx_control_reg control_reg;

	/*
	 * Take PCS through a reset sequence.
	 * PCS*_MR*_CONTROL_REG[PWR_DN] should be cleared to zero.
	 * Write PCS*_MR*_CONTROL_REG[RESET]=1 (while not changing the
	 * value of the other PCS*_MR*_CONTROL_REG bits). Read
	 * PCS*_MR*_CONTROL_REG[RESET] until it changes value to
	 * zero.
	 */
	control_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
	/* The simulator has no real PCS, so skip the reset there. */
	if (cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) {
		control_reg.s.reset = 1;
		cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
			       control_reg.u64);
		if (CVMX_WAIT_FOR_FIELD64
		    (CVMX_PCSX_MRX_CONTROL_REG(index, interface),
		     union cvmx_pcsx_mrx_control_reg, reset, ==, 0, 10000)) {
			cvmx_dprintf("SGMII%d: Timeout waiting for port %d "
				     "to finish reset\n",
				     interface, index);
			return -1;
		}
	}

	/*
	 * Write PCS*_MR*_CONTROL_REG[RST_AN]=1 to ensure a fresh
	 * sgmii negotiation starts.
	 */
	control_reg.s.rst_an = 1;
	control_reg.s.an_en = 1;
	control_reg.s.pwr_dn = 0;
	cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
		       control_reg.u64);

	/*
	 * Wait for PCS*_MR*_STATUS_REG[AN_CPT] to be set, indicating
	 * that sgmii autonegotiation is complete. In MAC mode this
	 * isn't an ethernet link, but a link between Octeon and the
	 * PHY.
	 */
	if ((cvmx_sysinfo_get()->board_type != CVMX_BOARD_TYPE_SIM) &&
	    CVMX_WAIT_FOR_FIELD64(CVMX_PCSX_MRX_STATUS_REG(index, interface),
				  union cvmx_pcsx_mrx_status_reg, an_cpt, ==, 1,
				  10000)) {
		/* cvmx_dprintf("SGMII%d: Port %d link timeout\n", interface, index); */
		return -1;
	}
	return 0;
}
/**
 * Configure an SGMII link to the specified speed after the SERTES
 * link is up.
 *
 * @interface: Interface to init
 * @index: Index of prot on the interface
 * @link_info: Link state to configure
 *
 * Returns Zero on success, negative on failure
 */
static int __cvmx_helper_sgmii_hardware_init_link_speed(int interface,
							int index,
							cvmx_helper_link_info_t
							link_info)
{
	int is_enabled;
	union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
	union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;

	/* Disable GMX before we make any changes. Remember the enable state */
	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
	is_enabled = gmxx_prtx_cfg.s.en;
	gmxx_prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);

	/* Wait for GMX to be idle */
	if (CVMX_WAIT_FOR_FIELD64
	    (CVMX_GMXX_PRTX_CFG(index, interface), union cvmx_gmxx_prtx_cfg,
	     rx_idle, ==, 1, 10000)
	    || CVMX_WAIT_FOR_FIELD64(CVMX_GMXX_PRTX_CFG(index, interface),
				     union cvmx_gmxx_prtx_cfg, tx_idle, ==, 1,
				     10000)) {
		cvmx_dprintf
		    ("SGMII%d: Timeout waiting for port %d to be idle\n",
		     interface, index);
		return -1;
	}

	/* Read GMX CFG again to make sure the disable completed */
	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

	/*
	 * Get the misc control for PCS. We will need to set the
	 * duplication amount.
	 */
	pcsx_miscx_ctl_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));

	/*
	 * Use GMXENO to force the link down if the status we get says
	 * it should be down.
	 */
	pcsx_miscx_ctl_reg.s.gmxeno = !link_info.s.link_up;

	/* Only change the duplex setting if the link is up */
	if (link_info.s.link_up)
		gmxx_prtx_cfg.s.duplex = link_info.s.full_duplex;

	/* Do speed based setting for GMX */
	switch (link_info.s.speed) {
	case 10:
		gmxx_prtx_cfg.s.speed = 0;
		gmxx_prtx_cfg.s.speed_msb = 1;
		gmxx_prtx_cfg.s.slottime = 0;
		/* Setting from GMX-603 */
		pcsx_miscx_ctl_reg.s.samp_pt = 25;
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
		break;
	case 100:
		gmxx_prtx_cfg.s.speed = 0;
		gmxx_prtx_cfg.s.speed_msb = 0;
		gmxx_prtx_cfg.s.slottime = 0;
		pcsx_miscx_ctl_reg.s.samp_pt = 0x5;
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 64);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 0);
		break;
	case 1000:
		gmxx_prtx_cfg.s.speed = 1;
		gmxx_prtx_cfg.s.speed_msb = 0;
		gmxx_prtx_cfg.s.slottime = 1;
		pcsx_miscx_ctl_reg.s.samp_pt = 1;
		cvmx_write_csr(CVMX_GMXX_TXX_SLOT(index, interface), 512);
		cvmx_write_csr(CVMX_GMXX_TXX_BURST(index, interface), 8192);
		break;
	default:
		/* Unknown/zero speed: leave GMX speed fields unchanged */
		break;
	}

	/* Write the new misc control for PCS */
	cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
		       pcsx_miscx_ctl_reg.u64);

	/* Write the new GMX settings with the port still disabled */
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config completed */
	gmxx_prtx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));

	/* Restore the enabled / disabled state */
	gmxx_prtx_cfg.s.en = is_enabled;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface), gmxx_prtx_cfg.u64);

	return 0;
}
/**
 * Bring up the SGMII interface to be ready for packet I/O but
 * leave I/O disabled using the GMX override. This function
 * follows the bringup documented in 10.6.3 of the manual.
 *
 * @interface: Interface to bringup
 * @num_ports: Number of ports on the interface
 *
 * Returns Zero on success, negative on failure
 */
static int __cvmx_helper_sgmii_hardware_init(int interface, int num_ports)
{
	int port;

	__cvmx_helper_setup_gmx(interface, num_ports);

	for (port = 0; port < num_ports; port++) {
		int ipd_port = cvmx_helper_get_ipd_port(interface, port);

		__cvmx_helper_sgmii_hardware_init_one_time(interface, port);
		/* Apply whatever link state autonegotiation reports now. */
		__cvmx_helper_sgmii_link_set(ipd_port,
					     __cvmx_helper_sgmii_link_get
					     (ipd_port));
	}

	return 0;
}
/**
 * Probe a SGMII interface and determine the number of ports
 * connected to it. The SGMII interface should still be down after
 * this call.
 *
 * @interface: Interface to probe
 *
 * Returns Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_sgmii_probe(int interface)
{
	union cvmx_gmxx_inf_mode mode;

	/*
	 * Due to errata GMX-700 on CN56XXp1.x and CN52XXp1.x, the
	 * interface needs to be enabled before IPD otherwise per port
	 * backpressure may not work properly
	 */
	mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
	mode.s.en = 1;
	cvmx_write_csr(CVMX_GMXX_INF_MODE(interface), mode.u64);
	/* SGMII interfaces always expose four ports. */
	return 4;
}
/**
* Bringup and enable a SGMII interface. After this call packet
* I/O should be fully functional. This is called with IPD
* enabled but PKO disabled.
*
* @interface: Interface to bring up
*
* Returns Zero on success, negative on failure
*/
int __cvmx_helper_sgmii_enable(int interface)
{
int num_ports = cvmx_helper_ports_on_interface(interface);
int index;
__cvmx_helper_sgmii_hardware_init(interface, num_ports);
for (index = 0; index < num_ports; index++) {
union cvmx_gmxx_prtx_cfg gmxx_prtx_cfg;
gmxx_prtx_cfg.u64 =
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
gmxx_prtx_cfg.s.en = 1;
cvmx_write_csr(CVMX_GMXX_PRTX_CFG(index, interface),
gmxx_prtx_cfg.u64);
__cvmx_interrupt_pcsx_intx_en_reg_enable(index, interface);
}
__cvmx_interrupt_pcsxx_int_en_reg_enable(interface);
__cvmx_interrupt_gmxx_enable(interface);
return 0;
}
/**
 * Return the link state of an IPD/PKO port as returned by
 * auto negotiation. The result of this function may not match
 * Octeon's link config if auto negotiation has changed since
 * the last call to cvmx_helper_link_set().
 *
 * @ipd_port: IPD/PKO port to query
 *
 * Returns Link state
 */
cvmx_helper_link_info_t __cvmx_helper_sgmii_link_get(int ipd_port)
{
	cvmx_helper_link_info_t result;
	union cvmx_pcsx_miscx_ctl_reg pcs_misc_ctl_reg;
	int interface = cvmx_helper_get_interface_num(ipd_port);
	int index = cvmx_helper_get_interface_index_num(ipd_port);
	union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;

	/* Default: link down, speed 0, half duplex. */
	result.u64 = 0;

	if (cvmx_sysinfo_get()->board_type == CVMX_BOARD_TYPE_SIM) {
		/* The simulator gives you a simulated 1Gbps full duplex link */
		result.s.link_up = 1;
		result.s.full_duplex = 1;
		result.s.speed = 1000;
		return result;
	}

	pcsx_mrx_control_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
	if (pcsx_mrx_control_reg.s.loopbck1) {
		/* Force 1Gbps full duplex link for internal loopback */
		result.s.link_up = 1;
		result.s.full_duplex = 1;
		result.s.speed = 1000;
		return result;
	}

	pcs_misc_ctl_reg.u64 =
	    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
	if (pcs_misc_ctl_reg.s.mode) {
		/* 1000BASE-X */
		/* FIXME */
	} else {
		union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;

		pcsx_miscx_ctl_reg.u64 =
		    cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
		if (pcsx_miscx_ctl_reg.s.mac_phy) {
			/* PHY Mode */
			union cvmx_pcsx_mrx_status_reg pcsx_mrx_status_reg;
			union cvmx_pcsx_anx_results_reg pcsx_anx_results_reg;

			/*
			 * Don't bother continuing if the SERTES low
			 * level link is down
			 */
			pcsx_mrx_status_reg.u64 =
			    cvmx_read_csr(CVMX_PCSX_MRX_STATUS_REG
					  (index, interface));
			if (pcsx_mrx_status_reg.s.lnk_st == 0) {
				/* Try one re-init; give up (link down) on failure. */
				if (__cvmx_helper_sgmii_hardware_init_link
				    (interface, index) != 0)
					return result;
			}

			/* Read the autoneg results */
			pcsx_anx_results_reg.u64 =
			    cvmx_read_csr(CVMX_PCSX_ANX_RESULTS_REG
					  (index, interface));
			if (pcsx_anx_results_reg.s.an_cpt) {
				/*
				 * Auto negotiation is complete. Set
				 * status accordingly.
				 */
				result.s.full_duplex =
				    pcsx_anx_results_reg.s.dup;
				result.s.link_up =
				    pcsx_anx_results_reg.s.link_ok;
				switch (pcsx_anx_results_reg.s.spd) {
				case 0:
					result.s.speed = 10;
					break;
				case 1:
					result.s.speed = 100;
					break;
				case 2:
					result.s.speed = 1000;
					break;
				default:
					/* Reserved encoding: treat as down */
					result.s.speed = 0;
					result.s.link_up = 0;
					break;
				}
			} else {
				/*
				 * Auto negotiation isn't
				 * complete. Return link down.
				 */
				result.s.speed = 0;
				result.s.link_up = 0;
			}
		} else {	/* MAC Mode */
			/* In MAC mode, ask the board/PHY layer instead. */
			result = __cvmx_helper_board_link_get(ipd_port);
		}
	}
	return result;
}
/**
 * Configure an IPD/PKO port for the specified link state. This
 * function does not influence auto negotiation at the PHY level.
 * The passed link state must always match the link state returned
 * by cvmx_helper_link_get(). It is normally best to use
 * cvmx_helper_link_autoconf() instead.
 *
 * @ipd_port: IPD/PKO port to configure
 * @link_info: The new link state
 *
 * Returns Zero on success, negative on failure
 */
int __cvmx_helper_sgmii_link_set(int ipd_port,
				 cvmx_helper_link_info_t link_info)
{
	int iface = cvmx_helper_get_interface_num(ipd_port);
	int port = cvmx_helper_get_interface_index_num(ipd_port);

	/* Re-run the SERTES bringup, then program the GMX speed/duplex. */
	__cvmx_helper_sgmii_hardware_init_link(iface, port);
	return __cvmx_helper_sgmii_hardware_init_link_speed(iface, port,
							    link_info);
}
/**
* Configure a port for internal and/or external loopback. Internal
* loopback causes packets sent by the port to be received by
* Octeon. External loopback causes packets received from the wire to
* sent out again.
*
* @ipd_port: IPD/PKO port to loopback.
* @enable_internal:
* Non zero if you want internal loopback
* @enable_external:
* Non zero if you want external loopback
*
* Returns Zero on success, negative on failure.
*/
int __cvmx_helper_sgmii_configure_loopback(int ipd_port, int enable_internal,
int enable_external)
{
int interface = cvmx_helper_get_interface_num(ipd_port);
int index = cvmx_helper_get_interface_index_num(ipd_port);
union cvmx_pcsx_mrx_control_reg pcsx_mrx_control_reg;
union cvmx_pcsx_miscx_ctl_reg pcsx_miscx_ctl_reg;
pcsx_mrx_control_reg.u64 =
cvmx_read_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface));
pcsx_mrx_control_reg.s.loopbck1 = enable_internal;
cvmx_write_csr(CVMX_PCSX_MRX_CONTROL_REG(index, interface),
pcsx_mrx_control_reg.u64);
pcsx_miscx_ctl_reg.u64 =
cvmx_read_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface));
pcsx_miscx_ctl_reg.s.loopbck2 = enable_external;
cvmx_write_csr(CVMX_PCSX_MISCX_CTL_REG(index, interface),
pcsx_miscx_ctl_reg.u64);
__cvmx_helper_sgmii_hardware_init_link(interface, index);
return 0;
}
| gpl-2.0 |
NXTnet/android_kernel_samsung_msm8916-caf | arch/mips/alchemy/board-xxs1500.c | 7309 | 4289 | /*
* BRIEF MODULE DESCRIPTION
* MyCable XXS1500 board support
*
* Copyright 2003, 2008 MontaVista Software Inc.
* Author: MontaVista Software, Inc. <source@mvista.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/delay.h>
#include <linux/pm.h>
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/mach-au1x00/au1000.h>
#include <prom.h>
/* Board name reported through /proc/cpuinfo and the MIPS generic code. */
const char *get_system_type(void)
{
	return "XXS1500";
}
/*
 * Early firmware handoff: capture the bootloader's argc/argv/envp,
 * parse the command line, and register RAM based on the firmware's
 * "memsize" environment variable (falling back to 64 MiB).
 */
void __init prom_init(void)
{
	unsigned char *memsize_str;
	unsigned long memsize;

	prom_argc = fw_arg0;
	prom_argv = (char **)fw_arg1;
	prom_envp = (char **)fw_arg2;

	prom_init_cmdline();

	memsize_str = prom_getenv("memsize");
	/* Missing or unparsable memsize: assume 0x04000000 (64 MiB). */
	if (!memsize_str || strict_strtoul(memsize_str, 0, &memsize))
		memsize = 0x04000000;
	add_memory_region(0, memsize, BOOT_MEM_RAM);
}
/* Early-console output: push one character out of UART0. */
void prom_putchar(unsigned char c)
{
	alchemy_uart_putchar(AU1000_UART0_PHYS_ADDR, c);
}
/* Restart handler: jump straight to the CPU reset vector. */
static void xxs1500_reset(char *c)
{
	/* Jump to the reset vector */
	__asm__ __volatile__("jr\t%0" : : "r"(0xbfc00000));
}
/*
 * Power-off handler: the board has no real power switch, so spin
 * forever in the low-power WAIT instruction.
 */
static void xxs1500_power_off(void)
{
	while (1)
		asm volatile (
		"	.set	mips32					\n"
		"	wait						\n"
		"	.set	mips0					\n");
}
/*
 * Board-level setup: install power/reset handlers, enable the GPIO
 * blocks, route the multiplexed pins to UART3 and power up the USB
 * block via the UART3 DTR line.
 */
void __init board_setup(void)
{
	u32 pin_func;

	pm_power_off = xxs1500_power_off;
	_machine_halt = xxs1500_power_off;
	_machine_restart = xxs1500_reset;

	alchemy_gpio1_input_enable();
	alchemy_gpio2_enable();

	/* Set multiple use pins (UART3/GPIO) to UART (it's used as UART too) */
	/* Clear-then-set of the same bit: the clear is redundant but harmless. */
	pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_UR3;
	pin_func |= SYS_PF_UR3;
	au_writel(pin_func, SYS_PINFUNC);

	/* Enable UART */
	alchemy_uart_enable(AU1000_UART3_PHYS_ADDR);
	/* Enable DTR (MCR bit 0) = USB power up */
	__raw_writel(1, (void __iomem *)KSEG1ADDR(AU1000_UART3_PHYS_ADDR + 0x18));
	wmb();
}
/******************************************************************************/
/* PCMCIA socket windows: 4 MiB each of I/O, attribute and common memory. */
static struct resource xxs1500_pcmcia_res[] = {
	{
		.name	= "pcmcia-io",
		.flags	= IORESOURCE_MEM,
		.start	= AU1000_PCMCIA_IO_PHYS_ADDR,
		.end	= AU1000_PCMCIA_IO_PHYS_ADDR + 0x000400000 - 1,
	},
	{
		.name	= "pcmcia-attr",
		.flags	= IORESOURCE_MEM,
		.start	= AU1000_PCMCIA_ATTR_PHYS_ADDR,
		.end	= AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1,
	},
	{
		.name	= "pcmcia-mem",
		.flags	= IORESOURCE_MEM,
		.start	= AU1000_PCMCIA_MEM_PHYS_ADDR,
		.end	= AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
	},
};
/* Platform device bound by the xxs1500_pcmcia socket driver. */
static struct platform_device xxs1500_pcmcia_dev = {
	.name		= "xxs1500_pcmcia",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(xxs1500_pcmcia_res),
	.resource	= xxs1500_pcmcia_res,
};
/* All board platform devices, registered in one go at init time. */
static struct platform_device *xxs1500_devs[] __initdata = {
	&xxs1500_pcmcia_dev,
};
/*
 * Late board init: configure the trigger type of every GPIO interrupt
 * line used on this board, then register the platform devices.
 */
static int __init xxs1500_dev_init(void)
{
	/* One entry per GPIO interrupt line; all are level triggered. */
	static const struct {
		unsigned int irq;
		unsigned int type;
	} irq_cfg[] = {
		{ AU1500_GPIO204_INT, IRQ_TYPE_LEVEL_HIGH },
		{ AU1500_GPIO201_INT, IRQ_TYPE_LEVEL_LOW },
		{ AU1500_GPIO202_INT, IRQ_TYPE_LEVEL_LOW },
		{ AU1500_GPIO203_INT, IRQ_TYPE_LEVEL_LOW },
		{ AU1500_GPIO205_INT, IRQ_TYPE_LEVEL_LOW },
		{ AU1500_GPIO207_INT, IRQ_TYPE_LEVEL_LOW },
		{ AU1500_GPIO0_INT, IRQ_TYPE_LEVEL_LOW },
		{ AU1500_GPIO1_INT, IRQ_TYPE_LEVEL_LOW },
		{ AU1500_GPIO2_INT, IRQ_TYPE_LEVEL_LOW },
		{ AU1500_GPIO3_INT, IRQ_TYPE_LEVEL_LOW },
		{ AU1500_GPIO4_INT, IRQ_TYPE_LEVEL_LOW },	/* CF irq */
		{ AU1500_GPIO5_INT, IRQ_TYPE_LEVEL_LOW },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(irq_cfg); i++)
		irq_set_irq_type(irq_cfg[i].irq, irq_cfg[i].type);

	return platform_add_devices(xxs1500_devs, ARRAY_SIZE(xxs1500_devs));
}
device_initcall(xxs1500_dev_init);
| gpl-2.0 |
championswimmer/android_kernel_sony_msm8930 | drivers/media/video/pvrusb2/pvrusb2-cs53l32a.c | 12685 | 2567 | /*
*
*
* Copyright (C) 2005 Mike Isely <isely@pobox.com>
* Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
This source file is specifically designed to interface with the
v4l-dvb cs53l32a module.
*/
#include "pvrusb2-cs53l32a.h"
#include "pvrusb2-hdw-internal.h"
#include "pvrusb2-debug.h"
#include <linux/videodev2.h>
#include <media/v4l2-common.h>
#include <linux/errno.h>
/* Maps a driver logical input (PVR2_CVAL_INPUT_*) to the routing
 * value programmed into the cs53l32a audio chip. */
struct routing_scheme {
	const int *def;		/* table indexed by PVR2_CVAL_INPUT_* */
	unsigned int cnt;	/* number of entries in def[] */
};

static const int routing_scheme1[] = {
	[PVR2_CVAL_INPUT_TV] = 2, /* 1 or 2 seems to work here */
	[PVR2_CVAL_INPUT_RADIO] = 2,
	[PVR2_CVAL_INPUT_COMPOSITE] = 0,
	[PVR2_CVAL_INPUT_SVIDEO] = 0,
};

static const struct routing_scheme routing_def1 = {
	.def = routing_scheme1,
	.cnt = ARRAY_SIZE(routing_scheme1),
};

/* Indexed by the hardware description's signal_routing_scheme id. */
static const struct routing_scheme *routing_schemes[] = {
	[PVR2_ROUTING_SCHEME_ONAIR] = &routing_def1,
};
/*
 * Push the currently selected input's audio routing to the cs53l32a
 * subdevice.  Only acts when the input selection is dirty (or a full
 * update is forced); logs and bails out on an unknown routing scheme
 * or an out-of-range input value.
 */
void pvr2_cs53l32a_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
{
	const struct routing_scheme *scheme;
	unsigned int scheme_id;
	u32 route;

	if (!hdw->input_dirty && !hdw->force_dirty)
		return;

	pvr2_trace(PVR2_TRACE_CHIPS, "subdev v4l2 set_input(%d)",
		   hdw->input_val);

	scheme_id = hdw->hdw_desc->signal_routing_scheme;
	scheme = (scheme_id < ARRAY_SIZE(routing_schemes)) ?
		routing_schemes[scheme_id] : NULL;

	if ((scheme == NULL) ||
	    (hdw->input_val < 0) ||
	    (hdw->input_val >= scheme->cnt)) {
		pvr2_trace(PVR2_TRACE_ERROR_LEGS,
			   "*** WARNING *** subdev v4l2 set_input:"
			   " Invalid routing scheme (%u)"
			   " and/or input (%d)",
			   scheme_id, hdw->input_val);
		return;
	}

	route = scheme->def[hdw->input_val];
	sd->ops->audio->s_routing(sd, route, 0, 0);
}
/*
Stuff for Emacs to see, in order to encourage consistent editing style:
*** Local Variables: ***
*** mode: c ***
*** fill-column: 70 ***
*** tab-width: 8 ***
*** c-basic-offset: 8 ***
*** End: ***
*/
| gpl-2.0 |
rneugeba/linux-stable | drivers/media/usb/gspca/m5602/m5602_ov9650.c | 142 | 20591 |
/*
* Driver for the ov9650 sensor
*
* Copyright (C) 2008 Erik Andrén
* Copyright (C) 2007 Ilyes Gouta. Based on the m5603x Linux Driver Project.
* Copyright (C) 2005 m5603x Linux Driver Project <m5602@x3ng.com.br>
*
* Portions of code to USB interface and ALi driver software,
* Copyright (c) 2006 Willem Duinker
* v4l2 interface modeled after the V4L2 driver
* for SN9C10x PC Camera Controllers
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation, version 2.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "m5602_ov9650.h"
static int ov9650_s_ctrl(struct v4l2_ctrl *ctrl);
static void ov9650_dump_registers(struct sd *sd);
/* Bridge + sensor writes issued before probing the sensor id: clock
 * setup, GPIO direction, chip reset.  Each row is
 * {target (BRIDGE/SENSOR), register, value}. */
static const unsigned char preinit_ov9650[][3] = {
	/* [INITCAM] */
	{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
	{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
	{BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
	{BRIDGE, M5602_XB_SENSOR_TYPE, 0x08},
	{BRIDGE, M5602_XB_GPIO_DIR, 0x05},
	{BRIDGE, M5602_XB_GPIO_DAT, 0x04},
	{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
	{BRIDGE, M5602_XB_GPIO_DAT, 0x00},
	{BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a},
	/* Reset chip */
	{SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
	/* Enable double clock */
	{SENSOR, OV9650_CLKRC, 0x80},
	/* Do something out of spec with the power */
	{SENSOR, OV9650_OFON, 0x40}
};
/* Full initialization sequence, written in order by ov9650_init().
 * Each row is {target (BRIDGE/SENSOR), register, value}; several
 * entries are explicitly "out of spec" per the original author. */
static const unsigned char init_ov9650[][3] = {
	/* [INITCAM] */
	{BRIDGE, M5602_XB_MCU_CLK_DIV, 0x02},
	{BRIDGE, M5602_XB_MCU_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_SEN_CLK_DIV, 0x00},
	{BRIDGE, M5602_XB_SEN_CLK_CTRL, 0xb0},
	{BRIDGE, M5602_XB_ADC_CTRL, 0xc0},
	{BRIDGE, M5602_XB_SENSOR_CTRL, 0x00},
	{BRIDGE, M5602_XB_SENSOR_TYPE, 0x08},
	{BRIDGE, M5602_XB_GPIO_DIR, 0x05},
	{BRIDGE, M5602_XB_GPIO_DAT, 0x04},
	{BRIDGE, M5602_XB_GPIO_EN_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DIR_H, 0x06},
	{BRIDGE, M5602_XB_GPIO_DAT_H, 0x00},
	{BRIDGE, M5602_XB_GPIO_DAT, 0x00},
	{BRIDGE, M5602_XB_I2C_CLK_DIV, 0x0a},
	/* Reset chip */
	{SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
	/* One extra reset is needed in order to make the sensor behave
	   properly when resuming from ram, could be a timing issue */
	{SENSOR, OV9650_COM7, OV9650_REGISTER_RESET},
	/* Enable double clock */
	{SENSOR, OV9650_CLKRC, 0x80},
	/* Do something out of spec with the power */
	{SENSOR, OV9650_OFON, 0x40},
	/* Set fast AGC/AEC algorithm with unlimited step size */
	{SENSOR, OV9650_COM8, OV9650_FAST_AGC_AEC |
	OV9650_AEC_UNLIM_STEP_SIZE},
	{SENSOR, OV9650_CHLF, 0x10},
	{SENSOR, OV9650_ARBLM, 0xbf},
	{SENSOR, OV9650_ACOM38, 0x81},
	/* Turn off color matrix coefficient double option */
	{SENSOR, OV9650_COM16, 0x00},
	/* Enable color matrix for RGB/YUV, Delay Y channel,
	   set output Y/UV delay to 1 */
	{SENSOR, OV9650_COM13, 0x19},
	/* Enable digital BLC, Set output mode to U Y V Y */
	{SENSOR, OV9650_TSLB, 0x0c},
	/* Limit the AGC/AEC stable upper region */
	{SENSOR, OV9650_COM24, 0x00},
	/* Enable HREF and some out of spec things */
	{SENSOR, OV9650_COM12, 0x73},
	/* Set all DBLC offset signs to positive and
	   do some out of spec stuff */
	{SENSOR, OV9650_DBLC1, 0xdf},
	{SENSOR, OV9650_COM21, 0x06},
	{SENSOR, OV9650_RSVD35, 0x91},
	/* Necessary, no camera stream without it */
	{SENSOR, OV9650_RSVD16, 0x06},
	{SENSOR, OV9650_RSVD94, 0x99},
	{SENSOR, OV9650_RSVD95, 0x99},
	{SENSOR, OV9650_RSVD96, 0x04},
	/* Enable full range output */
	{SENSOR, OV9650_COM15, 0x0},
	/* Enable HREF at optical black, enable ADBLC bias,
	   enable ADBLC, reset timings at format change */
	{SENSOR, OV9650_COM6, 0x4b},
	/* Subtract 32 from the B channel bias */
	{SENSOR, OV9650_BBIAS, 0xa0},
	/* Subtract 32 from the Gb channel bias */
	{SENSOR, OV9650_GbBIAS, 0xa0},
	/* Do not bypass the analog BLC and do some out of spec stuff */
	{SENSOR, OV9650_Gr_COM, 0x00},
	/* Subtract 32 from the R channel bias */
	{SENSOR, OV9650_RBIAS, 0xa0},
	/* NOTE(review): this immediately overwrites the 0xa0 written to
	   OV9650_RBIAS just above with 0 — looks suspicious; confirm
	   against the datasheet whether one of the two writes was meant
	   for a different register */
	{SENSOR, OV9650_RBIAS, 0x0},
	{SENSOR, OV9650_COM26, 0x80},
	{SENSOR, OV9650_ACOMA9, 0x98},
	/* Set the AGC/AEC stable region upper limit */
	{SENSOR, OV9650_AEW, 0x68},
	/* Set the AGC/AEC stable region lower limit */
	{SENSOR, OV9650_AEB, 0x5c},
	/* Set the high and low limit nibbles to 3 */
	{SENSOR, OV9650_VPT, 0xc3},
	/* Set the Automatic Gain Ceiling (AGC) to 128x,
	   drop VSYNC at frame drop,
	   limit exposure timing,
	   drop frame when the AEC step is larger than the exposure gap */
	{SENSOR, OV9650_COM9, 0x6e},
	/* Set VSYNC negative, Set RESET to SLHS (slave mode horizontal sync)
	   and set PWDN to SLVS (slave mode vertical sync) */
	{SENSOR, OV9650_COM10, 0x42},
	/* Set horizontal column start high to default value */
	{SENSOR, OV9650_HSTART, 0x1a}, /* 210 */
	/* Set horizontal column end */
	{SENSOR, OV9650_HSTOP, 0xbf}, /* 1534 */
	/* Complementing register to the two writes above */
	{SENSOR, OV9650_HREF, 0xb2},
	/* Set vertical row start high bits */
	{SENSOR, OV9650_VSTRT, 0x02},
	/* Set vertical row end low bits */
	{SENSOR, OV9650_VSTOP, 0x7e},
	/* Set complementing vertical frame control */
	{SENSOR, OV9650_VREF, 0x10},
	{SENSOR, OV9650_ADC, 0x04},
	{SENSOR, OV9650_HV, 0x40},
	/* Enable denoise, and white-pixel erase */
	{SENSOR, OV9650_COM22, OV9650_DENOISE_ENABLE |
	OV9650_WHITE_PIXEL_ENABLE |
	OV9650_WHITE_PIXEL_OPTION},
	/* Enable VARIOPIXEL */
	{SENSOR, OV9650_COM3, OV9650_VARIOPIXEL},
	{SENSOR, OV9650_COM4, OV9650_QVGA_VARIOPIXEL},
	/* Put the sensor in soft sleep mode */
	{SENSOR, OV9650_COM2, OV9650_SOFT_SLEEP | OV9650_OUTPUT_DRIVE_2X},
};
/* Per-resolution (re)initialization run at stream start: clear the
 * COM2 soft-sleep bit and reset the bridge frame/line counters before
 * the sync windows are programmed in ov9650_start(). */
static const unsigned char res_init_ov9650[][3] = {
	{SENSOR, OV9650_COM2, OV9650_OUTPUT_DRIVE_2X},
	{BRIDGE, M5602_XB_LINE_OF_FRAME_H, 0x82},
	{BRIDGE, M5602_XB_LINE_OF_FRAME_L, 0x00},
	{BRIDGE, M5602_XB_PIX_OF_LINE_H, 0x82},
	{BRIDGE, M5602_XB_PIX_OF_LINE_L, 0x00},
	{BRIDGE, M5602_XB_SIG_INI, 0x01}
};
/* Vertically and horizontally flips the image if matched, needed for machines
   where the sensor is mounted upside down */
/* NOTE(review): DMI_MATCH performs substring matching, so the "A6J"
   entry also covers the A6Ja/A6JC product names — confirm that is
   intended before tightening any entry. */
static
const
struct dmi_system_id ov9650_flip_dmi_table[] = {
	{
		.ident = "ASUS A6Ja",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "A6J")
		}
	},
	{
		.ident = "ASUS A6JC",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "A6JC")
		}
	},
	{
		.ident = "ASUS A6K",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "A6K")
		}
	},
	{
		.ident = "ASUS A6Kt",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "A6Kt")
		}
	},
	{
		.ident = "ASUS A6VA",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "A6VA")
		}
	},
	{
		.ident = "ASUS A6VC",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "A6VC")
		}
	},
	{
		.ident = "ASUS A6VM",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "A6VM")
		}
	},
	{
		.ident = "ASUS A7V",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
			DMI_MATCH(DMI_PRODUCT_NAME, "A7V")
		}
	},
	{
		.ident = "Alienware Aurora m9700",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "alienware"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Aurora m9700")
		}
	},
	{}
};
/* Frame formats offered to userspace.  .priv carries the vertical
 * start offset that ov9650_start() programs into the bridge vsync
 * window for that mode. */
static struct v4l2_pix_format ov9650_modes[] = {
	{
		176,	/* QCIF */
		144,
		V4L2_PIX_FMT_SBGGR8,
		V4L2_FIELD_NONE,
		.sizeimage =
		176 * 144,
		.bytesperline = 176,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 9
	}, {
		320,	/* QVGA */
		240,
		V4L2_PIX_FMT_SBGGR8,
		V4L2_FIELD_NONE,
		.sizeimage =
		320 * 240,
		.bytesperline = 320,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 8
	}, {
		352,	/* CIF */
		288,
		V4L2_PIX_FMT_SBGGR8,
		V4L2_FIELD_NONE,
		.sizeimage =
		352 * 288,
		.bytesperline = 352,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 9
	}, {
		640,	/* VGA */
		480,
		V4L2_PIX_FMT_SBGGR8,
		V4L2_FIELD_NONE,
		.sizeimage =
		640 * 480,
		.bytesperline = 640,
		.colorspace = V4L2_COLORSPACE_SRGB,
		.priv = 9
	}
};
/* v4l2 control ops: every control change funnels through ov9650_s_ctrl(). */
static const struct v4l2_ctrl_ops ov9650_ctrl_ops = {
	.s_ctrl = ov9650_s_ctrl,
};
/*
 * Probe for an ov9650 sensor behind the m5602 bridge.
 *
 * Honors the force_sensor module option (skips hardware probing),
 * otherwise runs the pre-init sequence and checks the PID/VER id
 * registers for 0x96/0x52.  On success installs the mode table on the
 * gspca device.  Returns 0 if found, -ENODEV otherwise, or a negative
 * error from a failed pre-init write.
 */
int ov9650_probe(struct sd *sd)
{
	int err = 0;
	u8 prod_id = 0, ver_id = 0, i;
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;

	if (force_sensor) {
		if (force_sensor == OV9650_SENSOR) {
			pr_info("Forcing an %s sensor\n", ov9650.name);
			goto sensor_found;
		}
		/* If we want to force another sensor,
		   don't try to probe this one */
		return -ENODEV;
	}

	gspca_dbg(gspca_dev, D_PROBE, "Probing for an ov9650 sensor\n");

	/* Run the pre-init before probing the sensor */
	for (i = 0; i < ARRAY_SIZE(preinit_ov9650) && !err; i++) {
		u8 data = preinit_ov9650[i][2];
		if (preinit_ov9650[i][0] == SENSOR)
			err = m5602_write_sensor(sd,
				preinit_ov9650[i][1], &data, 1);
		else
			err = m5602_write_bridge(sd,
				preinit_ov9650[i][1], data);
	}
	if (err < 0)
		return err;

	/* Identify the chip by its product/version id registers */
	if (m5602_read_sensor(sd, OV9650_PID, &prod_id, 1))
		return -ENODEV;
	if (m5602_read_sensor(sd, OV9650_VER, &ver_id, 1))
		return -ENODEV;

	if ((prod_id == 0x96) && (ver_id == 0x52)) {
		pr_info("Detected an ov9650 sensor\n");
		goto sensor_found;
	}
	return -ENODEV;

sensor_found:
	sd->gspca_dev.cam.cam_mode = ov9650_modes;
	sd->gspca_dev.cam.nmodes = ARRAY_SIZE(ov9650_modes);
	return 0;
}
/*
 * Initialize the ov9650: optionally dump its register state (when the
 * dump_sensor module option is set), then program the bridge and
 * sensor with the init_ov9650 sequence, stopping at the first failing
 * write.
 *
 * Returns 0 on success or the negative error code from the first
 * failing register write.  (Previously the accumulated error was
 * computed but discarded and the function always returned 0, hiding
 * initialization failures from the caller.)
 */
int ov9650_init(struct sd *sd)
{
	int i, err = 0;
	u8 data;

	if (dump_sensor)
		ov9650_dump_registers(sd);

	for (i = 0; i < ARRAY_SIZE(init_ov9650) && !err; i++) {
		data = init_ov9650[i][2];
		if (init_ov9650[i][0] == SENSOR)
			err = m5602_write_sensor(sd, init_ov9650[i][1],
						 &data, 1);
		else
			err = m5602_write_bridge(sd, init_ov9650[i][1], data);
	}

	return err;
}
/*
 * Register the v4l2 controls and cluster the auto/manual pairs:
 * auto-white-balance + red/blue balance, auto-exposure + exposure,
 * autogain + gain, and hflip + vflip.  Returns 0 on success or the
 * control handler's error code.
 */
int ov9650_init_controls(struct sd *sd)
{
	struct v4l2_ctrl_handler *hdl = &sd->gspca_dev.ctrl_handler;

	sd->gspca_dev.vdev.ctrl_handler = hdl;
	v4l2_ctrl_handler_init(hdl, 9);

	sd->auto_white_bal = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops,
	V4L2_CID_AUTO_WHITE_BALANCE,
	0, 1, 1, 1);
	sd->red_bal = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops,
	V4L2_CID_RED_BALANCE, 0, 255, 1,
	RED_GAIN_DEFAULT);
	sd->blue_bal = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops,
	V4L2_CID_BLUE_BALANCE, 0, 255, 1,
	BLUE_GAIN_DEFAULT);
	sd->autoexpo = v4l2_ctrl_new_std_menu(hdl, &ov9650_ctrl_ops,
	V4L2_CID_EXPOSURE_AUTO, 1, 0, V4L2_EXPOSURE_AUTO);
	sd->expo = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_EXPOSURE,
	0, 0x1ff, 4, EXPOSURE_DEFAULT);
	sd->autogain = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops,
	V4L2_CID_AUTOGAIN, 0, 1, 1, 1);
	sd->gain = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_GAIN, 0,
	0x3ff, 1, GAIN_DEFAULT);
	sd->hflip = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_HFLIP,
	0, 1, 1, 0);
	sd->vflip = v4l2_ctrl_new_std(hdl, &ov9650_ctrl_ops, V4L2_CID_VFLIP,
	0, 1, 1, 0);

	if (hdl->error) {
		pr_err("Could not initialize controls\n");
		return hdl->error;
	}

	v4l2_ctrl_auto_cluster(3, &sd->auto_white_bal, 0, false);
	v4l2_ctrl_auto_cluster(2, &sd->autoexpo, 0, false);
	v4l2_ctrl_auto_cluster(2, &sd->autogain, 0, false);
	v4l2_ctrl_cluster(2, &sd->hflip);

	return 0;
}
/*
 * Stream start: program the bridge vsync/hsync capture windows for the
 * currently selected mode and select the matching sensor output format
 * (VGA/CIF/QVGA/QCIF), waking the sensor via res_init_ov9650.
 * Returns 0 on success or the first failing write's error code.
 */
int ov9650_start(struct sd *sd)
{
	u8 data;
	int i, err = 0;
	struct cam *cam = &sd->gspca_dev.cam;
	int width = cam->cam_mode[sd->gspca_dev.curr_mode].width;
	int height = cam->cam_mode[sd->gspca_dev.curr_mode].height;
	int ver_offs = cam->cam_mode[sd->gspca_dev.curr_mode].priv;
	int hor_offs = OV9650_LEFT_OFFSET;
	struct gspca_dev *gspca_dev = (struct gspca_dev *)sd;

	/* One-line vsync adjustment whenever the effective vflip differs
	   from the mode default (DMI table flags machines whose sensor
	   is mounted upside down). */
	if ((!dmi_check_system(ov9650_flip_dmi_table) &&
	sd->vflip->val) ||
	(dmi_check_system(ov9650_flip_dmi_table) &&
	!sd->vflip->val))
		ver_offs--;

	if (width <= 320)
		hor_offs /= 2;

	/* Synthesize the vsync/hsync setup */
	for (i = 0; i < ARRAY_SIZE(res_init_ov9650) && !err; i++) {
		if (res_init_ov9650[i][0] == BRIDGE)
			err = m5602_write_bridge(sd, res_init_ov9650[i][1],
				res_init_ov9650[i][2]);
		else if (res_init_ov9650[i][0] == SENSOR) {
			data = res_init_ov9650[i][2];
			err = m5602_write_sensor(sd,
				res_init_ov9650[i][1], &data, 1);
		}
	}
	if (err < 0)
		return err;

	/* Vertical window: start offset (16 bit), a zero byte, then the
	   frame height (16 bit) and two trailing zero bytes. */
	err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA,
		((ver_offs >> 8) & 0xff));
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (ver_offs & 0xff));
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height >> 8) & 0xff);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, (height & 0xff));
	if (err < 0)
		return err;

	for (i = 0; i < 2 && !err; i++)
		err = m5602_write_bridge(sd, M5602_XB_VSYNC_PARA, 0);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 2);
	if (err < 0)
		return err;

	/* Horizontal window: [hor_offs, width + hor_offs), 16 bit each. */
	err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA,
		(hor_offs >> 8) & 0xff);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA, hor_offs & 0xff);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA,
		((width + hor_offs) >> 8) & 0xff);
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_HSYNC_PARA,
		((width + hor_offs) & 0xff));
	if (err < 0)
		return err;

	err = m5602_write_bridge(sd, M5602_XB_SIG_INI, 0);
	if (err < 0)
		return err;

	/* Select the sensor output format matching the requested width. */
	switch (width) {
	case 640:
		gspca_dbg(gspca_dev, D_CONF, "Configuring camera for VGA mode\n");
		data = OV9650_VGA_SELECT | OV9650_RGB_SELECT |
		OV9650_RAW_RGB_SELECT;
		err = m5602_write_sensor(sd, OV9650_COM7, &data, 1);
		break;
	case 352:
		gspca_dbg(gspca_dev, D_CONF, "Configuring camera for CIF mode\n");
		data = OV9650_CIF_SELECT | OV9650_RGB_SELECT |
		OV9650_RAW_RGB_SELECT;
		err = m5602_write_sensor(sd, OV9650_COM7, &data, 1);
		break;
	case 320:
		gspca_dbg(gspca_dev, D_CONF, "Configuring camera for QVGA mode\n");
		data = OV9650_QVGA_SELECT | OV9650_RGB_SELECT |
		OV9650_RAW_RGB_SELECT;
		err = m5602_write_sensor(sd, OV9650_COM7, &data, 1);
		break;
	case 176:
		gspca_dbg(gspca_dev, D_CONF, "Configuring camera for QCIF mode\n");
		data = OV9650_QCIF_SELECT | OV9650_RGB_SELECT |
		OV9650_RAW_RGB_SELECT;
		err = m5602_write_sensor(sd, OV9650_COM7, &data, 1);
		break;
	}
	return err;
}
/* Stream stop: put the sensor into soft sleep while keeping the 2x
 * output drive setting. */
int ov9650_stop(struct sd *sd)
{
	u8 com2_val = OV9650_SOFT_SLEEP | OV9650_OUTPUT_DRIVE_2X;

	return m5602_write_sensor(sd, OV9650_COM2, &com2_val, 1);
}
/* Device disconnect: put the sensor to sleep and detach it. */
void ov9650_disconnect(struct sd *sd)
{
	ov9650_stop(sd);

	sd->sensor = NULL;
}
/*
 * Program the manual exposure value, split across three registers:
 * AECHM takes bits [15:10], AECH bits [9:2] and COM1 the two LSBs.
 * (The control range is 0-0x1ff, so AECHM receives 0 in practice.)
 */
static int ov9650_set_exposure(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 i2c_data;
	int err;

	gspca_dbg(gspca_dev, D_CONF, "Set exposure to %d\n", val);

	/* The 6 MSBs */
	i2c_data = (val >> 10) & 0x3f;
	err = m5602_write_sensor(sd, OV9650_AECHM,
		&i2c_data, 1);
	if (err < 0)
		return err;

	/* The 8 middle bits */
	i2c_data = (val >> 2) & 0xff;
	err = m5602_write_sensor(sd, OV9650_AECH,
		&i2c_data, 1);
	if (err < 0)
		return err;

	/* The 2 LSBs */
	i2c_data = val & 0x03;
	err = m5602_write_sensor(sd, OV9650_COM1, &i2c_data, 1);
	return err;
}
/*
 * Program the manual gain: bits [9:8] are stored in VREF[7:6] (via
 * read-modify-write so VREF[5:0] is preserved), the low byte in GAIN.
 */
static int ov9650_set_gain(struct gspca_dev *gspca_dev, __s32 val)
{
	int err;
	u8 i2c_data;
	struct sd *sd = (struct sd *) gspca_dev;

	gspca_dbg(gspca_dev, D_CONF, "Setting gain to %d\n", val);

	/* The 2 MSB */
	/* Read the OV9650_VREF register first to avoid
	   corrupting the VREF high and low bits */
	err = m5602_read_sensor(sd, OV9650_VREF, &i2c_data, 1);
	if (err < 0)
		return err;

	/* Mask away all uninteresting bits */
	i2c_data = ((val & 0x0300) >> 2) |
	(i2c_data & 0x3f);
	err = m5602_write_sensor(sd, OV9650_VREF, &i2c_data, 1);
	if (err < 0)
		return err;

	/* The 8 LSBs */
	i2c_data = val & 0xff;
	err = m5602_write_sensor(sd, OV9650_GAIN, &i2c_data, 1);
	return err;
}
/* Write the manual red channel gain (low byte of val) to OV9650_RED. */
static int ov9650_set_red_balance(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 red_gain = val & 0xff;

	gspca_dbg(gspca_dev, D_CONF, "Set red gain to %d\n", val);

	return m5602_write_sensor(sd, OV9650_RED, &red_gain, 1);
}
/* Write the manual blue channel gain (low byte of val) to OV9650_BLUE. */
static int ov9650_set_blue_balance(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 blue_gain = val & 0xff;

	gspca_dbg(gspca_dev, D_CONF, "Set blue gain to %d\n", val);

	return m5602_write_sensor(sd, OV9650_BLUE, &blue_gain, 1);
}
/*
 * Apply the current hflip/vflip control values to the MVFP register
 * (hflip in bit 5, vflip in bit 4).  On machines in the DMI table the
 * sensor is mounted upside down, so the vflip sense is inverted.
 * While streaming, the bridge sync windows are reprogrammed because
 * vflip affects the vertical offset (see ov9650_start()).
 */
static int ov9650_set_hvflip(struct gspca_dev *gspca_dev)
{
	int err;
	u8 i2c_data;
	struct sd *sd = (struct sd *) gspca_dev;
	int hflip = sd->hflip->val;
	int vflip = sd->vflip->val;

	gspca_dbg(gspca_dev, D_CONF, "Set hvflip to %d %d\n", hflip, vflip);

	if (dmi_check_system(ov9650_flip_dmi_table))
		vflip = !vflip;

	i2c_data = (hflip << 5) | (vflip << 4);
	err = m5602_write_sensor(sd, OV9650_MVFP, &i2c_data, 1);
	if (err < 0)
		return err;

	/* When vflip is toggled we need to readjust the bridge hsync/vsync */
	if (gspca_dev->streaming)
		err = ov9650_start(sd);

	return err;
}
/*
 * Toggle the AEC enable bit (bit 0) of COM8 via read-modify-write.
 * val is a V4L2_EXPOSURE_* menu value; only V4L2_EXPOSURE_AUTO
 * enables automatic exposure.
 */
static int ov9650_set_auto_exposure(struct gspca_dev *gspca_dev,
	__s32 val)
{
	int err;
	u8 i2c_data;
	struct sd *sd = (struct sd *) gspca_dev;

	gspca_dbg(gspca_dev, D_CONF, "Set auto exposure control to %d\n", val);

	err = m5602_read_sensor(sd, OV9650_COM8, &i2c_data, 1);
	if (err < 0)
		return err;

	/* collapse the menu value to a 0/1 enable flag */
	val = (val == V4L2_EXPOSURE_AUTO);
	i2c_data = ((i2c_data & 0xfe) | ((val & 0x01) << 0));

	return m5602_write_sensor(sd, OV9650_COM8, &i2c_data, 1);
}
/* Toggle the auto-white-balance enable bit (bit 1) of COM8 via
 * read-modify-write. */
static int ov9650_set_auto_white_balance(struct gspca_dev *gspca_dev,
	__s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 com8;
	int err;

	gspca_dbg(gspca_dev, D_CONF, "Set auto white balance to %d\n", val);

	err = m5602_read_sensor(sd, OV9650_COM8, &com8, 1);
	if (err < 0)
		return err;

	com8 = (com8 & 0xfd) | ((val & 0x01) << 1);
	return m5602_write_sensor(sd, OV9650_COM8, &com8, 1);
}
/* Toggle the auto-gain enable bit (bit 2) of COM8 via
 * read-modify-write. */
static int ov9650_set_auto_gain(struct gspca_dev *gspca_dev, __s32 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 com8;
	int err;

	gspca_dbg(gspca_dev, D_CONF, "Set auto gain control to %d\n", val);

	err = m5602_read_sensor(sd, OV9650_COM8, &com8, 1);
	if (err < 0)
		return err;

	com8 = (com8 & 0xfb) | ((val & 0x01) << 2);
	return m5602_write_sensor(sd, OV9650_COM8, &com8, 1);
}
/*
 * Central v4l2 control handler.  Controls are clustered, so when an
 * "auto" control is switched off the corresponding manual values are
 * pushed to the hardware too.  No-op while not streaming (values are
 * applied when the stream starts instead).
 */
static int ov9650_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct gspca_dev *gspca_dev =
		container_of(ctrl->handler, struct gspca_dev, ctrl_handler);
	struct sd *sd = (struct sd *) gspca_dev;
	int err;

	if (!gspca_dev->streaming)
		return 0;

	switch (ctrl->id) {
	case V4L2_CID_AUTO_WHITE_BALANCE:
		err = ov9650_set_auto_white_balance(gspca_dev, ctrl->val);
		if (err || ctrl->val)
			return err;
		/* AWB turned off: apply the manual red/blue gains */
		err = ov9650_set_red_balance(gspca_dev, sd->red_bal->val);
		if (err)
			return err;
		err = ov9650_set_blue_balance(gspca_dev, sd->blue_bal->val);
		break;
	case V4L2_CID_EXPOSURE_AUTO:
		err = ov9650_set_auto_exposure(gspca_dev, ctrl->val);
		if (err || ctrl->val == V4L2_EXPOSURE_AUTO)
			return err;
		err = ov9650_set_exposure(gspca_dev, sd->expo->val);
		break;
	case V4L2_CID_AUTOGAIN:
		err = ov9650_set_auto_gain(gspca_dev, ctrl->val);
		if (err || ctrl->val)
			return err;
		err = ov9650_set_gain(gspca_dev, sd->gain->val);
		break;
	case V4L2_CID_HFLIP:
		/* hflip/vflip are clustered; one call applies both */
		err = ov9650_set_hvflip(gspca_dev);
		break;
	default:
		return -EINVAL;
	}

	return err;
}
/*
 * Debug helper: print all sensor register contents, then probe which
 * registers are writable.  The probe is temporarily destructive — it
 * writes 0xff to each register and restores the old value afterwards
 * — so it only runs when the dump_sensor module option is set.
 */
static void ov9650_dump_registers(struct sd *sd)
{
	int address;

	pr_info("Dumping the ov9650 register state\n");
	for (address = 0; address < 0xa9; address++) {
		u8 value;

		m5602_read_sensor(sd, address, &value, 1);
		pr_info("register 0x%x contains 0x%x\n", address, value);
	}
	pr_info("ov9650 register state dump complete\n");

	pr_info("Probing for which registers that are read/write\n");
	for (address = 0; address < 0xff; address++) {
		u8 old_value, ctrl_value;
		u8 test_value[2] = {0xff, 0xff};

		m5602_read_sensor(sd, address, &old_value, 1);
		m5602_write_sensor(sd, address, test_value, 1);
		m5602_read_sensor(sd, address, &ctrl_value, 1);

		if (ctrl_value == test_value[0])
			pr_info("register 0x%x is writeable\n", address);
		else
			pr_info("register 0x%x is read only\n", address);

		/* Restore original value */
		m5602_write_sensor(sd, address, &old_value, 1);
	}
}
| gpl-2.0 |
beaka/RK3188_tablet_kernel_sources | drivers/net/wireless/rtl8723bs/hal/hal_btcoex.c | 142 | 73001 | /******************************************************************************
*
* Copyright(c) 2013 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
*
******************************************************************************/
#define __HAL_BTCOEX_C__
#ifdef CONFIG_BT_COEXIST
#include <hal_data.h>
#include <hal_btcoex.h>
#include <Mp_Precomp.h>
//====================================
// Global variables
//====================================
/* Human-readable lookup tables used by the coexistence debug output. */

/* BT profile names */
const char *const BtProfileString[] =
{
	"NONE",
	"A2DP",
	"PAN",
	"HID",
	"SCO",
};

/* Bluetooth specification version names */
const char *const BtSpecString[] =
{
	"1.0b",
	"1.1",
	"1.2",
	"2.0+EDR",
	"2.1+EDR",
	"3.0+HS",
	"4.0",
};

/* BT link role names */
const char *const BtLinkRoleString[] =
{
	"Master",
	"Slave",
};

/* H2C (host-to-chip command) status names */
const char *const h2cStaString[] =
{
	"successful",
	"h2c busy",
	"rf off",
	"fw not read",
};

/* I/O request status names */
const char *const ioStaString[] =
{
	"IO_STATUS_SUCCESS",
	"IO_STATUS_FAIL_CANNOT_IO",
	"IO_STATUS_FAIL_RF_OFF",
	"IO_STATUS_FAIL_FW_READ_CLEAR_TIMEOUT",
	"IO_STATUS_FAIL_WAIT_IO_EVENT_TIMEOUT",
	"IO_STATUS_INVALID_LEN",
	"IO_STATUS_IO_IDLE_QUEUE_EMPTY",
	"IO_STATUS_IO_INSERT_WAIT_QUEUE_FAIL",
	"IO_STATUS_UNKNOWN_FAIL",
	"IO_STATUS_WRONG_LEVEL",
	"IO_STATUS_H2C_STOPPED",
};
/* Single global coexistence context shared by the whole HAL. */
BTC_COEXIST GLBtCoexist;
/* NOTE(review): presumably track WiFi scan / IQ-calibration state —
 * the setters are elsewhere in this file; confirm there. */
u8 GLBtcWiFiInScanState;
u8 GLBtcWiFiInIQKState;
/* Per-category debug verbosity masks, see halbtcoutsrc_DbgInit(). */
u32 GLBtcDbgType[BTC_MSG_MAX];
u8 GLBtcDbgBuf[BT_TMP_BUF_SIZE];

/* Accumulating text buffer filled by DBG_BT_INFO(). */
typedef struct _btcoexdbginfo
{
	u8 *info;
	u32 size; // buffer total size
	u32 len; // now used length
} BTCDBGINFO, *PBTCDBGINFO;

BTCDBGINFO GLBtcDbgInfo;

#define BT_Operation(Adapter) _FALSE
/* Reset the debug-info accumulator and, when a buffer is supplied,
 * attach it as the output destination. */
static void DBG_BT_INFO_INIT(PBTCDBGINFO pinfo, u8 *pbuf, u32 size)
{
	if (pinfo == NULL)
		return;

	_rtw_memset(pinfo, 0, sizeof(BTCDBGINFO));

	if ((pbuf != NULL) && (size != 0)) {
		pinfo->info = pbuf;
		pinfo->size = size;
	}
}
/* Append a NUL-terminated debug message to the global accumulator.
 * Silently drops the message when no buffer is attached or it would
 * not fit in the remaining space. */
void DBG_BT_INFO(u8 *dbgmsg)
{
	PBTCDBGINFO pinfo = &GLBtcDbgInfo;
	u32 msglen;

	if (pinfo->info == NULL)
		return;

	msglen = strlen(dbgmsg);
	if (pinfo->len + msglen > pinfo->size)
		return;

	_rtw_memcpy(pinfo->info + pinfo->len, dbgmsg, msglen);
	pinfo->len += msglen;
}
//====================================
// Debug related function
//====================================
/* _TRUE only when the coexistence context has been bound to a live
 * adapter; most entry points bail out early when this fails. */
static u8 halbtcoutsrc_IsBtCoexistAvailable(PBTC_COEXIST pBtCoexist)
{
	if (pBtCoexist->bBinded && (pBtCoexist->Adapter != NULL))
		return _TRUE;

	return _FALSE;
}
/* Zero every debug-category mask.  The commented-out flag names below
 * document the per-category bits that can be OR-ed in to enable
 * individual trace classes. */
static void halbtcoutsrc_DbgInit(void)
{
	u8 i;

	for (i=0; i<BTC_MSG_MAX; i++)
		GLBtcDbgType[i] = 0;

	GLBtcDbgType[BTC_MSG_INTERFACE] = \
//		INTF_INIT |
//		INTF_NOTIFY |
		0;

	GLBtcDbgType[BTC_MSG_ALGORITHM] = \
//		ALGO_BT_RSSI_STATE |
//		ALGO_WIFI_RSSI_STATE |
//		ALGO_BT_MONITOR |
//		ALGO_TRACE |
//		ALGO_TRACE_FW |
//		ALGO_TRACE_FW_DETAIL |
//		ALGO_TRACE_FW_EXEC |
//		ALGO_TRACE_SW |
//		ALGO_TRACE_SW_DETAIL |
//		ALGO_TRACE_SW_EXEC |
		0;
}
/* Whether the chip provides a hardware BT mailbox; the 8812 is the
 * one family treated as not having one. */
static u8 halbtcoutsrc_IsHwMailboxExist(PBTC_COEXIST pBtCoexist)
{
	return IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter) ? _FALSE : _TRUE;
}
/* On BT's behalf, take WiFi out of leisure power save and record that
 * BT currently controls the LPS state (bBtCtrlLps). */
static void halbtcoutsrc_LeaveLps(PBTC_COEXIST pBtCoexist)
{
	PADAPTER padapter;

	padapter = pBtCoexist->Adapter;

	pBtCoexist->btInfo.bBtCtrlLps = _TRUE;
	pBtCoexist->btInfo.bBtLpsOn = _FALSE;

	rtw_btcoex_LPS_Leave(padapter);
}
/* On BT's behalf, put WiFi into leisure power save and record that BT
 * currently controls the LPS state (bBtCtrlLps). */
void halbtcoutsrc_EnterLps(PBTC_COEXIST pBtCoexist)
{
	PADAPTER padapter;

	padapter = pBtCoexist->Adapter;

	pBtCoexist->btInfo.bBtCtrlLps = _TRUE;
	pBtCoexist->btInfo.bBtLpsOn = _TRUE;

	rtw_btcoex_LPS_Enter(padapter);
}
/* Return LPS control to the driver: if BT had taken control, leave
 * LPS and clear the BT-control flag.  The original restore of the
 * configured LPS mode is kept disabled below (#if 0). */
void halbtcoutsrc_NormalLps(PBTC_COEXIST pBtCoexist)
{
	PADAPTER padapter;

	BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE, ("[BTCoex], Normal LPS behavior!!!\n"));

	padapter = pBtCoexist->Adapter;

	if (pBtCoexist->btInfo.bBtCtrlLps)
	{
		pBtCoexist->btInfo.bBtLpsOn = _FALSE;
		rtw_btcoex_LPS_Leave(padapter);
		pBtCoexist->btInfo.bBtCtrlLps = _FALSE;

		// recover the LPS state to the original
#if 0
		padapter->HalFunc.UpdateLPSStatusHandler(
			padapter,
			pPSC->RegLeisurePsMode,
			pPSC->RegPowerSaveMode);
#endif
	}
}
/*
 * Leave 32k low power clock mode so registers are accessible.
 *
 * Polls rtw_register_task_alive(BTCOEX_ALIVE) every 1 ms until it
 * succeeds or the timeout (LPS_RPWM_WAIT_MS, default 30 ms) elapses.
 * No-op unless CONFIG_LPS_LCLK is enabled.
 *
 * Constraint:
 *	1. this function will request pwrctrl->lock
 */
void halbtcoutsrc_LeaveLowPower(PBTC_COEXIST pBtCoexist)
{
#ifdef CONFIG_LPS_LCLK
	PADAPTER padapter;
	PHAL_DATA_TYPE pHalData;
	struct pwrctrl_priv *pwrctrl;
	s32 ready;
	u32 stime;
	s32 utime;
	u32 timeout; // unit: ms

	padapter = pBtCoexist->Adapter;
	pHalData = GET_HAL_DATA(padapter);
	pwrctrl = adapter_to_pwrctl(padapter);
	ready = _FAIL;
#ifdef LPS_RPWM_WAIT_MS
	timeout = LPS_RPWM_WAIT_MS;
#else // !LPS_RPWM_WAIT_MS
	timeout = 30;
#endif // !LPS_RPWM_WAIT_MS

	stime = rtw_get_current_time();
	do {
		ready = rtw_register_task_alive(padapter, BTCOEX_ALIVE);
		if (_SUCCESS == ready)
			break;

		utime = rtw_get_passing_time_ms(stime);
		if (utime > timeout)
			break;

		rtw_msleep_os(1);
	} while (1);
#endif // CONFIG_LPS_LCLK
}
/*
 * Resume normal 32k low power behavior by dropping the BTCOEX "task
 * alive" registration.  No-op unless CONFIG_LPS_LCLK is enabled.
 *
 * Constraint:
 *	1. this function will request pwrctrl->lock
 */
void halbtcoutsrc_NormalLowPower(PBTC_COEXIST pBtCoexist)
{
#ifdef CONFIG_LPS_LCLK
	PADAPTER padapter;

	padapter = pBtCoexist->Adapter;
	rtw_unregister_task_alive(padapter, BTCOEX_ALIVE);
#endif // CONFIG_LPS_LCLK
}
/* Record BT's low-power preference and act on it: disable (leave) 32k
 * low power or restore the original low power behavior. */
void halbtcoutsrc_DisableLowPower(PBTC_COEXIST pBtCoexist, u8 bLowPwrDisable)
{
	pBtCoexist->btInfo.bBtDisableLowPwr = bLowPwrDisable;

	if (bLowPwrDisable)
		halbtcoutsrc_LeaveLowPower(pBtCoexist); // leave 32k low power.
	else
		halbtcoutsrc_NormalLowPower(pBtCoexist); // original 32k low power behavior.
}
/*
 * Apply BT's aggregation constraints to the WiFi side.  If BT asks
 * for aggregated packets to be rejected outright, do so.  Otherwise,
 * when BT's aggregation-control flag or its requested buffer size has
 * changed, toggle reject on/off once so the new settings take effect.
 */
void halbtcoutsrc_AggregationCheck(PBTC_COEXIST pBtCoexist)
{
	PADAPTER padapter;
	BOOLEAN bNeedToAct;

	padapter = pBtCoexist->Adapter;
	bNeedToAct = _FALSE;

	if (pBtCoexist->btInfo.bRejectAggPkt)
		rtw_btcoex_RejectApAggregatedPacket(padapter, _TRUE);
	else
	{
		/* detect a change of the BT-controls-agg-size flag */
		if (pBtCoexist->btInfo.bPreBtCtrlAggBufSize !=
			pBtCoexist->btInfo.bBtCtrlAggBufSize)
		{
			bNeedToAct = _TRUE;
			pBtCoexist->btInfo.bPreBtCtrlAggBufSize = pBtCoexist->btInfo.bBtCtrlAggBufSize;
		}

		/* detect a change of the requested buffer size itself */
		if (pBtCoexist->btInfo.bBtCtrlAggBufSize)
		{
			if (pBtCoexist->btInfo.preAggBufSize !=
				pBtCoexist->btInfo.aggBufSize)
			{
				bNeedToAct = _TRUE;
			}
			pBtCoexist->btInfo.preAggBufSize = pBtCoexist->btInfo.aggBufSize;
		}

		if (bNeedToAct)
		{
			/* toggle to force renegotiation with the new size */
			rtw_btcoex_RejectApAggregatedPacket(padapter, _TRUE);
			rtw_btcoex_RejectApAggregatedPacket(padapter, _FALSE);
		}
	}
}
/*
 * _TRUE when any associated interface is acting as an AP or reports
 * busy link traffic; in concurrent/dual-MAC builds the buddy adapter
 * is checked as well.
 */
u8 halbtcoutsrc_IsWifiBusy(PADAPTER padapter)
{
	struct mlme_priv *pmlmepriv;

	pmlmepriv = &padapter->mlmepriv;

	if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE) == _TRUE)
	{
		if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
			return _TRUE;
		if (_TRUE == pmlmepriv->LinkDetectInfo.bBusyTraffic)
			return _TRUE;
	}

#if defined(CONFIG_CONCURRENT_MODE) || defined(CONFIG_DUALMAC_CONCURRENT)
	pmlmepriv = &padapter->pbuddy_adapter->mlmepriv;

	if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE) == _TRUE)
	{
		if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
			return _TRUE;
		if (_TRUE == pmlmepriv->LinkDetectInfo.bBusyTraffic)
			return _TRUE;
	}
#endif

	return _FALSE;
}
/*
 * Classify one adapter's connection as a WIFI_*_CONNECTED bitmask:
 * AP vs. station role, refined to P2P GO/GC when a P2P session is
 * active.  Returns 0 when the adapter is not associated.
 */
static u32 _halbtcoutsrc_GetWifiLinkStatus(PADAPTER padapter)
{
	struct mlme_priv *pmlmepriv;
	u8 bp2p;
	u32 portConnectedStatus;

	pmlmepriv = &padapter->mlmepriv;
	bp2p = _FALSE;
	portConnectedStatus = 0;

#ifdef CONFIG_P2P
	if (!rtw_p2p_chk_state(&padapter->wdinfo, P2P_STATE_NONE))
		bp2p = _TRUE;
#endif // CONFIG_P2P

	if (check_fwstate(pmlmepriv, WIFI_ASOC_STATE) == _TRUE)
	{
		if (check_fwstate(pmlmepriv, WIFI_AP_STATE) == _TRUE)
		{
			if (_TRUE == bp2p)
				portConnectedStatus |= WIFI_P2P_GO_CONNECTED;
			else
				portConnectedStatus |= WIFI_AP_CONNECTED;
		}
		else
		{
			if (_TRUE == bp2p)
				portConnectedStatus |= WIFI_P2P_GC_CONNECTED;
			else
				portConnectedStatus |= WIFI_STA_CONNECTED;
		}
	}

	return portConnectedStatus;
}
/*
 * Aggregate the link status over this adapter (and its buddy in
 * concurrent mode).
 */
u32 halbtcoutsrc_GetWifiLinkStatus(PBTC_COEXIST pBtCoexist)
{
	//=================================
	// return value:
	// [31:16]=> connected port number
	// [15:0]=> port connected bit define
	//================================
	PADAPTER padapter;
	u32 retVal;
	u32 portConnectedStatus, numOfConnectedPort;

	padapter = pBtCoexist->Adapter;
	retVal = 0;
	portConnectedStatus = 0;
	numOfConnectedPort = 0;

	retVal = _halbtcoutsrc_GetWifiLinkStatus(padapter);
	if (retVal)
	{
		portConnectedStatus |= retVal;
		numOfConnectedPort++;
	}

#ifdef CONFIG_CONCURRENT_MODE
	if (padapter->pbuddy_adapter)
	{
		retVal = _halbtcoutsrc_GetWifiLinkStatus(padapter->pbuddy_adapter);
		if (retVal)
		{
			portConnectedStatus |= retVal;
			numOfConnectedPort++;
		}
	}
#endif // CONFIG_CONCURRENT_MODE

	retVal = (numOfConnectedPort << 16) | portConnectedStatus;

	return retVal;
}
/*
 * Return the cached BT firmware patch version.  The original query
 * logic (mailbox read or stack event, shown in the #if 0 block) is
 * disabled in this driver, so only the cached value is reported.
 */
u32 halbtcoutsrc_GetBtPatchVer(PBTC_COEXIST pBtCoexist)
{
	u16 btRealFwVer = 0x0;
	u8 btFwVer = 0x0;
	u8 cnt = 0;

#if 0
	if (!pBtCoexist->btInfo.btRealFwVer && cnt<=5)
	{
		if (halbtcoutsrc_IsHwMailboxExist(pBtCoexist))
		{ // mailbox exists, through mailbox
			if (NDBG_GetBtFwVersion(pBtCoexist->Adapter, &btRealFwVer, &btFwVer))
			{
				pBtCoexist->btInfo.btRealFwVer = btRealFwVer;
				pBtCoexist->btInfo.btFwVer = btFwVer;
			}
			else
			{
				pBtCoexist->btInfo.btRealFwVer = 0x0;
				pBtCoexist->btInfo.btFwVer = 0x0;
			}
		}
		else // no mailbox, query bt patch version through stack.
		{
			u1Byte dataLen=2;
			u1Byte buf[4] = {0};

			buf[0] = 0x0; // OP_Code
			buf[1] = 0x0; // OP_Code_Length
			BT_SendEventExtBtCoexControl(pBtCoexist->Adapter, _FALSE, dataLen, &buf[0]);
		}

		cnt++;
	}
#endif
	return pBtCoexist->btInfo.btRealFwVer;
}
s32 halbtcoutsrc_GetWifiRssi(PADAPTER padapter)
{
	/* Report the DM-tracked minimum undecorated smoothed PWDB value. */
	PHAL_DATA_TYPE pHalData = GET_HAL_DATA(padapter);

	return pHalData->dmpriv.EntryMinUndecoratedSmoothedPWDB;
}
/*
 * Return the BSS count from the most recently completed site survey,
 * clamped to 0xFF.
 *
 * The count is held in a function-local static so that, while a scan is
 * in progress (WIFI_SITE_MONITOR set), the value from the previous
 * completed survey is still reported instead of a partial count.
 */
static u8 halbtcoutsrc_GetWifiScanAPNum(PADAPTER padapter)
{
	struct mlme_priv *pmlmepriv;
	struct mlme_ext_priv *pmlmeext;
	static u8 scan_AP_num = 0;

	pmlmepriv = &padapter->mlmepriv;
	pmlmeext = &padapter->mlmeextpriv;

	/* Only refresh the cached count when no scan is currently running. */
	if (check_fwstate(pmlmepriv, WIFI_SITE_MONITOR) == _FALSE) {
		if (pmlmeext->sitesurvey_res.bss_cnt > 0xFF)
			scan_AP_num = 0xFF; /* clamp to the u8 return range */
		else
			scan_AP_num = (u8)pmlmeext->sitesurvey_res.bss_cnt;
	}
	return scan_AP_num;
}
/*
 * Coex-engine -> driver "get" dispatch.
 *
 * pOutBuf is a caller-supplied buffer that is interpreted according to
 * getType (bool-as-u8, u8, s32 or u32 — the BTC_GET_* prefix encodes the
 * expected type). Returns _TRUE when the query was serviced, _FALSE when
 * coexistence is unavailable or the query is unsupported (HS-related
 * queries always report _FALSE here).
 */
u8 halbtcoutsrc_Get(void *pBtcContext, u8 getType, void *pOutBuf)
{
	PBTC_COEXIST pBtCoexist;
	PADAPTER padapter;
	PHAL_DATA_TYPE pHalData;
	struct mlme_ext_priv *mlmeext;
	u8 bSoftApExist, bVwifiExist;
	u8 *pu8;
	s32 *pS4Tmp;
	u32 *pU4Tmp;
	u8 *pU1Tmp;
	u8 ret;

	pBtCoexist = (PBTC_COEXIST)pBtcContext;
	if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
		return _FALSE;

	padapter = pBtCoexist->Adapter;
	pHalData = GET_HAL_DATA(padapter);
	mlmeext = &padapter->mlmeextpriv;
	bSoftApExist = _FALSE;
	bVwifiExist = _FALSE;
	/* All four aliases view the same caller output buffer; each case uses
	 * only the alias matching its BTC_GET_* type prefix. */
	pu8 = (u8*)pOutBuf;
	pS4Tmp = (s32*)pOutBuf;
	pU4Tmp = (u32*)pOutBuf;
	pU1Tmp = (u8*)pOutBuf;
	ret = _TRUE;

	switch (getType)
	{
	/* Hotspot (HS) mode is not supported by this port: report false. */
	case BTC_GET_BL_HS_OPERATION:
		*pu8 = _FALSE;
		ret = _FALSE;
		break;

	case BTC_GET_BL_HS_CONNECTING:
		*pu8 = _FALSE;
		ret = _FALSE;
		break;

	case BTC_GET_BL_WIFI_CONNECTED:
		*pu8 = check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE);
#ifdef CONFIG_CONCURRENT_MODE
		/* Also consider the buddy adapter when the primary is idle. */
		if ((_FALSE == *pu8) && padapter->pbuddy_adapter)
		{
			*pu8 = check_fwstate(&padapter->pbuddy_adapter->mlmepriv, WIFI_ASOC_STATE);
		}
#endif // CONFIG_CONCURRENT_MODE
		break;

	case BTC_GET_BL_WIFI_BUSY:
		*pu8 = halbtcoutsrc_IsWifiBusy(padapter);
		break;

	case BTC_GET_BL_WIFI_SCAN:
#if 0
		*pu8 = check_fwstate(&padapter->mlmepriv, WIFI_SITE_MONITOR);
#ifdef CONFIG_CONCURRENT_MODE
		if ((_FALSE == *pu8) && padapter->pbuddy_adapter)
		{
			*pu8 = check_fwstate(&padapter->pbuddy_adapter->mlmepriv, WIFI_SITE_MONITOR);
		}
#endif // CONFIG_CONCURRENT_MODE
#else
		/* Use the value of the new variable GLBtcWiFiInScanState to judge whether WiFi is in scan state or not, since the originally used flag
			WIFI_SITE_MONITOR in fwstate may not be cleared in time */
		*pu8 = GLBtcWiFiInScanState;
#endif
		break;

	case BTC_GET_BL_WIFI_LINK:
		*pu8 = check_fwstate(&padapter->mlmepriv, WIFI_UNDER_LINKING);
#ifdef CONFIG_CONCURRENT_MODE
		if ((_FALSE == *pu8) && padapter->pbuddy_adapter)
		{
			*pu8 = check_fwstate(&padapter->pbuddy_adapter->mlmepriv, WIFI_UNDER_LINKING);
		}
#endif // CONFIG_CONCURRENT_MODE
		break;

	case BTC_GET_BL_WIFI_ROAM:
		/* NOTE(review): uses the same WIFI_UNDER_LINKING flag as
		 * BTC_GET_BL_WIFI_LINK — no distinct roaming state is visible
		 * here; confirm this is intentional. */
		*pu8 = check_fwstate(&padapter->mlmepriv, WIFI_UNDER_LINKING);
#ifdef CONFIG_CONCURRENT_MODE
		if ((_FALSE == *pu8) && padapter->pbuddy_adapter)
		{
			*pu8 = check_fwstate(&padapter->pbuddy_adapter->mlmepriv, WIFI_UNDER_LINKING);
		}
#endif // CONFIG_CONCURRENT_MODE
		break;

	case BTC_GET_BL_WIFI_4_WAY_PROGRESS:
		/* 4-way handshake progress is not tracked here. */
		*pu8 = _FALSE;
		break;

	case BTC_GET_BL_WIFI_UNDER_5G:
		/* assumes CurrentBandType == 1 means the 5 GHz band — TODO confirm
		 * against the band-type enum definition. */
		*pu8 = (pHalData->CurrentBandType == 1)? _TRUE : _FALSE;
		break;

	case BTC_GET_BL_WIFI_AP_MODE_ENABLE:
		*pu8 = check_fwstate(&padapter->mlmepriv, WIFI_AP_STATE);
#ifdef CONFIG_CONCURRENT_MODE
		if ((_FALSE == *pu8) && padapter->pbuddy_adapter)
		{
			*pu8 = check_fwstate(&padapter->pbuddy_adapter->mlmepriv, WIFI_AP_STATE);
		}
#endif // CONFIG_CONCURRENT_MODE
		break;

	case BTC_GET_BL_WIFI_ENABLE_ENCRYPTION:
		/* 0 == open/no privacy algorithm -> encryption disabled. */
		*pu8 = padapter->securitypriv.dot11PrivacyAlgrthm == 0? _FALSE: _TRUE;
		break;

	case BTC_GET_BL_WIFI_UNDER_B_MODE:
		if (mlmeext->cur_wireless_mode == WIRELESS_11B)
			*pu8 = _TRUE;
		else
			*pu8 = _FALSE;
		break;

	case BTC_GET_BL_EXT_SWITCH:
		*pu8 = _FALSE;
		break;

	case BTC_GET_S4_WIFI_RSSI:
		*pS4Tmp = halbtcoutsrc_GetWifiRssi(padapter);
		break;

	case BTC_GET_S4_HS_RSSI:
		*pS4Tmp = 0;
		ret = _FALSE;
		break;

	case BTC_GET_U4_WIFI_BW:
		if (IsLegacyOnly(mlmeext->cur_wireless_mode))
			*pU4Tmp = BTC_WIFI_BW_LEGACY;
		else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_20)
			*pU4Tmp = BTC_WIFI_BW_HT20;
		else if (pHalData->CurrentChannelBW == CHANNEL_WIDTH_40)
			*pU4Tmp = BTC_WIFI_BW_HT40;
		else
			*pU4Tmp = BTC_WIFI_BW_HT40; /* todo */
		break;

	case BTC_GET_U4_WIFI_TRAFFIC_DIRECTION:
		{
			PRT_LINK_DETECT_T plinkinfo;
			plinkinfo = &padapter->mlmepriv.LinkDetectInfo;

			/* Dominant direction in the last detection period. */
			if (plinkinfo->NumTxOkInPeriod > plinkinfo->NumRxOkInPeriod)
				*pU4Tmp = BTC_WIFI_TRAFFIC_TX;
			else
				*pU4Tmp = BTC_WIFI_TRAFFIC_RX;
		}
		break;

	case BTC_GET_U4_WIFI_FW_VER:
		/* Pack main version in the high 16 bits, sub version in the low. */
		*pU4Tmp = pHalData->FirmwareVersion << 16;
		*pU4Tmp |= pHalData->FirmwareSubVersion;
		break;

	case BTC_GET_U4_WIFI_LINK_STATUS:
		*pU4Tmp = halbtcoutsrc_GetWifiLinkStatus(pBtCoexist);
		break;

	case BTC_GET_U4_BT_PATCH_VER:
		*pU4Tmp = halbtcoutsrc_GetBtPatchVer(pBtCoexist);
		break;

	case BTC_GET_U1_WIFI_DOT11_CHNL:
		*pU1Tmp = padapter->mlmeextpriv.cur_channel;
		break;

	case BTC_GET_U1_WIFI_CENTRAL_CHNL:
		*pU1Tmp = pHalData->CurrentChannel;
		break;

	case BTC_GET_U1_WIFI_HS_CHNL:
		*pU1Tmp = 0;
		ret = _FALSE;
		break;

	case BTC_GET_U1_MAC_PHY_MODE:
		/* Single MAC / single PHY is the only mode reported here. */
		*pU1Tmp = BTC_SMSP;
//			*pU1Tmp = BTC_DMSP;
//			*pU1Tmp = BTC_DMDP;
//			*pU1Tmp = BTC_MP_UNKNOWN;
		break;

	case BTC_GET_U1_AP_NUM:
		*pU1Tmp = halbtcoutsrc_GetWifiScanAPNum(padapter);
		break;

	//=======1Ant===========
	case BTC_GET_U1_LPS_MODE:
		*pU1Tmp = padapter->dvobj->pwrctl_priv.pwr_mode;
		break;

	default:
		ret = _FALSE;
		break;
	}

	return ret;
}
/*
 * Coex-engine -> driver "set" dispatch.
 *
 * pInBuf is interpreted according to setType (bool-as-u8, u8 or u32 — the
 * BTC_SET_* prefix encodes the expected type); action-type entries trigger
 * driver operations instead of storing a value. Returns _TRUE when the
 * request was handled, _FALSE when coexistence is unavailable or the
 * request is unsupported/not implemented.
 *
 * BUGFIX: the original BTC_SET_BL_INC_SCAN_DEV_NUM case was missing its
 * `break`, so it fell through into BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON
 * and overwrote rssiAdjustForAgcTableOn with the boolean input value.
 */
u8 halbtcoutsrc_Set(void *pBtcContext, u8 setType, void *pInBuf)
{
	PBTC_COEXIST pBtCoexist;
	PADAPTER padapter;
	PHAL_DATA_TYPE pHalData;
	u8 *pu8;
	u8 *pU1Tmp;
	u32 *pU4Tmp;
	u8 ret;

	pBtCoexist = (PBTC_COEXIST)pBtcContext;
	padapter = pBtCoexist->Adapter;
	pHalData = GET_HAL_DATA(padapter);
	/* All aliases view the same caller input buffer. */
	pu8 = (u8*)pInBuf;
	pU1Tmp = (u8*)pInBuf;
	pU4Tmp = (u32*)pInBuf;
	ret = _TRUE;

	if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
		return _FALSE;

	switch (setType)
	{
	// set some u8 type variables.
	case BTC_SET_BL_BT_DISABLE:
		pBtCoexist->btInfo.bBtDisabled = *pu8;
		break;

	case BTC_SET_BL_BT_TRAFFIC_BUSY:
		pBtCoexist->btInfo.bBtBusy = *pu8;
		break;

	case BTC_SET_BL_BT_LIMITED_DIG:
		pBtCoexist->btInfo.bLimitedDig = *pu8;
		break;

	case BTC_SET_BL_FORCE_TO_ROAM:
		pBtCoexist->btInfo.bForceToRoam = *pu8;
		break;

	case BTC_SET_BL_TO_REJ_AP_AGG_PKT:
		pBtCoexist->btInfo.bRejectAggPkt = *pu8;
		break;

	case BTC_SET_BL_BT_CTRL_AGG_SIZE:
		pBtCoexist->btInfo.bBtCtrlAggBufSize = *pu8;
		break;

	case BTC_SET_BL_INC_SCAN_DEV_NUM:
		pBtCoexist->btInfo.bIncreaseScanDevNum = *pu8;
		break; /* was missing: fell through into the AGC-table RSSI case */

	// set some u8 type variables.
	case BTC_SET_U1_RSSI_ADJ_VAL_FOR_AGC_TABLE_ON:
		pBtCoexist->btInfo.rssiAdjustForAgcTableOn = *pU1Tmp;
		break;

	case BTC_SET_U1_AGG_BUF_SIZE:
		pBtCoexist->btInfo.aggBufSize = *pU1Tmp;
		break;

	// the following are some action which will be triggered
	case BTC_SET_ACT_GET_BT_RSSI:
#if 0
		BT_SendGetBtRssiEvent(padapter);
#else
		ret = _FALSE;
#endif
		break;

	case BTC_SET_ACT_AGGREGATE_CTRL:
		halbtcoutsrc_AggregationCheck(pBtCoexist);
		break;

	//=======1Ant===========
	// set some u8 type variables.
	case BTC_SET_U1_RSSI_ADJ_VAL_FOR_1ANT_COEX_TYPE:
		pBtCoexist->btInfo.rssiAdjustFor1AntCoexType = *pU1Tmp;
		break;

	case BTC_SET_U1_LPS_VAL:
		pBtCoexist->btInfo.lpsVal = *pU1Tmp;
		break;

	case BTC_SET_U1_RPWM_VAL:
		pBtCoexist->btInfo.rpwmVal = *pU1Tmp;
		break;

	// the following are some action which will be triggered
	case BTC_SET_ACT_LEAVE_LPS:
		halbtcoutsrc_LeaveLps(pBtCoexist);
		break;

	case BTC_SET_ACT_ENTER_LPS:
		halbtcoutsrc_EnterLps(pBtCoexist);
		break;

	case BTC_SET_ACT_NORMAL_LPS:
		halbtcoutsrc_NormalLps(pBtCoexist);
		break;

	case BTC_SET_ACT_DISABLE_LOW_POWER:
		halbtcoutsrc_DisableLowPower(pBtCoexist, *pu8);
		break;

	case BTC_SET_ACT_UPDATE_RAMASK:
		pBtCoexist->btInfo.raMask = *pU4Tmp;
		/* Refresh the rate-adaptive mask of the currently associated STA. */
		if (check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE) == _TRUE)
		{
			struct sta_info *psta;
			PWLAN_BSSID_EX cur_network;

			cur_network = &padapter->mlmeextpriv.mlmext_info.network;
			psta = rtw_get_stainfo(&padapter->stapriv, cur_network->MacAddress);
			rtw_hal_update_ra_mask(psta, 0);
		}
		break;

	case BTC_SET_ACT_SEND_MIMO_PS:
#if 0 // not implement yet
		{
			u8 newMimoPsMode = *pU1Tmp;

			if (check_fwstate(&padapter->mlmepriv, WIFI_ASOC_STATE) == _TRUE)
				SendMimoPsFrame(padapter, padapter->MgntInfo.Bssid, newMimoPsMode);
		}
#else
		ret = _FALSE;
#endif
		break;

	case BTC_SET_ACT_CTRL_BT_INFO:
#if 0
		{
			u8 dataLen = *pU1Tmp;
			u8 tmpBuf[20];

			if (dataLen)
			{
				_rtw_memcpy(tmpBuf, pU1Tmp+1, dataLen);
			}
//			BT_SendEventExtBtInfoControl(padapter, dataLen, &tmpBuf[0]);
		}
#else
		ret = _FALSE;
#endif
		break;

	case BTC_SET_ACT_CTRL_BT_COEX:
#if 0
		{
			u8 dataLen = *pU1Tmp;
			u8 tmpBuf[20];

			if (dataLen)
			{
				_rtw_memcpy(tmpBuf, pU1Tmp+1, dataLen);
			}
//			BT_SendEventExtBtCoexControl(padapter, _FALSE, dataLen, &tmpBuf[0]);
		}
#else
		ret = _FALSE;
#endif
		break;

	//=====================
	default:
		ret = _FALSE;
		break;
	}

	return ret;
}
/*
 * Dump coexistence statistics to the coex CLI buffer.
 * The whole body is currently compiled out (#if 0); the function is kept
 * as a no-op so the BTC_DBG_DISP_COEX_STATISTICS dispatch still links.
 */
void halbtcoutsrc_DisplayCoexStatistics(PBTC_COEXIST pBtCoexist)
{
#if 0
	PADAPTER padapter = (PADAPTER)pBtCoexist->Adapter;
	PBT_MGNT pBtMgnt = &padapter->MgntInfo.BtInfo.BtMgnt;
	PHAL_DATA_TYPE pHalData = GET_HAL_DATA(padapter);
	u8 *cliBuf = pBtCoexist->cliBuf;
	u8 i;

	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s", "============[Statistics]============");
	CL_PRINTF(cliBuf);

#if (H2C_USE_IO_THREAD != 1)
	for(i=0; i<H2C_STATUS_MAX; i++)
	{
		if (pHalData->h2cStatistics[i])
		{
			CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = [%s] = %d", "H2C statistics", \
				h2cStaString[i], pHalData->h2cStatistics[i]);
			CL_PRINTF(cliBuf);
		}
	}

	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x", "lastHMEBoxNum", \
		pHalData->LastHMEBoxNum);
	CL_PRINTF(cliBuf);
	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = 0x%x / 0x%x", "LastSuccessFwEid/FirstfailedFwEid", \
		pHalData->lastSuccessH2cEid, pHalData->firstFailedH2cEid);
	CL_PRINTF(cliBuf);
#endif

	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d/ %d/ %d", "c2hIsr/c2hIntr/clr1AF/noRdy/noBuf", \
		pHalData->InterruptLog.nIMR_C2HCMD, DBG_Var.c2hInterruptCnt, DBG_Var.c2hClrReadC2hCnt,
		DBG_Var.c2hNotReadyCnt, DBG_Var.c2hBufAlloFailCnt);
	CL_PRINTF(cliBuf);
	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d", "c2hPacket", \
		DBG_Var.c2hPacketCnt);
	CL_PRINTF(cliBuf);
	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Periodical/ DbgCtrl", \
		pBtCoexist->statistics.cntPeriodical, pBtCoexist->statistics.cntDbgCtrl);
	CL_PRINTF(cliBuf);
	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "InitHw/InitCoexDm/", \
		pBtCoexist->statistics.cntInitHwConfig, pBtCoexist->statistics.cntInitCoexDm);
	CL_PRINTF(cliBuf);
	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d/ %d/ %d/ %d", "Ips/Lps/Scan/Connect/Mstatus", \
		pBtCoexist->statistics.cntIpsNotify, pBtCoexist->statistics.cntLpsNotify,
		pBtCoexist->statistics.cntScanNotify, pBtCoexist->statistics.cntConnectNotify,
		pBtCoexist->statistics.cntMediaStatusNotify);
	CL_PRINTF(cliBuf);
	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %d/ %d", "Special pkt/Bt info", \
		pBtCoexist->statistics.cntSpecialPacketNotify, pBtCoexist->statistics.cntBtInfoNotify);
	CL_PRINTF(cliBuf);
#endif
}
/*
 * Dump BT ACL-link profile/spec/role info to the coex CLI buffer.
 * The whole body is currently compiled out (#if 0); the function is kept
 * as a no-op so the BTC_DBG_DISP_BT_LINK_INFO dispatch still links.
 */
void halbtcoutsrc_DisplayBtLinkInfo(PBTC_COEXIST pBtCoexist)
{
#if 0
	PADAPTER padapter = (PADAPTER)pBtCoexist->Adapter;
	PBT_MGNT pBtMgnt = &padapter->MgntInfo.BtInfo.BtMgnt;
	u8 *cliBuf = pBtCoexist->cliBuf;
	u8 i;

	if (pBtCoexist->stackInfo.bProfileNotified)
	{
		for (i=0; i<pBtMgnt->ExtConfig.NumberOfACL; i++)
		{
			if (pBtMgnt->ExtConfig.HCIExtensionVer >= 1)
			{
				CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/ %s/ %s", "Bt link type/spec/role", \
					BtProfileString[pBtMgnt->ExtConfig.aclLink[i].BTProfile],
					BtSpecString[pBtMgnt->ExtConfig.aclLink[i].BTCoreSpec],
					BtLinkRoleString[pBtMgnt->ExtConfig.aclLink[i].linkRole]);
				CL_PRINTF(cliBuf); }
			else
			{
				CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %s/ %s", "Bt link type/spec", \
					BtProfileString[pBtMgnt->ExtConfig.aclLink[i].BTProfile],
					BtSpecString[pBtMgnt->ExtConfig.aclLink[i].BTCoreSpec]);
				CL_PRINTF(cliBuf);
			}
		}
	}
#endif
}
/*
 * Print the 6-byte cached firmware power-mode command (pwrModeVal[])
 * to the coex CLI buffer in hex.
 */
void halbtcoutsrc_DisplayFwPwrModeCmd(PBTC_COEXIST pBtCoexist)
{
	u8 *cliBuf = pBtCoexist->cliBuf;

	CL_SPRINTF(cliBuf, BT_TMP_BUF_SIZE, "\r\n %-35s = %02x %02x %02x %02x %02x %02x ", "Power mode cmd ", \
		pBtCoexist->pwrModeVal[0], pBtCoexist->pwrModeVal[1],
		pBtCoexist->pwrModeVal[2], pBtCoexist->pwrModeVal[3],
		pBtCoexist->pwrModeVal[4], pBtCoexist->pwrModeVal[5]);
	CL_PRINTF(cliBuf);
}
//====================================
// IO related function
//====================================
u8 halbtcoutsrc_Read1Byte(void *pBtcContext, u32 RegAddr)
{
	/* Resolve the adapter behind the coex context and read one byte. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	return rtw_read8(pBtCoexist->Adapter, RegAddr);
}
u16 halbtcoutsrc_Read2Byte(void *pBtcContext, u32 RegAddr)
{
	/* Resolve the adapter behind the coex context and read two bytes. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	return rtw_read16(pBtCoexist->Adapter, RegAddr);
}
u32 halbtcoutsrc_Read4Byte(void *pBtcContext, u32 RegAddr)
{
	/* Resolve the adapter behind the coex context and read four bytes. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	return rtw_read32(pBtCoexist->Adapter, RegAddr);
}
void halbtcoutsrc_Write1Byte(void *pBtcContext, u32 RegAddr, u8 Data)
{
	/* Resolve the adapter behind the coex context and write one byte. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	rtw_write8(pBtCoexist->Adapter, RegAddr, Data);
}
void halbtcoutsrc_BitMaskWrite1Byte(void *pBtcContext, u32 regAddr, u8 bitMask, u8 data1b)
{
	/*
	 * Read-modify-write one byte: only the bits selected by bitMask are
	 * replaced with data1b (shifted up to the mask's lowest set bit).
	 * A full mask (0xFF) skips the read and writes data1b directly.
	 */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;
	PADAPTER padapter = pBtCoexist->Adapter;

	if (bitMask != 0xFF) {
		u8 originalValue = rtw_read8(padapter, regAddr);
		u8 bitShift = 0;

		/* Find the position of the mask's least-significant set bit. */
		while (bitShift <= 7 && !((bitMask >> bitShift) & 0x1))
			bitShift++;

		data1b = (originalValue & ~bitMask) | ((data1b << bitShift) & bitMask);
	}

	rtw_write8(padapter, regAddr, data1b);
}
void halbtcoutsrc_Write2Byte(void *pBtcContext, u32 RegAddr, u16 Data)
{
	/* Resolve the adapter behind the coex context and write two bytes. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	rtw_write16(pBtCoexist->Adapter, RegAddr, Data);
}
void halbtcoutsrc_Write4Byte(void *pBtcContext, u32 RegAddr, u32 Data)
{
	/* Resolve the adapter behind the coex context and write four bytes. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	rtw_write32(pBtCoexist->Adapter, RegAddr, Data);
}
void halbtcoutsrc_SetBbReg(void *pBtcContext, u32 RegAddr, u32 BitMask, u32 Data)
{
	/* Masked write to a baseband register via the PHY layer. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	PHY_SetBBReg(pBtCoexist->Adapter, RegAddr, BitMask, Data);
}
u32 halbtcoutsrc_GetBbReg(void *pBtcContext, u32 RegAddr, u32 BitMask)
{
	/* Masked read of a baseband register via the PHY layer. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	return PHY_QueryBBReg(pBtCoexist->Adapter, RegAddr, BitMask);
}
void halbtcoutsrc_SetRfReg(void *pBtcContext, u8 eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
{
	/* Masked write to an RF register on the given RF path. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	PHY_SetRFReg(pBtCoexist->Adapter, eRFPath, RegAddr, BitMask, Data);
}
u32 halbtcoutsrc_GetRfReg(void *pBtcContext, u8 eRFPath, u32 RegAddr, u32 BitMask)
{
	/* Masked read of an RF register on the given RF path. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	return PHY_QueryRFReg(pBtCoexist->Adapter, eRFPath, RegAddr, BitMask);
}
/*
 * Write a BT-side register via two H2C (0x67) commands on 8723B only.
 * The first command carries the value (opcode 0x0d), the second the
 * address (opcode 0x0c), separated by a 200 ms delay; the command order
 * and byte layout follow the firmware protocol and must not be reordered.
 * NOTE(review): only byte 0 of RegAddr/Data is sent — presumably the
 * protocol limits this to 8-bit address/value; RegType is unused here.
 */
void halbtcoutsrc_SetBtReg(void *pBtcContext, u8 RegType, u32 RegAddr, u32 Data)
{
	PBTC_COEXIST pBtCoexist;
	PADAPTER padapter;
	u8 CmdBuffer1[4] = {0};
	u8 CmdBuffer2[4] = {0};
	u8* AddrToSet = (u8*)&RegAddr;
	u8* ValueToSet = (u8*)&Data;
	u8 OperVer = 0;
	u8 ReqNum = 0;

	pBtCoexist = (PBTC_COEXIST)pBtcContext;
	padapter = pBtCoexist->Adapter;

	if (IS_HARDWARE_TYPE_8723B(padapter))
	{
		/* Command 1: send the register value. */
		CmdBuffer1[0] |= (OperVer & 0x0f);			/* Set OperVer */
		CmdBuffer1[0] |= ((ReqNum << 4) & 0xf0);	/* Set ReqNum */
		CmdBuffer1[1] = 0x0d;						/* Set OpCode to BT_LO_OP_WRITE_REG_VALUE */
		CmdBuffer1[2] = ValueToSet[0];				/* Set WriteRegValue */
		rtw_hal_fill_h2c_cmd(padapter, 0x67, 4, &(CmdBuffer1[0]));

		rtw_msleep_os(200);
		ReqNum++;

		/* Command 2: send the register address (triggers the write). */
		CmdBuffer2[0] |= (OperVer & 0x0f);			/* Set OperVer */
		CmdBuffer2[0] |= ((ReqNum << 4) & 0xf0);	/* Set ReqNum */
		CmdBuffer2[1] = 0x0c;						/* Set OpCode of BT_LO_OP_WRITE_REG_ADDR */
		CmdBuffer2[3] = AddrToSet[0];				/* Set WriteRegAddr */
		rtw_hal_fill_h2c_cmd(padapter, 0x67, 4, &(CmdBuffer2[0]));
	}
}
void halbtcoutsrc_FillH2cCmd(void *pBtcContext, u8 elementId, u32 cmdLen, u8 *pCmdBuffer)
{
	/* Forward an H2C command from the coex engine to the HAL. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	rtw_hal_fill_h2c_cmd(pBtCoexist->Adapter, elementId, cmdLen, pCmdBuffer);
}
void halbtcoutsrc_DisplayDbgMsg(void *pBtcContext, u8 dispType)
{
	/* Dispatch a debug-display request; unknown types are ignored. */
	PBTC_COEXIST pBtCoexist = (PBTC_COEXIST)pBtcContext;

	if (BTC_DBG_DISP_COEX_STATISTICS == dispType)
		halbtcoutsrc_DisplayCoexStatistics(pBtCoexist);
	else if (BTC_DBG_DISP_BT_LINK_INFO == dispType)
		halbtcoutsrc_DisplayBtLinkInfo(pBtCoexist);
	else if (BTC_DBG_DISP_FW_PWR_MODE_CMD == dispType)
		halbtcoutsrc_DisplayFwPwrModeCmd(pBtCoexist);
}
u8 halbtcoutsrc_UnderIps(PBTC_COEXIST pBtCoexist)
{
	/*
	 * Report _TRUE when the adapter is (entering) inactive power save:
	 * IPS transition in progress, RF already off, or MAC power removed.
	 */
	PADAPTER padapter = pBtCoexist->Adapter;
	struct pwrctrl_priv *pwrpriv = &padapter->dvobj->pwrctl_priv;
	u8 bMacPwrCtrlOn = _FALSE;

	if ((pwrpriv->bips_processing == _TRUE) && (pwrpriv->ips_mode_req != IPS_NONE))
		return _TRUE;

	if (pwrpriv->rf_pwrstate == rf_off)
		return _TRUE;

	rtw_hal_get_hwreg(padapter, HW_VAR_APFM_ON_MAC, &bMacPwrCtrlOn);
	if (bMacPwrCtrlOn == _FALSE)
		return _TRUE;

	return _FALSE;
}
//====================================
// Extern functions called by other module
//====================================
/*
 * One-time binding of the global coex context (GLBtCoexist) to an adapter:
 * records the bus interface, wires all fBtc* callbacks to the halbtcoutsrc
 * accessors above, and resets the coex-side state flags.
 *
 * Returns _FALSE if the context was already bound (subsequent calls are
 * no-ops apart from the bind counter), _TRUE on first successful bind.
 *
 * NOTE(review): "Initlize" is a typo, but the name is this module's
 * external interface and must not be changed here.
 */
u8 EXhalbtcoutsrc_InitlizeVariables(void *padapter)
{
	PBTC_COEXIST pBtCoexist = &GLBtCoexist;

	pBtCoexist->statistics.cntBind++;

	halbtcoutsrc_DbgInit();

	/* Bind only once; keep the first adapter. */
	if (pBtCoexist->bBinded)
		return _FALSE;
	else
		pBtCoexist->bBinded = _TRUE;

	/* Record which host bus this build targets. */
#ifdef CONFIG_PCI_HCI
	pBtCoexist->chipInterface = BTC_INTF_PCI;
#elif defined(CONFIG_USB_HCI)
	pBtCoexist->chipInterface = BTC_INTF_USB;
#elif defined(CONFIG_SDIO_HCI) || defined(CONFIG_GSPI_HCI)
	pBtCoexist->chipInterface = BTC_INTF_SDIO;
#else
	pBtCoexist->chipInterface = BTC_INTF_UNKNOWN;
#endif

	if (NULL == pBtCoexist->Adapter)
	{
		pBtCoexist->Adapter = padapter;
	}

	pBtCoexist->stackInfo.bProfileNotified = _FALSE;

	/* Callback table the chip-specific coex algorithms use for all
	 * register/H2C/get/set access into this driver. */
	pBtCoexist->fBtcRead1Byte = halbtcoutsrc_Read1Byte;
	pBtCoexist->fBtcWrite1Byte = halbtcoutsrc_Write1Byte;
	pBtCoexist->fBtcWrite1ByteBitMask = halbtcoutsrc_BitMaskWrite1Byte;
	pBtCoexist->fBtcRead2Byte = halbtcoutsrc_Read2Byte;
	pBtCoexist->fBtcWrite2Byte = halbtcoutsrc_Write2Byte;
	pBtCoexist->fBtcRead4Byte = halbtcoutsrc_Read4Byte;
	pBtCoexist->fBtcWrite4Byte = halbtcoutsrc_Write4Byte;

	pBtCoexist->fBtcSetBbReg = halbtcoutsrc_SetBbReg;
	pBtCoexist->fBtcGetBbReg = halbtcoutsrc_GetBbReg;

	pBtCoexist->fBtcSetRfReg = halbtcoutsrc_SetRfReg;
	pBtCoexist->fBtcGetRfReg = halbtcoutsrc_GetRfReg;

	pBtCoexist->fBtcSetBtReg = halbtcoutsrc_SetBtReg;
	pBtCoexist->fBtcFillH2c = halbtcoutsrc_FillH2cCmd;
	pBtCoexist->fBtcDispDbgMsg = halbtcoutsrc_DisplayDbgMsg;

	pBtCoexist->fBtcGet = halbtcoutsrc_Get;
	pBtCoexist->fBtcSet = halbtcoutsrc_Set;

	pBtCoexist->cliBuf = GLBtcDbgBuf;

	pBtCoexist->btInfo.bBtCtrlAggBufSize = _FALSE;
	pBtCoexist->btInfo.aggBufSize = 5;

	pBtCoexist->btInfo.bIncreaseScanDevNum = _FALSE;

	GLBtcWiFiInScanState = _FALSE;

	GLBtcWiFiInIQKState = _FALSE;

	return _TRUE;
}
/*
 * Route the hardware-init request to the chip- and antenna-count-specific
 * coex implementation. Chips with only a 2-antenna variant (8723A, 8192C,
 * 8192D) silently ignore a 1-antenna board; unknown chips are ignored.
 */
void EXhalbtcoutsrc_InitHwConfig(PBTC_COEXIST pBtCoexist)
{
	if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
		return;

	pBtCoexist->statistics.cntInitHwConfig++;

	if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8821a2ant_InitHwConfig(pBtCoexist);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8821a1ant_InitHwConfig(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723b2ant_InitHwConfig(pBtCoexist);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8723b1ant_InitHwConfig(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723a2ant_InitHwConfig(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8188c2ant_InitHwConfig(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192d2ant_InitHwConfig(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192e2ant_InitHwConfig(pBtCoexist);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8192e1ant_InitHwConfig(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8812a2ant_InitHwConfig(pBtCoexist);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8812a1ant_InitHwConfig(pBtCoexist);
	}
}
/*
 * Route the coex dynamic-mechanism init to the chip/antenna-specific
 * implementation, then mark the coex context initialized (bInitilized is
 * set even for unknown chips, matching the original behavior).
 */
void EXhalbtcoutsrc_InitCoexDm(PBTC_COEXIST pBtCoexist)
{
	if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
		return;

	pBtCoexist->statistics.cntInitCoexDm++;

	if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8821a2ant_InitCoexDm(pBtCoexist);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8821a1ant_InitCoexDm(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723b2ant_InitCoexDm(pBtCoexist);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8723b1ant_InitCoexDm(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723a2ant_InitCoexDm(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8188c2ant_InitCoexDm(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192d2ant_InitCoexDm(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192e2ant_InitCoexDm(pBtCoexist);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8192e1ant_InitCoexDm(pBtCoexist);
	}
	else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8812a2ant_InitCoexDm(pBtCoexist);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8812a1ant_InitCoexDm(pBtCoexist);
	}

	pBtCoexist->bInitilized = _TRUE;
}
/*
 * Notify the chip-specific coex algorithm of an inactive-power-save (IPS)
 * transition. `type` is the driver IPS mode; IPS_NONE maps to "leaving
 * IPS", everything else to "entering IPS". Suppressed while the user has
 * taken manual control of the coex mechanism.
 */
void EXhalbtcoutsrc_IpsNotify(PBTC_COEXIST pBtCoexist, u8 type)
{
	u8 ipsType;

	if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
		return;

	pBtCoexist->statistics.cntIpsNotify++;

	if (pBtCoexist->bManualControl)
		return;

	if (IPS_NONE == type)
		ipsType = BTC_IPS_LEAVE;
	else
		ipsType = BTC_IPS_ENTER;

	// All notify is called in cmd thread, don't need to leave low power again
//	halbtcoutsrc_LeaveLowPower(pBtCoexist);

	if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8821a2ant_IpsNotify(pBtCoexist, ipsType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8821a1ant_IpsNotify(pBtCoexist, ipsType);
	}
	else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723b2ant_IpsNotify(pBtCoexist, ipsType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8723b1ant_IpsNotify(pBtCoexist, ipsType);
	}
	else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723a2ant_IpsNotify(pBtCoexist, ipsType);
	}
	else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8188c2ant_IpsNotify(pBtCoexist, ipsType);
	}
	else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192d2ant_IpsNotify(pBtCoexist, ipsType);
	}
	else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192e2ant_IpsNotify(pBtCoexist, ipsType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8192e1ant_IpsNotify(pBtCoexist, ipsType);
	}
	else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8812a2ant_IpsNotify(pBtCoexist, ipsType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8812a1ant_IpsNotify(pBtCoexist, ipsType);
	}

//	halbtcoutsrc_NormalLowPower(pBtCoexist);
}
/*
 * Notify the chip-specific coex algorithm of a leisure-power-save (LPS)
 * change. `type` is the driver power mode; PS_MODE_ACTIVE maps to "LPS
 * disabled", everything else to "LPS enabled". Suppressed under manual
 * coex control.
 */
void EXhalbtcoutsrc_LpsNotify(PBTC_COEXIST pBtCoexist, u8 type)
{
	u8 lpsType;

	if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
		return;

	pBtCoexist->statistics.cntLpsNotify++;

	if (pBtCoexist->bManualControl)
		return;

	if (PS_MODE_ACTIVE == type)
		lpsType = BTC_LPS_DISABLE;
	else
		lpsType = BTC_LPS_ENABLE;

	if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8821a2ant_LpsNotify(pBtCoexist, lpsType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8821a1ant_LpsNotify(pBtCoexist, lpsType);
	}
	else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723b2ant_LpsNotify(pBtCoexist, lpsType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8723b1ant_LpsNotify(pBtCoexist, lpsType);
	}
	else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723a2ant_LpsNotify(pBtCoexist, lpsType);
	}
	else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8188c2ant_LpsNotify(pBtCoexist, lpsType);
	}
	else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192d2ant_LpsNotify(pBtCoexist, lpsType);
	}
	else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192e2ant_LpsNotify(pBtCoexist, lpsType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8192e1ant_LpsNotify(pBtCoexist, lpsType);
	}
	else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8812a2ant_LpsNotify(pBtCoexist, lpsType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8812a1ant_LpsNotify(pBtCoexist, lpsType);
	}
}
/*
 * Notify the chip-specific coex algorithm that a WiFi scan started
 * (type != 0) or finished (type == 0). Also maintains the global
 * GLBtcWiFiInScanState flag that BTC_GET_BL_WIFI_SCAN reports, since the
 * fwstate WIFI_SITE_MONITOR flag may lag behind. Suppressed under manual
 * coex control.
 */
void EXhalbtcoutsrc_ScanNotify(PBTC_COEXIST pBtCoexist, u8 type)
{
	u8 scanType;

	if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
		return;

	pBtCoexist->statistics.cntScanNotify++;

	if (pBtCoexist->bManualControl)
		return;

	if (type)
	{
		scanType = BTC_SCAN_START;
		GLBtcWiFiInScanState = _TRUE;
	}
	else
	{
		scanType = BTC_SCAN_FINISH;
		GLBtcWiFiInScanState = _FALSE;
	}

	// All notify is called in cmd thread, don't need to leave low power again
//	halbtcoutsrc_LeaveLowPower(pBtCoexist);

	if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8821a2ant_ScanNotify(pBtCoexist, scanType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8821a1ant_ScanNotify(pBtCoexist, scanType);
	}
	else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723b2ant_ScanNotify(pBtCoexist, scanType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8723b1ant_ScanNotify(pBtCoexist, scanType);
	}
	else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723a2ant_ScanNotify(pBtCoexist, scanType);
	}
	else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8188c2ant_ScanNotify(pBtCoexist, scanType);
	}
	else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192d2ant_ScanNotify(pBtCoexist, scanType);
	}
	else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192e2ant_ScanNotify(pBtCoexist, scanType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8192e1ant_ScanNotify(pBtCoexist, scanType);
	}
	else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8812a2ant_ScanNotify(pBtCoexist, scanType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8812a1ant_ScanNotify(pBtCoexist, scanType);
	}

//	halbtcoutsrc_NormalLowPower(pBtCoexist);
}
/*
 * Notify the chip-specific coex algorithm that an association attempt
 * started (action != 0) or finished (action == 0). Suppressed under
 * manual coex control.
 */
void EXhalbtcoutsrc_ConnectNotify(PBTC_COEXIST pBtCoexist, u8 action)
{
	u8 assoType;

	if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
		return;

	pBtCoexist->statistics.cntConnectNotify++;

	if (pBtCoexist->bManualControl)
		return;

	if (action)
		assoType = BTC_ASSOCIATE_START;
	else
		assoType = BTC_ASSOCIATE_FINISH;

	// All notify is called in cmd thread, don't need to leave low power again
//	halbtcoutsrc_LeaveLowPower(pBtCoexist);

	if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8821a2ant_ConnectNotify(pBtCoexist, assoType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8821a1ant_ConnectNotify(pBtCoexist, assoType);
	}
	else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723b2ant_ConnectNotify(pBtCoexist, assoType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8723b1ant_ConnectNotify(pBtCoexist, assoType);
	}
	else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723a2ant_ConnectNotify(pBtCoexist, assoType);
	}
	else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8188c2ant_ConnectNotify(pBtCoexist, assoType);
	}
	else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192d2ant_ConnectNotify(pBtCoexist, assoType);
	}
	else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192e2ant_ConnectNotify(pBtCoexist, assoType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8192e1ant_ConnectNotify(pBtCoexist, assoType);
	}
	else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8812a2ant_ConnectNotify(pBtCoexist, assoType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8812a1ant_ConnectNotify(pBtCoexist, assoType);
	}

//	halbtcoutsrc_NormalLowPower(pBtCoexist);
}
/*
 * Notify the chip-specific coex algorithm of a media (link) status change:
 * RT_MEDIA_CONNECT maps to BTC_MEDIA_CONNECT, anything else to
 * BTC_MEDIA_DISCONNECT. Suppressed under manual coex control.
 */
void EXhalbtcoutsrc_MediaStatusNotify(PBTC_COEXIST pBtCoexist, RT_MEDIA_STATUS mediaStatus)
{
	u8 mStatus;

	if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
		return;

	pBtCoexist->statistics.cntMediaStatusNotify++;

	if (pBtCoexist->bManualControl)
		return;

	if (RT_MEDIA_CONNECT == mediaStatus)
		mStatus = BTC_MEDIA_CONNECT;
	else
		mStatus = BTC_MEDIA_DISCONNECT;

	// All notify is called in cmd thread, don't need to leave low power again
//	halbtcoutsrc_LeaveLowPower(pBtCoexist);

	if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8821a2ant_MediaStatusNotify(pBtCoexist, mStatus);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8821a1ant_MediaStatusNotify(pBtCoexist, mStatus);
	}
	else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723b2ant_MediaStatusNotify(pBtCoexist, mStatus);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8723b1ant_MediaStatusNotify(pBtCoexist, mStatus);
	}
	else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723a2ant_MediaStatusNotify(pBtCoexist, mStatus);
	}
	else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8188c2ant_MediaStatusNotify(pBtCoexist, mStatus);
	}
	else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192d2ant_MediaStatusNotify(pBtCoexist, mStatus);
	}
	else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192e2ant_MediaStatusNotify(pBtCoexist, mStatus);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8192e1ant_MediaStatusNotify(pBtCoexist, mStatus);
	}
	else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8812a2ant_MediaStatusNotify(pBtCoexist, mStatus);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8812a1ant_MediaStatusNotify(pBtCoexist, mStatus);
	}

//	halbtcoutsrc_NormalLowPower(pBtCoexist);
}
/*
 * EXhalbtcoutsrc_SpecialPacketNotify() - forward a special WiFi packet event
 * (DHCP / EAPOL / ARP) to the per-chip BT-coexist algorithm.
 *
 * @pBtCoexist: global BT-coexist context
 * @pktType: driver PACKET_* code of the outgoing/incoming special frame
 *
 * Unknown packet types are not forwarded (the original code stored
 * BTC_PACKET_UNKNOWN into a local and then returned immediately - a dead
 * store; the early return is kept, the dead assignment removed).
 * Does nothing when coexistence is unavailable or under manual control.
 */
void EXhalbtcoutsrc_SpecialPacketNotify(PBTC_COEXIST pBtCoexist, u8 pktType)
{
	u8 packetType;

	if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
		return;
	pBtCoexist->statistics.cntSpecialPacketNotify++;
	if (pBtCoexist->bManualControl)
		return;

	/* Translate the driver packet code to the BTC_PACKET_* code. */
	if (PACKET_DHCP == pktType)
		packetType = BTC_PACKET_DHCP;
	else if (PACKET_EAPOL == pktType)
		packetType = BTC_PACKET_EAPOL;
	else if (PACKET_ARP == pktType)
		packetType = BTC_PACKET_ARP;
	else
		return;	/* unknown special packets are ignored */

	// All notify is called in cmd thread, don't need to leave low power again
	// halbtcoutsrc_LeaveLowPower(pBtCoexist);

	/* Dispatch to the chip- and antenna-count-specific handler. */
	if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8821a2ant_SpecialPacketNotify(pBtCoexist, packetType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8821a1ant_SpecialPacketNotify(pBtCoexist, packetType);
	}
	else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723b2ant_SpecialPacketNotify(pBtCoexist, packetType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8723b1ant_SpecialPacketNotify(pBtCoexist, packetType);
	}
	else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
	{
		/* 8723A/8192C/8192D: only a 2-antenna handler exists. */
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8723a2ant_SpecialPacketNotify(pBtCoexist, packetType);
	}
	else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8188c2ant_SpecialPacketNotify(pBtCoexist, packetType);
	}
	else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192d2ant_SpecialPacketNotify(pBtCoexist, packetType);
	}
	else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8192e2ant_SpecialPacketNotify(pBtCoexist, packetType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8192e1ant_SpecialPacketNotify(pBtCoexist, packetType);
	}
	else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
	{
		if (pBtCoexist->boardInfo.btdmAntNum == 2)
			EXhalbtc8812a2ant_SpecialPacketNotify(pBtCoexist, packetType);
		else if (pBtCoexist->boardInfo.btdmAntNum == 1)
			EXhalbtc8812a1ant_SpecialPacketNotify(pBtCoexist, packetType);
	}
	// halbtcoutsrc_NormalLowPower(pBtCoexist);
}
/*
 * EXhalbtcoutsrc_BtInfoNotify() - forward a BT-info report (from BT firmware
 * via C2H) to the per-chip BT-coexist algorithm.
 *
 * @pBtCoexist: global BT-coexist context
 * @tmpBuf: raw BT-info payload bytes
 * @length: payload length in bytes
 *
 * Unlike the other notify entry points, this one is NOT gated on
 * bManualControl - BT-info reports are always forwarded.
 */
void EXhalbtcoutsrc_BtInfoNotify(PBTC_COEXIST pBtCoexist, u8 *tmpBuf, u8 length)
{
if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
return;
pBtCoexist->statistics.cntBtInfoNotify++;
// All notify is called in cmd thread, don't need to leave low power again
// halbtcoutsrc_LeaveLowPower(pBtCoexist);
/* Dispatch to the chip- and antenna-count-specific handler. */
if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8821a2ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8821a1ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
}
else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8723b2ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8723b1ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
}
else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
{
/* 8723A/8192C/8192D: only a 2-antenna handler exists. */
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8723a2ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
}
else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8188c2ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
}
else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8192d2ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
}
else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8192e2ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8192e1ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
}
else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8812a2ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8812a1ant_BtInfoNotify(pBtCoexist, tmpBuf, length);
}
// halbtcoutsrc_NormalLowPower(pBtCoexist);
}
/*
 * EXhalbtcoutsrc_StackOperationNotify() - (disabled) forward a BT stack
 * operation (inquiry/paging/pairing start or finish) to the coexist
 * algorithm.
 *
 * NOTE: the entire body is compiled out via #if 0, so this function is
 * currently a no-op stub kept only for interface compatibility.  The
 * disabled code only ever dispatched to the 8723A 2-antenna handler.
 */
void EXhalbtcoutsrc_StackOperationNotify(PBTC_COEXIST pBtCoexist, u8 type)
{
#if 0
u8 stackOpType;
if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
return;
pBtCoexist->statistics.cntStackOperationNotify++;
if (pBtCoexist->bManualControl)
return;
if ((HCI_BT_OP_INQUIRY_START == type) ||
(HCI_BT_OP_PAGING_START == type) ||
(HCI_BT_OP_PAIRING_START == type))
{
stackOpType = BTC_STACK_OP_INQ_PAGE_PAIR_START;
}
else if ((HCI_BT_OP_INQUIRY_FINISH == type) ||
(HCI_BT_OP_PAGING_SUCCESS == type) ||
(HCI_BT_OP_PAGING_UNSUCCESS == type) ||
(HCI_BT_OP_PAIRING_FINISH == type) )
{
stackOpType = BTC_STACK_OP_INQ_PAGE_PAIR_FINISH;
}
else
{
stackOpType = BTC_STACK_OP_NONE;
}
if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8723a2ant_StackOperationNotify(pBtCoexist, stackOpType);
}
#endif
}
/*
 * EXhalbtcoutsrc_HaltNotify() - tell the per-chip BT-coexist algorithm that
 * the WiFi interface is being halted so it can park the coexist hardware.
 *
 * @pBtCoexist: global BT-coexist context
 *
 * Not gated on bManualControl; dispatches on hardware type and antenna
 * count like the other notify entry points.
 */
void EXhalbtcoutsrc_HaltNotify(PBTC_COEXIST pBtCoexist)
{
if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
return;
if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8821a2ant_HaltNotify(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8821a1ant_HaltNotify(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8723b2ant_HaltNotify(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8723b1ant_HaltNotify(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
{
/* Unlike most notifies, 8723A has both 2-ant and 1-ant halt handlers. */
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8723a2ant_HaltNotify(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8723a1ant_HaltNotify(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8188c2ant_HaltNotify(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8192d2ant_HaltNotify(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8192e2ant_HaltNotify(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8192e1ant_HaltNotify(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8812a2ant_HaltNotify(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8812a1ant_HaltNotify(pBtCoexist);
}
}
/*
 * EXhalbtcoutsrc_SwitchGntBt() - program the 8723B BT-side register 0x3c so
 * that BT enters standby when GNT_BT toggles 1 -> 0.
 *
 * The value written depends on the antenna topology; nothing is written on
 * other chips or antenna counts.
 */
void EXhalbtcoutsrc_SwitchGntBt(PBTC_COEXIST pBtCoexist)
{
	u8 regVal;

	if (!IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
		return;

	if (pBtCoexist->boardInfo.btdmAntNum == 2)
		regVal = 0x01;
	else if (pBtCoexist->boardInfo.btdmAntNum == 1)
		regVal = 0x15;
	else
		return;

	/* BT goes to standby while GNT_BT transitions 1 -> 0 */
	halbtcoutsrc_SetBtReg(pBtCoexist, 0, 0x3c, regVal);
}
/*
 * EXhalbtcoutsrc_PnpNotify() - forward a PnP (suspend/resume) state change
 * to the per-chip BT-coexist algorithm.
 *
 * @pBtCoexist: global BT-coexist context
 * @pnpState: BTC_WIFI_PNP_* state (see hal_btcoex_SuspendNotify)
 *
 * Only 1-antenna configurations are notified: once PnP signals sleep, the
 * 1-ant algorithm must leave LPS so the device can sleep normally.
 */
void EXhalbtcoutsrc_PnpNotify(PBTC_COEXIST pBtCoexist, u8 pnpState)
{
if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
return;
//
// currently only 1ant we have to do the notification,
// once pnp is notified to sleep state, we have to leave LPS that we can sleep normally.
//
if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8723b1ant_PnpNotify(pBtCoexist,pnpState);
}
else if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8821a1ant_PnpNotify(pBtCoexist,pnpState);
}
else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8192e1ant_PnpNotify(pBtCoexist, pnpState);
}
else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8812a1ant_PnpNotify(pBtCoexist, pnpState);
}
}
/*
 * EXhalbtcoutsrc_CoexDmSwitch() - switch the 8723B coexist dynamic
 * mechanism from the 1-antenna algorithm to the 2-antenna algorithm
 * (antenna count detected at runtime).
 *
 * @pBtCoexist: global BT-coexist context
 * @antInverse: whether the detected antenna sits on the Aux port
 *
 * The sequence is order-sensitive: coex DM is stopped, the 1-ant state is
 * reset, the board info is rewritten as a detected 2-ant setup, and then
 * the 2-ant algorithm is initialized before DM is re-enabled.  Runs under
 * a leave-low-power / restore-low-power bracket.
 */
void EXhalbtcoutsrc_CoexDmSwitch(PBTC_COEXIST pBtCoexist, BOOLEAN antInverse)
{
if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
return;
pBtCoexist->statistics.cntCoexDmSwitch++;
halbtcoutsrc_LeaveLowPower(pBtCoexist);
if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 1)
{
pBtCoexist->bStopCoexDm = TRUE;
EXhalbtc8723b1ant_CoexDmReset(pBtCoexist);
EXhalbtcoutsrc_SetAntNum(BT_COEX_ANT_TYPE_DETECTED, 2, antInverse);
EXhalbtc8723b2ant_InitHwConfig(pBtCoexist);
EXhalbtc8723b2ant_InitCoexDm(pBtCoexist);
pBtCoexist->bStopCoexDm = FALSE;
}
}
halbtcoutsrc_NormalLowPower(pBtCoexist);
}
/*
 * EXhalbtcoutsrc_Periodical() - periodic tick for the per-chip BT-coexist
 * algorithm, called from the command thread.
 *
 * @pBtCoexist: global BT-coexist context
 *
 * For the 8821A and 8723A 1-antenna cases the tick is skipped while the
 * WiFi side is under IPS (halbtcoutsrc_UnderIps); other chips are ticked
 * unconditionally once coexistence is available.
 */
void EXhalbtcoutsrc_Periodical(PBTC_COEXIST pBtCoexist)
{
if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
return;
pBtCoexist->statistics.cntPeriodical++;
// Periodical should be called in cmd thread,
// don't need to leave low power again
// halbtcoutsrc_LeaveLowPower(pBtCoexist);
if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8821a2ant_Periodical(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
{
/* 1-ant 8821A: no tick while in IPS power save. */
if (!halbtcoutsrc_UnderIps(pBtCoexist))
{
EXhalbtc8821a1ant_Periodical(pBtCoexist);
}
}
}
else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8723b2ant_Periodical(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8723b1ant_Periodical(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8723a2ant_Periodical(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
{
/* 1-ant 8723A: no tick while in IPS power save. */
if (!halbtcoutsrc_UnderIps(pBtCoexist))
EXhalbtc8723a1ant_Periodical(pBtCoexist);
}
}
else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8188c2ant_Periodical(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8192d2ant_Periodical(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8192e2ant_Periodical(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8192e1ant_Periodical(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8812a2ant_Periodical(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8812a1ant_Periodical(pBtCoexist);
}
// halbtcoutsrc_NormalLowPower(pBtCoexist);
}
/*
 * EXhalbtcoutsrc_DbgControl() - forward a debug-control opcode to the
 * per-chip BT-coexist algorithm.
 *
 * @pBtCoexist: global BT-coexist context
 * @opCode: debug operation code
 * @opLen: length of @pData in bytes
 * @pData: opcode payload
 *
 * Only implemented for 8192E 1-ant and 8812A 1/2-ant; other chips ignore it.
 */
void EXhalbtcoutsrc_DbgControl(PBTC_COEXIST pBtCoexist, u8 opCode, u8 opLen, u8 *pData)
{
if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
return;
pBtCoexist->statistics.cntDbgCtrl++;
// This function doesn't be called yet,
// default no need to leave low power to avoid deadlock
// halbtcoutsrc_LeaveLowPower(pBtCoexist);
if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8192e1ant_DbgControl(pBtCoexist, opCode, opLen, pData);
}
else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8812a2ant_DbgControl(pBtCoexist, opCode, opLen, pData);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8812a1ant_DbgControl(pBtCoexist, opCode, opLen, pData);
}
// halbtcoutsrc_NormalLowPower(pBtCoexist);
}
/*
 * EXhalbtcoutsrc_StackUpdateProfileInfo() - (disabled) refresh the cached BT
 * stack profile information (SCO/ACL/A2DP/PAN/HID link flags and counts)
 * from the BT management info.
 *
 * NOTE: the entire body is compiled out via #if 0, so this function is
 * currently a no-op stub kept only for interface compatibility.
 */
void EXhalbtcoutsrc_StackUpdateProfileInfo(void)
{
#if 0
PBTC_COEXIST pBtCoexist = &GLBtCoexist;
PADAPTER padapter = (PADAPTER)GLBtCoexist.padapter;
PBT_MGNT pBtMgnt = &padapter->MgntInfo.BtInfo.BtMgnt;
u8 i;
if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
return;
pBtCoexist->stackInfo.bProfileNotified = _TRUE;
pBtCoexist->stackInfo.numOfLink =
pBtMgnt->ExtConfig.NumberOfACL+pBtMgnt->ExtConfig.NumberOfSCO;
// reset first
pBtCoexist->stackInfo.bBtLinkExist = _FALSE;
pBtCoexist->stackInfo.bScoExist = _FALSE;
pBtCoexist->stackInfo.bAclExist = _FALSE;
pBtCoexist->stackInfo.bA2dpExist = _FALSE;
pBtCoexist->stackInfo.bHidExist = _FALSE;
pBtCoexist->stackInfo.numOfHid = 0;
pBtCoexist->stackInfo.bPanExist = _FALSE;
if (!pBtMgnt->ExtConfig.NumberOfACL)
pBtCoexist->stackInfo.minBtRssi = 0;
if (pBtCoexist->stackInfo.numOfLink)
{
pBtCoexist->stackInfo.bBtLinkExist = _TRUE;
if (pBtMgnt->ExtConfig.NumberOfSCO)
pBtCoexist->stackInfo.bScoExist = _TRUE;
if (pBtMgnt->ExtConfig.NumberOfACL)
pBtCoexist->stackInfo.bAclExist = _TRUE;
}
for (i=0; i<pBtMgnt->ExtConfig.NumberOfACL; i++)
{
if (BT_PROFILE_A2DP == pBtMgnt->ExtConfig.aclLink[i].BTProfile)
{
pBtCoexist->stackInfo.bA2dpExist = _TRUE;
}
else if (BT_PROFILE_PAN == pBtMgnt->ExtConfig.aclLink[i].BTProfile)
{
pBtCoexist->stackInfo.bPanExist = _TRUE;
}
else if (BT_PROFILE_HID == pBtMgnt->ExtConfig.aclLink[i].BTProfile)
{
pBtCoexist->stackInfo.bHidExist = _TRUE;
pBtCoexist->stackInfo.numOfHid++;
}
else
{
pBtCoexist->stackInfo.bUnknownAclExist = _TRUE;
}
}
#endif
}
/* Record the minimum BT RSSI reported by the stack (no-op if coexist unavailable). */
void EXhalbtcoutsrc_UpdateMinBtRssi(s8 btRssi)
{
	PBTC_COEXIST pCoexist = &GLBtCoexist;

	if (halbtcoutsrc_IsBtCoexistAvailable(pCoexist))
		pCoexist->stackInfo.minBtRssi = btRssi;
}
/* Record the host-side HCI version (no-op if coexist unavailable). */
void EXhalbtcoutsrc_SetHciVersion(u16 hciVersion)
{
	PBTC_COEXIST pCoexist = &GLBtCoexist;

	if (halbtcoutsrc_IsBtCoexistAvailable(pCoexist))
		pCoexist->stackInfo.hciVersion = hciVersion;
}
/*
 * Record the BT controller's HCI version and firmware patch version
 * (no-op if coexist unavailable).
 */
void EXhalbtcoutsrc_SetBtPatchVersion(u16 btHciVersion, u16 btPatchVersion)
{
	PBTC_COEXIST pCoexist = &GLBtCoexist;

	if (!halbtcoutsrc_IsBtCoexistAvailable(pCoexist))
		return;

	pCoexist->btInfo.btHciVer = btHciVersion;
	pCoexist->btInfo.btRealFwVer = btPatchVersion;
}
/* Record in the global board info whether a BT device exists on this board. */
void EXhalbtcoutsrc_SetBtExist(u8 bBtExist)
{
GLBtCoexist.boardInfo.bBtExist = bBtExist;
}
/*
 * Translate the driver BT chip-type code into the coexist-layer
 * BTC_CHIP_* code and store it in the global board info.  Legacy
 * types (2-wire, ISSC 3-wire, ACCEL, RTL8756) and anything unknown
 * map to BTC_CHIP_UNDEF.
 */
void EXhalbtcoutsrc_SetChipType(u8 chipType)
{
	u8 btcType;

	if (chipType == BT_CSR_BC4)
		btcType = BTC_CHIP_CSR_BC4;
	else if (chipType == BT_CSR_BC8)
		btcType = BTC_CHIP_CSR_BC8;
	else if (chipType == BT_RTL8723A)
		btcType = BTC_CHIP_RTL8723A;
	else if (chipType == BT_RTL8821)
		btcType = BTC_CHIP_RTL8821;
	else if (chipType == BT_RTL8723B)
		btcType = BTC_CHIP_RTL8723B;
	else	/* BT_2WIRE, BT_ISSC_3WIRE, BT_ACCEL, BT_RTL8756, unknown */
		btcType = BTC_CHIP_UNDEF;

	GLBtCoexist.boardInfo.btChipType = btcType;
}
/*
 * EXhalbtcoutsrc_SetAntNum() - record the antenna configuration in the
 * global board info.
 *
 * @type: where the count comes from - PG/EFUSE, antenna-diversity, or
 *        runtime detection (BT_COEX_ANT_TYPE_*)
 * @antNum: number of BT/WiFi shared antennas
 * @antInverse: _TRUE when the antenna sits on the Aux port
 *
 * Fix: the original PG branch contained an if/else whose two arms both
 * assigned BTC_ANTENNA_AT_MAIN_PORT (a placeholder for a not-yet-ready
 * auto-detect mechanism); the redundant branch is collapsed, behavior
 * unchanged.
 */
void EXhalbtcoutsrc_SetAntNum(u8 type, u8 antNum, BOOLEAN antInverse)
{
	if (BT_COEX_ANT_TYPE_PG == type)
	{
		GLBtCoexist.boardInfo.pgAntNum = antNum;
		GLBtCoexist.boardInfo.btdmAntNum = antNum;
		/*
		 * The antenna position for pgAntNum==2 && btdmAntNum==1 should
		 * eventually come from an auto-detect mechanism; until that is
		 * ready the Main port is assumed unconditionally (both arms of
		 * the original if/else assigned BTC_ANTENNA_AT_MAIN_PORT).
		 */
		GLBtCoexist.boardInfo.btdmAntPos = BTC_ANTENNA_AT_MAIN_PORT;
	}
	else if (BT_COEX_ANT_TYPE_ANTDIV == type)
	{
		GLBtCoexist.boardInfo.btdmAntNum = antNum;
		GLBtCoexist.boardInfo.btdmAntPos = BTC_ANTENNA_AT_MAIN_PORT;
	}
	else if (BT_COEX_ANT_TYPE_DETECTED == type)
	{
		GLBtCoexist.boardInfo.btdmAntNum = antNum;
		GLBtCoexist.boardInfo.btdmAntPos = BTC_ANTENNA_AT_MAIN_PORT;
	}

	/* An inverted antenna overrides the position to the Aux port. */
	if (antInverse == _TRUE)
	{
		GLBtCoexist.boardInfo.btdmAntPos = BTC_ANTENNA_AT_AUX_PORT;
	}
}
/*
 * EXhalbtcoutsrc_DisplayBtCoexInfo() - dump the per-chip coexist state into
 * the debug-info buffer (see hal_btcoex_DisplayBtCoexInfo).
 *
 * @pBtCoexist: global BT-coexist context
 *
 * Runs under a leave-low-power / restore-low-power bracket because it may
 * read hardware registers while formatting the dump.
 */
void EXhalbtcoutsrc_DisplayBtCoexInfo(PBTC_COEXIST pBtCoexist)
{
if (!halbtcoutsrc_IsBtCoexistAvailable(pBtCoexist))
return;
halbtcoutsrc_LeaveLowPower(pBtCoexist);
if (IS_HARDWARE_TYPE_8821(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8821a2ant_DisplayCoexInfo(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8821a1ant_DisplayCoexInfo(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8723B(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8723b2ant_DisplayCoexInfo(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8723b1ant_DisplayCoexInfo(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8723A(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8723a2ant_DisplayCoexInfo(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8723a1ant_DisplayCoexInfo(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8192C(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8188c2ant_DisplayCoexInfo(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8192D(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8192d2ant_DisplayCoexInfo(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8192E(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8192e2ant_DisplayCoexInfo(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8192e1ant_DisplayCoexInfo(pBtCoexist);
}
else if (IS_HARDWARE_TYPE_8812(pBtCoexist->Adapter))
{
if (pBtCoexist->boardInfo.btdmAntNum == 2)
EXhalbtc8812a2ant_DisplayCoexInfo(pBtCoexist);
else if (pBtCoexist->boardInfo.btdmAntNum == 1)
EXhalbtc8812a1ant_DisplayCoexInfo(pBtCoexist);
}
halbtcoutsrc_NormalLowPower(pBtCoexist);
}
/*
 * 8192C BT-coexist hardware setup: for CSR BC4/BC8 BT modules on a 1T1R
 * board, force the OFDM Tx/Rx path registers to a single chain by
 * clearing BIT(1) of 0xC04 and 0xD04.
 */
static void halbt_InitHwConfig92C(PADAPTER padapter)
{
	PHAL_DATA_TYPE pHalData = GET_HAL_DATA(padapter);
	u8 val;

	/* Only CSR BC4/BC8 based BT modules need this tweak. */
	if ((pHalData->bt_coexist.btChipType != BT_CSR_BC4) &&
	    (pHalData->bt_coexist.btChipType != BT_CSR_BC8))
		return;

	if (pHalData->rf_type != RF_1T1R)
		return;

	/* Config to 1T1R: clear BIT(1) of the OFDM path-enable registers. */
	val = rtw_read8(padapter, rOFDM0_TRxPathEnable);
	val &= ~BIT(1);
	rtw_write8(padapter, rOFDM0_TRxPathEnable, val);
	RT_DISP(FBT, BT_TRACE, ("[BTCoex], BT write 0xC04 = 0x%x\n", val));

	val = rtw_read8(padapter, rOFDM1_TRxPathEnable);
	val &= ~BIT(1);
	rtw_write8(padapter, rOFDM1_TRxPathEnable, val);
	RT_DISP(FBT, BT_TRACE, ("[BTCoex], BT write 0xD04 = 0x%x\n", val));
}
/*
 * 8192D BT-coexist hardware setup: identical to the 8192C variant - for
 * CSR BC4/BC8 BT modules on a 1T1R board, force the OFDM Tx/Rx path
 * registers to a single chain by clearing BIT(1) of 0xC04 and 0xD04.
 */
static void halbt_InitHwConfig92D(PADAPTER padapter)
{
	PHAL_DATA_TYPE pHalData = GET_HAL_DATA(padapter);
	u8 val;

	/* Only CSR BC4/BC8 based BT modules need this tweak. */
	if ((pHalData->bt_coexist.btChipType != BT_CSR_BC4) &&
	    (pHalData->bt_coexist.btChipType != BT_CSR_BC8))
		return;

	if (pHalData->rf_type != RF_1T1R)
		return;

	/* Config to 1T1R: clear BIT(1) of the OFDM path-enable registers. */
	val = rtw_read8(padapter, rOFDM0_TRxPathEnable);
	val &= ~BIT(1);
	rtw_write8(padapter, rOFDM0_TRxPathEnable, val);
	RT_DISP(FBT, BT_TRACE, ("[BTCoex], BT write 0xC04 = 0x%x\n", val));

	val = rtw_read8(padapter, rOFDM1_TRxPathEnable);
	val &= ~BIT(1);
	rtw_write8(padapter, rOFDM1_TRxPathEnable, val);
	RT_DISP(FBT, BT_TRACE, ("[BTCoex], BT write 0xD04 = 0x%x\n", val));
}
/*
* Description:
* Decide whether the BT-coexist mechanism runs or not
*
*/
/*
 * Record whether a BT device exists, both in the HAL data and in the
 * global coexist board info.
 */
void hal_btcoex_SetBTCoexist(PADAPTER padapter, u8 bBtExist)
{
	PHAL_DATA_TYPE pHal = GET_HAL_DATA(padapter);

	pHal->bt_coexist.bBtExist = bBtExist;
	EXhalbtcoutsrc_SetBtExist(bBtExist);
}
/*
* Description:
* Check whether the BT co-exist mechanism is enabled
*
* Return:
* _TRUE Enable BT co-exist mechanism
* _FALSE Disable BT co-exist mechanism
*/
/* Return the bBtExist flag stored in this adapter's HAL data. */
u8 hal_btcoex_IsBtExist(PADAPTER padapter)
{
	return GET_HAL_DATA(padapter)->bt_coexist.bBtExist;
}
/*
 * Report whether BT is effectively disabled: either no BT device exists,
 * or the coexist state says BT has been turned off.
 */
u8 hal_btcoex_IsBtDisabled(PADAPTER padapter)
{
	if (hal_btcoex_IsBtExist(padapter) == _FALSE)
		return _TRUE;

	return GLBtCoexist.btInfo.bBtDisabled ? _TRUE : _FALSE;
}
/* Store the BT chip type in HAL data and propagate it to the coexist layer. */
void hal_btcoex_SetChipType(PADAPTER padapter, u8 chipType)
{
PHAL_DATA_TYPE pHalData;
pHalData = GET_HAL_DATA(padapter);
pHalData->bt_coexist.btChipType = chipType;
EXhalbtcoutsrc_SetChipType(chipType);
}
/* Return the BT chip type previously stored in HAL data. */
u8 hal_btcoex_GetChipType(PADAPTER padapter)
{
PHAL_DATA_TYPE pHalData;
pHalData = GET_HAL_DATA(padapter);
return pHalData->bt_coexist.btChipType;
}
/*
 * Store the PG/EFUSE antenna count in HAL data and propagate it to the
 * coexist layer as a BT_COEX_ANT_TYPE_PG configuration.
 */
void hal_btcoex_SetPgAntNum(PADAPTER padapter, u8 antNum, BOOLEAN antInverse)
{
PHAL_DATA_TYPE pHalData;
pHalData = GET_HAL_DATA(padapter);
pHalData->bt_coexist.btTotalAntNum = antNum;
EXhalbtcoutsrc_SetAntNum(BT_COEX_ANT_TYPE_PG, antNum, antInverse);
}
/* Return the PG/EFUSE antenna count previously stored in HAL data. */
u8 hal_btcoex_GetPgAntNum(PADAPTER padapter)
{
PHAL_DATA_TYPE pHalData;
pHalData = GET_HAL_DATA(padapter);
return pHalData->bt_coexist.btTotalAntNum;
}
/*
 * hal_btcoex_Initialize() - zero the global BT-coexist context and run the
 * outsrc variable initialization for this adapter.
 *
 * @padapter: owning adapter, handed to the coexist layer
 *
 * Returns _TRUE on success, _FALSE otherwise.  The original code copied the
 * result through a redundant `ret2 = (ret1==_TRUE) ? _TRUE : _FALSE`
 * intermediate; the normalization is kept, the extra variable removed.
 */
u8 hal_btcoex_Initialize(PADAPTER padapter)
{
	u8 ret;

	_rtw_memset(&GLBtCoexist, 0, sizeof(GLBtCoexist));

	ret = EXhalbtcoutsrc_InitlizeVariables((void *)padapter);
	return (ret == _TRUE) ? _TRUE : _FALSE;
}
/*
 * One-time BT-coexist hardware initialization: apply the chip-specific
 * 92C/92D antenna tweaks when applicable, then run the generic coexist
 * hardware and dynamic-mechanism init.  No-op when no BT device exists.
 */
void hal_btcoex_InitHwConfig(PADAPTER padapter)
{
	if (hal_btcoex_IsBtExist(padapter) == _FALSE)
		return;

	if (IS_HARDWARE_TYPE_8192C(padapter))
		halbt_InitHwConfig92C(padapter);
	else if (IS_HARDWARE_TYPE_8192D(padapter))
		halbt_InitHwConfig92D(padapter);

	EXhalbtcoutsrc_InitHwConfig(&GLBtCoexist);
	EXhalbtcoutsrc_InitCoexDm(&GLBtCoexist);
}
/*
 * Thin wrappers: translate the driver-facing hal_btcoex_* notification API
 * onto the outsrc EXhalbtcoutsrc_* entry points using the global context.
 */
/* WiFi IPS (inactive power save) state change. */
void hal_btcoex_IpsNotify(PADAPTER padapter, u8 type)
{
EXhalbtcoutsrc_IpsNotify(&GLBtCoexist, type);
}
/* WiFi LPS (leisure power save) state change. */
void hal_btcoex_LpsNotify(PADAPTER padapter, u8 type)
{
EXhalbtcoutsrc_LpsNotify(&GLBtCoexist, type);
}
/* WiFi scan start/stop. */
void hal_btcoex_ScanNotify(PADAPTER padapter, u8 type)
{
EXhalbtcoutsrc_ScanNotify(&GLBtCoexist, type);
}
/* WiFi connect attempt start/finish. */
void hal_btcoex_ConnectNotify(PADAPTER padapter, u8 action)
{
EXhalbtcoutsrc_ConnectNotify(&GLBtCoexist, action);
}
/* WiFi link (media) status change. */
void hal_btcoex_MediaStatusNotify(PADAPTER padapter, u8 mediaStatus)
{
EXhalbtcoutsrc_MediaStatusNotify(&GLBtCoexist, mediaStatus);
}
/* Special WiFi packet (DHCP/EAPOL/ARP) seen. */
void hal_btcoex_SpecialPacketNotify(PADAPTER padapter, u8 pktType)
{
EXhalbtcoutsrc_SpecialPacketNotify(&GLBtCoexist, pktType);
}
/* Record whether WiFi is currently running IQ calibration. */
void hal_btcoex_IQKNotify(PADAPTER padapter, u8 state)
{
GLBtcWiFiInIQKState = state;
}
/* BT-info C2H report; dropped while IQ calibration is in progress. */
void hal_btcoex_BtInfoNotify(PADAPTER padapter, u8 length, u8 *tmpBuf)
{
if (GLBtcWiFiInIQKState == _TRUE)
return;
EXhalbtcoutsrc_BtInfoNotify(&GLBtCoexist, tmpBuf, length);
}
/* Suspend/resume: map to the coexist PNP sleep/wake codes (state==1 means sleep). */
void hal_btcoex_SuspendNotify(PADAPTER padapter, u8 state)
{
if (state == 1)
state = BTC_WIFI_PNP_SLEEP;
else
state = BTC_WIFI_PNP_WAKE_UP;
EXhalbtcoutsrc_PnpNotify(&GLBtCoexist, state);
}
/* Interface halt. */
void hal_btcoex_HaltNotify(PADAPTER padapter)
{
EXhalbtcoutsrc_HaltNotify(&GLBtCoexist);
}
/* Program GNT_BT standby behavior (8723B only). */
void hal_btcoex_SwitchGntBt(PADAPTER padapter)
{
EXhalbtcoutsrc_SwitchGntBt(&GLBtCoexist);
}
/* Periodic coexist tick.  (Name keeps the historical "Hanlder" typo: callers reference it.) */
void hal_btcoex_Hanlder(PADAPTER padapter)
{
EXhalbtcoutsrc_Periodical(&GLBtCoexist);
}
/* Whether the coexist layer currently controls the A-MPDU aggregation size. */
s32 hal_btcoex_IsBTCoexCtrlAMPDUSize(PADAPTER padapter)
{
return (s32)GLBtCoexist.btInfo.bBtCtrlAggBufSize;
}
/* A-MPDU aggregation buffer size requested by the coexist layer. */
u32 hal_btcoex_GetAMPDUSize(PADAPTER padapter)
{
return (u32)GLBtCoexist.btInfo.aggBufSize;
}
/* Enable/disable manual control (suppresses most coexist notifications). */
void hal_btcoex_SetManualControl(PADAPTER padapter, u8 bmanual)
{
GLBtCoexist.bManualControl = bmanual;
}
/* _TRUE when BT exists and the board uses a single shared antenna. */
u8 hal_btcoex_1Ant(PADAPTER padapter)
{
if (hal_btcoex_IsBtExist(padapter) == _FALSE)
return _FALSE;
if (GLBtCoexist.boardInfo.btdmAntNum == 1)
return _TRUE;
return _FALSE;
}
/* _TRUE when BT exists, is enabled, and the coexist layer controls WiFi LPS. */
u8 hal_btcoex_IsBtControlLps(PADAPTER padapter)
{
if (hal_btcoex_IsBtExist(padapter) == _FALSE)
return _FALSE;
if (GLBtCoexist.btInfo.bBtDisabled)
return _FALSE;
if (GLBtCoexist.btInfo.bBtCtrlLps)
return _TRUE;
return _FALSE;
}
/* _TRUE when BT exists, is enabled, and coexist requests LPS to be on. */
u8 hal_btcoex_IsLpsOn(PADAPTER padapter)
{
if (hal_btcoex_IsBtExist(padapter) == _FALSE)
return _FALSE;
if (GLBtCoexist.btInfo.bBtDisabled)
return _FALSE;
if (GLBtCoexist.btInfo.bBtLpsOn)
return _TRUE;
return _FALSE;
}
/* RPWM value the coexist layer wants written for power-mode signaling. */
u8 hal_btcoex_RpwmVal(PADAPTER padapter)
{
return GLBtCoexist.btInfo.rpwmVal;
}
/* LPS parameter value requested by the coexist layer. */
u8 hal_btcoex_LpsVal(PADAPTER padapter)
{
return GLBtCoexist.btInfo.lpsVal;
}
/*
 * Rate-adaptive mask requested by the coexist layer.  Only meaningful for
 * a 1-antenna board with BT present and enabled; returns 0 otherwise.
 */
u32 hal_btcoex_GetRaMask(PADAPTER padapter)
{
	BOOLEAN applicable;

	applicable = hal_btcoex_IsBtExist(padapter)
		&& !GLBtCoexist.btInfo.bBtDisabled
		&& (GLBtCoexist.boardInfo.btdmAntNum == 1);

	return applicable ? GLBtCoexist.btInfo.raMask : 0;
}
/*
 * Cache the firmware power-mode command bytes so the coexist layer can
 * inspect the last H2C power-mode sent.
 *
 * NOTE(review): the trace below unconditionally reads pCmdBuf[0..5] and the
 * memcpy copies cmdLen bytes into GLBtCoexist.pwrModeVal - this assumes
 * cmdLen >= 6 and cmdLen <= sizeof(pwrModeVal); confirm at the call sites.
 */
void hal_btcoex_RecordPwrMode(PADAPTER padapter, u8 *pCmdBuf, u8 cmdLen)
{
BTC_PRINT(BTC_MSG_ALGORITHM, ALGO_TRACE_FW_EXEC, ("[BTCoex], FW write pwrModeCmd=0x%04x%08x\n",
pCmdBuf[0]<<8|pCmdBuf[1],
pCmdBuf[2]<<24|pCmdBuf[3]<<16|pCmdBuf[4]<<8|pCmdBuf[5]));
_rtw_memcpy(GLBtCoexist.pwrModeVal, pCmdBuf, cmdLen);
}
/*
 * Dump the coexist debug information into @pbuf: point the global debug-info
 * sink at the caller's buffer, run the per-chip dump, then detach the sink.
 */
void hal_btcoex_DisplayBtCoexInfo(PADAPTER padapter, u8 *pbuf, u32 bufsize)
{
PBTCDBGINFO pinfo;
pinfo = &GLBtcDbgInfo;
DBG_BT_INFO_INIT(pinfo, pbuf, bufsize);
EXhalbtcoutsrc_DisplayBtCoexInfo(&GLBtCoexist);
DBG_BT_INFO_INIT(pinfo, NULL, 0);
}
/*
 * Replace the coexist debug-level table with the caller-supplied one
 * (one u32 per BTC_MSG_* category).  Ignores a NULL table.
 */
void hal_btcoex_SetDBG(PADAPTER padapter, u32 *pDbgModule)
{
	u32 idx;

	if (pDbgModule == NULL)
		return;

	for (idx = 0; idx < BTC_MSG_MAX; idx++)
		GLBtcDbgType[idx] = pDbgModule[idx];
}
/*
 * hal_btcoex_GetDBG() - format the current coexist debug settings as a
 * human-readable report into @pStrBuf.
 *
 * @padapter: adapter (unused except for context)
 * @pStrBuf: destination string buffer
 * @bufSize: capacity of @pStrBuf in bytes
 *
 * Returns the number of bytes written.  Each rtw_sprintf is followed by the
 * same guard: bail out to `exit` on error (count < 0) or when the buffer is
 * full (count >= leftSize; the signed/unsigned comparison is safe because
 * count < 0 is tested first).
 */
u32 hal_btcoex_GetDBG(PADAPTER padapter, u8 *pStrBuf, u32 bufSize)
{
s32 count;
u8 *pstr;
u32 leftSize;
if ((NULL == pStrBuf) || (0 == bufSize))
return 0;
count = 0;
pstr = pStrBuf;
leftSize = bufSize;
// DBG_871X(FUNC_ADPT_FMT ": bufsize=%d\n", FUNC_ADPT_ARG(padapter), bufSize);
count = rtw_sprintf(pstr, leftSize, "#define DBG\t%d\n", DBG);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "BTCOEX Debug Setting:\n");
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize,
"INTERFACE / ALGORITHM: 0x%08X / 0x%08X\n\n",
GLBtcDbgType[BTC_MSG_INTERFACE],
GLBtcDbgType[BTC_MSG_ALGORITHM]);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "INTERFACE Debug Setting Definition:\n");
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[0]=%d for INTF_INIT\n",
GLBtcDbgType[BTC_MSG_INTERFACE]&INTF_INIT?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[2]=%d for INTF_NOTIFY\n\n",
GLBtcDbgType[BTC_MSG_INTERFACE]&INTF_NOTIFY?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "ALGORITHM Debug Setting Definition:\n");
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[0]=%d for BT_RSSI_STATE\n",
GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_BT_RSSI_STATE?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[1]=%d for WIFI_RSSI_STATE\n",
GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_WIFI_RSSI_STATE?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[2]=%d for BT_MONITOR\n",
GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_BT_MONITOR?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[3]=%d for TRACE\n",
GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[4]=%d for TRACE_FW\n",
GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_FW?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[5]=%d for TRACE_FW_DETAIL\n",
GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_FW_DETAIL?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[6]=%d for TRACE_FW_EXEC\n",
GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_FW_EXEC?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[7]=%d for TRACE_SW\n",
GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_SW?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[8]=%d for TRACE_SW_DETAIL\n",
GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_SW_DETAIL?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
count = rtw_sprintf(pstr, leftSize, "\tbit[9]=%d for TRACE_SW_EXEC\n",
GLBtcDbgType[BTC_MSG_ALGORITHM]&ALGO_TRACE_SW_EXEC?1:0);
if ((count < 0) || (count >= leftSize))
goto exit;
pstr += count;
leftSize -= count;
exit:
/* Total bytes written so far, including a truncated final line. */
count = pstr - pStrBuf;
// DBG_871X(FUNC_ADPT_FMT ": usedsize=%d\n", FUNC_ADPT_ARG(padapter), count);
return count;
}
/*
 * _TRUE when BT exists and the coexist layer asks the scan logic to raise
 * its device-count threshold.
 */
u8 hal_btcoex_IncreaseScanDeviceNum(PADAPTER padapter)
{
	if (hal_btcoex_IsBtExist(padapter)
		&& GLBtCoexist.btInfo.bIncreaseScanDevNum)
		return _TRUE;

	return _FALSE;
}
/* _TRUE when the coexist link info reports at least one active BT link. */
u8 hal_btcoex_IsBtLinkExist(PADAPTER padapter)
{
	return GLBtCoexist.btLinkInfo.bBtLinkExist ? _TRUE : _FALSE;
}
#endif // CONFIG_BT_COEXIST
| gpl-2.0 |
Steven-Cai/pi-kernel | arch/sparc/math-emu/math_64.c | 142 | 16172 | /*
* arch/sparc64/math-emu/math.c
*
* Copyright (C) 1997,1999 Jakub Jelinek (jj@ultra.linux.cz)
* Copyright (C) 1999 David S. Miller (davem@redhat.com)
*
* Emulation routines originate from soft-fp package, which is part
* of glibc and has appropriate copyrights in it.
*/
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/perf_event.h>
#include <asm/fpumacro.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include "sfp-util_64.h"
#include <math-emu/soft-fp.h>
#include <math-emu/single.h>
#include <math-emu/double.h>
#include <math-emu/quad.h>
/* QUAD - ftt == 3 */
#define FMOVQ 0x003
#define FNEGQ 0x007
#define FABSQ 0x00b
#define FSQRTQ 0x02b
#define FADDQ 0x043
#define FSUBQ 0x047
#define FMULQ 0x04b
#define FDIVQ 0x04f
#define FDMULQ 0x06e
#define FQTOX 0x083
#define FXTOQ 0x08c
#define FQTOS 0x0c7
#define FQTOD 0x0cb
#define FITOQ 0x0cc
#define FSTOQ 0x0cd
#define FDTOQ 0x0ce
#define FQTOI 0x0d3
/* SUBNORMAL - ftt == 2 */
#define FSQRTS 0x029
#define FSQRTD 0x02a
#define FADDS 0x041
#define FADDD 0x042
#define FSUBS 0x045
#define FSUBD 0x046
#define FMULS 0x049
#define FMULD 0x04a
#define FDIVS 0x04d
#define FDIVD 0x04e
#define FSMULD 0x069
#define FSTOX 0x081
#define FDTOX 0x082
#define FDTOS 0x0c6
#define FSTOD 0x0c9
#define FSTOI 0x0d1
#define FDTOI 0x0d2
#define FXTOS 0x084 /* Only Ultra-III generates this. */
#define FXTOD 0x088 /* Only Ultra-III generates this. */
#if 0 /* Optimized inline in sparc64/kernel/entry.S */
#define FITOS 0x0c4 /* Only Ultra-III generates this. */
#endif
#define FITOD 0x0c8 /* Only Ultra-III generates this. */
/* FPOP2 */
#define FCMPQ 0x053
#define FCMPEQ 0x057
#define FMOVQ0 0x003
#define FMOVQ1 0x043
#define FMOVQ2 0x083
#define FMOVQ3 0x0c3
#define FMOVQI 0x103
#define FMOVQX 0x183
#define FMOVQZ 0x027
#define FMOVQLE 0x047
#define FMOVQLZ 0x067
#define FMOVQNZ 0x0a7
#define FMOVQGZ 0x0c7
#define FMOVQGE 0x0e7
#define FSR_TEM_SHIFT 23UL
#define FSR_TEM_MASK (0x1fUL << FSR_TEM_SHIFT)
#define FSR_AEXC_SHIFT 5UL
#define FSR_AEXC_MASK (0x1fUL << FSR_AEXC_SHIFT)
#define FSR_CEXC_SHIFT 0UL
#define FSR_CEXC_MASK (0x1fUL << FSR_CEXC_SHIFT)
/* All routines returning an exception to raise should detect
* such exceptions _before_ rounding to be consistent with
* the behavior of the hardware in the implemented cases
* (and thus with the recommendations in the V9 architecture
* manual).
*
* We return 0 if a SIGFPE should be sent, 1 otherwise.
*/
/*
 * record_exception() - fold a soft-fp exception flag set into the emulated
 * FSR and decide whether a SIGFPE must be raised.
 *
 * @regs: trapping task's registers (tpc/tnpc advanced if no trap)
 * @eflag: FP_EX_* bits raised by the emulated operation
 *
 * Returns 0 if a SIGFPE should be sent, 1 otherwise.
 */
static inline int record_exception(struct pt_regs *regs, int eflag)
{
u64 fsr = current_thread_info()->xfsr[0];
int would_trap;
/* Determine if this exception would have generated a trap. */
would_trap = (fsr & ((long)eflag << FSR_TEM_SHIFT)) != 0UL;
/* If trapping, we only want to signal one bit. */
if(would_trap != 0) {
eflag &= ((fsr & FSR_TEM_MASK) >> FSR_TEM_SHIFT);
/* More than one enabled bit set: pick one in priority order
 * (invalid > overflow > underflow > divzero > inexact). */
if((eflag & (eflag - 1)) != 0) {
if(eflag & FP_EX_INVALID)
eflag = FP_EX_INVALID;
else if(eflag & FP_EX_OVERFLOW)
eflag = FP_EX_OVERFLOW;
else if(eflag & FP_EX_UNDERFLOW)
eflag = FP_EX_UNDERFLOW;
else if(eflag & FP_EX_DIVZERO)
eflag = FP_EX_DIVZERO;
else if(eflag & FP_EX_INEXACT)
eflag = FP_EX_INEXACT;
}
}
/* Set CEXC, here is the rule:
*
* In general all FPU ops will set one and only one
* bit in the CEXC field, this is always the case
* when the IEEE exception trap is enabled in TEM.
*/
fsr &= ~(FSR_CEXC_MASK);
fsr |= ((long)eflag << FSR_CEXC_SHIFT);
/* Set the AEXC field, rule is:
*
* If a trap would not be generated, the
* CEXC just generated is OR'd into the
* existing value of AEXC.
*/
if(would_trap == 0)
fsr |= ((long)eflag << FSR_AEXC_SHIFT);
/* If trapping, indicate fault trap type IEEE. */
if(would_trap != 0)
fsr |= (1UL << 14);
current_thread_info()->xfsr[0] = fsr;
/* If we will not trap, advance the program counter over
* the instruction being handled.
*/
if(would_trap == 0) {
regs->tpc = regs->tnpc;
regs->tnpc += 4;
}
return (would_trap ? 0 : 1);
}
/*
 * Overlay view of an FPU register operand: single (32-bit), double
 * (64-bit) or quad (two 64-bit words).
 */
typedef union {
u32 s;
u64 d;
u64 q[2];
} *argp;
/*
 * do_mathemu - emulate an FPop the hardware refused to execute.
 *
 * Handles quad-precision FPops (never implemented in hardware) and
 * single/double FPops that trapped "unfinished" (subnormal operands on
 * pre-Niagara) or "unimplemented" (fsqrt{s,d} on Niagara).
 *
 * @regs: user register state at the trap
 * @f: saved FPU register file to read operands from / write results to
 * @illegal_insn_trap: true when entered via an illegal-instruction trap
 *	(UltraSPARC-T2 and later do not set %fsr.ftt), so the ftt
 *	cross-check must be skipped
 *
 * Returns 1 on successful emulation (including fmov-as-nop and recorded
 * non-trapping exceptions), 0 when the caller should raise a signal.
 */
int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap)
{
	unsigned long pc = regs->tpc;
	unsigned long tstate = regs->tstate;
	u32 insn = 0;
	int type = 0;
	/* ftt tells which ftt it may happen in, r is rd, b is rs2 and a is rs1. The *u arg tells
	   whether the argument should be packed/unpacked (0 - do not unpack/pack, 1 - unpack/pack)
	   non-u args tells the size of the argument (0 - no argument, 1 - single, 2 - double, 3 - quad */
#define TYPE(ftt, r, ru, b, bu, a, au) type = (au << 2) | (a << 0) | (bu << 5) | (b << 3) | (ru << 8) | (r << 6) | (ftt << 9)
	int freg;
	static u64 zero[2] = { 0L, 0L };
	int flags;
	FP_DECL_EX;
	FP_DECL_S(SA); FP_DECL_S(SB); FP_DECL_S(SR);
	FP_DECL_D(DA); FP_DECL_D(DB); FP_DECL_D(DR);
	FP_DECL_Q(QA); FP_DECL_Q(QB); FP_DECL_Q(QR);
	int IR;
	long XR, xfsr;

	if (tstate & TSTATE_PRIV)
		die_if_kernel("unfinished/unimplemented FPop from kernel", regs);
	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
	if (test_thread_flag(TIF_32BIT))
		pc = (u32)pc;
	/* NOTE(review): get_user() returns 0 on success and -EFAULT on
	 * fault, so "!= -EFAULT" means the fetch succeeded; on fault we
	 * fall through with type == 0 and return 0 at err below.
	 */
	if (get_user(insn, (u32 __user *) pc) != -EFAULT) {
		if ((insn & 0xc1f80000) == 0x81a00000) /* FPOP1 */ {
			/* Classify the opcode: TYPE() records expected ftt
			 * plus size/pack flags for rd, rs2, rs1. */
			switch ((insn >> 5) & 0x1ff) {
			/* QUAD - ftt == 3 */
			case FMOVQ:
			case FNEGQ:
			case FABSQ: TYPE(3,3,0,3,0,0,0); break;
			case FSQRTQ: TYPE(3,3,1,3,1,0,0); break;
			case FADDQ:
			case FSUBQ:
			case FMULQ:
			case FDIVQ: TYPE(3,3,1,3,1,3,1); break;
			case FDMULQ: TYPE(3,3,1,2,1,2,1); break;
			case FQTOX: TYPE(3,2,0,3,1,0,0); break;
			case FXTOQ: TYPE(3,3,1,2,0,0,0); break;
			case FQTOS: TYPE(3,1,1,3,1,0,0); break;
			case FQTOD: TYPE(3,2,1,3,1,0,0); break;
			case FITOQ: TYPE(3,3,1,1,0,0,0); break;
			case FSTOQ: TYPE(3,3,1,1,1,0,0); break;
			case FDTOQ: TYPE(3,3,1,2,1,0,0); break;
			case FQTOI: TYPE(3,1,0,3,1,0,0); break;

			/* We can get either unimplemented or unfinished
			 * for these cases.  Pre-Niagara systems generate
			 * unfinished fpop for SUBNORMAL cases, and Niagara
			 * always gives unimplemented fpop for fsqrt{s,d}.
			 * Take the expected ftt from the live %fsr.
			 */
			case FSQRTS: {
				unsigned long x = current_thread_info()->xfsr[0];

				x = (x >> 14) & 0x7;
				TYPE(x,1,1,1,1,0,0);
				break;
			}
			case FSQRTD: {
				unsigned long x = current_thread_info()->xfsr[0];

				x = (x >> 14) & 0x7;
				TYPE(x,2,1,2,1,0,0);
				break;
			}

			/* SUBNORMAL - ftt == 2 */
			case FADDD:
			case FSUBD:
			case FMULD:
			case FDIVD: TYPE(2,2,1,2,1,2,1); break;
			case FADDS:
			case FSUBS:
			case FMULS:
			case FDIVS: TYPE(2,1,1,1,1,1,1); break;
			case FSMULD: TYPE(2,2,1,1,1,1,1); break;
			case FSTOX: TYPE(2,2,0,1,1,0,0); break;
			case FDTOX: TYPE(2,2,0,2,1,0,0); break;
			case FDTOS: TYPE(2,1,1,2,1,0,0); break;
			case FSTOD: TYPE(2,2,1,1,1,0,0); break;
			case FSTOI: TYPE(2,1,0,1,1,0,0); break;
			case FDTOI: TYPE(2,1,0,2,1,0,0); break;

			/* Only Ultra-III generates these */
			case FXTOS: TYPE(2,1,1,2,0,0,0); break;
			case FXTOD: TYPE(2,2,1,2,0,0,0); break;
#if 0			/* Optimized inline in sparc64/kernel/entry.S */
			case FITOS: TYPE(2,1,1,1,0,0,0); break;
#endif
			case FITOD: TYPE(2,2,1,1,0,0,0); break;
			}
		}
		else if ((insn & 0xc1f80000) == 0x81a80000) /* FPOP2 */ {
			/* IR: 0 = fmov condition false (nop), 1 = true
			 * (execute as plain FMOVQ), 2 = not an fmov. */
			IR = 2;
			switch ((insn >> 5) & 0x1ff) {
			case FCMPQ: TYPE(3,0,0,3,1,3,1); break;
			case FCMPEQ: TYPE(3,0,0,3,1,3,1); break;
			/* Now the conditional fmovq support */
			case FMOVQ0:
			case FMOVQ1:
			case FMOVQ2:
			case FMOVQ3:
				/* fmovq %fccX, %fY, %fZ
				 * Extract the chosen fcc field from %fsr:
				 * fcc0 lives at bit 10, fcc1-3 at 32/34/36.
				 */
				if (!((insn >> 11) & 3))
					XR = current_thread_info()->xfsr[0] >> 10;
				else
					XR = current_thread_info()->xfsr[0] >> (30 + ((insn >> 10) & 0x6));
				XR &= 3;
				IR = 0;
				/* Evaluate the float condition code. */
				switch ((insn >> 14) & 0x7) {
				/* case 0: IR = 0; break; */	/* Never */
				case 1: if (XR) IR = 1; break;	/* Not Equal */
				case 2: if (XR == 1 || XR == 2) IR = 1; break;	/* Less or Greater */
				case 3: if (XR & 1) IR = 1; break;	/* Unordered or Less */
				case 4: if (XR == 1) IR = 1; break;	/* Less */
				case 5: if (XR & 2) IR = 1; break;	/* Unordered or Greater */
				case 6: if (XR == 2) IR = 1; break;	/* Greater */
				case 7: if (XR == 3) IR = 1; break;	/* Unordered */
				}
				/* High condition bit negates the test. */
				if ((insn >> 14) & 8)
					IR ^= 1;
				break;
			case FMOVQI:
			case FMOVQX:
				/* fmovq %[ix]cc, %fY, %fZ
				 * Integer ccr lives in tstate bits 32+
				 * (icc low nibble, xcc high nibble).
				 */
				XR = regs->tstate >> 32;
				if ((insn >> 5) & 0x80)
					XR >>= 4;
				XR &= 0xf;
				IR = 0;
				/* N xor V, i.e. the signed "less" predicate. */
				freg = ((XR >> 2) ^ XR) & 2;
				switch ((insn >> 14) & 0x7) {
				/* case 0: IR = 0; break; */	/* Never */
				case 1: if (XR & 4) IR = 1; break;	/* Equal */
				case 2: if ((XR & 4) || freg) IR = 1; break;	/* Less or Equal */
				case 3: if (freg) IR = 1; break;	/* Less */
				case 4: if (XR & 5) IR = 1; break;	/* Less or Equal Unsigned */
				case 5: if (XR & 1) IR = 1; break;	/* Carry Set */
				case 6: if (XR & 8) IR = 1; break;	/* Negative */
				case 7: if (XR & 2) IR = 1; break;	/* Overflow Set */
				}
				if ((insn >> 14) & 8)
					IR ^= 1;
				break;
			case FMOVQZ:
			case FMOVQLE:
			case FMOVQLZ:
			case FMOVQNZ:
			case FMOVQGZ:
			case FMOVQGE:
				/* fmovrq: condition on an integer register;
				 * %g0 is hardwired zero, %g1-%o7 come from
				 * u_regs, locals/ins from the user's
				 * register window on the stack.
				 */
				freg = (insn >> 14) & 0x1f;
				if (!freg)
					XR = 0;
				else if (freg < 16)
					XR = regs->u_regs[freg];
				else if (test_thread_flag(TIF_32BIT)) {
					struct reg_window32 __user *win32;
					flushw_user ();
					win32 = (struct reg_window32 __user *)((unsigned long)((u32)regs->u_regs[UREG_FP]));
					get_user(XR, &win32->locals[freg - 16]);
				} else {
					struct reg_window __user *win;
					flushw_user ();
					win = (struct reg_window __user *)(regs->u_regs[UREG_FP] + STACK_BIAS);
					get_user(XR, &win->locals[freg - 16]);
				}
				IR = 0;
				switch ((insn >> 10) & 3) {
				case 1: if (!XR) IR = 1; break;	/* Register Zero */
				case 2: if (XR <= 0) IR = 1; break;	/* Register Less Than or Equal to Zero */
				case 3: if (XR < 0) IR = 1; break;	/* Register Less Than Zero */
				}
				if ((insn >> 10) & 4)
					IR ^= 1;
				break;
			}
			if (IR == 0) {
				/* The fmov test was false. Do a nop instead */
				current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
				regs->tpc = regs->tnpc;
				regs->tnpc += 4;
				return 1;
			} else if (IR == 1) {
				/* Change the instruction into plain fmovq */
				insn = (insn & 0x3e00001f) | 0x81a00060;
				TYPE(3,3,0,3,0,0,0);
			}
		}
	}
	if (type) {
		argp rs1 = NULL, rs2 = NULL, rd = NULL;

		/* Starting with UltraSPARC-T2, the cpu does not set the FP Trap
		 * Type field in the %fsr to unimplemented_FPop.  Nor does it
		 * use the fp_exception_other trap.  Instead it signals an
		 * illegal instruction and leaves the FP trap type field of
		 * the %fsr unchanged.
		 */
		if (!illegal_insn_trap) {
			int ftt = (current_thread_info()->xfsr[0] >> 14) & 0x7;
			if (ftt != (type >> 9))
				goto err;
		}
		/* Clear ftt before emulating. */
		current_thread_info()->xfsr[0] &= ~0x1c000;

		/* Decode rs1: quad regs must be 4-aligned (case 3 checks,
		 * then falls through); doubles remap the 5-bit encoding
		 * to the 64-register file (case 2 falls through); any
		 * half of the file not yet live reads as zero.
		 */
		freg = ((insn >> 14) & 0x1f);
		switch (type & 0x3) {
		case 3: if (freg & 2) {
				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
				goto err;
			}
			/* fall through */
		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
			/* fall through */
		case 1: rs1 = (argp)&f->regs[freg];
			flags = (freg < 32) ? FPRS_DL : FPRS_DU;
			if (!(current_thread_info()->fpsaved[0] & flags))
				rs1 = (argp)&zero;
			break;
		}
		switch (type & 0x7) {
		case 7: FP_UNPACK_QP (QA, rs1); break;
		case 6: FP_UNPACK_DP (DA, rs1); break;
		case 5: FP_UNPACK_SP (SA, rs1); break;
		}
		/* Decode rs2, same alignment/remap/zero rules as rs1. */
		freg = (insn & 0x1f);
		switch ((type >> 3) & 0x3) {
		case 3: if (freg & 2) {
				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
				goto err;
			}
			/* fall through */
		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
			/* fall through */
		case 1: rs2 = (argp)&f->regs[freg];
			flags = (freg < 32) ? FPRS_DL : FPRS_DU;
			if (!(current_thread_info()->fpsaved[0] & flags))
				rs2 = (argp)&zero;
			break;
		}
		switch ((type >> 3) & 0x7) {
		case 7: FP_UNPACK_QP (QB, rs2); break;
		case 6: FP_UNPACK_DP (DB, rs2); break;
		case 5: FP_UNPACK_SP (SB, rs2); break;
		}
		/* Decode rd and make sure its half of the register file is
		 * marked live (zero-filling it on first touch).
		 */
		freg = ((insn >> 25) & 0x1f);
		switch ((type >> 6) & 0x3) {
		case 3: if (freg & 2) {
				current_thread_info()->xfsr[0] |= (6 << 14) /* invalid_fp_register */;
				goto err;
			}
			/* fall through */
		case 2: freg = ((freg & 1) << 5) | (freg & 0x1e);
			/* fall through */
		case 1: rd = (argp)&f->regs[freg];
			flags = (freg < 32) ? FPRS_DL : FPRS_DU;
			if (!(current_thread_info()->fpsaved[0] & FPRS_FEF)) {
				current_thread_info()->fpsaved[0] = FPRS_FEF;
				current_thread_info()->gsr[0] = 0;
			}
			if (!(current_thread_info()->fpsaved[0] & flags)) {
				if (freg < 32)
					memset(f->regs, 0, 32*sizeof(u32));
				else
					memset(f->regs+32, 0, 32*sizeof(u32));
			}
			current_thread_info()->fpsaved[0] |= flags;
			break;
		}
		/* Perform the operation via soft-fp. */
		switch ((insn >> 5) & 0x1ff) {
		/* + */
		case FADDS: FP_ADD_S (SR, SA, SB); break;
		case FADDD: FP_ADD_D (DR, DA, DB); break;
		case FADDQ: FP_ADD_Q (QR, QA, QB); break;
		/* - */
		case FSUBS: FP_SUB_S (SR, SA, SB); break;
		case FSUBD: FP_SUB_D (DR, DA, DB); break;
		case FSUBQ: FP_SUB_Q (QR, QA, QB); break;
		/* * */
		case FMULS: FP_MUL_S (SR, SA, SB); break;
		case FSMULD: FP_CONV (D, S, 1, 1, DA, SA);
			     FP_CONV (D, S, 1, 1, DB, SB);
			     /* fall through: multiply as doubles */
		case FMULD: FP_MUL_D (DR, DA, DB); break;
		case FDMULQ: FP_CONV (Q, D, 2, 1, QA, DA);
			     FP_CONV (Q, D, 2, 1, QB, DB);
			     /* fall through: multiply as quads */
		case FMULQ: FP_MUL_Q (QR, QA, QB); break;
		/* / */
		case FDIVS: FP_DIV_S (SR, SA, SB); break;
		case FDIVD: FP_DIV_D (DR, DA, DB); break;
		case FDIVQ: FP_DIV_Q (QR, QA, QB); break;
		/* sqrt */
		case FSQRTS: FP_SQRT_S (SR, SB); break;
		case FSQRTD: FP_SQRT_D (DR, DB); break;
		case FSQRTQ: FP_SQRT_Q (QR, QB); break;
		/* mov */
		case FMOVQ: rd->q[0] = rs2->q[0]; rd->q[1] = rs2->q[1]; break;
		case FABSQ: rd->q[0] = rs2->q[0] & 0x7fffffffffffffffUL; rd->q[1] = rs2->q[1]; break;
		case FNEGQ: rd->q[0] = rs2->q[0] ^ 0x8000000000000000UL; rd->q[1] = rs2->q[1]; break;
		/* float to int */
		case FSTOI: FP_TO_INT_S (IR, SB, 32, 1); break;
		case FDTOI: FP_TO_INT_D (IR, DB, 32, 1); break;
		case FQTOI: FP_TO_INT_Q (IR, QB, 32, 1); break;
		case FSTOX: FP_TO_INT_S (XR, SB, 64, 1); break;
		case FDTOX: FP_TO_INT_D (XR, DB, 64, 1); break;
		case FQTOX: FP_TO_INT_Q (XR, QB, 64, 1); break;
		/* int to float */
		case FITOQ: IR = rs2->s; FP_FROM_INT_Q (QR, IR, 32, int); break;
		case FXTOQ: XR = rs2->d; FP_FROM_INT_Q (QR, XR, 64, long); break;
		/* Only Ultra-III generates these */
		case FXTOS: XR = rs2->d; FP_FROM_INT_S (SR, XR, 64, long); break;
		case FXTOD: XR = rs2->d; FP_FROM_INT_D (DR, XR, 64, long); break;
#if 0		/* Optimized inline in sparc64/kernel/entry.S */
		case FITOS: IR = rs2->s; FP_FROM_INT_S (SR, IR, 32, int); break;
#endif
		case FITOD: IR = rs2->s; FP_FROM_INT_D (DR, IR, 32, int); break;
		/* float to float */
		case FSTOD: FP_CONV (D, S, 1, 1, DR, SB); break;
		case FSTOQ: FP_CONV (Q, S, 2, 1, QR, SB); break;
		case FDTOQ: FP_CONV (Q, D, 2, 1, QR, DB); break;
		case FDTOS: FP_CONV (S, D, 1, 1, SR, DB); break;
		case FQTOS: FP_CONV (S, Q, 1, 2, SR, QB); break;
		case FQTOD: FP_CONV (D, Q, 1, 2, DR, QB); break;
		/* comparison */
		case FCMPQ:
		case FCMPEQ:
			FP_CMP_Q(XR, QB, QA, 3);
			/* fcmpeq signals invalid on any NaN; fcmpq only
			 * on signaling NaNs.
			 */
			if (XR == 3 &&
			    (((insn >> 5) & 0x1ff) == FCMPEQ ||
			     FP_ISSIGNAN_Q(QA) ||
			     FP_ISSIGNAN_Q(QB)))
				FP_SET_EXCEPTION (FP_EX_INVALID);
		}
		/* Write the result back unless the exception inhibits it. */
		if (!FP_INHIBIT_RESULTS) {
			switch ((type >> 6) & 0x7) {
			case 0: /* fcmp result: update the fcc field in %fsr */
				xfsr = current_thread_info()->xfsr[0];
				if (XR == -1) XR = 2;
				switch (freg & 3) {
				/* fcc0, 1, 2, 3 */
				case 0: xfsr &= ~0xc00; xfsr |= (XR << 10); break;
				case 1: xfsr &= ~0x300000000UL; xfsr |= (XR << 32); break;
				case 2: xfsr &= ~0xc00000000UL; xfsr |= (XR << 34); break;
				case 3: xfsr &= ~0x3000000000UL; xfsr |= (XR << 36); break;
				}
				current_thread_info()->xfsr[0] = xfsr;
				break;
			case 1: rd->s = IR; break;
			case 2: rd->d = XR; break;
			case 5: FP_PACK_SP (rd, SR); break;
			case 6: FP_PACK_DP (rd, DR); break;
			case 7: FP_PACK_QP (rd, QR); break;
			}
		}
		/* Any exception raised during emulation? */
		if(_fex != 0)
			return record_exception(regs, _fex);

		/* Success and no exceptions detected. */
		current_thread_info()->xfsr[0] &= ~(FSR_CEXC_MASK);
		regs->tpc = regs->tnpc;
		regs->tnpc += 4;
		return 1;
	}
err:	return 0;
}
| gpl-2.0 |
Hellmanor/kernel_v30c | arch/x86/pci/acpi.c | 654 | 10434 | #include <linux/pci.h>
#include <linux/acpi.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <asm/numa.h>
#include <asm/pci_x86.h>
struct pci_root_info {
struct acpi_device *bridge;
char *name;
unsigned int res_num;
struct resource *res;
struct pci_bus *bus;
int busnum;
};
/* Whether to honor the host bridge windows reported via ACPI _CRS. */
static bool pci_use_crs = true;

/* DMI callback: force _CRS usage on for a matched (quirky) machine. */
static int __init set_use_crs(const struct dmi_system_id *id)
{
	pci_use_crs = true;
	return 0;
}
/* Machines known to need _CRS even though their BIOS predates 2008
 * (see pci_acpi_crs_quirks() for the date cutoff).
 */
static const struct dmi_system_id pci_use_crs_table[] __initconst = {
	/* http://bugzilla.kernel.org/show_bug.cgi?id=14183 */
	{
		.callback = set_use_crs,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=16007 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASRock ALiveSATA2-GLAN",
		.matches = {
			DMI_MATCH(DMI_PRODUCT_NAME, "ALiveSATA2-GLAN"),
		},
	},
	/* https://bugzilla.kernel.org/show_bug.cgi?id=30552 */
	/* 2006 AMD HT/VIA system with two host bridges */
	{
		.callback = set_use_crs,
		.ident = "ASUS M2V-MX SE",
		.matches = {
			DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."),
			DMI_MATCH(DMI_BOARD_NAME, "M2V-MX SE"),
			DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
		},
	},
	{}	/* terminator */
};
/*
 * Decide whether to trust ACPI _CRS host bridge windows on this machine:
 * distrust pre-2008 BIOSes by default, re-enable for DMI-listed quirky
 * boxes, and let an explicit "pci=use_crs" / "pci=nocrs" override both.
 */
void __init pci_acpi_crs_quirks(void)
{
	int year;
	const char *verb, *opposite;

	if (dmi_get_date(DMI_BIOS_DATE, &year, NULL, NULL) && year < 2008)
		pci_use_crs = false;

	dmi_check_system(pci_use_crs_table);

	/* Command-line switches win over every heuristic above. */
	if (pci_probe & PCI_ROOT_NO_CRS)
		pci_use_crs = false;
	else if (pci_probe & PCI_USE__CRS)
		pci_use_crs = true;

	verb = pci_use_crs ? "Using" : "Ignoring";
	opposite = pci_use_crs ? "nocrs" : "use_crs";
	printk(KERN_INFO "PCI: %s host bridge windows from ACPI; "
	       "if necessary, use \"pci=%s\" and report a bug\n",
	       verb, opposite);
}
/*
 * Normalize any memory- or IO-describing ACPI resource into an
 * acpi_resource_address64.  Returns AE_OK on success, AE_ERROR for
 * resource types we do not handle or empty/invalid address ranges.
 */
static acpi_status
resource_to_addr(struct acpi_resource *resource,
		 struct acpi_resource_address64 *addr)
{
	acpi_status status;

	memset(addr, 0, sizeof(*addr));

	switch (resource->type) {
	case ACPI_RESOURCE_TYPE_MEMORY24: {
		struct acpi_resource_memory24 *m = &resource->data.memory24;

		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = m->minimum;
		addr->address_length = m->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	}
	case ACPI_RESOURCE_TYPE_MEMORY32: {
		struct acpi_resource_memory32 *m = &resource->data.memory32;

		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = m->minimum;
		addr->address_length = m->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	}
	case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: {
		struct acpi_resource_fixed_memory32 *m =
			&resource->data.fixed_memory32;

		addr->resource_type = ACPI_MEMORY_RANGE;
		addr->minimum = m->address;
		addr->address_length = m->address_length;
		addr->maximum = addr->minimum + addr->address_length - 1;
		return AE_OK;
	}
	case ACPI_RESOURCE_TYPE_ADDRESS16:
	case ACPI_RESOURCE_TYPE_ADDRESS32:
	case ACPI_RESOURCE_TYPE_ADDRESS64:
		/* Let ACPICA decode, then accept only non-empty MEM/IO. */
		status = acpi_resource_to_address64(resource, addr);
		if (ACPI_SUCCESS(status) &&
		    (addr->resource_type == ACPI_MEMORY_RANGE ||
		     addr->resource_type == ACPI_IO_RANGE) &&
		    addr->address_length > 0)
			return AE_OK;
		break;
	}
	return AE_ERROR;
}
/* _CRS walk callback, pass 1: count usable windows so the res[] array
 * can be sized before setup_resource() fills it in.
 */
static acpi_status
count_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct acpi_resource_address64 addr;

	if (ACPI_SUCCESS(resource_to_addr(acpi_res, &addr)))
		info->res_num++;

	return AE_OK;
}
/* _CRS walk callback, pass 2: convert each usable window into a struct
 * resource in info->res[].  When _CRS is being ignored the window is
 * logged but info->res_num is not advanced, so the slot is reused.
 */
static acpi_status
setup_resource(struct acpi_resource *acpi_res, void *data)
{
	struct pci_root_info *info = data;
	struct acpi_resource_address64 addr;
	struct resource *res;
	unsigned long flags;
	u64 start, end;

	if (!ACPI_SUCCESS(resource_to_addr(acpi_res, &addr)))
		return AE_OK;

	switch (addr.resource_type) {
	case ACPI_MEMORY_RANGE:
		flags = IORESOURCE_MEM;
		if (addr.info.mem.caching == ACPI_PREFETCHABLE_MEMORY)
			flags |= IORESOURCE_PREFETCH;
		break;
	case ACPI_IO_RANGE:
		flags = IORESOURCE_IO;
		break;
	default:
		return AE_OK;
	}

	/* Translate from bridge-reported to CPU addresses. */
	start = addr.minimum + addr.translation_offset;
	end = addr.maximum + addr.translation_offset;

	res = &info->res[info->res_num];
	res->name = info->name;
	res->flags = flags;
	res->start = start;
	res->end = end;
	res->child = NULL;

	if (!pci_use_crs) {
		dev_printk(KERN_DEBUG, &info->bridge->dev,
			   "host bridge window %pR (ignored)\n", res);
		return AE_OK;
	}

	info->res_num++;

	if (addr.translation_offset)
		dev_info(&info->bridge->dev, "host bridge window %pR "
			 "(PCI address [%#llx-%#llx])\n",
			 res, res->start - addr.translation_offset,
			 res->end - addr.translation_offset);
	else
		dev_info(&info->bridge->dev, "host bridge window %pR\n", res);

	return AE_OK;
}
/* True when @point lies inside the inclusive range of @res. */
static bool resource_contains(struct resource *res, resource_size_t point)
{
	return res->start <= point && point <= res->end;
}
/*
 * Merge overlapping host bridge windows of the given resource @type.
 * The kernel resource tree rejects overlapping siblings, so any window
 * that overlaps an earlier one is folded into it and marked dead
 * (flags = 0), even though the result no longer matches ACPI _CRS
 * one-to-one.
 */
static void coalesce_windows(struct pci_root_info *info, unsigned long type)
{
	int i, j;

	for (i = 0; i < info->res_num; i++) {
		struct resource *a = &info->res[i];

		if (!(a->flags & type))
			continue;

		for (j = i + 1; j < info->res_num; j++) {
			struct resource *b = &info->res[j];

			if (!(b->flags & type))
				continue;

			if (resource_contains(a, b->start) ||
			    resource_contains(a, b->end) ||
			    resource_contains(b, a->start) ||
			    resource_contains(b, a->end)) {
				a->start = min(a->start, b->start);
				a->end = max(a->end, b->end);
				dev_info(&info->bridge->dev,
					 "host bridge window expanded to %pR; %pR ignored\n",
					 a, b);
				b->flags = 0;
			}
		}
	}
}
/*
 * Insert the collected (and de-overlapped) host bridge windows into the
 * global iomem/ioport trees and attach them to the root bus.  No-op when
 * _CRS windows are being ignored.
 */
static void add_resources(struct pci_root_info *info)
{
	int i;

	if (!pci_use_crs)
		return;

	coalesce_windows(info, IORESOURCE_MEM);
	coalesce_windows(info, IORESOURCE_IO);

	for (i = 0; i < info->res_num; i++) {
		struct resource *res = &info->res[i];
		struct resource *root, *conflict;

		if (res->flags & IORESOURCE_MEM)
			root = &iomem_resource;
		else if (res->flags & IORESOURCE_IO)
			root = &ioport_resource;
		else
			continue;	/* coalesced-away window */

		conflict = insert_resource_conflict(root, res);
		if (conflict)
			dev_err(&info->bridge->dev,
				"address space collision: host bridge window %pR "
				"conflicts with %s %pR\n",
				res, conflict->name, conflict);
		else
			pci_bus_add_resource(info->bus, res, 0);
	}
}
/*
 * Replace the root bus's default resources with the windows found in
 * the host bridge's ACPI _CRS: count them, allocate the array, fill it,
 * then register the windows.  On allocation failure the bus is simply
 * left without _CRS windows (best effort; errors are not propagated).
 */
static void
get_current_resources(struct acpi_device *device, int busnum,
		      int domain, struct pci_bus *bus)
{
	struct pci_root_info info;
	size_t size;

	if (pci_use_crs)
		pci_bus_remove_resources(bus);

	info.bridge = device;
	info.bus = bus;
	info.res_num = 0;
	/* Pass 1: just count windows to size the allocation. */
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, count_resource,
			    &info);
	if (!info.res_num)
		return;

	size = sizeof(*info.res) * info.res_num;
	info.res = kmalloc(size, GFP_KERNEL);
	if (!info.res)
		goto res_alloc_fail;

	info.name = kasprintf(GFP_KERNEL, "PCI Bus %04x:%02x", domain, busnum);
	if (!info.name)
		goto name_alloc_fail;

	/* Pass 2: fill the array (res_num is re-counted by setup_resource;
	 * it stays 0 when !pci_use_crs since ignored windows don't count).
	 */
	info.res_num = 0;
	acpi_walk_resources(device->handle, METHOD_NAME__CRS, setup_resource,
			    &info);

	add_resources(&info);
	return;

name_alloc_fail:
	kfree(info.res);
res_alloc_fail:
	return;
}
/*
 * Create (or reuse) and scan the PCI root bus for an ACPI host bridge.
 * Picks a NUMA node from the bridge's proximity domain when available,
 * allocates the per-root sysdata, applies _CRS windows, and scans the
 * child buses.  Returns the root bus or NULL on failure.
 */
struct pci_bus * __devinit pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	int domain = root->segment;
	int busnum = root->secondary.start;
	struct pci_bus *bus;
	struct pci_sysdata *sd;
	int node;
#ifdef CONFIG_ACPI_NUMA
	int pxm;
#endif

	if (domain && !pci_domains_supported) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (multiple domains not supported)\n",
		       domain, busnum);
		return NULL;
	}

	node = -1;
#ifdef CONFIG_ACPI_NUMA
	/* Prefer the node derived from the ACPI proximity domain;
	 * remember the mapping for later lookups.
	 */
	pxm = acpi_get_pxm(device->handle);
	if (pxm >= 0)
		node = pxm_to_node(pxm);
	if (node != -1)
		set_mp_bus_to_node(busnum, node);
	else
#endif
		node = get_mp_bus_to_node(busnum);

	if (node != -1 && !node_online(node))
		node = -1;

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_WARNING "pci_bus %04x:%02x: "
		       "ignored (out of memory)\n", domain, busnum);
		return NULL;
	}

	sd->domain = domain;
	sd->node = node;
	/*
	 * Maybe the desired pci bus has been already scanned. In such case
	 * it is unnecessary to scan the pci bus with the given domain,busnum.
	 */
	bus = pci_find_bus(domain, busnum);
	if (bus) {
		/*
		 * If the desired bus exits, the content of bus->sysdata will
		 * be replaced by sd.
		 */
		memcpy(bus->sysdata, sd, sizeof(*sd));
		kfree(sd);
	} else {
		bus = pci_create_bus(NULL, busnum, &pci_root_ops, sd);
		if (bus) {
			/* Apply _CRS windows before scanning children. */
			get_current_resources(device, busnum, domain, bus);
			bus->subordinate = pci_scan_child_bus(bus);
		}
	}

	if (!bus)
		kfree(sd);

	if (bus && node != -1) {
#ifdef CONFIG_ACPI_NUMA
		if (pxm >= 0)
			dev_printk(KERN_DEBUG, &bus->dev,
				   "on NUMA node %d (pxm %d)\n", node, pxm);
#else
		dev_printk(KERN_DEBUG, &bus->dev, "on NUMA node %d\n", node);
#endif
	}

	return bus;
}
/*
 * Switch PCI interrupt routing over to ACPI.  Optionally pre-routes
 * every device's IRQ when "pci=routeirq" was given on the command line.
 */
int __init pci_acpi_init(void)
{
	struct pci_dev *dev = NULL;

	if (acpi_noirq)
		return -ENODEV;

	printk(KERN_INFO "PCI: Using ACPI for IRQ routing\n");
	acpi_irq_penalty_init();
	pcibios_enable_irq = acpi_pci_irq_enable;
	pcibios_disable_irq = acpi_pci_irq_disable;
	x86_init.pci.init_irq = x86_init_noop;

	if (!pci_routeirq)
		return 0;

	/*
	 * PCI IRQ routing is set up by pci_enable_device(), but we
	 * also do it here in case there are still broken drivers that
	 * don't use pci_enable_device().
	 */
	printk(KERN_INFO "PCI: Routing PCI interrupts for all devices because \"pci=routeirq\" specified\n");
	for_each_pci_dev(dev)
		acpi_pci_irq_enable(dev);

	return 0;
}
| gpl-2.0 |
ncarrier/carino-packages-linux | drivers/input/misc/ab8500-ponkey.c | 654 | 3403 | /*
* Copyright (C) ST-Ericsson SA 2010
*
* License Terms: GNU General Public License v2
* Author: Sundar Iyer <sundar.iyer@stericsson.com> for ST-Ericsson
*
* AB8500 Power-On Key handler
*/
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/mfd/abx500/ab8500.h>
#include <linux/of.h>
#include <linux/slab.h>
/**
 * struct ab8500_ponkey - ab8500 ponkey information
 * @idev: pointer to input device
 * @ab8500: ab8500 parent
 * @irq_dbf: irq number for falling transition (key press)
 * @irq_dbr: irq number for rising transition (key release)
 */
struct ab8500_ponkey {
	struct input_dev *idev;
	struct ab8500 *ab8500;
	int irq_dbf;
	int irq_dbr;
};
/* AB8500 gives us an interrupt when ONKEY is held */
static irqreturn_t ab8500_ponkey_handler(int irq, void *data)
{
	struct ab8500_ponkey *ponkey = data;

	/* Falling edge (dbf) = key pressed, rising edge (dbr) = released. */
	if (irq == ponkey->irq_dbf || irq == ponkey->irq_dbr)
		input_report_key(ponkey->idev, KEY_POWER,
				 irq == ponkey->irq_dbf);

	input_sync(ponkey->idev);

	return IRQ_HANDLED;
}
/*
 * Probe: look up the two debounce IRQs, allocate the state and input
 * device (all devm-managed, so there is no explicit error unwinding),
 * hook up the handlers and register the input device.
 */
static int ab8500_ponkey_probe(struct platform_device *pdev)
{
	struct ab8500 *ab8500 = dev_get_drvdata(pdev->dev.parent);
	struct ab8500_ponkey *ponkey;
	struct input_dev *input;
	int irq_dbf, irq_dbr;
	int error;

	irq_dbf = platform_get_irq_byname(pdev, "ONKEY_DBF");
	if (irq_dbf < 0) {
		dev_err(&pdev->dev, "No IRQ for ONKEY_DBF, error=%d\n", irq_dbf);
		return irq_dbf;
	}

	irq_dbr = platform_get_irq_byname(pdev, "ONKEY_DBR");
	if (irq_dbr < 0) {
		dev_err(&pdev->dev, "No IRQ for ONKEY_DBR, error=%d\n", irq_dbr);
		return irq_dbr;
	}

	ponkey = devm_kzalloc(&pdev->dev, sizeof(struct ab8500_ponkey),
			      GFP_KERNEL);
	if (!ponkey)
		return -ENOMEM;

	input = devm_input_allocate_device(&pdev->dev);
	if (!input)
		return -ENOMEM;

	ponkey->idev = input;
	ponkey->ab8500 = ab8500;
	ponkey->irq_dbf = irq_dbf;
	ponkey->irq_dbr = irq_dbr;

	input->name = "AB8500 POn(PowerOn) Key";
	input->dev.parent = &pdev->dev;

	/* The only event we emit is the power key. */
	input_set_capability(input, EV_KEY, KEY_POWER);

	error = devm_request_any_context_irq(&pdev->dev, ponkey->irq_dbf,
					     ab8500_ponkey_handler, 0,
					     "ab8500-ponkey-dbf", ponkey);
	if (error < 0) {
		dev_err(ab8500->dev, "Failed to request dbf IRQ#%d: %d\n",
			ponkey->irq_dbf, error);
		return error;
	}

	error = devm_request_any_context_irq(&pdev->dev, ponkey->irq_dbr,
					     ab8500_ponkey_handler, 0,
					     "ab8500-ponkey-dbr", ponkey);
	if (error < 0) {
		dev_err(ab8500->dev, "Failed to request dbr IRQ#%d: %d\n",
			ponkey->irq_dbr, error);
		return error;
	}

	error = input_register_device(ponkey->idev);
	if (error) {
		dev_err(ab8500->dev, "Can't register input device: %d\n", error);
		return error;
	}

	platform_set_drvdata(pdev, ponkey);
	return 0;
}
#ifdef CONFIG_OF
static const struct of_device_id ab8500_ponkey_match[] = {
	{ .compatible = "stericsson,ab8500-ponkey", },
	{}
};
/* Export the match table so udev/modprobe can autoload this module for
 * matching OF platform devices; without it the MODALIAS uevent lacks the
 * OF entry and the driver only binds when loaded manually.
 */
MODULE_DEVICE_TABLE(of, ab8500_ponkey_match);
#endif
static struct platform_driver ab8500_ponkey_driver = {
	.driver		= {
		.name	= "ab8500-poweron-key",
		/* of_match_ptr() evaluates to NULL when CONFIG_OF is unset */
		.of_match_table = of_match_ptr(ab8500_ponkey_match),
	},
	.probe		= ab8500_ponkey_probe,
	/* no .remove: everything is devm-managed */
};
module_platform_driver(ab8500_ponkey_driver);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Sundar Iyer <sundar.iyer@stericsson.com>");
MODULE_DESCRIPTION("ST-Ericsson AB8500 Power-ON(Pon) Key driver");
| gpl-2.0 |
bigzz/linux-ext4 | drivers/media/usb/pvrusb2/pvrusb2-audio.c | 910 | 2547 | /*
*
*
* Copyright (C) 2005 Mike Isely <isely@pobox.com>
* Copyright (C) 2004 Aurelien Alleaume <slts@free.fr>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
#include "pvrusb2-audio.h"
#include "pvrusb2-hdw-internal.h"
#include "pvrusb2-debug.h"
#include <linux/videodev2.h>
#include <media/msp3400.h>
#include <media/v4l2-common.h>
/* Maps a PVR2_CVAL_INPUT_* selector onto the msp3400 routing word. */
struct routing_scheme {
	const int *def;		/* table indexed by PVR2_CVAL_INPUT_* */
	unsigned int cnt;	/* number of entries in @def */
};

/* Scheme 0: Hauppauge wiring.  TV uses the tuner default; radio comes in
 * on SCART2, composite/s-video on SCART1, all routed through the SCART DSP.
 */
static const int routing_scheme0[] = {
	[PVR2_CVAL_INPUT_TV]        = MSP_INPUT_DEFAULT,
	[PVR2_CVAL_INPUT_RADIO]     = MSP_INPUT(MSP_IN_SCART2,
						MSP_IN_TUNER1,
						MSP_DSP_IN_SCART,
						MSP_DSP_IN_SCART),
	[PVR2_CVAL_INPUT_COMPOSITE] = MSP_INPUT(MSP_IN_SCART1,
						MSP_IN_TUNER1,
						MSP_DSP_IN_SCART,
						MSP_DSP_IN_SCART),
	[PVR2_CVAL_INPUT_SVIDEO]    = MSP_INPUT(MSP_IN_SCART1,
						MSP_IN_TUNER1,
						MSP_DSP_IN_SCART,
						MSP_DSP_IN_SCART),
};

static const struct routing_scheme routing_def0 = {
	.def = routing_scheme0,
	.cnt = ARRAY_SIZE(routing_scheme0),
};

/* Indexed by the device's signal_routing_scheme id. */
static const struct routing_scheme *routing_schemes[] = {
	[PVR2_ROUTING_SCHEME_HAUPPAUGE] = &routing_def0,
};
/*
 * Push the currently selected input's audio routing into the msp3400
 * sub-device.  Only acts when the input selection is dirty (or a full
 * refresh was forced); logs and bails on an unknown scheme/input.
 */
void pvr2_msp3400_subdev_update(struct pvr2_hdw *hdw, struct v4l2_subdev *sd)
{
	if (hdw->input_dirty || hdw->force_dirty) {
		const struct routing_scheme *sp;
		unsigned int sid = hdw->hdw_desc->signal_routing_scheme;
		u32 input;

		pvr2_trace(PVR2_TRACE_CHIPS, "subdev msp3400 v4l2 set_stereo");

		/* Bounds-check the scheme id before indexing the table. */
		sp = (sid < ARRAY_SIZE(routing_schemes)) ?
			routing_schemes[sid] : NULL;

		if ((sp != NULL) &&
		    (hdw->input_val >= 0) &&
		    (hdw->input_val < sp->cnt)) {
			input = sp->def[hdw->input_val];
		} else {
			pvr2_trace(PVR2_TRACE_ERROR_LEGS,
				   "*** WARNING *** subdev msp3400 set_input:"
				   " Invalid routing scheme (%u)"
				   " and/or input (%d)",
				   sid, hdw->input_val);
			return;
		}

		sd->ops->audio->s_routing(sd, input,
			MSP_OUTPUT(MSP_SC_IN_DSP_SCART1), 0);
	}
}
| gpl-2.0 |
GalaxyTab4/android_kernel_samsung_degaswifi | drivers/net/ethernet/lantiq_etop.c | 2190 | 19635 | /*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) 2011 John Crispin <blogic@openwrt.org>
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
#include <linux/in.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/phy.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <asm/checksum.h>
#include <lantiq_soc.h>
#include <xway_dma.h>
#include <lantiq_platform.h>
#define LTQ_ETOP_MDIO 0x11804
#define MDIO_REQUEST 0x80000000
#define MDIO_READ 0x40000000
#define MDIO_ADDR_MASK 0x1f
#define MDIO_ADDR_OFFSET 0x15
#define MDIO_REG_MASK 0x1f
#define MDIO_REG_OFFSET 0x10
#define MDIO_VAL_MASK 0xffff
#define PPE32_CGEN 0x800
#define LQ_PPE32_ENET_MAC_CFG 0x1840
#define LTQ_ETOP_ENETS0 0x11850
#define LTQ_ETOP_MAC_DA0 0x1186C
#define LTQ_ETOP_MAC_DA1 0x11870
#define LTQ_ETOP_CFG 0x16020
#define LTQ_ETOP_IGPLEN 0x16080
#define MAX_DMA_CHAN 0x8
#define MAX_DMA_CRC_LEN 0x4
#define MAX_DMA_DATA_LEN 0x600
#define ETOP_FTCU BIT(28)
#define ETOP_MII_MASK 0xf
#define ETOP_MII_NORMAL 0xd
#define ETOP_MII_REVERSE 0xe
#define ETOP_PLEN_UNDER 0x40
#define ETOP_CGEN 0x800
/* use 2 static channels for TX/RX */
#define LTQ_ETOP_TX_CHANNEL 1
#define LTQ_ETOP_RX_CHANNEL 6
#define IS_TX(x) (x == LTQ_ETOP_TX_CHANNEL)
#define IS_RX(x) (x == LTQ_ETOP_RX_CHANNEL)
#define ltq_etop_r32(x) ltq_r32(ltq_etop_membase + (x))
#define ltq_etop_w32(x, y) ltq_w32(x, ltq_etop_membase + (y))
#define ltq_etop_w32_mask(x, y, z) \
ltq_w32_mask(x, y, ltq_etop_membase + (z))
#define DRV_VERSION "1.0"
/* Mapped ETOP register window; all ltq_etop_r32/w32 accesses go here. */
static void __iomem *ltq_etop_membase;

/* State for one DMA channel (either the TX or the RX channel). */
struct ltq_etop_chan {
	int idx;			/* DMA channel number */
	int tx_free;			/* next TX descriptor to reclaim */
	struct net_device *netdev;	/* owning net device */
	struct napi_struct napi;	/* per-channel NAPI context */
	struct ltq_dma_channel dma;	/* low-level DMA channel handle */
	struct sk_buff *skb[LTQ_DESC_NUM];	/* skb per descriptor slot */
};

/* Driver-private data hung off the net_device. */
struct ltq_etop_priv {
	struct net_device *netdev;
	struct platform_device *pdev;
	struct ltq_eth_data *pldata;	/* board/platform config (MII mode, MAC) */
	struct resource *res;		/* register window resource */

	struct mii_bus *mii_bus;
	struct phy_device *phydev;

	struct ltq_etop_chan ch[MAX_DMA_CHAN];
	int tx_free[MAX_DMA_CHAN >> 1];

	spinlock_t lock;		/* protects descriptor rings */
};
/*
 * Allocate a fresh receive skb for the channel's current descriptor and
 * hand the buffer back to the DMA engine.  Returns 0 or -ENOMEM.
 */
static int
ltq_etop_alloc_skb(struct ltq_etop_chan *ch)
{
	ch->skb[ch->dma.desc] = netdev_alloc_skb(ch->netdev, MAX_DMA_DATA_LEN);
	if (!ch->skb[ch->dma.desc])
		return -ENOMEM;
	/* NOTE(review): the dma_map_single() return value is immediately
	 * overwritten by CPHYSADDR() on the next statement, so the mapping
	 * result is unused — presumably relying on coherent/physical
	 * addressing on this SoC; confirm intent.
	 */
	ch->dma.desc_base[ch->dma.desc].addr = dma_map_single(NULL,
		ch->skb[ch->dma.desc]->data, MAX_DMA_DATA_LEN,
		DMA_FROM_DEVICE);
	ch->dma.desc_base[ch->dma.desc].addr =
		CPHYSADDR(ch->skb[ch->dma.desc]->data);
	/* Give the descriptor to the hardware with NET_IP_ALIGN offset. */
	ch->dma.desc_base[ch->dma.desc].ctl =
		LTQ_DMA_OWN | LTQ_DMA_RX_OFFSET(NET_IP_ALIGN) |
		MAX_DMA_DATA_LEN;
	skb_reserve(ch->skb[ch->dma.desc], NET_IP_ALIGN);
	return 0;
}
/*
 * Pull one completed frame off the RX ring: detach its skb, refill the
 * descriptor with a fresh buffer, and pass the frame up the stack.
 * If the refill fails the DMA channel is stopped.
 */
static void
ltq_etop_hw_receive(struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	struct sk_buff *skb = ch->skb[ch->dma.desc];
	/* payload length excludes the trailing hardware CRC */
	int len = (desc->ctl & LTQ_DMA_SIZE_MASK) - MAX_DMA_CRC_LEN;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	if (ltq_etop_alloc_skb(ch)) {
		netdev_err(ch->netdev,
			   "failed to allocate new rx buffer, stopping DMA\n");
		ltq_dma_close(&ch->dma);
	}
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	spin_unlock_irqrestore(&priv->lock, flags);

	skb_put(skb, len);
	skb->protocol = eth_type_trans(skb, ch->netdev);
	netif_receive_skb(skb);
}
/*
 * NAPI RX poll: consume completed descriptors up to @budget.  When the
 * ring is drained (or nothing was received) the poll is completed and
 * the channel interrupt re-armed.  Returns the number of frames handled.
 */
static int
ltq_etop_poll_rx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch = container_of(napi,
				struct ltq_etop_chan, napi);
	int rx = 0;
	int complete = 0;

	while ((rx < budget) && !complete) {
		struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];

		/* descriptor done: hardware cleared OWN and set C */
		if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
			ltq_etop_hw_receive(ch);
			rx++;
		} else {
			complete = 1;
		}
	}

	if (complete || !rx) {
		napi_complete(&ch->napi);
		ltq_dma_ack_irq(&ch->dma);
	}

	return rx;
}
/*
 * NAPI TX poll: reclaim all completed TX descriptors, free their skbs,
 * and wake the queue if it was stopped.  Always reports one unit of
 * work and re-arms the channel interrupt (@budget is unused).
 */
static int
ltq_etop_poll_tx(struct napi_struct *napi, int budget)
{
	struct ltq_etop_chan *ch =
		container_of(napi, struct ltq_etop_chan, napi);
	struct ltq_etop_priv *priv = netdev_priv(ch->netdev);
	struct netdev_queue *txq =
		netdev_get_tx_queue(ch->netdev, ch->idx >> 1);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);
	/* walk forward from the oldest in-flight descriptor */
	while ((ch->dma.desc_base[ch->tx_free].ctl &
			(LTQ_DMA_OWN | LTQ_DMA_C)) == LTQ_DMA_C) {
		dev_kfree_skb_any(ch->skb[ch->tx_free]);
		ch->skb[ch->tx_free] = NULL;
		memset(&ch->dma.desc_base[ch->tx_free], 0,
		       sizeof(struct ltq_dma_desc));
		ch->tx_free++;
		ch->tx_free %= LTQ_DESC_NUM;
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (netif_tx_queue_stopped(txq))
		netif_tx_start_queue(txq);

	napi_complete(&ch->napi);
	ltq_dma_ack_irq(&ch->dma);
	return 1;
}
/* DMA channel interrupt: the channel index is derived from the IRQ
 * number (channels are mapped contiguously from LTQ_DMA_CH0_INT);
 * hand the rest of the work to NAPI.
 */
static irqreturn_t
ltq_etop_dma_irq(int irq, void *_priv)
{
	struct ltq_etop_priv *priv = _priv;

	napi_schedule(&priv->ch[irq - LTQ_DMA_CH0_INT].napi);
	return IRQ_HANDLED;
}
/*
 * Release one DMA channel: the descriptor ring, its interrupt line
 * and, for RX channels, every preallocated receive skb.
 */
static void
ltq_etop_free_channel(struct net_device *dev, struct ltq_etop_chan *ch)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);

	ltq_dma_free(&ch->dma);
	if (ch->dma.irq)
		free_irq(ch->dma.irq, priv);
	if (IS_RX(ch->idx)) {
		int desc;

		/*
		 * Free each RX buffer exactly once.  The previous code
		 * indexed with ch->dma.desc instead of the loop counter,
		 * freeing the same skb LTQ_DESC_NUM times (double free)
		 * while leaking all the others.
		 */
		for (desc = 0; desc < LTQ_DESC_NUM; desc++)
			dev_kfree_skb_any(ch->skb[desc]);
	}
}
/*
 * Power down the PPE block and release every DMA channel that was
 * configured for TX or RX.
 */
static void
ltq_etop_hw_exit(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int chan;

	ltq_pmu_disable(PMU_PPE);
	for (chan = 0; chan < MAX_DMA_CHAN; chan++) {
		if (!IS_TX(chan) && !IS_RX(chan))
			continue;
		ltq_etop_free_channel(dev, &priv->ch[chan]);
	}
}
/*
 * Bring up the ETOP hardware: power on the PPE, program the MII mode,
 * enable CRC generation and set up one DMA channel (ring, buffers,
 * interrupt) per TX/RX channel.  Returns 0 or a negative errno; on
 * failure the caller is expected to run ltq_etop_hw_exit() to release
 * whatever was partially allocated.
 */
static int
ltq_etop_hw_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;
	int err;

	ltq_pmu_enable(PMU_PPE);
	switch (priv->pldata->mii_mode) {
	case PHY_INTERFACE_MODE_RMII:
		ltq_etop_w32_mask(ETOP_MII_MASK,
			ETOP_MII_REVERSE, LTQ_ETOP_CFG);
		break;
	case PHY_INTERFACE_MODE_MII:
		ltq_etop_w32_mask(ETOP_MII_MASK,
			ETOP_MII_NORMAL, LTQ_ETOP_CFG);
		break;
	default:
		netdev_err(dev, "unknown mii mode %d\n",
			priv->pldata->mii_mode);
		return -ENOTSUPP;
	}
	/* enable crc generation */
	ltq_etop_w32(PPE32_CGEN, LQ_PPE32_ENET_MAC_CFG);
	ltq_dma_init_port(DMA_PORT_ETOP);
	for (i = 0; i < MAX_DMA_CHAN; i++) {
		int irq = LTQ_DMA_CH0_INT + i;
		struct ltq_etop_chan *ch = &priv->ch[i];

		ch->idx = ch->dma.nr = i;
		if (IS_TX(i)) {
			ltq_dma_alloc_tx(&ch->dma);
			/* the old code ignored request_irq() failures */
			err = request_irq(irq, ltq_etop_dma_irq,
				IRQF_DISABLED, "etop_tx", priv);
			if (err)
				return err;
		} else if (IS_RX(i)) {
			ltq_dma_alloc_rx(&ch->dma);
			for (ch->dma.desc = 0; ch->dma.desc < LTQ_DESC_NUM;
					ch->dma.desc++)
				if (ltq_etop_alloc_skb(ch))
					return -ENOMEM;
			ch->dma.desc = 0;
			err = request_irq(irq, ltq_etop_dma_irq,
				IRQF_DISABLED, "etop_rx", priv);
			if (err)
				return err;
		} else {
			continue;
		}
		/*
		 * Record the irq only after it was successfully requested,
		 * so ltq_etop_free_channel() never frees an unowned irq.
		 */
		ch->dma.irq = irq;
	}
	return 0;
}
/* ethtool: report static driver identification strings. */
static void
ltq_etop_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, "internal", sizeof(info->bus_info));
	strlcpy(info->driver, "Lantiq ETOP", sizeof(info->driver));
}
/* ethtool: fetch the link settings from the attached PHY. */
static int
ltq_etop_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ltq_etop_priv *etop = netdev_priv(dev);

	return phy_ethtool_gset(etop->phydev, cmd);
}
/* ethtool: push new link settings to the attached PHY. */
static int
ltq_etop_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct ltq_etop_priv *etop = netdev_priv(dev);

	return phy_ethtool_sset(etop->phydev, cmd);
}
/* ethtool: restart PHY autonegotiation. */
static int
ltq_etop_nway_reset(struct net_device *dev)
{
	struct ltq_etop_priv *etop = netdev_priv(dev);

	return phy_start_aneg(etop->phydev);
}
/* ethtool operations exposed by this driver. */
static const struct ethtool_ops ltq_etop_ethtool_ops = {
	.get_drvinfo = ltq_etop_get_drvinfo,
	.get_settings = ltq_etop_get_settings,
	.set_settings = ltq_etop_set_settings,
	.nway_reset = ltq_etop_nway_reset,
};
/*
 * MDIO bus write: wait for the controller to become idle, then issue
 * a write request for @phy_reg on @phy_addr.  Always returns 0.
 *
 * NOTE(review): the busy-wait has no timeout -- if the MDIO engine
 * hangs this spins forever; presumably the hardware always completes.
 */
static int
ltq_etop_mdio_wr(struct mii_bus *bus, int phy_addr, int phy_reg, u16 phy_data)
{
	u32 val = MDIO_REQUEST |
		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET) |
		phy_data;
	/* wait until any previous request has been consumed */
	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	ltq_etop_w32(val, LTQ_ETOP_MDIO);
	return 0;
}
/*
 * MDIO bus read: wait for idle, post a read request for @phy_reg on
 * @phy_addr, wait for completion and return the 16-bit result.
 *
 * NOTE(review): both busy-waits are unbounded, same caveat as in
 * ltq_etop_mdio_wr().
 */
static int
ltq_etop_mdio_rd(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	u32 val = MDIO_REQUEST | MDIO_READ |
		((phy_addr & MDIO_ADDR_MASK) << MDIO_ADDR_OFFSET) |
		((phy_reg & MDIO_REG_MASK) << MDIO_REG_OFFSET);
	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	ltq_etop_w32(val, LTQ_ETOP_MDIO);
	/* request bit clears when the read data is available */
	while (ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_REQUEST)
		;
	val = ltq_etop_r32(LTQ_ETOP_MDIO) & MDIO_VAL_MASK;
	return val;
}
/* PHY link-change callback: intentionally empty. */
static void
ltq_etop_mdio_link(struct net_device *dev)
{
	/* nothing to do */
}
/*
 * Find the first PHY present on our MDIO bus, connect to it and
 * restrict/advertise 10/100 capabilities.  Returns 0 or a negative
 * errno.
 */
static int
ltq_etop_mdio_probe(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct phy_device *phydev = NULL;
	int phy_addr;
	/* take the first populated address on the bus */
	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
		if (priv->mii_bus->phy_map[phy_addr]) {
			phydev = priv->mii_bus->phy_map[phy_addr];
			break;
		}
	}
	if (!phydev) {
		netdev_err(dev, "no PHY found\n");
		return -ENODEV;
	}
	phydev = phy_connect(dev, dev_name(&phydev->dev),
			&ltq_etop_mdio_link, priv->pldata->mii_mode);
	if (IS_ERR(phydev)) {
		netdev_err(dev, "Could not attach to PHY\n");
		return PTR_ERR(phydev);
	}
	/* the MAC only does 10/100, mask out everything else */
	phydev->supported &= (SUPPORTED_10baseT_Half
			      | SUPPORTED_10baseT_Full
			      | SUPPORTED_100baseT_Half
			      | SUPPORTED_100baseT_Full
			      | SUPPORTED_Autoneg
			      | SUPPORTED_MII
			      | SUPPORTED_TP);
	phydev->advertising = phydev->supported;
	priv->phydev = phydev;
	pr_info("%s: attached PHY [%s] (phy_addr=%s, irq=%d)\n",
	       dev->name, phydev->drv->name,
	       dev_name(&phydev->dev), phydev->irq);
	return 0;
}
/*
 * Allocate, populate and register the MDIO bus, then probe for the
 * PHY.  On failure each goto label unwinds exactly the resources
 * acquired before it.  Returns 0 or a negative errno.
 */
static int
ltq_etop_mdio_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	int i;
	int err;
	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus) {
		netdev_err(dev, "failed to allocate mii bus\n");
		err = -ENOMEM;
		goto err_out;
	}
	priv->mii_bus->priv = dev;
	priv->mii_bus->read = ltq_etop_mdio_rd;
	priv->mii_bus->write = ltq_etop_mdio_wr;
	priv->mii_bus->name = "ltq_mii";
	snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		priv->pdev->name, priv->pdev->id);
	priv->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
	if (!priv->mii_bus->irq) {
		err = -ENOMEM;
		goto err_out_free_mdiobus;
	}
	/* no PHY interrupt lines wired up -- poll every address */
	for (i = 0; i < PHY_MAX_ADDR; ++i)
		priv->mii_bus->irq[i] = PHY_POLL;
	if (mdiobus_register(priv->mii_bus)) {
		err = -ENXIO;
		goto err_out_free_mdio_irq;
	}
	if (ltq_etop_mdio_probe(dev)) {
		err = -ENXIO;
		goto err_out_unregister_bus;
	}
	return 0;
err_out_unregister_bus:
	mdiobus_unregister(priv->mii_bus);
err_out_free_mdio_irq:
	kfree(priv->mii_bus->irq);
err_out_free_mdiobus:
	mdiobus_free(priv->mii_bus);
err_out:
	return err;
}
static void
ltq_etop_mdio_cleanup(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
phy_disconnect(priv->phydev);
mdiobus_unregister(priv->mii_bus);
kfree(priv->mii_bus->irq);
mdiobus_free(priv->mii_bus);
}
/*
 * net_device open: enable DMA and NAPI on every TX/RX channel, start
 * the PHY and finally the transmit queues.
 */
static int
ltq_etop_open(struct net_device *dev)
{
	struct ltq_etop_priv *etop = netdev_priv(dev);
	int chan;

	for (chan = 0; chan < MAX_DMA_CHAN; chan++) {
		struct ltq_etop_chan *ch = &etop->ch[chan];

		if (IS_TX(chan) || IS_RX(chan)) {
			ltq_dma_open(&ch->dma);
			napi_enable(&ch->napi);
		}
	}
	phy_start(etop->phydev);
	netif_tx_start_all_queues(dev);
	return 0;
}
/*
 * net_device stop: quiesce the queues and PHY first, then disable
 * NAPI and close DMA on each active channel.
 */
static int
ltq_etop_stop(struct net_device *dev)
{
	struct ltq_etop_priv *etop = netdev_priv(dev);
	int chan;

	netif_tx_stop_all_queues(dev);
	phy_stop(etop->phydev);
	for (chan = 0; chan < MAX_DMA_CHAN; chan++) {
		struct ltq_etop_chan *ch = &etop->ch[chan];

		if (IS_TX(chan) || IS_RX(chan)) {
			napi_disable(&ch->napi);
			ltq_dma_close(&ch->dma);
		}
	}
	return 0;
}
/*
 * ndo_start_xmit: place one skb on the TX DMA ring.
 *
 * When the next descriptor is still owned by the DMA engine (ring
 * full) we stop the queue and return NETDEV_TX_BUSY so the stack
 * requeues the skb.  The skb must NOT be freed in that case: the old
 * code freed it and still returned BUSY, making the core retransmit a
 * freed buffer (use-after-free).
 */
static int
ltq_etop_tx(struct sk_buff *skb, struct net_device *dev)
{
	int queue = skb_get_queue_mapping(skb);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, queue);
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct ltq_etop_chan *ch = &priv->ch[(queue << 1) | 1];
	struct ltq_dma_desc *desc = &ch->dma.desc_base[ch->dma.desc];
	int len;
	unsigned long flags;
	u32 byte_offset;

	/* hardware wants at least a minimum-size ethernet frame */
	len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len;
	if ((desc->ctl & (LTQ_DMA_OWN | LTQ_DMA_C)) || ch->skb[ch->dma.desc]) {
		netdev_err(dev, "tx ring full\n");
		netif_tx_stop_queue(txq);
		return NETDEV_TX_BUSY;
	}
	/* dma needs to start on a 16 byte aligned address */
	byte_offset = CPHYSADDR(skb->data) % 16;
	ch->skb[ch->dma.desc] = skb;
	dev->trans_start = jiffies;
	spin_lock_irqsave(&priv->lock, flags);
	desc->addr = ((unsigned int) dma_map_single(NULL, skb->data, len,
						DMA_TO_DEVICE)) - byte_offset;
	/* descriptor fields must be visible before OWN is handed over */
	wmb();
	desc->ctl = LTQ_DMA_OWN | LTQ_DMA_SOP | LTQ_DMA_EOP |
		LTQ_DMA_TX_OFFSET(byte_offset) | (len & LTQ_DMA_SIZE_MASK);
	ch->dma.desc++;
	ch->dma.desc %= LTQ_DESC_NUM;
	spin_unlock_irqrestore(&priv->lock, flags);
	/* stop early if the next slot is still in flight */
	if (ch->dma.desc_base[ch->dma.desc].ctl & LTQ_DMA_OWN)
		netif_tx_stop_queue(txq);
	return NETDEV_TX_OK;
}
/*
 * Apply a new MTU and, on success, program the hardware ingress
 * packet-length register accordingly.
 */
static int
ltq_etop_change_mtu(struct net_device *dev, int new_mtu)
{
	struct ltq_etop_priv *etop;
	unsigned long flags;
	int ret;

	ret = eth_change_mtu(dev, new_mtu);
	if (ret)
		return ret;

	etop = netdev_priv(dev);
	spin_lock_irqsave(&etop->lock, flags);
	ltq_etop_w32((ETOP_PLEN_UNDER << 16) | new_mtu, LTQ_ETOP_IGPLEN);
	spin_unlock_irqrestore(&etop->lock, flags);
	return 0;
}
/* ndo_do_ioctl: forward MII ioctls to the PHY layer. */
static int
ltq_etop_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	/* TODO: mii-tool reports "No MII transceiver present!." ?!*/
	return phy_mii_ioctl(priv->phydev, rq, cmd);
}
/*
 * ndo_set_mac_address: record the new address and program it into the
 * unicast filter registers (DA0 holds the first 4 bytes, DA1 the last
 * 2 in its upper half).
 *
 * NOTE(review): dev_addr is read via u32/u16 casts -- assumes
 * dev_addr is suitably aligned and the CPU byte order matches what
 * the MAC expects; confirm before reusing on another SoC.
 */
static int
ltq_etop_set_mac_address(struct net_device *dev, void *p)
{
	int ret = eth_mac_addr(dev, p);
	if (!ret) {
		struct ltq_etop_priv *priv = netdev_priv(dev);
		unsigned long flags;
		/* store the mac for the unicast filter */
		spin_lock_irqsave(&priv->lock, flags);
		ltq_etop_w32(*((u32 *)dev->dev_addr), LTQ_ETOP_MAC_DA0);
		ltq_etop_w32(*((u16 *)&dev->dev_addr[4]) << 16,
			LTQ_ETOP_MAC_DA1);
		spin_unlock_irqrestore(&priv->lock, flags);
	}
	return ret;
}
static void
ltq_etop_set_multicast_list(struct net_device *dev)
{
struct ltq_etop_priv *priv = netdev_priv(dev);
unsigned long flags;
/* ensure that the unicast filter is not enabled in promiscious mode */
spin_lock_irqsave(&priv->lock, flags);
if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI))
ltq_etop_w32_mask(ETOP_FTCU, 0, LTQ_ETOP_ENETS0);
else
ltq_etop_w32_mask(0, ETOP_FTCU, LTQ_ETOP_ENETS0);
spin_unlock_irqrestore(&priv->lock, flags);
}
/* ndo_select_queue: map every skb onto TX queue 0. */
static u16
ltq_etop_select_queue(struct net_device *dev, struct sk_buff *skb)
{
	/* we are currently only using the first queue */
	return 0;
}
/*
 * ndo_init: bring up the hardware, program MAC address/MTU and set up
 * the MDIO bus.  This runs from within register_netdev(), so on
 * failure we may only undo our own setup.  The old error path called
 * unregister_netdev() and free_netdev() here and then kept using @dev
 * in ltq_etop_hw_exit() -- a use-after-free, and a double free once
 * register_netdev's caller released the device again.
 */
static int
ltq_etop_init(struct net_device *dev)
{
	struct ltq_etop_priv *priv = netdev_priv(dev);
	struct sockaddr mac;
	int err;
	bool random_mac = false;

	ether_setup(dev);
	dev->watchdog_timeo = 10 * HZ;
	err = ltq_etop_hw_init(dev);
	if (err)
		goto err_hw;
	ltq_etop_change_mtu(dev, 1500);
	memcpy(&mac, &priv->pldata->mac, sizeof(struct sockaddr));
	if (!is_valid_ether_addr(mac.sa_data)) {
		pr_warn("etop: invalid MAC, using random\n");
		eth_random_addr(mac.sa_data);
		random_mac = true;
	}
	err = ltq_etop_set_mac_address(dev, &mac);
	if (err)
		goto err_hw;
	/* Set addr_assign_type here, ltq_etop_set_mac_address would reset it. */
	if (random_mac)
		dev->addr_assign_type = NET_ADDR_RANDOM;
	ltq_etop_set_multicast_list(dev);
	err = ltq_etop_mdio_init(dev);
	if (err)
		goto err_hw;
	return 0;

err_hw:
	/* releases the DMA channels/irqs of a (possibly partial) hw_init */
	ltq_etop_hw_exit(dev);
	return err;
}
/*
 * ndo_tx_timeout: recover from a stuck transmitter by tearing the
 * hardware down and reinitialising it.
 */
static void
ltq_etop_tx_timeout(struct net_device *dev)
{
	int err;
	ltq_etop_hw_exit(dev);
	err = ltq_etop_hw_init(dev);
	if (err)
		goto err_hw;
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
	return;
err_hw:
	/* release whatever the failed re-init partially allocated */
	ltq_etop_hw_exit(dev);
	netdev_err(dev, "failed to restart etop after TX timeout\n");
}
/* net_device operations implemented by this driver. */
static const struct net_device_ops ltq_eth_netdev_ops = {
	.ndo_open = ltq_etop_open,
	.ndo_stop = ltq_etop_stop,
	.ndo_start_xmit = ltq_etop_tx,
	.ndo_change_mtu = ltq_etop_change_mtu,
	.ndo_do_ioctl = ltq_etop_ioctl,
	.ndo_set_mac_address = ltq_etop_set_mac_address,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_set_rx_mode = ltq_etop_set_multicast_list,
	.ndo_select_queue = ltq_etop_select_queue,
	.ndo_init = ltq_etop_init,
	.ndo_tx_timeout = ltq_etop_tx_timeout,
};
/*
 * Platform probe: map the ETOP register window (devm-managed), create
 * the multi-queue net_device, wire up per-channel NAPI contexts and
 * register the device.  Returns 0 or a negative errno.
 */
static int __init
ltq_etop_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct ltq_etop_priv *priv;
	struct resource *res;
	int err;
	int i;
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "failed to get etop resource\n");
		err = -ENOENT;
		goto err_out;
	}
	res = devm_request_mem_region(&pdev->dev, res->start,
		resource_size(res), dev_name(&pdev->dev));
	if (!res) {
		dev_err(&pdev->dev, "failed to request etop resource\n");
		err = -EBUSY;
		goto err_out;
	}
	ltq_etop_membase = devm_ioremap_nocache(&pdev->dev,
		res->start, resource_size(res));
	if (!ltq_etop_membase) {
		dev_err(&pdev->dev, "failed to remap etop engine %d\n",
			pdev->id);
		err = -ENOMEM;
		goto err_out;
	}
	dev = alloc_etherdev_mq(sizeof(struct ltq_etop_priv), 4);
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}
	strcpy(dev->name, "eth%d");
	dev->netdev_ops = &ltq_eth_netdev_ops;
	dev->ethtool_ops = &ltq_etop_ethtool_ops;
	priv = netdev_priv(dev);
	priv->res = res;
	priv->pdev = pdev;
	priv->pldata = dev_get_platdata(&pdev->dev);
	priv->netdev = dev;
	spin_lock_init(&priv->lock);
	/* TX channels poll with a small budget, RX with a larger one */
	for (i = 0; i < MAX_DMA_CHAN; i++) {
		if (IS_TX(i))
			netif_napi_add(dev, &priv->ch[i].napi,
				ltq_etop_poll_tx, 8);
		else if (IS_RX(i))
			netif_napi_add(dev, &priv->ch[i].napi,
				ltq_etop_poll_rx, 32);
		priv->ch[i].netdev = dev;
	}
	err = register_netdev(dev);
	if (err)
		goto err_free;
	platform_set_drvdata(pdev, dev);
	return 0;
err_free:
	free_netdev(dev);
err_out:
	return err;
}
/* Platform remove: quiesce, tear down hardware/MDIO and unregister. */
static int
ltq_etop_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	if (dev) {
		netif_tx_stop_all_queues(dev);
		ltq_etop_hw_exit(dev);
		ltq_etop_mdio_cleanup(dev);
		unregister_netdev(dev);
	}
	return 0;
}
/*
 * Platform driver glue.  No .probe member: the driver is bound once
 * via platform_driver_probe() in init_ltq_etop().
 */
static struct platform_driver ltq_mii_driver = {
	.remove = ltq_etop_remove,
	.driver = {
		.name = "ltq_etop",
		.owner = THIS_MODULE,
	},
};
/*
 * Module entry point: bind the platform driver.  The error message
 * now ends with '\n' so it is not merged with the following kernel
 * log line.
 */
int __init
init_ltq_etop(void)
{
	int ret = platform_driver_probe(&ltq_mii_driver, ltq_etop_probe);

	if (ret)
		pr_err("ltq_etop: Error registering platform driver!\n");
	return ret;
}
/* Module exit point: unbind the platform driver. */
static void __exit
exit_ltq_etop(void)
{
	platform_driver_unregister(&ltq_mii_driver);
}
/* Module plumbing and metadata. */
module_init(init_ltq_etop);
module_exit(exit_ltq_etop);
MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
MODULE_DESCRIPTION("Lantiq SoC ETOP");
MODULE_LICENSE("GPL");
| gpl-2.0 |
ch33kybutt/kernel_skipjack_tuna | fs/nfsd/vfs.c | 2190 | 54926 | /*
* File operations used by nfsd. Some of these have been ripped from
* other parts of the kernel because they weren't exported, others
* are partial duplicates with added or changed functionality.
*
* Note that several functions dget() the dentry upon which they want
* to act, most notably those that create directory entries. Response
* dentry's are dput()'d if necessary in the release callback.
* So if you notice code paths that apparently fail to dput() the
* dentry, don't worry--they have been taken care of.
*
* Copyright (C) 1995-1999 Olaf Kirch <okir@monad.swb.de>
* Zerocpy NFS support (C) 2002 Hirokazu Takahashi <taka@valinux.co.jp>
*/
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/splice.h>
#include <linux/fcntl.h>
#include <linux/namei.h>
#include <linux/delay.h>
#include <linux/fsnotify.h>
#include <linux/posix_acl_xattr.h>
#include <linux/xattr.h>
#include <linux/jhash.h>
#include <linux/ima.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/exportfs.h>
#include <linux/writeback.h>
#ifdef CONFIG_NFSD_V3
#include "xdr3.h"
#endif /* CONFIG_NFSD_V3 */
#ifdef CONFIG_NFSD_V4
#include "acl.h"
#include "idmap.h"
#endif /* CONFIG_NFSD_V4 */
#include "nfsd.h"
#include "vfs.h"
#define NFSDDBG_FACILITY NFSDDBG_FILEOP
/*
* This is a cache of readahead params that help us choose the proper
* readahead strategy. Initially, we set all readahead parameters to 0
* and let the VFS handle things.
* If you increase the number of cached files very much, you'll need to
* add a hash table here.
*/
/* One cached readahead state, keyed by (device, inode). */
struct raparms {
	struct raparms		*p_next;	/* hash-chain link */
	unsigned int		p_count;	/* active users of this entry */
	ino_t			p_ino;
	dev_t			p_dev;
	int			p_set;		/* p_ra holds saved state */
	struct file_ra_state	p_ra;
	unsigned int		p_hindex;	/* index of owning bucket */
};
/* One hash bucket: chain head plus its protecting lock. */
struct raparm_hbucket {
	struct raparms		*pb_head;
	spinlock_t		pb_lock;
} ____cacheline_aligned_in_smp;
#define RAPARM_HASH_BITS	4
#define RAPARM_HASH_SIZE	(1<<RAPARM_HASH_BITS)
#define RAPARM_HASH_MASK	(RAPARM_HASH_SIZE-1)
static struct raparm_hbucket	raparm_hash[RAPARM_HASH_SIZE];
/*
* Called from nfsd_lookup and encode_dirent. Check if we have crossed
* a mount point.
* Returns -EAGAIN or -ETIMEDOUT leaving *dpp and *expp unchanged,
* or nfs_ok having possibly changed *dpp and *expp
*/
/*
 * Follow a mount point down from (*dpp, *expp).  On success *dpp and
 * *expp are replaced by the mounted filesystem's root dentry and its
 * export; otherwise both are left untouched.
 */
int
nfsd_cross_mnt(struct svc_rqst *rqstp, struct dentry **dpp,
		        struct svc_export **expp)
{
	struct svc_export *exp = *expp, *exp2 = NULL;
	struct dentry *dentry = *dpp;
	struct path path = {.mnt = mntget(exp->ex_path.mnt),
			    .dentry = dget(dentry)};
	int err = 0;
	err = follow_down(&path);
	if (err < 0)
		goto out;
	exp2 = rqst_exp_get_by_name(rqstp, &path);
	if (IS_ERR(exp2)) {
		err = PTR_ERR(exp2);
		/*
		 * We normally allow NFS clients to continue
		 * "underneath" a mountpoint that is not exported.
		 * The exception is V4ROOT, where no traversal is ever
		 * allowed without an explicit export of the new
		 * directory.
		 */
		if (err == -ENOENT && !(exp->ex_flags & NFSEXP_V4ROOT))
			err = 0;
		path_put(&path);
		goto out;
	}
	if (nfsd_v4client(rqstp) ||
		(exp->ex_flags & NFSEXP_CROSSMOUNT) || EX_NOHIDE(exp2)) {
		/* successfully crossed mount point */
		/*
		 * This is subtle: path.dentry is *not* on path.mnt
		 * at this point.  The only reason we are safe is that
		 * original mnt is pinned down by exp, so we should
		 * put path *before* putting exp
		 */
		*dpp = path.dentry;
		path.dentry = dentry;
		*expp = exp2;
		exp2 = exp;
	}
	path_put(&path);
	exp_put(exp2);
out:
	return err;
}
/*
 * Move @path to its parent directory, first stepping up through any
 * stacked mounts when the dentry is a mount root.
 */
static void follow_to_parent(struct path *path)
{
	struct dentry *parent;

	while (path->dentry == path->mnt->mnt_root) {
		if (!follow_up(path))
			break;
	}
	parent = dget_parent(path->dentry);
	dput(path->dentry);
	path->dentry = parent;
}
/*
 * Resolve ".." for @dparent, possibly crossing a mount point upwards.
 * On success *dentryp holds a referenced parent dentry and *exp may
 * have been swapped for the parent filesystem's export.
 */
static int nfsd_lookup_parent(struct svc_rqst *rqstp, struct dentry *dparent, struct svc_export **exp, struct dentry **dentryp)
{
	struct svc_export *exp2;
	struct path path = {.mnt = mntget((*exp)->ex_path.mnt),
			    .dentry = dget(dparent)};
	follow_to_parent(&path);
	exp2 = rqst_exp_parent(rqstp, &path);
	if (PTR_ERR(exp2) == -ENOENT) {
		/* parent not exported: stay where we are */
		*dentryp = dget(dparent);
	} else if (IS_ERR(exp2)) {
		path_put(&path);
		return PTR_ERR(exp2);
	} else {
		*dentryp = dget(path.dentry);
		exp_put(*exp);
		*exp = exp2;
	}
	path_put(&path);
	return 0;
}
/*
* For nfsd purposes, we treat V4ROOT exports as though there was an
* export at *every* directory.
*/
/*
 * Decide whether lookup should treat @dentry as a mount point.
 * Under a V4ROOT export every positive dentry is handled as one.
 */
int nfsd_mountpoint(struct dentry *dentry, struct svc_export *exp)
{
	if (d_mountpoint(dentry))
		return 1;
	if ((exp->ex_flags & NFSEXP_V4ROOT) && dentry->d_inode)
		return 1;
	return 0;
}
/*
 * Look up @name (length @len) under the directory in @fhp without
 * following symlinks, handling "." and ".." and mount-point crossing.
 * On success *dentry_ret and *exp_ret carry references the caller
 * must drop.
 */
__be32
nfsd_lookup_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp,
		   const char *name, unsigned int len,
		   struct svc_export **exp_ret, struct dentry **dentry_ret)
{
	struct svc_export	*exp;
	struct dentry		*dparent;
	struct dentry		*dentry;
	int			host_err;
	dprintk("nfsd: nfsd_lookup(fh %s, %.*s)\n", SVCFH_fmt(fhp), len,name);
	dparent = fhp->fh_dentry;
	exp = fhp->fh_export;
	exp_get(exp);
	/* Lookup the name, but don't follow links */
	if (isdotent(name, len)) {
		if (len==1)
			dentry = dget(dparent);
		else if (dparent != exp->ex_path.dentry)
			dentry = dget_parent(dparent);
		else if (!EX_NOHIDE(exp) && !nfsd_v4client(rqstp))
			dentry = dget(dparent); /* .. == . just like at / */
		else {
			/* checking mountpoint crossing is very different when stepping up */
			host_err = nfsd_lookup_parent(rqstp, dparent, &exp, &dentry);
			if (host_err)
				goto out_nfserr;
		}
	} else {
		fh_lock(fhp);
		dentry = lookup_one_len(name, dparent, len);
		host_err = PTR_ERR(dentry);
		if (IS_ERR(dentry))
			goto out_nfserr;
		/*
		 * check if we have crossed a mount point ...
		 */
		if (nfsd_mountpoint(dentry, exp)) {
			if ((host_err = nfsd_cross_mnt(rqstp, &dentry, &exp))) {
				dput(dentry);
				goto out_nfserr;
			}
		}
	}
	*dentry_ret = dentry;
	*exp_ret = exp;
	return 0;
out_nfserr:
	exp_put(exp);
	return nfserrno(host_err);
}
/*
* Look up one component of a pathname.
* N.B. After this call _both_ fhp and resfh need an fh_put
*
* If the lookup would cross a mountpoint, and the mounted filesystem
* is exported to the client with NFSEXP_NOHIDE, then the lookup is
* accepted as it stands and the mounted directory is
* returned. Otherwise the covered directory is returned.
* NOTE: this mountpoint crossing is not supported properly by all
* clients and is explicitly disallowed for NFSv3
* NeilBrown <neilb@cse.unsw.edu.au>
*/
/*
 * Look up one path component and compose a file handle for the
 * result in @resfh.  A negative dentry yields nfserr_noent after the
 * handle has been built.  See the block comment above for the
 * mount-point crossing rules.
 */
__be32
nfsd_lookup(struct svc_rqst *rqstp, struct svc_fh *fhp, const char *name,
				unsigned int len, struct svc_fh *resfh)
{
	struct svc_export	*exp;
	struct dentry		*dentry;
	__be32 err;
	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
	if (err)
		return err;
	err = nfsd_lookup_dentry(rqstp, fhp, name, len, &exp, &dentry);
	if (err)
		return err;
	err = check_nfsd_access(exp, rqstp);
	if (err)
		goto out;
	/*
	 * Note: we compose the file handle now, but as the
	 * dentry may be negative, it may need to be updated.
	 */
	err = fh_compose(resfh, exp, dentry, fhp);
	if (!err && !dentry->d_inode)
		err = nfserr_noent;
out:
	dput(dentry);
	exp_put(exp);
	return err;
}
/*
 * Break any lease held on @inode before a conflicting operation.
 * Only regular files can carry leases; everything else is a no-op.
 */
static int nfsd_break_lease(struct inode *inode)
{
	if (S_ISREG(inode->i_mode))
		return break_lease(inode, O_WRONLY | O_NONBLOCK);
	return 0;
}
/*
* Commit metadata changes to stable storage.
*/
/*
 * Flush the metadata of the file behind @fhp to stable storage when
 * the export is synchronous.  Prefers the filesystem's own
 * commit_metadata export operation, falling back to a generic inode
 * sync.
 */
static int
commit_metadata(struct svc_fh *fhp)
{
	struct inode *inode = fhp->fh_dentry->d_inode;
	const struct export_operations *ops = inode->i_sb->s_export_op;

	if (!EX_ISSYNC(fhp->fh_export))
		return 0;
	return ops->commit_metadata ? ops->commit_metadata(inode)
				    : sync_inode_metadata(inode, 1);
}
/*
* Set various file attributes.
* N.B. After this call fhp needs an fh_put
*/
/*
 * Apply the attribute changes in @iap to the file behind @fhp.
 * Handles NFSv2 "touch" heuristics, size changes (with write-access
 * and lock checks), mode sanitising and setuid/setgid stripping, and
 * optionally enforces the ctime guard used by NFSv3 SETATTR.
 */
__be32
nfsd_setattr(struct svc_rqst *rqstp, struct svc_fh *fhp, struct iattr *iap,
	     int check_guard, time_t guardtime)
{
	struct dentry	*dentry;
	struct inode	*inode;
	int		accmode = NFSD_MAY_SATTR;
	int		ftype = 0;
	__be32		err;
	int		host_err;
	int		size_change = 0;
	if (iap->ia_valid & (ATTR_ATIME | ATTR_MTIME | ATTR_SIZE))
		accmode |= NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE;
	if (iap->ia_valid & ATTR_SIZE)
		ftype = S_IFREG;
	/* Get inode */
	err = fh_verify(rqstp, fhp, ftype, accmode);
	if (err)
		goto out;
	dentry = fhp->fh_dentry;
	inode = dentry->d_inode;
	/* Ignore any mode updates on symlinks */
	if (S_ISLNK(inode->i_mode))
		iap->ia_valid &= ~ATTR_MODE;
	if (!iap->ia_valid)
		goto out;
	/*
	 * NFSv2 does not differentiate between "set-[ac]time-to-now"
	 * which only requires access, and "set-[ac]time-to-X" which
	 * requires ownership.
	 * So if it looks like it might be "set both to the same time which
	 * is close to now", and if inode_change_ok fails, then we
	 * convert to "set to now" instead of "set to explicit time"
	 *
	 * We only call inode_change_ok as the last test as technically
	 * it is not an interface that we should be using.  It is only
	 * valid if the filesystem does not define it's own i_op->setattr.
	 */
#define BOTH_TIME_SET (ATTR_ATIME_SET | ATTR_MTIME_SET)
#define	MAX_TOUCH_TIME_ERROR (30*60)
	if ((iap->ia_valid & BOTH_TIME_SET) == BOTH_TIME_SET &&
	    iap->ia_mtime.tv_sec == iap->ia_atime.tv_sec) {
		/*
		 * Looks probable.
		 *
		 * Now just make sure time is in the right ballpark.
		 * Solaris, at least, doesn't seem to care what the time
		 * request is.  We require it be within 30 minutes of now.
		 */
		time_t delta = iap->ia_atime.tv_sec - get_seconds();
		if (delta < 0)
			delta = -delta;
		if (delta < MAX_TOUCH_TIME_ERROR &&
		    inode_change_ok(inode, iap) != 0) {
			/*
			 * Turn off ATTR_[AM]TIME_SET but leave ATTR_[AM]TIME.
			 * This will cause notify_change to set these times
			 * to "now"
			 */
			iap->ia_valid &= ~BOTH_TIME_SET;
		}
	}
	/*
	 * The size case is special.
	 * It changes the file as well as the attributes.
	 */
	if (iap->ia_valid & ATTR_SIZE) {
		if (iap->ia_size < inode->i_size) {
			err = nfsd_permission(rqstp, fhp->fh_export, dentry,
					NFSD_MAY_TRUNC|NFSD_MAY_OWNER_OVERRIDE);
			if (err)
				goto out;
		}
		host_err = get_write_access(inode);
		if (host_err)
			goto out_nfserr;
		size_change = 1;
		host_err = locks_verify_truncate(inode, NULL, iap->ia_size);
		if (host_err) {
			put_write_access(inode);
			goto out_nfserr;
		}
	}
	/* sanitize the mode change */
	if (iap->ia_valid & ATTR_MODE) {
		iap->ia_mode &= S_IALLUGO;
		iap->ia_mode |= (inode->i_mode & ~S_IALLUGO);
	}
	/* Revoke setuid/setgid on chown */
	if (!S_ISDIR(inode->i_mode) &&
	    (((iap->ia_valid & ATTR_UID) && iap->ia_uid != inode->i_uid) ||
	     ((iap->ia_valid & ATTR_GID) && iap->ia_gid != inode->i_gid))) {
		iap->ia_valid |= ATTR_KILL_PRIV;
		if (iap->ia_valid & ATTR_MODE) {
			/* we're setting mode too, just clear the s*id bits */
			iap->ia_mode &= ~S_ISUID;
			if (iap->ia_mode & S_IXGRP)
				iap->ia_mode &= ~S_ISGID;
		} else {
			/* set ATTR_KILL_* bits and let VFS handle it */
			iap->ia_valid |= (ATTR_KILL_SUID | ATTR_KILL_SGID);
		}
	}
	/* Change the attributes. */
	iap->ia_valid |= ATTR_CTIME;
	err = nfserr_notsync;
	if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
		host_err = nfsd_break_lease(inode);
		if (host_err)
			goto out_nfserr;
		fh_lock(fhp);
		host_err = notify_change(dentry, iap);
		err = nfserrno(host_err);
		fh_unlock(fhp);
	}
	if (size_change)
		put_write_access(inode);
	if (!err)
		commit_metadata(fhp);
out:
	return err;
out_nfserr:
	err = nfserrno(host_err);
	goto out;
}
#if defined(CONFIG_NFSD_V2_ACL) || \
defined(CONFIG_NFSD_V3_ACL) || \
defined(CONFIG_NFSD_V4)
/*
 * Read xattr @key from @dentry into a freshly allocated buffer.
 * On success *buf is owned by the caller and the attribute length is
 * returned; 0 or a negative errno means no buffer was allocated.
 *
 * NOTE(review): the size probe and the actual read are two separate
 * vfs_getxattr() calls -- if the attribute grows in between, the
 * second call can fail with -ERANGE.  Presumably callers treat that
 * as an ordinary error; verify before relying on it.
 */
static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
{
	ssize_t buflen;
	ssize_t ret;
	/* first call with a NULL buffer just reports the needed size */
	buflen = vfs_getxattr(dentry, key, NULL, 0);
	if (buflen <= 0)
		return buflen;
	*buf = kmalloc(buflen, GFP_KERNEL);
	if (!*buf)
		return -ENOMEM;
	ret = vfs_getxattr(dentry, key, *buf, buflen);
	if (ret < 0)
		kfree(*buf);
	return ret;
}
#endif
#if defined(CONFIG_NFSD_V4)
/*
 * Serialize @pacl into its xattr wire format and store it on @dentry
 * under the name @key.  Returns 0 or a negative errno.
 */
static int
set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
{
	size_t xattr_len = posix_acl_xattr_size(pacl->a_count);
	char *xattr_buf;
	int len;
	int err;

	xattr_buf = kmalloc(xattr_len, GFP_KERNEL);
	if (xattr_buf == NULL)
		return -ENOMEM;

	len = posix_acl_to_xattr(pacl, xattr_buf, xattr_len);
	if (len < 0)
		err = len;
	else
		err = vfs_setxattr(dentry, key, xattr_buf, len, 0);

	kfree(xattr_buf);
	return err;
}
/*
 * Store an NFSv4 ACL on the file behind @fhp by converting it to the
 * POSIX access (and, for directories, default) ACL xattrs.
 * Conversion failures map to nfserr_attrnotsupp.
 */
__be32
nfsd4_set_nfs4_acl(struct svc_rqst *rqstp, struct svc_fh *fhp,
    struct nfs4_acl *acl)
{
	__be32 error;
	int host_error;
	struct dentry *dentry;
	struct inode *inode;
	struct posix_acl *pacl = NULL, *dpacl = NULL;
	unsigned int flags = 0;
	/* Get inode */
	error = fh_verify(rqstp, fhp, 0 /* S_IFREG */, NFSD_MAY_SATTR);
	if (error)
		return error;
	dentry = fhp->fh_dentry;
	inode = dentry->d_inode;
	if (S_ISDIR(inode->i_mode))
		flags = NFS4_ACL_DIR;
	host_error = nfs4_acl_nfsv4_to_posix(acl, &pacl, &dpacl, flags);
	if (host_error == -EINVAL) {
		return nfserr_attrnotsupp;
	} else if (host_error < 0)
		goto out_nfserr;
	host_error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS);
	if (host_error < 0)
		goto out_release;
	/* directories additionally carry a default ACL */
	if (S_ISDIR(inode->i_mode))
		host_error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT);
out_release:
	posix_acl_release(pacl);
	posix_acl_release(dpacl);
out_nfserr:
	/* host_error == 0 maps to nfs_ok here */
	if (host_error == -EOPNOTSUPP)
		return nfserr_attrnotsupp;
	else
		return nfserrno(host_error);
}
/*
 * Read the xattr @key from @dentry and decode it as a POSIX ACL.
 * Returns the ACL, ERR_PTR(-ENODATA) when the attribute is absent,
 * or another ERR_PTR on failure.
 */
static struct posix_acl *
_get_posix_acl(struct dentry *dentry, char *key)
{
	struct posix_acl *acl;
	void *value = NULL;
	int len = nfsd_getxattr(dentry, key, &value);

	if (len == 0)
		len = -ENODATA;
	if (len < 0)
		return ERR_PTR(len);

	acl = posix_acl_from_xattr(value, len);
	kfree(value);
	return acl;
}
/*
 * Build an NFSv4 ACL for @dentry from its POSIX access ACL (falling
 * back to one synthesized from the mode bits) and, for directories,
 * its default ACL.  On success *acl holds the new ACL.
 */
int
nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_acl **acl)
{
	struct inode *inode = dentry->d_inode;
	int error = 0;
	struct posix_acl *pacl = NULL, *dpacl = NULL;
	unsigned int flags = 0;
	pacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_ACCESS);
	if (IS_ERR(pacl) && PTR_ERR(pacl) == -ENODATA)
		pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
	if (IS_ERR(pacl)) {
		error = PTR_ERR(pacl);
		pacl = NULL;
		goto out;
	}
	if (S_ISDIR(inode->i_mode)) {
		dpacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_DEFAULT);
		/* a missing default ACL is not an error */
		if (IS_ERR(dpacl) && PTR_ERR(dpacl) == -ENODATA)
			dpacl = NULL;
		else if (IS_ERR(dpacl)) {
			error = PTR_ERR(dpacl);
			dpacl = NULL;
			goto out;
		}
		flags = NFS4_ACL_DIR;
	}
	*acl = nfs4_acl_posix_to_nfsv4(pacl, dpacl, flags);
	if (IS_ERR(*acl)) {
		error = PTR_ERR(*acl);
		*acl = NULL;
	}
 out:
	posix_acl_release(pacl);
	posix_acl_release(dpacl);
	return error;
}
#endif /* defined(CONFIG_NFSD_V4) */
#ifdef CONFIG_NFSD_V3
/*
* Check server access rights to a file system object
*/
/* Maps one NFSv3 ACCESS bit to the nfsd permission flags it needs. */
struct accessmap {
	u32		access;
	int		how;
};
/* ACCESS mapping for regular files. */
static struct accessmap	nfs3_regaccess[] = {
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_EXECUTE,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_WRITE|NFSD_MAY_TRUNC	},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_WRITE			},
    {	0,			0				}
};
/* ACCESS mapping for directories. */
static struct accessmap	nfs3_diraccess[] = {
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_LOOKUP,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_EXEC|NFSD_MAY_WRITE|NFSD_MAY_TRUNC},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_EXEC|NFSD_MAY_WRITE	},
    {	NFS3_ACCESS_DELETE,	NFSD_MAY_REMOVE			},
    {	0,			0				}
};
static struct accessmap	nfs3_anyaccess[] = {
	/* Some clients - Solaris 2.6 at least, make an access call
	 * to the server to check for access for things like /dev/null
	 * (which really, the server doesn't care about).  So
	 * We provide simple access checking for them, looking
	 * mainly at mode bits, and we make sure to ignore read-only
	 * filesystem checks
	 */
    {	NFS3_ACCESS_READ,	NFSD_MAY_READ			},
    {	NFS3_ACCESS_EXECUTE,	NFSD_MAY_EXEC			},
    {	NFS3_ACCESS_MODIFY,	NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS	},
    {	NFS3_ACCESS_EXTEND,	NFSD_MAY_WRITE|NFSD_MAY_LOCAL_ACCESS	},
    {	0,			0				}
};
/*
 * Implement the NFSv3 ACCESS operation: for every requested access
 * bit, run the corresponding nfsd permission check and report which
 * bits are granted (*access) and which were understood (*supported).
 */
__be32
nfsd_access(struct svc_rqst *rqstp, struct svc_fh *fhp, u32 *access, u32 *supported)
{
	struct accessmap	*map;
	struct svc_export	*export;
	struct dentry		*dentry;
	u32			query, result = 0, sresult = 0;
	__be32			error;
	error = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP);
	if (error)
		goto out;
	export = fhp->fh_export;
	dentry = fhp->fh_dentry;
	/* pick the mapping table for this object type */
	if (S_ISREG(dentry->d_inode->i_mode))
		map = nfs3_regaccess;
	else if (S_ISDIR(dentry->d_inode->i_mode))
		map = nfs3_diraccess;
	else
		map = nfs3_anyaccess;
	query = *access;
	for  (; map->access; map++) {
		if (map->access & query) {
			__be32 err2;
			sresult |= map->access;
			err2 = nfsd_permission(rqstp, export, dentry, map->how);
			switch (err2) {
			case nfs_ok:
				result |= map->access;
				break;
			/* the following error codes just mean the access was not allowed,
			 * rather than an error occurred */
			case nfserr_rofs:
			case nfserr_acces:
			case nfserr_perm:
				/* simply don't "or" in the access bit. */
				break;
			default:
				error = err2;
				goto out;
			}
		}
	}
	*access = result;
	if (supported)
		*supported = sresult;
 out:
	return error;
}
#endif /* CONFIG_NFSD_V3 */
/*
 * Break a conflicting lease before nfsd opens a file, unless the
 * caller explicitly asked not to (NFSD_MAY_NOT_BREAK_LEASE).
 */
static int nfsd_open_break_lease(struct inode *inode, int access)
{
	unsigned int lease_mode;

	if (access & NFSD_MAY_NOT_BREAK_LEASE)
		return 0;
	lease_mode = (access & NFSD_MAY_WRITE) ? O_WRONLY : O_RDONLY;
	return break_lease(inode, lease_mode | O_NONBLOCK);
}
/*
* Open an existing file or directory.
* The access argument indicates the type of open (read/write/lock)
* N.B. After this call fhp needs an fh_put
*/
/*
 * Open the existing file or directory behind @fhp for the access mode
 * requested in @access, returning the open file in *filp.  Rejects
 * append-only writes and anything carrying mandatory locks.
 */
__be32
nfsd_open(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
			int access, struct file **filp)
{
	struct dentry	*dentry;
	struct inode	*inode;
	int		flags = O_RDONLY|O_LARGEFILE;
	__be32		err;
	int		host_err = 0;
	validate_process_creds();
	/*
	 * If we get here, then the client has already done an "open",
	 * and (hopefully) checked permission - so allow OWNER_OVERRIDE
	 * in case a chmod has now revoked permission.
	 */
	err = fh_verify(rqstp, fhp, type, access | NFSD_MAY_OWNER_OVERRIDE);
	if (err)
		goto out;
	dentry = fhp->fh_dentry;
	inode = dentry->d_inode;
	/* Disallow write access to files with the append-only bit set
	 * or any access when mandatory locking enabled
	 */
	err = nfserr_perm;
	if (IS_APPEND(inode) && (access & NFSD_MAY_WRITE))
		goto out;
	/*
	 * We must ignore files (but only files) which might have mandatory
	 * locks on them because there is no way to know if the accesser has
	 * the lock.
	 */
	if (S_ISREG((inode)->i_mode) && mandatory_lock(inode))
		goto out;
	/* no file operations means nothing here can be opened */
	if (!inode->i_fop)
		goto out;
	host_err = nfsd_open_break_lease(inode, access);
	if (host_err) /* NOMEM or WOULDBLOCK */
		goto out_nfserr;
	if (access & NFSD_MAY_WRITE) {
		if (access & NFSD_MAY_READ)
			flags = O_RDWR|O_LARGEFILE;
		else
			flags = O_WRONLY|O_LARGEFILE;
	}
	*filp = dentry_open(dget(dentry), mntget(fhp->fh_export->ex_path.mnt),
			    flags, current_cred());
	if (IS_ERR(*filp))
		host_err = PTR_ERR(*filp);
	else
		host_err = ima_file_check(*filp, access);
out_nfserr:
	err = nfserrno(host_err);
out:
	validate_process_creds();
	return err;
}
/*
* Close a file.
*/
/*
 * Close a file previously opened with nfsd_open().
 */
void
nfsd_close(struct file *filp)
{
	fput(filp);
}
/*
* Obtain the readahead parameters for the file
* specified by (dev, ino).
*/
/*
 * Find (or recycle) the cached readahead state for (dev, ino).
 * The entry is moved to the front of its hash chain and its use
 * count raised; returns NULL when every entry in the bucket is busy.
 */
static inline struct raparms *
nfsd_get_raparms(dev_t dev, ino_t ino)
{
	struct raparms	*ra, **rap, **frap = NULL;
	int depth = 0;
	unsigned int hash;
	struct raparm_hbucket *rab;
	hash = jhash_2words(dev, ino, 0xfeedbeef) & RAPARM_HASH_MASK;
	rab = &raparm_hash[hash];
	spin_lock(&rab->pb_lock);
	for (rap = &rab->pb_head; (ra = *rap); rap = &ra->p_next) {
		if (ra->p_ino == ino && ra->p_dev == dev)
			goto found;
		depth++;
		/* remember the last unused entry for possible recycling */
		if (ra->p_count == 0)
			frap = rap;
	}
	depth = nfsdstats.ra_size;
	if (!frap) {
		spin_unlock(&rab->pb_lock);
		return NULL;
	}
	/* no hit: reinitialise the free entry for this (dev, ino) */
	rap = frap;
	ra = *frap;
	ra->p_dev = dev;
	ra->p_ino = ino;
	ra->p_set = 0;
	ra->p_hindex = hash;
found:
	/* move-to-front keeps hot entries cheap to find */
	if (rap != &rab->pb_head) {
		*rap = ra->p_next;
		ra->p_next   = rab->pb_head;
		rab->pb_head = ra;
	}
	ra->p_count++;
	nfsdstats.ra_depth[depth*10/nfsdstats.ra_size]++;
	spin_unlock(&rab->pb_lock);
	return ra;
}
/*
 * Grab and keep cached pages associated with a file in the svc_rqst
 * so that they can be passed to the network sendmsg/sendpage routines
 * directly. They will be released after the sending has completed.
 */
static int
nfsd_splice_actor(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
		  struct splice_desc *sd)
{
	struct svc_rqst *rqstp = sd->u.data;
	struct page **pp = rqstp->rq_respages + rqstp->rq_resused;
	struct page *page = buf->page;
	size_t size;

	size = sd->len;

	if (rqstp->rq_res.page_len == 0) {
		/* first chunk of data: install page and record the offset */
		get_page(page);
		put_page(*pp);
		*pp = page;
		rqstp->rq_resused++;
		rqstp->rq_res.page_base = buf->offset;
		rqstp->rq_res.page_len = size;
	} else if (page != pp[-1]) {
		/* new page, different from the previous one: take a ref */
		get_page(page);
		if (*pp)
			put_page(*pp);
		*pp = page;
		rqstp->rq_resused++;
		rqstp->rq_res.page_len += size;
	} else
		/* continuation of the same page: just extend the length */
		rqstp->rq_res.page_len += size;

	return size;
}
/* splice_direct_to_actor() callback: feed each buffer to nfsd_splice_actor */
static int nfsd_direct_splice_actor(struct pipe_inode_info *pipe,
				    struct splice_desc *sd)
{
	return __splice_from_pipe(pipe, sd, nfsd_splice_actor);
}
/*
 * Read data from an already-open file into the request's kvec (or,
 * preferably, splice the page-cache pages straight into the response).
 * *count is updated with the number of bytes actually read.
 */
static __be32
nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
              loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
{
	mm_segment_t	oldfs;
	__be32		err;
	int		host_err;

	err = nfserr_perm;

	/* zero-copy path: hand page-cache pages directly to the transport */
	if (file->f_op->splice_read && rqstp->rq_splice_ok) {
		struct splice_desc sd = {
			.len		= 0,
			.total_len	= *count,
			.pos		= offset,
			.u.data		= rqstp,
		};

		rqstp->rq_resused = 1;
		host_err = splice_direct_to_actor(file, &sd, nfsd_direct_splice_actor);
	} else {
		/* fallback: copy through kernel buffers via vfs_readv */
		oldfs = get_fs();
		set_fs(KERNEL_DS);
		host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
		set_fs(oldfs);
	}

	if (host_err >= 0) {
		nfsdstats.io_read += host_err;
		*count = host_err;
		err = 0;
		fsnotify_access(file);
	} else
		err = nfserrno(host_err);
	return err;
}
/*
 * Clear setuid/setgid bits after a write, matching what a local write
 * would do.  Errors from notify_change() are deliberately ignored here;
 * the write itself has already succeeded.
 */
static void kill_suid(struct dentry *dentry)
{
	struct iattr	ia;
	ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;

	mutex_lock(&dentry->d_inode->i_mutex);
	notify_change(dentry, &ia);
	mutex_unlock(&dentry->d_inode->i_mutex);
}
/*
 * Gathered writes: If another process is currently writing to the file,
 * there's a high chance this is another nfsd (triggered by a bulk write
 * from a client's biod). Rather than syncing the file with each write
 * request, we sleep for 10 msec.
 *
 * I don't know if this roughly approximates C. Juszak's idea of
 * gathered writes, but it's a nice and simple solution (IMHO), and it
 * seems to work:-)
 *
 * Note: we do this only in the NFSv2 case, since v3 and higher have a
 * better tool (separate unstable writes and commits) for solving this
 * problem.
 */
static int wait_for_concurrent_writes(struct file *file)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	/* remember the last file written, shared across nfsd threads
	 * (unsynchronized by design - this is only a heuristic) */
	static ino_t last_ino;
	static dev_t last_dev;
	int err = 0;

	/* another writer present, or same file as last time: defer briefly
	 * in the hope more writes arrive and can be flushed together */
	if (atomic_read(&inode->i_writecount) > 1
	    || (last_ino == inode->i_ino && last_dev == inode->i_sb->s_dev)) {
		dprintk("nfsd: write defer %d\n", task_pid_nr(current));
		msleep(10);
		dprintk("nfsd: write resume %d\n", task_pid_nr(current));
	}

	if (inode->i_state & I_DIRTY) {
		dprintk("nfsd: write sync %d\n", task_pid_nr(current));
		err = vfs_fsync(file, 0);
	}
	last_ino = inode->i_ino;
	last_dev = inode->i_sb->s_dev;
	return err;
}
/*
 * Write data from the request's kvec to an already-open file.
 * *cnt returns the number of bytes written; *stablep may be upgraded
 * to FILE_SYNC when the filesystem cannot support COMMIT.
 */
static __be32
nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
				loff_t offset, struct kvec *vec, int vlen,
				unsigned long *cnt, int *stablep)
{
	struct svc_export	*exp;
	struct dentry		*dentry;
	struct inode		*inode;
	mm_segment_t		oldfs;
	__be32			err = 0;
	int			host_err;
	int			stable = *stablep;
	int			use_wgather;

	dentry = file->f_path.dentry;
	inode = dentry->d_inode;
	exp   = fhp->fh_export;

	/*
	 * Request sync writes if
	 *  -	the sync export option has been set, or
	 *  -	the client requested O_SYNC behavior (NFSv3 feature).
	 *  -	The file system doesn't support fsync().
	 * When NFSv2 gathered writes have been configured for this volume,
	 * flushing the data to disk is handled separately below.
	 */
	use_wgather = (rqstp->rq_vers == 2) && EX_WGATHER(exp);

	if (!file->f_op->fsync) {/* COMMIT3 cannot work */
	       stable = 2;
	       *stablep = 2; /* FILE_SYNC */
	}

	if (!EX_ISSYNC(exp))
		stable = 0;
	if (stable && !use_wgather) {
		/* sync write: set O_SYNC so vfs_writev itself flushes */
		spin_lock(&file->f_lock);
		file->f_flags |= O_SYNC;
		spin_unlock(&file->f_lock);
	}

	/* Write the data. */
	oldfs = get_fs(); set_fs(KERNEL_DS);
	host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
	set_fs(oldfs);
	if (host_err < 0)
		goto out_nfserr;
	*cnt = host_err;
	nfsdstats.io_write += host_err;
	fsnotify_modify(file);

	/* clear setuid/setgid flag after write */
	if (inode->i_mode & (S_ISUID | S_ISGID))
		kill_suid(dentry);

	/* gathered-write path: flush here instead of O_SYNC above */
	if (stable && use_wgather)
		host_err = wait_for_concurrent_writes(file);

out_nfserr:
	dprintk("nfsd: write complete host_err=%d\n", host_err);
	if (host_err >= 0)
		err = 0;
	else
		err = nfserrno(host_err);
	return err;
}
/*
 * Read data from a file. count must contain the requested read count
 * on entry. On return, *count contains the number of bytes actually read.
 * N.B. After this call fhp needs an fh_put
 */
__be32 nfsd_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
	loff_t offset, struct kvec *vec, int vlen, unsigned long *count)
{
	struct file *file;
	struct inode *inode;
	struct raparms	*ra;
	__be32 err;

	err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_READ, &file);
	if (err)
		return err;

	inode = file->f_path.dentry->d_inode;

	/* Get readahead parameters */
	ra = nfsd_get_raparms(inode->i_sb->s_dev, inode->i_ino);

	/* seed the fresh struct file with the cached readahead state */
	if (ra && ra->p_set)
		file->f_ra = ra->p_ra;

	err = nfsd_vfs_read(rqstp, fhp, file, offset, vec, vlen, count);

	/* Write back readahead params and drop our reference on the entry */
	if (ra) {
		struct raparm_hbucket *rab = &raparm_hash[ra->p_hindex];
		spin_lock(&rab->pb_lock);
		ra->p_ra = file->f_ra;
		ra->p_set = 1;
		ra->p_count--;
		spin_unlock(&rab->pb_lock);
	}

	nfsd_close(file);
	return err;
}
/*
 * Read from a file, preferring the already-open struct file supplied by
 * the caller.  A NULL file (the NFSv4 special stateid case) falls back
 * to nfsd_read(), which opens the file by handle itself.
 */
__be32
nfsd_read_file(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
		loff_t offset, struct kvec *vec, int vlen,
		unsigned long *count)
{
	__be32 err;

	if (!file)
		/* NFSv4 special stateid: no open file, go via the handle */
		return nfsd_read(rqstp, fhp, offset, vec, vlen, count);

	/* nfsd_open() was bypassed, so check access permission here */
	err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
			      NFSD_MAY_READ|NFSD_MAY_OWNER_OVERRIDE);
	if (err)
		return err;
	return nfsd_vfs_read(rqstp, fhp, file, offset, vec, vlen, count);
}
/*
 * Write data to a file.
 * The stable flag requests synchronous writes.
 * N.B. After this call fhp needs an fh_put
 */
__be32
nfsd_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
		loff_t offset, struct kvec *vec, int vlen, unsigned long *cnt,
		int *stablep)
{
	__be32 err;

	if (file) {
		/* caller supplied an open file: check access, then write */
		err = nfsd_permission(rqstp, fhp->fh_export, fhp->fh_dentry,
				NFSD_MAY_WRITE|NFSD_MAY_OWNER_OVERRIDE);
		if (err)
			return err;
		return nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen,
				      cnt, stablep);
	}

	/* no open file: open by handle, write, close again */
	err = nfsd_open(rqstp, fhp, S_IFREG, NFSD_MAY_WRITE, &file);
	if (err)
		return err;
	err = 0;
	if (cnt)
		err = nfsd_vfs_write(rqstp, fhp, file, offset, vec, vlen,
				     cnt, stablep);
	nfsd_close(file);
	return err;
}
#ifdef CONFIG_NFSD_V3
/*
 * Commit all pending writes to stable storage.
 *
 * Note: we only guarantee that data that lies within the range specified
 * by the 'offset' and 'count' parameters will be synced.
 *
 * Unfortunately we cannot lock the file to make sure we return full WCC
 * data to the client, as locking happens lower down in the filesystem.
 */
__be32
nfsd_commit(struct svc_rqst *rqstp, struct svc_fh *fhp,
               loff_t offset, unsigned long count)
{
	struct file	*file;
	loff_t		end = LLONG_MAX;	/* count == 0 means "to EOF" */
	__be32		err = nfserr_inval;

	if (offset < 0)
		goto out;
	if (count != 0) {
		end = offset + (loff_t)count - 1;
		/* reject a range that wraps past LLONG_MAX */
		if (end < offset)
			goto out;
	}

	/* NOT_BREAK_LEASE: a COMMIT must not disturb delegations */
	err = nfsd_open(rqstp, fhp, S_IFREG,
			NFSD_MAY_WRITE|NFSD_MAY_NOT_BREAK_LEASE, &file);
	if (err)
		goto out;
	if (EX_ISSYNC(fhp->fh_export)) {
		int err2 = vfs_fsync_range(file, offset, end, 0);

		if (err2 != -EINVAL)
			err = nfserrno(err2);
		else
			/* filesystem cannot fsync a range */
			err = nfserr_notsupp;
	}

	nfsd_close(file);
out:
	return err;
}
#endif /* CONFIG_NFSD_V3 */
/*
 * Apply the client-requested attributes to a freshly created file.
 * Mode and (for non-root) ownership are stripped first; if nothing
 * remains to set, the setattr call is skipped entirely.
 */
static __be32
nfsd_create_setattr(struct svc_rqst *rqstp, struct svc_fh *resfhp,
			struct iattr *iap)
{
	/* Mode has already been set earlier in create */
	iap->ia_valid &= ~ATTR_MODE;

	/*
	 * Setting uid/gid works only for root.  Irix appears to
	 * send along the gid on create when it tries to implement
	 * setgid directories via NFS, so drop those bits for
	 * everyone else.
	 */
	if (current_fsuid() != 0)
		iap->ia_valid &= ~(ATTR_UID|ATTR_GID);

	if (!iap->ia_valid)
		return 0;
	return nfsd_setattr(rqstp, resfhp, iap, 0, (time_t)0);
}
/* HPUX client sometimes creates a file in mode 000, and sets size to 0.
 * setting size to 0 may fail for some specific file systems by the permission
 * checking which requires WRITE permission but the mode is 000.
 * we ignore the resizing(to 0) on the just new created file, since the size is
 * 0 after file created.
 *
 * call this only after vfs_create() is called.
 * */
static void
nfsd_check_ignore_resizing(struct iattr *iap)
{
	if (!(iap->ia_valid & ATTR_SIZE))
		return;
	/* truncating a just-created file to 0 is a no-op; drop it */
	if (iap->ia_size == 0)
		iap->ia_valid &= ~ATTR_SIZE;
}
/*
 * Create a file (regular, directory, device, fifo); UNIX sockets
 * not yet implemented.
 * If the response fh has been verified, the parent directory should
 * already be locked. Note that the parent directory is left locked.
 *
 * N.B. Every call to nfsd_create needs an fh_put for _both_ fhp and resfhp
 */
__be32
nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
		char *fname, int flen, struct iattr *iap,
		int type, dev_t rdev, struct svc_fh *resfhp)
{
	struct dentry	*dentry, *dchild = NULL;
	struct inode	*dirp;
	__be32		err;
	__be32		err2;
	int		host_err;

	err = nfserr_perm;
	if (!flen)
		goto out;
	err = nfserr_exist;
	if (isdotent(fname, flen))
		goto out;

	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;

	dentry = fhp->fh_dentry;
	dirp = dentry->d_inode;

	err = nfserr_notdir;
	if (!dirp->i_op->lookup)
		goto out;
	/*
	 * Check whether the response file handle has been verified yet.
	 * If it has, the parent directory should already be locked.
	 */
	if (!resfhp->fh_dentry) {
		/* called from nfsd_proc_mkdir, or possibly nfsd3_proc_create */
		fh_lock_nested(fhp, I_MUTEX_PARENT);
		dchild = lookup_one_len(fname, dentry, flen);
		host_err = PTR_ERR(dchild);
		if (IS_ERR(dchild))
			goto out_nfserr;
		err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
		if (err)
			goto out;
	} else {
		/* called from nfsd_proc_create */
		dchild = dget(resfhp->fh_dentry);
		if (!fhp->fh_locked) {
			/* not actually possible */
			printk(KERN_ERR
				"nfsd_create: parent %s/%s not locked!\n",
				dentry->d_parent->d_name.name,
				dentry->d_name.name);
			err = nfserr_io;
			goto out;
		}
	}
	/*
	 * Make sure the child dentry is still negative ...
	 */
	err = nfserr_exist;
	if (dchild->d_inode) {
		dprintk("nfsd_create: dentry %s/%s not negative!\n",
			dentry->d_name.name, dchild->d_name.name);
		goto out;
	}

	if (!(iap->ia_valid & ATTR_MODE))
		iap->ia_mode = 0;
	/* combine client-supplied permission bits with the file type */
	iap->ia_mode = (iap->ia_mode & S_IALLUGO) | type;

	err = nfserr_inval;
	if (!S_ISREG(type) && !S_ISDIR(type) && !special_file(type)) {
		printk(KERN_WARNING "nfsd: bad file type %o in nfsd_create\n",
		       type);
		goto out;
	}

	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
	if (host_err)
		goto out_nfserr;

	/*
	 * Get the dir op function pointer.
	 */
	err = 0;
	switch (type) {
	case S_IFREG:
		host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
		if (!host_err)
			nfsd_check_ignore_resizing(iap);
		break;
	case S_IFDIR:
		host_err = vfs_mkdir(dirp, dchild, iap->ia_mode);
		break;
	case S_IFCHR:
	case S_IFBLK:
	case S_IFIFO:
	case S_IFSOCK:
		host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
		break;
	}
	if (host_err < 0) {
		mnt_drop_write(fhp->fh_export->ex_path.mnt);
		goto out_nfserr;
	}

	err = nfsd_create_setattr(rqstp, resfhp, iap);

	/*
	 * nfsd_setattr already committed the child.  Transactional filesystems
	 * had a chance to commit changes for both parent and child
	 * simultaneously making the following commit_metadata a noop.
	 */
	err2 = nfserrno(commit_metadata(fhp));
	if (err2)
		err = err2;
	mnt_drop_write(fhp->fh_export->ex_path.mnt);
	/*
	 * Update the file handle to get the new inode info.
	 */
	if (!err)
		err = fh_update(resfhp);
out:
	if (dchild && !IS_ERR(dchild))
		dput(dchild);
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out;
}
#ifdef CONFIG_NFSD_V3
/* Is this create mode one of the exclusive-create variants? */
static inline int nfsd_create_is_exclusive(int createmode)
{
	switch (createmode) {
	case NFS3_CREATE_EXCLUSIVE:
	case NFS4_CREATE_EXCLUSIVE4_1:
		return 1;
	default:
		return 0;
	}
}
/*
 * NFSv3 and NFSv4 version of nfsd_create: creates regular files only,
 * and implements the UNCHECKED/GUARDED/EXCLUSIVE create semantics.
 * For exclusive creates the client's verifier is crammed into the new
 * file's atime/mtime so a retransmitted create can be recognized.
 */
__be32
do_nfsd_create(struct svc_rqst *rqstp, struct svc_fh *fhp,
		char *fname, int flen, struct iattr *iap,
		struct svc_fh *resfhp, int createmode, u32 *verifier,
	        int *truncp, int *created)
{
	struct dentry	*dentry, *dchild = NULL;
	struct inode	*dirp;
	__be32		err;
	int		host_err;
	__u32		v_mtime=0, v_atime=0;

	err = nfserr_perm;
	if (!flen)
		goto out;
	err = nfserr_exist;
	if (isdotent(fname, flen))
		goto out;
	if (!(iap->ia_valid & ATTR_MODE))
		iap->ia_mode = 0;
	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_EXEC);
	if (err)
		goto out;

	dentry = fhp->fh_dentry;
	dirp = dentry->d_inode;

	/* Get all the sanity checks out of the way before
	 * we lock the parent. */
	err = nfserr_notdir;
	if (!dirp->i_op->lookup)
		goto out;
	fh_lock_nested(fhp, I_MUTEX_PARENT);

	/*
	 * Compose the response file handle.
	 */
	dchild = lookup_one_len(fname, dentry, flen);
	host_err = PTR_ERR(dchild);
	if (IS_ERR(dchild))
		goto out_nfserr;

	/* If file doesn't exist, check for permissions to create one */
	if (!dchild->d_inode) {
		err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
		if (err)
			goto out;
	}

	err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
	if (err)
		goto out;

	if (nfsd_create_is_exclusive(createmode)) {
		/* solaris7 gets confused (bugid 4218508) if these have
		 * the high bit set, so just clear the high bits. If this is
		 * ever changed to use different attrs for storing the
		 * verifier, then do_open_lookup() will also need to be fixed
		 * accordingly.
		 */
		v_mtime = verifier[0]&0x7fffffff;
		v_atime = verifier[1]&0x7fffffff;
	}
	
	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
	if (host_err)
		goto out_nfserr;

	if (dchild->d_inode) {
		/* the file already exists; outcome depends on create mode */
		err = 0;

		switch (createmode) {
		case NFS3_CREATE_UNCHECKED:
			if (! S_ISREG(dchild->d_inode->i_mode))
				err = nfserr_exist;
			else if (truncp) {
				/* in nfsv4, we need to treat this case a little
				 * differently.  we don't want to truncate the
				 * file now; this would be wrong if the OPEN
				 * fails for some other reason.  furthermore,
				 * if the size is nonzero, we should ignore it
				 * according to spec!
				 */
				*truncp = (iap->ia_valid & ATTR_SIZE) && !iap->ia_size;
			}
			else {
				iap->ia_valid &= ATTR_SIZE;
				goto set_attr;
			}
			break;
		case NFS3_CREATE_EXCLUSIVE:
			/* verifier match: this is a retransmission, succeed */
			if (   dchild->d_inode->i_mtime.tv_sec == v_mtime
			    && dchild->d_inode->i_atime.tv_sec == v_atime
			    && dchild->d_inode->i_size  == 0 )
				break;
		case NFS4_CREATE_EXCLUSIVE4_1:
			if (   dchild->d_inode->i_mtime.tv_sec == v_mtime
			    && dchild->d_inode->i_atime.tv_sec == v_atime
			    && dchild->d_inode->i_size  == 0 )
				goto set_attr;
			 /* fallthru */
		case NFS3_CREATE_GUARDED:
			err = nfserr_exist;
		}
		mnt_drop_write(fhp->fh_export->ex_path.mnt);
		goto out;
	}

	host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
	if (host_err < 0) {
		mnt_drop_write(fhp->fh_export->ex_path.mnt);
		goto out_nfserr;
	}
	if (created)
		*created = 1;

	nfsd_check_ignore_resizing(iap);

	if (nfsd_create_is_exclusive(createmode)) {
		/* Cram the verifier into atime/mtime */
		iap->ia_valid = ATTR_MTIME|ATTR_ATIME
			| ATTR_MTIME_SET|ATTR_ATIME_SET;
		/* XXX someone who knows this better please fix it for nsec */ 
		iap->ia_mtime.tv_sec = v_mtime;
		iap->ia_atime.tv_sec = v_atime;
		iap->ia_mtime.tv_nsec = 0;
		iap->ia_atime.tv_nsec = 0;
	}

 set_attr:
	err = nfsd_create_setattr(rqstp, resfhp, iap);

	/*
	 * nfsd_setattr already committed the child (and possibly also the parent).
	 */
	if (!err)
		err = nfserrno(commit_metadata(fhp));
	mnt_drop_write(fhp->fh_export->ex_path.mnt);

	/*
	 * Update the filehandle to get the new inode info.
	 */
	if (!err)
		err = fh_update(resfhp);

 out:
	fh_unlock(fhp);
	if (dchild && !IS_ERR(dchild))
		dput(dchild);
	return err;
 
 out_nfserr:
	err = nfserrno(host_err);
	goto out;
}
#endif /* CONFIG_NFSD_V3 */
/*
 * Read a symlink. On entry, *lenp must contain the maximum path length that
 * fits into the buffer. On return, it contains the true length.
 * N.B. After this call fhp needs an fh_put
 */
__be32
nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
{
	struct dentry	*dentry;
	struct inode	*inode;
	mm_segment_t	oldfs;
	__be32		err;
	int		host_err;

	err = fh_verify(rqstp, fhp, S_IFLNK, NFSD_MAY_NOP);
	if (err)
		goto out;

	dentry = fhp->fh_dentry;
	inode = dentry->d_inode;

	err = nfserr_inval;
	if (!inode->i_op->readlink)
		goto out;

	touch_atime(fhp->fh_export->ex_path.mnt, dentry);
	/* N.B. Why does this call need a get_fs()??
	 * Remove the set_fs and watch the fireworks:-) --okir
	 */

	/* ->readlink expects a user-space buffer; buf here is kernel memory */
	oldfs = get_fs(); set_fs(KERNEL_DS);
	host_err = inode->i_op->readlink(dentry, buf, *lenp);
	set_fs(oldfs);

	if (host_err < 0)
		goto out_nfserr;
	*lenp = host_err;
	err = 0;
out:
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out;
}
/*
 * Create a symlink and look up its inode
 * N.B. After this call _both_ fhp and resfhp need an fh_put
 *
 * The target path arrives straight from the XDR buffer and may not be
 * NUL-terminated; in that case a terminated copy is made before calling
 * vfs_symlink(), which expects a C string.
 */
__be32
nfsd_symlink(struct svc_rqst *rqstp, struct svc_fh *fhp,
				char *fname, int flen,
				char *path,  int plen,
				struct svc_fh *resfhp,
				struct iattr *iap)
{
	struct dentry	*dentry, *dnew;
	__be32		err, cerr;
	int		host_err;

	err = nfserr_noent;
	if (!flen || !plen)
		goto out;
	err = nfserr_exist;
	if (isdotent(fname, flen))
		goto out;

	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;
	fh_lock(fhp);
	dentry = fhp->fh_dentry;
	dnew = lookup_one_len(fname, dentry, flen);
	host_err = PTR_ERR(dnew);
	if (IS_ERR(dnew))
		goto out_nfserr;

	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
	if (host_err)
		goto out_nfserr;

	if (unlikely(path[plen] != 0)) {
		/* not NUL-terminated: build a terminated copy */
		char *path_alloced = kmalloc(plen+1, GFP_KERNEL);
		if (path_alloced == NULL)
			host_err = -ENOMEM;
		else {
			/* copy exactly plen bytes and terminate explicitly;
			 * memcpy is the right tool here, not strncpy */
			memcpy(path_alloced, path, plen);
			path_alloced[plen] = 0;
			host_err = vfs_symlink(dentry->d_inode, dnew, path_alloced);
			kfree(path_alloced);
		}
	} else
		host_err = vfs_symlink(dentry->d_inode, dnew, path);
	err = nfserrno(host_err);
	if (!err)
		err = nfserrno(commit_metadata(fhp));
	fh_unlock(fhp);

	mnt_drop_write(fhp->fh_export->ex_path.mnt);

	/* compose the response handle even on error; callers expect it */
	cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
	dput(dnew);
	if (err==0) err = cerr;
out:
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out;
}
/*
 * Create a hardlink
 * N.B. After this call _both_ ffhp and tfhp need an fh_put
 */
__be32
nfsd_link(struct svc_rqst *rqstp, struct svc_fh *ffhp,
				char *name, int len, struct svc_fh *tfhp)
{
	struct dentry	*ddir, *dnew, *dold;
	struct inode	*dirp;
	__be32		err;
	int		host_err;

	err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;
	/* -S_IFDIR: the link target may be anything except a directory */
	err = fh_verify(rqstp, tfhp, -S_IFDIR, NFSD_MAY_NOP);
	if (err)
		goto out;
	err = nfserr_perm;
	if (!len)
		goto out;
	err = nfserr_exist;
	if (isdotent(name, len))
		goto out;

	fh_lock_nested(ffhp, I_MUTEX_PARENT);
	ddir = ffhp->fh_dentry;
	dirp = ddir->d_inode;

	dnew = lookup_one_len(name, ddir, len);
	host_err = PTR_ERR(dnew);
	if (IS_ERR(dnew))
		goto out_nfserr;

	dold = tfhp->fh_dentry;

	host_err = mnt_want_write(tfhp->fh_export->ex_path.mnt);
	if (host_err) {
		err = nfserrno(host_err);
		goto out_dput;
	}
	err = nfserr_noent;
	if (!dold->d_inode)
		goto out_drop_write;
	host_err = nfsd_break_lease(dold->d_inode);
	if (host_err) {
		err = nfserrno(host_err);
		goto out_drop_write;
	}
	host_err = vfs_link(dold, dirp, dnew);
	if (!host_err) {
		err = nfserrno(commit_metadata(ffhp));
		if (!err)
			err = nfserrno(commit_metadata(tfhp));
	} else {
		/* NFSv2 has no nfserr_xdev; report EXDEV as EACCES instead */
		if (host_err == -EXDEV && rqstp->rq_vers == 2)
			err = nfserr_acces;
		else
			err = nfserrno(host_err);
	}
out_drop_write:
	mnt_drop_write(tfhp->fh_export->ex_path.mnt);
out_dput:
	dput(dnew);
out_unlock:
	fh_unlock(ffhp);
out:
	return err;

out_nfserr:
	err = nfserrno(host_err);
	goto out_unlock;
}
/*
 * Rename a file
 * N.B. After this call _both_ ffhp and tfhp need an fh_put
 */
__be32
nfsd_rename(struct svc_rqst *rqstp, struct svc_fh *ffhp, char *fname, int flen,
			    struct svc_fh *tfhp, char *tname, int tlen)
{
	struct dentry	*fdentry, *tdentry, *odentry, *ndentry, *trap;
	struct inode	*fdir, *tdir;
	__be32		err;
	int		host_err;

	err = fh_verify(rqstp, ffhp, S_IFDIR, NFSD_MAY_REMOVE);
	if (err)
		goto out;
	err = fh_verify(rqstp, tfhp, S_IFDIR, NFSD_MAY_CREATE);
	if (err)
		goto out;

	fdentry = ffhp->fh_dentry;
	fdir = fdentry->d_inode;

	tdentry = tfhp->fh_dentry;
	tdir = tdentry->d_inode;

	/* cross-export rename is not allowed; NFSv2 lacks nfserr_xdev */
	err = (rqstp->rq_vers == 2) ? nfserr_acces : nfserr_xdev;
	if (ffhp->fh_export != tfhp->fh_export)
		goto out;

	err = nfserr_perm;
	if (!flen || isdotent(fname, flen) || !tlen || isdotent(tname, tlen))
		goto out;

	/* cannot use fh_lock as we need deadlock protective ordering
	 * so do it by hand  */
	trap = lock_rename(tdentry, fdentry);
	ffhp->fh_locked = tfhp->fh_locked = 1;
	fill_pre_wcc(ffhp);
	fill_pre_wcc(tfhp);

	odentry = lookup_one_len(fname, fdentry, flen);
	host_err = PTR_ERR(odentry);
	if (IS_ERR(odentry))
		goto out_nfserr;

	host_err = -ENOENT;
	if (!odentry->d_inode)
		goto out_dput_old;
	host_err = -EINVAL;
	/* renaming an ancestor into its own subtree is invalid */
	if (odentry == trap)
		goto out_dput_old;

	ndentry = lookup_one_len(tname, tdentry, tlen);
	host_err = PTR_ERR(ndentry);
	if (IS_ERR(ndentry))
		goto out_dput_old;
	host_err = -ENOTEMPTY;
	if (ndentry == trap)
		goto out_dput_new;

	host_err = -EXDEV;
	if (ffhp->fh_export->ex_path.mnt != tfhp->fh_export->ex_path.mnt)
		goto out_dput_new;
	host_err = mnt_want_write(ffhp->fh_export->ex_path.mnt);
	if (host_err)
		goto out_dput_new;

	host_err = nfsd_break_lease(odentry->d_inode);
	if (host_err)
		goto out_drop_write;
	if (ndentry->d_inode) {
		host_err = nfsd_break_lease(ndentry->d_inode);
		if (host_err)
			goto out_drop_write;
	}
	host_err = vfs_rename(fdir, odentry, tdir, ndentry);
	if (!host_err) {
		host_err = commit_metadata(tfhp);
		if (!host_err)
			host_err = commit_metadata(ffhp);
	}
out_drop_write:
	mnt_drop_write(ffhp->fh_export->ex_path.mnt);
 out_dput_new:
	dput(ndentry);
 out_dput_old:
	dput(odentry);
 out_nfserr:
	err = nfserrno(host_err);

	/* we cannot rely on fh_unlock on the two filehandles,
	 * as that would do the wrong thing if the two directories
	 * were the same, so again we do it by hand
	 */
	fill_post_wcc(ffhp);
	fill_post_wcc(tfhp);
	unlock_rename(tdentry, fdentry);
	ffhp->fh_locked = tfhp->fh_locked = 0;

out:
	return err;
}
/*
 * Unlink a file or directory
 * N.B. After this call fhp needs an fh_put
 */
__be32
nfsd_unlink(struct svc_rqst *rqstp, struct svc_fh *fhp, int type,
				char *fname, int flen)
{
	struct dentry	*dentry, *rdentry;
	struct inode	*dirp;
	__be32		err;
	int		host_err;

	err = nfserr_acces;
	if (!flen || isdotent(fname, flen))
		goto out;
	err = fh_verify(rqstp, fhp, S_IFDIR, NFSD_MAY_REMOVE);
	if (err)
		goto out;

	fh_lock_nested(fhp, I_MUTEX_PARENT);
	dentry = fhp->fh_dentry;
	dirp = dentry->d_inode;

	rdentry = lookup_one_len(fname, dentry, flen);
	host_err = PTR_ERR(rdentry);
	if (IS_ERR(rdentry))
		goto out_nfserr;

	if (!rdentry->d_inode) {
		dput(rdentry);
		err = nfserr_noent;
		goto out;
	}

	/* type == 0 (NFSv2 REMOVE): deduce it from the inode */
	if (!type)
		type = rdentry->d_inode->i_mode & S_IFMT;

	host_err = mnt_want_write(fhp->fh_export->ex_path.mnt);
	if (host_err)
		goto out_put;

	host_err = nfsd_break_lease(rdentry->d_inode);
	if (host_err)
		goto out_drop_write;
	if (type != S_IFDIR)
		host_err = vfs_unlink(dirp, rdentry);
	else
		host_err = vfs_rmdir(dirp, rdentry);
	if (!host_err)
		host_err = commit_metadata(fhp);
out_drop_write:
	mnt_drop_write(fhp->fh_export->ex_path.mnt);
out_put:
	dput(rdentry);

out_nfserr:
	err = nfserrno(host_err);
out:
	return err;
}
/*
 * We do this buffering because we must not call back into the file
 * system's ->lookup() method from the filldir callback. That may well
 * deadlock a number of file systems.
 *
 * This is based heavily on the implementation of same in XFS.
 */

/* one directory entry as packed into the per-page buffer below */
struct buffered_dirent {
	u64		ino;		/* inode number */
	loff_t		offset;		/* directory offset of this entry */
	int		namlen;		/* length of name[] (no NUL) */
	unsigned int	d_type;		/* DT_* file type */
	char		name[];		/* entry name, not NUL-terminated */
};

/* state shared between nfsd_buffered_readdir and its filldir callback */
struct readdir_data {
	char		*dirent;	/* one page of buffered_dirent records */
	size_t		used;		/* bytes of dirent currently filled */
	int		full;		/* set when a record didn't fit */
};
/*
 * filldir callback: append one entry to the readdir_data page buffer.
 * Returns -EINVAL (and sets buf->full) when the entry doesn't fit, which
 * stops vfs_readdir for this round; the caller retries after draining.
 */
static int nfsd_buffered_filldir(void *__buf, const char *name, int namlen,
				 loff_t offset, u64 ino, unsigned int d_type)
{
	struct readdir_data *buf = __buf;
	struct buffered_dirent *de = (void *)(buf->dirent + buf->used);
	unsigned int reclen;

	/* records are u64-aligned so the next header is naturally aligned */
	reclen = ALIGN(sizeof(struct buffered_dirent) + namlen, sizeof(u64));
	if (buf->used + reclen > PAGE_SIZE) {
		buf->full = 1;
		return -EINVAL;
	}

	de->namlen = namlen;
	de->offset = offset;
	de->ino = ino;
	de->d_type = d_type;
	memcpy(de->name, name, namlen);
	buf->used += reclen;

	return 0;
}
/*
 * Read the directory in page-sized batches via nfsd_buffered_filldir,
 * then replay each batch through the protocol's filldir function with
 * i_mutex held (the protocol callbacks may call lookup_one_len, which
 * expects the mutex - see comment below).
 */
static __be32 nfsd_buffered_readdir(struct file *file, filldir_t func,
				    struct readdir_cd *cdp, loff_t *offsetp)
{
	struct readdir_data buf;
	struct buffered_dirent *de;
	int host_err;
	int size;
	loff_t offset;

	buf.dirent = (void *)__get_free_page(GFP_KERNEL);
	if (!buf.dirent)
		return nfserrno(-ENOMEM);

	offset = *offsetp;

	while (1) {
		struct inode *dir_inode = file->f_path.dentry->d_inode;
		unsigned int reclen;

		cdp->err = nfserr_eof; /* will be cleared on successful read */
		buf.used = 0;
		buf.full = 0;

		host_err = vfs_readdir(file, nfsd_buffered_filldir, &buf);
		if (buf.full)
			/* buffer filled up - not an error, drain and continue */
			host_err = 0;

		if (host_err < 0)
			break;

		size = buf.used;

		if (!size)
			break;

		/*
		 * Various filldir functions may end up calling back into
		 * lookup_one_len() and the file system's ->lookup() method.
		 * These expect i_mutex to be held, as it would within readdir.
		 */
		host_err = mutex_lock_killable(&dir_inode->i_mutex);
		if (host_err)
			break;

		/* replay the buffered records through the protocol callback */
		de = (struct buffered_dirent *)buf.dirent;
		while (size > 0) {
			offset = de->offset;

			if (func(cdp, de->name, de->namlen, de->offset,
				 de->ino, de->d_type))
				break;

			if (cdp->err != nfs_ok)
				break;

			reclen = ALIGN(sizeof(*de) + de->namlen,
				       sizeof(u64));
			size -= reclen;
			de = (struct buffered_dirent *)((char *)de + reclen);
		}
		mutex_unlock(&dir_inode->i_mutex);
		if (size > 0) /* We bailed out early */
			break;

		offset = vfs_llseek(file, 0, SEEK_CUR);
	}

	free_page((unsigned long)(buf.dirent));

	if (host_err)
		return nfserrno(host_err);

	*offsetp = offset;
	return cdp->err;
}
/*
 * Read entries from a directory.
 * The  NFSv3/4 verifier we ignore for now.
 *
 * Opens the directory, seeks to the client-supplied cookie, and hands
 * off to nfsd_buffered_readdir(); *offsetp returns the next cookie.
 */
__be32
nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp, 
	     struct readdir_cd *cdp, filldir_t func)
{
	__be32		err;
	struct file	*file;
	loff_t		offset = *offsetp;

	err = nfsd_open(rqstp, fhp, S_IFDIR, NFSD_MAY_READ, &file);
	if (err)
		goto out;

	/* use SEEK_SET (not a bare 0) to match the SEEK_CUR usage in
	 * nfsd_buffered_readdir */
	offset = vfs_llseek(file, offset, SEEK_SET);
	if (offset < 0) {
		err = nfserrno((int)offset);
		goto out_close;
	}

	err = nfsd_buffered_readdir(file, func, cdp, offsetp);

	/* EOF / buffer-too-small are reported via cdp->err, not here */
	if (err == nfserr_eof || err == nfserr_toosmall)
		err = nfs_ok; /* can still be found in ->err */
out_close:
	nfsd_close(file);
out:
	return err;
}
/*
 * Get file system stats
 * N.B. After this call fhp needs an fh_put
 */
__be32
nfsd_statfs(struct svc_rqst *rqstp, struct svc_fh *fhp, struct kstatfs *stat, int access)
{
	struct path path;
	__be32 err;

	err = fh_verify(rqstp, fhp, 0, NFSD_MAY_NOP | access);
	if (err)
		return err;

	path.mnt = fhp->fh_export->ex_path.mnt;
	path.dentry = fhp->fh_dentry;
	if (vfs_statfs(&path, stat))
		err = nfserr_io;
	return err;
}
/* Nonzero iff this export is read-only for the requesting client. */
static int exp_rdonly(struct svc_rqst *rqstp, struct svc_export *exp)
{
	int flags = nfsexp_flags(rqstp, exp);

	return flags & NFSEXP_READONLY;
}
/*
 * Check for a user's access permissions to this inode.
 */
__be32
nfsd_permission(struct svc_rqst *rqstp, struct svc_export *exp,
					struct dentry *dentry, int acc)
{
	struct inode	*inode = dentry->d_inode;
	int		err;

	if ((acc & NFSD_MAY_MASK) == NFSD_MAY_NOP)
		return 0;
#if 0
	dprintk("nfsd: permission 0x%x%s%s%s%s%s%s%s mode 0%o%s%s%s\n",
		acc,
		(acc & NFSD_MAY_READ)?	" read"  : "",
		(acc & NFSD_MAY_WRITE)?	" write" : "",
		(acc & NFSD_MAY_EXEC)?	" exec"  : "",
		(acc & NFSD_MAY_SATTR)?	" sattr" : "",
		(acc & NFSD_MAY_TRUNC)?	" trunc" : "",
		(acc & NFSD_MAY_LOCK)?	" lock"  : "",
		(acc & NFSD_MAY_OWNER_OVERRIDE)? " owneroverride" : "",
		inode->i_mode,
		IS_IMMUTABLE(inode)?	" immut" : "",
		IS_APPEND(inode)?	" append" : "",
		__mnt_is_readonly(exp->ex_path.mnt)?	" ro" : "");
	dprintk("      owner %d/%d user %d/%d\n",
		inode->i_uid, inode->i_gid, current_fsuid(), current_fsgid());
#endif

	/* Normally we reject any write/sattr etc access on a read-only file
	 * system.  But if it is IRIX doing check on write-access for a
	 * device special file, we ignore rofs.
	 */
	if (!(acc & NFSD_MAY_LOCAL_ACCESS))
		if (acc & (NFSD_MAY_WRITE | NFSD_MAY_SATTR | NFSD_MAY_TRUNC)) {
			if (exp_rdonly(rqstp, exp) ||
			    __mnt_is_readonly(exp->ex_path.mnt))
				return nfserr_rofs;
			if (/* (acc & NFSD_MAY_WRITE) && */ IS_IMMUTABLE(inode))
				return nfserr_perm;
		}
	if ((acc & NFSD_MAY_TRUNC) && IS_APPEND(inode))
		return nfserr_perm;

	if (acc & NFSD_MAY_LOCK) {
		/* If we cannot rely on authentication in NLM requests,
		 * just allow locks, otherwise require read permission, or
		 * ownership
		 */
		if (exp->ex_flags & NFSEXP_NOAUTHNLM)
			return 0;
		else
			acc = NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE;
	}
	/*
	 * The file owner always gets access permission for accesses that
	 * would normally be checked at open time. This is to make
	 * file access work even when the client has done a fchmod(fd, 0).
	 *
	 * However, `cp foo bar' should fail nevertheless when bar is
	 * readonly. A sensible way to do this might be to reject all
	 * attempts to truncate a read-only file, because a creat() call
	 * always implies file truncation.
	 * ... but this isn't really fair.  A process may reasonably call
	 * ftruncate on an open file descriptor on a file with perm 000.
	 * We must trust the client to do permission checking - using "ACCESS"
	 * with NFSv3.
	 */
	if ((acc & NFSD_MAY_OWNER_OVERRIDE) &&
	    inode->i_uid == current_fsuid())
		return 0;

	/* This assumes  NFSD_MAY_{READ,WRITE,EXEC} == MAY_{READ,WRITE,EXEC} */
	err = inode_permission(inode, acc & (MAY_READ|MAY_WRITE|MAY_EXEC));

	/* Allow read access to binaries even when mode 111 */
	if (err == -EACCES && S_ISREG(inode->i_mode) &&
	    (acc == (NFSD_MAY_READ | NFSD_MAY_OWNER_OVERRIDE) ||
	     acc == (NFSD_MAY_READ | NFSD_MAY_READ_IF_EXEC)))
		err = inode_permission(inode, MAY_EXEC);

	return err? nfserrno(err) : 0;
}
/* Free every readahead-parameter entry in every hash bucket. */
void
nfsd_racache_shutdown(void)
{
	struct raparms *ra, *next;
	unsigned int i;

	dprintk("nfsd: freeing readahead buffers.\n");
	for (i = 0; i < RAPARM_HASH_SIZE; i++) {
		for (ra = raparm_hash[i].pb_head; ra; ra = next) {
			next = ra->p_next;
			kfree(ra);
		}
		raparm_hash[i].pb_head = NULL;
	}
}
/*
 * Initialize readahead param cache: allocate at least cache_size entries,
 * rounded up so each hash bucket holds the same number (minimum 2).
 * Returns 0 on success, -ENOMEM on allocation failure (all partially
 * allocated entries are freed).  A second call is a no-op.
 */
int
nfsd_racache_init(int cache_size)
{
	int	i;
	int	j = 0;
	int	nperbucket;
	struct raparms **raparm = NULL;


	/* already initialized? */
	if (raparm_hash[0].pb_head)
		return 0;
	nperbucket = DIV_ROUND_UP(cache_size, RAPARM_HASH_SIZE);
	if (nperbucket < 2)
		nperbucket = 2;
	cache_size = nperbucket * RAPARM_HASH_SIZE;

	dprintk("nfsd: allocating %d readahead buffers.\n", cache_size);

	for (i = 0; i < RAPARM_HASH_SIZE; i++) {
		spin_lock_init(&raparm_hash[i].pb_lock);

		/* build the bucket's singly-linked list in place */
		raparm = &raparm_hash[i].pb_head;
		for (j = 0; j < nperbucket; j++) {
			*raparm = kzalloc(sizeof(struct raparms), GFP_KERNEL);
			if (!*raparm)
				goto out_nomem;
			raparm = &(*raparm)->p_next;
		}
		*raparm = NULL;
	}

	nfsdstats.ra_size = cache_size;
	return 0;

out_nomem:
	dprintk("nfsd: kmalloc failed, freeing readahead buffers\n");
	nfsd_racache_shutdown();
	return -ENOMEM;
}
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
/*
 * Fetch the POSIX ACL of the given type (access/default) for a file
 * handle by reading the corresponding xattr.  Returns the decoded ACL
 * (caller must release it) or an ERR_PTR.
 */
struct posix_acl *
nfsd_get_posix_acl(struct svc_fh *fhp, int type)
{
	struct inode *inode = fhp->fh_dentry->d_inode;
	char *name;
	void *value = NULL;
	ssize_t size;
	struct posix_acl *acl;

	if (!IS_POSIXACL(inode))
		return ERR_PTR(-EOPNOTSUPP);

	switch (type) {
	case ACL_TYPE_ACCESS:
		name = POSIX_ACL_XATTR_ACCESS;
		break;
	case ACL_TYPE_DEFAULT:
		name = POSIX_ACL_XATTR_DEFAULT;
		break;
	default:
		return ERR_PTR(-EOPNOTSUPP);
	}

	/* nfsd_getxattr allocates value; we own it until the kfree below */
	size = nfsd_getxattr(fhp->fh_dentry, name, &value);
	if (size < 0)
		return ERR_PTR(size);

	acl = posix_acl_from_xattr(value, size);
	kfree(value);
	return acl;
}
/*
 * Store (or remove) the POSIX ACL of the given type for a file handle.
 * A NULL or empty ACL removes the xattr; removing a nonexistent default
 * ACL on a non-directory, or getting -ENODATA back, is treated as success.
 * Returns 0 or a negative errno.
 */
int
nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
{
	struct inode *inode = fhp->fh_dentry->d_inode;
	char *name;
	void *value = NULL;
	size_t size;
	int error;

	if (!IS_POSIXACL(inode) ||
	    !inode->i_op->setxattr || !inode->i_op->removexattr)
		return -EOPNOTSUPP;
	switch(type) {
		case ACL_TYPE_ACCESS:
			name = POSIX_ACL_XATTR_ACCESS;
			break;
		case ACL_TYPE_DEFAULT:
			name = POSIX_ACL_XATTR_DEFAULT;
			break;
		default:
			return -EOPNOTSUPP;
	}

	if (acl && acl->a_count) {
		/* encode the ACL into xattr format */
		size = posix_acl_xattr_size(acl->a_count);
		value = kmalloc(size, GFP_KERNEL);
		if (!value)
			return -ENOMEM;
		error = posix_acl_to_xattr(acl, value, size);
		if (error < 0)
			goto getout;
		size = error;
	} else
		size = 0;

	error = mnt_want_write(fhp->fh_export->ex_path.mnt);
	if (error)
		goto getout;
	if (size)
		error = vfs_setxattr(fhp->fh_dentry, name, value, size, 0);
	else {
		if (!S_ISDIR(inode->i_mode) && type == ACL_TYPE_DEFAULT)
			error = 0;
		else {
			error = vfs_removexattr(fhp->fh_dentry, name);
			if (error == -ENODATA)
				error = 0;
		}
	}
	mnt_drop_write(fhp->fh_export->ex_path.mnt);

getout:
	kfree(value);
	return error;
}
#endif /* defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL) */
| gpl-2.0 |
mydongistiny/android_kernel_motorola_shamu | drivers/usb/gadget/u_sdio.c | 2446 | 26328 | /*
* u_sdio.c - utilities for USB gadget serial over sdio
*
* This code also borrows from drivers/usb/gadget/u_serial.c, which is
* Copyright (C) 2003 Al Borchers (alborchers@steinerpoint.com)
* Copyright (C) 2008 David Brownell
* Copyright (C) 2008 by Nokia Corporation
* Copyright (c) 2011, The Linux Foundation. All rights reserved.
*
* This program from The Linux Foundation is free software; you can
* redistribute it and/or modify it under the GNU General Public License
* version 2 and only version 2 as published by the Free Software Foundation.
* The original work available from [kernel.org] is subject to the notice below.
*
* This software is distributed under the terms of the GNU General
* Public License ("GPL") as published by the Free Software Foundation,
* either version 2 of that License or (at your option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/termios.h>
#include <linux/debugfs.h>
#include <mach/sdio_al.h>
#include <mach/sdio_cmux.h>
#include "u_serial.h"
#define SDIO_RX_QUEUE_SIZE 8
#define SDIO_RX_BUF_SIZE 2048
#define SDIO_TX_QUEUE_SIZE 8
#define SDIO_TX_BUF_SIZE 2048
/* 1 - DUN, 2-NMEA/GPS */
#define SDIO_N_PORTS 2
/* Per-port bookkeeping: logical port plus the platform driver that is
 * registered to match the SDIO data channel device. */
static struct sdio_portmaster {
	struct mutex lock;
	struct gsdio_port *port;
	struct platform_driver gsdio_ch;
} sdio_ports[SDIO_N_PORTS];
/* number of ports actually allocated by gsdio_setup() */
static unsigned n_sdio_ports;
struct sdio_port_info {
	/* data channel info */
	char *data_ch_name;
	struct sdio_channel *ch;
	/* control channel info */
	int ctrl_ch_id;
};
/* static channel table: port 0 = DUN, port 1 = NMEA/GPS */
struct sdio_port_info sport_info[SDIO_N_PORTS] = {
	{
		.data_ch_name = "SDIO_DUN",
		.ctrl_ch_id = 9,
	},
	{
		.data_ch_name = "SDIO_NMEA",
		.ctrl_ch_id = 10,
	},
};
/* single-threaded workqueue running push/pull/notify work items */
static struct workqueue_struct *gsdio_wq;
/* State of one USB-serial-over-SDIO port.  port_lock protects the
 * request lists, their length counters and port_usb. */
struct gsdio_port {
	unsigned port_num;
	spinlock_t port_lock;
	/* bytes of the current OUT request already written to SDIO
	 * (non-zero while a partial sdio_write() is being retried) */
	unsigned n_read;
	struct list_head read_pool;	/* free OUT requests */
	struct list_head read_queue;	/* completed OUT reqs awaiting SDIO write */
	struct work_struct push;	/* USB -> SDIO worker (gsdio_rx_push) */
	unsigned long rp_len;		/* read_pool length */
	unsigned long rq_len;		/* read_queue length */
	struct list_head write_pool;	/* free IN requests */
	struct work_struct pull;	/* SDIO -> USB worker (gsdio_tx_pull) */
	unsigned long wp_len;		/* write_pool length */
	struct work_struct notify_modem; /* pushes cbits_to_modem via cmux */
	struct gserial *port_usb;	/* non-NULL while USB is connected */
	struct usb_cdc_line_coding line_coding;
	int sdio_open;			/* SDIO data channel is open */
	int sdio_probe;			/* platform probe has run */
	int ctrl_ch_err;		/* cmux control channel failed to open */
	struct sdio_port_info *sport_info;
	struct delayed_work sdio_open_work;
#define SDIO_ACM_CTRL_RI		(1 << 3)
#define SDIO_ACM_CTRL_DSR		(1 << 1)
#define SDIO_ACM_CTRL_DCD		(1 << 0)
	int cbits_to_laptop;		/* modem status reported to the host */
#define SDIO_ACM_CTRL_RTS	(1 << 1)	/* unused with full duplex */
#define SDIO_ACM_CTRL_DTR	(1 << 0)	/* host is ready for data r/w */
	int cbits_to_modem;		/* host control bits sent to the modem */
	/* pkt logging */
	unsigned long nbytes_tolaptop;
	unsigned long nbytes_tomodem;
};
/* Release one request and the buffer allocated for it by gsdio_alloc_req(). */
void gsdio_free_req(struct usb_ep *ep, struct usb_request *req)
{
	kfree(req->buf);
	usb_ep_free_request(ep, req);
}
/*
 * Allocate one usb_request with a @len byte transfer buffer on @ep.
 * Returns NULL if either allocation fails.
 */
struct usb_request *
gsdio_alloc_req(struct usb_ep *ep, unsigned len, gfp_t flags)
{
	struct usb_request *r = usb_ep_alloc_request(ep, flags);

	if (!r) {
		pr_err("%s: usb alloc request failed\n", __func__);
		return NULL;
	}

	r->buf = kmalloc(len, flags);
	if (!r->buf) {
		pr_err("%s: request buf allocation failed\n", __func__);
		usb_ep_free_request(ep, r);
		return NULL;
	}
	r->length = len;

	return r;
}
/* Drain @head, freeing every request (and its buffer) on the list. */
void gsdio_free_requests(struct usb_ep *ep, struct list_head *head)
{
	while (!list_empty(head)) {
		struct usb_request *req = list_first_entry(head,
						struct usb_request, list);

		list_del(&req->list);
		gsdio_free_req(ep, req);
	}
}
/*
 * Populate @head with up to @num requests of @size bytes each, completion
 * handler @cb.  On allocation failure, returns 0 if at least one request
 * was obtained, -ENOMEM if none were.
 */
int gsdio_alloc_requests(struct usb_ep *ep, struct list_head *head,
		int num, int size,
		void (*cb)(struct usb_ep *ep, struct usb_request *))
{
	int allocated = 0;

	pr_debug("%s: ep:%p head:%p num:%d size:%d cb:%p", __func__,
			ep, head, num, size, cb);

	while (allocated < num) {
		struct usb_request *req = gsdio_alloc_req(ep, size,
							GFP_ATOMIC);

		if (!req) {
			pr_debug("%s: req allocated:%d\n", __func__,
					allocated);
			return list_empty(head) ? -ENOMEM : 0;
		}
		req->complete = cb;
		list_add(&req->list, head);
		allocated++;
	}

	return 0;
}
/*
 * Submit every pooled OUT request to the USB endpoint so the host can
 * send data.  Takes port_lock and drops it around each usb_ep_queue()
 * call, so port_usb is rechecked after every submission.
 */
void gsdio_start_rx(struct gsdio_port *port)
{
	struct list_head *pool;
	struct usb_ep *out;
	int ret;

	if (!port) {
		pr_err("%s: port is null\n", __func__);
		return;
	}

	pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);

	spin_lock_irq(&port->port_lock);

	if (!port->port_usb) {
		pr_debug("%s: usb is disconnected\n", __func__);
		goto start_rx_end;
	}

	if (!port->sdio_open) {
		pr_debug("%s: sdio is not open\n", __func__);
		goto start_rx_end;
	}

	pool = &port->read_pool;
	out = port->port_usb->out;

	while (!list_empty(pool)) {
		struct usb_request *req;

		req = list_entry(pool->next, struct usb_request, list);
		list_del(&req->list);
		req->length = SDIO_RX_BUF_SIZE;
		port->rp_len--;

		/* don't hold the spinlock across the endpoint queue call */
		spin_unlock_irq(&port->port_lock);
		ret = usb_ep_queue(out, req, GFP_ATOMIC);
		spin_lock_irq(&port->port_lock);
		if (ret) {
			pr_err("%s: usb ep out queue failed"
					"port:%p, port#%d\n",
					__func__, port, port->port_num);
			/* return the request to the pool and stop */
			list_add_tail(&req->list, pool);
			port->rp_len++;
			break;
		}

		/* usb could have disconnected while we released spin lock */
		if (!port->port_usb) {
			pr_debug("%s: usb is disconnected\n", __func__);
			goto start_rx_end;
		}
	}
start_rx_end:
	spin_unlock_irq(&port->port_lock);
}
/*
 * Write (part of) one completed OUT request's payload into the SDIO data
 * channel.  Called with port_lock held; DROPS and reacquires the lock
 * around the blocking sdio_write().  Partial progress is recorded in
 * port->n_read so the caller can retry the remainder later.
 * Returns 0 on success, negative errno on failure or when busy.
 */
int gsdio_write(struct gsdio_port *port, struct usb_request *req)
{
	unsigned avail;
	char *packet;
	unsigned size;
	unsigned n;
	int ret = 0;

	if (!port) {
		pr_err("%s: port is null\n", __func__);
		return -ENODEV;
	}

	if (!req) {
		pr_err("%s: usb request is null port#%d\n",
				__func__, port->port_num);
		return -ENODEV;
	}

	pr_debug("%s: port:%p port#%d req:%p actual:%d n_read:%d\n",
			__func__, port, port->port_num, req,
			req->actual, port->n_read);

	if (!port->sdio_open) {
		pr_debug("%s: SDIO IO is not supported\n", __func__);
		return -ENODEV;
	}

	avail = sdio_write_avail(port->sport_info->ch);

	pr_debug("%s: sdio_write_avail:%d", __func__, avail);

	/* modem cannot accept data right now; caller retries on
	 * WRITE_AVAIL notification */
	if (!avail)
		return -EBUSY;

	if (!req->actual) {
		pr_debug("%s: req->actual is already zero,update bytes read\n",
				__func__);
		port->n_read = 0;
		return -ENODEV;
	}

	size = req->actual;
	packet = req->buf;
	n = port->n_read;
	if (n) {
		/* resume a partially-written request */
		packet += n;
		size -= n;
	}

	if (size > avail)
		size = avail;

	/* sdio_write() may block; release the spinlock around it */
	spin_unlock_irq(&port->port_lock);
	ret = sdio_write(port->sport_info->ch, packet, size);
	spin_lock_irq(&port->port_lock);
	if (ret) {
		pr_err("%s: port#%d sdio write failed err:%d",
				__func__, port->port_num, ret);
		/* try again later */
		return ret;
	}

	port->nbytes_tomodem += size;

	if (size + n == req->actual)
		port->n_read = 0;
	else
		/* partial write: remember how far we got */
		port->n_read += size;

	return ret;
}
/*
 * Push worker: drain read_queue (OUT requests completed by the UDC) and
 * write their payloads to the SDIO data channel via gsdio_write(), which
 * temporarily drops port_lock -- hence the port_usb recheck after it.
 */
void gsdio_rx_push(struct work_struct *w)
{
	struct gsdio_port *port = container_of(w, struct gsdio_port, push);
	struct list_head *q = &port->read_queue;
	struct usb_ep *out;
	int ret;

	pr_debug("%s: port:%p port#%d read_queue:%p", __func__,
			port, port->port_num, q);

	spin_lock_irq(&port->port_lock);

	if (!port->port_usb) {
		pr_debug("%s: usb cable is disconencted\n", __func__);
		spin_unlock_irq(&port->port_lock);
		return;
	}

	out = port->port_usb->out;

	while (!list_empty(q)) {
		struct usb_request *req;

		req = list_first_entry(q, struct usb_request, list);

		switch (req->status) {
		case -ESHUTDOWN:
			/* endpoint shut down: stop, leave req on the queue */
			pr_debug("%s: req status shutdown portno#%d port:%p",
					__func__, port->port_num, port);
			goto rx_push_end;
		default:
			pr_warning("%s: port:%p port#%d"
					" Unexpected Rx Status:%d\n", __func__,
					port, port->port_num, req->status);
			/* FALL THROUGH */
		case 0:
			/* normal completion */
			break;
		}

		if (!port->sdio_open) {
			/* SDIO side gone: recycle the request to the pool */
			pr_err("%s: sio channel is not open\n", __func__);
			list_move(&req->list, &port->read_pool);
			port->rp_len++;
			port->rq_len--;
			goto rx_push_end;
		}

		list_del(&req->list);
		port->rq_len--;

		ret = gsdio_write(port, req);
		/* as gsdio_write drops spin_lock while writing data
		 * to sdio usb cable may have been disconnected
		 */
		if (!port->port_usb) {
			port->n_read = 0;
			gsdio_free_req(out, req);
			spin_unlock_irq(&port->port_lock);
			return;
		}

		/* write failed or was partial: requeue and retry later */
		if (ret || port->n_read) {
			list_add(&req->list, &port->read_queue);
			port->rq_len++;
			goto rx_push_end;
		}

		list_add(&req->list, &port->read_pool);
		port->rp_len++;
	}

	/* NOTE(review): the loop above only exits when q is empty, so this
	 * !list_empty(q) re-queue looks unreachable -- confirm intent. */
	if (port->sdio_open && !list_empty(q)) {
		if (sdio_write_avail(port->sport_info->ch))
			queue_work(gsdio_wq, &port->push);
	}
rx_push_end:
	spin_unlock_irq(&port->port_lock);

	/* start queuing out requests again to host */
	gsdio_start_rx(port);
}
/*
 * OUT-endpoint completion callback: move the filled request onto the
 * read_queue and schedule the push worker to forward it to SDIO.
 */
void gsdio_read_complete(struct usb_ep *ep, struct usb_request *req)
{
	unsigned long irqflags;
	struct gsdio_port *port = ep->driver_data;

	pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);

	if (!port) {
		pr_err("%s: port is null\n", __func__);
		return;
	}

	spin_lock_irqsave(&port->port_lock, irqflags);
	list_add_tail(&req->list, &port->read_queue);
	port->rq_len++;
	queue_work(gsdio_wq, &port->push);
	spin_unlock_irqrestore(&port->port_lock, irqflags);
}
/*
 * IN-endpoint completion callback: return the request to the write pool
 * and, unless the endpoint is shutting down, kick the pull worker to
 * fetch more modem data.
 */
void gsdio_write_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct gsdio_port *port = ep->driver_data;
	unsigned long flags;

	pr_debug("%s: ep:%p port:%p\n", __func__, ep, port);

	if (!port) {
		pr_err("%s: port is null\n", __func__);
		return;
	}

	spin_lock_irqsave(&port->port_lock, flags);
	list_add(&req->list, &port->write_pool);
	port->wp_len++;

	switch (req->status) {
	default:
		pr_warning("%s: port:%p port#%d unexpected %s status %d\n",
				__func__, port, port->port_num,
				ep->name, req->status);
		/* FALL THROUGH */
	case 0:
		/* normal completion: pull more data from SDIO */
		queue_work(gsdio_wq, &port->pull);
		break;
	case -ESHUTDOWN:
		/* disconnect */
		pr_debug("%s: %s shutdown\n", __func__, ep->name);
		break;
	}

	spin_unlock_irqrestore(&port->port_lock, flags);

	return;
}
/*
 * Drain and discard everything the modem has queued on the SDIO data
 * channel.  Used when USB is disconnected, so the data has nowhere to go.
 */
void gsdio_read_pending(struct gsdio_port *port)
{
	struct sdio_channel *ch;
	char drain[1024];
	int avail;

	if (!port) {
		pr_err("%s: port is null\n", __func__);
		return;
	}

	ch = port->sport_info->ch;
	if (!ch)
		return;

	for (;;) {
		avail = sdio_read_avail(ch);
		if (!avail)
			break;
		if (avail > 1024)
			avail = 1024;
		sdio_read(ch, drain, avail);
		pr_debug("%s: flushed out %d bytes\n", __func__, avail);
	}
}
/*
 * Pull worker: read pending modem data from the SDIO channel into pooled
 * requests and queue them on the USB IN endpoint toward the host.
 * Holds port_lock, dropping it around the blocking sdio_read() and
 * usb_ep_queue() calls; port_usb is rechecked after each drop.
 */
void gsdio_tx_pull(struct work_struct *w)
{
	struct gsdio_port *port = container_of(w, struct gsdio_port, pull);
	struct list_head *pool = &port->write_pool;

	pr_debug("%s: port:%p port#%d pool:%p\n", __func__,
			port, port->port_num, pool);

	if (!port->port_usb) {
		pr_err("%s: usb disconnected\n", __func__);
		/* take out all the pending data from sdio */
		gsdio_read_pending(port);
		return;
	}

	spin_lock_irq(&port->port_lock);

	while (!list_empty(pool)) {
		int avail;
		struct usb_ep *in = port->port_usb->in;
		struct sdio_channel *ch = port->sport_info->ch;
		struct usb_request *req;
		unsigned len = SDIO_TX_BUF_SIZE;
		int ret;

		req = list_entry(pool->next, struct usb_request, list);

		if (!port->sdio_open) {
			pr_debug("%s: SDIO channel is not open\n", __func__);
			goto tx_pull_end;
		}

		avail = sdio_read_avail(ch);
		if (!avail) {
			/* REVISIT: for ZLP */
			pr_debug("%s: read_avail:%d port:%p port#%d\n",
					__func__, avail, port, port->port_num);
			goto tx_pull_end;
		}

		if (avail > len)
			avail = len;

		list_del(&req->list);
		port->wp_len--;

		/* sdio_read() may block; release the spinlock around it */
		spin_unlock_irq(&port->port_lock);
		ret = sdio_read(ch, req->buf, avail);
		spin_lock_irq(&port->port_lock);
		if (ret) {
			pr_err("%s: port:%p port#%d sdio read failed err:%d",
					__func__, port, port->port_num, ret);

			/* check if usb is still active */
			if (!port->port_usb) {
				gsdio_free_req(in, req);
			} else {
				list_add(&req->list, pool);
				port->wp_len++;
			}
			goto tx_pull_end;
		}

		req->length = avail;

		spin_unlock_irq(&port->port_lock);
		ret = usb_ep_queue(in, req, GFP_KERNEL);
		spin_lock_irq(&port->port_lock);
		if (ret) {
			/* fixed: this is the IN endpoint (the old message
			 * said "out" and lacked a separating space) */
			pr_err("%s: usb ep in queue failed "
					"port:%p, port#%d err:%d\n",
					__func__, port, port->port_num, ret);

			/* could be usb disconnected */
			if (!port->port_usb) {
				gsdio_free_req(in, req);
			} else {
				list_add(&req->list, pool);
				port->wp_len++;
			}
			goto tx_pull_end;
		}

		port->nbytes_tolaptop += avail;
	}
tx_pull_end:
	spin_unlock_irq(&port->port_lock);
}
/*
 * Allocate both request pools and start traffic; called once USB and
 * SDIO are both up (from gsdio_connect() or gsdio_open_work()).
 * Returns 0 or a negative errno.
 */
int gsdio_start_io(struct gsdio_port *port)
{
	int ret;
	unsigned long flags;

	pr_debug("%s:\n", __func__);

	spin_lock_irqsave(&port->port_lock, flags);

	if (!port->port_usb) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		return -ENODEV;
	}

	/* start usb out queue */
	ret = gsdio_alloc_requests(port->port_usb->out,
				&port->read_pool,
				SDIO_RX_QUEUE_SIZE, SDIO_RX_BUF_SIZE,
				gsdio_read_complete);
	if (ret) {
		spin_unlock_irqrestore(&port->port_lock, flags);
		pr_err("%s: unable to allocate out reqs\n", __func__);
		return ret;
	}
	port->rp_len = SDIO_RX_QUEUE_SIZE;

	ret = gsdio_alloc_requests(port->port_usb->in,
				&port->write_pool,
				SDIO_TX_QUEUE_SIZE, SDIO_TX_BUF_SIZE,
				gsdio_write_complete);
	if (ret) {
		/* roll back the OUT pool allocated above */
		gsdio_free_requests(port->port_usb->out, &port->read_pool);
		port->rp_len = 0;
		spin_unlock_irqrestore(&port->port_lock, flags);
		pr_err("%s: unable to allocate in reqs\n", __func__);
		return ret;
	}
	port->wp_len = SDIO_TX_QUEUE_SIZE;
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* submit OUT requests to the host and kick the pull worker */
	gsdio_start_rx(port);
	queue_work(gsdio_wq, &port->pull);

	return 0;
}
/* Unregister the port's platform driver and free the port structure. */
void gsdio_port_free(unsigned portno)
{
	struct gsdio_port *port = sdio_ports[portno].port;

	if (!port) {
		pr_err("%s: invalid portno#%d\n", __func__, portno);
		return;
	}

	platform_driver_unregister(&sdio_ports[portno].gsdio_ch);
	kfree(port);
}
/*
 * Work item that forwards the host's control bits (DTR etc.) to the
 * modem over the cmux control channel.
 */
void gsdio_ctrl_wq(struct work_struct *w)
{
	struct gsdio_port *port = container_of(w, struct gsdio_port,
						notify_modem);

	if (!port) {
		pr_err("%s: port is null\n", __func__);
		return;
	}

	/* only meaningful once SDIO is up and the control channel works */
	if (!port->sdio_open || port->ctrl_ch_err)
		return;

	sdio_cmux_tiocmset(port->sport_info->ctrl_ch_id,
			port->cbits_to_modem, ~(port->cbits_to_modem));
}
/*
 * gserial callback: the host changed its control bits.  Translate the
 * DTR bit to TIOCM_DTR, echo current modem status back to the host when
 * DTR is asserted, and schedule the worker that informs the modem.
 */
void gsdio_ctrl_notify_modem(void *gptr, u8 portno, int ctrl_bits)
{
	struct gsdio_port *port;
	int temp;
	struct gserial *gser = gptr;

	if (portno >= n_sdio_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, portno);
		return;
	}

	if (!gser) {
		pr_err("%s: gser is null\n", __func__);
		return;
	}

	port = sdio_ports[portno].port;

	temp = ctrl_bits & SDIO_ACM_CTRL_DTR ? TIOCM_DTR : 0;

	/* nothing changed: avoid redundant modem notifications */
	if (port->cbits_to_modem == temp)
		return;

	port->cbits_to_modem = temp;

	/* TIOCM_DTR - 0x002 - bit(1) */
	pr_debug("%s: port:%p port#%d ctrl_bits:%08x\n", __func__,
		port, port->port_num, ctrl_bits);

	if (!port->sdio_open) {
		pr_err("%s: port:%p port#%d sdio not connected\n",
				__func__, port, port->port_num);
		return;
	}

	/* whenever DTR is high let laptop know that modem status */
	if (port->cbits_to_modem && gser->send_modem_ctrl_bits)
		gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);

	queue_work(gsdio_wq, &port->notify_modem);
}
/*
 * cmux callback: the modem's status lines changed.  Map RI/CD/DSR onto
 * the CDC ACM serial-state bits and forward them to the USB host.
 */
void gsdio_ctrl_modem_status(int ctrl_bits, void *_dev)
{
	struct gsdio_port *port = _dev;
	int cbits = 0;

	/* TIOCM_CD - 0x040 - bit(6)
	 * TIOCM_RI - 0x080 - bit(7)
	 * TIOCM_DSR- 0x100 - bit(8)
	 */
	pr_debug("%s: port:%p port#%d event:%08x\n", __func__,
		port, port->port_num, ctrl_bits);

	if (ctrl_bits & TIOCM_RI)
		cbits |= SDIO_ACM_CTRL_RI;
	if (ctrl_bits & TIOCM_CD)
		cbits |= SDIO_ACM_CTRL_DCD;
	if (ctrl_bits & TIOCM_DSR)
		cbits |= SDIO_ACM_CTRL_DSR;

	port->cbits_to_laptop = cbits;

	if (port->port_usb && port->port_usb->send_modem_ctrl_bits)
		port->port_usb->send_modem_ctrl_bits(port->port_usb,
					port->cbits_to_laptop);
}
/*
 * SDIO channel event callback.  WRITE_AVAIL (modem can accept data)
 * schedules the push worker (USB -> SDIO); READ_AVAIL (modem has data)
 * schedules the pull worker (SDIO -> USB).
 */
void gsdio_ch_notify(void *_dev, unsigned event)
{
	struct gsdio_port *port = _dev;

	/* NOTE(review): the debug string assumes event==1 means READ
	 * AVAIL -- verify against the SDIO_EVENT_* values in sdio_al.h */
	pr_debug("%s: port:%p port#%d event:%s\n", __func__,
		port, port->port_num,
		event == 1 ? "READ AVAIL" : "WRITE_AVAIL");

	if (event == SDIO_EVENT_DATA_WRITE_AVAIL)
		queue_work(gsdio_wq, &port->push);
	if (event == SDIO_EVENT_DATA_READ_AVAIL)
		queue_work(gsdio_wq, &port->pull);
}
/*
 * Delayed work scheduled from gsdio_ch_probe(): open the SDIO data
 * channel and the cmux control channel, fetch the initial modem status,
 * and start I/O if the USB side is already connected.  A control-channel
 * failure is tolerated (ctrl_ch_err is set); data still flows.
 */
static void gsdio_open_work(struct work_struct *w)
{
	struct gsdio_port *port =
			container_of(w, struct gsdio_port, sdio_open_work.work);
	struct sdio_port_info *pi = port->sport_info;
	struct gserial *gser;
	int ret;
	int ctrl_bits;
	int startio;

	ret = sdio_open(pi->data_ch_name, &pi->ch, port, gsdio_ch_notify);
	if (ret) {
		pr_err("%s: port:%p port#%d unable to open sdio ch:%s\n",
				__func__, port, port->port_num,
				pi->data_ch_name);
		return;
	}

	port->ctrl_ch_err = 0;
	ret = sdio_cmux_open(pi->ctrl_ch_id, 0, 0,
			gsdio_ctrl_modem_status, port);
	if (ret) {
		pr_err("%s: port:%p port#%d unable to open ctrl ch:%d\n",
				__func__, port, port->port_num, pi->ctrl_ch_id);
		port->ctrl_ch_err = 1;
	}

	/* check for latest status update from modem */
	if (!port->ctrl_ch_err) {
		ctrl_bits = sdio_cmux_tiocmget(pi->ctrl_ch_id);
		gsdio_ctrl_modem_status(ctrl_bits, port);
	}

	pr_debug("%s: SDIO data:%s ctrl:%d are open\n", __func__,
					pi->data_ch_name,
					pi->ctrl_ch_id);

	port->sdio_open = 1;

	/* start tx if usb is open already */
	spin_lock_irq(&port->port_lock);
	startio = port->port_usb ? 1 : 0;
	gser = port->port_usb;
	spin_unlock_irq(&port->port_lock);

	if (startio) {
		pr_debug("%s: USB is already open, start io\n", __func__);
		gsdio_start_io(port);
		if (gser->send_modem_ctrl_bits)
			gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);
	}
}
#define SDIO_CH_NAME_MAX_LEN 9
#define SDIO_OPEN_DELAY msecs_to_jiffies(10000)
/*
 * Platform remove callback: the matching SDIO channel is going away.
 * Mark the port closed, notify the host that the call dropped, flush
 * the endpoints, stop the workers, and release all queued requests.
 */
static int gsdio_ch_remove(struct platform_device *dev)
{
	struct gsdio_port *port;
	struct sdio_port_info *pi;
	int i;
	unsigned long flags;

	pr_debug("%s: name:%s\n", __func__, dev->name);

	for (i = 0; i < n_sdio_ports; i++) {
		port = sdio_ports[i].port;
		pi = port->sport_info;

		if (!strncmp(pi->data_ch_name, dev->name,
					SDIO_CH_NAME_MAX_LEN)) {
			struct gserial *gser = port->port_usb;

			port->sdio_open = 0;
			port->sdio_probe = 0;
			port->ctrl_ch_err = 1;

			/* check if usb cable is connected */
			if (!gser)
				continue;

			/* indicated call status to usb host */
			gsdio_ctrl_modem_status(0, port);

			usb_ep_fifo_flush(gser->in);
			usb_ep_fifo_flush(gser->out);

			cancel_work_sync(&port->push);
			cancel_work_sync(&port->pull);

			spin_lock_irqsave(&port->port_lock, flags);
			gsdio_free_requests(gser->out, &port->read_pool);
			gsdio_free_requests(gser->out, &port->read_queue);
			gsdio_free_requests(gser->in, &port->write_pool);
			port->rp_len = 0;
			port->rq_len = 0;
			port->wp_len = 0;
			port->n_read = 0;
			spin_unlock_irqrestore(&port->port_lock, flags);
		}
	}

	return 0;
}
static int gsdio_ch_probe(struct platform_device *dev)
{
struct gsdio_port *port;
struct sdio_port_info *pi;
int i;
pr_debug("%s: name:%s\n", __func__, dev->name);
for (i = 0; i < n_sdio_ports; i++) {
port = sdio_ports[i].port;
pi = port->sport_info;
pr_debug("%s: sdio_ch_name:%s dev_name:%s\n", __func__,
pi->data_ch_name, dev->name);
/* unfortunately cmux channle might not be ready even if
* sdio channel is ready. as we dont have good notification
* mechanism schedule a delayed work
*/
if (!strncmp(pi->data_ch_name, dev->name,
SDIO_CH_NAME_MAX_LEN)) {
port->sdio_probe = 1;
queue_delayed_work(gsdio_wq,
&port->sdio_open_work, SDIO_OPEN_DELAY);
return 0;
}
}
pr_info("%s: name:%s is not found\n", __func__, dev->name);
return -ENODEV;
}
int gsdio_port_alloc(unsigned portno,
struct usb_cdc_line_coding *coding,
struct sdio_port_info *pi)
{
struct gsdio_port *port;
struct platform_driver *pdriver;
port = kzalloc(sizeof(struct gsdio_port), GFP_KERNEL);
if (!port) {
pr_err("%s: port allocation failed\n", __func__);
return -ENOMEM;
}
port->port_num = portno;
spin_lock_init(&port->port_lock);
port->line_coding = *coding;
/* READ: read from usb and write into sdio */
INIT_LIST_HEAD(&port->read_pool);
INIT_LIST_HEAD(&port->read_queue);
INIT_WORK(&port->push, gsdio_rx_push);
INIT_LIST_HEAD(&port->write_pool);
INIT_WORK(&port->pull, gsdio_tx_pull);
INIT_WORK(&port->notify_modem, gsdio_ctrl_wq);
INIT_DELAYED_WORK(&port->sdio_open_work, gsdio_open_work);
sdio_ports[portno].port = port;
port->sport_info = pi;
pdriver = &sdio_ports[portno].gsdio_ch;
pdriver->probe = gsdio_ch_probe;
pdriver->remove = gsdio_ch_remove;
pdriver->driver.name = pi->data_ch_name;
pdriver->driver.owner = THIS_MODULE;
pr_debug("%s: port:%p port#%d sdio_name: %s\n", __func__,
port, port->port_num, pi->data_ch_name);
platform_driver_register(pdriver);
pr_debug("%s: port:%p port#%d\n", __func__, port, port->port_num);
return 0;
}
/*
 * gserial connect callback: bind the USB function to port @portno,
 * enable both endpoints, and start I/O if SDIO is already open.
 * Returns 0 or a negative errno.
 *
 * Fixed: the OUT-endpoint failure message wrongly said "in ep"; pointer
 * clears now use NULL instead of 0.
 */
int gsdio_connect(struct gserial *gser, u8 portno)
{
	struct gsdio_port *port;
	int ret = 0;
	unsigned long flags;

	if (portno >= n_sdio_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, portno);
		return -EINVAL;
	}

	if (!gser) {
		pr_err("%s: gser is null\n", __func__);
		return -EINVAL;
	}

	port = sdio_ports[portno].port;

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = gser;
	gser->notify_modem = gsdio_ctrl_notify_modem;
	spin_unlock_irqrestore(&port->port_lock, flags);

	ret = usb_ep_enable(gser->in);
	if (ret) {
		pr_err("%s: failed to enable in ep w/ err:%d\n",
					__func__, ret);
		port->port_usb = NULL;
		return ret;
	}
	gser->in->driver_data = port;

	ret = usb_ep_enable(gser->out);
	if (ret) {
		pr_err("%s: failed to enable out ep w/ err:%d\n",
					__func__, ret);
		/* roll back the IN endpoint enabled above */
		usb_ep_disable(gser->in);
		port->port_usb = NULL;
		gser->in->driver_data = NULL;
		return ret;
	}
	gser->out->driver_data = port;

	if (port->sdio_open) {
		pr_debug("%s: sdio is already open, start io\n", __func__);
		gsdio_start_io(port);
		if (gser->send_modem_ctrl_bits)
			gser->send_modem_ctrl_bits(gser, port->cbits_to_laptop);
	}

	return 0;
}
/*
 * gserial disconnect callback: drop DTR toward the modem, detach the
 * USB function, disable both endpoints, and free all queued requests.
 */
void gsdio_disconnect(struct gserial *gser, u8 portno)
{
	unsigned long flags;
	struct gsdio_port *port;

	if (portno >= n_sdio_ports) {
		pr_err("%s: invalid portno#%d\n", __func__, portno);
		return;
	}

	if (!gser) {
		pr_err("%s: gser is null\n", __func__);
		return;
	}

	port = sdio_ports[portno].port;

	/* send dtr zero to modem to notify disconnect */
	port->cbits_to_modem = 0;
	queue_work(gsdio_wq, &port->notify_modem);

	spin_lock_irqsave(&port->port_lock, flags);
	port->port_usb = 0;
	port->nbytes_tomodem = 0;
	port->nbytes_tolaptop = 0;
	spin_unlock_irqrestore(&port->port_lock, flags);

	/* disable endpoints, aborting down any active I/O */
	usb_ep_disable(gser->out);
	gser->out->driver_data = NULL;
	usb_ep_disable(gser->in);
	gser->in->driver_data = NULL;

	spin_lock_irqsave(&port->port_lock, flags);
	gsdio_free_requests(gser->out, &port->read_pool);
	gsdio_free_requests(gser->out, &port->read_queue);
	gsdio_free_requests(gser->in, &port->write_pool);
	port->rp_len = 0;
	port->rq_len = 0;
	port->wp_len = 0;
	port->n_read = 0;
	spin_unlock_irqrestore(&port->port_lock, flags);
}
#if defined(CONFIG_DEBUG_FS)
static char debug_buffer[PAGE_SIZE];
/*
 * debugfs read handler: dump per-port counters and state.
 *
 * Fixed: the buffer was kzalloc'd at only 1024 bytes while scnprintf()
 * was bounded by PAGE_SIZE - temp, so with enough ports the writes could
 * overflow the heap allocation.  Size the buffer to PAGE_SIZE to match
 * the bound actually used.
 */
static ssize_t debug_sdio_read_stats(struct file *file, char __user *ubuf,
		size_t count, loff_t *ppos)
{
	struct gsdio_port *port;
	char *buf;
	unsigned long flags;
	int i = 0;
	int temp = 0;
	int ret;

	buf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	while (i < n_sdio_ports) {
		port = sdio_ports[i].port;
		spin_lock_irqsave(&port->port_lock, flags);
		temp += scnprintf(buf + temp, PAGE_SIZE - temp,
				"###PORT:%d port:%p###\n"
				"nbytes_tolaptop: %lu\n"
				"nbytes_tomodem: %lu\n"
				"cbits_to_modem: %u\n"
				"cbits_to_laptop: %u\n"
				"read_pool_len: %lu\n"
				"read_queue_len: %lu\n"
				"write_pool_len: %lu\n"
				"n_read: %u\n"
				"sdio_open: %d\n"
				"sdio_probe: %d\n",
				i, port,
				port->nbytes_tolaptop, port->nbytes_tomodem,
				port->cbits_to_modem, port->cbits_to_laptop,
				port->rp_len, port->rq_len, port->wp_len,
				port->n_read,
				port->sdio_open, port->sdio_probe);
		spin_unlock_irqrestore(&port->port_lock, flags);
		i++;
	}

	ret = simple_read_from_buffer(ubuf, count, ppos, buf, temp);

	kfree(buf);

	return ret;
}
/* debugfs write handler: zero the per-port byte counters. */
static ssize_t debug_sdio_reset_stats(struct file *file, const char __user *buf,
		size_t count, loff_t *ppos)
{
	unsigned long flags;
	int i;

	for (i = 0; i < n_sdio_ports; i++) {
		struct gsdio_port *port = sdio_ports[i].port;

		spin_lock_irqsave(&port->port_lock, flags);
		port->nbytes_tolaptop = 0;
		port->nbytes_tomodem = 0;
		spin_unlock_irqrestore(&port->port_lock, flags);
	}

	return count;
}
/* debugfs open: no per-file state needed. */
static int debug_sdio_open(struct inode *inode, struct file *file)
{
	return 0;
}

/* read dumps stats, any write resets the byte counters */
static const struct file_operations debug_gsdio_ops = {
	.open = debug_sdio_open,
	.read = debug_sdio_read_stats,
	.write = debug_sdio_reset_stats,
};
/*
 * Create the usb_gsdio/status debugfs file.
 *
 * Fixed: debugfs_create_dir() can also return NULL (allocation failure),
 * not just an ERR_PTR; checking only IS_ERR() would then create the
 * status file in the debugfs root.  Bail out on either failure form.
 */
static void gsdio_debugfs_init(void)
{
	struct dentry *dent;

	dent = debugfs_create_dir("usb_gsdio", 0);
	if (IS_ERR_OR_NULL(dent))
		return;

	debugfs_create_file("status", 0444, dent, 0, &debug_gsdio_ops);
}
#else
/* CONFIG_DEBUG_FS disabled: stub out debugfs initialization */
static void gsdio_debugfs_init(void)
{
	return;
}
#endif
/* connect, disconnect, alloc_requests, free_requests */
/*
 * One-time setup called by the gadget function driver: create the
 * workqueue and allocate @count logical ports (up to SDIO_N_PORTS).
 * On a port allocation failure, all previously created ports and the
 * workqueue are torn down.  Returns 0 or a negative errno.
 */
int gsdio_setup(struct usb_gadget *g, unsigned count)
{
	struct usb_cdc_line_coding	coding;
	int i;
	int ret = 0;

	pr_debug("%s: gadget:(%p) count:%d\n", __func__, g, count);

	if (count == 0 || count > SDIO_N_PORTS) {
		pr_err("%s: invalid number of ports count:%d max_ports:%d\n",
				__func__, count, SDIO_N_PORTS);
		return -EINVAL;
	}

	/* default line coding advertised until the host sets its own */
	coding.dwDTERate = cpu_to_le32(9600);
	coding.bCharFormat = 8;
	coding.bParityType = USB_CDC_NO_PARITY;
	coding.bDataBits = USB_CDC_1_STOP_BITS;

	gsdio_wq = create_singlethread_workqueue("k_gserial");
	if (!gsdio_wq) {
		pr_err("%s: unable to create workqueue gsdio_wq\n",
				__func__);
		return -ENOMEM;
	}

	for (i = 0; i < count; i++) {
		mutex_init(&sdio_ports[i].lock);
		ret = gsdio_port_alloc(i, &coding, sport_info + i);
		/* count the port first so the cleanup loop below covers it;
		 * undo on failure */
		n_sdio_ports++;
		if (ret) {
			n_sdio_ports--;
			pr_err("%s: sdio logical port allocation failed\n",
					__func__);
			goto free_sdio_ports;
		}
	}

	gsdio_debugfs_init();

	return 0;

free_sdio_ports:
	for (i = 0; i < n_sdio_ports; i++)
		gsdio_port_free(i);
	destroy_workqueue(gsdio_wq);

	return ret;
}
/* TODO: Add gserial_cleanup */
| gpl-2.0 |
SlimLPXperia/android_kernel_sony_u8500 | drivers/media/dvb/frontends/ix2505v.c | 2958 | 7896 | /**
* Driver for Sharp IX2505V (marked B0017) DVB-S silicon tuner
*
* Copyright (C) 2010 Malcolm Priestley
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License Version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/module.h>
#include <linux/dvb/frontend.h>
#include <linux/slab.h>
#include <linux/types.h>
#include "ix2505v.h"
/* debug bitmask set via the "debug" module parameter */
static int ix2505v_debug;
#define dprintk(level, args...) do { \
	if (ix2505v_debug & level) \
		printk(KERN_DEBUG "ix2505v: " args); \
} while (0)

/* 0x01 = general info, 0x02 = i2c traffic */
#define deb_info(args...) dprintk(0x01, args)
#define deb_i2c(args...) dprintk(0x02, args)

/* per-tuner driver state attached to fe->tuner_priv */
struct ix2505v_state {
	struct i2c_adapter *i2c;
	const struct ix2505v_config *config;
	u32 frequency;	/* last frequency programmed by set_params */
};
/**
* Data read format of the Sharp IX2505V B0017
*
* byte1: 1 | 1 | 0 | 0 | 0 | MA1 | MA0 | 1
* byte2: POR | FL | RD2 | RD1 | RD0 | X | X | X
*
* byte1 = address
* byte2;
* POR = Power on Reset (VCC H=<2.2v L=>2.2v)
* FL = Phase Lock (H=lock L=unlock)
* RD0-2 = Reserved internal operations
*
* Only POR can be used to check the tuner is present
*
* Caution: after byte2 the I2C reverts to write mode continuing to read
* may corrupt tuning data.
*
*/
/*
 * Read the single status byte (POR/FL flags) from the tuner.
 * Returns the byte on success, -1 on i2c failure.
 */
static int ix2505v_read_status_reg(struct ix2505v_state *state)
{
	u8 b2[] = {0};
	struct i2c_msg msg[1] = {
		{ .addr = state->config->tuner_address,
		  .flags = I2C_M_RD, .buf = b2, .len = 1 }
	};
	int ret = i2c_transfer(state->i2c, msg, 1);

	deb_i2c("Read %s ", __func__);

	return (ret == 1) ? (int) b2[0] : -1;
}
/* Write @count register bytes to the tuner; 0 on success, -EIO on error. */
static int ix2505v_write(struct ix2505v_state *state, u8 buf[], u8 count)
{
	int ret;
	struct i2c_msg msg = { .addr = state->config->tuner_address,
			       .flags = 0, .buf = buf, .len = count };

	ret = i2c_transfer(state->i2c, &msg, 1);
	if (ret != 1) {
		deb_i2c("%s: i2c error, ret=%d\n", __func__, ret);
		return -EIO;
	}

	return 0;
}
/* Free the driver state attached by ix2505v_attach(). */
static int ix2505v_release(struct dvb_frontend *fe)
{
	kfree(fe->tuner_priv);
	fe->tuner_priv = NULL;

	return 0;
}
/**
* Data write format of the Sharp IX2505V B0017
*
* byte1: 1 | 1 | 0 | 0 | 0 | 0(MA1)| 0(MA0)| 0
* byte2: 0 | BG1 | BG2 | N8 | N7 | N6 | N5 | N4
* byte3: N3 | N2 | N1 | A5 | A4 | A3 | A2 | A1
* byte4: 1 | 1(C1) | 1(C0) | PD5 | PD4 | TM | 0(RTS)| 1(REF)
* byte5: BA2 | BA1 | BA0 | PSC | PD3 |PD2/TS2|DIV/TS1|PD0/TS0
*
* byte1 = address
*
* Write order
* 1) byte1 -> byte2 -> byte3 -> byte4 -> byte5
* 2) byte1 -> byte4 -> byte5 -> byte2 -> byte3
* 3) byte1 -> byte2 -> byte3 -> byte4
* 4) byte1 -> byte4 -> byte5 -> byte2
* 5) byte1 -> byte2 -> byte3
* 6) byte1 -> byte4 -> byte5
* 7) byte1 -> byte2
* 8) byte1 -> byte4
*
* Recommended Setup
* 1 -> 8 -> 6
*/
/*
 * Program the PLL for the requested frequency and symbol rate using the
 * recommended register write order 1 -> 8 -> 6 (see the format comment
 * above): full 4-byte setup, then byte 4 alone with TM set, then bytes
 * 4+5 with the low-pass filter bits merged in.
 */
static int ix2505v_set_params(struct dvb_frontend *fe,
		struct dvb_frontend_parameters *params)
{
	struct ix2505v_state *state = fe->tuner_priv;
	u32 frequency = params->frequency;
	/* scale symbol rate to a bandwidth figure for LPF selection */
	u32 b_w  = (params->u.qpsk.symbol_rate * 27) / 32000;
	u32 div_factor, N , A, x;
	int ret = 0, len;
	u8 gain, cc, ref, psc, local_osc, lpf;
	u8 data[4] = {0};

	if ((frequency < fe->ops.info.frequency_min)
	||  (frequency > fe->ops.info.frequency_max))
		return -EINVAL;

	/* gain: config value 1..3, anything else falls back to 0 */
	if (state->config->tuner_gain)
		gain = (state->config->tuner_gain < 4)
			? state->config->tuner_gain : 0;
	else
		gain = 0x0;

	/* charge pump current, default 0x3 */
	if (state->config->tuner_chargepump)
		cc = state->config->tuner_chargepump;
	else
		cc = 0x3;

	ref = 8; /* REF =1 */
	psc = 32; /* PSC = 0 */

	/* split the divider into prescaler counts N (main) and A (swallow) */
	div_factor = (frequency * ref) / 40; /* local osc = 4Mhz */
	x = div_factor / psc;
	N = x/100;
	A = ((x - (N * 100)) * psc) / 100;

	data[0] = ((gain & 0x3) << 5) | (N >> 3);
	data[1] = (N << 5) | (A & 0x1f);
	data[2] = 0x81 | ((cc & 0x3) << 5) ; /*PD5,PD4 & TM = 0|C1,C0|REF=1*/

	deb_info("Frq=%d x=%d N=%d A=%d\n", frequency, x, N, A);

	/* select the local oscillator band for the target frequency */
	if (frequency <= 1065000)
		local_osc = (6 << 5) | 2;
	else if (frequency <= 1170000)
		local_osc = (7 << 5) | 2;
	else if (frequency <= 1300000)
		local_osc = (1 << 5);
	else if (frequency <= 1445000)
		local_osc = (2 << 5);
	else if (frequency <= 1607000)
		local_osc = (3 << 5);
	else if (frequency <= 1778000)
		local_osc = (4 << 5);
	else if (frequency <= 1942000)
		local_osc = (5 << 5);
	else /*frequency up to 2150000*/
		local_osc = (6 << 5);

	data[3] = local_osc; /* all other bits set 0 */

	/* pick the low-pass filter code for the signal bandwidth */
	if (b_w <= 10000)
		lpf = 0xc;
	else if (b_w <= 12000)
		lpf = 0x2;
	else if (b_w <= 14000)
		lpf = 0xa;
	else if (b_w <= 16000)
		lpf = 0x6;
	else if (b_w <= 18000)
		lpf = 0xe;
	else if (b_w <= 20000)
		lpf = 0x1;
	else if (b_w <= 22000)
		lpf = 0x9;
	else if (b_w <= 24000)
		lpf = 0x5;
	else if (b_w <= 26000)
		lpf = 0xd;
	else if (b_w <= 28000)
		lpf = 0x3;
	else
		lpf = 0xb;

	deb_info("Osc=%x b_w=%x lpf=%x\n", local_osc, b_w, lpf);
	deb_info("Data 0=[%x%x%x%x]\n", data[0], data[1], data[2], data[3]);

	/* write 1: bytes 2..5 */
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);

	len = sizeof(data);

	ret |= ix2505v_write(state, data, len);

	data[2] |= 0x4; /* set TM = 1 other bits same */

	/* write 8: byte 4 alone, with TM asserted */
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);

	len = 1;
	ret |= ix2505v_write(state, &data[2], len); /* write byte 4 only */

	msleep(10);

	/* merge the LPF code into bytes 4 and 5 */
	data[2] |= ((lpf >> 2) & 0x3) << 3; /* lpf */
	data[3] |= (lpf & 0x3) << 2;

	deb_info("Data 2=[%x%x]\n", data[2], data[3]);

	/* write 6: bytes 4 and 5 */
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1);

	len = 2;
	ret |= ix2505v_write(state, &data[2], len); /* write byte 4 & 5 */

	if (state->config->min_delay_ms)
		msleep(state->config->min_delay_ms);

	state->frequency = frequency;
	return ret;
}
/* Report the frequency cached by the last successful set_params(). */
static int ix2505v_get_frequency(struct dvb_frontend *fe, u32 *frequency)
{
	struct ix2505v_state *st = fe->tuner_priv;

	*frequency = st->frequency;

	return 0;
}
/* Tuner operations handed to the DVB core via ix2505v_attach(). */
static struct dvb_tuner_ops ix2505v_tuner_ops = {
	.info = {
		.name = "Sharp IX2505V (B0017)",
		/* range enforced by ix2505v_set_params() */
		.frequency_min = 950000,
		.frequency_max = 2175000
	},
	.release = ix2505v_release,
	.set_params = ix2505v_set_params,
	.get_frequency = ix2505v_get_frequency,
};
/**
 * ix2505v_attach() - probe for an IX2505V tuner and hook it to a frontend
 * @fe:     frontend to attach the tuner ops to
 * @config: board configuration (i2c address, write-only flag, delays)
 * @i2c:    adapter the tuner sits on
 *
 * Returns @fe on success, NULL on missing config, allocation failure,
 * or when the chip does not answer.  On success ownership of the
 * allocated state passes to @fe->tuner_priv (freed by .release).
 */
struct dvb_frontend *ix2505v_attach(struct dvb_frontend *fe,
	const struct ix2505v_config *config,
	struct i2c_adapter *i2c)
{
	struct ix2505v_state *state = NULL;
	int ret;

	if (NULL == config) {
		deb_i2c("%s: no config ", __func__);
		goto error;
	}

	state = kzalloc(sizeof(struct ix2505v_state), GFP_KERNEL);
	if (NULL == state)
		return NULL;

	state->config = config;
	state->i2c = i2c;

	if (state->config->tuner_write_only) {
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 1);

		ret = ix2505v_read_status_reg(state);

		if (ret & 0x80) {
			deb_i2c("%s: No IX2505V found\n", __func__);
			/*
			 * Bugfix: close the i2c gate before bailing out;
			 * the old code left it open on this error path.
			 */
			if (fe->ops.i2c_gate_ctrl)
				fe->ops.i2c_gate_ctrl(fe, 0);
			goto error;
		}

		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0);
	}

	fe->tuner_priv = state;

	memcpy(&fe->ops.tuner_ops, &ix2505v_tuner_ops,
		sizeof(struct dvb_tuner_ops));
	deb_i2c("%s: initialization (%s addr=0x%02x) ok\n",
		__func__, fe->ops.tuner_ops.info.name, config->tuner_address);

	return fe;

error:
	kfree(state);	/* kfree(NULL) is a no-op on the no-config path */
	return NULL;
}
EXPORT_SYMBOL(ix2505v_attach);

/* module-level debug knob (0 = quiet), plus standard module metadata */
module_param_named(debug, ix2505v_debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off).");
MODULE_DESCRIPTION("DVB IX2505V tuner driver");
MODULE_AUTHOR("Malcolm Priestley");
MODULE_LICENSE("GPL");
| gpl-2.0 |
s0627js/android_kernel_SHV-E300S | drivers/usb/gadget/dummy_hcd.c | 3214 | 67882 | /*
* dummy_hcd.c -- Dummy/Loopback USB host and device emulator driver.
*
* Maintainer: Alan Stern <stern@rowland.harvard.edu>
*
* Copyright (C) 2003 David Brownell
* Copyright (C) 2003-2005 Alan Stern
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
/*
* This exposes a device side "USB gadget" API, driven by requests to a
* Linux-USB host controller driver. USB traffic is simulated; there's
* no need for USB hardware. Use this with two other drivers:
*
* - Gadget driver, responding to requests (slave);
* - Host-side device driver, as already familiar in Linux.
*
* Having this all in one kernel can help some stages of development,
* bypassing some hardware (and driver) issues. UML could help too.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/usb.h>
#include <linux/usb/gadget.h>
#include <linux/usb/hcd.h>
#include <linux/scatterlist.h>
#include <asm/byteorder.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#define DRIVER_DESC	"USB Host+Gadget Emulator"
#define DRIVER_VERSION	"02 May 2005"

#define POWER_BUDGET	500	/* in mA; use 8 for low-power port testing */

/* names registered with the platform bus / driver core */
static const char	driver_name[] = "dummy_hcd";
static const char	driver_desc[] = "USB Host+Gadget Emulator";

static const char	gadget_name[] = "dummy_udc";

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");

/* boot-time selection of the simulated link speed (read-only afterwards) */
struct dummy_hcd_module_parameters {
	bool is_super_speed;	/* expose an SS (USB3) root hub too */
	bool is_high_speed;	/* allow HS; otherwise force full speed */
};

static struct dummy_hcd_module_parameters mod_data = {
	.is_super_speed = false,
	.is_high_speed = true,
};
module_param_named(is_super_speed, mod_data.is_super_speed, bool, S_IRUGO);
MODULE_PARM_DESC(is_super_speed, "true to simulate SuperSpeed connection");
module_param_named(is_high_speed, mod_data.is_high_speed, bool, S_IRUGO);
MODULE_PARM_DESC(is_high_speed, "true to simulate HighSpeed connection");
/*-------------------------------------------------------------------------*/
/* gadget side driver data structres */
/* per-endpoint state on the gadget (device) side */
struct dummy_ep {
	struct list_head		queue;		/* pending dummy_requests */
	unsigned long			last_io;	/* jiffies timestamp of last transfer */
	struct usb_gadget		*gadget;	/* back-pointer to owning gadget */
	const struct usb_endpoint_descriptor *desc;	/* non-NULL while enabled */
	struct usb_ep			ep;		/* embedded generic endpoint */
	unsigned			halted:1;	/* endpoint is stalled */
	unsigned			wedged:1;	/* halted and may not be un-halted by host */
	unsigned			already_seen:1;	/* visited in the current timer pass */
	unsigned			setup_stage:1;	/* ep0 expects a SETUP packet next */
	unsigned			stream_en:1;	/* bulk streams enabled (SS only) */
};
/* gadget-side transfer request, queued on a dummy_ep */
struct dummy_request {
	struct list_head		queue;		/* link on ep's request queue */
	struct usb_request		req;		/* embedded generic request */
};
/* map a generic usb_ep back to its enclosing dummy_ep */
static inline struct dummy_ep *usb_ep_to_dummy_ep(struct usb_ep *_ep)
{
	struct dummy_ep *ep = container_of(_ep, struct dummy_ep, ep);

	return ep;
}
/* map a generic usb_request back to its enclosing dummy_request */
static inline struct dummy_request *usb_request_to_dummy_request
		(struct usb_request *_req)
{
	struct dummy_request *req =
		container_of(_req, struct dummy_request, req);

	return req;
}
/*-------------------------------------------------------------------------*/
/*
* Every device has ep0 for control requests, plus up to 30 more endpoints,
* in one of two types:
*
* - Configurable: direction (in/out), type (bulk, iso, etc), and endpoint
* number can be changed. Names like "ep-a" are used for this type.
*
* - Fixed Function: in other cases. some characteristics may be mutable;
* that'd be hardware-specific. Names like "ep12out-bulk" are used.
*
* Gadget drivers are responsible for not setting up conflicting endpoint
* configurations, illegal or unsupported packet lengths, and so on.
*/
static const char ep0name[] = "ep0";

/* endpoint name table; the name encodes what each endpoint can do */
static const char *const ep_name[] = {
	ep0name,				/* everyone has ep0 */

	/* act like a net2280: high speed, six configurable endpoints */
	"ep-a", "ep-b", "ep-c", "ep-d", "ep-e", "ep-f",

	/* or like pxa250: fifteen fixed function endpoints */
	"ep1in-bulk", "ep2out-bulk", "ep3in-iso", "ep4out-iso", "ep5in-int",
	"ep6in-bulk", "ep7out-bulk", "ep8in-iso", "ep9out-iso", "ep10in-int",
	"ep11in-bulk", "ep12out-bulk", "ep13in-iso", "ep14out-iso",
	"ep15in-int",

	/* or like sa1100: two fixed function endpoints */
	"ep1out-bulk", "ep2in-bulk",
};
#define DUMMY_ENDPOINTS	ARRAY_SIZE(ep_name)

/*-------------------------------------------------------------------------*/

#define FIFO_SIZE		64	/* bytes in the emulated single-request FIFO */

/* host-side per-URB bookkeeping, linked on dummy_hcd->urbp_list */
struct urbp {
	struct urb		*urb;		/* the URB being emulated */
	struct list_head	urbp_list;	/* link on the hcd's pending list */
	struct sg_mapping_iter	miter;		/* scatter-gather walk state */
	u32			miter_started;	/* nonzero once sg_miter_start() ran */
};


/* root-hub power states, driven by bus suspend/resume */
enum dummy_rh_state {
	DUMMY_RH_RESET,
	DUMMY_RH_SUSPENDED,
	DUMMY_RH_RUNNING
};
/* host-controller side state; one per simulated root hub (HS and SS) */
struct dummy_hcd {
	struct dummy			*dum;		/* shared device state */
	enum dummy_rh_state		rh_state;	/* root hub power state */
	struct timer_list		timer;		/* drives all emulated transfers */
	u32				port_status;	/* wPortStatus | (wPortChange << 16) */
	u32				old_status;	/* previous port_status, for change bits */
	unsigned long			re_timeout;	/* jiffies deadline for reset/resume signaling */

	struct usb_device		*udev;		/* the (single) attached usb device */
	struct list_head		urbp_list;	/* pending struct urbp entries */
	u32				stream_en_ep;	/* bitmap: which eps have streams enabled */
	u8				num_stream[30 / 2];	/* packed per-ep max stream nibbles */

	unsigned			active:1;	/* link carrying traffic this instant */
	unsigned			old_active:1;	/* previous 'active', for suspend/resume edges */
	unsigned			resuming:1;	/* remote-wakeup resume signaling in progress */
};
/* the whole emulator: one gadget (device side) plus its two host views */
struct dummy {
	spinlock_t			lock;		/* guards everything below */

	/*
	 * SLAVE/GADGET side support
	 */
	struct dummy_ep			ep[DUMMY_ENDPOINTS];
	int				address;	/* assigned USB device address */
	struct usb_gadget		gadget;
	struct usb_gadget_driver	*driver;	/* bound gadget driver, or NULL */
	struct dummy_request		fifo_req;	/* backing for the 1-deep IN fifo */
	u8				fifo_buf[FIFO_SIZE];
	u16				devstatus;	/* GET_STATUS bits (self-powered etc.) */
	unsigned			udc_suspended:1;	/* UDC suspended by PM */
	unsigned			pullup:1;	/* D+ pullup (soft-connect) asserted */

	/*
	 * MASTER/HOST side support
	 */
	struct dummy_hcd		*hs_hcd;	/* USB2 root hub */
	struct dummy_hcd		*ss_hcd;	/* USB3 root hub (if is_super_speed) */
};
/* the dummy_hcd lives in the hcd's private area */
static inline struct dummy_hcd *hcd_to_dummy_hcd(struct usb_hcd *hcd)
{
	void *priv = hcd->hcd_priv;

	return (struct dummy_hcd *) priv;
}
/* inverse of hcd_to_dummy_hcd(): recover the enclosing usb_hcd */
static inline struct usb_hcd *dummy_hcd_to_hcd(struct dummy_hcd *dum)
{
	void *priv = dum;

	return container_of(priv, struct usb_hcd, hcd_priv);
}
static inline struct device *dummy_dev(struct dummy_hcd *dum)
{
return dummy_hcd_to_hcd(dum)->self.controller;
}
/* device to log gadget-side messages against */
static inline struct device *udc_dev(struct dummy *dum)
{
	struct device *parent = dum->gadget.dev.parent;

	return parent;
}
/* find the device state from one of its endpoints */
static inline struct dummy *ep_to_dummy(struct dummy_ep *ep)
{
	struct usb_gadget *gadget = ep->gadget;

	return container_of(gadget, struct dummy, gadget);
}
/* pick the root hub (SS or HS) matching the gadget's negotiated speed */
static inline struct dummy_hcd *gadget_to_dummy_hcd(struct usb_gadget *gadget)
{
	struct dummy *dum = container_of(gadget, struct dummy, gadget);
	int is_ss = (dum->gadget.speed == USB_SPEED_SUPER);

	return is_ss ? dum->ss_hcd : dum->hs_hcd;
}
/* find the device state from the gadget's struct device */
static inline struct dummy *gadget_dev_to_dummy(struct device *dev)
{
	struct dummy *dum = container_of(dev, struct dummy, gadget.dev);

	return dum;
}
static struct dummy the_controller;
/*-------------------------------------------------------------------------*/
/* SLAVE/GADGET SIDE UTILITY ROUTINES */
/* called with spinlock held */
/*
 * Fail every request still queued on @ep with -ESHUTDOWN.
 * Called with dum->lock held; the lock is dropped around each
 * completion callback, so the list is re-examined from the head
 * every iteration.
 */
static void nuke(struct dummy *dum, struct dummy_ep *ep)
{
	struct dummy_request *req;

	for (;;) {
		if (list_empty(&ep->queue))
			break;

		req = list_entry(ep->queue.next, struct dummy_request, queue);
		list_del_init(&req->queue);
		req->req.status = -ESHUTDOWN;

		spin_unlock(&dum->lock);
		req->req.complete(&ep->ep, &req->req);
		spin_lock(&dum->lock);
	}
}
/* caller must hold lock */
static void stop_activity(struct dummy *dum)
{
struct dummy_ep *ep;
/* prevent any more requests */
dum->address = 0;
/* The timer is left running so that outstanding URBs can fail */
/* nuke any pending requests first, so driver i/o is quiesced */
list_for_each_entry(ep, &dum->gadget.ep_list, ep.ep_list)
nuke(dum, ep);
/* driver now does any non-usb quiescing necessary */
}
/**
* set_link_state_by_speed() - Sets the current state of the link according to
* the hcd speed
* @dum_hcd: pointer to the dummy_hcd structure to update the link state for
*
* This function updates the port_status according to the link state and the
* speed of the hcd.
*/
/**
 * set_link_state_by_speed() - Sets the current state of the link according to
 *	the hcd speed
 * @dum_hcd: pointer to the dummy_hcd structure to update the link state for
 *
 * This function updates the port_status according to the link state and the
 * speed of the hcd.
 */
static void set_link_state_by_speed(struct dummy_hcd *dum_hcd)
{
	struct dummy *dum = dum_hcd->dum;

	if (dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3) {
		if ((dum_hcd->port_status & USB_SS_PORT_STAT_POWER) == 0) {
			dum_hcd->port_status = 0;
		} else if (!dum->pullup || dum->udc_suspended) {
			/* UDC suspend must cause a disconnect */
			dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
						USB_PORT_STAT_ENABLE);
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) != 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
		} else {
			/* device is connected and not suspended */
			dum_hcd->port_status |= (USB_PORT_STAT_CONNECTION |
						 USB_PORT_STAT_SPEED_5GBPS) ;
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) == 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
			/*
			 * Bugfix: USB_PORT_STAT_ENABLE is 0x0002, so the old
			 * "== 1" comparison could never be true and 'active'
			 * was never set here.  Test the bit itself, and
			 * compare the link-state field against U0 rather
			 * than against the literal 1.
			 */
			if ((dum_hcd->port_status &
			     USB_PORT_STAT_ENABLE) != 0 &&
			    (dum_hcd->port_status &
			     USB_SS_PORT_LS_U0) == USB_SS_PORT_LS_U0 &&
			    dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
				dum_hcd->active = 1;
		}
	} else {
		if ((dum_hcd->port_status & USB_PORT_STAT_POWER) == 0) {
			dum_hcd->port_status = 0;
		} else if (!dum->pullup || dum->udc_suspended) {
			/* UDC suspend must cause a disconnect */
			dum_hcd->port_status &= ~(USB_PORT_STAT_CONNECTION |
						USB_PORT_STAT_ENABLE |
						USB_PORT_STAT_LOW_SPEED |
						USB_PORT_STAT_HIGH_SPEED |
						USB_PORT_STAT_SUSPEND);
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) != 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
		} else {
			dum_hcd->port_status |= USB_PORT_STAT_CONNECTION;
			if ((dum_hcd->old_status &
			     USB_PORT_STAT_CONNECTION) == 0)
				dum_hcd->port_status |=
					(USB_PORT_STAT_C_CONNECTION << 16);
			if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0)
				dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
			else if ((dum_hcd->port_status &
				  USB_PORT_STAT_SUSPEND) == 0 &&
				 dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
				dum_hcd->active = 1;
		}
	}
}
/* caller must hold lock */
/*
 * Recompute port status / change bits and deliver disconnect, suspend, or
 * resume callbacks to the gadget driver as needed.  Caller must hold
 * dum->lock; it is dropped around the driver callbacks.
 */
static void set_link_state(struct dummy_hcd *dum_hcd)
{
	struct dummy *dum = dum_hcd->dum;

	dum_hcd->active = 0;
	if (dum->pullup)
		if ((dummy_hcd_to_hcd(dum_hcd)->speed == HCD_USB3 &&
		     dum->gadget.speed != USB_SPEED_SUPER) ||
		    (dummy_hcd_to_hcd(dum_hcd)->speed != HCD_USB3 &&
		     dum->gadget.speed == USB_SPEED_SUPER))
			return;	/* speed mismatch: device belongs to the other hub */

	set_link_state_by_speed(dum_hcd);

	if ((dum_hcd->port_status & USB_PORT_STAT_ENABLE) == 0 ||
	     dum_hcd->active)
		dum_hcd->resuming = 0;

	/* if !connected or reset */
	if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0 ||
			(dum_hcd->port_status & USB_PORT_STAT_RESET) != 0) {
		/*
		 * We're connected and not reset (reset occurred now),
		 * and driver attached - disconnect!
		 */
		if ((dum_hcd->old_status & USB_PORT_STAT_CONNECTION) != 0 &&
		    (dum_hcd->old_status & USB_PORT_STAT_RESET) == 0 &&
		    dum->driver) {
			stop_activity(dum);
			spin_unlock(&dum->lock);
			dum->driver->disconnect(&dum->gadget);
			spin_lock(&dum->lock);
		}
	} else if (dum_hcd->active != dum_hcd->old_active &&
		   dum->driver) {
		/*
		 * Bugfix: dum->driver can be NULL when the gadget driver is
		 * unbound while the port is active; guard before looking at
		 * its suspend/resume hooks.
		 */
		if (dum_hcd->old_active && dum->driver->suspend) {
			spin_unlock(&dum->lock);
			dum->driver->suspend(&dum->gadget);
			spin_lock(&dum->lock);
		} else if (!dum_hcd->old_active && dum->driver->resume) {
			spin_unlock(&dum->lock);
			dum->driver->resume(&dum->gadget);
			spin_lock(&dum->lock);
		}
	}

	dum_hcd->old_status = dum_hcd->port_status;
	dum_hcd->old_active = dum_hcd->active;
}
/*-------------------------------------------------------------------------*/
/* SLAVE/GADGET SIDE DRIVER
*
* This only tracks gadget state. All the work is done when the host
* side tries some (emulated) i/o operation. Real device controller
* drivers would do real i/o using dma, fifos, irqs, timers, etc.
*/
#define is_enabled(dum) \
(dum->port_status & USB_PORT_STAT_ENABLE)
/*
 * Gadget-side endpoint enable.  Validates the descriptor against the
 * endpoint's name-encoded capabilities and the current connection speed,
 * then marks the endpoint usable.  Returns 0 or a negative errno.
 */
static int dummy_enable(struct usb_ep *_ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dummy		*dum;
	struct dummy_hcd	*dum_hcd;
	struct dummy_ep		*ep;
	unsigned		max;
	int			retval;

	ep = usb_ep_to_dummy_ep(_ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dum = ep_to_dummy(ep);
	if (!dum->driver)
		return -ESHUTDOWN;

	dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
	if (!is_enabled(dum_hcd))
		return -ESHUTDOWN;

	/*
	 * For HS/FS devices only bits 0..10 of the wMaxPacketSize represent the
	 * maximum packet size.
	 * For SS devices the wMaxPacketSize is limited by 1024.
	 */
	max = usb_endpoint_maxp(desc) & 0x7ff;

	/* drivers must not request bad settings, since lower levels
	 * (hardware or its drivers) may not check.  some endpoints
	 * can't do iso, many have maxpacket limitations, etc.
	 *
	 * since this "hardware" driver is here to help debugging, we
	 * have some extra sanity checks.  (there could be more though,
	 * especially for "ep9out" style fixed function ones.)
	 */
	retval = -EINVAL;
	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_BULK:
		/* name-encoded capability check: "-iso"/"-int" eps can't do bulk */
		if (strstr(ep->ep.name, "-iso")
				|| strstr(ep->ep.name, "-int")) {
			goto done;
		}
		switch (dum->gadget.speed) {
		case USB_SPEED_SUPER:
			if (max == 1024)
				break;
			goto done;
		case USB_SPEED_HIGH:
			if (max == 512)
				break;
			goto done;
		case USB_SPEED_FULL:
			if (max == 8 || max == 16 || max == 32 || max == 64)
				/* we'll fake any legal size */
				break;
			/* save a return statement */
		default:
			goto done;
		}
		break;
	case USB_ENDPOINT_XFER_INT:
		if (strstr(ep->ep.name, "-iso")) /* bulk is ok */
			goto done;
		/* real hardware might not handle all packet sizes */
		switch (dum->gadget.speed) {
		case USB_SPEED_SUPER:
		case USB_SPEED_HIGH:
			if (max <= 1024)
				break;
			/* save a return statement */
		case USB_SPEED_FULL:
			if (max <= 64)
				break;
			/* save a return statement */
		default:
			if (max <= 8)
				break;
			goto done;
		}
		break;
	case USB_ENDPOINT_XFER_ISOC:
		if (strstr(ep->ep.name, "-bulk")
				|| strstr(ep->ep.name, "-int"))
			goto done;
		/* real hardware might not handle all packet sizes */
		switch (dum->gadget.speed) {
		case USB_SPEED_SUPER:
		case USB_SPEED_HIGH:
			if (max <= 1024)
				break;
			/* save a return statement */
		case USB_SPEED_FULL:
			if (max <= 1023)
				break;
			/* save a return statement */
		default:
			goto done;
		}
		break;
	default:
		/* few chips support control except on ep0 */
		goto done;
	}

	_ep->maxpacket = max;
	if (usb_ss_max_streams(_ep->comp_desc)) {
		/* bulk streams are an SS bulk-only feature */
		if (!usb_endpoint_xfer_bulk(desc)) {
			dev_err(udc_dev(dum), "Can't enable stream support on "
					"non-bulk ep %s\n", _ep->name);
			return -EINVAL;
		}
		ep->stream_en = 1;
	}
	ep->desc = desc;

	dev_dbg(udc_dev(dum), "enabled %s (ep%d%s-%s) maxpacket %d stream %s\n",
		_ep->name,
		desc->bEndpointAddress & 0x0f,
		(desc->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
		({ char *val;
		 switch (usb_endpoint_type(desc)) {
		 case USB_ENDPOINT_XFER_BULK:
			 val = "bulk";
			 break;
		 case USB_ENDPOINT_XFER_ISOC:
			 val = "iso";
			 break;
		 case USB_ENDPOINT_XFER_INT:
			 val = "intr";
			 break;
		 default:
			 val = "ctrl";
			 break;
		 }; val; }),
		max, ep->stream_en ? "enabled" : "disabled");

	/* at this point real hardware should be NAKing transfers
	 * to that endpoint, until a buffer is queued to it.
	 */
	ep->halted = ep->wedged = 0;
	retval = 0;
done:
	return retval;
}
/* gadget-side endpoint disable: flush pending requests and clear the desc */
static int dummy_disable(struct usb_ep *_ep)
{
	struct dummy_ep *ep = usb_ep_to_dummy_ep(_ep);
	struct dummy *dum;
	unsigned long flags;

	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;
	dum = ep_to_dummy(ep);

	spin_lock_irqsave(&dum->lock, flags);
	ep->desc = NULL;
	ep->stream_en = 0;
	nuke(dum, ep);
	spin_unlock_irqrestore(&dum->lock, flags);

	dev_dbg(udc_dev(dum), "disabled %s\n", _ep->name);
	return 0;
}
/*
 * Allocate a request for @_ep.  Returns NULL on bad endpoint or OOM;
 * the caller owns the request and releases it with dummy_free_request().
 */
static struct usb_request *dummy_alloc_request(struct usb_ep *_ep,
		gfp_t mem_flags)
{
	struct dummy_request *req;

	if (!_ep)
		return NULL;

	/* the old unused 'ep' local (usb_ep_to_dummy_ep) is dropped */
	req = kzalloc(sizeof(*req), mem_flags);
	if (!req)
		return NULL;
	INIT_LIST_HEAD(&req->queue);
	return &req->req;
}
/*
 * Release a request allocated by dummy_alloc_request().
 *
 * Bugfix: the old "!ep->desc && _ep->name != ep0name" bail-out leaked the
 * request whenever a gadget driver freed it after disabling the endpoint
 * (dummy_disable() clears ep->desc).  Free it unconditionally.
 */
static void dummy_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct dummy_request *req;

	if (!_ep || !_req)
		return;

	req = usb_request_to_dummy_request(_req);
	WARN_ON(!list_empty(&req->queue));
	kfree(req);
}
/* completion stub for the internal fifo_req; nothing to do */
static void fifo_complete(struct usb_ep *ep, struct usb_request *req)
{
}
/*
 * Queue a request on @_ep.  Small IN transfers are completed immediately
 * through an emulated one-deep FIFO (copied into dum->fifo_buf); everything
 * else waits on the endpoint queue for the host-side timer to service it.
 * Returns 0 or a negative errno.
 */
static int dummy_queue(struct usb_ep *_ep, struct usb_request *_req,
		gfp_t mem_flags)
{
	struct dummy_ep		*ep;
	struct dummy_request	*req;
	struct dummy		*dum;
	struct dummy_hcd	*dum_hcd;
	unsigned long		flags;

	req = usb_request_to_dummy_request(_req);
	if (!_req || !list_empty(&req->queue) || !_req->complete)
		return -EINVAL;

	ep = usb_ep_to_dummy_ep(_ep);
	if (!_ep || (!ep->desc && _ep->name != ep0name))
		return -EINVAL;

	dum = ep_to_dummy(ep);
	dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
	if (!dum->driver || !is_enabled(dum_hcd))
		return -ESHUTDOWN;

#if 0
	dev_dbg(udc_dev(dum), "ep %p queue req %p to %s, len %d buf %p\n",
			ep, _req, _ep->name, _req->length, _req->buf);
#endif
	_req->status = -EINPROGRESS;
	_req->actual = 0;
	spin_lock_irqsave(&dum->lock, flags);

	/* implement an emulated single-request FIFO */
	if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
			list_empty(&dum->fifo_req.queue) &&
			list_empty(&ep->queue) &&
			_req->length <= FIFO_SIZE) {
		req = &dum->fifo_req;
		req->req = *_req;
		req->req.buf = dum->fifo_buf;
		memcpy(dum->fifo_buf, _req->buf, _req->length);
		req->req.context = dum;
		req->req.complete = fifo_complete;

		list_add_tail(&req->queue, &ep->queue);
		/* complete the caller's request right away; lock dropped
		 * around the callback as usual */
		spin_unlock(&dum->lock);
		_req->actual = _req->length;
		_req->status = 0;
		_req->complete(_ep, _req);
		spin_lock(&dum->lock);
	}  else
		list_add_tail(&req->queue, &ep->queue);
	spin_unlock_irqrestore(&dum->lock, flags);

	/* real hardware would likely enable transfers here, in case
	 * it'd been left NAKing.
	 */
	return 0;
}
/*
 * Remove a not-yet-completed request from @_ep's queue and complete it
 * with -ECONNRESET.  Returns 0 on success, -EINVAL if the request was not
 * found (or already completed), -ESHUTDOWN with no bound driver.
 */
static int dummy_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct dummy_ep		*ep;
	struct dummy		*dum;
	int			retval = -EINVAL;
	unsigned long		flags;
	struct dummy_request	*req = NULL;

	if (!_ep || !_req)
		return retval;
	ep = usb_ep_to_dummy_ep(_ep);
	dum = ep_to_dummy(ep);

	if (!dum->driver)
		return -ESHUTDOWN;

	/* irqs disabled across both the list walk and the completion call */
	local_irq_save(flags);
	spin_lock(&dum->lock);
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req) {
			list_del_init(&req->queue);
			_req->status = -ECONNRESET;
			retval = 0;
			break;
		}
	}
	spin_unlock(&dum->lock);

	if (retval == 0) {
		dev_dbg(udc_dev(dum),
				"dequeued req %p from %s, len %d buf %p\n",
				req, _ep->name, _req->length, _req->buf);
		_req->complete(_ep, _req);
	}
	local_irq_restore(flags);
	return retval;
}
/*
 * Set or clear the halt (stall) condition; when @wedged is set the halt
 * also becomes a wedge.  Refuses to halt a non-empty IN endpoint.
 */
static int
dummy_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct dummy_ep		*ep;
	struct dummy		*dum;

	if (!_ep)
		return -EINVAL;
	ep = usb_ep_to_dummy_ep(_ep);
	dum = ep_to_dummy(ep);
	if (!dum->driver)
		return -ESHUTDOWN;

	if (!value) {
		ep->halted = 0;
		ep->wedged = 0;
		/* FIXME clear emulated data toggle too */
		return 0;
	}

	if (ep->desc && (ep->desc->bEndpointAddress & USB_DIR_IN) &&
			!list_empty(&ep->queue))
		return -EAGAIN;

	ep->halted = 1;
	if (wedged)
		ep->wedged = 1;
	/* FIXME clear emulated data toggle too */
	return 0;
}
/* usb_ep_ops.set_halt: halt/unhalt without wedging */
static int
dummy_set_halt(struct usb_ep *_ep, int value)
{
	return dummy_set_halt_and_wedge(_ep, value, 0);
}
static int dummy_set_wedge(struct usb_ep *_ep)
{
if (!_ep || _ep->name == ep0name)
return -EINVAL;
return dummy_set_halt_and_wedge(_ep, 1, 1);
}
/* endpoint operations exposed to gadget drivers */
static const struct usb_ep_ops dummy_ep_ops = {
	.enable		= dummy_enable,
	.disable	= dummy_disable,

	.alloc_request	= dummy_alloc_request,
	.free_request	= dummy_free_request,

	.queue		= dummy_queue,
	.dequeue	= dummy_dequeue,

	.set_halt	= dummy_set_halt,
	.set_wedge	= dummy_set_wedge,
};
/*-------------------------------------------------------------------------*/
/* there are both host and device side versions of this call ... */
/* there are both host and device side versions of this call ... */
static int dummy_g_get_frame(struct usb_gadget *_gadget)
{
	struct timeval tv;

	/* fake a 1 kHz frame counter from wall-clock milliseconds */
	do_gettimeofday(&tv);
	return (int) (tv.tv_usec / 1000);
}
/*
 * Gadget-initiated remote wakeup.  Only allowed when the host enabled
 * remote wakeup (or HNP) and the port is actually suspended; kicks the
 * root-hub timer to simulate downstream resume signaling.
 */
static int dummy_wakeup(struct usb_gadget *_gadget)
{
	struct dummy_hcd *dum_hcd;

	dum_hcd = gadget_to_dummy_hcd(_gadget);
	if (!(dum_hcd->dum->devstatus & ((1 << USB_DEVICE_B_HNP_ENABLE)
				| (1 << USB_DEVICE_REMOTE_WAKEUP))))
		return -EINVAL;
	if ((dum_hcd->port_status & USB_PORT_STAT_CONNECTION) == 0)
		return -ENOLINK;
	if ((dum_hcd->port_status & USB_PORT_STAT_SUSPEND) == 0 &&
			 dum_hcd->rh_state != DUMMY_RH_SUSPENDED)
		return -EIO;

	/* FIXME: What if the root hub is suspended but the port isn't? */

	/* hub notices our request, issues downstream resume, etc */
	dum_hcd->resuming = 1;
	dum_hcd->re_timeout = jiffies + msecs_to_jiffies(20);
	mod_timer(&dummy_hcd_to_hcd(dum_hcd)->rh_timer, dum_hcd->re_timeout);
	return 0;
}
static int dummy_set_selfpowered(struct usb_gadget *_gadget, int value)
{
struct dummy *dum;
dum = gadget_to_dummy_hcd(_gadget)->dum;
if (value)
dum->devstatus |= (1 << USB_DEVICE_SELF_POWERED);
else
dum->devstatus &= ~(1 << USB_DEVICE_SELF_POWERED);
return 0;
}
/* ep0 maxpacket depends on speed: 2^9 exponent form for SS, 64 otherwise */
static void dummy_udc_update_ep0(struct dummy *dum)
{
	dum->ep[0].ep.maxpacket =
		(dum->gadget.speed == USB_SPEED_SUPER) ? 9 : 64;
}
/*
 * Soft-connect control.  On connect, pick the simulated speed from the
 * module parameters and the driver's max_speed, then update the link
 * state and poke khubd via the root hub.
 */
static int dummy_pullup(struct usb_gadget *_gadget, int value)
{
	struct dummy_hcd *dum_hcd;
	struct dummy	*dum;
	unsigned long	flags;

	dum = gadget_dev_to_dummy(&_gadget->dev);

	if (value && dum->driver) {
		if (mod_data.is_super_speed)
			dum->gadget.speed = dum->driver->max_speed;
		else if (mod_data.is_high_speed)
			dum->gadget.speed = min_t(u8, USB_SPEED_HIGH,
					dum->driver->max_speed);
		else
			dum->gadget.speed = USB_SPEED_FULL;
		dummy_udc_update_ep0(dum);

		if (dum->gadget.speed < dum->driver->max_speed)
			dev_dbg(udc_dev(dum), "This device can perform faster"
				" if you connect it to a %s port...\n",
				usb_speed_string(dum->driver->max_speed));
	}
	/* must look up the hcd AFTER the speed is settled above */
	dum_hcd = gadget_to_dummy_hcd(_gadget);

	spin_lock_irqsave(&dum->lock, flags);
	dum->pullup = (value != 0);
	set_link_state(dum_hcd);
	spin_unlock_irqrestore(&dum->lock, flags);

	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
	return 0;
}
static int dummy_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver);
static int dummy_udc_stop(struct usb_gadget *g,
struct usb_gadget_driver *driver);
/* gadget operations exposed to the UDC core */
static const struct usb_gadget_ops dummy_ops = {
	.get_frame	= dummy_g_get_frame,
	.wakeup		= dummy_wakeup,
	.set_selfpowered = dummy_set_selfpowered,
	.pullup		= dummy_pullup,
	.udc_start	= dummy_udc_start,
	.udc_stop	= dummy_udc_stop,
};
/*-------------------------------------------------------------------------*/
/* "function" sysfs attribute */
/* sysfs "function": name of the bound gadget function, if any */
static ssize_t show_function(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct dummy *dum = gadget_dev_to_dummy(dev);
	const char *fn;

	if (!dum->driver)
		return 0;
	fn = dum->driver->function;
	if (!fn)
		return 0;
	return scnprintf(buf, PAGE_SIZE, "%s\n", fn);
}
static DEVICE_ATTR(function, S_IRUGO, show_function, NULL);
/*-------------------------------------------------------------------------*/
/*
* Driver registration/unregistration.
*
* This is basically hardware-specific; there's usually only one real USB
* device (not host) controller since that's how USB devices are intended
* to work. So most implementations of these api calls will rely on the
* fact that only one driver will ever bind to the hardware. But curious
* hardware can be built with discrete components, so the gadget API doesn't
* require that assumption.
*
* For this emulator, it might be convenient to create a usb slave device
* for each driver that registers: just add to a big root hub.
*/
static int dummy_udc_start(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
if (driver->max_speed == USB_SPEED_UNKNOWN)
return -EINVAL;
/*
* SLAVE side init ... the layer above hardware, which
* can't enumerate without help from the driver we're binding.
*/
dum->devstatus = 0;
dum->driver = driver;
dev_dbg(udc_dev(dum), "binding gadget driver '%s'\n",
driver->driver.name);
return 0;
}
static int dummy_udc_stop(struct usb_gadget *g,
struct usb_gadget_driver *driver)
{
struct dummy_hcd *dum_hcd = gadget_to_dummy_hcd(g);
struct dummy *dum = dum_hcd->dum;
dev_dbg(udc_dev(dum), "unregister gadget driver '%s'\n",
driver->driver.name);
dum->driver = NULL;
return 0;
}
#undef is_enabled
/* The gadget structure is stored inside the hcd structure and will be
* released along with it. */
static void dummy_gadget_release(struct device *dev)
{
	/* nothing to free: the gadget is embedded in 'the_controller' */
}
/* set up the gadget-side endpoint array and ep0, mirroring real hardware */
static void init_dummy_udc_hw(struct dummy *dum)
{
	int i;

	INIT_LIST_HEAD(&dum->gadget.ep_list);
	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
		struct dummy_ep	*ep = &dum->ep[i];

		if (!ep_name[i])
			break;
		ep->ep.name = ep_name[i];
		ep->ep.ops = &dummy_ep_ops;
		list_add_tail(&ep->ep.ep_list, &dum->gadget.ep_list);
		ep->halted = ep->wedged = ep->already_seen =
				ep->setup_stage = 0;
		/* until enable() runs, accept any packet size */
		ep->ep.maxpacket = ~0;
		ep->ep.max_streams = 16;
		ep->last_io = jiffies;
		ep->gadget = &dum->gadget;
		ep->desc = NULL;
		INIT_LIST_HEAD(&ep->queue);
	}

	/* ep0 is special: pulled out of the generic endpoint list */
	dum->gadget.ep0 = &dum->ep[0].ep;
	list_del_init(&dum->ep[0].ep.ep_list);
	INIT_LIST_HEAD(&dum->fifo_req.queue);

#ifdef CONFIG_USB_OTG
	dum->gadget.is_otg = 1;
#endif
}
/*
 * Platform probe for the UDC half: register the gadget device, initialize
 * the emulated endpoints, and register with the UDC core.  Unwind order on
 * failure mirrors the setup order.
 */
static int dummy_udc_probe(struct platform_device *pdev)
{
	struct dummy	*dum = &the_controller;
	int		rc;

	dum->gadget.name = gadget_name;
	dum->gadget.ops = &dummy_ops;
	dum->gadget.max_speed = USB_SPEED_SUPER;

	dev_set_name(&dum->gadget.dev, "gadget");
	dum->gadget.dev.parent = &pdev->dev;
	dum->gadget.dev.release = dummy_gadget_release;
	rc = device_register(&dum->gadget.dev);
	if (rc < 0) {
		/* after a failed device_register() only put_device() is legal */
		put_device(&dum->gadget.dev);
		return rc;
	}

	init_dummy_udc_hw(dum);

	rc = usb_add_gadget_udc(&pdev->dev, &dum->gadget);
	if (rc < 0)
		goto err_udc;

	rc = device_create_file(&dum->gadget.dev, &dev_attr_function);
	if (rc < 0)
		goto err_dev;
	platform_set_drvdata(pdev, dum);
	return rc;

err_dev:
	usb_del_gadget_udc(&dum->gadget);
err_udc:
	device_unregister(&dum->gadget.dev);
	return rc;
}
/* platform remove: tear down in reverse of probe order */
static int dummy_udc_remove(struct platform_device *pdev)
{
	struct dummy	*dum = platform_get_drvdata(pdev);

	usb_del_gadget_udc(&dum->gadget);
	platform_set_drvdata(pdev, NULL);
	device_remove_file(&dum->gadget.dev, &dev_attr_function);
	device_unregister(&dum->gadget.dev);
	return 0;
}
/* common suspend/resume helper: flip the flag and recompute the link state */
static void dummy_udc_pm(struct dummy *dum, struct dummy_hcd *dum_hcd,
		int suspend)
{
	spin_lock_irq(&dum->lock);
	dum->udc_suspended = suspend;	/* 1 = suspending, 0 = resuming */
	set_link_state(dum_hcd);
	spin_unlock_irq(&dum->lock);
}
/* platform PM suspend: mark the UDC suspended and notify the root hub */
static int dummy_udc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct dummy *dum;
	struct dummy_hcd *dum_hcd;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	dum = platform_get_drvdata(pdev);
	dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
	dummy_udc_pm(dum, dum_hcd, 1);
	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
	return 0;
}
/* platform PM resume: clear the suspended flag and notify the root hub */
static int dummy_udc_resume(struct platform_device *pdev)
{
	struct dummy *dum;
	struct dummy_hcd *dum_hcd;

	dev_dbg(&pdev->dev, "%s\n", __func__);
	dum = platform_get_drvdata(pdev);
	dum_hcd = gadget_to_dummy_hcd(&dum->gadget);
	dummy_udc_pm(dum, dum_hcd, 0);
	usb_hcd_poll_rh_status(dummy_hcd_to_hcd(dum_hcd));
	return 0;
}
/* platform driver glue for the gadget (UDC) half */
static struct platform_driver dummy_udc_driver = {
	.probe		= dummy_udc_probe,
	.remove		= dummy_udc_remove,
	.suspend	= dummy_udc_suspend,
	.resume		= dummy_udc_resume,
	.driver		= {
		.name	= (char *) gadget_name,
		.owner	= THIS_MODULE,
	},
};
/*-------------------------------------------------------------------------*/
/* index into stream_en_ep / num_stream: (ep number << 1) | direction */
static unsigned int dummy_get_ep_idx(const struct usb_endpoint_descriptor *desc)
{
	unsigned int index = usb_endpoint_num(desc) << 1;

	if (usb_endpoint_dir_in(desc))
		index |= 1;
	return index;
}
/* MASTER/HOST SIDE DRIVER
*
* this uses the hcd framework to hook up to host side drivers.
* its root hub will only have one device, otherwise it acts like
* a normal host controller.
*
* when urbs are queued, they're just stuck on a list that we
* scan in a timer callback. that callback connects writes from
* the host with reads from the device, and so on, based on the
* usb 2.0 rules.
*/
/* nonzero when the urb's (bulk) endpoint has streams enabled */
static int dummy_ep_stream_en(struct dummy_hcd *dum_hcd, struct urb *urb)
{
	const struct usb_endpoint_descriptor *desc = &urb->ep->desc;

	if (!usb_endpoint_xfer_bulk(desc))
		return 0;
	return dum_hcd->stream_en_ep & (1 << dummy_get_ep_idx(desc));
}
/*
* The max stream number is saved as a nibble so for the 30 possible endpoints
* we only 15 bytes of memory. Therefore we are limited to max 16 streams (0
* means we use only 1 stream). The maximum according to the spec is 16bit so
* if the 16 stream limit is about to go, the array size should be incremented
* to 30 elements of type u16.
*/
/* unpack the per-endpoint max-streams nibble (OUT = high, IN = low) */
static int get_max_streams_for_pipe(struct dummy_hcd *dum_hcd,
		unsigned int pipe)
{
	int nibble = dum_hcd->num_stream[usb_pipeendpoint(pipe)];

	if (usb_pipeout(pipe))
		nibble >>= 4;
	nibble &= 0xf;

	/* stored value is (max streams - 1) */
	return nibble + 1;
}
/* pack the per-endpoint max-streams nibble (OUT = high, IN = low) */
static void set_max_streams_for_pipe(struct dummy_hcd *dum_hcd,
		unsigned int pipe, unsigned int streams)
{
	unsigned int epnum = usb_pipeendpoint(pipe);
	int packed = dum_hcd->num_stream[epnum];

	streams--;	/* store (max streams - 1) */
	if (usb_pipeout(pipe)) {
		packed &= 0xf;
		packed |= streams << 4;
	} else {
		packed &= 0xf0;
		packed |= streams;
	}
	dum_hcd->num_stream[epnum] = packed;
}
/*
 * Check the urb's stream_id against the endpoint's stream configuration:
 * id 0 iff streams are disabled, otherwise 1..max.  An out-of-range id
 * is a host-stack bug and trips BUG().
 */
static int dummy_validate_stream(struct dummy_hcd *dum_hcd, struct urb *urb)
{
	unsigned int max_streams;
	int enabled = dummy_ep_stream_en(dum_hcd, urb);

	if (!urb->stream_id)
		return enabled ? -EINVAL : 0;
	if (!enabled)
		return -EINVAL;

	max_streams = get_max_streams_for_pipe(dum_hcd,
			usb_pipeendpoint(urb->pipe));
	if (urb->stream_id > max_streams) {
		dev_err(dummy_dev(dum_hcd), "Stream id %d is out of range.\n",
				urb->stream_id);
		BUG();
		return -EINVAL;
	}
	return 0;
}
/*
 * hcd.urb_enqueue: validate the urb, link it to the endpoint and to our
 * pending list, and arm the timer that emulates the actual transfer.
 * Returns 0 or a negative errno.
 */
static int dummy_urb_enqueue(
	struct usb_hcd			*hcd,
	struct urb			*urb,
	gfp_t				mem_flags
) {
	struct dummy_hcd *dum_hcd;
	struct urbp	*urbp;
	unsigned long	flags;
	int		rc;

	urbp = kmalloc(sizeof *urbp, mem_flags);
	if (!urbp)
		return -ENOMEM;
	urbp->urb = urb;
	urbp->miter_started = 0;

	dum_hcd = hcd_to_dummy_hcd(hcd);
	spin_lock_irqsave(&dum_hcd->dum->lock, flags);

	rc = dummy_validate_stream(dum_hcd, urb);
	if (rc) {
		kfree(urbp);
		goto done;
	}

	rc = usb_hcd_link_urb_to_ep(hcd, urb);
	if (rc) {
		kfree(urbp);
		goto done;
	}

	/* remember the single attached device; complain if it changes */
	if (!dum_hcd->udev) {
		dum_hcd->udev = urb->dev;
		usb_get_dev(dum_hcd->udev);
	} else if (unlikely(dum_hcd->udev != urb->dev))
		dev_err(dummy_dev(dum_hcd), "usb_device address has changed!\n");

	list_add_tail(&urbp->urbp_list, &dum_hcd->urbp_list);
	urb->hcpriv = urbp;
	if (usb_pipetype(urb->pipe) == PIPE_CONTROL)
		urb->error_count = 1;		/* mark as a new urb */

	/* kick the scheduler, it'll do the rest */
	if (!timer_pending(&dum_hcd->timer))
		mod_timer(&dum_hcd->timer, jiffies + 1);

 done:
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	return rc;
}
/* hcd.urb_dequeue: flag the urb for unlink; the timer gives it back */
static int dummy_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
	unsigned long	flags;
	int		rc;

	/* giveback happens automatically in timer callback,
	 * so make sure the callback happens */
	spin_lock_irqsave(&dum_hcd->dum->lock, flags);

	rc = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (rc == 0 && dum_hcd->rh_state != DUMMY_RH_RUNNING &&
			!list_empty(&dum_hcd->urbp_list))
		mod_timer(&dum_hcd->timer, jiffies);

	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	return rc;
}
/*
 * Copy @len bytes between the urb's buffer (flat or scatter-gather) and the
 * gadget request's buffer, honoring the transfer direction.  The sg iterator
 * state persists in urbp across calls so a request may span timer ticks.
 * Returns bytes copied, or -EINVAL if the sg list runs out early.
 */
static int dummy_perform_transfer(struct urb *urb, struct dummy_request *req,
		u32 len)
{
	void *ubuf, *rbuf;
	struct urbp *urbp = urb->hcpriv;
	int to_host;
	struct sg_mapping_iter *miter = &urbp->miter;
	u32 trans = 0;
	u32 this_sg;
	bool next_sg;

	to_host = usb_pipein(urb->pipe);
	rbuf = req->req.buf + req->req.actual;

	if (!urb->num_sgs) {
		/* flat transfer buffer: a single memcpy suffices */
		ubuf = urb->transfer_buffer + urb->actual_length;
		if (to_host)
			memcpy(ubuf, rbuf, len);
		else
			memcpy(rbuf, ubuf, len);
		return len;
	}

	if (!urbp->miter_started) {
		u32 flags = SG_MITER_ATOMIC;

		if (to_host)
			flags |= SG_MITER_TO_SG;
		else
			flags |= SG_MITER_FROM_SG;

		sg_miter_start(miter, urb->sg, urb->num_sgs, flags);
		urbp->miter_started = 1;
	}
	next_sg = sg_miter_next(miter);
	if (next_sg == false) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	do {
		ubuf = miter->addr;
		this_sg = min_t(u32, len, miter->length);
		miter->consumed = this_sg;
		trans += this_sg;

		if (to_host)
			memcpy(ubuf, rbuf, this_sg);
		else
			memcpy(rbuf, ubuf, this_sg);
		len -= this_sg;

		if (!len)
			break;
		next_sg = sg_miter_next(miter);
		if (next_sg == false) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}

		rbuf += this_sg;
	} while (1);

	sg_miter_stop(miter);
	return trans;
}
/* transfer up to a frame's worth; caller must own lock.
 *
 * Matches the host-side URB against the gadget-side request queue of
 * @ep and moves data until either side completes or the bandwidth
 * budget (@limit) is spent.  *status is updated with the host-side
 * completion code (stays -EINPROGRESS while the URB is incomplete).
 * Returns the remaining bandwidth budget.
 */
static int transfer(struct dummy_hcd *dum_hcd, struct urb *urb,
		struct dummy_ep *ep, int limit, int *status)
{
	struct dummy *dum = dum_hcd->dum;
	struct dummy_request *req;

top:
	/* if there's no request queued, the device is NAKing; return */
	list_for_each_entry(req, &ep->queue, queue) {
		unsigned host_len, dev_len, len;
		int is_short, to_host;
		int rescan = 0;

		/* streams: only pair requests carrying the URB's stream id */
		if (dummy_ep_stream_en(dum_hcd, urb)) {
			if ((urb->stream_id != req->req.stream_id))
				continue;
		}

		/* 1..N packets of ep->ep.maxpacket each ... the last one
		 * may be short (including zero length).
		 *
		 * writer can send a zlp explicitly (length 0) or implicitly
		 * (length mod maxpacket zero, and 'zero' flag); they always
		 * terminate reads.
		 */
		host_len = urb->transfer_buffer_length - urb->actual_length;
		dev_len = req->req.length - req->req.actual;
		len = min(host_len, dev_len);

		/* FIXME update emulated data toggle too */

		to_host = usb_pipein(urb->pipe);
		if (unlikely(len == 0))
			is_short = 1;
		else {
			/* not enough bandwidth left? */
			if (limit < ep->ep.maxpacket && limit < len)
				break;
			len = min_t(unsigned, len, limit);
			if (len == 0)
				break;

			/* use an extra pass for the final short packet */
			if (len > ep->ep.maxpacket) {
				rescan = 1;
				len -= (len % ep->ep.maxpacket);
			}
			is_short = (len % ep->ep.maxpacket) != 0;

			len = dummy_perform_transfer(urb, req, len);

			ep->last_io = jiffies;
			if ((int)len < 0) {
				/* sg iteration failed; propagate errno */
				req->req.status = len;
			} else {
				limit -= len;
				urb->actual_length += len;
				req->req.actual += len;
			}
		}

		/* short packets terminate, maybe with overflow/underflow.
		 * it's only really an error to write too much.
		 *
		 * partially filling a buffer optionally blocks queue advances
		 * (so completion handlers can clean up the queue) but we don't
		 * need to emulate such data-in-flight.
		 */
		if (is_short) {
			if (host_len == dev_len) {
				req->req.status = 0;
				*status = 0;
			} else if (to_host) {
				req->req.status = 0;
				if (dev_len > host_len)
					*status = -EOVERFLOW;
				else
					*status = 0;
			} else if (!to_host) {
				*status = 0;
				if (host_len > dev_len)
					req->req.status = -EOVERFLOW;
				else
					req->req.status = 0;
			}

		/* many requests terminate without a short packet */
		} else {
			if (req->req.length == req->req.actual
					&& !req->req.zero)
				req->req.status = 0;
			if (urb->transfer_buffer_length == urb->actual_length
					&& !(urb->transfer_flags
						& URB_ZERO_PACKET))
				*status = 0;
		}

		/* device side completion --> continuable */
		if (req->req.status != -EINPROGRESS) {
			list_del_init(&req->queue);

			/* drop the lock across the gadget's completion
			 * callback, which may queue more requests */
			spin_unlock(&dum->lock);
			req->req.complete(&ep->ep, &req->req);
			spin_lock(&dum->lock);

			/* requests might have been unlinked... */
			rescan = 1;
		}

		/* host side completion --> terminate */
		if (*status != -EINPROGRESS)
			break;

		/* rescan to continue with any other queued i/o */
		if (rescan)
			goto top;
	}
	return limit;
}
/*
 * Per-frame byte budget for a periodic endpoint, derived from the
 * gadget's current speed and the endpoint descriptor.
 */
static int periodic_bytes(struct dummy *dum, struct dummy_ep *ep)
{
	int budget = ep->ep.maxpacket;

	switch (dum->gadget.speed) {
	case USB_SPEED_HIGH: {
		/* high bandwidth mode: descriptor bits 12:11 give the
		 * number of additional transactions per microframe, and
		 * there are 8 microframes per frame */
		int mult = (usb_endpoint_maxp(ep->desc) >> 11) & 0x03;

		budget += budget * (mult * 8 /* applies to entire frame */);
		break;
	}
	case USB_SPEED_SUPER:
		switch (usb_endpoint_type(ep->desc)) {
		case USB_ENDPOINT_XFER_ISOC:
			/* Sec. 4.4.8.2 USB3.0 Spec */
			budget = 3 * 16 * 1024 * 8;
			break;
		case USB_ENDPOINT_XFER_INT:
			/* Sec. 4.4.7.2 USB3.0 Spec */
			budget = 3 * 1024 * 8;
			break;
		case USB_ENDPOINT_XFER_BULK:
		default:
			break;
		}
		break;
	default:
		break;
	}
	return budget;
}
/* A port is "active" when it is connected and enabled and not suspended. */
#define is_active(dum_hcd)	((dum_hcd->port_status & \
		(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | \
			USB_PORT_STAT_SUSPEND)) \
		== (USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE))

/*
 * Map a USB endpoint address (number | direction bit) to the
 * gadget-side dummy_ep.  Returns NULL when the relevant root-hub port
 * is inactive or no configured endpoint matches; address 0 (either
 * direction) always maps to ep0.
 */
static struct dummy_ep *find_endpoint(struct dummy *dum, u8 address)
{
	int i;

	/* check the root hub matching the gadget's current speed */
	if (!is_active((dum->gadget.speed == USB_SPEED_SUPER ?
			dum->ss_hcd : dum->hs_hcd)))
		return NULL;
	if ((address & ~USB_DIR_IN) == 0)
		return &dum->ep[0];
	for (i = 1; i < DUMMY_ENDPOINTS; i++) {
		struct dummy_ep *ep = &dum->ep[i];

		if (!ep->desc)
			continue;
		if (ep->desc->bEndpointAddress == address)
			return ep;
	}
	return NULL;
}

#undef is_active
/* shorthand for the bmRequestType values handled below */
#define Dev_Request	(USB_TYPE_STANDARD | USB_RECIP_DEVICE)
#define Dev_InRequest	(Dev_Request | USB_DIR_IN)
#define Intf_Request	(USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
#define Intf_InRequest	(Intf_Request | USB_DIR_IN)
#define Ep_Request	(USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
#define Ep_InRequest	(Ep_Request | USB_DIR_IN)

/**
 * handle_control_request() - handles all control transfers
 * @dum_hcd: pointer to the dummy_hcd whose root hub received the request
 * @urb: the urb request to handle
 * @setup: pointer to the setup data for a USB device control
 *	 request
 * @status: pointer to request handling status
 *
 * Return 0 - if the request was handled
 *	  1 - if the request wasn't handled (the caller forwards it to
 *	      the gadget driver's setup() callback)
 *	  error code on error
 */
static int handle_control_request(struct dummy_hcd *dum_hcd, struct urb *urb,
				  struct usb_ctrlrequest *setup,
				  int *status)
{
	struct dummy_ep		*ep2;
	struct dummy		*dum = dum_hcd->dum;
	int			ret_val = 1;
	unsigned	w_index;
	unsigned	w_value;

	w_index = le16_to_cpu(setup->wIndex);
	w_value = le16_to_cpu(setup->wValue);
	switch (setup->bRequest) {
	case USB_REQ_SET_ADDRESS:
		if (setup->bRequestType != Dev_Request)
			break;
		/* the gadget driver never sees SET_ADDRESS; we absorb it */
		dum->address = w_value;
		*status = 0;
		dev_dbg(udc_dev(dum), "set_address = %d\n",
				w_value);
		ret_val = 0;
		break;
	case USB_REQ_SET_FEATURE:
		if (setup->bRequestType == Dev_Request) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				break;
			case USB_DEVICE_B_HNP_ENABLE:
				dum->gadget.b_hnp_enable = 1;
				break;
			case USB_DEVICE_A_HNP_SUPPORT:
				dum->gadget.a_hnp_support = 1;
				break;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				dum->gadget.a_alt_hnp_support = 1;
				break;
			/* the U1/U2/LTM features are USB 3.0 only; w_value
			 * is remapped to the matching devstatus bit */
			case USB_DEVICE_U1_ENABLE:
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_U1_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			case USB_DEVICE_U2_ENABLE:
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_U2_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			case USB_DEVICE_LTM_ENABLE:
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_LTM_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			default:
				ret_val = -EOPNOTSUPP;
			}
			if (ret_val == 0) {
				dum->devstatus |= (1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == Ep_Request) {
			/* endpoint halt */
			ep2 = find_endpoint(dum, w_index);
			if (!ep2 || ep2->ep.name == ep0name) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			ep2->halted = 1;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_CLEAR_FEATURE:
		if (setup->bRequestType == Dev_Request) {
			ret_val = 0;
			switch (w_value) {
			case USB_DEVICE_REMOTE_WAKEUP:
				/* no-op remap; kept for symmetry with the
				 * U1/U2/LTM cases below */
				w_value = USB_DEVICE_REMOTE_WAKEUP;
				break;
			case USB_DEVICE_U1_ENABLE:
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_U1_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			case USB_DEVICE_U2_ENABLE:
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_U2_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			case USB_DEVICE_LTM_ENABLE:
				if (dummy_hcd_to_hcd(dum_hcd)->speed ==
				    HCD_USB3)
					w_value = USB_DEV_STAT_LTM_ENABLED;
				else
					ret_val = -EOPNOTSUPP;
				break;
			default:
				ret_val = -EOPNOTSUPP;
				break;
			}
			if (ret_val == 0) {
				dum->devstatus &= ~(1 << w_value);
				*status = 0;
			}
		} else if (setup->bRequestType == Ep_Request) {
			/* endpoint halt */
			ep2 = find_endpoint(dum, w_index);
			if (!ep2) {
				ret_val = -EOPNOTSUPP;
				break;
			}
			/* a wedged endpoint stays halted until re-enabled */
			if (!ep2->wedged)
				ep2->halted = 0;
			ret_val = 0;
			*status = 0;
		}
		break;
	case USB_REQ_GET_STATUS:
		if (setup->bRequestType == Dev_InRequest
				|| setup->bRequestType == Intf_InRequest
				|| setup->bRequestType == Ep_InRequest) {
			char *buf;
			/*
			 * device: remote wakeup, selfpowered
			 * interface: nothing
			 * endpoint: halt
			 */
			buf = (char *)urb->transfer_buffer;
			if (urb->transfer_buffer_length > 0) {
				if (setup->bRequestType == Ep_InRequest) {
					ep2 = find_endpoint(dum, w_index);
					if (!ep2) {
						ret_val = -EOPNOTSUPP;
						break;
					}
					buf[0] = ep2->halted;
				} else if (setup->bRequestType ==
					   Dev_InRequest) {
					buf[0] = (u8)dum->devstatus;
				} else
					buf[0] = 0;
			}
			if (urb->transfer_buffer_length > 1)
				buf[1] = 0;
			/* status stage is always 2 bytes, possibly clipped
			 * by the host's buffer */
			urb->actual_length = min_t(u32, 2,
				urb->transfer_buffer_length);
			ret_val = 0;
			*status = 0;
		}
		break;
	}
	return ret_val;
}
/* drive both sides of the transfers; looks like irq handlers to
 * both drivers except the callbacks aren't in_irq().
 *
 * This is the heart of the emulation: once per "frame" it walks the
 * queued URBs, pairs each with a gadget endpoint, handles the ep0
 * setup stage itself, moves data via transfer(), and gives completed
 * URBs back to the host stack.  It re-arms itself while work remains.
 */
static void dummy_timer(unsigned long _dum_hcd)
{
	struct dummy_hcd *dum_hcd = (struct dummy_hcd *) _dum_hcd;
	struct dummy *dum = dum_hcd->dum;
	struct urbp *urbp, *tmp;
	unsigned long flags;
	int limit, total;
	int i;

	/* simplistic model for one frame's bandwidth */
	switch (dum->gadget.speed) {
	case USB_SPEED_LOW:
		total = 8/*bytes*/ * 12/*packets*/;
		break;
	case USB_SPEED_FULL:
		total = 64/*bytes*/ * 19/*packets*/;
		break;
	case USB_SPEED_HIGH:
		total = 512/*bytes*/ * 13/*packets*/ * 8/*uframes*/;
		break;
	case USB_SPEED_SUPER:
		/* Bus speed is 500000 bytes/ms, so use a little less */
		total = 490000;
		break;
	default:
		dev_err(dummy_dev(dum_hcd), "bogus device speed\n");
		return;
	}

	/* FIXME if HZ != 1000 this will probably misbehave ... */

	/* look at each urb queued by the host side driver */
	spin_lock_irqsave(&dum->lock, flags);

	if (!dum_hcd->udev) {
		dev_err(dummy_dev(dum_hcd),
				"timer fired with no URBs pending?\n");
		spin_unlock_irqrestore(&dum->lock, flags);
		return;
	}

	/* reset per-frame "visited" marks on every endpoint */
	for (i = 0; i < DUMMY_ENDPOINTS; i++) {
		if (!ep_name[i])
			break;
		dum->ep[i].already_seen = 0;
	}

restart:
	list_for_each_entry_safe(urbp, tmp, &dum_hcd->urbp_list, urbp_list) {
		struct urb		*urb;
		struct dummy_request	*req;
		u8			address;
		struct dummy_ep		*ep = NULL;
		int			type;
		int			status = -EINPROGRESS;

		urb = urbp->urb;
		if (urb->unlinked)
			goto return_urb;
		else if (dum_hcd->rh_state != DUMMY_RH_RUNNING)
			continue;
		type = usb_pipetype(urb->pipe);

		/* used up this frame's non-periodic bandwidth?
		 * FIXME there's infinite bandwidth for control and
		 * periodic transfers ... unrealistic.
		 */
		if (total <= 0 && type == PIPE_BULK)
			continue;

		/* find the gadget's ep for this request (if configured) */
		address = usb_pipeendpoint (urb->pipe);
		if (usb_pipein(urb->pipe))
			address |= USB_DIR_IN;
		ep = find_endpoint(dum, address);
		if (!ep) {
			/* set_configuration() disagreement */
			dev_dbg(dummy_dev(dum_hcd),
				"no ep configured for urb %p\n",
				urb);
			status = -EPROTO;
			goto return_urb;
		}

		/* only service each endpoint once per frame pass */
		if (ep->already_seen)
			continue;
		ep->already_seen = 1;
		if (ep == &dum->ep[0] && urb->error_count) {
			ep->setup_stage = 1;	/* a new urb */
			urb->error_count = 0;
		}
		if (ep->halted && !ep->setup_stage) {
			/* NOTE: must not be iso! */
			dev_dbg(dummy_dev(dum_hcd), "ep %s halted, urb %p\n",
					ep->ep.name, urb);
			status = -EPIPE;
			goto return_urb;
		}
		/* FIXME make sure both ends agree on maxpacket */

		/* handle control requests */
		if (ep == &dum->ep[0] && ep->setup_stage) {
			struct usb_ctrlrequest		setup;
			int				value = 1;

			setup = *(struct usb_ctrlrequest *) urb->setup_packet;
			/* paranoia, in case of stale queued data */
			list_for_each_entry(req, &ep->queue, queue) {
				list_del_init(&req->queue);
				req->req.status = -EOVERFLOW;
				dev_dbg(udc_dev(dum), "stale req = %p\n",
						req);

				/* unlock across the gadget callback, then
				 * restart the walk since the lists may have
				 * changed underneath us */
				spin_unlock(&dum->lock);
				req->req.complete(&ep->ep, &req->req);
				spin_lock(&dum->lock);
				ep->already_seen = 0;
				goto restart;
			}

			/* gadget driver never sees set_address or operations
			 * on standard feature flags.  some hardware doesn't
			 * even expose them.
			 */
			ep->last_io = jiffies;
			ep->setup_stage = 0;
			ep->halted = 0;

			value = handle_control_request(dum_hcd, urb, &setup,
						       &status);

			/* gadget driver handles all other requests.  block
			 * until setup() returns; no reentrancy issues etc.
			 */
			if (value > 0) {
				spin_unlock(&dum->lock);
				value = dum->driver->setup(&dum->gadget,
						&setup);
				spin_lock(&dum->lock);

				if (value >= 0) {
					/* no delays (max 64KB data stage) */
					limit = 64*1024;
					goto treat_control_like_bulk;
				}
				/* error, see below */
			}

			if (value < 0) {
				if (value != -EOPNOTSUPP)
					dev_dbg(udc_dev(dum),
						"setup --> %d\n",
						value);
				status = -EPIPE;
				urb->actual_length = 0;
			}

			goto return_urb;
		}

		/* non-control requests */
		limit = total;
		switch (usb_pipetype(urb->pipe)) {
		case PIPE_ISOCHRONOUS:
			/* FIXME is it urb->interval since the last xfer?
			 * use urb->iso_frame_desc[i].
			 * complete whether or not ep has requests queued.
			 * report random errors, to debug drivers.
			 */
			limit = max(limit, periodic_bytes(dum, ep));
			status = -ENOSYS;
			break;

		case PIPE_INTERRUPT:
			/* FIXME is it urb->interval since the last xfer?
			 * this almost certainly polls too fast.
			 */
			limit = max(limit, periodic_bytes(dum, ep));
			/* FALLTHROUGH */

		default:
treat_control_like_bulk:
			ep->last_io = jiffies;
			total = transfer(dum_hcd, urb, ep, limit, &status);
			break;
		}

		/* incomplete transfer? */
		if (status == -EINPROGRESS)
			continue;

return_urb:
		list_del(&urbp->urbp_list);
		kfree(urbp);
		if (ep)
			ep->already_seen = ep->setup_stage = 0;

		usb_hcd_unlink_urb_from_ep(dummy_hcd_to_hcd(dum_hcd), urb);
		spin_unlock(&dum->lock);
		usb_hcd_giveback_urb(dummy_hcd_to_hcd(dum_hcd), urb, status);
		spin_lock(&dum->lock);

		goto restart;
	}

	if (list_empty(&dum_hcd->urbp_list)) {
		/* nothing left queued; drop the device reference taken
		 * in dummy_urb_enqueue() */
		usb_put_dev(dum_hcd->udev);
		dum_hcd->udev = NULL;
	} else if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
		/* want a 1 msec delay here */
		mod_timer(&dum_hcd->timer, jiffies + msecs_to_jiffies(1));
	}

	spin_unlock_irqrestore(&dum->lock, flags);
}
/*-------------------------------------------------------------------------*/

/* all the "change" bits of the port status word, shifted into the
 * upper 16 bits where wPortChange lives */
#define PORT_C_MASK \
	((USB_PORT_STAT_C_CONNECTION \
	| USB_PORT_STAT_C_ENABLE \
	| USB_PORT_STAT_C_SUSPEND \
	| USB_PORT_STAT_C_OVERCURRENT \
	| USB_PORT_STAT_C_RESET) << 16)

/*
 * Root-hub status polling callback.  Completes a pending resume when
 * its 20 ms signaling window has elapsed, then reports (via *buf, a
 * one-port bitmap) whether port 1 has any unacknowledged changes.
 * Returns 1 when there are changes to report, else 0.
 */
static int dummy_hub_status(struct usb_hcd *hcd, char *buf)
{
	struct dummy_hcd	*dum_hcd;
	unsigned long		flags;
	int			retval = 0;

	dum_hcd = hcd_to_dummy_hcd(hcd);

	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
	if (!HCD_HW_ACCESSIBLE(hcd))
		goto done;

	/* finish a resume whose signaling period has expired */
	if (dum_hcd->resuming && time_after_eq(jiffies, dum_hcd->re_timeout)) {
		dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
		dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
		set_link_state(dum_hcd);
	}

	if ((dum_hcd->port_status & PORT_C_MASK) != 0) {
		*buf = (1 << 1);	/* bit 1 == port 1 changed */
		dev_dbg(dummy_dev(dum_hcd), "port status 0x%08x has changes\n",
				dum_hcd->port_status);
		retval = 1;
		if (dum_hcd->rh_state == DUMMY_RH_SUSPENDED)
			usb_hcd_resume_root_hub(hcd);
	}
done:
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	return retval;
}
/*
 * Fill in the SuperSpeed hub descriptor for the emulated root hub.
 *
 * Fix: DeviceRemovable is a per-port bitmap where a SET bit means the
 * device on that port is NON-removable.  The old value (0xffff) told
 * the hub driver the attached gadget could never be unplugged; the
 * port 1 bit must be clear.
 */
static inline void
ss_hub_descriptor(struct usb_hub_descriptor *desc)
{
	memset(desc, 0, sizeof *desc);
	desc->bDescriptorType = 0x2a;		/* USB_DT_SS_HUB */
	desc->bDescLength = 12;
	desc->wHubCharacteristics = cpu_to_le16(0x0001);
	desc->bNbrPorts = 1;
	desc->u.ss.bHubHdrDecLat = 0x04; /* Worst case: 0.4 micro sec*/
	desc->u.ss.DeviceRemovable = 0;	/* port 1 device is removable */
}
/*
 * Fill in the USB 2.0 hub descriptor for the emulated root hub.
 *
 * Fix: in the USB 2.0 hub descriptor the first byte after bNbrPorts is
 * DeviceRemovable (set bit == non-removable device) and the following
 * byte is PortPwrCtrlMask, which must be all ones (USB 2.0 spec
 * 11.23.2.1).  The old code set DeviceRemovable to 0xff, wrongly
 * marking port 1 as non-removable.
 */
static inline void hub_descriptor(struct usb_hub_descriptor *desc)
{
	memset(desc, 0, sizeof *desc);
	desc->bDescriptorType = 0x29;		/* USB_DT_HUB */
	desc->bDescLength = 9;
	desc->wHubCharacteristics = cpu_to_le16(0x0001);
	desc->bNbrPorts = 1;
	desc->u.hs.DeviceRemovable[0] = 0;	/* port 1 device is removable */
	desc->u.hs.DeviceRemovable[1] = 0xff;	/* PortPwrCtrlMask */
}
/*
 * Root-hub control-request handler (hub class requests routed here by
 * usbcore).  Emulates a single-port hub for either the USB 2.0 or the
 * USB 3.0 root hub, depending on hcd->speed.  Returns 0 on success or
 * -EPIPE ("protocol stall") / -ETIMEDOUT on error.
 *
 * Fix: in ClearPortFeature/USB_PORT_FEAT_POWER the power-status tests
 * were swapped — the HCD_USB3 branch tested USB_PORT_STAT_POWER and
 * the USB 2.0 branch tested USB_SS_PORT_STAT_POWER.  They now match
 * the (correct) orientation used by SetPortFeature/USB_PORT_FEAT_POWER
 * below.
 */
static int dummy_hub_control(
	struct usb_hcd	*hcd,
	u16		typeReq,
	u16		wValue,
	u16		wIndex,
	char		*buf,
	u16		wLength
) {
	struct dummy_hcd *dum_hcd;
	int		retval = 0;
	unsigned long	flags;

	if (!HCD_HW_ACCESSIBLE(hcd))
		return -ETIMEDOUT;

	dum_hcd = hcd_to_dummy_hcd(hcd);

	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
	switch (typeReq) {
	case ClearHubFeature:
		break;
	case ClearPortFeature:
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			if (hcd->speed == HCD_USB3) {
				dev_dbg(dummy_dev(dum_hcd),
					 "USB_PORT_FEAT_SUSPEND req not "
					 "supported for USB 3.0 roothub\n");
				goto error;
			}
			if (dum_hcd->port_status & USB_PORT_STAT_SUSPEND) {
				/* 20msec resume signaling */
				dum_hcd->resuming = 1;
				dum_hcd->re_timeout = jiffies +
						msecs_to_jiffies(20);
			}
			break;
		case USB_PORT_FEAT_POWER:
			/* SS hubs report power in USB_SS_PORT_STAT_POWER;
			 * HS/FS hubs use USB_PORT_STAT_POWER */
			if (hcd->speed == HCD_USB3) {
				if (dum_hcd->port_status &
							USB_SS_PORT_STAT_POWER)
					dev_dbg(dummy_dev(dum_hcd),
							"power-off\n");
			} else
				if (dum_hcd->port_status &
							USB_PORT_STAT_POWER)
					dev_dbg(dummy_dev(dum_hcd),
							"power-off\n");
			/* FALLS THROUGH */
		default:
			/* NOTE(review): "1 << wValue" assumes the feature
			 * number equals the status bit position; for the
			 * USB 3.0 hub USB_PORT_FEAT_POWER (8) does not
			 * match USB_SS_PORT_STAT_POWER (bit 9) — confirm
			 * against the xHCI hub emulation rules */
			dum_hcd->port_status &= ~(1 << wValue);
			set_link_state(dum_hcd);
		}
		break;
	case GetHubDescriptor:
		if (hcd->speed == HCD_USB3 &&
				(wLength < USB_DT_SS_HUB_SIZE ||
				 wValue != (USB_DT_SS_HUB << 8))) {
			dev_dbg(dummy_dev(dum_hcd),
				"Wrong hub descriptor type for "
				"USB 3.0 roothub.\n");
			goto error;
		}
		if (hcd->speed == HCD_USB3)
			ss_hub_descriptor((struct usb_hub_descriptor *) buf);
		else
			hub_descriptor((struct usb_hub_descriptor *) buf);
		break;
	case GetHubStatus:
		*(__le32 *) buf = cpu_to_le32(0);
		break;
	case GetPortStatus:
		if (wIndex != 1)
			retval = -EPIPE;

		/* whoever resets or resumes must GetPortStatus to
		 * complete it!!
		 */
		if (dum_hcd->resuming &&
				time_after_eq(jiffies, dum_hcd->re_timeout)) {
			dum_hcd->port_status |= (USB_PORT_STAT_C_SUSPEND << 16);
			dum_hcd->port_status &= ~USB_PORT_STAT_SUSPEND;
		}
		if ((dum_hcd->port_status & USB_PORT_STAT_RESET) != 0 &&
				time_after_eq(jiffies, dum_hcd->re_timeout)) {
			dum_hcd->port_status |= (USB_PORT_STAT_C_RESET << 16);
			dum_hcd->port_status &= ~USB_PORT_STAT_RESET;
			if (dum_hcd->dum->pullup) {
				dum_hcd->port_status |= USB_PORT_STAT_ENABLE;

				if (hcd->speed < HCD_USB3) {
					switch (dum_hcd->dum->gadget.speed) {
					case USB_SPEED_HIGH:
						dum_hcd->port_status |=
						      USB_PORT_STAT_HIGH_SPEED;
						break;
					case USB_SPEED_LOW:
						dum_hcd->dum->gadget.ep0->
							maxpacket = 8;
						dum_hcd->port_status |=
							USB_PORT_STAT_LOW_SPEED;
						break;
					default:
						dum_hcd->dum->gadget.speed =
							USB_SPEED_FULL;
						break;
					}
				}
			}
		}
		set_link_state(dum_hcd);
		((__le16 *) buf)[0] = cpu_to_le16(dum_hcd->port_status);
		((__le16 *) buf)[1] = cpu_to_le16(dum_hcd->port_status >> 16);
		break;
	case SetHubFeature:
		retval = -EPIPE;
		break;
	case SetPortFeature:
		switch (wValue) {
		case USB_PORT_FEAT_LINK_STATE:
			if (hcd->speed != HCD_USB3) {
				dev_dbg(dummy_dev(dum_hcd),
					 "USB_PORT_FEAT_LINK_STATE req not "
					 "supported for USB 2.0 roothub\n");
				goto error;
			}
			/*
			 * Since this is dummy we don't have an actual link so
			 * there is nothing to do for the SET_LINK_STATE cmd
			 */
			break;
		case USB_PORT_FEAT_U1_TIMEOUT:
		case USB_PORT_FEAT_U2_TIMEOUT:
			/* TODO: add suspend/resume support! */
			if (hcd->speed != HCD_USB3) {
				dev_dbg(dummy_dev(dum_hcd),
					 "USB_PORT_FEAT_U1/2_TIMEOUT req not "
					 "supported for USB 2.0 roothub\n");
				goto error;
			}
			break;
		case USB_PORT_FEAT_SUSPEND:
			/* Applicable only for USB2.0 hub */
			if (hcd->speed == HCD_USB3) {
				dev_dbg(dummy_dev(dum_hcd),
					 "USB_PORT_FEAT_SUSPEND req not "
					 "supported for USB 3.0 roothub\n");
				goto error;
			}
			if (dum_hcd->active) {
				dum_hcd->port_status |= USB_PORT_STAT_SUSPEND;

				/* HNP would happen here; for now we
				 * assume b_bus_req is always true.
				 */
				set_link_state(dum_hcd);
				if (((1 << USB_DEVICE_B_HNP_ENABLE)
						& dum_hcd->dum->devstatus) != 0)
					dev_dbg(dummy_dev(dum_hcd),
							"no HNP yet!\n");
			}
			break;
		case USB_PORT_FEAT_POWER:
			if (hcd->speed == HCD_USB3)
				dum_hcd->port_status |= USB_SS_PORT_STAT_POWER;
			else
				dum_hcd->port_status |= USB_PORT_STAT_POWER;
			set_link_state(dum_hcd);
			break;
		case USB_PORT_FEAT_BH_PORT_RESET:
			/* Applicable only for USB3.0 hub */
			if (hcd->speed != HCD_USB3) {
				dev_dbg(dummy_dev(dum_hcd),
					 "USB_PORT_FEAT_BH_PORT_RESET req not "
					 "supported for USB 2.0 roothub\n");
				goto error;
			}
			/* FALLS THROUGH */
		case USB_PORT_FEAT_RESET:
			/* if it's already enabled, disable */
			if (hcd->speed == HCD_USB3) {
				dum_hcd->port_status = 0;
				dum_hcd->port_status =
					(USB_SS_PORT_STAT_POWER |
					 USB_PORT_STAT_CONNECTION |
					 USB_PORT_STAT_RESET);
			} else
				dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
					| USB_PORT_STAT_LOW_SPEED
					| USB_PORT_STAT_HIGH_SPEED);
			/*
			 * We want to reset device status. All but the
			 * Self powered feature
			 */
			dum_hcd->dum->devstatus &=
				(1 << USB_DEVICE_SELF_POWERED);
			/*
			 * FIXME USB3.0: what is the correct reset signaling
			 * interval? Is it still 50msec as for HS?
			 */
			dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
			/* FALLS THROUGH */
		default:
			/* other features are only set while the port is
			 * powered */
			if (hcd->speed == HCD_USB3) {
				if ((dum_hcd->port_status &
				     USB_SS_PORT_STAT_POWER) != 0) {
					dum_hcd->port_status |= (1 << wValue);
					set_link_state(dum_hcd);
				}
			} else
				if ((dum_hcd->port_status &
				     USB_PORT_STAT_POWER) != 0) {
					dum_hcd->port_status |= (1 << wValue);
					set_link_state(dum_hcd);
				}
		}
		break;
	case GetPortErrorCount:
		if (hcd->speed != HCD_USB3) {
			dev_dbg(dummy_dev(dum_hcd),
				 "GetPortErrorCount req not "
				 "supported for USB 2.0 roothub\n");
			goto error;
		}
		/* We'll always return 0 since this is a dummy hub */
		*(__le32 *) buf = cpu_to_le32(0);
		break;
	case SetHubDepth:
		if (hcd->speed != HCD_USB3) {
			dev_dbg(dummy_dev(dum_hcd),
				 "SetHubDepth req not supported for "
				 "USB 2.0 roothub\n");
			goto error;
		}
		break;
	default:
		dev_dbg(dummy_dev(dum_hcd),
			"hub control req%04x v%04x i%04x l%d\n",
			typeReq, wValue, wIndex, wLength);
error:
		/* "protocol stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);

	/* let the hub driver know right away if something changed */
	if ((dum_hcd->port_status & PORT_C_MASK) != 0)
		usb_hcd_poll_rh_status(hcd);
	return retval;
}
/* Root-hub bus suspend: mark the hub suspended and update the emulated
 * link state; always succeeds. */
static int dummy_bus_suspend(struct usb_hcd *hcd)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);

	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);

	spin_lock_irq(&dum_hcd->dum->lock);
	dum_hcd->rh_state = DUMMY_RH_SUSPENDED;
	set_link_state(dum_hcd);
	hcd->state = HC_STATE_SUSPENDED;
	spin_unlock_irq(&dum_hcd->dum->lock);
	return 0;
}
/* Root-hub bus resume: bring the hub back to the running state and
 * restart the transfer timer if URBs are still queued.  Fails with
 * -ESHUTDOWN when the controller hardware is not accessible. */
static int dummy_bus_resume(struct usb_hcd *hcd)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
	int retval = 0;

	dev_dbg(&hcd->self.root_hub->dev, "%s\n", __func__);

	spin_lock_irq(&dum_hcd->dum->lock);
	if (HCD_HW_ACCESSIBLE(hcd)) {
		dum_hcd->rh_state = DUMMY_RH_RUNNING;
		set_link_state(dum_hcd);
		if (!list_empty(&dum_hcd->urbp_list))
			mod_timer(&dum_hcd->timer, jiffies);
		hcd->state = HC_STATE_RUNNING;
	} else {
		retval = -ESHUTDOWN;
	}
	spin_unlock_irq(&dum_hcd->dum->lock);
	return retval;
}
/*-------------------------------------------------------------------------*/
/*
 * Format one URB as a line of sysfs output; returns snprintf()'s
 * would-be length (may exceed @size when truncated).
 *
 * Cleanup: the original built the speed/type strings with GNU
 * statement-expressions littered with stray '\' line continuations
 * left over from a macro-era version; plain switch statements on
 * locals produce identical output and are standard C.
 */
static inline ssize_t show_urb(char *buf, size_t size, struct urb *urb)
{
	int ep = usb_pipeendpoint(urb->pipe);
	const char *speed;
	const char *type;

	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		speed = "ls";
		break;
	case USB_SPEED_FULL:
		speed = "fs";
		break;
	case USB_SPEED_HIGH:
		speed = "hs";
		break;
	case USB_SPEED_SUPER:
		speed = "ss";
		break;
	default:
		speed = "?";
		break;
	}

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
		type = "";
		break;
	case PIPE_BULK:
		type = "-bulk";
		break;
	case PIPE_INTERRUPT:
		type = "-int";
		break;
	default:
		type = "-iso";
		break;
	}

	return snprintf(buf, size,
		"urb/%p %s ep%d%s%s len %d/%d\n",
		urb, speed, ep,
		ep ? (usb_pipein(urb->pipe) ? "in" : "out") : "",
		type,
		urb->actual_length, urb->transfer_buffer_length);
}
/*
 * sysfs "urbs" attribute: dump every queued URB, one line each.
 *
 * Fix: show_urb() returns snprintf()'s would-be length, which can be
 * larger than the space that remains in the page.  The old code added
 * that value to "size" and "buf" unconditionally, so a long queue
 * could advance "buf" past the end of the sysfs page and overflow it
 * on the next iteration.  Clamp on truncation and stop.
 */
static ssize_t show_urbs(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct usb_hcd		*hcd = dev_get_drvdata(dev);
	struct dummy_hcd	*dum_hcd = hcd_to_dummy_hcd(hcd);
	struct urbp		*urbp;
	size_t			size = 0;
	unsigned long		flags;

	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
	list_for_each_entry(urbp, &dum_hcd->urbp_list, urbp_list) {
		size_t temp;

		temp = show_urb(buf, PAGE_SIZE - size, urbp->urb);
		if (temp >= PAGE_SIZE - size) {
			/* output truncated: snprintf() wrote at most
			 * PAGE_SIZE - size - 1 bytes; the page is full */
			size = PAGE_SIZE - 1;
			break;
		}
		buf += temp;
		size += temp;
	}
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);

	return size;
}
static DEVICE_ATTR(urbs, S_IRUGO, show_urbs, NULL);
/*
 * Start the shared (SuperSpeed) hcd: initialize its timer, state and
 * URB list.  Always returns 0.
 *
 * Fix: the original ended with "return 0;" followed by an unreachable
 * device_create_file() call; the dead code has been removed.  Note
 * that this means the 'urbs' sysfs attribute is only created for the
 * primary USB 2.0 hcd (see dummy_start()).
 */
static int dummy_start_ss(struct dummy_hcd *dum_hcd)
{
	init_timer(&dum_hcd->timer);
	dum_hcd->timer.function = dummy_timer;
	dum_hcd->timer.data = (unsigned long)dum_hcd;
	dum_hcd->rh_state = DUMMY_RH_RUNNING;
	dum_hcd->stream_en_ep = 0;
	INIT_LIST_HEAD(&dum_hcd->urbp_list);
	dummy_hcd_to_hcd(dum_hcd)->power_budget = POWER_BUDGET;
	dummy_hcd_to_hcd(dum_hcd)->state = HC_STATE_RUNNING;
	dummy_hcd_to_hcd(dum_hcd)->uses_new_polling = 1;
#ifdef CONFIG_USB_OTG
	dummy_hcd_to_hcd(dum_hcd)->self.otg_port = 1;
#endif
	/* FIXME 'urbs' should be a per-device thing, maybe in usbcore */
	return 0;
}
/*
 * hc_driver ->start callback.  For the primary (USB 2.0) hcd this
 * initializes the shared lock, timer, state and URB list and creates
 * the 'urbs' sysfs attribute; the shared USB 3.0 hcd is handed off to
 * dummy_start_ss() instead.
 */
static int dummy_start(struct usb_hcd *hcd)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);

	/*
	 * MASTER side init ... we emulate a root hub that'll only ever
	 * talk to one device (the slave side).  Also appears in sysfs,
	 * just like more familiar pci-based HCDs.
	 */
	if (!usb_hcd_is_primary_hcd(hcd))
		return dummy_start_ss(dum_hcd);

	spin_lock_init(&dum_hcd->dum->lock);
	init_timer(&dum_hcd->timer);
	dum_hcd->timer.function = dummy_timer;
	dum_hcd->timer.data = (unsigned long)dum_hcd;
	dum_hcd->rh_state = DUMMY_RH_RUNNING;

	INIT_LIST_HEAD(&dum_hcd->urbp_list);

	hcd->power_budget = POWER_BUDGET;
	hcd->state = HC_STATE_RUNNING;
	hcd->uses_new_polling = 1;

#ifdef CONFIG_USB_OTG
	hcd->self.otg_port = 1;
#endif

	/* FIXME 'urbs' should be a per-device thing, maybe in usbcore */
	return device_create_file(dummy_dev(dum_hcd), &dev_attr_urbs);
}
/* hc_driver ->stop callback: remove the sysfs attribute and tear down
 * the gadget-side driver binding. */
static void dummy_stop(struct usb_hcd *hcd)
{
	struct dummy		*dum;

	dum = hcd_to_dummy_hcd(hcd)->dum;
	device_remove_file(dummy_dev(hcd_to_dummy_hcd(hcd)), &dev_attr_urbs);
	usb_gadget_unregister_driver(dum->driver);
	dev_info(dummy_dev(hcd_to_dummy_hcd(hcd)), "stopped\n");
}
/*-------------------------------------------------------------------------*/
/* Host-side frame counter: forwards to the gadget-side counter so both
 * ends agree.  NOTE(review): passes NULL — presumably dummy_g_get_frame()
 * ignores its argument; confirm against its definition. */
static int dummy_h_get_frame(struct usb_hcd *hcd)
{
	return dummy_g_get_frame(NULL);
}
/*
 * hc_driver ->reset callback: associate the hcd with the_controller
 * and declare its speed.  The primary hcd becomes the USB 2.0 root
 * hub; the shared hcd created later becomes the USB 3.0 root hub.
 */
static int dummy_setup(struct usb_hcd *hcd)
{
	/* no real DMA constraints; accept unlimited scatter-gather */
	hcd->self.sg_tablesize = ~0;
	if (usb_hcd_is_primary_hcd(hcd)) {
		the_controller.hs_hcd = hcd_to_dummy_hcd(hcd);
		the_controller.hs_hcd->dum = &the_controller;
		/*
		 * Mark the first roothub as being USB 2.0.
		 * The USB 3.0 roothub will be registered later by
		 * dummy_hcd_probe()
		 */
		hcd->speed = HCD_USB2;
		hcd->self.root_hub->speed = USB_SPEED_HIGH;
	} else {
		the_controller.ss_hcd = hcd_to_dummy_hcd(hcd);
		the_controller.ss_hcd->dum = &the_controller;
		hcd->speed = HCD_USB3;
		hcd->self.root_hub->speed = USB_SPEED_SUPER;
	}
	return 0;
}
/* Change a group of bulk endpoints to support multiple stream IDs.
 *
 * Two-pass algorithm under the controller lock: first validate every
 * endpoint (streams not already enabled, and it advertises stream
 * support, possibly lowering the stream count to the smallest
 * per-endpoint maximum), then enable streams on all of them.  Returns
 * the number of streams granted, or a negative errno.
 */
static int dummy_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
	struct usb_host_endpoint **eps, unsigned int num_eps,
	unsigned int num_streams, gfp_t mem_flags)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
	unsigned long flags;
	int max_stream;
	int ret_streams = num_streams;
	unsigned int index;
	unsigned int i;

	if (!num_eps)
		return -EINVAL;

	spin_lock_irqsave(&dum_hcd->dum->lock, flags);
	/* pass 1: validate; nothing is modified on failure */
	for (i = 0; i < num_eps; i++) {
		index = dummy_get_ep_idx(&eps[i]->desc);
		if ((1 << index) & dum_hcd->stream_en_ep) {
			ret_streams = -EINVAL;
			goto out;
		}
		max_stream = usb_ss_max_streams(&eps[i]->ss_ep_comp);
		if (!max_stream) {
			ret_streams = -EINVAL;
			goto out;
		}
		if (max_stream < ret_streams) {
			dev_dbg(dummy_dev(dum_hcd), "Ep 0x%x only supports %u "
					"stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_stream);
			ret_streams = max_stream;
		}
	}

	/* pass 2: commit */
	for (i = 0; i < num_eps; i++) {
		index = dummy_get_ep_idx(&eps[i]->desc);
		dum_hcd->stream_en_ep |= 1 << index;
		set_max_streams_for_pipe(dum_hcd,
				usb_endpoint_num(&eps[i]->desc), ret_streams);
	}
out:
	spin_unlock_irqrestore(&dum_hcd->dum->lock, flags);
	return ret_streams;
}
/* Reverts a group of bulk endpoints back to not using stream IDs.
 *
 * Mirrors dummy_alloc_streams(): a validation pass (every endpoint must
 * currently have streams enabled, else -EINVAL and nothing changes)
 * followed by a commit pass, both under the controller lock.
 */
static int dummy_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
	struct usb_host_endpoint **eps, unsigned int num_eps,
	gfp_t mem_flags)
{
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);
	unsigned long irqflags;
	unsigned int ep_idx;
	unsigned int i;
	int retval = -EINVAL;

	spin_lock_irqsave(&dum_hcd->dum->lock, irqflags);

	/* pass 1: all endpoints must have streams enabled */
	for (i = 0; i < num_eps; i++) {
		ep_idx = dummy_get_ep_idx(&eps[i]->desc);
		if (!(dum_hcd->stream_en_ep & (1 << ep_idx)))
			goto out;
	}

	/* pass 2: disable streams on each endpoint */
	for (i = 0; i < num_eps; i++) {
		ep_idx = dummy_get_ep_idx(&eps[i]->desc);
		dum_hcd->stream_en_ep &= ~(1 << ep_idx);
		set_max_streams_for_pipe(dum_hcd,
				usb_endpoint_num(&eps[i]->desc), 0);
	}
	retval = 0;
out:
	spin_unlock_irqrestore(&dum_hcd->dum->lock, irqflags);
	return retval;
}
/* Host-controller driver ops shared by the primary USB 2.0 hcd and the
 * shared USB 3.0 hcd.  The HCD_USB3 flag is downgraded to HCD_USB2 in
 * dummy_hcd_probe() when SuperSpeed support is not requested. */
static struct hc_driver dummy_hcd = {
	.description =		(char *) driver_name,
	.product_desc =		"Dummy host controller",
	.hcd_priv_size =	sizeof(struct dummy_hcd),

	.flags =		HCD_USB3 | HCD_SHARED,

	.reset =		dummy_setup,
	.start =		dummy_start,
	.stop =			dummy_stop,

	.urb_enqueue =		dummy_urb_enqueue,
	.urb_dequeue =		dummy_urb_dequeue,

	.get_frame_number =	dummy_h_get_frame,

	.hub_status_data =	dummy_hub_status,
	.hub_control =		dummy_hub_control,
	.bus_suspend =		dummy_bus_suspend,
	.bus_resume =		dummy_bus_resume,

	.alloc_streams =	dummy_alloc_streams,
	.free_streams =		dummy_free_streams,
};
/*
 * Platform probe: create and register the USB 2.0 hcd, and — when
 * SuperSpeed is enabled via module parameters — a shared USB 3.0 hcd
 * on top of it.  On failure the partially registered hcds are released
 * and the_controller pointers are cleared so init() can detect the
 * failed probe.
 */
static int dummy_hcd_probe(struct platform_device *pdev)
{
	struct usb_hcd		*hs_hcd;
	struct usb_hcd		*ss_hcd;
	int			retval;

	dev_info(&pdev->dev, "%s, driver " DRIVER_VERSION "\n", driver_desc);

	/* without SuperSpeed, run as a plain USB 2.0 (non-shared) hcd */
	if (!mod_data.is_super_speed)
		dummy_hcd.flags = HCD_USB2;
	hs_hcd = usb_create_hcd(&dummy_hcd, &pdev->dev, dev_name(&pdev->dev));
	if (!hs_hcd)
		return -ENOMEM;
	hs_hcd->has_tt = 1;		/* transaction translator for LS/FS */

	retval = usb_add_hcd(hs_hcd, 0, 0);
	if (retval != 0) {
		usb_put_hcd(hs_hcd);
		return retval;
	}

	if (mod_data.is_super_speed) {
		ss_hcd = usb_create_shared_hcd(&dummy_hcd, &pdev->dev,
					dev_name(&pdev->dev), hs_hcd);
		if (!ss_hcd) {
			retval = -ENOMEM;
			goto dealloc_usb2_hcd;
		}

		retval = usb_add_hcd(ss_hcd, 0, 0);
		if (retval)
			goto put_usb3_hcd;
	}
	return 0;

put_usb3_hcd:
	usb_put_hcd(ss_hcd);
dealloc_usb2_hcd:
	usb_put_hcd(hs_hcd);
	the_controller.hs_hcd = the_controller.ss_hcd = NULL;
	return retval;
}
/* Platform remove: unregister the shared USB 3.0 hcd (if present)
 * before the primary USB 2.0 hcd, then drop both references. */
static int dummy_hcd_remove(struct platform_device *pdev)
{
	struct dummy		*dum;

	dum = hcd_to_dummy_hcd(platform_get_drvdata(pdev))->dum;

	if (dum->ss_hcd) {
		usb_remove_hcd(dummy_hcd_to_hcd(dum->ss_hcd));
		usb_put_hcd(dummy_hcd_to_hcd(dum->ss_hcd));
	}

	usb_remove_hcd(dummy_hcd_to_hcd(dum->hs_hcd));
	usb_put_hcd(dummy_hcd_to_hcd(dum->hs_hcd));

	the_controller.hs_hcd = NULL;
	the_controller.ss_hcd = NULL;

	return 0;
}
/* Platform suspend: only legal once the root hub itself has been
 * suspended; otherwise refuse with -EBUSY so the running timer keeps
 * its hardware "accessible". */
static int dummy_hcd_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);
	struct dummy_hcd *dum_hcd = hcd_to_dummy_hcd(hcd);

	dev_dbg(&pdev->dev, "%s\n", __func__);

	if (dum_hcd->rh_state == DUMMY_RH_RUNNING) {
		dev_warn(&pdev->dev, "Root hub isn't suspended!\n");
		return -EBUSY;
	}

	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	return 0;
}
/* Platform resume: mark the hardware accessible again and poll the
 * root hub so any pending port changes are reported promptly. */
static int dummy_hcd_resume(struct platform_device *pdev)
{
	struct usb_hcd *hcd = platform_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	usb_hcd_poll_rh_status(hcd);
	return 0;
}
/* Platform driver glue for the emulated host controller. */
static struct platform_driver dummy_hcd_driver = {
	.probe =	dummy_hcd_probe,
	.remove =	dummy_hcd_remove,
	.suspend =	dummy_hcd_suspend,
	.resume =	dummy_hcd_resume,
	.driver	= {
		.name =	(char *) driver_name,
		.owner = THIS_MODULE,
	},
};
/*-------------------------------------------------------------------------*/
/* the two emulated platform devices: one for the host side, one for
 * the gadget (udc) side */
static struct platform_device *the_udc_pdev;
static struct platform_device *the_hcd_pdev;

/*
 * Module init: allocate both platform devices, register both platform
 * drivers, then add the devices (hcd first, then udc).  Because probe
 * failures of an added platform device are not propagated by
 * platform_device_add(), success is verified by checking the state the
 * probe functions leave behind (the_controller pointers, drvdata).
 * Each error label below undoes exactly the steps completed before it.
 */
static int __init init(void)
{
	int	retval = -ENOMEM;

	if (usb_disabled())
		return -ENODEV;

	/* SuperSpeed emulation requires high-speed emulation too */
	if (!mod_data.is_high_speed && mod_data.is_super_speed)
		return -EINVAL;

	the_hcd_pdev = platform_device_alloc(driver_name, -1);
	if (!the_hcd_pdev)
		return retval;
	the_udc_pdev = platform_device_alloc(gadget_name, -1);
	if (!the_udc_pdev)
		goto err_alloc_udc;

	retval = platform_driver_register(&dummy_hcd_driver);
	if (retval < 0)
		goto err_register_hcd_driver;
	retval = platform_driver_register(&dummy_udc_driver);
	if (retval < 0)
		goto err_register_udc_driver;

	retval = platform_device_add(the_hcd_pdev);
	if (retval < 0)
		goto err_add_hcd;
	if (!the_controller.hs_hcd ||
	    (!the_controller.ss_hcd && mod_data.is_super_speed)) {
		/*
		 * The hcd was added successfully but its probe function failed
		 * for some reason.
		 */
		retval = -EINVAL;
		goto err_add_udc;
	}
	retval = platform_device_add(the_udc_pdev);
	if (retval < 0)
		goto err_add_udc;
	if (!platform_get_drvdata(the_udc_pdev)) {
		/*
		 * The udc was added successfully but its probe function failed
		 * for some reason.
		 */
		retval = -EINVAL;
		goto err_probe_udc;
	}
	return retval;

err_probe_udc:
	platform_device_del(the_udc_pdev);
err_add_udc:
	platform_device_del(the_hcd_pdev);
err_add_hcd:
	platform_driver_unregister(&dummy_udc_driver);
err_register_udc_driver:
	platform_driver_unregister(&dummy_hcd_driver);
err_register_hcd_driver:
	platform_device_put(the_udc_pdev);
err_alloc_udc:
	platform_device_put(the_hcd_pdev);
	return retval;
}
module_init(init);
/*
 * Module exit: tear down in reverse order of init().  The devices go
 * first (which invokes the drivers' remove callbacks), then the
 * platform drivers themselves.  Call order is load-bearing.
 */
static void __exit cleanup(void)
{
	platform_device_unregister(the_udc_pdev);
	platform_device_unregister(the_hcd_pdev);
	platform_driver_unregister(&dummy_udc_driver);
	platform_driver_unregister(&dummy_hcd_driver);
}
module_exit(cleanup);
| gpl-2.0 |
jxxhwy/Thunder-Kenel-JB-N719 | drivers/i2c/busses/i2c-ixp2000.c | 3982 | 4373 | /*
* drivers/i2c/busses/i2c-ixp2000.c
*
* I2C adapter for IXP2000 systems using GPIOs for I2C bus
*
* Author: Deepak Saxena <dsaxena@plexity.net>
* Based on IXDP2400 code by: Naeem M. Afzal <naeem.m.afzal@intel.com>
* Made generic by: Jeff Daly <jeffrey.daly@intel.com>
*
* Copyright (c) 2003-2004 MontaVista Software Inc.
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*
* From Jeff Daly:
*
* I2C adapter driver for Intel IXDP2xxx platforms. This should work for any
* IXP2000 platform if it uses the HW GPIO in the same manner. Basically,
* SDA and SCL GPIOs have external pullups. Setting the respective GPIO to
* an input will make the signal a '1' via the pullup. Setting them to
* outputs will pull them down.
*
* The GPIOs are open drain signals and are used as configuration strap inputs
* during power-up so there's generally a buffer on the board that needs to be
* 'enabled' to drive the GPIOs.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-bit.h>
#include <linux/slab.h>
#include <mach/hardware.h> /* Pick up IXP2000-specific bits */
#include <mach/gpio.h>
/* Recover the SCL GPIO number from the opaque algo-bit cookie. */
static inline int ixp2000_scl_pin(void *data)
{
	struct ixp2000_i2c_pins *pins = data;

	return pins->scl_pin;
}
/* Recover the SDA GPIO number from the opaque algo-bit cookie. */
static inline int ixp2000_sda_pin(void *data)
{
	struct ixp2000_i2c_pins *pins = data;

	return pins->sda_pin;
}
/*
 * Drive SCL.  The line is open drain: configuring the GPIO as input
 * releases it (pulled high externally), configuring it as output pulls
 * it low.  When releasing, busy-wait (bounded) for the line to actually
 * rise, which tolerates clock stretching by the slave.
 */
static void ixp2000_bit_setscl(void *data, int val)
{
	int spins = 5000;

	if (!val) {
		gpio_line_config(ixp2000_scl_pin(data), GPIO_OUT);
		return;
	}

	gpio_line_config(ixp2000_scl_pin(data), GPIO_IN);
	while (!gpio_line_get(ixp2000_scl_pin(data)) && spins--)
		;
}
/*
 * Drive SDA: input direction releases the line (pulled high),
 * output direction pulls it low.  No wait is needed on data.
 */
static void ixp2000_bit_setsda(void *data, int val)
{
	int direction = val ? GPIO_IN : GPIO_OUT;

	gpio_line_config(ixp2000_sda_pin(data), direction);
}
/* Sample the current level of the SCL line. */
static int ixp2000_bit_getscl(void *data)
{
	int pin = ixp2000_scl_pin(data);

	return gpio_line_get(pin);
}
/* Sample the current level of the SDA line. */
static int ixp2000_bit_getsda(void *data)
{
	int pin = ixp2000_sda_pin(data);

	return gpio_line_get(pin);
}
/*
 * Per-adapter state, allocated in probe and freed in remove:
 * the board-supplied GPIO pin pair plus the embedded i2c core
 * adapter and bit-banging algorithm descriptors.
 */
struct ixp2000_i2c_data {
	struct ixp2000_i2c_pins *gpio_pins;	/* board platform_data; not owned */
	struct i2c_adapter adapter;
	struct i2c_algo_bit_data algo_data;
};
/* Unbind: detach the driver data, delete the adapter, release memory. */
static int ixp2000_i2c_remove(struct platform_device *plat_dev)
{
	struct ixp2000_i2c_data *data = platform_get_drvdata(plat_dev);

	platform_set_drvdata(plat_dev, NULL);

	i2c_del_adapter(&data->adapter);
	kfree(data);

	return 0;
}
/*
 * Bind the GPIO bit-banged I2C adapter to a platform device.
 *
 * The SCL/SDA GPIO numbers come from the board's platform_data
 * (struct ixp2000_i2c_pins).  Both lines start configured as inputs
 * (released high via the external pullups) with their output latches
 * preset low, so flipping direction is all the bit-algo needs to do.
 *
 * Returns 0 on success or a negative errno.
 *
 * Fix: the adapter.algo_data assignment was terminated with a comma
 * (comma operator joining it to the next statement) instead of a
 * semicolon — harmless at runtime but a latent typo.
 */
static int ixp2000_i2c_probe(struct platform_device *plat_dev)
{
	int err;
	struct ixp2000_i2c_pins *gpio = plat_dev->dev.platform_data;
	struct ixp2000_i2c_data *drv_data =
		kzalloc(sizeof(struct ixp2000_i2c_data), GFP_KERNEL);

	if (!drv_data)
		return -ENOMEM;
	drv_data->gpio_pins = gpio;

	/* Wire up the i2c-algo-bit callbacks; 'gpio' is their cookie. */
	drv_data->algo_data.data = gpio;
	drv_data->algo_data.setsda = ixp2000_bit_setsda;
	drv_data->algo_data.setscl = ixp2000_bit_setscl;
	drv_data->algo_data.getsda = ixp2000_bit_getsda;
	drv_data->algo_data.getscl = ixp2000_bit_getscl;
	drv_data->algo_data.udelay = 6;		/* ~80 kHz bus clock */
	drv_data->algo_data.timeout = HZ;

	strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
		sizeof(drv_data->adapter.name));
	drv_data->adapter.algo_data = &drv_data->algo_data;
	drv_data->adapter.dev.parent = &plat_dev->dev;

	/* Release both lines, and preset the output latches low so that
	 * switching a line to output pulls it down. */
	gpio_line_config(gpio->sda_pin, GPIO_IN);
	gpio_line_config(gpio->scl_pin, GPIO_IN);
	gpio_line_set(gpio->scl_pin, 0);
	gpio_line_set(gpio->sda_pin, 0);

	if ((err = i2c_bit_add_bus(&drv_data->adapter)) != 0) {
		dev_err(&plat_dev->dev, "Could not install, error %d\n", err);
		kfree(drv_data);
		return err;
	}

	platform_set_drvdata(plat_dev, drv_data);

	return 0;
}
/*
 * Platform driver descriptor; binds by the "IXP2000-I2C" name that
 * board code uses when registering the matching platform device.
 */
static struct platform_driver ixp2000_i2c_driver = {
	.probe		= ixp2000_i2c_probe,
	.remove		= ixp2000_i2c_remove,
	.driver		= {
		.name	= "IXP2000-I2C",
		.owner	= THIS_MODULE,
	},
};
/* Module entry point: register the platform driver. */
static int __init ixp2000_i2c_init(void)
{
	return platform_driver_register(&ixp2000_i2c_driver);
}
/* Module exit point: unregister the platform driver. */
static void __exit ixp2000_i2c_exit(void)
{
	platform_driver_unregister(&ixp2000_i2c_driver);
}
module_init(ixp2000_i2c_init);
module_exit(ixp2000_i2c_exit);
MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:IXP2000-I2C");
| gpl-2.0 |
Shabbypenguin/Kettle_Corn_Kernel | drivers/net/sonic.c | 4238 | 22081 | /*
* sonic.c
*
* (C) 2005 Finn Thain
*
* Converted to DMA API, added zero-copy buffer handling, and
* (from the mac68k project) introduced dhd's support for 16-bit cards.
*
* (C) 1996,1998 by Thomas Bogendoerfer (tsbogend@alpha.franken.de)
*
* This driver is based on work from Andreas Busse, but most of
* the code is rewritten.
*
* (C) 1995 by Andreas Busse (andy@waldorf-gmbh.de)
*
* Core code included by system sonic drivers
*
* And... partially rewritten again by David Huggins-Daines in order
* to cope with screwed up Macintosh NICs that may or may not use
* 16-bit DMA.
*
* (C) 1999 David Huggins-Daines <dhd@debian.org>
*
*/
/*
* Sources: Olivetti M700-10 Risc Personal Computer hardware handbook,
* National Semiconductors data sheet for the DP83932B Sonic Ethernet
* controller, and the files "8390.c" and "skeleton.c" in this directory.
*
* Additional sources: Nat Semi data sheet for the DP83932C and Nat Semi
* Application Note AN-746, the files "lance.c" and "ibmlana.c". See also
* the NetBSD file "sys/arch/mac68k/dev/if_sn.c".
*/
/*
* Open/initialize the SONIC controller.
*
* This routine should set everything up anew at each open, even
* registers that "should" only need to be set once at boot, so that
 * there is a non-reboot way to recover if something goes wrong.
*/
/*
 * Allocate and DMA-map SONIC_NUM_RRS receive skbs, program the chip
 * via sonic_init(), and start the transmit queue.  Returns 0 or
 * -ENOMEM (all partial allocations/mappings are rolled back).
 */
static int sonic_open(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	if (sonic_debug > 2)
		printk("sonic_open: initializing sonic driver.\n");

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		struct sk_buff *skb = dev_alloc_skb(SONIC_RBSIZE + 2);
		if (skb == NULL) {
			while(i > 0) { /* free any that were allocated successfully */
				i--;
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't allocate receive buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		/* align IP header unless DMA requires otherwise */
		if (SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
			skb_reserve(skb, 2);
		lp->rx_skb[i] = skb;
	}

	for (i = 0; i < SONIC_NUM_RRS; i++) {
		dma_addr_t laddr = dma_map_single(lp->device, skb_put(lp->rx_skb[i], SONIC_RBSIZE),
		                                  SONIC_RBSIZE, DMA_FROM_DEVICE);
		/*
		 * NOTE(review): mapping failure is detected by comparing the
		 * handle to 0; dma_mapping_error() would be the portable check.
		 * Works only where 0 is never a valid DMA address — confirm.
		 */
		if (!laddr) {
			while(i > 0) { /* free any that were mapped successfully */
				i--;
				dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
				lp->rx_laddr[i] = (dma_addr_t)0;
			}
			for (i = 0; i < SONIC_NUM_RRS; i++) {
				dev_kfree_skb(lp->rx_skb[i]);
				lp->rx_skb[i] = NULL;
			}
			printk(KERN_ERR "%s: couldn't map rx DMA buffers\n",
			       dev->name);
			return -ENOMEM;
		}
		lp->rx_laddr[i] = laddr;
	}

	/*
	 * Initialize the SONIC
	 */
	sonic_init(dev);

	netif_start_queue(dev);

	if (sonic_debug > 2)
		printk("sonic_open: Initialization done.\n");

	return 0;
}
/*
* Close the SONIC device
*/
/*
 * Stop the queue, reset the chip with interrupts masked, then unmap
 * and free every outstanding tx and rx buffer.  Always returns 0.
 */
static int sonic_close(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;

	if (sonic_debug > 2)
		printk("sonic_close\n");

	netif_stop_queue(dev);

	/*
	 * stop the SONIC, disable interrupts
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);	/* ack any pending interrupt causes */
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);

	/* unmap and free skbs that haven't been transmitted */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if(lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if(lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}

	/* unmap and free the receive buffers */
	for (i = 0; i < SONIC_NUM_RRS; i++) {
		if(lp->rx_laddr[i]) {
			dma_unmap_single(lp->device, lp->rx_laddr[i], SONIC_RBSIZE, DMA_FROM_DEVICE);
			lp->rx_laddr[i] = (dma_addr_t)0;
		}
		if(lp->rx_skb[i]) {
			dev_kfree_skb(lp->rx_skb[i]);
			lp->rx_skb[i] = NULL;
		}
	}

	return 0;
}
/*
 * Watchdog handler: reset the chip, drop all pending tx buffers,
 * re-run the full hardware init and restart the queue.
 */
static void sonic_tx_timeout(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int i;
	/*
	 * put the Sonic into software-reset mode and
	 * disable all interrupts before releasing DMA buffers
	 */
	SONIC_WRITE(SONIC_IMR, 0);
	SONIC_WRITE(SONIC_ISR, 0x7fff);
	SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
	/* We could resend the original skbs. Easier to re-initialise. */
	for (i = 0; i < SONIC_NUM_TDS; i++) {
		if(lp->tx_laddr[i]) {
			dma_unmap_single(lp->device, lp->tx_laddr[i], lp->tx_len[i], DMA_TO_DEVICE);
			lp->tx_laddr[i] = (dma_addr_t)0;
		}
		if(lp->tx_skb[i]) {
			dev_kfree_skb(lp->tx_skb[i]);
			lp->tx_skb[i] = NULL;
		}
	}
	/* Try to restart the adaptor. */
	sonic_init(dev);
	lp->stats.tx_errors++;
	dev->trans_start = jiffies; /* prevent tx timeout */
	netif_wake_queue(dev);
}
/*
* transmit packet
*
* Appends new TD during transmission thus avoiding any TX interrupts
* until we run out of TDs.
* This routine interacts closely with the ISR in that it may,
* set tx_skb[i]
* reset the status flags of the new TD
* set and reset EOL flags
* stop the tx queue
* The ISR interacts with this routine in various ways. It may,
* reset tx_skb[i]
* test the EOL and status flags of the TDs
* wake the tx queue
* Concurrently with all of this, the SONIC is potentially writing to
* the status flags of the TDs.
* Until some mutual exclusion is added, this code will not work with SMP. However,
* MIPS Jazz machines and m68k Macs were all uni-processor machines.
*/
/*
 * Queue one packet for transmission by filling the next transmit
 * descriptor and handing it to the chip (see the interplay comment
 * block above for the ordering contract with the ISR).
 *
 * Fix: on DMA mapping failure the skb was freed and then
 * NETDEV_TX_BUSY was returned.  NETDEV_TX_BUSY tells the core to
 * requeue and retransmit the very same skb — which is now freed
 * (use-after-free).  A dropped packet must report NETDEV_TX_OK.
 */
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	dma_addr_t laddr;
	int length;
	int entry = lp->next_tx;

	if (sonic_debug > 2)
		printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev);

	length = skb->len;
	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}

	/*
	 * Map the packet data into the logical DMA address space
	 */
	laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
	if (!laddr) {
		printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
		dev_kfree_skb(skb);
		/* skb is gone: report the packet as consumed (dropped). */
		return NETDEV_TX_OK;
	}

	sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
	sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
	sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
	sonic_tda_put(dev, entry, SONIC_TD_LINK,
		sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

	/*
	 * Must set tx_skb[entry] only after clearing status, and
	 * before clearing EOL and before stopping queue
	 */
	wmb();
	lp->tx_len[entry] = length;
	lp->tx_laddr[entry] = laddr;
	lp->tx_skb[entry] = skb;

	wmb();
	sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
		sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
	lp->eol_tx = entry;

	lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
	if (lp->tx_skb[lp->next_tx] != NULL) {
		/* The ring is full, the ISR has yet to process the next TD. */
		if (sonic_debug > 3)
			printk("%s: stopping queue\n", dev->name);
		netif_stop_queue(dev);
		/* after this packet, wait for ISR to free up some TDAs */
	} else netif_start_queue(dev);

	if (sonic_debug > 2)
		printk("sonic_send_packet: issuing Tx command\n");

	SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

	return NETDEV_TX_OK;
}
/*
* The typical workload of the driver:
* Handle the network interface interrupts.
*/
/*
 * Interrupt handler: loop until no enabled ISR bits remain set.
 * Services rx completion, tx-done reaping, the error counters
 * (which are 16-bit hardware tallies that overflow into the
 * software stats), and the bus-retry condition.  Each cause is
 * acknowledged individually by writing its bit back to ISR.
 */
static irqreturn_t sonic_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct sonic_local *lp = netdev_priv(dev);
	int status;

	if (!(status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT))
		return IRQ_NONE;

	do {
		if (status & SONIC_INT_PKTRX) {
			if (sonic_debug > 2)
				printk("%s: packet rx\n", dev->name);
			sonic_rx(dev);	/* got packet(s) */
			SONIC_WRITE(SONIC_ISR, SONIC_INT_PKTRX); /* clear the interrupt */
		}

		if (status & SONIC_INT_TXDN) {
			int entry = lp->cur_tx;
			int td_status;
			int freed_some = 0;

			/* At this point, cur_tx is the index of a TD that is one of:
			 *   unallocated/freed                          (status set   & tx_skb[entry] clear)
			 *   allocated and sent                         (status set   & tx_skb[entry] set  )
			 *   allocated and not yet sent                 (status clear & tx_skb[entry] set  )
			 *   still being allocated by sonic_send_packet (status clear & tx_skb[entry] clear)
			 */

			if (sonic_debug > 2)
				printk("%s: tx done\n", dev->name);

			while (lp->tx_skb[entry] != NULL) {
				/* status == 0 means the chip hasn't finished this TD yet */
				if ((td_status = sonic_tda_get(dev, entry, SONIC_TD_STATUS)) == 0)
					break;

				if (td_status & 0x0001) {
					lp->stats.tx_packets++;
					lp->stats.tx_bytes += sonic_tda_get(dev, entry, SONIC_TD_PKTSIZE);
				} else {
					lp->stats.tx_errors++;
					if (td_status & 0x0642)
						lp->stats.tx_aborted_errors++;
					if (td_status & 0x0180)
						lp->stats.tx_carrier_errors++;
					if (td_status & 0x0020)
						lp->stats.tx_window_errors++;
					if (td_status & 0x0004)
						lp->stats.tx_fifo_errors++;
				}

				/* We must free the original skb */
				dev_kfree_skb_irq(lp->tx_skb[entry]);
				lp->tx_skb[entry] = NULL;
				/* and unmap DMA buffer */
				dma_unmap_single(lp->device, lp->tx_laddr[entry], lp->tx_len[entry], DMA_TO_DEVICE);
				lp->tx_laddr[entry] = (dma_addr_t)0;
				freed_some = 1;

				/* EOL marks the last TD the chip was told about */
				if (sonic_tda_get(dev, entry, SONIC_TD_LINK) & SONIC_EOL) {
					entry = (entry + 1) & SONIC_TDS_MASK;
					break;
				}
				entry = (entry + 1) & SONIC_TDS_MASK;
			}

			if (freed_some || lp->tx_skb[entry] == NULL)
				netif_wake_queue(dev);	/* The ring is no longer full */
			lp->cur_tx = entry;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_TXDN); /* clear the interrupt */
		}

		/*
		 * check error conditions
		 */
		if (status & SONIC_INT_RFO) {
			if (sonic_debug > 1)
				printk("%s: rx fifo overrun\n", dev->name);
			lp->stats.rx_fifo_errors++;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_RFO); /* clear the interrupt */
		}
		if (status & SONIC_INT_RDE) {
			if (sonic_debug > 1)
				printk("%s: rx descriptors exhausted\n", dev->name);
			lp->stats.rx_dropped++;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_RDE); /* clear the interrupt */
		}
		if (status & SONIC_INT_RBAE) {
			if (sonic_debug > 1)
				printk("%s: rx buffer area exceeded\n", dev->name);
			lp->stats.rx_dropped++;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_RBAE); /* clear the interrupt */
		}

		/* counter overruns; all counters are 16bit wide */
		if (status & SONIC_INT_FAE) {
			lp->stats.rx_frame_errors += 65536;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_FAE); /* clear the interrupt */
		}
		if (status & SONIC_INT_CRC) {
			lp->stats.rx_crc_errors += 65536;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_CRC); /* clear the interrupt */
		}
		if (status & SONIC_INT_MP) {
			lp->stats.rx_missed_errors += 65536;
			SONIC_WRITE(SONIC_ISR, SONIC_INT_MP); /* clear the interrupt */
		}

		/* transmit error */
		if (status & SONIC_INT_TXER) {
			if ((SONIC_READ(SONIC_TCR) & SONIC_TCR_FU) && (sonic_debug > 2))
				printk(KERN_ERR "%s: tx fifo underrun\n", dev->name);
			SONIC_WRITE(SONIC_ISR, SONIC_INT_TXER); /* clear the interrupt */
		}

		/* bus retry */
		if (status & SONIC_INT_BR) {
			printk(KERN_ERR "%s: Bus retry occurred! Device interrupt disabled.\n",
				dev->name);
			/* ... to help debug DMA problems causing endless interrupts. */
			/* Bounce the eth interface to turn on the interrupt again. */
			SONIC_WRITE(SONIC_IMR, 0);
			SONIC_WRITE(SONIC_ISR, SONIC_INT_BR); /* clear the interrupt */
		}

		/* load CAM done */
		if (status & SONIC_INT_LCD)
			SONIC_WRITE(SONIC_ISR, SONIC_INT_LCD); /* clear the interrupt */
	} while((status = SONIC_READ(SONIC_ISR) & SONIC_IMR_DEFAULT));

	return IRQ_HANDLED;
}
/*
* We have a good packet(s), pass it/them up the network stack.
*/
/*
 * Receive path (called from the ISR): walk the receive descriptors,
 * pass each good packet up the stack zero-copy, and replace the
 * consumed buffer with a freshly allocated + mapped skb.  On
 * allocation/mapping failure the incoming packet is dropped and the
 * old buffer is kept.
 */
static void sonic_rx(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	int status;
	int entry = lp->cur_rx;

	while (sonic_rda_get(dev, entry, SONIC_RD_IN_USE) == 0) {
		struct sk_buff *used_skb;
		struct sk_buff *new_skb;
		dma_addr_t new_laddr;
		u16 bufadr_l;
		u16 bufadr_h;
		int pkt_len;

		status = sonic_rda_get(dev, entry, SONIC_RD_STATUS);
		if (status & SONIC_RCR_PRX) {
			/* Malloc up new buffer. */
			new_skb = dev_alloc_skb(SONIC_RBSIZE + 2);
			if (new_skb == NULL) {
				printk(KERN_ERR "%s: Memory squeeze, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
				break;
			}
			/* provide 16 byte IP header alignment unless DMA requires otherwise */
			if(SONIC_BUS_SCALE(lp->dma_bitmode) == 2)
				skb_reserve(new_skb, 2);

			new_laddr = dma_map_single(lp->device, skb_put(new_skb, SONIC_RBSIZE),
			                           SONIC_RBSIZE, DMA_FROM_DEVICE);
			if (!new_laddr) {
				dev_kfree_skb(new_skb);
				printk(KERN_ERR "%s: Failed to map rx buffer, dropping packet.\n", dev->name);
				lp->stats.rx_dropped++;
				break;
			}

			/* now we have a new skb to replace it, pass the used one up the stack */
			dma_unmap_single(lp->device, lp->rx_laddr[entry], SONIC_RBSIZE, DMA_FROM_DEVICE);
			used_skb = lp->rx_skb[entry];
			pkt_len = sonic_rda_get(dev, entry, SONIC_RD_PKTLEN);
			skb_trim(used_skb, pkt_len);
			used_skb->protocol = eth_type_trans(used_skb, dev);
			netif_rx(used_skb);
			lp->stats.rx_packets++;
			lp->stats.rx_bytes += pkt_len;

			/* and insert the new skb */
			lp->rx_laddr[entry] = new_laddr;
			lp->rx_skb[entry] = new_skb;

			bufadr_l = (unsigned long)new_laddr & 0xffff;
			bufadr_h = (unsigned long)new_laddr >> 16;
			sonic_rra_put(dev, entry, SONIC_RR_BUFADR_L, bufadr_l);
			sonic_rra_put(dev, entry, SONIC_RR_BUFADR_H, bufadr_h);
		} else {
			/* This should only happen, if we enable accepting broken packets. */
			lp->stats.rx_errors++;
			if (status & SONIC_RCR_FAER)
				lp->stats.rx_frame_errors++;
			if (status & SONIC_RCR_CRCR)
				lp->stats.rx_crc_errors++;
		}
		if (status & SONIC_RCR_LPKT) {
			/*
			 * this was the last packet out of the current receive buffer
			 * give the buffer back to the SONIC
			 */
			lp->cur_rwp += SIZEOF_SONIC_RR * SONIC_BUS_SCALE(lp->dma_bitmode);
			if (lp->cur_rwp >= lp->rra_end) lp->cur_rwp = lp->rra_laddr & 0xffff;
			SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
			if (SONIC_READ(SONIC_ISR) & SONIC_INT_RBE) {
				if (sonic_debug > 2)
					printk("%s: rx buffer exhausted\n", dev->name);
				SONIC_WRITE(SONIC_ISR, SONIC_INT_RBE); /* clear the flag */
			}
		} else
			printk(KERN_ERR "%s: rx desc without RCR_LPKT. Shouldn't happen !?\n",
			     dev->name);
		/*
		 * give back the descriptor
		 */
		sonic_rda_put(dev, entry, SONIC_RD_LINK,
			sonic_rda_get(dev, entry, SONIC_RD_LINK) | SONIC_EOL);
		sonic_rda_put(dev, entry, SONIC_RD_IN_USE, 1);
		sonic_rda_put(dev, lp->eol_rx, SONIC_RD_LINK,
			sonic_rda_get(dev, lp->eol_rx, SONIC_RD_LINK) & ~SONIC_EOL);
		lp->eol_rx = entry;
		lp->cur_rx = entry = (entry + 1) & SONIC_RDS_MASK;
	}
	/*
	 * If any worth-while packets have been received, netif_rx()
	 * has done a mark_bh(NET_BH) for us and will work on them
	 * when we get to the bottom-half routine.
	 */
}
/*
* Get the current statistics.
* This may be called with the device open or closed.
*/
/*
 * Fold the chip's 16-bit tally registers (CRC, frame alignment,
 * missed packets) into the software stats, resetting each register
 * by writing 0xffff, then return the accumulated stats structure.
 */
static struct net_device_stats *sonic_get_stats(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);

	/* read the tally counter from the SONIC and reset them */
	lp->stats.rx_crc_errors += SONIC_READ(SONIC_CRCT);
	SONIC_WRITE(SONIC_CRCT, 0xffff);
	lp->stats.rx_frame_errors += SONIC_READ(SONIC_FAET);
	SONIC_WRITE(SONIC_FAET, 0xffff);
	lp->stats.rx_missed_errors += SONIC_READ(SONIC_MPT);
	SONIC_WRITE(SONIC_MPT, 0xffff);

	return &lp->stats;
}
/*
* Set or clear the multicast filter for this adaptor.
*/
/*
 * Program the receive filter: promiscuous mode, accept-all-multicast
 * (when IFF_ALLMULTI or more than 15 groups — the CAM has 16 entries
 * and entry 0 holds our own address), or load the exact multicast
 * addresses into the CAM.  Broadcast is always accepted.
 */
static void sonic_multicast_list(struct net_device *dev)
{
	struct sonic_local *lp = netdev_priv(dev);
	unsigned int rcr;
	struct netdev_hw_addr *ha;
	unsigned char *addr;
	int i;

	rcr = SONIC_READ(SONIC_RCR) & ~(SONIC_RCR_PRO | SONIC_RCR_AMC);
	rcr |= SONIC_RCR_BRD;	/* accept broadcast packets */

	if (dev->flags & IFF_PROMISC) {	/* set promiscuous mode */
		rcr |= SONIC_RCR_PRO;
	} else {
		if ((dev->flags & IFF_ALLMULTI) ||
		    (netdev_mc_count(dev) > 15)) {
			rcr |= SONIC_RCR_AMC;
		} else {
			if (sonic_debug > 2)
				printk("sonic_multicast_list: mc_count %d\n",
				       netdev_mc_count(dev));
			sonic_set_cam_enable(dev, 1);  /* always enable our own address */
			i = 1;
			/* CAM entries 1..n carry the multicast addresses as
			 * three little-endian 16-bit halves */
			netdev_for_each_mc_addr(ha, dev) {
				addr = ha->addr;
				sonic_cda_put(dev, i, SONIC_CD_CAP0, addr[1] << 8 | addr[0]);
				sonic_cda_put(dev, i, SONIC_CD_CAP1, addr[3] << 8 | addr[2]);
				sonic_cda_put(dev, i, SONIC_CD_CAP2, addr[5] << 8 | addr[4]);
				sonic_set_cam_enable(dev, sonic_get_cam_enable(dev) | (1 << i));
				i++;
			}
			SONIC_WRITE(SONIC_CDC, 16);
			/* issue Load CAM command */
			SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
			SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
			/* NOTE(review): completion of LCAM is not awaited here;
			 * the LCD interrupt is simply acknowledged in the ISR.
			 * Confirm no race with a concurrent rx/tx command. */
		}
	}

	if (sonic_debug > 2)
		printk("sonic_multicast_list: setting RCR=%x\n", rcr);

	SONIC_WRITE(SONIC_RCR, rcr);
}
/*
* Initialize the SONIC ethernet controller.
*/
static int sonic_init(struct net_device *dev)
{
unsigned int cmd;
struct sonic_local *lp = netdev_priv(dev);
int i;
/*
* put the Sonic into software-reset mode and
* disable all interrupts
*/
SONIC_WRITE(SONIC_IMR, 0);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RST);
/*
* clear software reset flag, disable receiver, clear and
* enable interrupts, then completely initialize the SONIC
*/
SONIC_WRITE(SONIC_CMD, 0);
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXDIS);
/*
* initialize the receive resource area
*/
if (sonic_debug > 2)
printk("sonic_init: initialize receive resource area\n");
for (i = 0; i < SONIC_NUM_RRS; i++) {
u16 bufadr_l = (unsigned long)lp->rx_laddr[i] & 0xffff;
u16 bufadr_h = (unsigned long)lp->rx_laddr[i] >> 16;
sonic_rra_put(dev, i, SONIC_RR_BUFADR_L, bufadr_l);
sonic_rra_put(dev, i, SONIC_RR_BUFADR_H, bufadr_h);
sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_L, SONIC_RBSIZE >> 1);
sonic_rra_put(dev, i, SONIC_RR_BUFSIZE_H, 0);
}
/* initialize all RRA registers */
lp->rra_end = (lp->rra_laddr + SONIC_NUM_RRS * SIZEOF_SONIC_RR *
SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
lp->cur_rwp = (lp->rra_laddr + (SONIC_NUM_RRS - 1) * SIZEOF_SONIC_RR *
SONIC_BUS_SCALE(lp->dma_bitmode)) & 0xffff;
SONIC_WRITE(SONIC_RSA, lp->rra_laddr & 0xffff);
SONIC_WRITE(SONIC_REA, lp->rra_end);
SONIC_WRITE(SONIC_RRP, lp->rra_laddr & 0xffff);
SONIC_WRITE(SONIC_RWP, lp->cur_rwp);
SONIC_WRITE(SONIC_URRA, lp->rra_laddr >> 16);
SONIC_WRITE(SONIC_EOBC, (SONIC_RBSIZE >> 1) - (lp->dma_bitmode ? 2 : 1));
/* load the resource pointers */
if (sonic_debug > 3)
printk("sonic_init: issuing RRRA command\n");
SONIC_WRITE(SONIC_CMD, SONIC_CR_RRRA);
i = 0;
while (i++ < 100) {
if (SONIC_READ(SONIC_CMD) & SONIC_CR_RRRA)
break;
}
if (sonic_debug > 2)
printk("sonic_init: status=%x i=%d\n", SONIC_READ(SONIC_CMD), i);
/*
* Initialize the receive descriptors so that they
* become a circular linked list, ie. let the last
* descriptor point to the first again.
*/
if (sonic_debug > 2)
printk("sonic_init: initialize receive descriptors\n");
for (i=0; i<SONIC_NUM_RDS; i++) {
sonic_rda_put(dev, i, SONIC_RD_STATUS, 0);
sonic_rda_put(dev, i, SONIC_RD_PKTLEN, 0);
sonic_rda_put(dev, i, SONIC_RD_PKTPTR_L, 0);
sonic_rda_put(dev, i, SONIC_RD_PKTPTR_H, 0);
sonic_rda_put(dev, i, SONIC_RD_SEQNO, 0);
sonic_rda_put(dev, i, SONIC_RD_IN_USE, 1);
sonic_rda_put(dev, i, SONIC_RD_LINK,
lp->rda_laddr +
((i+1) * SIZEOF_SONIC_RD * SONIC_BUS_SCALE(lp->dma_bitmode)));
}
/* fix last descriptor */
sonic_rda_put(dev, SONIC_NUM_RDS - 1, SONIC_RD_LINK,
(lp->rda_laddr & 0xffff) | SONIC_EOL);
lp->eol_rx = SONIC_NUM_RDS - 1;
lp->cur_rx = 0;
SONIC_WRITE(SONIC_URDA, lp->rda_laddr >> 16);
SONIC_WRITE(SONIC_CRDA, lp->rda_laddr & 0xffff);
/*
* initialize transmit descriptors
*/
if (sonic_debug > 2)
printk("sonic_init: initialize transmit descriptors\n");
for (i = 0; i < SONIC_NUM_TDS; i++) {
sonic_tda_put(dev, i, SONIC_TD_STATUS, 0);
sonic_tda_put(dev, i, SONIC_TD_CONFIG, 0);
sonic_tda_put(dev, i, SONIC_TD_PKTSIZE, 0);
sonic_tda_put(dev, i, SONIC_TD_FRAG_COUNT, 0);
sonic_tda_put(dev, i, SONIC_TD_LINK,
(lp->tda_laddr & 0xffff) +
(i + 1) * SIZEOF_SONIC_TD * SONIC_BUS_SCALE(lp->dma_bitmode));
lp->tx_skb[i] = NULL;
}
/* fix last descriptor */
sonic_tda_put(dev, SONIC_NUM_TDS - 1, SONIC_TD_LINK,
(lp->tda_laddr & 0xffff));
SONIC_WRITE(SONIC_UTDA, lp->tda_laddr >> 16);
SONIC_WRITE(SONIC_CTDA, lp->tda_laddr & 0xffff);
lp->cur_tx = lp->next_tx = 0;
lp->eol_tx = SONIC_NUM_TDS - 1;
/*
* put our own address to CAM desc[0]
*/
sonic_cda_put(dev, 0, SONIC_CD_CAP0, dev->dev_addr[1] << 8 | dev->dev_addr[0]);
sonic_cda_put(dev, 0, SONIC_CD_CAP1, dev->dev_addr[3] << 8 | dev->dev_addr[2]);
sonic_cda_put(dev, 0, SONIC_CD_CAP2, dev->dev_addr[5] << 8 | dev->dev_addr[4]);
sonic_set_cam_enable(dev, 1);
for (i = 0; i < 16; i++)
sonic_cda_put(dev, i, SONIC_CD_ENTRY_POINTER, i);
/*
* initialize CAM registers
*/
SONIC_WRITE(SONIC_CDP, lp->cda_laddr & 0xffff);
SONIC_WRITE(SONIC_CDC, 16);
/*
* load the CAM
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_LCAM);
i = 0;
while (i++ < 100) {
if (SONIC_READ(SONIC_ISR) & SONIC_INT_LCD)
break;
}
if (sonic_debug > 2) {
printk("sonic_init: CMD=%x, ISR=%x\n, i=%d",
SONIC_READ(SONIC_CMD), SONIC_READ(SONIC_ISR), i);
}
/*
* enable receiver, disable loopback
* and enable all interrupts
*/
SONIC_WRITE(SONIC_CMD, SONIC_CR_RXEN | SONIC_CR_STP);
SONIC_WRITE(SONIC_RCR, SONIC_RCR_DEFAULT);
SONIC_WRITE(SONIC_TCR, SONIC_TCR_DEFAULT);
SONIC_WRITE(SONIC_ISR, 0x7fff);
SONIC_WRITE(SONIC_IMR, SONIC_IMR_DEFAULT);
cmd = SONIC_READ(SONIC_CMD);
if ((cmd & SONIC_CR_RXEN) == 0 || (cmd & SONIC_CR_STP) == 0)
printk(KERN_ERR "sonic_init: failed, status=%x\n", cmd);
if (sonic_debug > 2)
printk("sonic_init: new status=%x\n",
SONIC_READ(SONIC_CMD));
return 0;
}
MODULE_LICENSE("GPL");
| gpl-2.0 |
tarunkapadia93/android_kernel_xiaomi_armani | arch/powerpc/mm/icswx.c | 4750 | 8065 | /*
* ICSWX and ACOP Management
*
* Copyright (C) 2011 Anton Blanchard, IBM Corp. <anton@samba.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include "icswx.h"
/*
* The processor and its L2 cache cause the icswx instruction to
* generate a COP_REQ transaction on PowerBus. The transaction has no
* address, and the processor does not perform an MMU access to
* authenticate the transaction. The command portion of the PowerBus
* COP_REQ transaction includes the LPAR_ID (LPID) and the coprocessor
* Process ID (PID), which the coprocessor compares to the authorized
* LPID and PID held in the coprocessor, to determine if the process
* is authorized to generate the transaction. The data of the COP_REQ
* transaction is 128-byte or less in size and is placed in cacheable
* memory on a 128-byte cache line boundary.
*
* The task to use a coprocessor should use use_cop() to mark the use
* of the Coprocessor Type (CT) and context switching. On a server
* class processor, the PID register is used only for coprocessor
 * management and so a coprocessor PID is allocated before executing
 * the icswx instruction. Drop_cop() is used to free the
* coprocessor PID.
*
* Example:
* Host Fabric Interface (HFI) is a PowerPC network coprocessor.
* Each HFI have multiple windows. Each HFI window serves as a
* network device sending to and receiving from HFI network.
* HFI immediate send function uses icswx instruction. The immediate
* send function allows small (single cache-line) packets be sent
* without using the regular HFI send FIFO and doorbell, which are
* much slower than immediate send.
*
* For each task intending to use HFI immediate send, the HFI driver
* calls use_cop() to obtain a coprocessor PID for the task.
* The HFI driver then allocate a free HFI window and save the
* coprocessor PID to the HFI window to allow the task to use the
* HFI window.
*
* The HFI driver repeatedly creates immediate send packets and
* issues icswx instruction to send data through the HFI window.
* The HFI compares the coprocessor PID in the CPU PID register
* to the PID held in the HFI window to determine if the transaction
* is allowed.
*
* When the task to release the HFI window, the HFI driver calls
* drop_cop() to release the coprocessor PID.
*/
/*
 * Load the incoming mm's coprocessor context into the SPRs: its
 * coprocessor PID (only when CONFIG_ICSWX_PID is enabled) and its
 * ACOP authorization mask.
 */
void switch_cop(struct mm_struct *next)
{
#ifdef CONFIG_ICSWX_PID
	mtspr(SPRN_PID, next->context.cop_pid);
#endif
	mtspr(SPRN_ACOP, next->context.acop);
}
/**
* Start using a coprocessor.
* @acop: mask of coprocessor to be used.
* @mm: The mm the coprocessor to associate with. Most likely current mm.
*
* Return a positive PID if successful. Negative errno otherwise.
* The returned PID will be fed to the coprocessor to determine if an
* icswx transaction is authenticated.
*/
int use_cop(unsigned long acop, struct mm_struct *mm)
{
	int ret;

	if (!cpu_has_feature(CPU_FTR_ICSWX))
		return -ENODEV;

	if (!mm || !acop)
		return -EINVAL;

	/* The page_table_lock ensures mm_users won't change under us */
	spin_lock(&mm->page_table_lock);
	spin_lock(mm->context.cop_lockp);

	/* allocate (or reuse) the coprocessor PID for this mm */
	ret = get_cop_pid(mm);
	if (ret < 0)
		goto out;

	/* update acop */
	mm->context.acop |= acop;

	/* write the new PID/ACOP into this CPU's SPRs */
	sync_cop(mm);

	/*
	 * If this is a threaded process then there might be other threads
	 * running. We need to send an IPI to force them to pick up any
	 * change in PID and ACOP.
	 */
	if (atomic_read(&mm->mm_users) > 1)
		smp_call_function(sync_cop, mm, 1);

out:
	spin_unlock(mm->context.cop_lockp);
	spin_unlock(&mm->page_table_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(use_cop);
/**
* Stop using a coprocessor.
* @acop: mask of coprocessor to be stopped.
* @mm: The mm the coprocessor associated with.
*/
void drop_cop(unsigned long acop, struct mm_struct *mm)
{
	int free_pid;

	if (!cpu_has_feature(CPU_FTR_ICSWX))
		return;

	if (WARN_ON_ONCE(!mm))
		return;

	/* The page_table_lock ensures mm_users won't change under us */
	spin_lock(&mm->page_table_lock);
	spin_lock(mm->context.cop_lockp);

	/* clear the requested coprocessor bits from the authorization mask */
	mm->context.acop &= ~acop;

	/* may return COP_PID_NONE while other coprocessors remain in use */
	free_pid = disable_cop_pid(mm);
	sync_cop(mm);

	/*
	 * If this is a threaded process then there might be other threads
	 * running. We need to send an IPI to force them to pick up any
	 * change in PID and ACOP.
	 */
	if (atomic_read(&mm->mm_users) > 1)
		smp_call_function(sync_cop, mm, 1);

	/* only release the PID after every CPU has stopped using it */
	if (free_pid != COP_PID_NONE)
		free_cop_pid(free_pid);

	spin_unlock(mm->context.cop_lockp);
	spin_unlock(&mm->page_table_lock);
}
EXPORT_SYMBOL_GPL(drop_cop);
/*
 * Hook for an alternate enablement policy when a coprocessor type is
 * not present in the mm's ACOP mask.  Non-zero means "not handled";
 * with no alternate policy yet, every CT is rejected.
 */
static int acop_use_cop(int ct)
{
	/* There is no alternate policy, yet */
	return -1;
}
/*
* Get the instruction word at the NIP
*/
static u32 acop_get_inst(struct pt_regs *regs)
{
u32 inst;
u32 __user *p;
p = (u32 __user *)regs->nip;
if (!access_ok(VERIFY_READ, p, sizeof(*p)))
return 0;
if (__get_user(inst, p))
return 0;
return inst;
}
/**
 * @regs: registers at time of interrupt
* @address: storage address
* @error_code: Fault code, usually the DSISR or ESR depending on
* processor type
*
* Return 0 if we are able to resolve the data storage fault that
* results from a CT miss in the ACOP register.
*/
/*
 * Resolve a data storage fault caused by a CT miss in the ACOP
 * register: sync ACOP if another thread already enabled the CT,
 * otherwise reject the icswx (via CR0 for the record form).
 * Returns 0 when handled, -1 when the instruction word could not
 * be read.
 *
 * Fix: the "No coprocessors available" pr_info lacked a trailing
 * newline, so the next printk would be glued onto the same log line.
 */
int acop_handle_fault(struct pt_regs *regs, unsigned long address,
		      unsigned long error_code)
{
	int ct;
	u32 inst = 0;

	if (!cpu_has_feature(CPU_FTR_ICSWX)) {
		pr_info("No coprocessors available\n");
		_exception(SIGILL, regs, ILL_ILLOPN, address);
		/*
		 * NOTE(review): control deliberately(?) falls through after
		 * queueing SIGILL; the CT decode below is wasted but harmless
		 * work.  Confirm whether an early return was intended.
		 */
	}

	if (!user_mode(regs)) {
		/* this could happen if the HV denies the
		 * kernel access, for now we just die */
		die("ICSWX from kernel failed", regs, SIGSEGV);
	}

	/* Some implementations leave us a hint for the CT */
	ct = ICSWX_GET_CT_HINT(error_code);
	if (ct < 0) {
		/* we have to peek at the instruction word to figure out CT */
		u32 ccw;
		u32 rs;

		inst = acop_get_inst(regs);
		if (inst == 0)
			return -1;

		/* RS field of icswx names the GPR holding the CCW;
		 * the CT lives in CCW bits 16..21 */
		rs = (inst >> (31 - 10)) & 0x1f;
		ccw = regs->gpr[rs];
		ct = (ccw >> 16) & 0x3f;
	}

	/*
	 * We could be here because another thread has enabled acop
	 * but the ACOP register has yet to be updated.
	 *
	 * This should have been taken care of by the IPI to sync all
	 * the threads (see smp_call_function(sync_cop, mm, 1)), but
	 * that could take forever if there are a significant amount
	 * of threads.
	 *
	 * Given the number of threads on some of these systems,
	 * perhaps this is the best way to sync ACOP rather than whack
	 * every thread with an IPI.
	 */
	if ((acop_copro_type_bit(ct) & current->active_mm->context.acop) != 0) {
		sync_cop(current->active_mm);
		return 0;
	}

	/* check for alternate policy */
	if (!acop_use_cop(ct))
		return 0;

	/* at this point the CT is unknown to the system */
	pr_warn("%s[%d]: Coprocessor %d is unavailable\n",
		current->comm, current->pid, ct);

	/* get inst if we don't already have it */
	if (inst == 0) {
		inst = acop_get_inst(regs);
		if (inst == 0)
			return -1;
	}

	/* Check if the instruction is the "record form" */
	if (inst & 1) {
		/*
		 * the instruction is "record" form so we can reject
		 * using CR0
		 */
		regs->ccr &= ~(0xful << 28);
		regs->ccr |= ICSWX_RC_NOT_FOUND << 28;

		/* Move on to the next instruction */
		regs->nip += 4;
	} else {
		/*
		 * There is no architected mechanism to report a bad
		 * CT so we could either SIGILL or report nothing.
		 * Since the non-record version should only be used
		 * for "hints" or "don't care" we should probably do
		 * nothing.  However, I could see how some people
		 * might want a SIGILL so it is here if you want it.
		 */
#ifdef CONFIG_PPC_ICSWX_USE_SIGILL
		_exception(SIGILL, regs, ILL_ILLOPN, address);
#else
		regs->nip += 4;
#endif
	}

	return 0;
}
EXPORT_SYMBOL_GPL(acop_handle_fault);
| gpl-2.0 |
kamarush/yuga_aosp_kernel_lp | arch/arm/plat-s5p/dev-uart.c | 5006 | 2894 | /* linux/arch/arm/plat-s5p/dev-uart.c
*
* Copyright (c) 2009 Samsung Electronics Co., Ltd.
* http://www.samsung.com/
*
* Base S5P UART resource and device definitions
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/platform_device.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <mach/hardware.h>
#include <mach/map.h>
#include <plat/devs.h>
/* Serial port registrations */
/* Register window and IRQ for UART channel 0. */
static struct resource s5p_uart0_resource[] = {
	[0] = {
		.start	= S5P_PA_UART0,
		.end	= S5P_PA_UART0 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART0,
		.end	= IRQ_UART0,
		.flags	= IORESOURCE_IRQ,
	},
};
/* Register window and IRQ for UART channel 1. */
static struct resource s5p_uart1_resource[] = {
	[0] = {
		.start	= S5P_PA_UART1,
		.end	= S5P_PA_UART1 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART1,
		.end	= IRQ_UART1,
		.flags	= IORESOURCE_IRQ,
	},
};
/* Register window and IRQ for UART channel 2. */
static struct resource s5p_uart2_resource[] = {
	[0] = {
		.start	= S5P_PA_UART2,
		.end	= S5P_PA_UART2 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART2,
		.end	= IRQ_UART2,
		.flags	= IORESOURCE_IRQ,
	},
};
/*
 * UART channel 3: the array is left empty (zero resources) unless the
 * platform configuration actually provides more than 3 UART ports.
 */
static struct resource s5p_uart3_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
	[0] = {
		.start	= S5P_PA_UART3,
		.end	= S5P_PA_UART3 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART3,
		.end	= IRQ_UART3,
		.flags	= IORESOURCE_IRQ,
	},
#endif
};
/* UART channel 4: empty unless the configuration provides more than 4 UARTs. */
static struct resource s5p_uart4_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
	[0] = {
		.start	= S5P_PA_UART4,
		.end	= S5P_PA_UART4 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART4,
		.end	= IRQ_UART4,
		.flags	= IORESOURCE_IRQ,
	},
#endif
};
/* UART channel 5: empty unless the configuration provides more than 5 UARTs. */
static struct resource s5p_uart5_resource[] = {
#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
	[0] = {
		.start	= S5P_PA_UART5,
		.end	= S5P_PA_UART5 + S5P_SZ_UART - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= IRQ_UART5,
		.end	= IRQ_UART5,
		.flags	= IORESOURCE_IRQ,
	},
#endif
};
/*
 * Per-channel resource table handed to the common Samsung serial core;
 * entries for UARTs not built into the configuration have zero
 * resources (see the #if guards in the arrays above).
 */
struct s3c24xx_uart_resources s5p_uart_resources[] __initdata = {
	[0] = {
		.resources	= s5p_uart0_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart0_resource),
	},
	[1] = {
		.resources	= s5p_uart1_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart1_resource),
	},
	[2] = {
		.resources	= s5p_uart2_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart2_resource),
	},
	[3] = {
		.resources	= s5p_uart3_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart3_resource),
	},
	[4] = {
		.resources	= s5p_uart4_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart4_resource),
	},
	[5] = {
		.resources	= s5p_uart5_resource,
		.nr_resources	= ARRAY_SIZE(s5p_uart5_resource),
	},
};
| gpl-2.0 |
Dalton-West/kernel-lge-msm8226 | drivers/infiniband/hw/mthca/mthca_qp.c | 5518 | 62386 | /*
* Copyright (c) 2004 Topspin Communications. All rights reserved.
* Copyright (c) 2005 Cisco Systems. All rights reserved.
* Copyright (c) 2005 Mellanox Technologies. All rights reserved.
* Copyright (c) 2004 Voltaire, Inc. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
* General Public License (GPL) Version 2, available from the file
* COPYING in the main directory of this source tree, or the
* OpenIB.org BSD license below:
*
* Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* - Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
*
* - Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <asm/io.h>
#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>
#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
/* Driver-wide QP sizing constants. */
enum {
	MTHCA_MAX_DIRECT_QP_SIZE	= 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ		= 10,
	MTHCA_FLIGHT_LIMIT		= 9,
	MTHCA_UD_HEADER_SIZE		= 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE	= 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE		= 16  /* inline data segment chunk */
};
/* Hardware QP state encodings (see to_mthca_state()/to_ib_qp_state()). */
enum {
	MTHCA_QP_STATE_RST	= 0,
	MTHCA_QP_STATE_INIT	= 1,
	MTHCA_QP_STATE_RTR	= 2,
	MTHCA_QP_STATE_RTS	= 3,
	MTHCA_QP_STATE_SQE	= 4,
	MTHCA_QP_STATE_SQD	= 5,
	MTHCA_QP_STATE_ERR	= 6,
	MTHCA_QP_STATE_DRAINING	= 7
};
/* Hardware service-type encodings (see to_mthca_st()). */
enum {
	MTHCA_QP_ST_RC	= 0x0,
	MTHCA_QP_ST_UC	= 0x1,
	MTHCA_QP_ST_RD	= 0x2,
	MTHCA_QP_ST_UD	= 0x3,
	MTHCA_QP_ST_MLX	= 0x7
};
/* Path-migration state encodings written into qp_context->flags. */
enum {
	MTHCA_QP_PM_MIGRATED	= 0x3,
	MTHCA_QP_PM_ARMED	= 0x0,
	MTHCA_QP_PM_REARM	= 0x1
};
/* Bit positions inside the QP context words. */
enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE		= 1 << 8,
	/* params1 */
	MTHCA_QP_BIT_SRE	= 1 << 15,
	MTHCA_QP_BIT_SWE	= 1 << 14,
	MTHCA_QP_BIT_SAE	= 1 << 13,
	MTHCA_QP_BIT_SIC	= 1 << 4,
	MTHCA_QP_BIT_SSC	= 1 << 3,
	/* params2 */
	MTHCA_QP_BIT_RRE	= 1 << 15,
	MTHCA_QP_BIT_RWE	= 1 << 14,
	MTHCA_QP_BIT_RAE	= 1 << 13,
	MTHCA_QP_BIT_RIC	= 1 << 4,
	MTHCA_QP_BIT_RSC	= 1 << 3
};
/* Fence bit for send doorbells. */
enum {
	MTHCA_SEND_DOORBELL_FENCE	= 1 << 5
};
/*
 * Address-path portion of the hardware QP context.  Layout is fixed by
 * the device interface: all multi-byte fields are big-endian and the
 * struct is packed to match the mailbox format exactly.
 */
struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));
/*
 * Hardware QP context as exchanged through command mailboxes
 * (QUERY_QP/MODIFY_QP).  Packed, big-endian, do not reorder.
 */
struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue;	/* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;	/* Reserved on Tavor */
	u8     sq_size_stride;	/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));
/*
 * Full MODIFY_QP mailbox payload: the optional-parameter bitmask
 * followed by the QP context itself.
 */
struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));
/* Bits for mthca_qp_param.opt_param_mask selecting optional context fields. */
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};
/* Map IB work-request opcodes to the hardware opcode values. */
static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
return qp->qpn >= dev->qp_table.sqp_start &&
qp->qpn <= dev->qp_table.sqp_start + 3;
}
static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
return qp->qpn >= dev->qp_table.sqp_start &&
qp->qpn <= dev->qp_table.sqp_start + 1;
}
/*
 * Return a pointer to receive WQE @n, handling both the
 * single-contiguous-buffer and the page-list buffer layouts.
 */
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	unsigned long offset = n << qp->rq.wqe_shift;

	if (qp->is_direct)
		return qp->queue.direct.buf + offset;

	return qp->queue.page_list[offset >> PAGE_SHIFT].buf +
	       (offset & (PAGE_SIZE - 1));
}
/*
 * Return a pointer to send WQE @n.  The send queue starts at
 * send_wqe_offset within the shared WQE buffer; beyond that the lookup
 * mirrors get_recv_wqe().
 */
static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	unsigned long offset = qp->send_wqe_offset + (n << qp->sq.wqe_shift);

	if (qp->is_direct)
		return qp->queue.direct.buf + offset;

	return qp->queue.page_list[offset >> PAGE_SHIFT].buf +
	       (offset & (PAGE_SIZE - 1));
}
/* Reset a work queue's producer/consumer indices to their initial values. */
static void mthca_wq_reset(struct mthca_wq *wq)
{
	wq->head      = 0;
	wq->tail      = 0;
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
}
/*
 * Deliver an asynchronous event for QP number @qpn to the consumer's
 * event handler, if one is registered.  The QP's refcount is raised
 * under the table lock before the callback and dropped afterwards;
 * when it reaches zero any waiter on qp->wait is woken.
 */
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		++qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	/* A path-migration event means the alternate port is now active. */
	if (event_type == IB_EVENT_PATH_MIG)
		qp->port = qp->alt_port;

	event.device     = &dev->ib_dev;
	event.event      = event_type;
	event.element.qp = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	spin_lock(&dev->qp_table.lock);
	if (!--qp->refcount)
		wake_up(&qp->wait);
	spin_unlock(&dev->qp_table.lock);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
switch (ib_state) {
case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
case IB_QPS_INIT: return MTHCA_QP_STATE_INIT;
case IB_QPS_RTR: return MTHCA_QP_STATE_RTR;
case IB_QPS_RTS: return MTHCA_QP_STATE_RTS;
case IB_QPS_SQD: return MTHCA_QP_STATE_SQD;
case IB_QPS_SQE: return MTHCA_QP_STATE_SQE;
case IB_QPS_ERR: return MTHCA_QP_STATE_ERR;
default: return -1;
}
}
/* Transport types tracked in qp->transport. */
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
/* Translate a driver transport type to the hardware service type, -1 if unmapped. */
static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:
		return MTHCA_QP_ST_RC;
	case UC:
		return MTHCA_QP_ST_UC;
	case UD:
		return MTHCA_QP_ST_UD;
	case RD:
		return MTHCA_QP_ST_RD;
	case MLX:
		return MTHCA_QP_ST_MLX;
	default:
		return -1;
	}
}
/*
 * Cache on the special QP whichever of the P_Key index, Q_Key and send
 * PSN attributes are selected by @attr_mask.
 */
static void store_attrs(struct mthca_sqp *sqp, const struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;

	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;

	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
}
/*
 * Bring up the IB link on @port by issuing INIT_IB with the device's
 * capability limits.  Failures are logged but not propagated.
 */
static void init_port(struct mthca_dev *dev, int port)
{
	struct mthca_init_ib_param param;
	int err;

	memset(&param, 0, sizeof param);

	param.pkey_cap   = dev->limits.pkey_table_len;
	param.gid_cap    = dev->limits.gid_table_len;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.port_width = dev->limits.port_width_cap;

	err = mthca_INIT_IB(dev, &param, port);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
}
/*
 * Compute the RRE/RAE/RWE bits of the QP context from the requested
 * access flags, falling back to the QP's currently stored values for
 * anything not selected in @attr_mask.  Returns the bits already
 * converted to big-endian for direct OR into params2.
 */
static __be32 get_hw_access_flags(struct mthca_qp *qp, const struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	/*
	 * With zero responder resources the QP has nowhere to service
	 * incoming RDMA reads or atomics, so only the remote-write flag
	 * is allowed to survive.
	 */
	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
/*
 * Translate a hardware QP state back to the IB enum.  DRAINING is
 * reported to consumers as SQD, matching the IB spec's view of a
 * send queue that is draining.
 */
static inline enum ib_qp_state to_ib_qp_state(int mthca_state)
{
	switch (mthca_state) {
	case MTHCA_QP_STATE_RST:
		return IB_QPS_RESET;
	case MTHCA_QP_STATE_INIT:
		return IB_QPS_INIT;
	case MTHCA_QP_STATE_RTR:
		return IB_QPS_RTR;
	case MTHCA_QP_STATE_RTS:
		return IB_QPS_RTS;
	case MTHCA_QP_STATE_DRAINING:
	case MTHCA_QP_STATE_SQD:
		return IB_QPS_SQD;
	case MTHCA_QP_STATE_SQE:
		return IB_QPS_SQE;
	case MTHCA_QP_STATE_ERR:
		return IB_QPS_ERR;
	default:
		return -1;
	}
}
/* Translate the 2-bit hardware migration state to the IB enum, -1 if invalid. */
static inline enum ib_mig_state to_ib_mig_state(int mthca_mig_state)
{
	switch (mthca_mig_state) {
	case 0:
		return IB_MIG_ARMED;
	case 1:
		return IB_MIG_REARM;
	case 3:
		return IB_MIG_MIGRATED;
	default:
		return -1;
	}
}
/* Translate hardware RRE/RWE/RAE context bits into IB access flags. */
static int to_ib_qp_access_flags(int mthca_flags)
{
	int flags = 0;

	if (mthca_flags & MTHCA_QP_BIT_RWE)
		flags |= IB_ACCESS_REMOTE_WRITE;
	if (mthca_flags & MTHCA_QP_BIT_RRE)
		flags |= IB_ACCESS_REMOTE_READ;
	if (mthca_flags & MTHCA_QP_BIT_RAE)
		flags |= IB_ACCESS_REMOTE_ATOMIC;

	return flags;
}
/*
 * Unpack a hardware address path into an ib_ah_attr.  If the port
 * number stored in the path is out of range the attribute is left
 * zeroed, which callers treat as "no valid path".
 */
static void to_ib_ah_attr(struct mthca_dev *dev, struct ib_ah_attr *ib_ah_attr,
			  struct mthca_qp_path *path)
{
	memset(ib_ah_attr, 0, sizeof *ib_ah_attr);

	/* Port number lives in the top byte of port_pkey (2 bits used). */
	ib_ah_attr->port_num = (be32_to_cpu(path->port_pkey) >> 24) & 0x3;

	if (ib_ah_attr->port_num == 0 || ib_ah_attr->port_num > dev->limits.num_ports)
		return;

	ib_ah_attr->dlid          = be16_to_cpu(path->rlid);
	ib_ah_attr->sl            = be32_to_cpu(path->sl_tclass_flowlabel) >> 28;
	ib_ah_attr->src_path_bits = path->g_mylmc & 0x7f;
	ib_ah_attr->static_rate   = mthca_rate_to_ib(dev,
						     path->static_rate & 0xf,
						     ib_ah_attr->port_num);
	/* Bit 7 of g_mylmc flags the presence of a GRH. */
	ib_ah_attr->ah_flags      = (path->g_mylmc & (1 << 7)) ? IB_AH_GRH : 0;
	if (ib_ah_attr->ah_flags) {
		ib_ah_attr->grh.sgid_index = path->mgid_index & (dev->limits.gid_table_len - 1);
		ib_ah_attr->grh.hop_limit  = path->hop_limit;
		ib_ah_attr->grh.traffic_class =
			(be32_to_cpu(path->sl_tclass_flowlabel) >> 20) & 0xff;
		ib_ah_attr->grh.flow_label =
			be32_to_cpu(path->sl_tclass_flowlabel) & 0xfffff;
		memcpy(ib_ah_attr->grh.dgid.raw,
		       path->rgid, sizeof ib_ah_attr->grh.dgid.raw);
	}
}
/*
 * Query the firmware for @ibqp's current context and translate it into
 * ib_qp_attr / ib_qp_init_attr form.  For a QP in RESET no firmware
 * query is needed; only the software-known capabilities are filled in.
 * Returns 0 on success or a negative errno.
 */
int mthca_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr_mask,
		   struct ib_qp_init_attr *qp_init_attr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	int err = 0;
	struct mthca_mailbox *mailbox = NULL;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *context;
	int mthca_state;

	mutex_lock(&qp->mutex);

	if (qp->state == IB_QPS_RESET) {
		qp_attr->qp_state = IB_QPS_RESET;
		goto done;
	}

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}

	err = mthca_QUERY_QP(dev, qp->qpn, 0, mailbox);
	if (err) {
		mthca_warn(dev, "QUERY_QP failed (%d)\n", err);
		goto out_mailbox;
	}

	qp_param    = mailbox->buf;
	context     = &qp_param->context;
	/* QP state is in the top nibble of the context flags word. */
	mthca_state = be32_to_cpu(context->flags) >> 28;

	qp->state		     = to_ib_qp_state(mthca_state);
	qp_attr->qp_state	     = qp->state;
	qp_attr->path_mtu	     = context->mtu_msgmax >> 5;
	qp_attr->path_mig_state	     =
		to_ib_mig_state((be32_to_cpu(context->flags) >> 11) & 0x3);
	qp_attr->qkey		     = be32_to_cpu(context->qkey);
	qp_attr->rq_psn		     = be32_to_cpu(context->rnr_nextrecvpsn) & 0xffffff;
	qp_attr->sq_psn		     = be32_to_cpu(context->next_send_psn) & 0xffffff;
	qp_attr->dest_qp_num	     = be32_to_cpu(context->remote_qpn) & 0xffffff;
	qp_attr->qp_access_flags     =
		to_ib_qp_access_flags(be32_to_cpu(context->params2));

	/* Address vectors only make sense for connected transports. */
	if (qp->transport == RC || qp->transport == UC) {
		to_ib_ah_attr(dev, &qp_attr->ah_attr, &context->pri_path);
		to_ib_ah_attr(dev, &qp_attr->alt_ah_attr, &context->alt_path);
		qp_attr->alt_pkey_index =
			be32_to_cpu(context->alt_path.port_pkey) & 0x7f;
		qp_attr->alt_port_num	= qp_attr->alt_ah_attr.port_num;
	}

	qp_attr->pkey_index = be32_to_cpu(context->pri_path.port_pkey) & 0x7f;
	qp_attr->port_num   =
		(be32_to_cpu(context->pri_path.port_pkey) >> 24) & 0x3;

	/* qp_attr->en_sqd_async_notify is only applicable in modify qp */
	qp_attr->sq_draining = mthca_state == MTHCA_QP_STATE_DRAINING;

	qp_attr->max_rd_atomic = 1 << ((be32_to_cpu(context->params1) >> 21) & 0x7);

	qp_attr->max_dest_rd_atomic =
		1 << ((be32_to_cpu(context->params2) >> 21) & 0x7);
	qp_attr->min_rnr_timer	    =
		(be32_to_cpu(context->rnr_nextrecvpsn) >> 24) & 0x1f;
	qp_attr->timeout	    = context->pri_path.ackto >> 3;
	qp_attr->retry_cnt	    = (be32_to_cpu(context->params1) >> 16) & 0x7;
	qp_attr->rnr_retry	    = context->pri_path.rnr_retry >> 5;
	qp_attr->alt_timeout	    = context->alt_path.ackto >> 3;

done:
	qp_attr->cur_qp_state	     = qp_attr->qp_state;
	qp_attr->cap.max_send_wr     = qp->sq.max;
	qp_attr->cap.max_recv_wr     = qp->rq.max;
	qp_attr->cap.max_send_sge    = qp->sq.max_gs;
	qp_attr->cap.max_recv_sge    = qp->rq.max_gs;
	qp_attr->cap.max_inline_data = qp->max_inline_data;

	qp_init_attr->cap	     = qp_attr->cap;

out_mailbox:
	mthca_free_mailbox(dev, mailbox);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
/*
 * Fill a hardware address path from an ib_ah_attr.  Returns 0 on
 * success, or -1 if the requested SGID index exceeds the device's GID
 * table (callers only test for non-zero).
 */
static int mthca_path_set(struct mthca_dev *dev, const struct ib_ah_attr *ah,
			  struct mthca_qp_path *path, u8 port)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = mthca_get_rate(dev, ah->static_rate, port);

	if (ah->ah_flags & IB_AH_GRH) {
		if (ah->grh.sgid_index >= dev->limits.gid_table_len) {
			mthca_dbg(dev, "sgid_index (%u) too large. max is %d\n",
				  ah->grh.sgid_index, dev->limits.gid_table_len-1);
			return -1;
		}

		/* Bit 7 of g_mylmc marks GRH presence (see to_ib_ah_attr()). */
		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);

	return 0;
}
/*
 * Program a MODIFY_QP mailbox and execute the firmware command to move
 * @qp from @cur_state to @new_state.  Only attributes selected by
 * @attr_mask are written, and each optional context field is flagged in
 * opt_param_mask so the firmware knows which words are valid.  On
 * success the software QP state and cached attributes are updated, the
 * IB link is brought up/down for QP0 transitions, and kernel QPs moved
 * to RESET have their CQ entries cleaned and queues reinitialized.
 * Caller must hold qp->mutex and have already validated the transition.
 * Returns 0 on success or a negative errno.
 */
static int __mthca_modify_qp(struct ib_qp *ibqp,
			     const struct ib_qp_attr *attr, int attr_mask,
			     enum ib_qp_state cur_state, enum ib_qp_state new_state)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 sqd_event = 0;
	int err = -EINVAL;

	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto out;
	}
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	/* New state and service type share the flags word. */
	qp_context->flags = cpu_to_be32((to_mthca_state(new_state) << 28) |
					(to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	/* MLX and UD QPs always use a fixed 2048-byte MTU encoding. */
	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU) {
		if (attr->path_mtu < IB_MTU_256 || attr->path_mtu > IB_MTU_2048) {
			mthca_dbg(dev, "path MTU (%u) is invalid\n",
				  attr->path_mtu);
			goto out_mailbox;
		}
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	}

	/* Queue sizes/strides are only programmed on mem-free HCAs. */
	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = ilog2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = ilog2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn  = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(qp->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}

	if (attr_mask & IB_QP_AV) {
		if (mthca_path_set(dev, &attr->ah_attr, &qp_context->pri_path,
				   attr_mask & IB_QP_PORT ? attr->port_num : qp->port))
			goto out_mailbox;

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	/*
	 * Choose a scheduling queue for RC QPs at the INIT->RTR
	 * transition, distinguishing user and kernel QPs.
	 */
	if (ibqp->qp_type == IB_QPT_RC &&
	    cur_state == IB_QPS_INIT && new_state == IB_QPS_RTR) {
		u8 sched_queue = ibqp->uobject ? 0x2 : 0x1;
		if (mthca_is_memfree(dev))
			qp_context->rlkey_arbel_sched_queue |= sched_queue;
		else
			qp_context->tavor_sched_queue |= cpu_to_be32(sched_queue);
		qp_param->opt_param_mask |=
			cpu_to_be32(MTHCA_QP_OPTPAR_SCHED_QUEUE);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_pkey_index >= dev->limits.pkey_table_len) {
			mthca_dbg(dev, "Alternate P_Key index (%u) too large. max is %d\n",
				  attr->alt_pkey_index, dev->limits.pkey_table_len-1);
			goto out_mailbox;
		}

		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			goto out_mailbox;
		}

		if (mthca_path_set(dev, &attr->alt_ah_attr, &qp_context->alt_path,
				   attr->alt_ah_attr.port_num))
			goto out_mailbox;

		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd         = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey   = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1    = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					     (MTHCA_FLIGHT_LIMIT << 24) |
					     MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}

	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2      |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);

	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index   = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);

	/* Request async notification when the send queue finishes draining. */
	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD  &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY               &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, cur_state, new_state, qp->qpn, 0,
			      mailbox, sqd_event);
	if (err) {
		mthca_warn(dev, "modify QP %d->%d returned %d.\n",
			   cur_state, new_state, err);
		goto out_mailbox;
	}

	qp->state = new_state;
	if (attr_mask & IB_QP_ACCESS_FLAGS)
		qp->atomic_rd_en = attr->qp_access_flags;
	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		qp->resp_depth = attr->max_dest_rd_atomic;
	if (attr_mask & IB_QP_PORT)
		qp->port = attr->port_num;
	if (attr_mask & IB_QP_ALT_PATH)
		qp->alt_port = attr->alt_port_num;

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, qp->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, qp->port);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq), qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq), qp->qpn, NULL);

		mthca_wq_reset(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_reset(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->sq.db = 0;
			*qp->rq.db = 0;
		}
	}

out_mailbox:
	mthca_free_mailbox(dev, mailbox);
out:
	return err;
}
/*
 * ib_modify_qp entry point: validate the requested transition and
 * attribute ranges against device limits, then hand off to
 * __mthca_modify_qp() for the actual firmware command.  Returns 0 on
 * success or a negative errno.
 */
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask,
		    struct ib_udata *udata)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	int err = -EINVAL;

	mutex_lock(&qp->mutex);
	if (attr_mask & IB_QP_CUR_STATE) {
		cur_state = attr->cur_qp_state;
	} else {
		/* Take both work-queue locks so we read a stable state. */
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	new_state = attr_mask & IB_QP_STATE ? attr->qp_state : cur_state;

	if (!ib_modify_qp_is_ok(cur_state, new_state, ibqp->qp_type, attr_mask)) {
		mthca_dbg(dev, "Bad QP transition (transport %d) "
			  "%d->%d with attr 0x%08x\n",
			  qp->transport, cur_state, new_state,
			  attr_mask);
		goto out;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	     attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "P_Key index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len-1);
		goto out;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		goto out;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		goto out;
	}

	/* RESET->RESET is a no-op; skip the firmware command entirely. */
	if (cur_state == new_state && cur_state == IB_QPS_RESET) {
		err = 0;
		goto out;
	}

	err = __mthca_modify_qp(ibqp, attr, attr_mask, cur_state, new_state);

out:
	mutex_unlock(&qp->mutex);
	return err;
}
/*
 * Maximum number of bytes available for scatter/gather data in a WQE
 * of @desc_sz bytes: the descriptor minus the mandatory next segment
 * and the transport-specific non-data segments.
 */
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	int size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size -= mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return size;
}
/* Inline data is only supported for userspace QPs; kernel QPs get 0. */
static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	if (!pd->ibpd.uobject)
		return 0;

	return max_data_size - MTHCA_INLINE_HEADER_SIZE;
}
/*
 * Derive the QP's advertised capabilities (max inline data and max
 * scatter/gather entries per queue) from the WQE sizes chosen by
 * mthca_alloc_wqe_buf(), capped by the device limits.
 */
static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int sq_desc_sz = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift);
	int rq_desc_sz = min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift);
	int max_data_size = mthca_max_data_size(dev, qp, sq_desc_sz);

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (rq_desc_sz - sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}
/*
* Allocate and register buffer for WQEs. qp->rq.max, sq.max,
* rq.max_gs and sq.max_gs must all be assigned.
* mthca_alloc_wqe_buf will calculate rq.wqe_shift and
* sq.wqe_shift (as well as send_wqe_offset, is_direct, and
* queue)
*/
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	/* Receive WQE: next segment plus max_gs scatter entries,
	 * rounded up to a power of two (min 64 bytes). */
	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	/* Send WQE: gather entries plus transport-specific segments. */
	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	/* Send queue follows the receive queue, aligned to a send WQE. */
	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	/* One wrid entry per receive plus per send WQE. */
	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}
/*
 * Undo mthca_alloc_wqe_buf(): release the WQE buffer (recomputing the
 * same page-aligned size used at allocation time) and the wrid array.
 */
static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	int size = PAGE_ALIGN(qp->send_wqe_offset +
			      (qp->sq.max << qp->sq.wqe_shift));

	mthca_buf_free(dev, size, &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}
/*
 * On mem-free HCAs, pin the ICM table entries (QP context, extended QP
 * context and RDB) needed for @qp.  Unwinds in reverse order on
 * failure.  No-op (returns 0) on Tavor.
 */
static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}
/*
 * Release the ICM table references taken by mthca_map_memfree(), in
 * reverse order of acquisition (rdb, eqp, qpc).  mthca_table_put() is
 * safe to call unconditionally; on Tavor these are no-ops.
 */
static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
/*
 * Allocate the receive- and send-queue doorbell records needed by
 * mem-free (Arbel-mode) HCAs.  Tavor-mode devices use MMIO doorbells
 * instead, so this is a no-op for them.
 *
 * On failure the RQ doorbell is released before returning, so the
 * caller never has to unwind a partial allocation.  Returns 0 on
 * success or -ENOMEM.
 */
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (!mthca_is_memfree(dev))
		return 0;

	qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
					 qp->qpn, &qp->rq.db);
	if (qp->rq.db_index < 0)
		return -ENOMEM;

	qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
					 qp->qpn, &qp->sq.db);
	if (qp->sq.db_index < 0) {
		/* undo the RQ doorbell so we fail cleanly */
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
		return -ENOMEM;
	}

	return 0;
}
/*
 * Release the doorbell records allocated by mthca_alloc_memfree(), in
 * reverse order (SQ, then RQ).  No-op on Tavor-mode devices, which
 * have no doorbell records.
 */
static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (!mthca_is_memfree(dev))
		return;

	mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
	mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
}
static int mthca_alloc_qp_common(struct mthca_dev *dev,
struct mthca_pd *pd,
struct mthca_cq *send_cq,
struct mthca_cq *recv_cq,
enum ib_sig_type send_policy,
struct mthca_qp *qp)
{
int ret;
int i;
struct mthca_next_seg *next;
qp->refcount = 1;
init_waitqueue_head(&qp->wait);
mutex_init(&qp->mutex);
qp->state = IB_QPS_RESET;
qp->atomic_rd_en = 0;
qp->resp_depth = 0;
qp->sq_policy = send_policy;
mthca_wq_reset(&qp->sq);
mthca_wq_reset(&qp->rq);
spin_lock_init(&qp->sq.lock);
spin_lock_init(&qp->rq.lock);
ret = mthca_map_memfree(dev, qp);
if (ret)
return ret;
ret = mthca_alloc_wqe_buf(dev, pd, qp);
if (ret) {
mthca_unmap_memfree(dev, qp);
return ret;
}
mthca_adjust_qp_caps(dev, pd, qp);
/*
* If this is a userspace QP, we're done now. The doorbells
* will be allocated and buffers will be initialized in
* userspace.
*/
if (pd->ibpd.uobject)
return 0;
ret = mthca_alloc_memfree(dev, qp);
if (ret) {
mthca_free_wqe_buf(dev, qp);
mthca_unmap_memfree(dev, qp);
return ret;
}
if (mthca_is_memfree(dev)) {
struct mthca_data_seg *scatter;
int size = (sizeof (struct mthca_next_seg) +
qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;
for (i = 0; i < qp->rq.max; ++i) {
next = get_recv_wqe(qp, i);
next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
qp->rq.wqe_shift);
next->ee_nds = cpu_to_be32(size);
for (scatter = (void *) (next + 1);
(void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
++scatter)
scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
}
for (i = 0; i < qp->sq.max; ++i) {
next = get_send_wqe(qp, i);
next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
qp->sq.wqe_shift) +
qp->send_wqe_offset);
}
} else {
for (i = 0; i < qp->rq.max; ++i) {
next = get_recv_wqe(qp, i);
next->nda_op = htonl((((i + 1) % qp->rq.max) <<
qp->rq.wqe_shift) | 1);
}
}
qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);
return 0;
}
/*
 * Validate the requested QP capabilities against device limits and
 * translate them into the internal rq/sq sizes.
 *
 * Mem-free HCAs require power-of-two queue depths, so the requested
 * work-request counts are rounded up there; Tavor takes them as-is.
 * sq.max_gs is raised if needed so the inline-data area (plus its
 * header) fits in the gather list.
 *
 * Returns 0 on success or -EINVAL if any capability exceeds the
 * device limits.
 */
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr  	 > dev->limits.max_wqes ||
	    cap->max_recv_wr  	 > dev->limits.max_wqes ||
	    cap->max_send_sge 	 > dev->limits.max_sg   ||
	    cap->max_recv_sge 	 > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra send gather entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_send_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		/* Arbel requires power-of-two queue depths */
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	/* make room for the largest of: SGEs, or the inline-data area */
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}
/*
 * Allocate and initialize a regular (RC/UC/UD) QP: pick a QPN, run
 * the common init, then publish the QP in the qpn -> qp lookup array
 * used by the CQ polling code.
 *
 * Returns 0 on success, -EINVAL for unsupported QP types, -ENOMEM if
 * no QPN is available, or an error from mthca_alloc_qp_common().
 */
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	/* initialize port to zero for error-catching. */
	qp->port = 0;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	/* publish the QP for lookup by the CQ polling code */
	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
/*
 * Lock the send and receive CQs together.  To avoid ABBA deadlock
 * when two QPs share CQs in opposite roles, locks are always taken in
 * ascending CQN order; if both are the same CQ only one lock is taken
 * (the __acquire() keeps sparse's lock balance happy).
 */
static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__acquires(&send_cq->lock) __acquires(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		spin_lock_irq(&send_cq->lock);
		__acquire(&recv_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_lock_irq(&send_cq->lock);
		spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
	} else {
		spin_lock_irq(&recv_cq->lock);
		spin_lock_nested(&send_cq->lock, SINGLE_DEPTH_NESTING);
	}
}
/*
 * Unlock the CQ pair taken by mthca_lock_cqs(), releasing in reverse
 * of the acquisition order (higher CQN first) and restoring IRQs on
 * the lock that disabled them.
 */
static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
	__releases(&send_cq->lock) __releases(&recv_cq->lock)
{
	if (send_cq == recv_cq) {
		__release(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else if (send_cq->cqn < recv_cq->cqn) {
		spin_unlock(&recv_cq->lock);
		spin_unlock_irq(&send_cq->lock);
	} else {
		spin_unlock(&send_cq->lock);
		spin_unlock_irq(&recv_cq->lock);
	}
}
/*
 * Allocate a special QP (SMI/GSI proxy).  Special QPs use the MLX
 * transport and a DMA-coherent buffer holding one pre-built UD header
 * per send WQE.  The QPN is fixed per port (sqp_start + offsets), so
 * instead of allocating one we claim the slot in the lookup array and
 * fail with -EBUSY if it is already taken.
 *
 * Returns 0 on success or a negative errno; on failure all resources
 * (array slot, header buffer) are released via the goto-unwind path.
 */
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	/* fixed special-QPN layout: 2 QPs per port after sqp_start */
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;

	sqp->qp.transport = MLX;
	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	/* claim the fixed QPN slot; -EBUSY if another SQP already owns it */
	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->qp.port      = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}
/*
 * Read the QP's reference count under the qp_table lock, so the value
 * is consistent with concurrent ref/unref done by the CQ code.
 */
static inline int get_qp_refcount(struct mthca_dev *dev, struct mthca_qp *qp)
{
	int refs;

	spin_lock_irq(&dev->qp_table.lock);
	refs = qp->refcount;
	spin_unlock_irq(&dev->qp_table.lock);

	return refs;
}
/*
 * Tear down a QP: unpublish it from the lookup array (under both CQ
 * locks so pollers never see a half-dead QP), wait for the refcount
 * to drain, move the hardware QP to RESET, scrub stale completions
 * out of the CQs, and free buffers/doorbells/ICM.  Special QPs free
 * their header buffer instead of returning the QPN to the allocator
 * (SQP numbers are fixed, not allocated).
 */
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	mthca_lock_cqs(send_cq, recv_cq);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	--qp->refcount;
	spin_unlock(&dev->qp_table.lock);

	mthca_unlock_cqs(send_cq, recv_cq);

	/* wait until every in-flight user has dropped its reference */
	wait_event(qp->wait, !get_qp_refcount(dev, qp));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, qp->state, IB_QPS_RESET, qp->qpn, 0,
				NULL, 0);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, recv_cq, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (send_cq != recv_cq)
			mthca_cq_clean(dev, send_cq, qp->qpn, NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	/* start from a template LRH/BTH/DETH; GRH only if the AH has one */
	ib_ud_header_init(256, /* assume a MAD */ 1, 0, 0,
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)), 0,
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	/* QP0 traffic goes out on VL15; permissive DLID sets the SLR bit */
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;
	mlx->vcrc = 0;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->ex.imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	/* QP0 uses its own pkey index; QP1 uses the index from the WR */
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->qp.port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	/* PSN is 24 bits wide */
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	/* high bit in the WR qkey means "use the QP's own qkey" */
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	/* pack the header into this WQE's slot in the DMA header buffer */
	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
/*
 * Return non-zero if posting nreq more work requests would overflow
 * the work queue.  The first check is lock-free and may read a stale
 * tail; only when it looks full do we retake the snapshot under the
 * CQ lock (polling advances the tail under that lock) to confirm.
 */
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	struct mthca_cq *cq;
	unsigned used = wq->head - wq->tail;

	if (likely(used + nreq < wq->max))
		return 0;

	/* possibly full: recheck with a consistent tail */
	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	used = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return used + nreq >= wq->max;
}
/*
 * Fill in a remote-address segment (RDMA/atomic target) in wire
 * (big-endian) byte order; the reserved word must be zeroed.
 */
static __always_inline void set_raddr_seg(struct mthca_raddr_seg *rseg,
					  u64 remote_addr, u32 rkey)
{
	rseg->reserved = 0;
	rseg->rkey     = cpu_to_be32(rkey);
	rseg->raddr    = cpu_to_be64(remote_addr);
}
/*
 * Fill in an atomic-operation segment.  Compare-and-swap carries both
 * the swap and compare values; fetch-and-add puts its addend in the
 * swap_add slot and leaves compare zero.
 */
static __always_inline void set_atomic_seg(struct mthca_atomic_seg *aseg,
					   struct ib_send_wr *wr)
{
	if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.swap);
		aseg->compare  = cpu_to_be64(wr->wr.atomic.compare_add);
	} else {
		/* fetch-and-add: addend travels in swap_add */
		aseg->swap_add = cpu_to_be64(wr->wr.atomic.compare_add);
		aseg->compare  = 0;
	}
}
/*
 * Fill in a Tavor-format UD segment: the address vector is referenced
 * by lkey + DMA address rather than copied inline.
 */
static void set_tavor_ud_seg(struct mthca_tavor_ud_seg *useg,
			     struct ib_send_wr *wr)
{
	useg->qkey    = cpu_to_be32(wr->wr.ud.remote_qkey);
	useg->dqpn    = cpu_to_be32(wr->wr.ud.remote_qpn);
	useg->av_addr = cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
	useg->lkey    = cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
}
/*
 * Fill in an Arbel-format UD segment: the address vector is copied
 * inline into the WQE instead of referenced by address.
 */
static void set_arbel_ud_seg(struct mthca_arbel_ud_seg *useg,
			     struct ib_send_wr *wr)
{
	useg->qkey = cpu_to_be32(wr->wr.ud.remote_qkey);
	useg->dqpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	memcpy(useg->av, to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
}
/*
 * Post a chain of send work requests on a Tavor-mode QP.
 *
 * For each WR we build the WQE in place (next segment, transport
 * segment, gather list, plus an inline ICRC segment for MLX QPs),
 * then link it into the chain by patching the PREVIOUS WQE's nda_op /
 * ee_nds fields — the wmb() between the two patches ensures the HCA
 * never follows a link to a half-written descriptor.  Finally a
 * single MMIO doorbell covering the whole chain is rung.
 *
 * Returns 0 on success or a negative errno, with *bad_wr pointing at
 * the first WR that failed (queue full, too many SGEs, bad opcode).
 */
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		/* next segment: link filled in later via prev_wqe */
		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		/* transport-specific segments follow the next segment */
		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_tavor_ud_seg(wqe, wr);
			wqe  += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			/* MLX: the header goes into the preceding mlx seg */
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe  += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* SQ wrids live after the RQ wrids in the shared array */
		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		/* link the previous WQE to this one ... */
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		/* ... and only then mark it valid (DBD only on the first) */
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((nreq ? 0 : MTHCA_NEXT_DBD) | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		/* descriptors must be visible before the doorbell */
		wmb();

		mthca_write64(((qp->sq.next_ind << qp->sq.wqe_shift) +
			       qp->send_wqe_offset) | f0 | op0,
			      (qp->qpn << 8) | size0,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		/*
		 * Make sure doorbells don't leak out of SQ spinlock
		 * and reach the HCA out of order:
		 */
		mmiowb();
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
/*
 * Post a chain of receive work requests on a Tavor-mode QP.
 *
 * Each WQE gets a next segment plus its scatter list; the previous
 * WQE's ee_nds is patched to mark the new one valid.  Because a
 * single doorbell can only cover a limited chain, the loop rings an
 * intermediate doorbell every MTHCA_TAVOR_MAX_WQES_PER_RECV_DB WQEs
 * and a final one for the remainder.
 *
 * Returns 0 or a negative errno with *bad_wr set to the failing WR.
 */
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * size0 is only used if nreq != 0, and it will always be
	 * initialized the first time through the main loop before
	 * nreq is incremented.  So nreq cannot become non-zero
	 * without initializing size0, and it is in fact never used
	 * uninitialized.
	 */
	int uninitialized_var(size0);
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe  += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		/* make the new WQE reachable from the previous one */
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!nreq)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;

		++nreq;
		/* chain limit reached: ring an intermediate doorbell */
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			wmb();

			mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
				      qp->qpn << 8, dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.next_ind = ind;
			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
		}
	}

out:
	if (likely(nreq)) {
		wmb();

		mthca_write64((qp->rq.next_ind << qp->rq.wqe_shift) | size0,
			      qp->qpn << 8 | nreq, dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	/*
	 * Make sure doorbells don't leak out of RQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
/*
 * Post a chain of send work requests on an Arbel (mem-free) QP.
 *
 * Same WQE-building scheme as the Tavor path, but with Arbel-specific
 * differences: a doorbell record in memory is updated (with a wmb()
 * separating descriptor writes, record write, and the MMIO doorbell),
 * checksum offload flags are supported, and the chain is flushed
 * every MTHCA_ARBEL_MAX_WQES_PER_SEND_DB WQEs because the doorbell's
 * request count field is limited.
 *
 * Returns 0 or a negative errno with *bad_wr set to the failing WR.
 */
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	u32 dbhi;
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	/*
	 * f0 and size0 are only used if nreq != 0, and they will
	 * always be initialized the first time through the main loop
	 * before nreq is incremented.  So nreq cannot become non-zero
	 * without initializing f0 and size0, and they are in fact
	 * never used uninitialized.
	 */
	int uninitialized_var(size0);
	u32 uninitialized_var(f0);
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		/* flush a full chain before starting a new one */
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			dbhi = (MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
				((qp->sq.head & 0xffff) << 8) | f0 | op0;

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();

			mthca_write64(dbhi, (qp->qpn << 8) | size0,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}

		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		/* Arbel additionally supports IP/TCP/UDP checksum offload */
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			((wr->send_flags & IB_SEND_IP_CSUM) ?
			 cpu_to_be32(MTHCA_NEXT_IP_CSUM | MTHCA_NEXT_TCP_UDP_CSUM) : 0) |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->ex.imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				set_raddr_seg(wqe, wr->wr.atomic.remote_addr,
					      wr->wr.atomic.rkey);
				wqe += sizeof (struct mthca_raddr_seg);

				set_atomic_seg(wqe, wr);
				wqe  += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				set_raddr_seg(wqe, wr->wr.rdma.remote_addr,
					      wr->wr.rdma.rkey);
				wqe  += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			set_arbel_ud_seg(wqe, wr);
			wqe  += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe  += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		/* link previous WQE, then publish it after the barrier */
		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size |
				    ((wr->send_flags & IB_SEND_FENCE) ?
				     MTHCA_NEXT_FENCE : 0));

		if (!nreq) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
			f0    = wr->send_flags & IB_SEND_FENCE ?
				MTHCA_SEND_DOORBELL_FENCE : 0;
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		dbhi = (nreq << 24) | ((qp->sq.head & 0xffff) << 8) | f0 | op0;

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();

		mthca_write64(dbhi, (qp->qpn << 8) | size0, dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	/*
	 * Make sure doorbells don't leak out of SQ spinlock and reach
	 * the HCA out of order:
	 */
	mmiowb();

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
/*
 * Post a chain of receive work requests on an Arbel (mem-free) QP.
 *
 * Arbel RQ WQEs were pre-linked at QP creation, so no chain patching
 * is needed here — just fill in the scatter list (terminating a short
 * list with an invalid-lkey entry) and bump the doorbell record once
 * at the end, after a wmb() makes the descriptors visible.
 *
 * Returns 0 or a negative errno with *bad_wr set to the failing WR.
 */
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			mthca_set_data_seg(wqe, wr->sg_list + i);
			wqe += sizeof (struct mthca_data_seg);
		}

		/* terminate a short scatter list for the HCA */
		if (i < qp->rq.max_gs)
			mthca_set_data_seg_inval(wqe);

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
/*
 * When a WQE completes in error, compute the replacement nda_op value
 * (*new_wqe) the error-handling code should use to re-link the queue,
 * and report in *dbd whether the failed WQE had the doorbell-counted
 * (DBD) bit set.  A zero *new_wqe means "end of chain".
 */
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all receive WQEs generate a CQE, so we're always
	 * at the end of the doorbell chain.
	 */
	if (qp->ibqp.srq && !is_send) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	/* low 6 bits of ee_nds carry the size; nonzero means chained */
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}
/*
 * Driver-load initialization of the QP table: set up the QPN
 * allocator (regular QPNs start after the reserved + special range),
 * the qpn -> qp lookup array, and configure the two special QPs
 * (SMI, GSI) in firmware.  On special-QP failure, both are
 * deconfigured and everything torn down.
 */
int mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	/* tell firmware which QPNs are the SMI (even) and GSI (odd) QPs */
	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2);
		if (err) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "%d, aborting.\n", err);
			goto err_out;
		}
	}
	return 0;

 err_out:
	/* deconfigure both special-QP types (qpn 0 == "disabled") */
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
/*
 * Driver-unload counterpart of mthca_init_qp_table(): deconfigure the
 * special QPs in firmware, then free the lookup array and allocator.
 */
void mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}
| gpl-2.0 |
piaoxue99/lp_l24_kernel | drivers/gpu/drm/i915/i915_gem_debug.c | 5518 | 5674 | /*
* Copyright © 2008 Intel Corporation
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Authors:
* Keith Packard <keithp@keithp.com>
*
*/
#include "drmP.h"
#include "drm.h"
#include "i915_drm.h"
#include "i915_drv.h"
#if WATCH_LISTS
/*
 * Debug-only (WATCH_LISTS) invariant checker for the GEM object
 * lists.  Walks the active, flushing, gpu-write, inactive and pinned
 * lists and verifies that each object's flags (active, pin_count,
 * read/write domains, gpu_write_list membership) are consistent with
 * the list it is on, and that the object has not been freed out from
 * under the list (refcount still non-zero, dev pointer sane).
 *
 * Returns the number of violations found; the result is cached in
 * 'warned' so the (expensive) walk only runs until the first failure.
 */
int
i915_verify_lists(struct drm_device *dev)
{
	static int warned;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	int err = 0;

	if (warned)
		return 0;

	/* active list: must be active with a GPU read domain */
	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed render active %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid render active %p (a %d r %x)\n",
				  obj,
				  obj->active,
				  obj->base.read_domains);
			err++;
		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
				  obj,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

	/* flushing list: active, GPU write domain, on the gpu_write_list */
	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed flushing %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
			   list_empty(&obj->gpu_write_list)) {
			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain,
				  !list_empty(&obj->gpu_write_list));
			err++;
		}
	}

	/* gpu-write list: active with a GPU write domain */
	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed gpu write %p\n", obj);
			err++;
			break;
		} else if (!obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
				  obj,
				  obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	/* inactive list: not pinned, not active, no GPU write domain */
	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed inactive %p\n", obj);
			err++;
			break;
		} else if (obj->pin_count || obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
				  obj,
				  obj->pin_count, obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	/* pinned list: pinned, not active, no GPU write domain */
	list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
		if (obj->base.dev != dev ||
		    !atomic_read(&obj->base.refcount.refcount)) {
			DRM_ERROR("freed pinned %p\n", obj);
			err++;
			break;
		} else if (!obj->pin_count || obj->active ||
			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
			DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
				  obj,
				  obj->pin_count, obj->active,
				  obj->base.write_domain);
			err++;
		}
	}

	return warned = err;
}
#endif /* WATCH_INACTIVE */
#if WATCH_COHERENCY
/*
 * Compare every dword of an object's backing pages against the same
 * object read back through its GTT mapping, reporting up to ~8
 * mismatches before giving up.  Debug-only (WATCH_COHERENCY).
 */
void
i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
    struct drm_device *dev = obj->base.dev;
    int page;
    uint32_t *gtt_mapping;
    uint32_t *backing_map = NULL;
    int bad_count = 0;

    /* NOTE(review): mixes obj->size (loop/report) with obj->base.size
     * (ioremap length) -- presumably equal; confirm against the struct. */
    DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
         __func__, obj, obj->gtt_offset, handle,
         obj->size / 1024);

    /* Map the object's aperture window so we can read what the GPU sees. */
    gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
    if (gtt_mapping == NULL) {
        DRM_ERROR("failed to map GTT space\n");
        return;
    }

    for (page = 0; page < obj->size / PAGE_SIZE; page++) {
        int i;

        backing_map = kmap_atomic(obj->pages[page]);

        if (backing_map == NULL) {
            DRM_ERROR("failed to map backing page\n");
            goto out;
        }

        for (i = 0; i < PAGE_SIZE / 4; i++) {
            uint32_t cpuval = backing_map[i];
            /* readl: GTT window is I/O-mapped; 1024 dwords per page */
            uint32_t gttval = readl(gtt_mapping +
                        page * 1024 + i);

            if (cpuval != gttval) {
                DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
                     "0x%08x vs 0x%08x\n",
                     (int)(obj->gtt_offset +
                       page * PAGE_SIZE + i * 4),
                     cpuval, gttval);
                if (bad_count++ >= 8) {
                    DRM_INFO("...\n");
                    goto out;
                }
            }
        }
        kunmap_atomic(backing_map);
        backing_map = NULL;
    }

 out:
    /* backing_map is non-NULL only on the early-exit paths above */
    if (backing_map != NULL)
        kunmap_atomic(backing_map);
    iounmap(gtt_mapping);

    /* give syslog time to catch up */
    msleep(1);

    /* Directly flush the object, since we just loaded values with the CPU
     * from the backing pages and we don't want to disturb the cache
     * management that we're trying to observe.
     */
    i915_gem_clflush_object(obj);
}
#endif
| gpl-2.0 |
TeamOrion-Devices/kernel_asus_grouper | drivers/char/pc8736x_gpio.c | 8334 | 8996 | /* linux/drivers/char/pc8736x_gpio.c
National Semiconductor PC8736x GPIO driver. Allows a user space
process to play with the GPIO pins.
Copyright (c) 2005,2006 Jim Cromie <jim.cromie@gmail.com>
adapted from linux/drivers/char/scx200_gpio.c
Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>,
*/
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/cdev.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/mutex.h>
#include <linux/nsc_gpio.h>
#include <linux/platform_device.h>
#include <asm/uaccess.h>
#define DEVNAME "pc8736x_gpio"
MODULE_AUTHOR("Jim Cromie <jim.cromie@gmail.com>");
MODULE_DESCRIPTION("NatSemi/Winbond PC-8736x GPIO Pin Driver");
MODULE_LICENSE("GPL");
static int major; /* default to dynamic major */
module_param(major, int, 0);
MODULE_PARM_DESC(major, "Major device number");
static DEFINE_MUTEX(pc8736x_gpio_config_lock);
static unsigned pc8736x_gpio_base;
static u8 pc8736x_gpio_shadow[4];
#define SIO_BASE1 0x2E /* 1st command-reg to check */
#define SIO_BASE2 0x4E /* alt command-reg to check */
#define SIO_SID 0x20 /* SuperI/O ID Register */
#define SIO_SID_PC87365 0xe5 /* Expected value in ID Register for PC87365 */
#define SIO_SID_PC87366 0xe9 /* Expected value in ID Register for PC87366 */
#define SIO_CF1 0x21 /* chip config, bit0 is chip enable */
#define PC8736X_GPIO_RANGE 16 /* ioaddr range */
#define PC8736X_GPIO_CT 32 /* minors matching 4 8 bit ports */
#define SIO_UNIT_SEL 0x7 /* unit select reg */
#define SIO_UNIT_ACT 0x30 /* unit enable */
#define SIO_GPIO_UNIT 0x7 /* unit number of GPIO */
#define SIO_VLM_UNIT 0x0D
#define SIO_TMS_UNIT 0x0E
/* config-space addrs to read/write each unit's runtime addr */
#define SIO_BASE_HADDR 0x60
#define SIO_BASE_LADDR 0x61
/* GPIO config-space pin-control addresses */
#define SIO_GPIO_PIN_SELECT 0xF0
#define SIO_GPIO_PIN_CONFIG 0xF1
#define SIO_GPIO_PIN_EVENT 0xF2
static unsigned char superio_cmd = 0;
static unsigned char selected_device = 0xFF; /* bogus start val */
/* GPIO port runtime access, functionality */
static int port_offset[] = { 0, 4, 8, 10 }; /* non-uniform offsets ! */
/* static int event_capable[] = { 1, 1, 0, 0 }; ports 2,3 are hobbled */
#define PORT_OUT 0
#define PORT_IN 1
#define PORT_EVT_EN 2
#define PORT_EVT_STST 3
static struct platform_device *pdev; /* use in dev_*() */
/* Write @val into SuperI/O config register @addr via the index/data pair. */
static inline void superio_outb(int addr, int val)
{
    outb_p(addr, superio_cmd);      /* select register */
    outb_p(val, superio_cmd + 1);   /* write its data port */
}
/* Read SuperI/O config register @addr via the index/data pair. */
static inline int superio_inb(int addr)
{
    outb_p(addr, superio_cmd);      /* select register */
    return inb_p(superio_cmd + 1);  /* read its data port */
}
/*
 * Probe both candidate SuperI/O command ports and verify a PC87365/87366
 * chip by reading its ID register.  Returns the command port found, or 0.
 * Side effect: leaves superio_cmd set to the port probed last.
 */
static int pc8736x_superio_present(void)
{
    static const unsigned char bases[] = { SIO_BASE1, SIO_BASE2 };
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(bases); i++) {
        int sid;

        superio_cmd = bases[i];
        sid = superio_inb(SIO_SID);
        if (sid == SIO_SID_PC87365 || sid == SIO_SID_PC87366)
            return superio_cmd;
    }
    return 0;
}
/* Select logical device @devldn in the SuperI/O and cache the selection. */
static void device_select(unsigned devldn)
{
    superio_outb(SIO_UNIT_SEL, devldn);
    selected_device = devldn;
}
/* Point the GPIO unit's pin-select register at the pin for @iminor. */
static void select_pin(unsigned iminor)
{
    /* select GPIO port/pin from device minor number */
    device_select(SIO_GPIO_UNIT);
    /* port number goes in the high nibble, pin (0-7) in the low bits */
    superio_outb(SIO_GPIO_PIN_SELECT,
             ((iminor << 1) & 0xF0) | (iminor & 0x7));
}
/*
 * Read-modify-write a per-pin config register: new = (old & @mask) | @bits.
 * @func_slct picks which config register (pin-config, event, ...).
 * Returns the previous register value.  Serialized by the config mutex.
 */
static inline u32 pc8736x_gpio_configure_fn(unsigned index, u32 mask, u32 bits,
                        u32 func_slct)
{
    u32 config, new_config;

    mutex_lock(&pc8736x_gpio_config_lock);

    device_select(SIO_GPIO_UNIT);
    select_pin(index);

    /* read current config value */
    config = superio_inb(func_slct);

    /* set new config */
    new_config = (config & mask) | bits;
    superio_outb(func_slct, new_config);

    mutex_unlock(&pc8736x_gpio_config_lock);

    return config;
}
/* Convenience wrapper: update the pin-config register for pin @index. */
static u32 pc8736x_gpio_configure(unsigned index, u32 mask, u32 bits)
{
    return pc8736x_gpio_configure_fn(index, mask, bits,
                     SIO_GPIO_PIN_CONFIG);
}
/* Read the current input level (0/1) of the pin addressed by @minor. */
static int pc8736x_gpio_get(unsigned minor)
{
    int port, bit, val;

    /* 8 pins per port; ports have non-uniform runtime offsets */
    port = minor >> 3;
    bit = minor & 7;
    val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);
    val >>= bit;
    val &= 1;

    dev_dbg(&pdev->dev, "_gpio_get(%d from %x bit %d) == val %d\n",
        minor, pc8736x_gpio_base + port_offset[port] + PORT_IN, bit,
        val);

    return val;
}
/*
 * Drive pin @minor to @val (0/1) with a read-modify-write of the port's
 * output register, then read back and refresh the shadow copy.
 */
static void pc8736x_gpio_set(unsigned minor, int val)
{
    int port, bit, curval;

    minor &= 0x1f;      /* 32 valid minors */
    port = minor >> 3;
    bit = minor & 7;

    curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);

    dev_dbg(&pdev->dev, "addr:%x cur:%x bit-pos:%d cur-bit:%x + new:%d -> bit-new:%d\n",
        pc8736x_gpio_base + port_offset[port] + PORT_OUT,
        curval, bit, (curval & ~(1 << bit)), val, (val << bit));

    /* merge the new bit into the other pins' current output levels */
    val = (curval & ~(1 << bit)) | (val << bit);

    dev_dbg(&pdev->dev, "gpio_set(minor:%d port:%d bit:%d)"
        " %2x -> %2x\n", minor, port, bit, curval, val);

    outb_p(val, pc8736x_gpio_base + port_offset[port] + PORT_OUT);

    /* read back both output latch and input level for diagnostics */
    curval = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_OUT);
    val = inb_p(pc8736x_gpio_base + port_offset[port] + PORT_IN);

    dev_dbg(&pdev->dev, "wrote %x, read: %x\n", curval, val);
    /* shadow holds the pin levels as read back from the hardware */
    pc8736x_gpio_shadow[port] = val;
}
/* Return the cached (shadow) level of pin @minor without touching hardware. */
static int pc8736x_gpio_current(unsigned minor)
{
    unsigned pin = minor & 0x1f;    /* 32 valid minors */
    int port_idx = pin >> 3;        /* 8 pins per port */
    int bit_idx = pin & 7;

    return (pc8736x_gpio_shadow[port_idx] >> bit_idx) & 1;
}
/* Toggle pin @index, based on its shadowed current value. */
static void pc8736x_gpio_change(unsigned index)
{
    pc8736x_gpio_set(index, !pc8736x_gpio_current(index));
}
/* Operations vector handed to the shared nsc_gpio read/write helpers. */
static struct nsc_gpio_ops pc8736x_gpio_ops = {
    .owner      = THIS_MODULE,
    .gpio_config    = pc8736x_gpio_configure,
    .gpio_dump  = nsc_gpio_dump,    /* generic helper from nsc_gpio */
    .gpio_get   = pc8736x_gpio_get,
    .gpio_set   = pc8736x_gpio_set,
    .gpio_change    = pc8736x_gpio_change,
    .gpio_current   = pc8736x_gpio_current
};
/*
 * open(): each minor addresses one GPIO pin.  Validate the minor number
 * before publishing any state; the ops vector is shared and stateless.
 *
 * Fix: the range check now runs before file->private_data is assigned,
 * so invalid opens make no state changes at all.
 */
static int pc8736x_gpio_open(struct inode *inode, struct file *file)
{
    unsigned m = iminor(inode);

    dev_dbg(&pdev->dev, "open %d\n", m);

    /* reject out-of-range minors first */
    if (m >= PC8736X_GPIO_CT)
        return -EINVAL;

    file->private_data = &pc8736x_gpio_ops;
    return nonseekable_open(inode, file);
}
/* read/write are the generic nsc_gpio helpers driven by pc8736x_gpio_ops. */
static const struct file_operations pc8736x_gpio_fileops = {
    .owner  = THIS_MODULE,
    .open   = pc8736x_gpio_open,
    .write  = nsc_gpio_write,
    .read   = nsc_gpio_read,
    .llseek = no_llseek,
};
/* Seed the shadow copies from what is currently driven on each GPIO port. */
static void __init pc8736x_init_shadow(void)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(pc8736x_gpio_shadow); i++)
        pc8736x_gpio_shadow[i] =
            inb_p(pc8736x_gpio_base + port_offset[i] + PORT_OUT);
}
/* Character device registered for the 32 pin minors at init time. */
static struct cdev pc8736x_gpio_cdev;
/*
 * Module init: locate the SuperI/O, confirm the chip and its GPIO unit
 * are enabled, reserve the GPIO I/O range, register the char device
 * region and cdev.  Failures unwind in reverse order via gotos.
 */
static int __init pc8736x_gpio_init(void)
{
    int rc;
    dev_t devid;

    /* platform device exists mainly to give dev_*() a struct device */
    pdev = platform_device_alloc(DEVNAME, 0);
    if (!pdev)
        return -ENOMEM;

    rc = platform_device_add(pdev);
    if (rc) {
        rc = -ENODEV;
        goto undo_platform_dev_alloc;
    }
    dev_info(&pdev->dev, "NatSemi pc8736x GPIO Driver Initializing\n");

    if (!pc8736x_superio_present()) {
        rc = -ENODEV;
        dev_err(&pdev->dev, "no device found\n");
        goto undo_platform_dev_add;
    }
    pc8736x_gpio_ops.dev = &pdev->dev;

    /* Verify that chip and it's GPIO unit are both enabled.
       My BIOS does this, so I take minimum action here
     */
    rc = superio_inb(SIO_CF1);
    if (!(rc & 0x01)) {
        rc = -ENODEV;
        dev_err(&pdev->dev, "device not enabled\n");
        goto undo_platform_dev_add;
    }
    device_select(SIO_GPIO_UNIT);
    if (!superio_inb(SIO_UNIT_ACT)) {
        rc = -ENODEV;
        dev_err(&pdev->dev, "GPIO unit not enabled\n");
        goto undo_platform_dev_add;
    }

    /* read the GPIO unit base addr that chip responds to */
    pc8736x_gpio_base = (superio_inb(SIO_BASE_HADDR) << 8
                 | superio_inb(SIO_BASE_LADDR));

    if (!request_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE, DEVNAME)) {
        rc = -ENODEV;
        dev_err(&pdev->dev, "GPIO ioport %x busy\n",
            pc8736x_gpio_base);
        goto undo_platform_dev_add;
    }
    dev_info(&pdev->dev, "GPIO ioport %x reserved\n", pc8736x_gpio_base);

    /* static major if given via module param, else dynamic */
    if (major) {
        devid = MKDEV(major, 0);
        rc = register_chrdev_region(devid, PC8736X_GPIO_CT, DEVNAME);
    } else {
        rc = alloc_chrdev_region(&devid, 0, PC8736X_GPIO_CT, DEVNAME);
        major = MAJOR(devid);
    }

    if (rc < 0) {
        dev_err(&pdev->dev, "register-chrdev failed: %d\n", rc);
        goto undo_request_region;
    }
    /* NOTE(review): rc is 0 on alloc_chrdev_region() success and major
       was already set from devid above, so this branch looks like dead
       leftover from a register_chrdev()-style API -- confirm. */
    if (!major) {
        major = rc;
        dev_dbg(&pdev->dev, "got dynamic major %d\n", major);
    }

    pc8736x_init_shadow();

    /* ignore minor errs, and succeed */
    cdev_init(&pc8736x_gpio_cdev, &pc8736x_gpio_fileops);
    cdev_add(&pc8736x_gpio_cdev, devid, PC8736X_GPIO_CT);

    return 0;

undo_request_region:
    release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);
undo_platform_dev_add:
    platform_device_del(pdev);
undo_platform_dev_alloc:
    platform_device_put(pdev);

    return rc;
}
/* Module exit: tear down in strict reverse order of pc8736x_gpio_init(). */
static void __exit pc8736x_gpio_cleanup(void)
{
    dev_dbg(&pdev->dev, "cleanup\n");

    cdev_del(&pc8736x_gpio_cdev);
    unregister_chrdev_region(MKDEV(major, 0), PC8736X_GPIO_CT);
    release_region(pc8736x_gpio_base, PC8736X_GPIO_RANGE);

    platform_device_del(pdev);
    platform_device_put(pdev);
}
/* Module entry/exit registration. */
module_init(pc8736x_gpio_init);
module_exit(pc8736x_gpio_cleanup);
| gpl-2.0 |
omnirom/android_kernel_htc_pico | arch/sh/boards/mach-landisk/setup.c | 12174 | 2669 | /*
* arch/sh/boards/landisk/setup.c
*
* I-O DATA Device, Inc. LANDISK Support.
*
* Copyright (C) 2000 Kazumoto Kojima
* Copyright (C) 2002 Paul Mundt
* Copylight (C) 2002 Atom Create Engineering Co., Ltd.
* Copyright (C) 2005-2007 kogiidena
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*/
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/ata_platform.h>
#include <linux/pm.h>
#include <linux/mm.h>
#include <asm/machvec.h>
#include <mach-landisk/mach/iodata_landisk.h>
#include <asm/io.h>
/* Board poweroff: write 1 to the shutdown register (see pm_power_off). */
static void landisk_power_off(void)
{
    __raw_writeb(0x01, PA_SHUTDOWN);
}
/* CF IDE window resources; filled in by landisk_devices_setup(). */
static struct resource cf_ide_resources[3];

/* CF registers sit on every other byte: shift ioport addresses by 1. */
static struct pata_platform_info pata_info = {
    .ioport_shift   = 1,
};

static struct platform_device cf_ide_device = {
    .name       = "pata_platform",
    .id     = -1,
    .num_resources  = ARRAY_SIZE(cf_ide_resources),
    .resource   = cf_ide_resources,
    .dev = {
        .platform_data  = &pata_info,
    },
};

/* rs5c313 real-time-clock device */
static struct platform_device rtc_device = {
    .name       = "rs5c313",
    .id     = -1,
};

/* devices registered in one shot by landisk_devices_setup() */
static struct platform_device *landisk_devices[] __initdata = {
    &cf_ide_device,
    &rtc_device,
};
/*
 * Map the area-5 I/O window with 16-bit PCMCIA I/O attributes, derive the
 * CF IDE command/control resources from it, and register the platform
 * devices.  Returns 0 or -ENOMEM if the window cannot be mapped.
 */
static int __init landisk_devices_setup(void)
{
    pgprot_t prot;
    unsigned long paddrbase;
    void *cf_ide_base;

    /* open I/O area window */
    paddrbase = virt_to_phys((void *)PA_AREA5_IO);
    prot = PAGE_KERNEL_PCC(1, _PAGE_PCC_IO16);
    cf_ide_base = ioremap_prot(paddrbase, PAGE_SIZE, pgprot_val(prot));
    if (!cf_ide_base) {
        printk("allocate_cf_area : can't open CF I/O window!\n");
        return -ENOMEM;
    }

    /* IDE cmd address : 0x1f0-0x1f7 and 0x3f6 */
    cf_ide_resources[0].start = (unsigned long)cf_ide_base + 0x40;
    cf_ide_resources[0].end = (unsigned long)cf_ide_base + 0x40 + 0x0f;
    cf_ide_resources[0].flags = IORESOURCE_IO;
    /* control block register */
    cf_ide_resources[1].start = (unsigned long)cf_ide_base + 0x2c;
    cf_ide_resources[1].end = (unsigned long)cf_ide_base + 0x2c + 0x03;
    cf_ide_resources[1].flags = IORESOURCE_IO;
    cf_ide_resources[2].start = IRQ_FATA;
    cf_ide_resources[2].flags = IORESOURCE_IRQ;

    return platform_add_devices(landisk_devices,
                    ARRAY_SIZE(landisk_devices));
}
device_initcall(landisk_devices_setup);
/* Early board setup: turn on the front LEDs and hook the poweroff handler. */
static void __init landisk_setup(char **cmdline_p)
{
    /* LED ON */
    __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED);

    printk(KERN_INFO "I-O DATA DEVICE, INC. \"LANDISK Series\" support.\n");
    pm_power_off = landisk_power_off;
}
/*
* The Machine Vector
*/
/* Machine vector: board name, setup hook and IRQ initialization. */
static struct sh_machine_vector mv_landisk __initmv = {
    .mv_name = "LANDISK",
    .mv_setup = landisk_setup,
    .mv_init_irq = init_landisk_IRQ,
};
| gpl-2.0 |
quadcores/linux | drivers/usb/host/oxu210hp-hcd.c | 143 | 100529 | /*
* Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it>
* Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it>
*
* This code is *strongly* based on EHCI-HCD code by David Brownell since
* the chip is a quasi-EHCI compatible.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
#include <linux/moduleparam.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <asm/irq.h>
#include <asm/unaligned.h>
#include <linux/irq.h>
#include <linux/platform_device.h>
#include "oxu210hp.h"
#define DRIVER_VERSION "0.0.50"
/*
* Main defines
*/
#define oxu_dbg(oxu, fmt, args...) \
dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_err(oxu, fmt, args...) \
dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#define oxu_info(oxu, fmt, args...) \
dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args)
#ifdef CONFIG_DYNAMIC_DEBUG
#define DEBUG
#endif
/* Convert the private oxu state back to the usb_hcd that embeds it. */
static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu)
{
    return container_of((void *) oxu, struct usb_hcd, hcd_priv);
}
/* Driver-private state lives in the hcd's hcd_priv[] tail. */
static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd)
{
    return (struct oxu_hcd *) (hcd->hcd_priv);
}
/*
* Debug stuff
*/
#undef OXU_URB_TRACE
#undef OXU_VERBOSE_DEBUG
#ifdef OXU_VERBOSE_DEBUG
#define oxu_vdbg oxu_dbg
#else
#define oxu_vdbg(oxu, fmt, args...) /* Nop */
#endif
#ifdef DEBUG
/* Render the USBSTS register bits as a human-readable string into @buf. */
static int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{
    return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s",
        label, label[0] ? " " : "", status,
        (status & STS_ASS) ? " Async" : "",
        (status & STS_PSS) ? " Periodic" : "",
        (status & STS_RECL) ? " Recl" : "",
        (status & STS_HALT) ? " Halt" : "",
        (status & STS_IAA) ? " IAA" : "",
        (status & STS_FATAL) ? " FATAL" : "",
        (status & STS_FLR) ? " FLR" : "",
        (status & STS_PCD) ? " PCD" : "",
        (status & STS_ERR) ? " ERR" : "",
        (status & STS_INT) ? " INT" : ""
        );
}
/* Render the USBINTR enable mask as a human-readable string into @buf. */
static int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{
    return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s",
        label, label[0] ? " " : "", enable,
        (enable & STS_IAA) ? " IAA" : "",
        (enable & STS_FATAL) ? " FATAL" : "",
        (enable & STS_FLR) ? " FLR" : "",
        (enable & STS_PCD) ? " PCD" : "",
        (enable & STS_ERR) ? " ERR" : "",
        (enable & STS_INT) ? " INT" : ""
        );
}
/* Frame-list size field (USBCMD bits 3:2), decoded to entry counts. */
static const char *const fls_strings[] =
    { "1024", "512", "256", "??" };

/* Render the USBCMD register as a human-readable string into @buf. */
static int dbg_command_buf(char *buf, unsigned len,
               const char *label, u32 command)
{
    return scnprintf(buf, len,
        "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s",
        label, label[0] ? " " : "", command,
        (command & CMD_PARK) ? "park" : "(park)",
        CMD_PARK_CNT(command),
        (command >> 16) & 0x3f,     /* interrupt threshold */
        (command & CMD_LRESET) ? " LReset" : "",
        (command & CMD_IAAD) ? " IAAD" : "",
        (command & CMD_ASE) ? " Async" : "",
        (command & CMD_PSE) ? " Periodic" : "",
        fls_strings[(command >> 2) & 0x3],
        (command & CMD_RESET) ? " Reset" : "",
        (command & CMD_RUN) ? "RUN" : "HALT"
        );
}
/* Render a PORTSC register as a human-readable string into @buf. */
static int dbg_port_buf(char *buf, unsigned len, const char *label,
            int port, u32 status)
{
    /* line-state bits [11:10]: se0, k (low speed), j, undefined */
    static const char * const sig_names[] = { "se0", "k", "j", "?" };
    const char *sig = sig_names[(status >> 10) & 3];

    return scnprintf(buf, len,
        "%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s",
        label, label[0] ? " " : "", port, status,
        (status & PORT_POWER) ? " POWER" : "",
        (status & PORT_OWNER) ? " OWNER" : "",
        sig,
        (status & PORT_RESET) ? " RESET" : "",
        (status & PORT_SUSPEND) ? " SUSPEND" : "",
        (status & PORT_RESUME) ? " RESUME" : "",
        (status & PORT_OCC) ? " OCC" : "",
        (status & PORT_OC) ? " OC" : "",
        (status & PORT_PEC) ? " PEC" : "",
        (status & PORT_PE) ? " PE" : "",
        (status & PORT_CSC) ? " CSC" : "",
        (status & PORT_CONNECT) ? " CONNECT" : ""
        );
}
#else
static inline int __attribute__((__unused__))
dbg_status_buf(char *buf, unsigned len, const char *label, u32 status)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_command_buf(char *buf, unsigned len, const char *label, u32 command)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable)
{ return 0; }
static inline int __attribute__((__unused__))
dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status)
{ return 0; }
#endif /* DEBUG */
/* functions have the "wrong" filename when they're output... */
#define dbg_status(oxu, label, status) { \
char _buf[80]; \
dbg_status_buf(_buf, sizeof _buf, label, status); \
oxu_dbg(oxu, "%s\n", _buf); \
}
#define dbg_cmd(oxu, label, command) { \
char _buf[80]; \
dbg_command_buf(_buf, sizeof _buf, label, command); \
oxu_dbg(oxu, "%s\n", _buf); \
}
#define dbg_port(oxu, label, port, status) { \
char _buf[80]; \
dbg_port_buf(_buf, sizeof _buf, label, port, status); \
oxu_dbg(oxu, "%s\n", _buf); \
}
/*
* Module parameters
*/
/* Initial IRQ latency: faster than hw default */
static int log2_irq_thresh;     /* 0 to 6 */
module_param(log2_irq_thresh, int, S_IRUGO);
MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes");

/* Initial park setting: slower than hw default */
static unsigned park;
module_param(park, uint, S_IRUGO);
MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets");

/* For flakey hardware, ignore overcurrent indicators */
static bool ignore_oc;
module_param(ignore_oc, bool, S_IRUGO);
MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications");

/* forward declarations for functions defined later in this file */
static void ehci_work(struct oxu_hcd *oxu);
static int oxu_hub_control(struct usb_hcd *hcd,
                u16 typeReq, u16 wValue, u16 wIndex,
                char *buf, u16 wLength);
/*
* Local functions
*/
/* Low level read/write registers functions */
/* MMIO read of register @reg relative to @base. */
static inline u32 oxu_readl(void *base, u32 reg)
{
    return readl(base + reg);
}
/* MMIO write of @val to register @reg relative to @base. */
static inline void oxu_writel(void *base, u32 reg, u32 val)
{
    writel(val, base + reg);
}
/* Mark the watchdog @action as no longer pending. */
static inline void timer_action_done(struct oxu_hcd *oxu,
                    enum ehci_timer_action action)
{
    clear_bit(action, &oxu->actions);
}
/*
 * Arm the shared watchdog timer for @action unless that action is already
 * pending.  Multiple actions share one timer; an earlier-expiring pending
 * timer is kept rather than being pushed out (except for IAA, see below).
 */
static inline void timer_action(struct oxu_hcd *oxu,
                enum ehci_timer_action action)
{
    /* test_and_set: only the first request for an action arms the timer */
    if (!test_and_set_bit(action, &oxu->actions)) {
        unsigned long t;

        switch (action) {
        case TIMER_IAA_WATCHDOG:
            t = EHCI_IAA_JIFFIES;
            break;
        case TIMER_IO_WATCHDOG:
            t = EHCI_IO_JIFFIES;
            break;
        case TIMER_ASYNC_OFF:
            t = EHCI_ASYNC_JIFFIES;
            break;
        case TIMER_ASYNC_SHRINK:
        default:
            t = EHCI_SHRINK_JIFFIES;
            break;
        }
        t += jiffies;
        /* all timings except IAA watchdog can be overridden.
         * async queue SHRINK often precedes IAA.  while it's ready
         * to go OFF neither can matter, and afterwards the IO
         * watchdog stops unless there's still periodic traffic.
         */
        if (action != TIMER_IAA_WATCHDOG
                && t > oxu->watchdog.expires
                && timer_pending(&oxu->watchdog))
            return;
        mod_timer(&oxu->watchdog, t);
    }
}
/*
* handshake - spin reading hc until handshake completes or fails
* @ptr: address of hc register to be read
* @mask: bits to look at in result of read
* @done: value of those bits when handshake succeeds
* @usec: timeout in microseconds
*
* Returns negative errno, or zero on success
*
* Success happens when the "mask" bits have the specified value (hardware
* handshake done). There are two failure modes: "usec" have passed (major
* hardware flakeout), or the register reads as all-ones (hardware removed).
*
* That last failure should_only happen in cases like physical cardbus eject
* before driver shutdown. But it also seems to be caused by bugs in cardbus
* bridge shutdown: shutting down the bridge before the devices using it.
*/
static int handshake(struct oxu_hcd *oxu, void __iomem *ptr,
             u32 mask, u32 done, int usec)
{
    u32 val;

    /* Poll at least once, backing off 1us between reads. */
    for (;;) {
        val = readl(ptr);
        if (val == ~(u32)0)     /* card removed */
            return -ENODEV;
        if ((val & mask) == done)
            return 0;
        udelay(1);
        if (--usec <= 0)
            return -ETIMEDOUT;
    }
}
/* Force HC to halt state from unknown (EHCI spec section 2.3) */
static int ehci_halt(struct oxu_hcd *oxu)
{
    u32 temp = readl(&oxu->regs->status);

    /* disable any irqs left enabled by previous code */
    writel(0, &oxu->regs->intr_enable);

    /* already halted: nothing to do */
    if ((temp & STS_HALT) != 0)
        return 0;

    /* clear RUN and wait (up to 16 microframes) for STS_HALT */
    temp = readl(&oxu->regs->command);
    temp &= ~CMD_RUN;
    writel(temp, &oxu->regs->command);
    return handshake(oxu, &oxu->regs->status,
              STS_HALT, STS_HALT, 16 * 125);
}
/* Put TDI/ARC silicon into EHCI mode */
/* Put TDI/ARC silicon into EHCI (host) mode via the mode register at 0x68. */
static void tdi_reset(struct oxu_hcd *oxu)
{
    u32 __iomem *reg_ptr;
    u32 tmp;

    reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68);
    tmp = readl(reg_ptr);
    tmp |= 0x3;     /* host-mode bits */
    writel(tmp, reg_ptr);
}
/* Reset a non-running (STS_HALT == 1) controller */
static int ehci_reset(struct oxu_hcd *oxu)
{
    int retval;
    u32 command = readl(&oxu->regs->command);

    /* set CMD_RESET and wait (up to 250ms) for hardware to clear it */
    command |= CMD_RESET;
    dbg_cmd(oxu, "reset", command);
    writel(command, &oxu->regs->command);
    oxu_to_hcd(oxu)->state = HC_STATE_HALT;
    oxu->next_statechange = jiffies;
    retval = handshake(oxu, &oxu->regs->command,
                CMD_RESET, 0, 250 * 1000);

    if (retval)
        return retval;

    /* reset also drops the chip out of EHCI mode; put it back */
    tdi_reset(oxu);

    return retval;
}
/* Idle the controller (from running) */
static void ehci_quiesce(struct oxu_hcd *oxu)
{
    u32 temp;

#ifdef DEBUG
    if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
        BUG();
#endif

    /* wait for any schedule enables/disables to take effect */
    /* CMD_ASE/CMD_PSE shifted by 10 line up with STS_ASS/STS_PSS */
    temp = readl(&oxu->regs->command) << 10;
    temp &= STS_ASS | STS_PSS;
    if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
                temp, 16 * 125) != 0) {
        oxu_to_hcd(oxu)->state = HC_STATE_HALT;
        return;
    }

    /* then disable anything that's still active */
    temp = readl(&oxu->regs->command);
    temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE);
    writel(temp, &oxu->regs->command);

    /* hardware can take 16 microframes to turn off ... */
    if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS,
                0, 16 * 125) != 0) {
        oxu_to_hcd(oxu)->state = HC_STATE_HALT;
        return;
    }
}
/*
 * Post-reset check for root-hub port @index; logs the outcome and always
 * returns @port_status unchanged.
 */
static int check_reset_complete(struct oxu_hcd *oxu, int index,
                u32 __iomem *status_reg, int port_status)
{
    if (!(port_status & PORT_CONNECT))
        /* nothing attached any more: forget the pending reset */
        oxu->reset_done[index] = 0;
    else if (!(port_status & PORT_PE))
        /* reset finished and it's still not enabled -- handoff */
        oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n",
            index+1);
    else
        oxu_dbg(oxu, "port %d high speed\n", index + 1);

    return port_status;
}
/* Fill in the root-hub descriptor from the HCSPARAMS capabilities. */
static void ehci_hub_descriptor(struct oxu_hcd *oxu,
                struct usb_hub_descriptor *desc)
{
    int ports = HCS_N_PORTS(oxu->hcs_params);
    u16 temp;

    desc->bDescriptorType = USB_DT_HUB;
    desc->bPwrOn2PwrGood = 10;  /* oxu 1.0, 2.3.9 says 20ms max */
    desc->bHubContrCurrent = 0;

    desc->bNbrPorts = ports;
    temp = 1 + (ports / 8);     /* bytes in the per-port bitmaps */
    desc->bDescLength = 7 + 2 * temp;

    /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */
    memset(&desc->u.hs.DeviceRemovable[0], 0, temp);
    memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp);

    temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */
    if (HCS_PPC(oxu->hcs_params))
        temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */
    else
        temp |= HUB_CHAR_NO_LPSM; /* no power switching */
    desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp);
}
/* Allocate an OXU210HP on-chip memory data buffer
*
* An on-chip memory data buffer is required for each OXU210HP USB transfer.
* Each transfer descriptor has one or more on-chip memory data buffers.
*
* Data buffers are allocated from a fix sized pool of data blocks.
* To minimise fragmentation and give reasonable memory utlisation,
* data buffers are allocated with sizes the power of 2 multiples of
* the block size, starting on an address a multiple of the allocated size.
*
* FIXME: callers of this function require a buffer to be allocated for
* len=0. This is a waste of on-chip memory and should be fix. Then this
* function should be changed to not allocate a buffer for len=0.
*/
/*
 * Allocate an on-chip buffer of power-of-2 blocks for @qtd, big enough for
 * @len bytes.  Fills qtd->buffer/buffer_dma/qtd_buffer_len.  Returns 0 or
 * -ENOMEM.  Serialized by oxu->mem_lock.
 */
static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len)
{
    int n_blocks;   /* minium blocks needed to hold len */
    int a_blocks;   /* blocks allocated */
    int i, j;

    /* Don't allocte bigger than supported */
    if (len > BUFFER_SIZE * BUFFER_NUM) {
        oxu_err(oxu, "buffer too big (%d)\n", len);
        return -ENOMEM;
    }

    spin_lock(&oxu->mem_lock);

    /* Number of blocks needed to hold len */
    n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE;

    /* Round the number of blocks up to the power of 2 */
    for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1)
        ;

    /* Find a suitable available data buffer */
    /* db_used[i] holds the allocation size at each run's start, so busy
     * runs can be skipped in one step */
    for (i = 0; i < BUFFER_NUM;
            i += max(a_blocks, (int)oxu->db_used[i])) {

        /* Check all the required blocks are available */
        for (j = 0; j < a_blocks; j++)
            if (oxu->db_used[i + j])
                break;

        if (j != a_blocks)
            continue;

        /* Allocate blocks found! */
        qtd->buffer = (void *) &oxu->mem->db_pool[i];
        qtd->buffer_dma = virt_to_phys(qtd->buffer);

        qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks;
        oxu->db_used[i] = a_blocks;

        spin_unlock(&oxu->mem_lock);

        return 0;
    }

    /* Failed */
    spin_unlock(&oxu->mem_lock);

    return -ENOMEM;
}
/* Return @qtd's on-chip buffer to the pool and clear its buffer fields. */
static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
    int index;

    spin_lock(&oxu->mem_lock);

    /* first block index of this allocation within the pool */
    index = (qtd->buffer - (void *) &oxu->mem->db_pool[0])
                            / BUFFER_SIZE;
    oxu->db_used[index] = 0;
    qtd->qtd_buffer_len = 0;
    qtd->buffer_dma = 0;
    qtd->buffer = NULL;

    spin_unlock(&oxu->mem_lock);
}
/* Reset a qTD to an inactive (halted, list-terminated) state at @dma. */
static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma)
{
    memset(qtd, 0, sizeof *qtd);
    qtd->qtd_dma = dma;
    qtd->hw_token = cpu_to_le32(QTD_STS_HALT);  /* not yet schedulable */
    qtd->hw_next = EHCI_LIST_END;
    qtd->hw_alt_next = EHCI_LIST_END;
    INIT_LIST_HEAD(&qtd->qtd_list);
}
/* Release @qtd's on-chip buffer (if any) and return it to the qTD pool. */
static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd)
{
    int index;

    if (qtd->buffer)
        oxu_buf_free(oxu, qtd);

    spin_lock(&oxu->mem_lock);

    /* slot index within the fixed qTD pool */
    index = qtd - &oxu->mem->qtd_pool[0];
    oxu->qtd_used[index] = 0;

    spin_unlock(&oxu->mem_lock);
}
/*
 * Claim a free qTD from the fixed pool, initialized to the inactive state.
 * Returns NULL when the pool is exhausted.  Serialized by oxu->mem_lock.
 *
 * Fix: use the common ehci_qtd_init() helper instead of open-coding the
 * same initialization sequence, so the two can't drift apart.
 */
static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu)
{
    int i;
    struct ehci_qtd *qtd = NULL;

    spin_lock(&oxu->mem_lock);

    /* linear scan for the first free slot */
    for (i = 0; i < QTD_NUM; i++)
        if (!oxu->qtd_used[i])
            break;

    if (i < QTD_NUM) {
        qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i];
        ehci_qtd_init(qtd, virt_to_phys(qtd));
        oxu->qtd_used[i] = 1;
    }

    spin_unlock(&oxu->mem_lock);

    return qtd;
}
/* Return @qh to the fixed QH pool. */
static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
    int index;

    spin_lock(&oxu->mem_lock);

    /* slot index within the fixed QH pool */
    index = qh - &oxu->mem->qh_pool[0];
    oxu->qh_used[index] = 0;

    spin_unlock(&oxu->mem_lock);
}
/* kref release: free a QH once its last reference is dropped. */
static void qh_destroy(struct kref *kref)
{
    struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref);
    struct oxu_hcd *oxu = qh->oxu;

    /* clean qtds first, and know this is not linked */
    if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) {
        oxu_dbg(oxu, "unused qh not empty!\n");
        BUG();
    }
    if (qh->dummy)
        oxu_qtd_free(oxu, qh->dummy);
    oxu_qh_free(oxu, qh);
}
/*
 * Claim a free QH from the fixed pool, refcounted and paired with a dummy
 * qTD.  Returns NULL on pool or dummy-qTD exhaustion.
 */
static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu)
{
    int i;
    struct ehci_qh *qh = NULL;

    spin_lock(&oxu->mem_lock);

    /* linear scan for the first free slot */
    for (i = 0; i < QHEAD_NUM; i++)
        if (!oxu->qh_used[i])
            break;

    if (i < QHEAD_NUM) {
        qh = (struct ehci_qh *) &oxu->mem->qh_pool[i];

        memset(qh, 0, sizeof *qh);
        kref_init(&qh->kref);
        qh->oxu = oxu;
        qh->qh_dma = virt_to_phys(qh);
        INIT_LIST_HEAD(&qh->qtd_list);

        /* dummy td enables safe urb queuing */
        qh->dummy = ehci_qtd_alloc(oxu);
        if (qh->dummy == NULL) {
            oxu_dbg(oxu, "no dummy td\n");
            oxu->qh_used[i] = 0;    /* give the slot back */
            qh = NULL;
            goto unlock;
        }

        oxu->qh_used[i] = 1;
    }

unlock:
    spin_unlock(&oxu->mem_lock);

    return qh;
}
/* to share a qh (cpu threads, or hc) */
static inline struct ehci_qh *qh_get(struct ehci_qh *qh)
{
    kref_get(&qh->kref);
    return qh;
}

/* Drop a QH reference; qh_destroy() runs when it hits zero. */
static inline void qh_put(struct ehci_qh *qh)
{
    kref_put(&qh->kref, qh_destroy);
}
/* Return @murb to the fixed micro-URB pool. */
static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb)
{
    int index;

    spin_lock(&oxu->mem_lock);

    /* slot index within the fixed micro-URB pool */
    index = murb - &oxu->murb_pool[0];
    oxu->murb_used[index] = 0;

    spin_unlock(&oxu->mem_lock);
}
/* Claim the first free entry in the micro-URB pool, or NULL if exhausted. */
static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu)
{
    struct oxu_murb *murb = NULL;
    int slot;

    spin_lock(&oxu->mem_lock);

    for (slot = 0; slot < MURB_NUM; slot++) {
        if (!oxu->murb_used[slot]) {
            murb = &oxu->murb_pool[slot];
            oxu->murb_used[slot] = 1;
            break;
        }
    }

    spin_unlock(&oxu->mem_lock);

    return murb;
}
/* The queue heads and transfer descriptors are managed from pools tied
* to each of the "per device" structures.
* This is the initialisation and cleanup code.
*/
/* Undo ehci_mem_init(): free the host-memory pools and drop the async QH. */
static void ehci_mem_cleanup(struct oxu_hcd *oxu)
{
    kfree(oxu->murb_pool);
    oxu->murb_pool = NULL;

    /* drop the async head's reference; qh_destroy() frees it */
    if (oxu->async)
        qh_put(oxu->async);
    oxu->async = NULL;

    del_timer(&oxu->urb_timer);

    /* periodic frame list lives in on-chip memory; just forget it */
    oxu->periodic = NULL;

    /* shadow periodic table */
    kfree(oxu->pshadow);
    oxu->pshadow = NULL;
}
/* Set up the qTD/QH/micro-URB pools, the async head, the periodic frame
 * list and its software shadow.  Remember to add cleanup code (above)
 * if you add anything here.
 */
static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags)
{
    int i;

    /* terminate every frame-list entry before the HC can see it */
    for (i = 0; i < oxu->periodic_size; i++)
        oxu->mem->frame_list[i] = EHCI_LIST_END;
    for (i = 0; i < QHEAD_NUM; i++)
        oxu->qh_used[i] = 0;
    for (i = 0; i < QTD_NUM; i++)
        oxu->qtd_used[i] = 0;

    oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags);
    if (!oxu->murb_pool)
        goto fail;

    for (i = 0; i < MURB_NUM; i++)
        oxu->murb_used[i] = 0;

    /* head of the async schedule */
    oxu->async = oxu_qh_alloc(oxu);
    if (!oxu->async)
        goto fail;

    oxu->periodic = (__le32 *) &oxu->mem->frame_list;
    oxu->periodic_dma = virt_to_phys(oxu->periodic);

    for (i = 0; i < oxu->periodic_size; i++)
        oxu->periodic[i] = EHCI_LIST_END;

    /* software shadow of hardware table */
    oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags);
    if (oxu->pshadow != NULL)
        return 0;

fail:
    oxu_dbg(oxu, "couldn't init memory\n");
    ehci_mem_cleanup(oxu);
    return -ENOMEM;
}
/* Fill a qtd's buffer pointers for up to 5 pages starting at @buf,
 * returning how much of the @len bytes we were able to queue up.
 * Short packets (count != len) are trimmed to a maxpacket multiple.
 */
static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len,
                int token, int maxpacket)
{
    int i, count;
    u64 addr = buf;

    /* one buffer entry per 4K ... first might be short or unaligned */
    qtd->hw_buf[0] = cpu_to_le32((u32)addr);
    qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32));
    count = 0x1000 - (buf & 0x0fff);    /* rest of that page */
    if (likely(len < count))        /* ... iff needed */
        count = len;
    else {
        buf += 0x1000;
        buf &= ~0x0fff;     /* remaining entries are page-aligned */

        /* per-qtd limit: from 16K to 20K (best alignment) */
        for (i = 1; count < len && i < 5; i++) {
            addr = buf;
            qtd->hw_buf[i] = cpu_to_le32((u32)addr);
            qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32));
            buf += 0x1000;
            if ((count + 0x1000) < len)
                count += 0x1000;
            else
                count = len;
        }

        /* short packets may only terminate transfers */
        if (count != len)
            count -= (count % maxpacket);
    }
    /* transfer size lives in token bits [30:16] */
    qtd->hw_token = cpu_to_le32((count << 16) | token);
    qtd->length = count;

    return count;
}
/* Point qh's transfer overlay at @qtd so the HC (re)starts processing
 * there.  Only legal while the qh is idle; also reloads the data
 * toggle for endpoints whose toggle lives in the qtd rather than in
 * the qh (control endpoints — see qh_make()).
 */
static inline void qh_update(struct oxu_hcd *oxu,
				struct ehci_qh *qh, struct ehci_qtd *qtd)
{
	/* writes to an active overlay are unsafe */
	BUG_ON(qh->qh_state != QH_STATE_IDLE);
	qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma);
	qh->hw_alt_next = EHCI_LIST_END;
	/* Except for control endpoints, we make hardware maintain data
	 * toggle (like OHCI) ... here (re)initialize the toggle in the QH,
	 * and set the pseudo-toggle in udev. Only usb_clear_halt() will
	 * ever clear it.
	 */
	/* bit 14 of hw_info1 is set by qh_make() to mean "toggle from qtd" */
	if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) {
		unsigned is_out, epnum;
		/* PID field: 1 << 8 selects IN (see qh_urb_transaction) */
		is_out = !(qtd->hw_token & cpu_to_le32(1 << 8));
		epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f;
		if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) {
			qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE);
			usb_settoggle(qh->dev, epnum, is_out, 1);
		}
	}
	/* HC must see latest qtd and qh data before we clear ACTIVE+HALT */
	wmb();
	qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING);
}
/* Re-arm an idle qh's overlay.  Needed both for normal (re)starts and
 * to recover from a silicon quirk where the dummy qtd gets written
 * into the overlay, leaving qh->hw_token wrongly inactive/halted.
 */
static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *next = NULL;

	if (list_empty(&qh->qtd_list)) {
		/* nothing queued: aim the overlay at the dummy qtd */
		next = qh->dummy;
	} else {
		struct ehci_qtd *first = list_entry(qh->qtd_list.next,
				struct ehci_qtd, qtd_list);
		/* the first qtd may already be partially processed;
		 * if the HC is on it, leave the overlay alone
		 */
		if (cpu_to_le32(first->qtd_dma) != qh->hw_current)
			next = first;
	}
	if (next)
		qh_update(oxu, qh, next);
}
/* Fold one completed qtd's @token into its urb: account the bytes
 * transferred and map EHCI halt/error bits onto standard USB error
 * codes.  Never overwrites a status somebody already set (e.g. an
 * unlink).  QTD_PID values: 0 = OUT, 1 = IN, 2 = SETUP (the same
 * encoding qh_urb_transaction writes into the token).
 */
static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb,
				size_t length, u32 token)
{
	/* count IN/OUT bytes, not SETUP (even short packets) */
	if (likely(QTD_PID(token) != 2))
		urb->actual_length += length - QTD_LENGTH(token);
	/* don't modify error codes */
	if (unlikely(urb->status != -EINPROGRESS))
		return;
	/* force cleanup after short read; not always an error */
	if (unlikely(IS_SHORT_READ(token)))
		urb->status = -EREMOTEIO;
	/* serious "can't proceed" faults reported by the hardware */
	if (token & QTD_STS_HALT) {
		if (token & QTD_STS_BABBLE) {
			/* FIXME "must" disable babbling device's port too */
			urb->status = -EOVERFLOW;
		} else if (token & QTD_STS_MMF) {
			/* fs/ls interrupt xfer missed the complete-split */
			urb->status = -EPROTO;
		} else if (token & QTD_STS_DBE) {
			urb->status = (QTD_PID(token) == 1) /* IN ? */
				? -ENOSR  /* hc couldn't read data */
				: -ECOMM; /* hc couldn't write data */
		} else if (token & QTD_STS_XACT) {
			/* timeout, bad crc, wrong PID, etc; retried */
			if (QTD_CERR(token))
				urb->status = -EPIPE;
			else {
				oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n",
					urb->dev->devpath,
					usb_pipeendpoint(urb->pipe),
					usb_pipein(urb->pipe) ? "in" : "out");
				urb->status = -EPROTO;
			}
		/* CERR nonzero + no errors + halt --> stall */
		} else if (QTD_CERR(token))
			urb->status = -EPIPE;
		else	/* unknown */
			urb->status = -EPROTO;
		oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n",
			usb_pipedevice(urb->pipe),
			usb_pipeendpoint(urb->pipe),
			usb_pipein(urb->pipe) ? "in" : "out",
			token, urb->status);
	}
}
/* Give a finished urb back to its driver.  Drops oxu->lock around the
 * completion callback, since complete() may re-enter this HCD, and
 * re-acquires it before returning.
 */
static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb)
__releases(oxu->lock)
__acquires(oxu->lock)
{
	if (likely(urb->hcpriv != NULL)) {
		struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv;
		/* S-mask in a QH means it's an interrupt urb */
		if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) {
			/* ... update hc-wide periodic stats (for usbfs) */
			oxu_to_hcd(oxu)->self.bandwidth_int_reqs--;
		}
		qh_put(qh);
	}
	urb->hcpriv = NULL;
	switch (urb->status) {
	case -EINPROGRESS:		/* success */
		urb->status = 0;
		/* fall through: a successfully completed urb reports 0 */
	default:		/* fault */
		break;
	case -EREMOTEIO:		/* fault or normal */
		if (!(urb->transfer_flags & URB_SHORT_NOT_OK))
			urb->status = 0;
		break;
	case -ECONNRESET:		/* canceled */
	case -ENOENT:
		break;
	}
#ifdef OXU_URB_TRACE
	oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n",
		__func__, urb->dev->devpath, urb,
		usb_pipeendpoint(urb->pipe),
		usb_pipein(urb->pipe) ? "in" : "out",
		urb->status,
		urb->actual_length, urb->transfer_buffer_length);
#endif
	/* complete() can reenter this HCD */
	spin_unlock(&oxu->lock);
	usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status);
	spin_lock(&oxu->lock);
}
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh);
static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh);
#define HALT_BIT cpu_to_le32(QTD_STS_HALT)
/* Process and free completed qtds for a qh, returning URBs to drivers.
 * Chases up to qh->hw_current. Returns number of completions called,
 * indicating how much "real" work we did.
 */
static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	struct ehci_qtd *last = NULL, *end = qh->dummy;
	struct list_head *entry, *tmp;
	int stopped;
	unsigned count = 0;
	int do_status = 0;
	u8 state;
	struct oxu_murb *murb = NULL;
	if (unlikely(list_empty(&qh->qtd_list)))
		return count;
	/* completions (or tasks on other cpus) must never clobber HALT
	 * till we've gone through and cleaned everything up, even when
	 * they add urbs to this qh's queue or mark them for unlinking.
	 *
	 * NOTE: unlinking expects to be done in queue order.
	 */
	state = qh->qh_state;
	qh->qh_state = QH_STATE_COMPLETING;
	stopped = (state == QH_STATE_IDLE);
	/* remove de-activated QTDs from front of queue.
	 * after faults (including short reads), cleanup this urb
	 * then let the queue advance.
	 * if queue is stopped, handles unlinks.
	 */
	list_for_each_safe(entry, tmp, &qh->qtd_list) {
		struct ehci_qtd *qtd;
		struct urb *urb;
		u32 token = 0;
		qtd = list_entry(entry, struct ehci_qtd, qtd_list);
		urb = qtd->urb;
		/* Clean up any state from previous QTD ...*/
		if (last) {
			if (likely(last->urb != urb)) {
				/* urb->complete == NULL marks an internal
				 * fragment of an oversized urb (struct
				 * oxu_murb); give back the real urb
				 * (murb->main) only after its final
				 * fragment completes.
				 */
				if (last->urb->complete == NULL) {
					murb = (struct oxu_murb *) last->urb;
					last->urb = murb->main;
					if (murb->last) {
						ehci_urb_done(oxu, last->urb);
						count++;
					}
					oxu_murb_free(oxu, murb);
				} else {
					ehci_urb_done(oxu, last->urb);
					count++;
				}
			}
			oxu_qtd_free(oxu, last);
			last = NULL;
		}
		/* ignore urbs submitted during completions we reported */
		if (qtd == end)
			break;
		/* hardware copies qtd out of qh overlay */
		rmb();
		token = le32_to_cpu(qtd->hw_token);
		/* always clean up qtds the hc de-activated */
		if ((token & QTD_STS_ACTIVE) == 0) {
			if ((token & QTD_STS_HALT) != 0) {
				stopped = 1;
			/* magic dummy for some short reads; qh won't advance.
			 * that silicon quirk can kick in with this dummy too.
			 */
			} else if (IS_SHORT_READ(token) &&
					!(qtd->hw_alt_next & EHCI_LIST_END)) {
				stopped = 1;
				goto halt;
			}
		/* stop scanning when we reach qtds the hc is using */
		} else if (likely(!stopped &&
				HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) {
			break;
		} else {
			stopped = 1;
			if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)))
				urb->status = -ESHUTDOWN;
			/* ignore active urbs unless some previous qtd
			 * for the urb faulted (including short read) or
			 * its urb was canceled. we may patch qh or qtds.
			 */
			if (likely(urb->status == -EINPROGRESS))
				continue;
			/* issue status after short control reads */
			if (unlikely(do_status != 0)
					&& QTD_PID(token) == 0 /* OUT */) {
				do_status = 0;
				continue;
			}
			/* token in overlay may be most current */
			if (state == QH_STATE_IDLE
					&& cpu_to_le32(qtd->qtd_dma)
						== qh->hw_current)
				token = le32_to_cpu(qh->hw_token);
			/* force halt for unlinked or blocked qh, so we'll
			 * patch the qh later and so that completions can't
			 * activate it while we "know" it's stopped.
			 */
			if ((HALT_BIT & qh->hw_token) == 0) {
halt:
				qh->hw_token |= HALT_BIT;
				wmb();
			}
		}
		/* Remove it from the queue */
		qtd_copy_status(oxu, urb->complete ?
					urb : ((struct oxu_murb *) urb)->main,
				qtd->length, token);
		/* IN data was landed in the HC's local buffer; copy it
		 * back out to the caller's transfer buffer
		 */
		if ((usb_pipein(qtd->urb->pipe)) &&
				(NULL != qtd->transfer_buffer))
			memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length);
		do_status = (urb->status == -EREMOTEIO)
				&& usb_pipecontrol(urb->pipe);
		if (stopped && qtd->qtd_list.prev != &qh->qtd_list) {
			last = list_entry(qtd->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			last->hw_next = qtd->hw_next;
		}
		list_del(&qtd->qtd_list);
		last = qtd;
	}
	/* last urb's completion might still need calling */
	if (likely(last != NULL)) {
		if (last->urb->complete == NULL) {
			murb = (struct oxu_murb *) last->urb;
			last->urb = murb->main;
			if (murb->last) {
				ehci_urb_done(oxu, last->urb);
				count++;
			}
			oxu_murb_free(oxu, murb);
		} else {
			ehci_urb_done(oxu, last->urb);
			count++;
		}
		oxu_qtd_free(oxu, last);
	}
	/* restore original state; caller must unlink or relink */
	qh->qh_state = state;
	/* be sure the hardware's done with the qh before refreshing
	 * it after fault cleanup, or recovering from silicon wrongly
	 * overlaying the dummy qtd (which reduces DMA chatter).
	 */
	if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) {
		switch (state) {
		case QH_STATE_IDLE:
			qh_refresh(oxu, qh);
			break;
		case QH_STATE_LINKED:
			/* should be rare for periodic transfers,
			 * except maybe high bandwidth ...
			 */
			if ((cpu_to_le32(QH_SMASK)
					& qh->hw_info2) != 0) {
				intr_deschedule(oxu, qh);
				(void) qh_schedule(oxu, qh);
			} else
				unlink_async(oxu, qh);
			break;
		/* otherwise, unlink already started */
		}
	}
	return count;
}
/* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */
#define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03))
/* ... and packet size, for any kind of endpoint descriptor */
#define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff)
/* Reverse of qh_urb_transaction: free a list of TDs.
* used for cleanup after errors, before HC sees an URB's TDs.
*/
static void qtd_list_free(struct oxu_hcd *oxu,
struct urb *urb, struct list_head *qtd_list)
{
struct list_head *entry, *temp;
list_for_each_safe(entry, temp, qtd_list) {
struct ehci_qtd *qtd;
qtd = list_entry(entry, struct ehci_qtd, qtd_list);
list_del(&qtd->qtd_list);
oxu_qtd_free(oxu, qtd);
}
}
/* Create a list of filled qtds for this URB; won't link into qh.
 *
 * @urb:   the transfer to map; data is staged through HC-local buffers
 * @head:  list the newly allocated qtds are appended to
 * @flags: allocation context (note: not referenced below; the qtd and
 *         buffer allocators draw from preallocated pools)
 * Returns @head on success, or NULL after freeing any partial list.
 */
static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu,
						struct urb *urb,
						struct list_head *head,
						gfp_t flags)
{
	struct ehci_qtd *qtd, *qtd_prev;
	dma_addr_t buf;
	int len, maxpacket;
	int is_input;
	u32 token;
	void *transfer_buf = NULL;
	int ret;
	/*
	 * URBs map to sequences of QTDs: one logical transaction
	 */
	qtd = ehci_qtd_alloc(oxu);
	if (unlikely(!qtd))
		return NULL;
	list_add_tail(&qtd->qtd_list, head);
	qtd->urb = urb;
	token = QTD_STS_ACTIVE;
	token |= (EHCI_TUNE_CERR << 10);
	/* for split transactions, SplitXState initialized to zero */
	len = urb->transfer_buffer_length;
	is_input = usb_pipein(urb->pipe);
	/* NOTE(review): an IN urb submitted with only a DMA address gets
	 * a virtual address derived here, so completed data can later be
	 * memcpy'd out of the HC's local buffer (see qh_completions)
	 */
	if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input)
		urb->transfer_buffer = phys_to_virt(urb->transfer_dma);
	if (usb_pipecontrol(urb->pipe)) {
		/* SETUP pid */
		ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest));
		if (ret)
			goto cleanup;
		qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest),
				token | (2 /* "setup" */ << 8), 8);
		memcpy(qtd->buffer, qtd->urb->setup_packet,
				sizeof(struct usb_ctrlrequest));
		/* ... and always at least one more pid */
		token ^= QTD_TOGGLE;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);
		/* for zero length DATA stages, STATUS is always IN */
		if (len == 0)
			token |= (1 /* "in" */ << 8);
	}
	/*
	 * Data transfer stage: buffer setup
	 */
	ret = oxu_buf_alloc(oxu, qtd, len);
	if (ret)
		goto cleanup;
	buf = qtd->buffer_dma;
	transfer_buf = urb->transfer_buffer;
	/* OUT data is staged into the HC's local buffer up front */
	if (!is_input)
		memcpy(qtd->buffer, qtd->urb->transfer_buffer, len);
	if (is_input)
		token |= (1 /* "in" */ << 8);
	/* else it's already initted to "out" pid (0 << 8) */
	maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input));
	/*
	 * buffer gets wrapped in one or more qtds;
	 * last one may be "short" (including zero len)
	 * and may serve as a control status ack
	 */
	for (;;) {
		int this_qtd_len;
		this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket);
		qtd->transfer_buffer = transfer_buf;
		len -= this_qtd_len;
		buf += this_qtd_len;
		transfer_buf += this_qtd_len;
		if (is_input)
			qtd->hw_alt_next = oxu->async->hw_alt_next;
		/* qh makes control packets use qtd toggle; maybe switch it */
		if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0)
			token ^= QTD_TOGGLE;
		if (likely(len <= 0))
			break;
		qtd_prev = qtd;
		qtd = ehci_qtd_alloc(oxu);
		if (unlikely(!qtd))
			goto cleanup;
		if (likely(len > 0)) {
			ret = oxu_buf_alloc(oxu, qtd, len);
			if (ret)
				goto cleanup;
		}
		qtd->urb = urb;
		qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
		list_add_tail(&qtd->qtd_list, head);
	}
	/* unless the bulk/interrupt caller wants a chance to clean
	 * up after short reads, hc should advance qh past this urb
	 */
	if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0
				|| usb_pipecontrol(urb->pipe)))
		qtd->hw_alt_next = EHCI_LIST_END;
	/*
	 * control requests may need a terminating data "status" ack;
	 * bulk ones may need a terminating short packet (zero length).
	 */
	if (likely(urb->transfer_buffer_length != 0)) {
		int one_more = 0;
		if (usb_pipecontrol(urb->pipe)) {
			one_more = 1;
			token ^= 0x0100;	/* "in" <--> "out" */
			token |= QTD_TOGGLE;	/* force DATA1 */
		} else if (usb_pipebulk(urb->pipe)
				&& (urb->transfer_flags & URB_ZERO_PACKET)
				&& !(urb->transfer_buffer_length % maxpacket)) {
			one_more = 1;
		}
		if (one_more) {
			qtd_prev = qtd;
			qtd = ehci_qtd_alloc(oxu);
			if (unlikely(!qtd))
				goto cleanup;
			qtd->urb = urb;
			qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma);
			list_add_tail(&qtd->qtd_list, head);
			/* never any data in such packets */
			qtd_fill(qtd, 0, 0, token, 0);
		}
	}
	/* by default, enable interrupt on urb completion */
	qtd->hw_token |= cpu_to_le32(QTD_IOC);
	return head;
cleanup:
	qtd_list_free(oxu, urb, head);
	return NULL;
}
/* Each QH holds a qtd list; a QH is used for everything except iso.
 *
 * For interrupt urbs, the scheduler must set the microframe scheduling
 * mask(s) each time the QH gets scheduled. For highspeed, that's
 * just one microframe in the s-mask. For split interrupt transactions
 * there are additional complications: c-mask, maybe FSTNs.
 */
/* Allocate a QH for @urb's endpoint, pack the endpoint/device data
 * into the hardware info dwords, and for interrupt pipes precompute
 * the scheduling budgets (usecs, c_usecs, gap_uf, period).  Returns
 * NULL (after qh_put) on allocation failure or on parameters this
 * driver does not schedule.
 */
static struct ehci_qh *qh_make(struct oxu_hcd *oxu,
				struct urb *urb, gfp_t flags)
{
	struct ehci_qh *qh = oxu_qh_alloc(oxu);
	u32 info1 = 0, info2 = 0;
	int is_input, type;
	int maxp = 0;
	if (!qh)
		return qh;
	/*
	 * init endpoint/device data for this QH
	 */
	info1 |= usb_pipeendpoint(urb->pipe) << 8;
	info1 |= usb_pipedevice(urb->pipe) << 0;
	is_input = usb_pipein(urb->pipe);
	type = usb_pipetype(urb->pipe);
	maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input);
	/* Compute interrupt scheduling parameters just once, and save.
	 * - allowing for high bandwidth, how many nsec/uframe are used?
	 * - split transactions need a second CSPLIT uframe; same question
	 * - splits also need a schedule gap (for full/low speed I/O)
	 * - qh has a polling interval
	 *
	 * For control/bulk requests, the HC or TT handles these.
	 */
	if (type == PIPE_INTERRUPT) {
		qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH,
				is_input, 0,
				hb_mult(maxp) * max_packet(maxp)));
		qh->start = NO_FRAME;
		if (urb->dev->speed == USB_SPEED_HIGH) {
			qh->c_usecs = 0;
			qh->gap_uf = 0;
			/* interval is in uframes; period is in frames */
			qh->period = urb->interval >> 3;
			if (qh->period == 0 && urb->interval != 1) {
				/* NOTE interval 2 or 4 uframes could work.
				 * But interval 1 scheduling is simpler, and
				 * includes high bandwidth.
				 */
				oxu_dbg(oxu, "intr period %d uframes, NYET!\n",
					urb->interval);
				goto done;
			}
		} else {
			struct usb_tt *tt = urb->dev->tt;
			int think_time;
			/* gap is f(FS/LS transfer times) */
			qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed,
					is_input, 0, maxp) / (125 * 1000);
			/* FIXME this just approximates SPLIT/CSPLIT times */
			if (is_input) {		/* SPLIT, gap, CSPLIT+DATA */
				qh->c_usecs = qh->usecs + HS_USECS(0);
				qh->usecs = HS_USECS(1);
			} else {		/* SPLIT+DATA, gap, CSPLIT */
				qh->usecs += HS_USECS(1);
				qh->c_usecs = HS_USECS(0);
			}
			think_time = tt ? tt->think_time : 0;
			qh->tt_usecs = NS_TO_US(think_time +
					usb_calc_bus_time(urb->dev->speed,
						is_input, 0, max_packet(maxp)));
			qh->period = urb->interval;
		}
	}
	/* support for tt scheduling, and access to toggles */
	qh->dev = urb->dev;
	/* using TT? */
	switch (urb->dev->speed) {
	case USB_SPEED_LOW:
		info1 |= (1 << 12);	/* EPS "low" */
		/* FALL THROUGH */
	case USB_SPEED_FULL:
		/* EPS 0 means "full" */
		if (type != PIPE_INTERRUPT)
			info1 |= (EHCI_TUNE_RL_TT << 28);
		if (type == PIPE_CONTROL) {
			info1 |= (1 << 27);	/* for TT */
			info1 |= 1 << 14;	/* toggle from qtd */
		}
		info1 |= maxp << 16;
		info2 |= (EHCI_TUNE_MULT_TT << 30);
		info2 |= urb->dev->ttport << 23;
		/* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */
		break;
	case USB_SPEED_HIGH:		/* no TT involved */
		info1 |= (2 << 12);	/* EPS "high" */
		if (type == PIPE_CONTROL) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 64 << 16;	/* usb2 fixed maxpacket */
			info1 |= 1 << 14;	/* toggle from qtd */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else if (type == PIPE_BULK) {
			info1 |= (EHCI_TUNE_RL_HS << 28);
			info1 |= 512 << 16;	/* usb2 fixed maxpacket */
			info2 |= (EHCI_TUNE_MULT_HS << 30);
		} else {		/* PIPE_INTERRUPT */
			info1 |= max_packet(maxp) << 16;
			info2 |= hb_mult(maxp) << 30;
		}
		break;
	default:
		oxu_dbg(oxu, "bogus dev %p speed %d\n", urb->dev, urb->dev->speed);
/* shared error exit: releases the freshly allocated qh */
done:
		qh_put(qh);
		return NULL;
	}
	/* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */
	/* init as live, toggle clear, advance to dummy */
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_info1 = cpu_to_le32(info1);
	qh->hw_info2 = cpu_to_le32(info2);
	usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1);
	qh_refresh(oxu, qh);
	return qh;
}
/* Move qh (and its qtds) onto async queue; maybe enable queue.
 * Callers in this file invoke this with oxu->lock held (see
 * submit_async).
 */
static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	__le32 dma = QH_NEXT(qh->qh_dma);
	struct ehci_qh *head;
	/* (re)start the async schedule? */
	head = oxu->async;
	timer_action_done(oxu, TIMER_ASYNC_OFF);
	if (!head->qh_next.qh) {
		u32 cmd = readl(&oxu->regs->command);
		if (!(cmd & CMD_ASE)) {
			/* in case a clear of CMD_ASE didn't take yet */
			(void)handshake(oxu, &oxu->regs->status,
					STS_ASS, 0, 150);
			cmd |= CMD_ASE | CMD_RUN;
			writel(cmd, &oxu->regs->command);
			oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;
			/* posted write need not be known to HC yet ... */
		}
	}
	/* clear halt and/or toggle; and maybe recover from silicon quirk */
	if (qh->qh_state == QH_STATE_IDLE)
		qh_refresh(oxu, qh);
	/* splice right after start */
	qh->qh_next = head->qh_next;
	qh->hw_next = head->hw_next;
	/* order the qh's own links before making it reachable from head */
	wmb();
	head->qh_next.qh = qh;
	head->hw_next = dma;
	qh->qh_state = QH_STATE_LINKED;
	/* qtd completions reported later by interrupt */
}
#define QH_ADDR_MASK cpu_to_le32(0x7f)
/*
 * For control/bulk/interrupt, return QH with these TDs appended.
 * Allocates and initializes the QH if necessary.
 * Returns null if it can't allocate a QH it needs to.
 * If the QH has TDs (urbs) already, that's great.
 */
static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu,
				struct urb *urb, struct list_head *qtd_list,
				int epnum, void **ptr)
{
	struct ehci_qh *qh = NULL;
	qh = (struct ehci_qh *) *ptr;
	if (unlikely(qh == NULL)) {
		/* can't sleep here, we have oxu->lock... */
		qh = qh_make(oxu, urb, GFP_ATOMIC);
		*ptr = qh;
	}
	if (likely(qh != NULL)) {
		struct ehci_qtd *qtd;
		if (unlikely(list_empty(qtd_list)))
			qtd = NULL;
		else
			qtd = list_entry(qtd_list->next, struct ehci_qtd,
					qtd_list);
		/* control qh may need patching ... */
		if (unlikely(epnum == 0)) {
			/* usb_reset_device() briefly reverts to address 0 */
			if (usb_pipedevice(urb->pipe) == 0)
				qh->hw_info1 &= ~QH_ADDR_MASK;
		}
		/* just one way to queue requests: swap with the dummy qtd.
		 * only hc or qh_refresh() ever modify the overlay.
		 */
		if (likely(qtd != NULL)) {
			struct ehci_qtd *dummy;
			dma_addr_t dma;
			__le32 token;
			/* to avoid racing the HC, use the dummy td instead of
			 * the first td of our list (becomes new dummy). both
			 * tds stay deactivated until we're done, when the
			 * HC is allowed to fetch the old dummy (4.10.2).
			 */
			token = qtd->hw_token;
			/* park the first td halted while we splice */
			qtd->hw_token = HALT_BIT;
			wmb();
			dummy = qh->dummy;
			dma = dummy->qtd_dma;
			/* copy the first td's contents into the old dummy,
			 * keeping the dummy's own DMA address
			 */
			*dummy = *qtd;
			dummy->qtd_dma = dma;
			list_del(&qtd->qtd_list);
			list_add(&dummy->qtd_list, qtd_list);
			list_splice(qtd_list, qh->qtd_list.prev);
			ehci_qtd_init(qtd, qtd->qtd_dma);
			qh->dummy = qtd;
			/* hc must see the new dummy at list end */
			dma = qtd->qtd_dma;
			qtd = list_entry(qh->qtd_list.prev,
					struct ehci_qtd, qtd_list);
			qtd->hw_next = QTD_NEXT(dma);
			/* let the hc process these next qtds */
			/* two-step activation: write the token with what
			 * appears to be the Active bit (0x80) clear first,
			 * then the full value
			 */
			dummy->hw_token = (token & ~(0x80));
			wmb();
			dummy->hw_token = token;
			urb->hcpriv = qh_get(qh);
		}
	}
	return qh;
}
/* Queue a control/bulk transfer: append the qtd list to the endpoint's
 * QH (created on first use) and kick the async schedule when the QH
 * was idle.  On any failure the caller's qtd list is freed here.
 */
static int submit_async(struct oxu_hcd *oxu, struct urb *urb,
			struct list_head *qtd_list, gfp_t mem_flags)
{
	unsigned long flags;
	struct ehci_qh *qh = NULL;
	struct ehci_qtd *qtd;
	int epnum;
	int status = 0;

	qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list);
	epnum = urb->ep->desc.bEndpointAddress;
#ifdef OXU_URB_TRACE
	oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n",
		__func__, urb->dev->devpath, urb,
		epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out",
		urb->transfer_buffer_length,
		qtd, urb->ep->hcpriv);
#endif
	spin_lock_irqsave(&oxu->lock, flags);
	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
		status = -ESHUTDOWN;
		goto unlock;
	}
	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
	if (unlikely(qh == NULL)) {
		status = -ENOMEM;
		goto unlock;
	}
	/* Control/bulk operations through TTs don't need scheduling,
	 * the HC and TT handle it when the TT has a buffer ready.
	 */
	if (likely(qh->qh_state == QH_STATE_IDLE))
		qh_link_async(oxu, qh_get(qh));
unlock:
	spin_unlock_irqrestore(&oxu->lock, flags);
	if (unlikely(qh == NULL))
		qtd_list_free(oxu, urb, qtd_list);
	return status;
}
/* The async qh for the qtds being reclaimed are now unlinked from the HC */
/* Finish retiring oxu->reclaim once the controller has released it:
 * give back completed qtds, relink the QH if new work arrived in the
 * meantime, and start the next queued unlink, if any.
 */
static void end_unlink_async(struct oxu_hcd *oxu)
{
	struct ehci_qh *qh = oxu->reclaim;
	struct ehci_qh *next;
	timer_action_done(oxu, TIMER_IAA_WATCHDOG);
	qh->qh_state = QH_STATE_IDLE;
	qh->qh_next.qh = NULL;
	qh_put(qh);			/* refcount from reclaim */
	/* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */
	next = qh->reclaim;
	oxu->reclaim = next;
	oxu->reclaim_ready = 0;
	qh->reclaim = NULL;
	qh_completions(oxu, qh);
	if (!list_empty(&qh->qtd_list)
			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		qh_link_async(oxu, qh);
	else {
		qh_put(qh);		/* refcount from async list */
		/* it's not free to turn the async schedule on/off; leave it
		 * active but idle for a while once it empties.
		 */
		if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)
				&& oxu->async->qh_next.qh == NULL)
			timer_action(oxu, TIMER_ASYNC_OFF);
	}
	if (next) {
		oxu->reclaim = NULL;
		start_unlink_async(oxu, next);
	}
}
/* makes sure the async qh will become idle */
/* caller must own oxu->lock */
/* Splice @qh out of the async ring.  Unless the HC is halted, ring
 * the CMD_IAAD doorbell so end_unlink_async() only runs after the
 * controller reports it has stopped using the QH.
 */
static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int cmd = readl(&oxu->regs->command);
	struct ehci_qh *prev;
#ifdef DEBUG
	assert_spin_locked(&oxu->lock);
	if (oxu->reclaim || (qh->qh_state != QH_STATE_LINKED
				&& qh->qh_state != QH_STATE_UNLINK_WAIT))
		BUG();
#endif
	/* stop async schedule right now? */
	if (unlikely(qh == oxu->async)) {
		/* can't get here without STS_ASS set */
		if (oxu_to_hcd(oxu)->state != HC_STATE_HALT
				&& !oxu->reclaim) {
			/* ... and CMD_IAAD clear */
			writel(cmd & ~CMD_ASE, &oxu->regs->command);
			wmb();
			/* handshake later, if we need to */
			timer_action_done(oxu, TIMER_ASYNC_OFF);
		}
		return;
	}
	qh->qh_state = QH_STATE_UNLINK;
	oxu->reclaim = qh = qh_get(qh);
	/* walk the ring to our predecessor and splice @qh out of both
	 * the software and hardware lists
	 */
	prev = oxu->async;
	while (prev->qh_next.qh != qh)
		prev = prev->qh_next.qh;
	prev->hw_next = qh->hw_next;
	prev->qh_next = qh->qh_next;
	wmb();
	if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) {
		/* if (unlikely(qh->reclaim != 0))
		 *	this will recurse, probably not much
		 */
		end_unlink_async(oxu);
		return;
	}
	oxu->reclaim_ready = 0;
	cmd |= CMD_IAAD;
	writel(cmd, &oxu->regs->command);
	(void) readl(&oxu->regs->command);	/* flush the posted write */
	timer_action(oxu, TIMER_IAA_WATCHDOG);
}
/* Scan the async schedule: retire finished qtds and unlink QHs that
 * stay empty.  Completion callbacks may drop oxu->lock, so whenever
 * real work was done the scan restarts from the schedule head.
 */
static void scan_async(struct oxu_hcd *oxu)
{
	struct ehci_qh *qh;
	enum ehci_timer_action action = TIMER_IO_WATCHDOG;
	/* advance the scan generation stamp (extra bump on wrap to 0) */
	if (!++(oxu->stamp))
		oxu->stamp++;
	timer_action_done(oxu, TIMER_ASYNC_SHRINK);
rescan:
	qh = oxu->async->qh_next.qh;
	if (likely(qh != NULL)) {
		do {
			/* clean any finished work for this qh */
			if (!list_empty(&qh->qtd_list)
					&& qh->stamp != oxu->stamp) {
				int temp;
				/* unlinks could happen here; completion
				 * reporting drops the lock. rescan using
				 * the latest schedule, but don't rescan
				 * qhs we already finished (no looping).
				 */
				qh = qh_get(qh);
				qh->stamp = oxu->stamp;
				temp = qh_completions(oxu, qh);
				qh_put(qh);
				if (temp != 0)
					goto rescan;
			}
			/* unlink idle entries, reducing HC PCI usage as well
			 * as HCD schedule-scanning costs. delay for any qh
			 * we just scanned, there's a not-unusual case that it
			 * doesn't stay idle for long.
			 * (plus, avoids some kind of re-activation race.)
			 */
			if (list_empty(&qh->qtd_list)) {
				if (qh->stamp == oxu->stamp)
					action = TIMER_ASYNC_SHRINK;
				else if (!oxu->reclaim
					    && qh->qh_state == QH_STATE_LINKED)
					start_unlink_async(oxu, qh);
			}
			qh = qh->qh_next.qh;
		} while (qh);
	}
	if (action == TIMER_ASYNC_SHRINK)
		timer_action(oxu, TIMER_ASYNC_SHRINK);
}
/*
 * periodic_next_shadow - return "next" pointer on shadow list
 * @periodic: host pointer to qh/itd/sitd
 * @tag: hardware tag for type of this record
 */
static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic,
						__le32 tag)
{
	/* this driver only ever links QHs into the periodic schedule
	 * (no iso itd/sitd, no FSTN), so every record is a QH and the
	 * tag does not change the answer
	 */
	(void) tag;
	return &periodic->qh->qh_next;
}
/* caller must hold oxu->lock */
/* Remove @ptr from frame @frame's periodic lists, keeping the software
 * shadow and the hardware list consistent.  A miss (e.g. a shared
 * interrupt tree tail already unlinked by an earlier frame pass) is
 * silently ignored.
 */
static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr)
{
	union ehci_shadow *prev_p = &oxu->pshadow[frame];
	__le32 *hw_p = &oxu->periodic[frame];
	union ehci_shadow here = *prev_p;
	/* find predecessor of "ptr"; hw and shadow lists are in sync */
	while (here.ptr && here.ptr != ptr) {
		prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p));
		hw_p = here.hw_next;
		here = *prev_p;
	}
	/* an interrupt entry (at list end) could have been shared */
	if (!here.ptr)
		return;
	/* update shadow and hardware lists ... the old "next" pointers
	 * from ptr may still be in use, the caller updates them.
	 */
	*prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p));
	*hw_p = *here.hw_next;
}
/* Sum how many of this microframe's 125 usecs are already claimed:
 * S-mask (transfer) time plus C-mask (complete-split) time for every
 * QH linked into frame @frame.
 */
static unsigned short periodic_usecs(struct oxu_hcd *oxu,
					unsigned frame, unsigned uframe)
{
	__le32 *hw = &oxu->periodic[frame];
	union ehci_shadow *entry = &oxu->pshadow[frame];
	unsigned total = 0;

	/* walk the frame's shadow list; only QHs are ever linked here */
	while (entry->ptr) {
		switch (Q_NEXT_TYPE(*hw)) {
		case Q_TYPE_QH:
		default:
			/* counted if this uframe is in the S-mask ... */
			if (entry->qh->hw_info2 & cpu_to_le32(1 << uframe))
				total += entry->qh->usecs;
			/* ... or in the C-mask */
			if (entry->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe)))
				total += entry->qh->c_usecs;
			hw = &entry->qh->hw_next;
			entry = &entry->qh->qh_next;
			break;
		}
	}
#ifdef DEBUG
	if (total > 100)
		oxu_err(oxu, "uframe %d sched overrun: %d usecs\n",
			frame * 8 + uframe, total);
#endif
	return total;
}
/* Turn the periodic schedule on.  Waits for any prior clear of PSE to
 * settle first (schedule enables take effect only at frame boundaries);
 * marks the controller dead and returns the error if that never happens.
 */
static int enable_periodic(struct oxu_hcd *oxu)
{
	int rc;
	u32 command;

	/* did clearing PSE did take effect yet?
	 * takes effect only at frame boundaries...
	 */
	rc = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125);
	if (rc != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		usb_hc_died(oxu_to_hcd(oxu));
		return rc;
	}
	command = readl(&oxu->regs->command) | CMD_PSE;
	writel(command, &oxu->regs->command);
	/* posted write ... PSS happens later */
	oxu_to_hcd(oxu)->state = HC_STATE_RUNNING;

	/* make sure ehci_work scans these */
	oxu->next_uframe = readl(&oxu->regs->frame_index)
		% (oxu->periodic_size << 3);
	return 0;
}
/* Turn the periodic schedule off.  Waits for any prior set of PSE to
 * settle first (schedule changes land only at frame boundaries); marks
 * the controller dead and returns the error if the handshake times out.
 */
static int disable_periodic(struct oxu_hcd *oxu)
{
	int rc;
	u32 command;

	/* did setting PSE not take effect yet?
	 * takes effect only at frame boundaries...
	 */
	rc = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125);
	if (rc != 0) {
		oxu_to_hcd(oxu)->state = HC_STATE_HALT;
		usb_hc_died(oxu_to_hcd(oxu));
		return rc;
	}
	command = readl(&oxu->regs->command) & ~CMD_PSE;
	writel(command, &oxu->regs->command);
	/* posted write ... */

	/* no periodic scanning until it's re-enabled */
	oxu->next_uframe = -1;
	return 0;
}
/* periodic schedule slots have iso tds (normal or split) first, then a
 * sparse tree for active interrupt transfers.
 *
 * this just links in a qh; caller guarantees uframe masks are set right.
 * no FSTN support (yet; oxu 0.96+)
 */
/* Link @qh into every frame slot of its period, claim its bandwidth,
 * and enable the periodic schedule if this is its first user.
 * Returns 0, or enable_periodic()'s error.
 */
static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period = qh->period;
	dev_dbg(&qh->dev->dev,
		"link qh%d-%04x/%p start %d [%d/%d us]\n",
		period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);
	/* high bandwidth, or otherwise every microframe */
	if (period == 0)
		period = 1;
	for (i = qh->start; i < oxu->periodic_size; i += period) {
		union ehci_shadow *prev = &oxu->pshadow[i];
		__le32 *hw_p = &oxu->periodic[i];
		union ehci_shadow here = *prev;
		__le32 type = 0;
		/* skip the iso nodes at list head */
		while (here.ptr) {
			type = Q_NEXT_TYPE(*hw_p);
			if (type == Q_TYPE_QH)
				break;
			prev = periodic_next_shadow(prev, type);
			hw_p = &here.qh->hw_next;
			here = *prev;
		}
		/* sorting each branch by period (slow-->fast)
		 * enables sharing interior tree nodes
		 */
		while (here.ptr && qh != here.qh) {
			if (qh->period > here.qh->period)
				break;
			prev = &here.qh->qh_next;
			hw_p = &here.qh->hw_next;
			here = *prev;
		}
		/* link in this qh, unless some earlier pass did that */
		if (qh != here.qh) {
			qh->qh_next = here;
			if (here.qh)
				qh->hw_next = *hw_p;
			/* qh's own links must be visible before it is */
			wmb();
			prev->qh = qh;
			*hw_p = QH_NEXT(qh->qh_dma);
		}
	}
	qh->qh_state = QH_STATE_LINKED;
	qh_get(qh);
	/* update per-qh bandwidth for usbfs */
	oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);
	/* maybe enable periodic schedule processing */
	if (!oxu->periodic_sched++)
		return enable_periodic(oxu);
	return 0;
}
/* Remove @qh from every periodic frame slot it occupies and release
 * the bandwidth it claimed; turns the periodic schedule off when the
 * last user goes away.
 */
static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned i;
	unsigned period;
	/* FIXME:
	 * IF this isn't high speed
	 *   and this qh is active in the current uframe
	 *   (and overlay token SplitXstate is false?)
	 * THEN
	 *   qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore");
	 */
	/* high bandwidth, or otherwise part of every microframe */
	period = qh->period;
	if (period == 0)
		period = 1;
	for (i = qh->start; i < oxu->periodic_size; i += period)
		periodic_unlink(oxu, i, qh);
	/* update per-qh bandwidth for usbfs */
	oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period
		? ((qh->usecs + qh->c_usecs) / qh->period)
		: (qh->usecs * 8);
	dev_dbg(&qh->dev->dev,
		"unlink qh%d-%04x/%p start %d [%d/%d us]\n",
		qh->period,
		le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK),
		qh, qh->start, qh->usecs, qh->c_usecs);
	/* qh->qh_next still "live" to HC */
	qh->qh_state = QH_STATE_UNLINK;
	qh->qh_next.ptr = NULL;
	qh_put(qh);
	/* maybe turn off periodic schedule */
	oxu->periodic_sched--;
	if (!oxu->periodic_sched)
		(void) disable_periodic(oxu);
}
/* Take an interrupt @qh off the periodic schedule and wait long enough
 * for the controller to be done with it before marking it idle.
 */
static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	unsigned usec_wait;

	qh_unlink_periodic(oxu, qh);

	/* simple/paranoid: always delay, expecting the HC needs to read
	 * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and
	 * expect hub_wq to clean up after any CSPLITs we won't issue.
	 * active high speed queues may need bigger delays...
	 */
	if (!list_empty(&qh->qtd_list)
			&& (cpu_to_le32(QH_CMASK) & qh->hw_info2) == 0)
		usec_wait = 55;	/* worst case: 3 * 1024 */
	else
		usec_wait = 2;
	udelay(usec_wait);
	qh->qh_state = QH_STATE_IDLE;
	qh->hw_next = EHCI_LIST_END;
	wmb();
}
/* Return 1 if @usecs more will fit in the given uframe at @period
 * without exceeding the 80% (100 usec per uframe) periodic budget,
 * else 0.
 */
static int check_period(struct oxu_hcd *oxu,
			unsigned frame, unsigned uframe,
			unsigned period, unsigned usecs)
{
	int claimed;
	/* complete split running into next frame?
	 * given FSTN support, we could sometimes check...
	 */
	if (uframe >= 8)
		return 0;
	/*
	 * 80% periodic == 100 usec/uframe available
	 * convert "usecs we need" to "max already claimed"
	 */
	usecs = 100 - usecs;
	/* we "know" 2 and 4 uframe intervals were rejected; so
	 * for period 0, check _every_ microframe in the schedule.
	 */
	if (unlikely(period == 0)) {
		do {
			/* NOTE(review): only uframes 0..6 are checked here,
			 * mirroring the ehci-hcd original — confirm that
			 * leaving uframe 7's budget unchecked is intended
			 */
			for (uframe = 0; uframe < 7; uframe++) {
				claimed = periodic_usecs(oxu, frame, uframe);
				if (claimed > usecs)
					return 0;
			}
		} while ((frame += 1) < oxu->periodic_size);
	/* just check the specified uframe, at that period */
	} else {
		do {
			claimed = periodic_usecs(oxu, frame, uframe);
			if (claimed > usecs)
				return 0;
		} while ((frame += period) < oxu->periodic_size);
	}
	return 1;
}
/* Decide whether interrupt @qh can claim the slot (@frame, @uframe):
 * returns 0 (and clears *c_maskp) when the bandwidth check passes and
 * no complete-split time is needed, else -ENOSPC.
 */
static int check_intr_schedule(struct oxu_hcd *oxu,
				unsigned frame, unsigned uframe,
				const struct ehci_qh *qh, __le32 *c_maskp)
{
	/* a CSPLIT this late in the frame would need FSTN support */
	if (qh->c_usecs && uframe >= 6)
		return -ENOSPC;
	/* enough budget left at this period/uframe? */
	if (!check_period(oxu, frame, uframe, qh->period, qh->usecs))
		return -ENOSPC;
	/* split-completion scheduling is not supported here */
	if (qh->c_usecs)
		return -ENOSPC;
	*c_maskp = 0;
	return 0;
}
/* "first fit" scheduling policy used the first time through,
 * or when the previous schedule slot can't be re-used.
 * Finds a frame/uframe with enough bandwidth via check_intr_schedule()
 * and links the qh into the periodic schedule.  Returns 0 on success
 * or a negative error (e.g. -ENOSPC when no slot fits).
 */
static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	int status;
	unsigned uframe;
	__le32 c_mask;
	unsigned frame;		/* 0..(qh->period - 1), or NO_FRAME */

	qh_refresh(oxu, qh);
	qh->hw_next = EHCI_LIST_END;
	frame = qh->start;

	/* reuse the previous schedule slots, if we can */
	if (frame < qh->period) {
		/* the S-mask bit position records the old uframe */
		uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK);
		status = check_intr_schedule(oxu, frame, --uframe,
				qh, &c_mask);
	} else {
		/* no previous slot; force the scan below */
		uframe = 0;
		c_mask = 0;
		status = -ENOSPC;
	}

	/* else scan the schedule to find a group of slots such that all
	 * uframes have enough periodic bandwidth available.
	 */
	if (status) {
		/* "normal" case, uframing flexible except with splits */
		if (qh->period) {
			frame = qh->period - 1;
			do {
				for (uframe = 0; uframe < 8; uframe++) {
					status = check_intr_schedule(oxu,
							frame, uframe, qh,
							&c_mask);
					if (status == 0)
						break;
				}
			} while (status && frame--);

		/* qh->period == 0 means every uframe */
		} else {
			frame = 0;
			status = check_intr_schedule(oxu, 0, 0, qh, &c_mask);
		}
		if (status)
			goto done;
		qh->start = frame;

		/* reset S-frame and (maybe) C-frame masks */
		qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK));
		qh->hw_info2 |= qh->period
			? cpu_to_le32(1 << uframe)
			: cpu_to_le32(QH_SMASK);
		qh->hw_info2 |= c_mask;
	} else
		oxu_dbg(oxu, "reused qh %p schedule\n", qh);

	/* stuff into the periodic schedule */
	status = qh_link_periodic(oxu, qh);
done:
	return status;
}
/* Queue an interrupt urb: find or schedule its qh, then append the qtds.
 * Takes oxu->lock internally; returns 0 or a negative errno.  On error
 * the caller's qtd list is freed here before returning.
 */
static int intr_submit(struct oxu_hcd *oxu, struct urb *urb,
			struct list_head *qtd_list, gfp_t mem_flags)
{
	unsigned epnum;
	unsigned long flags;
	struct ehci_qh *qh;
	int status = 0;
	struct list_head empty;

	/* get endpoint and transfer/schedule data */
	epnum = urb->ep->desc.bEndpointAddress;

	spin_lock_irqsave(&oxu->lock, flags);

	if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) {
		status = -ESHUTDOWN;
		goto done;
	}

	/* get qh and force any scheduling errors: append an EMPTY list
	 * first so scheduling problems surface before real qtds queue up
	 */
	INIT_LIST_HEAD(&empty);
	qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv);
	if (qh == NULL) {
		status = -ENOMEM;
		goto done;
	}
	if (qh->qh_state == QH_STATE_IDLE) {
		status = qh_schedule(oxu, qh);
		if (status != 0)
			goto done;
	}

	/* then queue the urb's tds to the qh */
	qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv);
	BUG_ON(qh == NULL);

	/* ... update usbfs periodic stats */
	oxu_to_hcd(oxu)->self.bandwidth_int_reqs++;

done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	if (status)
		qtd_list_free(oxu, urb, qtd_list);

	return status;
}
/* High-speed isochronous transfers are not implemented by this driver;
 * reject them with -ENOSYS after logging.
 */
static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb,
			     gfp_t mem_flags)
{
	oxu_dbg(oxu, "iso support is missing!\n");
	return -ENOSYS;
}
/* Full/low-speed (split) isochronous transfers are not implemented by
 * this driver; reject them with -ENOSYS after logging.
 */
static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb,
			      gfp_t mem_flags)
{
	oxu_dbg(oxu, "split iso support is missing!\n");
	return -ENOSYS;
}
/* Walk the periodic schedule from the last scan point up to "now"
 * (or over everything left, when the HC is stopped), running qh
 * completions and descheduling interrupt qhs whose qtd lists emptied.
 * Caller holds oxu->lock; completion callbacks may drop it.
 */
static void scan_periodic(struct oxu_hcd *oxu)
{
	unsigned frame, clock, now_uframe, mod;
	unsigned modified;

	mod = oxu->periodic_size << 3;	/* schedule length in uframes */

	/*
	 * When running, scan from last scan point up to "now"
	 * else clean up by scanning everything that's left.
	 * Touches as few pages as possible:  cache-friendly.
	 */
	now_uframe = oxu->next_uframe;
	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
		clock = readl(&oxu->regs->frame_index);
	else
		clock = now_uframe + mod - 1;	/* wrap to scan all */
	clock %= mod;

	for (;;) {
		union ehci_shadow q, *q_p;
		__le32 type, *hw_p;
		unsigned uframes;

		/* don't scan past the live uframe */
		frame = now_uframe >> 3;
		if (frame == (clock >> 3))
			uframes = now_uframe & 0x07;
		else {
			/* safe to scan the whole frame at once */
			now_uframe |= 0x07;
			uframes = 8;
		}

restart:
		/* scan each element in frame's queue for completions */
		q_p = &oxu->pshadow[frame];
		hw_p = &oxu->periodic[frame];
		q.ptr = q_p->ptr;
		type = Q_NEXT_TYPE(*hw_p);
		modified = 0;

		/* NOTE(review): 'uframes' and 'live' are computed but not
		 * consumed in this version — only QH (interrupt) elements
		 * are handled; there is no iso processing here.
		 */
		while (q.ptr != NULL) {
			union ehci_shadow temp;
			int live;

			live = HC_IS_RUNNING(oxu_to_hcd(oxu)->state);
			switch (type) {
			case Q_TYPE_QH:
				/* handle any completions */
				temp.qh = qh_get(q.qh);
				type = Q_NEXT_TYPE(q.qh->hw_next);
				q = q.qh->qh_next;
				modified = qh_completions(oxu, temp.qh);
				if (unlikely(list_empty(&temp.qh->qtd_list)))
					intr_deschedule(oxu, temp.qh);
				qh_put(temp.qh);
				break;
			default:
				oxu_dbg(oxu, "corrupt type %d frame %d shadow %p\n",
					type, frame, q.ptr);
				q.ptr = NULL;
			}

			/* assume completion callbacks modify the queue */
			if (unlikely(modified))
				goto restart;
		}

		/* Stop when we catch up to the HC */

		/* FIXME:  this assumes we won't get lapped when
		 * latencies climb; that should be rare, but...
		 * detect it, and just go all the way around.
		 * FLR might help detect this case, so long as latencies
		 * don't exceed periodic_size msec (default 1.024 sec).
		 */

		/* FIXME: likewise assumes HC doesn't halt mid-scan */

		if (now_uframe == clock) {
			unsigned now;

			if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))
				break;
			oxu->next_uframe = now_uframe;
			now = readl(&oxu->regs->frame_index) % mod;
			if (now_uframe == now)
				break;

			/* rescan the rest of this frame, then ... */
			clock = now;
		} else {
			now_uframe++;
			now_uframe %= mod;
		}
	}
}
/* On some systems, leaving remote wakeup enabled prevents system shutdown.
 * The firmware seems to think that powering off is a wakeup event!
 * This routine turns off remote wakeup and everything else, on all ports.
 */
static void ehci_turn_off_all_ports(struct oxu_hcd *oxu)
{
	int port;

	/* clear the RWC (and wake) bits on every root hub port */
	for (port = HCS_N_PORTS(oxu->hcs_params) - 1; port >= 0; port--)
		writel(PORT_RWC_BITS, &oxu->regs->port_status[port]);
}
/* Switch port power on or off for every root hub port, when the
 * controller supports per-port power control (HCS_PPC).
 */
static void ehci_port_power(struct oxu_hcd *oxu, int is_on)
{
	unsigned port = HCS_N_PORTS(oxu->hcs_params);

	if (!HCS_PPC(oxu->hcs_params))
		return;

	oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down");
	while (port > 0) {
		(void) oxu_hub_control(oxu_to_hcd(oxu),
				is_on ? SetPortFeature : ClearPortFeature,
				USB_PORT_FEAT_POWER, port, NULL, 0);
		port--;
	}

	/* give the hardware time to settle after the last change */
	msleep(20);
}
/* Called from some interrupts, timers, and so on.
 * It calls driver completion functions, after dropping oxu->lock.
 * Caller must hold oxu->lock.
 */
static void ehci_work(struct oxu_hcd *oxu)
{
	timer_action_done(oxu, TIMER_IO_WATCHDOG);
	if (oxu->reclaim_ready)
		end_unlink_async(oxu);

	/* another CPU may drop oxu->lock during a schedule scan while
	 * it reports urb completions.  this flag guards against bogus
	 * attempts at re-entrant schedule scanning.
	 */
	if (oxu->scanning)
		return;
	oxu->scanning = 1;
	scan_async(oxu);
	if (oxu->next_uframe != -1)	/* -1: periodic schedule unused */
		scan_periodic(oxu);
	oxu->scanning = 0;

	/* the IO watchdog guards against hardware or driver bugs that
	 * misplace IRQs, and should let us run completely without IRQs.
	 * such lossage has been observed on both VT6202 and VT8235.
	 */
	if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) &&
			(oxu->async->qh_next.ptr != NULL ||
			 oxu->periodic_sched != 0))
		timer_action(oxu, TIMER_IO_WATCHDOG);
}
/* Start (or defer) async unlink of qh.  If the IAA machinery
 * (oxu->reclaim) is already busy, chain qh at the tail of the pending
 * reclaim list instead.  Caller holds oxu->lock.
 */
static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh)
{
	/* if we need to use IAA and it's busy, defer */
	if (qh->qh_state == QH_STATE_LINKED
			&& oxu->reclaim
			&& HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) {
		struct ehci_qh *last;

		/* walk to the tail of the pending-reclaim chain */
		for (last = oxu->reclaim;
				last->reclaim;
				last = last->reclaim)
			continue;
		qh->qh_state = QH_STATE_UNLINK_WAIT;
		last->reclaim = qh;

	/* bypass IAA if the hc can't care */
	} else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim)
		end_unlink_async(oxu);

	/* something else might have unlinked the qh by now */
	if (qh->qh_state == QH_STATE_LINKED)
		start_unlink_async(oxu, qh);
}
/*
 * USB host controller methods
 */

/* Core interrupt handler: reads and acks the status register, flags
 * bottom-half work (ehci_work), handles remote-wakeup ports and fatal
 * errors.  Bracketed section numbers refer to the EHCI specification.
 */
static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 status, pcd_status = 0;
	int bh;

	spin_lock(&oxu->lock);

	status = readl(&oxu->regs->status);

	/* e.g. cardbus physical eject */
	if (status == ~(u32) 0) {
		oxu_dbg(oxu, "device removed\n");
		goto dead;
	}

	/* Shared IRQ? */
	status &= INTR_MASK;
	if (!status || unlikely(hcd->state == HC_STATE_HALT)) {
		spin_unlock(&oxu->lock);
		return IRQ_NONE;
	}

	/* clear (just) interrupts */
	writel(status, &oxu->regs->status);
	readl(&oxu->regs->command);	/* unblock posted write */
	bh = 0;

#ifdef OXU_VERBOSE_DEBUG
	/* unrequested/ignored: Frame List Rollover */
	dbg_status(oxu, "irq", status);
#endif

	/* INT, ERR, and IAA interrupt rates can be throttled */

	/* normal [4.15.1.2] or error [4.15.1.1] completion */
	if (likely((status & (STS_INT|STS_ERR)) != 0))
		bh = 1;

	/* complete the unlinking of some qh [4.15.2.3] */
	if (status & STS_IAA) {
		oxu->reclaim_ready = 1;
		bh = 1;
	}

	/* remote wakeup [4.3.1] */
	if (status & STS_PCD) {
		unsigned i = HCS_N_PORTS(oxu->hcs_params);
		pcd_status = status;

		/* resume root hub? */
		if (!(readl(&oxu->regs->command) & CMD_RUN))
			usb_hcd_resume_root_hub(hcd);

		while (i--) {
			int pstatus = readl(&oxu->regs->port_status[i]);

			/* skip ports owned by a companion controller */
			if (pstatus & PORT_OWNER)
				continue;
			if (!(pstatus & PORT_RESUME)
					|| oxu->reset_done[i] != 0)
				continue;

			/* start USB_RESUME_TIMEOUT resume signaling from this
			 * port, and make hub_wq collect PORT_STAT_C_SUSPEND to
			 * stop that signaling.
			 */
			oxu->reset_done[i] = jiffies +
				msecs_to_jiffies(USB_RESUME_TIMEOUT);
			oxu_dbg(oxu, "port %d remote wakeup\n", i + 1);
			mod_timer(&hcd->rh_timer, oxu->reset_done[i]);
		}
	}

	/* PCI errors [4.15.2.4] */
	if (unlikely((status & STS_FATAL) != 0)) {
		/* bogus "fatal" IRQs appear on some chips... why? */
		status = readl(&oxu->regs->status);
		dbg_cmd(oxu, "fatal", readl(&oxu->regs->command));
		dbg_status(oxu, "fatal", status);
		if (status & STS_HALT) {
			oxu_err(oxu, "fatal error\n");
dead:
			ehci_reset(oxu);
			writel(0, &oxu->regs->configured_flag);
			usb_hc_died(hcd);
			/* generic layer kills/unlinks all urbs, then
			 * uses oxu_stop to clean up the rest
			 */
			bh = 1;
		}
	}

	if (bh)
		ehci_work(oxu);
	spin_unlock(&oxu->lock);
	if (pcd_status & STS_PCD)
		usb_hcd_poll_rh_status(hcd);
	return IRQ_HANDLED;
}
/* Chip-level IRQ dispatcher: masks all chip interrupts, forwards the
 * event to the quasi-EHCI handler when it belongs to us (OTG or SPH
 * side), then restores the previous interrupt enable mask.
 */
static irqreturn_t oxu_irq(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS);
	u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET);
	irqreturn_t ret;

	/* mask every chip interrupt while dispatching */
	oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable);

	if ((oxu->is_otg && (status & OXU_USBOTGI)) ||
	    (!oxu->is_otg && (status & OXU_USBSPHI))) {
		oxu210_hcd_irq(hcd);
		ret = IRQ_HANDLED;
	} else {
		ret = IRQ_NONE;
	}

	/* restore the saved interrupt enable mask */
	oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable);

	return ret;
}
/* Watchdog timer callback: recovers lost IAA interrupts and keeps
 * completion processing going even without IRQs.
 */
static void oxu_watchdog(unsigned long param)
{
	struct oxu_hcd *oxu = (struct oxu_hcd *) param;
	unsigned long flags;

	spin_lock_irqsave(&oxu->lock, flags);

	/* lost IAA irqs wedge things badly; seen with a vt8235 */
	if (oxu->reclaim) {
		u32 status = readl(&oxu->regs->status);

		if (status & STS_IAA) {
			oxu_vdbg(oxu, "lost IAA\n");
			writel(STS_IAA, &oxu->regs->status);	/* ack it */
			oxu->reclaim_ready = 1;
		}
	}

	/* stop async processing after it's idled a bit */
	if (test_bit(TIMER_ASYNC_OFF, &oxu->actions))
		start_unlink_async(oxu, oxu->async);

	/* oxu could run by timer, without IRQs ... */
	ehci_work(oxu);

	spin_unlock_irqrestore(&oxu->lock, flags);
}
/* One-time init, only for memory state.
 * Sets up locks, the watchdog timer, driver memory pools, the dedicated
 * async head qh, and the initial USBCMD value (stored in oxu->command;
 * written to the hardware later, in oxu_run()).
 */
static int oxu_hcd_init(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp;
	int retval;
	u32 hcc_params;

	spin_lock_init(&oxu->lock);

	setup_timer(&oxu->watchdog, oxu_watchdog, (unsigned long)oxu);

	/*
	 * hw default: 1K periodic list heads, one per frame.
	 * periodic_size can shrink by USBCMD update if hcc_params allows.
	 */
	oxu->periodic_size = DEFAULT_I_TDPS;
	retval = ehci_mem_init(oxu, GFP_KERNEL);
	if (retval < 0)
		return retval;

	/* controllers may cache some of the periodic schedule ... */
	hcc_params = readl(&oxu->caps->hcc_params);
	if (HCC_ISOC_CACHE(hcc_params))		/* full frame cache */
		oxu->i_thresh = 8;
	else					/* N microframes cached */
		oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params);

	oxu->reclaim = NULL;
	oxu->reclaim_ready = 0;
	oxu->next_uframe = -1;	/* -1: periodic schedule not yet in use */

	/*
	 * dedicate a qh for the async ring head, since we couldn't unlink
	 * a 'real' qh without stopping the async schedule [4.8].  use it
	 * as the 'reclamation list head' too.
	 * its dummy is used in hw_alt_next of many tds, to prevent the qh
	 * from automatically advancing to the next td after short reads.
	 */
	oxu->async->qh_next.qh = NULL;
	oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma);
	oxu->async->hw_info1 = cpu_to_le32(QH_HEAD);
	oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT);
	oxu->async->hw_qtd_next = EHCI_LIST_END;
	oxu->async->qh_state = QH_STATE_LINKED;
	oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma);

	/* clear interrupt enables, set irq latency */
	if (log2_irq_thresh < 0 || log2_irq_thresh > 6)
		log2_irq_thresh = 0;
	temp = 1 << (16 + log2_irq_thresh);
	if (HCC_CANPARK(hcc_params)) {
		/* HW default park == 3, on hardware that supports it (like
		 * NVidia and ALI silicon), maximizes throughput on the async
		 * schedule by avoiding QH fetches between transfers.
		 *
		 * With fast usb storage devices and NForce2, "park" seems to
		 * make problems:  throughput reduction (!), data errors...
		 */
		if (park) {
			park = min(park, (unsigned) 3);
			temp |= CMD_PARK;
			temp |= park << 8;
		}
		oxu_dbg(oxu, "park %d\n", park);
	}
	if (HCC_PGM_FRAMELISTLEN(hcc_params)) {
		/* periodic schedule size can be smaller than default */
		temp &= ~(3 << 2);
		temp |= (EHCI_TUNE_FLS << 2);
	}
	oxu->command = temp;

	return 0;
}
/* Called during probe() after chip reset completes.
 * Maps the capability/operational/local-memory regions for either the
 * OTG or the SPH side of the chip, then does one-time memory init.
 */
static int oxu_reset(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	unsigned long cap_offset, mem_offset;

	spin_lock_init(&oxu->mem_lock);
	INIT_LIST_HEAD(&oxu->urb_list);
	oxu->urb_len = 0;

	/* FIXME: DMA is not used; transfers go through chip-local memory */
	hcd->self.controller->dma_mask = NULL;

	if (oxu->is_otg) {
		cap_offset = OXU_OTG_CAP_OFFSET;
		mem_offset = OXU_SPH_MEM;
	} else {
		cap_offset = OXU_SPH_CAP_OFFSET;
		mem_offset = OXU_OTG_MEM;
	}
	oxu->caps = hcd->regs + cap_offset;
	oxu->regs = hcd->regs + cap_offset +
		HC_LENGTH(readl(&oxu->caps->hc_capbase));
	oxu->mem = hcd->regs + mem_offset;

	oxu->hcs_params = readl(&oxu->caps->hcs_params);
	oxu->sbrn = 0x20;

	return oxu_hcd_init(hcd);
}
/* Start the controller: reset it, program schedule base registers,
 * set RUN, and raise the configured flag so ports route here.
 */
static int oxu_run(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int retval;
	u32 temp, hcc_params;

	hcd->uses_new_polling = 1;

	/* EHCI spec section 4.1 */
	retval = ehci_reset(oxu);
	if (retval != 0) {
		ehci_mem_cleanup(oxu);
		return retval;
	}
	writel(oxu->periodic_dma, &oxu->regs->frame_list);
	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);

	/* hcc_params controls whether oxu->regs->segment must (!!!)
	 * be used; it constrains QH/ITD/SITD and QTD locations.
	 * pci_pool consistent memory always uses segment zero.
	 * streaming mappings for I/O buffers, like pci_map_single(),
	 * can return segments above 4GB, if the device allows.
	 *
	 * NOTE:  the dma mask is visible through dev->dma_mask, so
	 * drivers can pass this info along ... like NETIF_F_HIGHDMA,
	 * Scsi_Host.highmem_io, and so forth.  It's readonly to all
	 * host side drivers though.
	 */
	hcc_params = readl(&oxu->caps->hcc_params);
	if (HCC_64BIT_ADDR(hcc_params))
		writel(0, &oxu->regs->segment);

	/* start with a clean command: no reset/doorbell/schedules pending */
	oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE |
				CMD_ASE | CMD_RESET);
	oxu->command |= CMD_RUN;
	writel(oxu->command, &oxu->regs->command);
	dbg_cmd(oxu, "init", oxu->command);

	/*
	 * Start, enabling full USB 2.0 functionality ... usb 1.1 devices
	 * are explicitly handed to companion controller(s), so no TT is
	 * involved with the root hub.  (Except where one is integrated,
	 * and there's no companion controller unless maybe for USB OTG.)
	 */
	hcd->state = HC_STATE_RUNNING;
	writel(FLAG_CF, &oxu->regs->configured_flag);
	readl(&oxu->regs->command);	/* unblock posted writes */

	temp = HC_VERSION(readl(&oxu->caps->hc_capbase));
	oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n",
		((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f),
		temp >> 8, temp & 0xff, DRIVER_VERSION,
		ignore_oc ? ", overcurrent ignored" : "");

	writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */

	return 0;
}
/* Shut down the controller: power off the root hub ports, quiesce,
 * reset, flush remaining work, and free the driver's memory pools.
 * Counterpart of oxu_run().
 */
static void oxu_stop(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);

	/* Turn off port power on all root hub ports. */
	ehci_port_power(oxu, 0);

	/* no more interrupts ... */
	del_timer_sync(&oxu->watchdog);

	spin_lock_irq(&oxu->lock);
	if (HC_IS_RUNNING(hcd->state))
		ehci_quiesce(oxu);

	ehci_reset(oxu);
	writel(0, &oxu->regs->intr_enable);
	spin_unlock_irq(&oxu->lock);

	/* let companion controllers work when we aren't */
	writel(0, &oxu->regs->configured_flag);

	/* root hub is shut down separately (first, when possible) */
	spin_lock_irq(&oxu->lock);
	if (oxu->async)
		ehci_work(oxu);
	spin_unlock_irq(&oxu->lock);
	ehci_mem_cleanup(oxu);

	dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status));
}
/* Kick in for silicon on any bus (not just pci, etc).
 * This forcibly disables dma and IRQs, helping kexec and other cases
 * where the next system software may expect clean state.
 */
static void oxu_shutdown(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);

	(void) ehci_halt(oxu);
	ehci_turn_off_all_ports(oxu);

	/* make BIOS/etc use companion controller during reboot */
	writel(0, &oxu->regs->configured_flag);

	/* read back to unblock the posted write */
	readl(&oxu->regs->configured_flag);
}
/* Non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 *
 * urb + dev is in hcd.self.controller.urb_list
 * we're queueing TDs onto software and hardware lists
 *
 * hcd-specific init for hcpriv hasn't been done yet
 *
 * NOTE:  control, bulk, and interrupt share the same code to append TDs
 * to a (possibly active) QH, and the same QH scanning code.
 */
static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
			     gfp_t mem_flags)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	struct list_head qtd_list;

	INIT_LIST_HEAD(&qtd_list);

	switch (usb_pipetype(urb->pipe)) {
	case PIPE_ISOCHRONOUS:
		/* iso uses dedicated descriptors, no qtd list */
		if (urb->dev->speed == USB_SPEED_HIGH)
			return itd_submit(oxu, urb, mem_flags);
		return sitd_submit(oxu, urb, mem_flags);

	case PIPE_INTERRUPT:
		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return intr_submit(oxu, urb, &qtd_list, mem_flags);

	case PIPE_CONTROL:
	case PIPE_BULK:
	default:
		if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags))
			return -ENOMEM;
		return submit_async(oxu, urb, &qtd_list, mem_flags);
	}
}
/* This function is responsible for breaking URBs with big data size
* into smaller size and processing small urbs in sequence.
*/
static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb,
gfp_t mem_flags)
{
struct oxu_hcd *oxu = hcd_to_oxu(hcd);
int num, rem;
int transfer_buffer_length;
void *transfer_buffer;
struct urb *murb;
int i, ret;
/* If not bulk pipe just enqueue the URB */
if (!usb_pipebulk(urb->pipe))
return __oxu_urb_enqueue(hcd, urb, mem_flags);
/* Otherwise we should verify the USB transfer buffer size! */
transfer_buffer = urb->transfer_buffer;
transfer_buffer_length = urb->transfer_buffer_length;
num = urb->transfer_buffer_length / 4096;
rem = urb->transfer_buffer_length % 4096;
if (rem != 0)
num++;
/* If URB is smaller than 4096 bytes just enqueue it! */
if (num == 1)
return __oxu_urb_enqueue(hcd, urb, mem_flags);
/* Ok, we have more job to do! :) */
for (i = 0; i < num - 1; i++) {
/* Get free micro URB poll till a free urb is received */
do {
murb = (struct urb *) oxu_murb_alloc(oxu);
if (!murb)
schedule();
} while (!murb);
/* Coping the urb */
memcpy(murb, urb, sizeof(struct urb));
murb->transfer_buffer_length = 4096;
murb->transfer_buffer = transfer_buffer + i * 4096;
/* Null pointer for the encodes that this is a micro urb */
murb->complete = NULL;
((struct oxu_murb *) murb)->main = urb;
((struct oxu_murb *) murb)->last = 0;
/* This loop is to guarantee urb to be processed when there's
* not enough resources at a particular time by retrying.
*/
do {
ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
if (ret)
schedule();
} while (ret);
}
/* Last urb requires special handling */
/* Get free micro URB poll till a free urb is received */
do {
murb = (struct urb *) oxu_murb_alloc(oxu);
if (!murb)
schedule();
} while (!murb);
/* Coping the urb */
memcpy(murb, urb, sizeof(struct urb));
murb->transfer_buffer_length = rem > 0 ? rem : 4096;
murb->transfer_buffer = transfer_buffer + (num - 1) * 4096;
/* Null pointer for the encodes that this is a micro urb */
murb->complete = NULL;
((struct oxu_murb *) murb)->main = urb;
((struct oxu_murb *) murb)->last = 1;
do {
ret = __oxu_urb_enqueue(hcd, murb, mem_flags);
if (ret)
schedule();
} while (ret);
return ret;
}
/* Remove from hardware lists.
 * Completions normally happen asynchronously.
 * NOTE(review): the @status argument is not used by this implementation.
 */
static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	struct ehci_qh *qh;
	unsigned long flags;

	spin_lock_irqsave(&oxu->lock, flags);
	switch (usb_pipetype(urb->pipe)) {
	case PIPE_CONTROL:
	case PIPE_BULK:
	default:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		unlink_async(oxu, qh);
		break;

	case PIPE_INTERRUPT:
		qh = (struct ehci_qh *) urb->hcpriv;
		if (!qh)
			break;
		switch (qh->qh_state) {
		case QH_STATE_LINKED:
			intr_deschedule(oxu, qh);
			/* FALL THROUGH */
		case QH_STATE_IDLE:
			qh_completions(oxu, qh);
			break;
		default:
			oxu_dbg(oxu, "bogus qh %p state %d\n",
					qh, qh->qh_state);
			goto done;
		}

		/* reschedule QH iff another request is queued */
		if (!list_empty(&qh->qtd_list)
				&& HC_IS_RUNNING(hcd->state)) {
			int status;

			status = qh_schedule(oxu, qh);
			spin_unlock_irqrestore(&oxu->lock, flags);

			if (status != 0) {
				/* shouldn't happen often, but ...
				 * FIXME kill those tds' urbs
				 */
				dev_err(hcd->self.controller,
					"can't reschedule qh %p, err %d\n", qh,
					status);
			}
			return status;
		}
		break;
	}

done:
	spin_unlock_irqrestore(&oxu->lock, flags);
	return 0;
}
/* Bulk qh holds the data toggle */

/* Tear down the qh backing an endpoint, spinning (rescan) until the
 * hardware has finished with it.  Caller guarantees no new urbs will be
 * submitted and existing ones are being unlinked.
 */
static void oxu_endpoint_disable(struct usb_hcd *hcd,
					struct usb_host_endpoint *ep)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	unsigned long flags;
	struct ehci_qh *qh, *tmp;

	/* ASSERT:  any requests/urbs are being unlinked */
	/* ASSERT:  nobody can be submitting urbs for this any more */

rescan:
	spin_lock_irqsave(&oxu->lock, flags);
	qh = ep->hcpriv;
	if (!qh)
		goto done;

	/* endpoints can be iso streams.  for now, we don't
	 * accelerate iso completions ... so spin a while.
	 */
	if (qh->hw_info1 == 0) {
		oxu_vdbg(oxu, "iso delay\n");
		goto idle_timeout;
	}

	if (!HC_IS_RUNNING(hcd->state))
		qh->qh_state = QH_STATE_IDLE;
	switch (qh->qh_state) {
	case QH_STATE_LINKED:
		/* check the qh is still on the async ring */
		for (tmp = oxu->async->qh_next.qh;
				tmp && tmp != qh;
				tmp = tmp->qh_next.qh)
			continue;
		/* periodic qh self-unlinks on empty */
		if (!tmp)
			goto nogood;
		unlink_async(oxu, qh);
		/* FALL THROUGH */
	case QH_STATE_UNLINK:		/* wait for hw to finish? */
idle_timeout:
		spin_unlock_irqrestore(&oxu->lock, flags);
		schedule_timeout_uninterruptible(1);
		goto rescan;
	case QH_STATE_IDLE:		/* fully unlinked */
		if (list_empty(&qh->qtd_list)) {
			qh_put(qh);
			break;
		}
		/* else FALL THROUGH */
	default:
nogood:
		/* caller was supposed to have unlinked any requests;
		 * that's not our job.  just leak this memory.
		 */
		oxu_err(oxu, "qh %p (#%02x) state %d%s\n",
			qh, ep->desc.bEndpointAddress, qh->qh_state,
			list_empty(&qh->qtd_list) ? "" : "(has tds)");
		break;
	}
	ep->hcpriv = NULL;
done:
	spin_unlock_irqrestore(&oxu->lock, flags);
}
/* Report the current (SOF) frame number, modulo the schedule size */
static int oxu_get_frame(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	unsigned frame = readl(&oxu->regs->frame_index) >> 3;

	return frame % oxu->periodic_size;
}
/* Build "status change" packet (one or two bytes) from HC registers.
 * Returns the packet length in bytes when any port changed, else 0.
 */
static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp, mask, status = 0;
	int ports, i, retval = 1;
	unsigned long flags;

	/* if !PM, root hub timers won't get shut down ... */
	if (!HC_IS_RUNNING(hcd->state))
		return 0;

	/* init status to no-changes */
	buf[0] = 0;
	ports = HCS_N_PORTS(oxu->hcs_params);
	if (ports > 7) {
		buf[1] = 0;	/* second byte for ports 8+ */
		retval++;
	}

	/* Some boards (mostly VIA?) report bogus overcurrent indications,
	 * causing massive log spam unless we completely ignore them.  It
	 * may be relevant that VIA VT8235 controllers, where PORT_POWER is
	 * always set, seem to clear PORT_OCC and PORT_CSC when writing to
	 * PORT_POWER; that's surprising, but maybe within-spec.
	 */
	if (!ignore_oc)
		mask = PORT_CSC | PORT_PEC | PORT_OCC;
	else
		mask = PORT_CSC | PORT_PEC;

	/* no hub change reports (bit 0) for now (power, ...) */

	/* port N changes (bit N)? */
	spin_lock_irqsave(&oxu->lock, flags);
	for (i = 0; i < ports; i++) {
		temp = readl(&oxu->regs->port_status[i]);

		/*
		 * Return status information even for ports with OWNER set.
		 * Otherwise hub_wq wouldn't see the disconnect event when a
		 * high-speed device is switched over to the companion
		 * controller by the user.
		 */
		if (!(temp & PORT_CONNECT))
			oxu->reset_done[i] = 0;
		if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 &&
				time_after_eq(jiffies, oxu->reset_done[i]))) {
			if (i < 7)
				buf[0] |= 1 << (i + 1);
			else
				buf[1] |= 1 << (i - 7);
			status = STS_PCD;
		}
	}
	/* FIXME autosuspend idle root hubs */
	spin_unlock_irqrestore(&oxu->lock, flags);
	return status ? retval : 0;
}
/* Returns the speed of a device attached to a port on the root hub,
 * decoded from bits 27:26 of the port status/control register.
 */
static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu,
					  unsigned int portsc)
{
	unsigned int speed_bits = (portsc >> 26) & 3;

	if (speed_bits == 0)
		return 0;
	if (speed_bits == 1)
		return USB_PORT_STAT_LOW_SPEED;

	/* 2 (and the remaining encoding) report high speed */
	return USB_PORT_STAT_HIGH_SPEED;
}
#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
/* Root hub control requests: implements Clear/Set/Get hub and port
 * features for the quasi-EHCI root hub.  Returns 0 on success or
 * -EPIPE ("stall") for unsupported/invalid requests.
 * NOTE(review): status_reg is computed from wIndex before the range
 * checks; it is only dereferenced after each request validates wIndex.
 */
static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq,
			u16 wValue, u16 wIndex, char *buf, u16 wLength)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int ports = HCS_N_PORTS(oxu->hcs_params);
	u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1];
	u32 temp, status;
	unsigned long flags;
	int retval = 0;
	unsigned selector;

	/*
	 * FIXME:  support SetPortFeatures USB_PORT_FEAT_INDICATOR.
	 * HCS_INDICATOR may say we can change LEDs to off/amber/green.
	 * (track current state ourselves) ... blink for diagnostics,
	 * power, "this is the one", etc.  EHCI spec supports this.
	 */

	spin_lock_irqsave(&oxu->lock, flags);
	switch (typeReq) {
	case ClearHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case ClearPortFeature:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;	/* wIndex is 1-based on the wire */
		temp = readl(status_reg);

		/*
		 * Even if OWNER is set, so the port is owned by the
		 * companion controller, hub_wq needs to be able to clear
		 * the port-change status bits (especially
		 * USB_PORT_STAT_C_CONNECTION).
		 */

		switch (wValue) {
		case USB_PORT_FEAT_ENABLE:
			writel(temp & ~PORT_PE, status_reg);
			break;
		case USB_PORT_FEAT_C_ENABLE:
			writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg);
			break;
		case USB_PORT_FEAT_SUSPEND:
			if (temp & PORT_RESET)
				goto error;
			if (temp & PORT_SUSPEND) {
				if ((temp & PORT_PE) == 0)
					goto error;
				/* resume signaling for 20 msec */
				temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
				writel(temp | PORT_RESUME, status_reg);
				oxu->reset_done[wIndex] = jiffies
						+ msecs_to_jiffies(20);
			}
			break;
		case USB_PORT_FEAT_C_SUSPEND:
			/* we auto-clear this feature */
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(oxu->hcs_params))
				writel(temp & ~(PORT_RWC_BITS | PORT_POWER),
					  status_reg);
			break;
		case USB_PORT_FEAT_C_CONNECTION:
			writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg);
			break;
		case USB_PORT_FEAT_C_OVER_CURRENT:
			writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg);
			break;
		case USB_PORT_FEAT_C_RESET:
			/* GetPortStatus clears reset */
			break;
		default:
			goto error;
		}
		readl(&oxu->regs->command);	/* unblock posted write */
		break;
	case GetHubDescriptor:
		ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *)
			buf);
		break;
	case GetHubStatus:
		/* no hub-wide feature/status flags */
		memset(buf, 0, 4);
		break;
	case GetPortStatus:
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		status = 0;
		temp = readl(status_reg);

		/* wPortChange bits */
		if (temp & PORT_CSC)
			status |= USB_PORT_STAT_C_CONNECTION << 16;
		if (temp & PORT_PEC)
			status |= USB_PORT_STAT_C_ENABLE << 16;
		if ((temp & PORT_OCC) && !ignore_oc)
			status |= USB_PORT_STAT_C_OVERCURRENT << 16;

		/* whoever resumes must GetPortStatus to complete it!! */
		if (temp & PORT_RESUME) {

			/* Remote Wakeup received? */
			if (!oxu->reset_done[wIndex]) {
				/* resume signaling for 20 msec */
				oxu->reset_done[wIndex] = jiffies
						+ msecs_to_jiffies(20);
				/* check the port again */
				mod_timer(&oxu_to_hcd(oxu)->rh_timer,
						oxu->reset_done[wIndex]);
			}

			/* resume completed? */
			else if (time_after_eq(jiffies,
					oxu->reset_done[wIndex])) {
				status |= USB_PORT_STAT_C_SUSPEND << 16;
				oxu->reset_done[wIndex] = 0;

				/* stop resume signaling */
				temp = readl(status_reg);
				writel(temp & ~(PORT_RWC_BITS | PORT_RESUME),
					status_reg);
				retval = handshake(oxu, status_reg,
					   PORT_RESUME, 0, 2000 /* 2msec */);
				if (retval != 0) {
					oxu_err(oxu,
						"port %d resume error %d\n",
						wIndex + 1, retval);
					goto error;
				}
				temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10));
			}
		}

		/* whoever resets must GetPortStatus to complete it!! */
		if ((temp & PORT_RESET)
				&& time_after_eq(jiffies,
					oxu->reset_done[wIndex])) {
			status |= USB_PORT_STAT_C_RESET << 16;
			oxu->reset_done[wIndex] = 0;

			/* force reset to complete */
			writel(temp & ~(PORT_RWC_BITS | PORT_RESET),
					status_reg);
			/* REVISIT:  some hardware needs 550+ usec to clear
			 * this bit; seems too long to spin routinely...
			 */
			retval = handshake(oxu, status_reg,
					PORT_RESET, 0, 750);
			if (retval != 0) {
				oxu_err(oxu, "port %d reset error %d\n",
					wIndex + 1, retval);
				goto error;
			}

			/* see what we found out */
			temp = check_reset_complete(oxu, wIndex, status_reg,
					readl(status_reg));
		}

		/* transfer dedicated ports to the companion hc */
		if ((temp & PORT_CONNECT) &&
				test_bit(wIndex, &oxu->companion_ports)) {
			temp &= ~PORT_RWC_BITS;
			temp |= PORT_OWNER;
			writel(temp, status_reg);
			oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1);
			temp = readl(status_reg);
		}

		/*
		 * Even if OWNER is set, there's no harm letting hub_wq
		 * see the wPortStatus values (they should all be 0 except
		 * for PORT_POWER anyway).
		 */

		if (temp & PORT_CONNECT) {
			status |= USB_PORT_STAT_CONNECTION;
			/* status may be from integrated TT */
			status |= oxu_port_speed(oxu, temp);
		}
		if (temp & PORT_PE)
			status |= USB_PORT_STAT_ENABLE;
		if (temp & (PORT_SUSPEND|PORT_RESUME))
			status |= USB_PORT_STAT_SUSPEND;
		if (temp & PORT_OC)
			status |= USB_PORT_STAT_OVERCURRENT;
		if (temp & PORT_RESET)
			status |= USB_PORT_STAT_RESET;
		if (temp & PORT_POWER)
			status |= USB_PORT_STAT_POWER;

#ifndef	OXU_VERBOSE_DEBUG
	if (status & ~0xffff)	/* only if wPortChange is interesting */
#endif
		dbg_port(oxu, "GetStatus", wIndex + 1, temp);
		put_unaligned(cpu_to_le32(status), (__le32 *) buf);
		break;
	case SetHubFeature:
		switch (wValue) {
		case C_HUB_LOCAL_POWER:
		case C_HUB_OVER_CURRENT:
			/* no hub-wide feature/status flags */
			break;
		default:
			goto error;
		}
		break;
	case SetPortFeature:
		selector = wIndex >> 8;	/* test-mode selector, high byte */
		wIndex &= 0xff;
		if (!wIndex || wIndex > ports)
			goto error;
		wIndex--;
		temp = readl(status_reg);
		if (temp & PORT_OWNER)
			break;

		temp &= ~PORT_RWC_BITS;
		switch (wValue) {
		case USB_PORT_FEAT_SUSPEND:
			if ((temp & PORT_PE) == 0
					|| (temp & PORT_RESET) != 0)
				goto error;
			if (device_may_wakeup(&hcd->self.root_hub->dev))
				temp |= PORT_WAKE_BITS;
			writel(temp | PORT_SUSPEND, status_reg);
			break;
		case USB_PORT_FEAT_POWER:
			if (HCS_PPC(oxu->hcs_params))
				writel(temp | PORT_POWER, status_reg);
			break;
		case USB_PORT_FEAT_RESET:
			if (temp & PORT_RESUME)
				goto error;
			/* line status bits may report this as low speed,
			 * which can be fine if this root hub has a
			 * transaction translator built in.
			 */
			oxu_vdbg(oxu, "port %d reset\n", wIndex + 1);
			temp |= PORT_RESET;
			temp &= ~PORT_PE;

			/*
			 * caller must wait, then call GetPortStatus
			 * usb 2.0 spec says 50 ms resets on root
			 */
			oxu->reset_done[wIndex] = jiffies
					+ msecs_to_jiffies(50);
			writel(temp, status_reg);
			break;

		/* For downstream facing ports (these):  one hub port is put
		 * into test mode according to USB2 11.24.2.13, then the hub
		 * must be reset (which for root hub now means rmmod+modprobe,
		 * or else system reboot).  See EHCI 2.3.9 and 4.14 for info
		 * about the EHCI-specific stuff.
		 */
		case USB_PORT_FEAT_TEST:
			if (!selector || selector > 5)
				goto error;
			ehci_quiesce(oxu);
			ehci_halt(oxu);
			temp |= selector << 16;
			writel(temp, status_reg);
			break;

		default:
			goto error;
		}
		readl(&oxu->regs->command);	/* unblock posted writes */
		break;

	default:
error:
		/* "stall" on error */
		retval = -EPIPE;
	}
	spin_unlock_irqrestore(&oxu->lock, flags);
	return retval;
}
#ifdef CONFIG_PM
/* Suspend the root hub: quiesce the schedules, manually suspend every
 * enabled/owned port (recording each in ->bus_suspended so bus_resume()
 * can undo it), halt the controller, and leave only the wakeup-relevant
 * interrupts enabled.  Always returns 0.
 */
static int oxu_bus_suspend(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	int port;
	int mask;

	oxu_dbg(oxu, "suspend root hub\n");

	/* Honor the minimum settle time between root-hub state changes. */
	if (time_before(jiffies, oxu->next_statechange))
		msleep(5);

	port = HCS_N_PORTS(oxu->hcs_params);
	spin_lock_irq(&oxu->lock);

	/* stop schedules, clean any completed work */
	if (HC_IS_RUNNING(hcd->state)) {
		ehci_quiesce(oxu);
		hcd->state = HC_STATE_QUIESCING;
	}
	/* Snapshot CMD so bus_resume() can restore run state/irq threshold. */
	oxu->command = readl(&oxu->regs->command);
	if (oxu->reclaim)
		oxu->reclaim_ready = 1;
	ehci_work(oxu);

	/* Unlike other USB host controller types, EHCI doesn't have
	 * any notion of "global" or bus-wide suspend.  The driver has
	 * to manually suspend all the active unsuspended ports, and
	 * then manually resume them in the bus_resume() routine.
	 */
	oxu->bus_suspended = 0;
	while (port--) {
		u32 __iomem *reg = &oxu->regs->port_status[port];
		/* Mask RWC bits so read-modify-write doesn't clear them. */
		u32 t1 = readl(reg) & ~PORT_RWC_BITS;
		u32 t2 = t1;

		/* keep track of which ports we suspend */
		if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) &&
				!(t1 & PORT_SUSPEND)) {
			t2 |= PORT_SUSPEND;
			set_bit(port, &oxu->bus_suspended);
		}

		/* enable remote wakeup on all ports */
		if (device_may_wakeup(&hcd->self.root_hub->dev))
			t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E;
		else
			t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E);

		if (t1 != t2) {
			oxu_vdbg(oxu, "port %d, %08x -> %08x\n",
				port + 1, t1, t2);
			writel(t2, reg);
		}
	}

	/* turn off now-idle HC */
	del_timer_sync(&oxu->watchdog);
	ehci_halt(oxu);
	hcd->state = HC_STATE_SUSPENDED;

	/* allow remote wakeup */
	mask = INTR_MASK;
	if (!device_may_wakeup(&hcd->self.root_hub->dev))
		mask &= ~STS_PCD;
	writel(mask, &oxu->regs->intr_enable);
	readl(&oxu->regs->intr_enable);	/* flush posted write */

	oxu->next_statechange = jiffies + msecs_to_jiffies(10);
	spin_unlock_irq(&oxu->lock);
	return 0;
}
/* Caller has locked the root hub, and should reset/reinit on error */
/* Resume the root hub after oxu_bus_suspend(): restore the operational
 * registers, drive a resume signal on every port we suspended, then
 * re-enable the schedules and interrupts.  Always returns 0.
 */
static int oxu_bus_resume(struct usb_hcd *hcd)
{
	struct oxu_hcd *oxu = hcd_to_oxu(hcd);
	u32 temp;
	int i;

	if (time_before(jiffies, oxu->next_statechange))
		msleep(5);
	spin_lock_irq(&oxu->lock);

	/* Ideally and we've got a real resume here, and no port's power
	 * was lost. (For PCI, that means Vaux was maintained.) But we
	 * could instead be restoring a swsusp snapshot -- so that BIOS was
	 * the last user of the controller, not reset/pm hardware keeping
	 * state we gave to it.
	 */
	temp = readl(&oxu->regs->intr_enable);
	oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss");

	/* at least some APM implementations will try to deliver
	 * IRQs right away, so delay them until we're ready.
	 */
	writel(0, &oxu->regs->intr_enable);

	/* re-init operational registers */
	writel(0, &oxu->regs->segment);
	writel(oxu->periodic_dma, &oxu->regs->frame_list);
	writel((u32) oxu->async->qh_dma, &oxu->regs->async_next);

	/* restore CMD_RUN, framelist size, and irq threshold */
	writel(oxu->command, &oxu->regs->command);

	/* Some controller/firmware combinations need a delay during which
	 * they set up the port statuses. See Bugzilla #8190. */
	mdelay(8);

	/* manually resume the ports we suspended during bus_suspend() */
	i = HCS_N_PORTS(oxu->hcs_params);
	while (i--) {
		temp = readl(&oxu->regs->port_status[i]);
		/* Drop wakeup enables; host is awake and handling events now. */
		temp &= ~(PORT_RWC_BITS
			| PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E);
		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
			oxu->reset_done[i] = jiffies + msecs_to_jiffies(20);
			temp |= PORT_RESUME;
		}
		writel(temp, &oxu->regs->port_status[i]);
	}

	/* USB resume signaling must be driven for at least 20 ms. */
	i = HCS_N_PORTS(oxu->hcs_params);
	mdelay(20);
	while (i--) {
		temp = readl(&oxu->regs->port_status[i]);
		if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) {
			temp &= ~(PORT_RWC_BITS | PORT_RESUME);
			writel(temp, &oxu->regs->port_status[i]);
			oxu_vdbg(oxu, "resumed port %d\n", i + 1);
		}
	}
	(void) readl(&oxu->regs->command);	/* unblock posted writes */

	/* maybe re-activate the schedule(s) */
	temp = 0;
	if (oxu->async->qh_next.qh)
		temp |= CMD_ASE;
	if (oxu->periodic_sched)
		temp |= CMD_PSE;
	if (temp) {
		oxu->command |= temp;
		writel(oxu->command, &oxu->regs->command);
	}

	oxu->next_statechange = jiffies + msecs_to_jiffies(5);
	hcd->state = HC_STATE_RUNNING;

	/* Now we can safely re-enable irqs */
	writel(INTR_MASK, &oxu->regs->intr_enable);

	spin_unlock_irq(&oxu->lock);
	return 0;
}
#else
/* Without CONFIG_PM the root hub never really suspends; both bus PM
 * callbacks become successful no-ops.
 */
static int oxu_bus_suspend(struct usb_hcd *hcd)
{
	return 0;
}

static int oxu_bus_resume(struct usb_hcd *hcd)
{
	return 0;
}
#endif /* CONFIG_PM */
/* HCD operations table handed to usb_create_hcd(); shared by the OTG
 * and SPH host instances created in oxu_create().
 */
static const struct hc_driver oxu_hc_driver = {
	.description =		"oxu210hp_hcd",
	.product_desc =		"oxu210hp HCD",
	.hcd_priv_size =	sizeof(struct oxu_hcd),

	/*
	 * Generic hardware linkage
	 */
	.irq =			oxu_irq,
	.flags =		HCD_MEMORY | HCD_USB2,

	/*
	 * Basic lifecycle operations
	 */
	.reset =		oxu_reset,
	.start =		oxu_run,
	.stop =			oxu_stop,
	.shutdown =		oxu_shutdown,

	/*
	 * Managing i/o requests and associated device resources
	 */
	.urb_enqueue =		oxu_urb_enqueue,
	.urb_dequeue =		oxu_urb_dequeue,
	.endpoint_disable =	oxu_endpoint_disable,

	/*
	 * Scheduling support
	 */
	.get_frame_number =	oxu_get_frame,

	/*
	 * Root hub support
	 */
	.hub_status_data =	oxu_hub_status_data,
	.hub_control =		oxu_hub_control,
	.bus_suspend =		oxu_bus_suspend,
	.bus_resume =		oxu_bus_resume,
};
/*
* Module stuff
*/
/* One-time chip-level setup: soft-reset the part, program the host
 * interface and clock control registers, and set up the top-level
 * interrupt enables.  Must run before the EHCI cores are touched.
 *
 * NOTE(review): the 0x0000037D host-interface value and the 0x0040
 * burst-read bit are undocumented magic from the vendor; meaning not
 * derivable from this file — confirm against the OXU210HP datasheet.
 */
static void oxu_configuration(struct platform_device *pdev, void *base)
{
	u32 tmp;

	/* Initialize top level registers.
	 * First write ever
	 */
	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);
	oxu_writel(base, OXU_SOFTRESET, OXU_SRESET);
	oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D);

	tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL);
	oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040);

	oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN |
		   OXU_COMPARATOR | OXU_ASO_OP);

	tmp = oxu_readl(base, OXU_CLKCTRL_SET);
	oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN);

	/* Clear all top interrupt enable */
	oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff);

	/* Clear all top interrupt status */
	oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff);

	/* Enable all needed top interrupt except OTG SPH core */
	oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI);
}
static int oxu_verify_id(struct platform_device *pdev, void *base)
{
u32 id;
static const char * const bo[] = {
"reserved",
"128-pin LQFP",
"84-pin TFBGA",
"reserved",
};
/* Read controller signature register to find a match */
id = oxu_readl(base, OXU_DEVICEID);
dev_info(&pdev->dev, "device ID %x\n", id);
if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT))
return -1;
dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n",
id >> OXU_REV_SHIFT,
bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT],
(id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT,
(id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT);
return 0;
}
static const struct hc_driver oxu_hc_driver;
/* Create and register one usb_hcd instance for either the OTG core
 * (otg != 0) or the SPH core of the OXU210HP.
 *
 * Returns the registered hcd, or an ERR_PTR on failure.
 *
 * Fix: the original leaked the hcd allocated by usb_create_hcd() when
 * usb_add_hcd() failed — it must be released with usb_put_hcd() before
 * returning the error.
 */
static struct usb_hcd *oxu_create(struct platform_device *pdev,
				  unsigned long memstart, unsigned long memlen,
				  void *base, int irq, int otg)
{
	struct device *dev = &pdev->dev;

	struct usb_hcd *hcd;
	struct oxu_hcd *oxu;
	int ret;

	/* Set endian mode and host mode */
	oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET),
		   OXU_USBMODE,
		   OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS);

	hcd = usb_create_hcd(&oxu_hc_driver, dev,
			     otg ? "oxu210hp_otg" : "oxu210hp_sph");
	if (!hcd)
		return ERR_PTR(-ENOMEM);

	hcd->rsrc_start = memstart;
	hcd->rsrc_len = memlen;
	hcd->regs = base;
	hcd->irq = irq;
	hcd->state = HC_STATE_HALT;

	oxu = hcd_to_oxu(hcd);
	oxu->is_otg = otg;

	ret = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (ret < 0) {
		usb_put_hcd(hcd);	/* drop the reference from usb_create_hcd() */
		return ERR_PTR(ret);
	}

	device_wakeup_enable(hcd->self.controller);
	return hcd;
}
/* Bring up the chip and create both host controllers (OTG then SPH),
 * storing them in info->hcd[0]/[1].  On SPH failure the already
 * registered OTG hcd is torn down.  Returns 0 or a negative errno.
 */
static int oxu_init(struct platform_device *pdev,
		    unsigned long memstart, unsigned long memlen,
		    void *base, int irq)
{
	struct oxu_info *info = platform_get_drvdata(pdev);
	struct usb_hcd *hcd;
	int ret;

	/* First time configuration at start up */
	oxu_configuration(pdev, base);

	ret = oxu_verify_id(pdev, base);
	if (ret) {
		dev_err(&pdev->dev, "no devices found!\n");
		return -ENODEV;
	}

	/* Create the OTG controller */
	hcd = oxu_create(pdev, memstart, memlen, base, irq, 1);
	if (IS_ERR(hcd)) {
		dev_err(&pdev->dev, "cannot create OTG controller!\n");
		ret = PTR_ERR(hcd);
		goto error_create_otg;
	}
	info->hcd[0] = hcd;

	/* Create the SPH host controller */
	hcd = oxu_create(pdev, memstart, memlen, base, irq, 0);
	if (IS_ERR(hcd)) {
		dev_err(&pdev->dev, "cannot create SPH controller!\n");
		ret = PTR_ERR(hcd);
		goto error_create_sph;
	}
	info->hcd[1] = hcd;

	/* Enable the two cores' top-level interrupt lines. */
	oxu_writel(base, OXU_CHIPIRQEN_SET,
		   oxu_readl(base, OXU_CHIPIRQEN_SET) | 3);

	return 0;

error_create_sph:
	usb_remove_hcd(info->hcd[0]);
	usb_put_hcd(info->hcd[0]);

error_create_otg:
	return ret;
}
static int oxu_drv_probe(struct platform_device *pdev)
{
struct resource *res;
void *base;
unsigned long memstart, memlen;
int irq, ret;
struct oxu_info *info;
if (usb_disabled())
return -ENODEV;
/*
* Get the platform resources
*/
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
dev_err(&pdev->dev,
"no IRQ! Check %s setup!\n", dev_name(&pdev->dev));
return -ENODEV;
}
irq = res->start;
dev_dbg(&pdev->dev, "IRQ resource %d\n", irq);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(base)) {
ret = PTR_ERR(base);
goto error;
}
memstart = res->start;
memlen = resource_size(res);
ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING);
if (ret) {
dev_err(&pdev->dev, "error setting irq type\n");
ret = -EFAULT;
goto error;
}
/* Allocate a driver data struct to hold useful info for both
* SPH & OTG devices
*/
info = devm_kzalloc(&pdev->dev, sizeof(struct oxu_info), GFP_KERNEL);
if (!info) {
ret = -EFAULT;
goto error;
}
platform_set_drvdata(pdev, info);
ret = oxu_init(pdev, memstart, memlen, base, irq);
if (ret < 0) {
dev_dbg(&pdev->dev, "cannot init USB devices\n");
goto error;
}
dev_info(&pdev->dev, "devices enabled and running\n");
platform_set_drvdata(pdev, info);
return 0;
error:
dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret);
return ret;
}
/* Unregister and release one host controller instance. */
static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd)
{
	usb_remove_hcd(hcd);
	usb_put_hcd(hcd);
}
/* Platform remove: tear down both controllers (OTG first, then SPH). */
static int oxu_drv_remove(struct platform_device *pdev)
{
	struct oxu_info *info = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < 2; i++)
		oxu_remove(pdev, info->hcd[i]);

	return 0;
}
/* Shutdown is identical to removal: unregister both controllers. */
static void oxu_drv_shutdown(struct platform_device *pdev)
{
	oxu_drv_remove(pdev);
}
#if 0
/* FIXME: TODO — device-level PM is not implemented; this skeleton is
 * compiled out and the driver falls back to the NULL callbacks below.
 */
static int oxu_drv_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	return 0;
}

static int oxu_drv_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct usb_hcd *hcd = dev_get_drvdata(dev);

	return 0;
}
#else
#define oxu_drv_suspend	NULL
#define oxu_drv_resume	NULL
#endif
/* Platform driver glue.
 * NOTE(review): setting .driver.bus to &platform_bus_type is redundant —
 * platform_driver_register() assigns it — confirm before relying on it.
 */
static struct platform_driver oxu_driver = {
	.probe		= oxu_drv_probe,
	.remove		= oxu_drv_remove,
	.shutdown	= oxu_drv_shutdown,
	.suspend	= oxu_drv_suspend,
	.resume		= oxu_drv_resume,
	.driver = {
		.name = "oxu210hp-hcd",
		.bus = &platform_bus_type
	}
};
module_platform_driver(oxu_driver);
MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION);
MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
chrisy/linux | arch/x86/xen/time.c | 143 | 13984 | /*
* Xen time implementation.
*
* This is implemented in terms of a clocksource driver which uses
* the hypervisor clock as a nanosecond timebase, and a clockevent
* driver which uses the hypervisor's timer mechanism.
*
* Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/clocksource.h>
#include <linux/clockchips.h>
#include <linux/kernel_stat.h>
#include <linux/math64.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/pvclock_gtod.h>
#include <asm/pvclock.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/events.h>
#include <xen/features.h>
#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include "xen-ops.h"
/* Xen may fire a timer up to this many ns early */
#define TIMER_SLOP 100000
#define NS_PER_TICK (1000000000LL / HZ)
/* runstate info updated by Xen */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
/* snapshots of runstate info */
static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot);
/* unused ns of stolen time */
static DEFINE_PER_CPU(u64, xen_residual_stolen);
/* Return a consistent snapshot of a 64-bit time/counter value that the
 * hypervisor may update concurrently.  On 32-bit builds a torn read is
 * possible, so read high/low halves and retry until the high word is
 * stable; on 64-bit the load is atomic.
 */
static u64 get64(const u64 *p)
{
	u64 ret;

	if (BITS_PER_LONG < 64) {
		u32 *p32 = (u32 *)p;
		u32 h, l;

		/*
		 * Read high then low, and then make sure high is
		 * still the same; this will only loop if low wraps
		 * and carries into high.
		 * XXX some clean way to make this endian-proof?
		 */
		do {
			h = p32[1];
			barrier();	/* force re-reads; Xen writes the value, not this CPU's compiler */
			l = p32[0];
			barrier();
		} while (p32[1] != h);

		ret = (((u64)h) << 32) | l;
	} else
		ret = *p;

	return ret;
}
/*
 * Runstate accounting
 */

/* Copy this CPU's vcpu_runstate_info into *res atomically with respect
 * to hypervisor updates: retry the copy until state_entry_time is
 * unchanged across it.  Must be called with preemption disabled so we
 * stay on the vcpu whose runstate we are reading.
 */
static void get_runstate_snapshot(struct vcpu_runstate_info *res)
{
	u64 state_time;
	struct vcpu_runstate_info *state;

	BUG_ON(preemptible());

	state = &__get_cpu_var(xen_runstate);

	/*
	 * The runstate info is always updated by the hypervisor on
	 * the current CPU, so there's no need to use anything
	 * stronger than a compiler barrier when fetching it.
	 */
	do {
		state_time = get64(&state->state_entry_time);
		barrier();
		*res = *state;
		barrier();
	} while (get64(&state->state_entry_time) != state_time);
}
/* return true when a vcpu could run but has no real cpu to run on */
bool xen_vcpu_stolen(int vcpu)
{
return per_cpu(xen_runstate, vcpu).state == RUNSTATE_runnable;
}
/* Register this CPU's per-cpu xen_runstate buffer with the hypervisor
 * so Xen keeps it updated with scheduling statistics.  BUGs on failure
 * since stolen-time accounting depends on it.
 */
void xen_setup_runstate_info(int cpu)
{
	struct vcpu_register_runstate_memory_area area;

	area.addr.v = &per_cpu(xen_runstate, cpu);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
			       cpu, &area))
		BUG();
}
/* Account "stolen" time (runnable-but-not-running plus offline) to the
 * scheduler, called from the timer interrupt.  Differences are taken
 * against the previous per-cpu snapshot; sub-tick remainders are carried
 * in xen_residual_stolen so no time is lost to rounding.
 */
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(xen_runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing*  */
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time. */
	stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);

	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__this_cpu_write(xen_residual_stolen, stolen);
	account_steal_ticks(ticks);
}
/* Get the TSC frequency (in kHz) from vcpu 0's pvclock parameters. */
static unsigned long xen_tsc_khz(void)
{
	return pvclock_tsc_khz(&HYPERVISOR_shared_info->vcpu_info[0].time);
}
/* Read the Xen pvclock for the current vcpu and return nanoseconds.
 * Preemption is disabled (notrace, since this is also the sched_clock)
 * so the per-cpu time info isn't swapped under us mid-read.
 */
cycle_t xen_clocksource_read(void)
{
	struct pvclock_vcpu_time_info *src;
	cycle_t ret;

	preempt_disable_notrace();
	src = &__get_cpu_var(xen_vcpu)->time;
	ret = pvclock_clocksource_read(src);
	preempt_enable_notrace();
	return ret;
}
/* clocksource ->read callback; thin wrapper discarding the cs argument. */
static cycle_t xen_clocksource_get_cycles(struct clocksource *cs)
{
	return xen_clocksource_read();
}
/* Read the hypervisor's wall-clock time into *ts, combining the shared
 * wall-clock base with this vcpu's pvclock system time.
 */
static void xen_read_wallclock(struct timespec *ts)
{
	struct shared_info *s = HYPERVISOR_shared_info;
	struct pvclock_wall_clock *wall_clock = &(s->wc);
        struct pvclock_vcpu_time_info *vcpu_time;

	vcpu_time = &get_cpu_var(xen_vcpu)->time;
	pvclock_read_wallclock(wall_clock, vcpu_time, ts);
	put_cpu_var(xen_vcpu);
}
/* x86_platform.get_wallclock hook. */
static void xen_get_wallclock(struct timespec *now)
{
	xen_read_wallclock(now);
}

/* x86_platform.set_wallclock hook for unprivileged domains: a domU
 * cannot set the host RTC, so always report failure.
 */
static int xen_set_wallclock(const struct timespec *now)
{
	return -1;
}
/* pvclock_gtod notifier (dom0 only): push the kernel's time to Xen when
 * the clock is set, or every 11 minutes for RTC-style drift sync —
 * mirroring sync_cmos_clock() behaviour for the hardware RTC.
 */
static int xen_pvclock_gtod_notify(struct notifier_block *nb,
				   unsigned long was_set, void *priv)
{
	/* Protected by the calling core code serialization */
	static struct timespec next_sync;

	struct xen_platform_op op;
	struct timespec now;

	now = __current_kernel_time();

	/*
	 * We only take the expensive HV call when the clock was set
	 * or when the 11 minutes RTC synchronization time elapsed.
	 */
	if (!was_set && timespec_compare(&now, &next_sync) < 0)
		return NOTIFY_OK;

	op.cmd = XENPF_settime;
	op.u.settime.secs = now.tv_sec;
	op.u.settime.nsecs = now.tv_nsec;
	op.u.settime.system_time = xen_clocksource_read();

	(void)HYPERVISOR_dom0_op(&op);

	/*
	 * Move the next drift compensation time 11 minutes
	 * ahead. That's emulating the sync_cmos_clock() update for
	 * the hardware RTC.
	 */
	next_sync = now;
	next_sync.tv_sec += 11 * 60;

	return NOTIFY_OK;
}
static struct notifier_block xen_pvclock_gtod_notifier = {
	.notifier_call = xen_pvclock_gtod_notify,
};

/* Xen pvclock as a Linux clocksource; rating 400 outranks the TSC. */
static struct clocksource xen_clocksource __read_mostly = {
	.name = "xen",
	.rating = 400,
	.read = xen_clocksource_get_cycles,
	.mask = ~0,
	.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
/*
Xen clockevent implementation
Xen has two clockevent implementations:
The old timer_op one works with all released versions of Xen prior
to version 3.0.4. This version of the hypervisor provides a
single-shot timer with nanosecond resolution. However, sharing the
same event channel is a 100Hz tick which is delivered while the
vcpu is running. We don't care about or use this tick, but it will
cause the core time code to think the timer fired too soon, and
will end up resetting it each time. It could be filtered, but
doing so has complications when the ktime clocksource is not yet
the xen clocksource (ie, at boot time).
The new vcpu_op-based timer interface allows the tick timer period
to be changed or turned off. The tick timer is not useful as a
periodic timer because events are only delivered to running vcpus.
The one-shot timer can report when a timeout is in the past, so
set_next_event is capable of returning -ETIME when appropriate.
This interface is used when available.
*/
/*
Get a hypervisor absolute time. In theory we could maintain an
offset between the kernel's time and the hypervisor's time, and
apply that to a kernel's absolute timeout. Unfortunately the
hypervisor and kernel times can drift even if the kernel is using
the Xen clocksource, because ntp can warp the kernel's clocksource.
*/
/* Convert a relative delta (ns) into an absolute Xen-clock timeout. */
static s64 get_abs_timeout(unsigned long delta)
{
	s64 now = xen_clocksource_read();

	return now + delta;
}
/* Mode callback for the legacy timer_op clockevent: only one-shot is
 * supported; shutdown cancels any pending timeout.
 */
static void xen_timerop_set_mode(enum clock_event_mode mode,
				 struct clock_event_device *evt)
{
	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* unsupported */
		WARN_ON(1);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_RESUME:
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		HYPERVISOR_set_timer_op(0);  /* cancel timeout */
		break;
	}
}
/* Program the legacy single-shot hypervisor timer 'delta' ns from now. */
static int xen_timerop_set_next_event(unsigned long delta,
				      struct clock_event_device *evt)
{
	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0)
		BUG();

	/* We may have missed the deadline, but there's no real way of
	   knowing for sure.  If the event was in the past, then we'll
	   get an immediate interrupt. */

	return 0;
}
/* Legacy timer_op clockevent (pre-Xen-3.0.4 fallback).  mult=1/shift=0:
 * deltas are already in nanoseconds, no scaling needed.
 */
static const struct clock_event_device xen_timerop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_timerop_set_mode,
	.set_next_event = xen_timerop_set_next_event,
};
/* Mode callback for the vcpu_op clockevent: one-shot disables the
 * periodic 100Hz tick; shutdown cancels both timer flavours.
 */
static void xen_vcpuop_set_mode(enum clock_event_mode mode,
				struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		WARN_ON(1);	/* unsupported */
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;

	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
		    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
		break;
	case CLOCK_EVT_MODE_RESUME:
		break;
	}
}
/* Arm the vcpu_op single-shot timer.  VCPU_SSHOTTMR_future makes Xen
 * return -ETIME for a timeout already in the past, which the clockevent
 * core handles by retrying.
 */
static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(evt->mode != CLOCK_EVT_MODE_ONESHOT);

	single.timeout_abs_ns = get_abs_timeout(delta);
	single.flags = VCPU_SSHOTTMR_future;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}
/* Preferred vcpu_op-based clockevent (Xen >= 3.0.4); deltas in ns. */
static const struct clock_event_device xen_vcpuop_clockevent = {
	.name = "xen",
	.features = CLOCK_EVT_FEAT_ONESHOT,

	.max_delta_ns = 0xffffffff,
	.min_delta_ns = TIMER_SLOP,

	.mult = 1,
	.shift = 0,
	.rating = 500,

	.set_mode = xen_vcpuop_set_mode,
	.set_next_event = xen_vcpuop_set_next_event,
};
/* Which clockevent flavour to use; switched to vcpuop in xen_time_init()
 * if the hypervisor supports it.
 */
static const struct clock_event_device *xen_clockevent =
	&xen_timerop_clockevent;

/* Per-cpu clockevent plus its kasprintf'd irq name (freed at teardown). */
struct xen_clock_event_device {
	struct clock_event_device evt;
	char *name;
};
static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 };
/* VIRQ_TIMER handler: dispatch to the clockevent handler (if one is
 * registered yet) and then update stolen-time accounting.
 */
static irqreturn_t xen_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *evt = &__get_cpu_var(xen_clock_events).evt;
	irqreturn_t ret;

	ret = IRQ_NONE;
	if (evt->event_handler) {
		evt->event_handler(evt);
		ret = IRQ_HANDLED;
	}

	do_stolen_accounting();

	return ret;
}
/* Unbind a CPU's timer irq and free its name (hotplug/teardown path).
 * CPU 0's timer must never be torn down, hence the BUG_ON.
 */
void xen_teardown_timer(int cpu)
{
	struct clock_event_device *evt;
	BUG_ON(cpu == 0);
	evt = &per_cpu(xen_clock_events, cpu).evt;

	if (evt->irq >= 0) {
		unbind_from_irqhandler(evt->irq, NULL);
		evt->irq = -1;
		kfree(per_cpu(xen_clock_events, cpu).name);
		per_cpu(xen_clock_events, cpu).name = NULL;
	}
}
/* Bind VIRQ_TIMER for @cpu and initialize its per-cpu clockevent.
 *
 * Fix: if kasprintf() failed, the original stored the string literal
 * "<timer kasprintf failed>" in the per-cpu ->name, which
 * xen_teardown_timer() later passes to kfree() — freeing a non-heap
 * pointer.  Now the literal is used only as the (statically-lived) irq
 * name, and ->name stays NULL on allocation failure; kfree(NULL) is a
 * no-op, so teardown remains safe.
 */
void xen_setup_timer(int cpu)
{
	char *name;
	struct clock_event_device *evt;
	int irq;

	evt = &per_cpu(xen_clock_events, cpu).evt;
	WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu);
	if (evt->irq >= 0)
		xen_teardown_timer(cpu);

	printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu);

	name = kasprintf(GFP_KERNEL, "timer%d", cpu);

	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt,
				      IRQF_DISABLED|IRQF_PERCPU|
				      IRQF_NOBALANCING|IRQF_TIMER|
				      IRQF_FORCE_RESUME,
				      name ? name : "<timer kasprintf failed>",
				      NULL);

	memcpy(evt, xen_clockevent, sizeof(*evt));

	evt->cpumask = cpumask_of(cpu);
	evt->irq = irq;
	per_cpu(xen_clock_events, cpu).name = name;	/* NULL if kasprintf failed */
}
/* Register this CPU's clockevent with the core; must run on the target
 * CPU with preemption disabled.
 */
void xen_setup_cpu_clockevents(void)
{
	BUG_ON(preemptible());

	clockevents_register_device(&__get_cpu_var(xen_clock_events).evt);
}
/* Resume-from-suspend hook: refresh pvclock state and, when using the
 * vcpuop clockevent, re-stop the hypervisor's periodic tick on every
 * online CPU (suspend may have re-enabled it).
 */
void xen_timer_resume(void)
{
	int cpu;

	pvclock_resume();

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
	}
}
/* paravirt time ops: the Xen pvclock doubles as sched_clock. */
static const struct pv_time_ops xen_time_ops __initconst = {
	.sched_clock = xen_clocksource_read,
};
/* Boot-time timer init: register the clocksource, probe for the vcpuop
 * timer interface, seed system time from the Xen wall clock, and set up
 * the boot CPU's runstate area, timer irq and clockevent.
 */
static void __init xen_time_init(void)
{
	int cpu = smp_processor_id();
	struct timespec tp;

	clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC);

	if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL) == 0) {
		/* Successfully turned off 100Hz tick, so we have the
		   vcpuop-based timer interface */
		printk(KERN_DEBUG "Xen: using vcpuop timer interface\n");
		xen_clockevent = &xen_vcpuop_clockevent;
	}

	/* Set initial system time with full resolution */
	xen_read_wallclock(&tp);
	do_settimeofday(&tp);

	/* TSC is always available under Xen (emulated if necessary). */
	setup_force_cpu_cap(X86_FEATURE_TSC);

	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_setup_cpu_clockevents();

	/* Only dom0 may push time back to the hypervisor/RTC. */
	if (xen_initial_domain())
		pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier);
}
/* PV-guest entry point: route all kernel timekeeping hooks to Xen. */
void __init xen_init_time_ops(void)
{
	pv_time_ops = xen_time_ops;

	x86_init.timers.timer_init = xen_time_init;
	x86_init.timers.setup_percpu_clockev = x86_init_noop;
	x86_cpuinit.setup_percpu_clockev = x86_init_noop;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	/* Dom0 uses the native method to set the hardware RTC. */
	if (!xen_initial_domain())
		x86_platform.set_wallclock = xen_set_wallclock;
}
#ifdef CONFIG_XEN_PVHVM
/* Per-cpu clockevent setup for PVHVM guests; runs on the target CPU. */
static void xen_hvm_setup_cpu_clockevents(void)
{
	int cpu = smp_processor_id();
	xen_setup_runstate_info(cpu);
	/*
	 * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence
	 * doing it xen_hvm_cpu_notify (which gets called by smp_init during
	 * early bootup and also during CPU hotplug events).
	 */
	xen_setup_cpu_clockevents();
}
/* PVHVM entry point: use Xen timekeeping when the vector callback and a
 * HVM-safe pvclock are available; otherwise fall back to native timers.
 *
 * Fix: the concatenated printk string was missing a space and printed
 * "...on HVM,disable pv timer".
 */
void __init xen_hvm_init_time_ops(void)
{
	/* vector callback is needed otherwise we cannot receive interrupts
	 * on cpu > 0 and at this point we don't know how many cpus are
	 * available */
	if (!xen_have_vector_callback)
		return;

	if (!xen_feature(XENFEAT_hvm_safe_pvclock)) {
		printk(KERN_INFO "Xen doesn't support pvclock on HVM, "
				"disable pv timer\n");
		return;
	}

	pv_time_ops = xen_time_ops;
	x86_init.timers.setup_percpu_clockev = xen_time_init;
	x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents;

	x86_platform.calibrate_tsc = xen_tsc_khz;
	x86_platform.get_wallclock = xen_get_wallclock;
	x86_platform.set_wallclock = xen_set_wallclock;
}
#endif
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.