repo_name
string
path
string
copies
string
size
string
content
string
license
string
keecker/kernel_msm-3.10
drivers/scsi/bfa/bfa_fcs_fcpim.c
4285
22054
/* * Copyright (c) 2005-2010 Brocade Communications Systems, Inc. * All rights reserved * www.brocade.com * * Linux driver for Brocade Fibre Channel Host Bus Adapter. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License (GPL) Version 2 as * published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. */ /* * fcpim.c - FCP initiator mode i-t nexus state machine */ #include "bfad_drv.h" #include "bfa_fcs.h" #include "bfa_fcbuild.h" #include "bfad_im.h" BFA_TRC_FILE(FCS, FCPIM); /* * forward declarations */ static void bfa_fcs_itnim_timeout(void *arg); static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim); static void bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced); static void bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs); static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, enum bfa_itnim_aen_event event); static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); 
static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event); static struct bfa_sm_table_s itnim_sm_table[] = { {BFA_SM(bfa_fcs_itnim_sm_offline), BFA_ITNIM_OFFLINE}, {BFA_SM(bfa_fcs_itnim_sm_prli_send), BFA_ITNIM_PRLI_SEND}, {BFA_SM(bfa_fcs_itnim_sm_prli), BFA_ITNIM_PRLI_SENT}, {BFA_SM(bfa_fcs_itnim_sm_prli_retry), BFA_ITNIM_PRLI_RETRY}, {BFA_SM(bfa_fcs_itnim_sm_hcb_online), BFA_ITNIM_HCB_ONLINE}, {BFA_SM(bfa_fcs_itnim_sm_online), BFA_ITNIM_ONLINE}, {BFA_SM(bfa_fcs_itnim_sm_hcb_offline), BFA_ITNIM_HCB_OFFLINE}, {BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR}, }; /* * fcs_itnim_sm FCS itnim state machine */ static void bfa_fcs_itnim_sm_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_FCS_ONLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); itnim->prli_retries = 0; bfa_fcs_itnim_send_prli(itnim, NULL); break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_INITIATOR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_prli_send(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_FRMSENT: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli); break; case BFA_FCS_ITNIM_SM_INITIATOR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); 
bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcxp_walloc_cancel(itnim->fcs->bfa, &itnim->fcxp_wqe); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_prli(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_RSP_OK: if (itnim->rport->scsi_function == BFA_RPORT_INITIATOR) bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); else bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hal_rport_online); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_RSP_ERROR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_retry); bfa_timer_start(itnim->fcs->bfa, &itnim->timer, bfa_fcs_itnim_timeout, itnim, BFA_FCS_RETRY_TIMEOUT); break; case BFA_FCS_ITNIM_SM_RSP_NOT_SUPP: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcxp_discard(itnim->fcxp); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_INITIATOR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); bfa_fcxp_discard(itnim->fcxp); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcxp_discard(itnim->fcxp); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_hal_rport_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_HAL_ONLINE: if (!itnim->bfa_itnim) itnim->bfa_itnim = bfa_itnim_create(itnim->fcs->bfa, itnim->rport->bfa_rport, itnim); if 
(itnim->bfa_itnim) { bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_online); bfa_itnim_online(itnim->bfa_itnim, itnim->seq_rec); } else { bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_sm_send_event(itnim->rport, RPSM_EVENT_DELETE); } break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_prli_retry(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_TIMEOUT: if (itnim->prli_retries < BFA_FCS_RPORT_MAX_RETRIES) { itnim->prli_retries++; bfa_trc(itnim->fcs, itnim->prli_retries); bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_prli_send); bfa_fcs_itnim_send_prli(itnim, NULL); } else { /* invoke target offline */ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP); } break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_timer_stop(&itnim->timer); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_INITIATOR: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_initiator); bfa_timer_stop(&itnim->timer); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_timer_stop(&itnim->timer); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_hcb_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; char lpwwn_buf[BFA_STRING_32]; char rpwwn_buf[BFA_STRING_32]; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) 
{ case BFA_FCS_ITNIM_SM_HCB_ONLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_online); bfa_fcb_itnim_online(itnim->itnim_drv); wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); wwn2str(rpwwn_buf, itnim->rport->pwwn); BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Target (WWN = %s) is online for initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_ONLINE); break; case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); bfa_itnim_offline(itnim->bfa_itnim); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_online(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; char lpwwn_buf[BFA_STRING_32]; char rpwwn_buf[BFA_STRING_32]; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_hcb_offline); bfa_fcb_itnim_offline(itnim->itnim_drv); bfa_itnim_offline(itnim->bfa_itnim); wwn2str(lpwwn_buf, bfa_fcs_lport_get_pwwn(itnim->rport->port)); wwn2str(rpwwn_buf, itnim->rport->pwwn); if (bfa_fcs_lport_is_online(itnim->rport->port) == BFA_TRUE) { BFA_LOG(KERN_ERR, bfad, bfa_log_level, "Target (WWN = %s) connectivity lost for " "initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_DISCONNECT); } else { BFA_LOG(KERN_INFO, bfad, bfa_log_level, "Target (WWN = %s) offlined by initiator (WWN = %s)\n", rpwwn_buf, lpwwn_buf); bfa_fcs_itnim_aen_post(itnim, BFA_ITNIM_AEN_OFFLINE); } break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_sm_hcb_offline(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { 
bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_HCB_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } /* * This state is set when a discovered rport is also in intiator mode. * This ITN is marked as no_op and is not active and will not be truned into * online state. */ static void bfa_fcs_itnim_sm_initiator(struct bfa_fcs_itnim_s *itnim, enum bfa_fcs_itnim_event event) { bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_trc(itnim->fcs, event); switch (event) { case BFA_FCS_ITNIM_SM_OFFLINE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_OFFLINE); break; /* * fcs_online is expected here for well known initiator ports */ case BFA_FCS_ITNIM_SM_FCS_ONLINE: bfa_sm_send_event(itnim->rport, RPSM_EVENT_FC4_FCS_ONLINE); break; case BFA_FCS_ITNIM_SM_RSP_ERROR: case BFA_FCS_ITNIM_SM_INITIATOR: break; case BFA_FCS_ITNIM_SM_DELETE: bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); bfa_fcs_itnim_free(itnim); break; default: bfa_sm_fault(itnim->fcs, event); } } static void bfa_fcs_itnim_aen_post(struct bfa_fcs_itnim_s *itnim, enum bfa_itnim_aen_event event) { struct bfa_fcs_rport_s *rport = itnim->rport; struct bfad_s *bfad = (struct bfad_s *)itnim->fcs->bfad; struct bfa_aen_entry_s *aen_entry; /* Don't post events for well known addresses */ if (BFA_FCS_PID_IS_WKA(rport->pid)) return; bfad_get_aen_entry(bfad, aen_entry); if (!aen_entry) return; aen_entry->aen_data.itnim.vf_id = rport->port->fabric->vf_id; aen_entry->aen_data.itnim.ppwwn = bfa_fcs_lport_get_pwwn( bfa_fcs_get_base_port(itnim->fcs)); aen_entry->aen_data.itnim.lpwwn = bfa_fcs_lport_get_pwwn(rport->port); aen_entry->aen_data.itnim.rpwwn = rport->pwwn; /* Send the AEN notification 
*/ bfad_im_post_vendor_event(aen_entry, bfad, ++rport->fcs->fcs_aen_seq, BFA_AEN_CAT_ITNIM, event); } static void bfa_fcs_itnim_send_prli(void *itnim_cbarg, struct bfa_fcxp_s *fcxp_alloced) { struct bfa_fcs_itnim_s *itnim = itnim_cbarg; struct bfa_fcs_rport_s *rport = itnim->rport; struct bfa_fcs_lport_s *port = rport->port; struct fchs_s fchs; struct bfa_fcxp_s *fcxp; int len; bfa_trc(itnim->fcs, itnim->rport->pwwn); fcxp = fcxp_alloced ? fcxp_alloced : bfa_fcs_fcxp_alloc(port->fcs, BFA_TRUE); if (!fcxp) { itnim->stats.fcxp_alloc_wait++; bfa_fcs_fcxp_alloc_wait(port->fcs->bfa, &itnim->fcxp_wqe, bfa_fcs_itnim_send_prli, itnim, BFA_TRUE); return; } itnim->fcxp = fcxp; len = fc_prli_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), itnim->rport->pid, bfa_fcs_lport_get_fcid(port), 0); bfa_fcxp_send(fcxp, rport->bfa_rport, port->fabric->vf_id, port->lp_tag, BFA_FALSE, FC_CLASS_3, len, &fchs, bfa_fcs_itnim_prli_response, (void *)itnim, FC_MAX_PDUSZ, FC_ELS_TOV); itnim->stats.prli_sent++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_FRMSENT); } static void bfa_fcs_itnim_prli_response(void *fcsarg, struct bfa_fcxp_s *fcxp, void *cbarg, bfa_status_t req_status, u32 rsp_len, u32 resid_len, struct fchs_s *rsp_fchs) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg; struct fc_els_cmd_s *els_cmd; struct fc_prli_s *prli_resp; struct fc_ls_rjt_s *ls_rjt; struct fc_prli_params_s *sparams; bfa_trc(itnim->fcs, req_status); /* * Sanity Checks */ if (req_status != BFA_STATUS_OK) { itnim->stats.prli_rsp_err++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); return; } els_cmd = (struct fc_els_cmd_s *) BFA_FCXP_RSP_PLD(fcxp); if (els_cmd->els_code == FC_ELS_ACC) { prli_resp = (struct fc_prli_s *) els_cmd; if (fc_prli_rsp_parse(prli_resp, rsp_len) != FC_PARSE_OK) { bfa_trc(itnim->fcs, rsp_len); /* * Check if this r-port is also in Initiator mode. * If so, we need to set this ITN as a no-op. 
*/ if (prli_resp->parampage.servparams.initiator) { bfa_trc(itnim->fcs, prli_resp->parampage.type); itnim->rport->scsi_function = BFA_RPORT_INITIATOR; itnim->stats.prli_rsp_acc++; itnim->stats.initiator++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK); return; } itnim->stats.prli_rsp_parse_err++; return; } itnim->rport->scsi_function = BFA_RPORT_TARGET; sparams = &prli_resp->parampage.servparams; itnim->seq_rec = sparams->retry; itnim->rec_support = sparams->rec_support; itnim->task_retry_id = sparams->task_retry_id; itnim->conf_comp = sparams->confirm; itnim->stats.prli_rsp_acc++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_OK); } else { ls_rjt = (struct fc_ls_rjt_s *) BFA_FCXP_RSP_PLD(fcxp); bfa_trc(itnim->fcs, ls_rjt->reason_code); bfa_trc(itnim->fcs, ls_rjt->reason_code_expl); itnim->stats.prli_rsp_rjt++; if (ls_rjt->reason_code == FC_LS_RJT_RSN_CMD_NOT_SUPP) { bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_NOT_SUPP); return; } bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_RSP_ERROR); } } static void bfa_fcs_itnim_timeout(void *arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) arg; itnim->stats.timeout++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_TIMEOUT); } static void bfa_fcs_itnim_free(struct bfa_fcs_itnim_s *itnim) { if (itnim->bfa_itnim) { bfa_itnim_delete(itnim->bfa_itnim); itnim->bfa_itnim = NULL; } bfa_fcb_itnim_free(itnim->fcs->bfad, itnim->itnim_drv); } /* * itnim_public FCS ITNIM public interfaces */ /* * Called by rport when a new rport is created. * * @param[in] rport - remote port. 
*/ struct bfa_fcs_itnim_s * bfa_fcs_itnim_create(struct bfa_fcs_rport_s *rport) { struct bfa_fcs_lport_s *port = rport->port; struct bfa_fcs_itnim_s *itnim; struct bfad_itnim_s *itnim_drv; /* * call bfad to allocate the itnim */ bfa_fcb_itnim_alloc(port->fcs->bfad, &itnim, &itnim_drv); if (itnim == NULL) { bfa_trc(port->fcs, rport->pwwn); return NULL; } /* * Initialize itnim */ itnim->rport = rport; itnim->fcs = rport->fcs; itnim->itnim_drv = itnim_drv; itnim->bfa_itnim = NULL; itnim->seq_rec = BFA_FALSE; itnim->rec_support = BFA_FALSE; itnim->conf_comp = BFA_FALSE; itnim->task_retry_id = BFA_FALSE; /* * Set State machine */ bfa_sm_set_state(itnim, bfa_fcs_itnim_sm_offline); return itnim; } /* * Called by rport to delete the instance of FCPIM. * * @param[in] rport - remote port. */ void bfa_fcs_itnim_delete(struct bfa_fcs_itnim_s *itnim) { bfa_trc(itnim->fcs, itnim->rport->pid); bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE); } /* * Notification from rport that PLOGI is complete to initiate FC-4 session. */ void bfa_fcs_itnim_brp_online(struct bfa_fcs_itnim_s *itnim) { itnim->stats.onlines++; if (!BFA_FCS_PID_IS_WKA(itnim->rport->pid)) bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HAL_ONLINE); } /* * Called by rport to handle a remote device offline. */ void bfa_fcs_itnim_rport_offline(struct bfa_fcs_itnim_s *itnim) { itnim->stats.offlines++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE); } /* * Called by rport when remote port is known to be an initiator from * PRLI received. */ void bfa_fcs_itnim_is_initiator(struct bfa_fcs_itnim_s *itnim) { bfa_trc(itnim->fcs, itnim->rport->pid); itnim->stats.initiator++; bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR); } /* * Called by rport to check if the itnim is online. 
*/ bfa_status_t bfa_fcs_itnim_get_online_state(struct bfa_fcs_itnim_s *itnim) { bfa_trc(itnim->fcs, itnim->rport->pid); switch (bfa_sm_to_state(itnim_sm_table, itnim->sm)) { case BFA_ITNIM_ONLINE: case BFA_ITNIM_INITIATIOR: return BFA_STATUS_OK; default: return BFA_STATUS_NO_FCPIM_NEXUS; } } /* * BFA completion callback for bfa_itnim_online(). */ void bfa_cb_itnim_online(void *cbarg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cbarg; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE); } /* * BFA completion callback for bfa_itnim_offline(). */ void bfa_cb_itnim_offline(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE); } /* * Mark the beginning of PATH TOV handling. IO completion callbacks * are still pending. */ void bfa_cb_itnim_tov_begin(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; bfa_trc(itnim->fcs, itnim->rport->pwwn); } /* * Mark the end of PATH TOV handling. All pending IOs are already cleaned up. */ void bfa_cb_itnim_tov(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; struct bfad_itnim_s *itnim_drv = itnim->itnim_drv; bfa_trc(itnim->fcs, itnim->rport->pwwn); itnim_drv->state = ITNIM_STATE_TIMEOUT; } /* * BFA notification to FCS/driver for second level error recovery. * * Atleast one I/O request has timedout and target is unresponsive to * repeated abort requests. Second level error recovery should be initiated * by starting implicit logout and recovery procedures. 
*/ void bfa_cb_itnim_sler(void *cb_arg) { struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *) cb_arg; itnim->stats.sler++; bfa_trc(itnim->fcs, itnim->rport->pwwn); bfa_sm_send_event(itnim->rport, RPSM_EVENT_LOGO_IMP); } struct bfa_fcs_itnim_s * bfa_fcs_itnim_lookup(struct bfa_fcs_lport_s *port, wwn_t rpwwn) { struct bfa_fcs_rport_s *rport; rport = bfa_fcs_rport_lookup(port, rpwwn); if (!rport) return NULL; WARN_ON(rport->itnim == NULL); return rport->itnim; } bfa_status_t bfa_fcs_itnim_attr_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, struct bfa_itnim_attr_s *attr) { struct bfa_fcs_itnim_s *itnim = NULL; itnim = bfa_fcs_itnim_lookup(port, rpwwn); if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; attr->state = bfa_sm_to_state(itnim_sm_table, itnim->sm); attr->retry = itnim->seq_rec; attr->rec_support = itnim->rec_support; attr->conf_comp = itnim->conf_comp; attr->task_retry_id = itnim->task_retry_id; return BFA_STATUS_OK; } bfa_status_t bfa_fcs_itnim_stats_get(struct bfa_fcs_lport_s *port, wwn_t rpwwn, struct bfa_itnim_stats_s *stats) { struct bfa_fcs_itnim_s *itnim = NULL; WARN_ON(port == NULL); itnim = bfa_fcs_itnim_lookup(port, rpwwn); if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s)); return BFA_STATUS_OK; } bfa_status_t bfa_fcs_itnim_stats_clear(struct bfa_fcs_lport_s *port, wwn_t rpwwn) { struct bfa_fcs_itnim_s *itnim = NULL; WARN_ON(port == NULL); itnim = bfa_fcs_itnim_lookup(port, rpwwn); if (itnim == NULL) return BFA_STATUS_NO_FCPIM_NEXUS; memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s)); return BFA_STATUS_OK; } void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs, u16 len) { struct fc_els_cmd_s *els_cmd; bfa_trc(itnim->fcs, fchs->type); if (fchs->type != FC_TYPE_ELS) return; els_cmd = (struct fc_els_cmd_s *) (fchs + 1); bfa_trc(itnim->fcs, els_cmd->els_code); switch (els_cmd->els_code) { case FC_ELS_PRLO: bfa_fcs_rport_prlo(itnim->rport, 
fchs->ox_id); break; default: WARN_ON(1); } }
gpl-2.0
RenderBroken/Victara-CM-kernel
arch/ia64/kernel/setup.c
4541
29569
/* * Architecture-specific setup. * * Copyright (C) 1998-2001, 2003-2004 Hewlett-Packard Co * David Mosberger-Tang <davidm@hpl.hp.com> * Stephane Eranian <eranian@hpl.hp.com> * Copyright (C) 2000, 2004 Intel Corp * Rohit Seth <rohit.seth@intel.com> * Suresh Siddha <suresh.b.siddha@intel.com> * Gordon Jin <gordon.jin@intel.com> * Copyright (C) 1999 VA Linux Systems * Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * * 12/26/04 S.Siddha, G.Jin, R.Seth * Add multi-threading and multi-core detection * 11/12/01 D.Mosberger Convert get_cpuinfo() to seq_file based show_cpuinfo(). * 04/04/00 D.Mosberger renamed cpu_initialized to cpu_online_map * 03/31/00 R.Seth cpu_initialized and current->processor fixes * 02/04/00 D.Mosberger some more get_cpuinfo fixes... * 02/01/00 R.Seth fixed get_cpuinfo for SMP * 01/07/99 S.Eranian added the support for command line argument * 06/24/99 W.Drummond added boot_cpu_data. * 05/28/05 Z. Menyhart Dynamic stride size for "flush_icache_range()" */ #include <linux/module.h> #include <linux/init.h> #include <linux/acpi.h> #include <linux/bootmem.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/threads.h> #include <linux/screen_info.h> #include <linux/dmi.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/efi.h> #include <linux/initrd.h> #include <linux/pm.h> #include <linux/cpufreq.h> #include <linux/kexec.h> #include <linux/crash_dump.h> #include <asm/machvec.h> #include <asm/mca.h> #include <asm/meminit.h> #include <asm/page.h> #include <asm/paravirt.h> #include <asm/paravirt_patch.h> #include <asm/patch.h> #include <asm/pgtable.h> #include <asm/processor.h> #include <asm/sal.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/smp.h> #include <asm/tlbflush.h> #include <asm/unistd.h> #include <asm/hpsim.h> #if defined(CONFIG_SMP) && 
(IA64_CPU_SIZE > PAGE_SIZE) # error "struct cpuinfo_ia64 too big!" #endif #ifdef CONFIG_SMP unsigned long __per_cpu_offset[NR_CPUS]; EXPORT_SYMBOL(__per_cpu_offset); #endif DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info); DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); unsigned long ia64_cycles_per_usec; struct ia64_boot_param *ia64_boot_param; struct screen_info screen_info; unsigned long vga_console_iobase; unsigned long vga_console_membase; static struct resource data_resource = { .name = "Kernel data", .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static struct resource code_resource = { .name = "Kernel code", .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; static struct resource bss_resource = { .name = "Kernel bss", .flags = IORESOURCE_BUSY | IORESOURCE_MEM }; unsigned long ia64_max_cacheline_size; unsigned long ia64_iobase; /* virtual address for I/O accesses */ EXPORT_SYMBOL(ia64_iobase); struct io_space io_space[MAX_IO_SPACES]; EXPORT_SYMBOL(io_space); unsigned int num_io_spaces; /* * "flush_icache_range()" needs to know what processor dependent stride size to use * when it makes i-cache(s) coherent with d-caches. */ #define I_CACHE_STRIDE_SHIFT 5 /* Safest way to go: 32 bytes by 32 bytes */ unsigned long ia64_i_cache_stride_shift = ~0; /* * "clflush_cache_range()" needs to know what processor dependent stride size to * use when it flushes cache lines including both d-cache and i-cache. */ /* Safest way to go: 32 bytes by 32 bytes */ #define CACHE_STRIDE_SHIFT 5 unsigned long ia64_cache_stride_shift = ~0; /* * The merge_mask variable needs to be set to (max(iommu_page_size(iommu)) - 1). This * mask specifies a mask of address bits that must be 0 in order for two buffers to be * mergeable by the I/O MMU (i.e., the end address of the first buffer and the start * address of the second buffer must be aligned to (merge_mask+1) in order to be * mergeable). 
By default, we assume there is no I/O MMU which can merge physically * discontiguous buffers, so we set the merge_mask to ~0UL, which corresponds to a iommu * page-size of 2^64. */ unsigned long ia64_max_iommu_merge_mask = ~0UL; EXPORT_SYMBOL(ia64_max_iommu_merge_mask); /* * We use a special marker for the end of memory and it uses the extra (+1) slot */ struct rsvd_region rsvd_region[IA64_MAX_RSVD_REGIONS + 1] __initdata; int num_rsvd_regions __initdata; /* * Filter incoming memory segments based on the primitive map created from the boot * parameters. Segments contained in the map are removed from the memory ranges. A * caller-specified function is called with the memory ranges that remain after filtering. * This routine does not assume the incoming segments are sorted. */ int __init filter_rsvd_memory (u64 start, u64 end, void *arg) { u64 range_start, range_end, prev_start; void (*func)(unsigned long, unsigned long, int); int i; #if IGNORE_PFN0 if (start == PAGE_OFFSET) { printk(KERN_WARNING "warning: skipping physical page 0\n"); start += PAGE_SIZE; if (start >= end) return 0; } #endif /* * lowest possible address(walker uses virtual) */ prev_start = PAGE_OFFSET; func = arg; for (i = 0; i < num_rsvd_regions; ++i) { range_start = max(start, prev_start); range_end = min(end, rsvd_region[i].start); if (range_start < range_end) call_pernode_memory(__pa(range_start), range_end - range_start, func); /* nothing more available in this segment */ if (range_end == end) return 0; prev_start = rsvd_region[i].end; } /* end of memory marker allows full processing inside loop body */ return 0; } /* * Similar to "filter_rsvd_memory()", but the reserved memory ranges * are not filtered out. 
*/ int __init filter_memory(u64 start, u64 end, void *arg) { void (*func)(unsigned long, unsigned long, int); #if IGNORE_PFN0 if (start == PAGE_OFFSET) { printk(KERN_WARNING "warning: skipping physical page 0\n"); start += PAGE_SIZE; if (start >= end) return 0; } #endif func = arg; if (start < end) call_pernode_memory(__pa(start), end - start, func); return 0; } static void __init sort_regions (struct rsvd_region *rsvd_region, int max) { int j; /* simple bubble sorting */ while (max--) { for (j = 0; j < max; ++j) { if (rsvd_region[j].start > rsvd_region[j+1].start) { struct rsvd_region tmp; tmp = rsvd_region[j]; rsvd_region[j] = rsvd_region[j + 1]; rsvd_region[j + 1] = tmp; } } } } /* merge overlaps */ static int __init merge_regions (struct rsvd_region *rsvd_region, int max) { int i; for (i = 1; i < max; ++i) { if (rsvd_region[i].start >= rsvd_region[i-1].end) continue; if (rsvd_region[i].end > rsvd_region[i-1].end) rsvd_region[i-1].end = rsvd_region[i].end; --max; memmove(&rsvd_region[i], &rsvd_region[i+1], (max - i) * sizeof(struct rsvd_region)); } return max; } /* * Request address space for all standard resources */ static int __init register_memory(void) { code_resource.start = ia64_tpa(_text); code_resource.end = ia64_tpa(_etext) - 1; data_resource.start = ia64_tpa(_etext); data_resource.end = ia64_tpa(_edata) - 1; bss_resource.start = ia64_tpa(__bss_start); bss_resource.end = ia64_tpa(_end) - 1; efi_initialize_iomem_resources(&code_resource, &data_resource, &bss_resource); return 0; } __initcall(register_memory); #ifdef CONFIG_KEXEC /* * This function checks if the reserved crashkernel is allowed on the specific * IA64 machine flavour. Machines without an IO TLB use swiotlb and require * some memory below 4 GB (i.e. in 32 bit area), see the implementation of * lib/swiotlb.c. The hpzx1 architecture has an IO TLB but cannot use that * in kdump case. See the comment in sba_init() in sba_iommu.c. 
* * So, the only machvec that really supports loading the kdump kernel * over 4 GB is "sn2". */ static int __init check_crashkernel_memory(unsigned long pbase, size_t size) { if (ia64_platform_is("sn2") || ia64_platform_is("uv")) return 1; else return pbase < (1UL << 32); } static void __init setup_crashkernel(unsigned long total, int *n) { unsigned long long base = 0, size = 0; int ret; ret = parse_crashkernel(boot_command_line, total, &size, &base); if (ret == 0 && size > 0) { if (!base) { sort_regions(rsvd_region, *n); *n = merge_regions(rsvd_region, *n); base = kdump_find_rsvd_region(size, rsvd_region, *n); } if (!check_crashkernel_memory(base, size)) { pr_warning("crashkernel: There would be kdump memory " "at %ld GB but this is unusable because it " "must\nbe below 4 GB. Change the memory " "configuration of the machine.\n", (unsigned long)(base >> 30)); return; } if (base != ~0UL) { printk(KERN_INFO "Reserving %ldMB of memory at %ldMB " "for crashkernel (System RAM: %ldMB)\n", (unsigned long)(size >> 20), (unsigned long)(base >> 20), (unsigned long)(total >> 20)); rsvd_region[*n].start = (unsigned long)__va(base); rsvd_region[*n].end = (unsigned long)__va(base + size); (*n)++; crashk_res.start = base; crashk_res.end = base + size - 1; } } efi_memmap_res.start = ia64_boot_param->efi_memmap; efi_memmap_res.end = efi_memmap_res.start + ia64_boot_param->efi_memmap_size; boot_param_res.start = __pa(ia64_boot_param); boot_param_res.end = boot_param_res.start + sizeof(*ia64_boot_param); } #else static inline void __init setup_crashkernel(unsigned long total, int *n) {} #endif /** * reserve_memory - setup reserved memory areas * * Setup the reserved memory areas set aside for the boot parameters, * initrd, etc. There are currently %IA64_MAX_RSVD_REGIONS defined, * see arch/ia64/include/asm/meminit.h if you need to define more. 
*/ void __init reserve_memory (void) { int n = 0; unsigned long total_memory; /* * none of the entries in this table overlap */ rsvd_region[n].start = (unsigned long) ia64_boot_param; rsvd_region[n].end = rsvd_region[n].start + sizeof(*ia64_boot_param); n++; rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->efi_memmap); rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->efi_memmap_size; n++; rsvd_region[n].start = (unsigned long) __va(ia64_boot_param->command_line); rsvd_region[n].end = (rsvd_region[n].start + strlen(__va(ia64_boot_param->command_line)) + 1); n++; rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START); rsvd_region[n].end = (unsigned long) ia64_imva(_end); n++; n += paravirt_reserve_memory(&rsvd_region[n]); #ifdef CONFIG_BLK_DEV_INITRD if (ia64_boot_param->initrd_start) { rsvd_region[n].start = (unsigned long)__va(ia64_boot_param->initrd_start); rsvd_region[n].end = rsvd_region[n].start + ia64_boot_param->initrd_size; n++; } #endif #ifdef CONFIG_CRASH_DUMP if (reserve_elfcorehdr(&rsvd_region[n].start, &rsvd_region[n].end) == 0) n++; #endif total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); n++; setup_crashkernel(total_memory, &n); /* end of memory marker */ rsvd_region[n].start = ~0UL; rsvd_region[n].end = ~0UL; n++; num_rsvd_regions = n; BUG_ON(IA64_MAX_RSVD_REGIONS + 1 < n); sort_regions(rsvd_region, num_rsvd_regions); num_rsvd_regions = merge_regions(rsvd_region, num_rsvd_regions); } /** * find_initrd - get initrd parameters from the boot parameter structure * * Grab the initrd start and end from the boot parameter struct given us by * the boot loader. 
*/
void __init
find_initrd (void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	if (ia64_boot_param->initrd_start) {
		/* convert the boot loader's physical addresses to
		 * kernel virtual addresses */
		initrd_start = (unsigned long)__va(ia64_boot_param->initrd_start);
		initrd_end = initrd_start+ia64_boot_param->initrd_size;

		printk(KERN_INFO "Initial ramdisk at: 0x%lx (%llu bytes)\n",
		       initrd_start, ia64_boot_param->initrd_size);
	}
#endif
}

/*
 * Determine the I/O port base from the EFI memory map (falling back to
 * the firmware value in ar.k0), map it, publish it in ar.k0 for user
 * level, and set up the legacy I/O port space descriptor.
 */
static void __init
io_port_init (void)
{
	unsigned long phys_iobase;

	/*
	 * Set `iobase' based on the EFI memory map or, failing that, the
	 * value firmware left in ar.k0.
	 *
	 * Note that in ia32 mode, IN/OUT instructions use ar.k0 to compute
	 * the port's virtual address, so ia32_load_state() loads it with a
	 * user virtual address.  But in ia64 mode, glibc uses the
	 * *physical* address in ar.k0 to mmap the appropriate area from
	 * /dev/mem, and the inX()/outX() interfaces use MMIO.  In both
	 * cases, user-mode can only use the legacy 0-64K I/O port space.
	 *
	 * ar.k0 is not involved in kernel I/O port accesses, which can use
	 * any of the I/O port spaces and are done via MMIO using the
	 * virtual mmio_base from the appropriate io_space[].
	 */
	phys_iobase = efi_get_iobase();
	if (!phys_iobase) {
		phys_iobase = ia64_get_kr(IA64_KR_IO_BASE);
		printk(KERN_INFO "No I/O port range found in EFI memory map, "
			"falling back to AR.KR0 (0x%lx)\n", phys_iobase);
	}
	ia64_iobase = (unsigned long) ioremap(phys_iobase, 0);
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));

	/* setup legacy IO port space */
	io_space[0].mmio_base = ia64_iobase;
	io_space[0].sparse = 1;
	num_io_spaces = 1;
}

/**
 * early_console_setup - setup debugging console
 *
 * Consoles started here require little enough setup that we can start
 * using them very early in the boot process, either right after the
 * machine vector initialization, or even before if the drivers can
 * detect their hw.
 *
 * Returns non-zero if a console couldn't be setup.
*/
static inline int __init
early_console_setup (char *cmdline)
{
	int earlycons = 0;

#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
	{
		extern int sn_serial_console_early_setup(void);
		if (!sn_serial_console_early_setup())
			earlycons++;
	}
#endif
#ifdef CONFIG_EFI_PCDP
	if (!efi_setup_pcdp_console(cmdline))
		earlycons++;
#endif
	if (!simcons_register())
		earlycons++;

	/* 0 when at least one early console registered, -1 otherwise */
	return (earlycons) ? 0 : -1;
}

static inline void
mark_bsp_online (void)
{
#ifdef CONFIG_SMP
	/* If we register an early console, allow CPU 0 to printk */
	set_cpu_online(smp_processor_id(), true);
#endif
}

/* "nomca" boot option: disable machine-check abort handling */
static __initdata int nomca;
static __init int setup_nomca(char *s)
{
	nomca = 1;
	return 0;
}
early_param("nomca", setup_nomca);

#ifdef CONFIG_CRASH_DUMP
/*
 * Compute the reservation range for the crashed kernel's ELF core
 * header.  Returns 0 and fills *start/*end on success, -EINVAL when no
 * usable vmcore is present.
 */
int __init reserve_elfcorehdr(u64 *start, u64 *end)
{
	u64 length;

	/* We get the address using the kernel command line,
	 * but the size is extracted from the EFI tables.
	 * Both address and size are required for reservation
	 * to work properly.
	 */

	if (!is_vmcore_usable())
		return -EINVAL;

	if ((length = vmcore_find_descriptor_size(elfcorehdr_addr)) == 0) {
		vmcore_unusable();
		return -EINVAL;
	}

	*start = (unsigned long)__va(elfcorehdr_addr);
	*end = *start + length;
	return 0;
}
#endif /* CONFIG_CRASH_DUMP */

void __init
setup_arch (char **cmdline_p)
{
	unw_init();

	paravirt_arch_setup_early();

	/* patch physical/virtual address translation stubs */
	ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
	paravirt_patch_apply();

	*cmdline_p = __va(ia64_boot_param->command_line);
	strlcpy(boot_command_line, *cmdline_p, COMMAND_LINE_SIZE);

	efi_init();
	io_port_init();

#ifdef CONFIG_IA64_GENERIC
	/* machvec needs to be parsed from the command line
	 * before parse_early_param() is called to ensure
	 * that ia64_mv is initialised before any command line
	 * settings may cause console setup to occur
	 */
	machvec_init_from_cmdline(*cmdline_p);
#endif

	parse_early_param();

	if (early_console_setup(*cmdline_p) == 0)
		mark_bsp_online();

#ifdef CONFIG_ACPI
	/* Initialize the ACPI boot-time table parser */
	acpi_table_init();
early_acpi_boot_init(); # ifdef CONFIG_ACPI_NUMA acpi_numa_init(); # ifdef CONFIG_ACPI_HOTPLUG_CPU prefill_possible_map(); # endif per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? 32 : cpus_weight(early_cpu_possible_map)), additional_cpus > 0 ? additional_cpus : 0); # endif #endif /* CONFIG_APCI_BOOT */ #ifdef CONFIG_SMP smp_build_cpu_map(); #endif find_memory(); /* process SAL system table: */ ia64_sal_init(__va(efi.sal_systab)); #ifdef CONFIG_ITANIUM ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); #else { unsigned long num_phys_stacked; if (ia64_pal_rse_info(&num_phys_stacked, 0) == 0 && num_phys_stacked > 96) ia64_patch_rse((u64) __start___rse_patchlist, (u64) __end___rse_patchlist); } #endif #ifdef CONFIG_SMP cpu_physical_id(0) = hard_smp_processor_id(); #endif cpu_init(); /* initialize the bootstrap CPU */ mmu_context_init(); /* initialize context_id bitmap */ paravirt_banner(); paravirt_arch_setup_console(cmdline_p); #ifdef CONFIG_VT if (!conswitchp) { # if defined(CONFIG_DUMMY_CONSOLE) conswitchp = &dummy_con; # endif # if defined(CONFIG_VGA_CONSOLE) /* * Non-legacy systems may route legacy VGA MMIO range to system * memory. vga_con probes the MMIO hole, so memory looks like * a VGA device to it. The EFI memory map can tell us if it's * memory so we can avoid this problem. */ if (efi_mem_type(0xA0000) != EFI_CONVENTIONAL_MEMORY) conswitchp = &vga_con; # endif } #endif /* enable IA-64 Machine Check Abort Handling unless disabled */ if (paravirt_arch_setup_nomca()) nomca = 1; if (!nomca) ia64_mca_init(); platform_setup(cmdline_p); #ifndef CONFIG_IA64_HP_SIM check_sal_cache_flush(); #endif paging_init(); } /* * Display cpu info for all CPUs. 
*/ static int show_cpuinfo (struct seq_file *m, void *v) { #ifdef CONFIG_SMP # define lpj c->loops_per_jiffy # define cpunum c->cpu #else # define lpj loops_per_jiffy # define cpunum 0 #endif static struct { unsigned long mask; const char *feature_name; } feature_bits[] = { { 1UL << 0, "branchlong" }, { 1UL << 1, "spontaneous deferral"}, { 1UL << 2, "16-byte atomic ops" } }; char features[128], *cp, *sep; struct cpuinfo_ia64 *c = v; unsigned long mask; unsigned long proc_freq; int i, size; mask = c->features; /* build the feature string: */ memcpy(features, "standard", 9); cp = features; size = sizeof(features); sep = ""; for (i = 0; i < ARRAY_SIZE(feature_bits) && size > 1; ++i) { if (mask & feature_bits[i].mask) { cp += snprintf(cp, size, "%s%s", sep, feature_bits[i].feature_name), sep = ", "; mask &= ~feature_bits[i].mask; size = sizeof(features) - (cp - features); } } if (mask && size > 1) { /* print unknown features as a hex value */ snprintf(cp, size, "%s0x%lx", sep, mask); } proc_freq = cpufreq_quick_get(cpunum); if (!proc_freq) proc_freq = c->proc_freq / 1000; seq_printf(m, "processor : %d\n" "vendor : %s\n" "arch : IA-64\n" "family : %u\n" "model : %u\n" "model name : %s\n" "revision : %u\n" "archrev : %u\n" "features : %s\n" "cpu number : %lu\n" "cpu regs : %u\n" "cpu MHz : %lu.%03lu\n" "itc MHz : %lu.%06lu\n" "BogoMIPS : %lu.%02lu\n", cpunum, c->vendor, c->family, c->model, c->model_name, c->revision, c->archrev, features, c->ppn, c->number, proc_freq / 1000, proc_freq % 1000, c->itc_freq / 1000000, c->itc_freq % 1000000, lpj*HZ/500000, (lpj*HZ/5000) % 100); #ifdef CONFIG_SMP seq_printf(m, "siblings : %u\n", cpus_weight(cpu_core_map[cpunum])); if (c->socket_id != -1) seq_printf(m, "physical id: %u\n", c->socket_id); if (c->threads_per_core > 1 || c->cores_per_socket > 1) seq_printf(m, "core id : %u\n" "thread id : %u\n", c->core_id, c->thread_id); #endif seq_printf(m,"\n"); return 0; } static void * c_start (struct seq_file *m, loff_t *pos) { #ifdef 
CONFIG_SMP while (*pos < nr_cpu_ids && !cpu_online(*pos)) ++*pos; #endif return *pos < nr_cpu_ids ? cpu_data(*pos) : NULL; } static void * c_next (struct seq_file *m, void *v, loff_t *pos) { ++*pos; return c_start(m, pos); } static void c_stop (struct seq_file *m, void *v) { } const struct seq_operations cpuinfo_op = { .start = c_start, .next = c_next, .stop = c_stop, .show = show_cpuinfo }; #define MAX_BRANDS 8 static char brandname[MAX_BRANDS][128]; static char * __cpuinit get_model_name(__u8 family, __u8 model) { static int overflow; char brand[128]; int i; memcpy(brand, "Unknown", 8); if (ia64_pal_get_brand_info(brand)) { if (family == 0x7) memcpy(brand, "Merced", 7); else if (family == 0x1f) switch (model) { case 0: memcpy(brand, "McKinley", 9); break; case 1: memcpy(brand, "Madison", 8); break; case 2: memcpy(brand, "Madison up to 9M cache", 23); break; } } for (i = 0; i < MAX_BRANDS; i++) if (strcmp(brandname[i], brand) == 0) return brandname[i]; for (i = 0; i < MAX_BRANDS; i++) if (brandname[i][0] == '\0') return strcpy(brandname[i], brand); if (overflow++ == 0) printk(KERN_ERR "%s: Table overflow. 
Some processor model information will be missing\n", __func__); return "Unknown"; } static void __cpuinit identify_cpu (struct cpuinfo_ia64 *c) { union { unsigned long bits[5]; struct { /* id 0 & 1: */ char vendor[16]; /* id 2 */ u64 ppn; /* processor serial number */ /* id 3: */ unsigned number : 8; unsigned revision : 8; unsigned model : 8; unsigned family : 8; unsigned archrev : 8; unsigned reserved : 24; /* id 4: */ u64 features; } field; } cpuid; pal_vm_info_1_u_t vm1; pal_vm_info_2_u_t vm2; pal_status_t status; unsigned long impl_va_msb = 50, phys_addr_size = 44; /* Itanium defaults */ int i; for (i = 0; i < 5; ++i) cpuid.bits[i] = ia64_get_cpuid(i); memcpy(c->vendor, cpuid.field.vendor, 16); #ifdef CONFIG_SMP c->cpu = smp_processor_id(); /* below default values will be overwritten by identify_siblings() * for Multi-Threading/Multi-Core capable CPUs */ c->threads_per_core = c->cores_per_socket = c->num_log = 1; c->socket_id = -1; identify_siblings(c); if (c->threads_per_core > smp_num_siblings) smp_num_siblings = c->threads_per_core; #endif c->ppn = cpuid.field.ppn; c->number = cpuid.field.number; c->revision = cpuid.field.revision; c->model = cpuid.field.model; c->family = cpuid.field.family; c->archrev = cpuid.field.archrev; c->features = cpuid.field.features; c->model_name = get_model_name(c->family, c->model); status = ia64_pal_vm_summary(&vm1, &vm2); if (status == PAL_STATUS_SUCCESS) { impl_va_msb = vm2.pal_vm_info_2_s.impl_va_msb; phys_addr_size = vm1.pal_vm_info_1_s.phys_add_size; } c->unimpl_va_mask = ~((7L<<61) | ((1L << (impl_va_msb + 1)) - 1)); c->unimpl_pa_mask = ~((1L<<63) | ((1L << phys_addr_size) - 1)); } /* * Do the following calculations: * * 1. the max. cache line size. * 2. the minimum of the i-cache stride sizes for "flush_icache_range()". * 3. the minimum of the cache stride sizes for "clflush_cache_range()". 
*/
static void __cpuinit
get_cache_info(void)
{
	unsigned long line_size, max = 1;
	unsigned long l, levels, unique_caches;
	pal_cache_config_info_t cci;
	long status;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		/* PAL query failed: fall back to conservative defaults */
		printk(KERN_ERR "%s: ia64_pal_cache_summary() failed (status=%ld)\n",
		       __func__, status);
		max = SMP_CACHE_BYTES;
		/* Safest setup for "flush_icache_range()" */
		ia64_i_cache_stride_shift = I_CACHE_STRIDE_SHIFT;
		/* Safest setup for "clflush_cache_range()" */
		ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
		goto out;
	}

	for (l = 0; l < levels; ++l) {
		/* cache_type (data_or_unified)=2 */
		status = ia64_pal_cache_config_info(l, 2, &cci);
		if (status != 0) {
			printk(KERN_ERR "%s: ia64_pal_cache_config_info"
				"(l=%lu, 2) failed (status=%ld)\n",
				__func__, l, status);
			max = SMP_CACHE_BYTES;
			/* The safest setup for "flush_icache_range()" */
			cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			/* The safest setup for "clflush_cache_range()" */
			ia64_cache_stride_shift = CACHE_STRIDE_SHIFT;
			cci.pcci_unified = 1;
		} else {
			/* track the minimum stride over all data/unified levels */
			if (cci.pcci_stride < ia64_cache_stride_shift)
				ia64_cache_stride_shift = cci.pcci_stride;

			line_size = 1 << cci.pcci_line_size;
			if (line_size > max)
				max = line_size;
		}

		if (!cci.pcci_unified) {
			/* cache_type (instruction)=1*/
			status = ia64_pal_cache_config_info(l, 1, &cci);
			if (status != 0) {
				printk(KERN_ERR "%s: ia64_pal_cache_config_info"
					"(l=%lu, 1) failed (status=%ld)\n",
					__func__, l, status);
				/* The safest setup for flush_icache_range() */
				cci.pcci_stride = I_CACHE_STRIDE_SHIFT;
			}
		}
		/* minimum i-cache stride over all levels */
		if (cci.pcci_stride < ia64_i_cache_stride_shift)
			ia64_i_cache_stride_shift = cci.pcci_stride;
	}
  out:
	if (max > ia64_max_cacheline_size)
		ia64_max_cacheline_size = max;
}

/*
 * cpu_init() initializes state that is per-CPU.  This function acts
 * as a 'CPU state barrier', nothing should get across.
*/ void __cpuinit cpu_init (void) { extern void __cpuinit ia64_mmu_init (void *); static unsigned long max_num_phys_stacked = IA64_NUM_PHYS_STACK_REG; unsigned long num_phys_stacked; pal_vm_info_2_u_t vmi; unsigned int max_ctx; struct cpuinfo_ia64 *cpu_info; void *cpu_data; cpu_data = per_cpu_init(); #ifdef CONFIG_SMP /* * insert boot cpu into sibling and core mapes * (must be done after per_cpu area is setup) */ if (smp_processor_id() == 0) { cpu_set(0, per_cpu(cpu_sibling_map, 0)); cpu_set(0, cpu_core_map[0]); } else { /* * Set ar.k3 so that assembly code in MCA handler can compute * physical addresses of per cpu variables with a simple: * phys = ar.k3 + &per_cpu_var * and the alt-dtlb-miss handler can set per-cpu mapping into * the TLB when needed. head.S already did this for cpu0. */ ia64_set_kr(IA64_KR_PER_CPU_DATA, ia64_tpa(cpu_data) - (long) __per_cpu_start); } #endif get_cache_info(); /* * We can't pass "local_cpu_data" to identify_cpu() because we haven't called * ia64_mmu_init() yet. And we can't call ia64_mmu_init() first because it * depends on the data returned by identify_cpu(). We break the dependency by * accessing cpu_data() through the canonical per-CPU address. */ cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start); identify_cpu(cpu_info); #ifdef CONFIG_MCKINLEY { # define FEATURE_SET 16 struct ia64_pal_retval iprv; if (cpu_info->family == 0x1f) { PAL_CALL_PHYS(iprv, PAL_PROC_GET_FEATURES, 0, FEATURE_SET, 0); if ((iprv.status == 0) && (iprv.v0 & 0x80) && (iprv.v2 & 0x80)) PAL_CALL_PHYS(iprv, PAL_PROC_SET_FEATURES, (iprv.v1 | 0x80), FEATURE_SET, 0); } } #endif /* Clear the stack memory reserved for pt_regs: */ memset(task_pt_regs(current), 0, sizeof(struct pt_regs)); ia64_set_kr(IA64_KR_FPU_OWNER, 0); /* * Initialize the page-table base register to a global * directory with all zeroes. This ensure that we can handle * TLB-misses to user address-space even before we created the * first user address-space. 
This may happen, e.g., due to * aggressive use of lfetch.fault. */ ia64_set_kr(IA64_KR_PT_BASE, __pa(ia64_imva(empty_zero_page))); /* * Initialize default control register to defer speculative faults except * for those arising from TLB misses, which are not deferred. The * kernel MUST NOT depend on a particular setting of these bits (in other words, * the kernel must have recovery code for all speculative accesses). Turn on * dcr.lc as per recommendation by the architecture team. Most IA-32 apps * shouldn't be affected by this (moral: keep your ia32 locks aligned and you'll * be fine). */ ia64_setreg(_IA64_REG_CR_DCR, ( IA64_DCR_DP | IA64_DCR_DK | IA64_DCR_DX | IA64_DCR_DR | IA64_DCR_DA | IA64_DCR_DD | IA64_DCR_LC)); atomic_inc(&init_mm.mm_count); current->active_mm = &init_mm; BUG_ON(current->mm); ia64_mmu_init(ia64_imva(cpu_data)); ia64_mca_cpu_init(ia64_imva(cpu_data)); /* Clear ITC to eliminate sched_clock() overflows in human time. */ ia64_set_itc(0); /* disable all local interrupt sources: */ ia64_set_itv(1 << 16); ia64_set_lrr0(1 << 16); ia64_set_lrr1(1 << 16); ia64_setreg(_IA64_REG_CR_PMV, 1 << 16); ia64_setreg(_IA64_REG_CR_CMCV, 1 << 16); /* clear TPR & XTP to enable all interrupt classes: */ ia64_setreg(_IA64_REG_CR_TPR, 0); /* Clear any pending interrupts left by SAL/EFI */ while (ia64_get_ivr() != IA64_SPURIOUS_INT_VECTOR) ia64_eoi(); #ifdef CONFIG_SMP normal_xtp(); #endif /* set ia64_ctx.max_rid to the maximum RID that is supported by all CPUs: */ if (ia64_pal_vm_summary(NULL, &vmi) == 0) { max_ctx = (1U << (vmi.pal_vm_info_2_s.rid_size - 3)) - 1; setup_ptcg_sem(vmi.pal_vm_info_2_s.max_purges, NPTCG_FROM_PAL); } else { printk(KERN_WARNING "cpu_init: PAL VM summary failed, assuming 18 RID bits\n"); max_ctx = (1U << 15) - 1; /* use architected minimum */ } while (max_ctx < ia64_ctx.max_ctx) { unsigned int old = ia64_ctx.max_ctx; if (cmpxchg(&ia64_ctx.max_ctx, old, max_ctx) == old) break; } if (ia64_pal_rse_info(&num_phys_stacked, NULL) != 0) { 
printk(KERN_WARNING "cpu_init: PAL RSE info failed; assuming 96 physical " "stacked regs\n"); num_phys_stacked = 96; } /* size of physical stacked register partition plus 8 bytes: */ if (num_phys_stacked > max_num_phys_stacked) { ia64_patch_phys_stack_reg(num_phys_stacked*8 + 8); max_num_phys_stacked = num_phys_stacked; } platform_cpu_init(); pm_idle = default_idle; } void __init check_bugs (void) { ia64_patch_mckinley_e9((unsigned long) __start___mckinley_e9_bundles, (unsigned long) __end___mckinley_e9_bundles); } static int __init run_dmi_scan(void) { dmi_scan_machine(); return 0; } core_initcall(run_dmi_scan);
gpl-2.0
peat-psuwit/android_kernel_lge_w7ds
drivers/mmc/host/davinci_mmc.c
4797
42925
/* * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver * * Copyright (C) 2006 Texas Instruments. * Original author: Purushotam Kumar * Copyright (C) 2009 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/cpufreq.h> #include <linux/mmc/host.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/mmc/mmc.h> #include <mach/mmc.h> #include <mach/edma.h> /* * Register Definitions */ #define DAVINCI_MMCCTL 0x00 /* Control Register */ #define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */ #define DAVINCI_MMCST0 0x08 /* Status Register 0 */ #define DAVINCI_MMCST1 0x0C /* Status Register 1 */ #define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */ #define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */ #define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */ #define DAVINCI_MMCBLEN 0x1C /* Block Length Register */ #define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */ #define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */ #define DAVINCI_MMCDRR 0x28 /* Data Receive Register */ #define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */ #define DAVINCI_MMCCMD 0x30 /* Command Register */ 
#define DAVINCI_MMCARGHL 0x34 /* Argument Register */ #define DAVINCI_MMCRSP01 0x38 /* Response Register 0 and 1 */ #define DAVINCI_MMCRSP23 0x3C /* Response Register 0 and 1 */ #define DAVINCI_MMCRSP45 0x40 /* Response Register 0 and 1 */ #define DAVINCI_MMCRSP67 0x44 /* Response Register 0 and 1 */ #define DAVINCI_MMCDRSP 0x48 /* Data Response Register */ #define DAVINCI_MMCETOK 0x4C #define DAVINCI_MMCCIDX 0x50 /* Command Index Register */ #define DAVINCI_MMCCKC 0x54 #define DAVINCI_MMCTORC 0x58 #define DAVINCI_MMCTODC 0x5C #define DAVINCI_MMCBLNC 0x60 #define DAVINCI_SDIOCTL 0x64 #define DAVINCI_SDIOST0 0x68 #define DAVINCI_SDIOIEN 0x6C #define DAVINCI_SDIOIST 0x70 #define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */ /* DAVINCI_MMCCTL definitions */ #define MMCCTL_DATRST (1 << 0) #define MMCCTL_CMDRST (1 << 1) #define MMCCTL_WIDTH_8_BIT (1 << 8) #define MMCCTL_WIDTH_4_BIT (1 << 2) #define MMCCTL_DATEG_DISABLED (0 << 6) #define MMCCTL_DATEG_RISING (1 << 6) #define MMCCTL_DATEG_FALLING (2 << 6) #define MMCCTL_DATEG_BOTH (3 << 6) #define MMCCTL_PERMDR_LE (0 << 9) #define MMCCTL_PERMDR_BE (1 << 9) #define MMCCTL_PERMDX_LE (0 << 10) #define MMCCTL_PERMDX_BE (1 << 10) /* DAVINCI_MMCCLK definitions */ #define MMCCLK_CLKEN (1 << 8) #define MMCCLK_CLKRT_MASK (0xFF << 0) /* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */ #define MMCST0_DATDNE BIT(0) /* data done */ #define MMCST0_BSYDNE BIT(1) /* busy done */ #define MMCST0_RSPDNE BIT(2) /* command done */ #define MMCST0_TOUTRD BIT(3) /* data read timeout */ #define MMCST0_TOUTRS BIT(4) /* command response timeout */ #define MMCST0_CRCWR BIT(5) /* data write CRC error */ #define MMCST0_CRCRD BIT(6) /* data read CRC error */ #define MMCST0_CRCRS BIT(7) /* command response CRC error */ #define MMCST0_DXRDY BIT(9) /* data transmit ready (fifo empty) */ #define MMCST0_DRRDY BIT(10) /* data receive ready (data in fifo)*/ #define MMCST0_DATED BIT(11) /* DAT3 edge detect */ #define MMCST0_TRNDNE BIT(12) /* 
transfer done */ /* DAVINCI_MMCST1 definitions */ #define MMCST1_BUSY (1 << 0) /* DAVINCI_MMCCMD definitions */ #define MMCCMD_CMD_MASK (0x3F << 0) #define MMCCMD_PPLEN (1 << 7) #define MMCCMD_BSYEXP (1 << 8) #define MMCCMD_RSPFMT_MASK (3 << 9) #define MMCCMD_RSPFMT_NONE (0 << 9) #define MMCCMD_RSPFMT_R1456 (1 << 9) #define MMCCMD_RSPFMT_R2 (2 << 9) #define MMCCMD_RSPFMT_R3 (3 << 9) #define MMCCMD_DTRW (1 << 11) #define MMCCMD_STRMTP (1 << 12) #define MMCCMD_WDATX (1 << 13) #define MMCCMD_INITCK (1 << 14) #define MMCCMD_DCLR (1 << 15) #define MMCCMD_DMATRIG (1 << 16) /* DAVINCI_MMCFIFOCTL definitions */ #define MMCFIFOCTL_FIFORST (1 << 0) #define MMCFIFOCTL_FIFODIR_WR (1 << 1) #define MMCFIFOCTL_FIFODIR_RD (0 << 1) #define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ #define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ #define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ #define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ #define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ /* DAVINCI_SDIOST0 definitions */ #define SDIOST0_DAT1_HI BIT(0) /* DAVINCI_SDIOIEN definitions */ #define SDIOIEN_IOINTEN BIT(0) /* DAVINCI_SDIOIST definitions */ #define SDIOIST_IOINT BIT(0) /* MMCSD Init clock in Hz in opendrain mode */ #define MMCSD_INIT_CLOCK 200000 /* * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only * for drivers with max_segs == 1, making the segments bigger (64KB) * than the page or two that's otherwise typical. nr_sg (passed from * platform data) == 16 gives at least the same throughput boost, using * EDMA transfer linkage instead of spending CPU time copying pages. */ #define MAX_CCNT ((1 << 16) - 1) #define MAX_NR_SG 16 static unsigned rw_threshold = 32; module_param(rw_threshold, uint, S_IRUGO); MODULE_PARM_DESC(rw_threshold, "Read/Write threshold. 
Default = 32"); static unsigned poll_threshold = 128; module_param(poll_threshold, uint, S_IRUGO); MODULE_PARM_DESC(poll_threshold, "Polling transaction size threshold. Default = 128"); static unsigned poll_loopcount = 32; module_param(poll_loopcount, uint, S_IRUGO); MODULE_PARM_DESC(poll_loopcount, "Maximum polling loop count. Default = 32"); static unsigned __initdata use_dma = 1; module_param(use_dma, uint, 0); MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); struct mmc_davinci_host { struct mmc_command *cmd; struct mmc_data *data; struct mmc_host *mmc; struct clk *clk; unsigned int mmc_input_clk; void __iomem *base; struct resource *mem_res; int mmc_irq, sdio_irq; unsigned char bus_mode; #define DAVINCI_MMC_DATADIR_NONE 0 #define DAVINCI_MMC_DATADIR_READ 1 #define DAVINCI_MMC_DATADIR_WRITE 2 unsigned char data_dir; unsigned char suspended; /* buffer is used during PIO of one scatterlist segment, and * is updated along with buffer_bytes_left. bytes_left applies * to all N blocks of the PIO transfer. */ u8 *buffer; u32 buffer_bytes_left; u32 bytes_left; u32 rxdma, txdma; bool use_dma; bool do_dma; bool sdio_int; bool active_request; /* Scatterlist DMA uses one or more parameter RAM entries: * the main one (associated with rxdma or txdma) plus zero or * more links. The entries for a given transfer differ only * by memory buffer (address, length) and link field. */ struct edmacc_param tx_template; struct edmacc_param rx_template; unsigned n_link; u32 links[MAX_NR_SG - 1]; /* For PIO we walk scatterlists one segment at a time. 
*/ unsigned int sg_len; struct scatterlist *sg; /* Version of the MMC/SD controller */ u8 version; /* for ns in one cycle calculation */ unsigned ns_in_one_cycle; /* Number of sg segments */ u8 nr_sg; #ifdef CONFIG_CPU_FREQ struct notifier_block freq_transition; #endif }; static irqreturn_t mmc_davinci_irq(int irq, void *dev_id); /* PIO only */ static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host) { host->buffer_bytes_left = sg_dma_len(host->sg); host->buffer = sg_virt(host->sg); if (host->buffer_bytes_left > host->bytes_left) host->buffer_bytes_left = host->bytes_left; } static void davinci_fifo_data_trans(struct mmc_davinci_host *host, unsigned int n) { u8 *p; unsigned int i; if (host->buffer_bytes_left == 0) { host->sg = sg_next(host->data->sg); mmc_davinci_sg_to_buf(host); } p = host->buffer; if (n > host->buffer_bytes_left) n = host->buffer_bytes_left; host->buffer_bytes_left -= n; host->bytes_left -= n; /* NOTE: we never transfer more than rw_threshold bytes * to/from the fifo here; there's no I/O overlap. * This also assumes that access width( i.e. 
ACCWD) is 4 bytes */ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { for (i = 0; i < (n >> 2); i++) { writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); p = p + 4; } if (n & 3) { iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); p = p + (n & 3); } } else { for (i = 0; i < (n >> 2); i++) { *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); p = p + 4; } if (n & 3) { ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); p = p + (n & 3); } } host->buffer = p; } static void mmc_davinci_start_command(struct mmc_davinci_host *host, struct mmc_command *cmd) { u32 cmd_reg = 0; u32 im_val; dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", cmd->opcode, cmd->arg, ({ char *s; switch (mmc_resp_type(cmd)) { case MMC_RSP_R1: s = ", R1/R5/R6/R7 response"; break; case MMC_RSP_R1B: s = ", R1b response"; break; case MMC_RSP_R2: s = ", R2 response"; break; case MMC_RSP_R3: s = ", R3/R4 response"; break; default: s = ", (R? response)"; break; }; s; })); host->cmd = cmd; switch (mmc_resp_type(cmd)) { case MMC_RSP_R1B: /* There's some spec confusion about when R1B is * allowed, but if the card doesn't issue a BUSY * then it's harmless for us to allow it. 
*/ cmd_reg |= MMCCMD_BSYEXP; /* FALLTHROUGH */ case MMC_RSP_R1: /* 48 bits, CRC */ cmd_reg |= MMCCMD_RSPFMT_R1456; break; case MMC_RSP_R2: /* 136 bits, CRC */ cmd_reg |= MMCCMD_RSPFMT_R2; break; case MMC_RSP_R3: /* 48 bits, no CRC */ cmd_reg |= MMCCMD_RSPFMT_R3; break; default: cmd_reg |= MMCCMD_RSPFMT_NONE; dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", mmc_resp_type(cmd)); break; } /* Set command index */ cmd_reg |= cmd->opcode; /* Enable EDMA transfer triggers */ if (host->do_dma) cmd_reg |= MMCCMD_DMATRIG; if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && host->data_dir == DAVINCI_MMC_DATADIR_READ) cmd_reg |= MMCCMD_DMATRIG; /* Setting whether command involves data transfer or not */ if (cmd->data) cmd_reg |= MMCCMD_WDATX; /* Setting whether stream or block transfer */ if (cmd->flags & MMC_DATA_STREAM) cmd_reg |= MMCCMD_STRMTP; /* Setting whether data read or write */ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) cmd_reg |= MMCCMD_DTRW; if (host->bus_mode == MMC_BUSMODE_PUSHPULL) cmd_reg |= MMCCMD_PPLEN; /* set Command timeout */ writel(0x1FFF, host->base + DAVINCI_MMCTOR); /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { im_val |= MMCST0_DATDNE | MMCST0_CRCWR; if (!host->do_dma) im_val |= MMCST0_DXRDY; } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; if (!host->do_dma) im_val |= MMCST0_DRRDY; } /* * Before non-DMA WRITE commands the controller needs priming: * FIFO should be populated with 32 bytes i.e. 
whatever is the FIFO size
	 */
	if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE))
		davinci_fifo_data_trans(host, rw_threshold);

	/* writing MMCCMD kicks off the command */
	writel(cmd->arg, host->base + DAVINCI_MMCARGHL);
	writel(cmd_reg, host->base + DAVINCI_MMCCMD);

	host->active_request = true;

	/* for small PIO transfers, poll for completion instead of
	 * enabling interrupts (cheaper for short transactions) */
	if (!host->do_dma && host->bytes_left <= poll_threshold) {
		u32 count = poll_loopcount;

		while (host->active_request && count--) {
			mmc_davinci_irq(0, host);
			cpu_relax();
		}
	}

	/* not finished by polling (or DMA/large transfer): use interrupts */
	if (host->active_request)
		writel(im_val, host->base + DAVINCI_MMCIM);
}

/*----------------------------------------------------------------------*/

/* DMA infrastructure */
/* Stop and clean the EDMA channel for the direction currently in use */
static void davinci_abort_dma(struct mmc_davinci_host *host)
{
	int sync_dev;

	if (host->data_dir == DAVINCI_MMC_DATADIR_READ)
		sync_dev = host->rxdma;
	else
		sync_dev = host->txdma;

	edma_stop(sync_dev);
	edma_clean_channel(sync_dev);
}

static void
mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data);

/* EDMA completion callback; only called for errors (success is silent) */
static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data)
{
	if (DMA_COMPLETE != ch_status) {
		struct mmc_davinci_host *host = data;

		/* Currently means:  DMA Event Missed, or "null" transfer
		 * request was seen.  In the future, TC errors (like bad
		 * addresses) might be presented too.
		 */
		dev_warn(mmc_dev(host->mmc), "DMA %s error\n",
			(host->data->flags & MMC_DATA_WRITE)
				? "write" : "read");
		host->data->error = -EIO;
		mmc_davinci_xfer_done(host, host->data);
	}
}

/* Set up tx or rx template, to be modified and updated later */
static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host,
		bool tx, struct edmacc_param *template)
{
	unsigned	sync_dev;
	const u16	acnt = 4;
	const u16	bcnt = rw_threshold >> 2;
	const u16	ccnt = 0;
	u32		src_port = 0;
	u32		dst_port = 0;
	s16		src_bidx, dst_bidx;
	s16		src_cidx, dst_cidx;

	/*
	 * A-B Sync transfer:  each DMA request is for one "frame" of
	 * rw_threshold bytes, broken into "acnt"-size chunks repeated
	 * "bcnt" times.
Each segment needs "ccnt" such frames; since * we tell the block layer our mmc->max_seg_size limit, we can * trust (later) that it's within bounds. * * The FIFOs are read/written in 4-byte chunks (acnt == 4) and * EDMA will optimize memory operations to use larger bursts. */ if (tx) { sync_dev = host->txdma; /* src_prt, ccnt, and link to be set up later */ src_bidx = acnt; src_cidx = acnt * bcnt; dst_port = host->mem_res->start + DAVINCI_MMCDXR; dst_bidx = 0; dst_cidx = 0; } else { sync_dev = host->rxdma; src_port = host->mem_res->start + DAVINCI_MMCDRR; src_bidx = 0; src_cidx = 0; /* dst_prt, ccnt, and link to be set up later */ dst_bidx = acnt; dst_cidx = acnt * bcnt; } /* * We can't use FIFO mode for the FIFOs because MMC FIFO addresses * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT * parameter is ignored. */ edma_set_src(sync_dev, src_port, INCR, W8BIT); edma_set_dest(sync_dev, dst_port, INCR, W8BIT); edma_set_src_index(sync_dev, src_bidx, src_cidx); edma_set_dest_index(sync_dev, dst_bidx, dst_cidx); edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC); edma_read_slot(sync_dev, template); /* don't bother with irqs or chaining */ template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12; } static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host, struct mmc_data *data) { struct edmacc_param *template; int channel, slot; unsigned link; struct scatterlist *sg; unsigned sg_len; unsigned bytes_left = host->bytes_left; const unsigned shift = ffs(rw_threshold) - 1; if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { template = &host->tx_template; channel = host->txdma; } else { template = &host->rx_template; channel = host->rxdma; } /* We know sg_len and ccnt will never be out of range because * we told the mmc layer which in turn tells the block layer * to ensure that it only hands us one scatterlist segment * per EDMA PARAM entry. Update the PARAM * entries needed for each segment of this scatterlist. 
*/ for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len; sg_len-- != 0 && bytes_left; sg = sg_next(sg), slot = host->links[link++]) { u32 buf = sg_dma_address(sg); unsigned count = sg_dma_len(sg); template->link_bcntrld = sg_len ? (EDMA_CHAN_SLOT(host->links[link]) << 5) : 0xffff; if (count > bytes_left) count = bytes_left; bytes_left -= count; if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) template->src = buf; else template->dst = buf; template->ccnt = count >> shift; edma_write_slot(slot, template); } if (host->version == MMC_CTLR_VERSION_2) edma_clear_event(channel); edma_start(channel); } static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, struct mmc_data *data) { int i; int mask = rw_threshold - 1; host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, ((data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); /* no individual DMA segment should need a partial FIFO */ for (i = 0; i < host->sg_len; i++) { if (sg_dma_len(data->sg + i) & mask) { dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, (data->flags & MMC_DATA_WRITE) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); return -1; } } host->do_dma = 1; mmc_davinci_send_dma_request(host, data); return 0; } static void __init_or_module davinci_release_dma_channels(struct mmc_davinci_host *host) { unsigned i; if (!host->use_dma) return; for (i = 0; i < host->n_link; i++) edma_free_slot(host->links[i]); edma_free_channel(host->txdma); edma_free_channel(host->rxdma); } static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) { u32 link_size; int r, i; /* Acquire master DMA write channel */ r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host, EVENTQ_DEFAULT); if (r < 0) { dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", "tx", r); return r; } mmc_davinci_dma_setup(host, true, &host->tx_template); /* Acquire master DMA read channel */ r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host, EVENTQ_DEFAULT); if (r < 0) { dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", "rx", r); goto free_master_write; } mmc_davinci_dma_setup(host, false, &host->rx_template); /* Allocate parameter RAM slots, which will later be bound to a * channel as needed to handle a scatterlist. */ link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links)); for (i = 0; i < link_size; i++) { r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); if (r < 0) { dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", r); break; } host->links[i] = r; } host->n_link = i; return 0; free_master_write: edma_free_channel(host->txdma); return r; } /*----------------------------------------------------------------------*/ static void mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) { int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; int timeout; struct mmc_data *data = req->data; if (host->version == MMC_CTLR_VERSION_2) fifo_lev = (rw_threshold == 64) ? 
MMCFIFOCTL_FIFOLEV : 0; host->data = data; if (data == NULL) { host->data_dir = DAVINCI_MMC_DATADIR_NONE; writel(0, host->base + DAVINCI_MMCBLEN); writel(0, host->base + DAVINCI_MMCNBLK); return; } dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n", (data->flags & MMC_DATA_STREAM) ? "stream" : "block", (data->flags & MMC_DATA_WRITE) ? "write" : "read", data->blocks, data->blksz); dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", data->timeout_clks, data->timeout_ns); timeout = data->timeout_clks + (data->timeout_ns / host->ns_in_one_cycle); if (timeout > 0xffff) timeout = 0xffff; writel(timeout, host->base + DAVINCI_MMCTOD); writel(data->blocks, host->base + DAVINCI_MMCNBLK); writel(data->blksz, host->base + DAVINCI_MMCBLEN); /* Configure the FIFO */ switch (data->flags & MMC_DATA_WRITE) { case MMC_DATA_WRITE: host->data_dir = DAVINCI_MMC_DATADIR_WRITE; writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, host->base + DAVINCI_MMCFIFOCTL); writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, host->base + DAVINCI_MMCFIFOCTL); break; default: host->data_dir = DAVINCI_MMC_DATADIR_READ; writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, host->base + DAVINCI_MMCFIFOCTL); writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, host->base + DAVINCI_MMCFIFOCTL); break; } host->buffer = NULL; host->bytes_left = data->blocks * data->blksz; /* For now we try to use DMA whenever we won't need partial FIFO * reads or writes, either for the whole transfer (as tested here) * or for any individual scatterlist segment (tested when we call * start_dma_transfer). * * While we *could* change that, unusual block sizes are rarely * used. The occasional fallback to PIO should't hurt. 
*/ if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 && mmc_davinci_start_dma_transfer(host, data) == 0) { /* zero this to ensure we take no PIO paths */ host->bytes_left = 0; } else { /* Revert to CPU Copy */ host->sg_len = data->sg_len; host->sg = host->data->sg; mmc_davinci_sg_to_buf(host); } } static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) { struct mmc_davinci_host *host = mmc_priv(mmc); unsigned long timeout = jiffies + msecs_to_jiffies(900); u32 mmcst1 = 0; /* Card may still be sending BUSY after a previous operation, * typically some kind of write. If so, we can't proceed yet. */ while (time_before(jiffies, timeout)) { mmcst1 = readl(host->base + DAVINCI_MMCST1); if (!(mmcst1 & MMCST1_BUSY)) break; cpu_relax(); } if (mmcst1 & MMCST1_BUSY) { dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n"); req->cmd->error = -ETIMEDOUT; mmc_request_done(mmc, req); return; } host->do_dma = 0; mmc_davinci_prepare_data(host, req); mmc_davinci_start_command(host, req->cmd); } static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, unsigned int mmc_req_freq) { unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; mmc_pclk = host->mmc_input_clk; if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) mmc_push_pull_divisor = ((unsigned int)mmc_pclk / (2 * mmc_req_freq)) - 1; else mmc_push_pull_divisor = 0; mmc_freq = (unsigned int)mmc_pclk / (2 * (mmc_push_pull_divisor + 1)); if (mmc_freq > mmc_req_freq) mmc_push_pull_divisor = mmc_push_pull_divisor + 1; /* Convert ns to clock cycles */ if (mmc_req_freq <= 400000) host->ns_in_one_cycle = (1000000) / (((mmc_pclk / (2 * (mmc_push_pull_divisor + 1)))/1000)); else host->ns_in_one_cycle = (1000000) / (((mmc_pclk / (2 * (mmc_push_pull_divisor + 1)))/1000000)); return mmc_push_pull_divisor; } static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) { unsigned int open_drain_freq = 0, mmc_pclk = 0; unsigned int mmc_push_pull_freq = 0; struct 
mmc_davinci_host *host = mmc_priv(mmc); if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { u32 temp; /* Ignoring the init clock value passed for fixing the inter * operability with different cards. */ open_drain_freq = ((unsigned int)mmc_pclk / (2 * MMCSD_INIT_CLOCK)) - 1; if (open_drain_freq > 0xFF) open_drain_freq = 0xFF; temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; temp |= open_drain_freq; writel(temp, host->base + DAVINCI_MMCCLK); /* Convert ns to clock cycles */ host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); } else { u32 temp; mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); if (mmc_push_pull_freq > 0xFF) mmc_push_pull_freq = 0xFF; temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; writel(temp, host->base + DAVINCI_MMCCLK); udelay(10); temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; temp |= mmc_push_pull_freq; writel(temp, host->base + DAVINCI_MMCCLK); writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); udelay(10); } } static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mmc_davinci_host *host = mmc_priv(mmc); struct platform_device *pdev = to_platform_device(mmc->parent); struct davinci_mmc_config *config = pdev->dev.platform_data; dev_dbg(mmc_dev(host->mmc), "clock %dHz busmode %d powermode %d Vdd %04x\n", ios->clock, ios->bus_mode, ios->power_mode, ios->vdd); switch (ios->power_mode) { case MMC_POWER_OFF: if (config && config->set_power) config->set_power(pdev->id, false); break; case MMC_POWER_UP: if (config && config->set_power) config->set_power(pdev->id, true); break; } switch (ios->bus_width) { case MMC_BUS_WIDTH_8: dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); writel((readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT, host->base + DAVINCI_MMCCTL); break; case MMC_BUS_WIDTH_4: dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); if (host->version == MMC_CTLR_VERSION_2) writel((readl(host->base + DAVINCI_MMCCTL) & 
~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT, host->base + DAVINCI_MMCCTL); else writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_WIDTH_4_BIT, host->base + DAVINCI_MMCCTL); break; case MMC_BUS_WIDTH_1: dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n"); if (host->version == MMC_CTLR_VERSION_2) writel(readl(host->base + DAVINCI_MMCCTL) & ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT), host->base + DAVINCI_MMCCTL); else writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT, host->base + DAVINCI_MMCCTL); break; } calculate_clk_divider(mmc, ios); host->bus_mode = ios->bus_mode; if (ios->power_mode == MMC_POWER_UP) { unsigned long timeout = jiffies + msecs_to_jiffies(50); bool lose = true; /* Send clock cycles, poll completion */ writel(0, host->base + DAVINCI_MMCARGHL); writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); while (time_before(jiffies, timeout)) { u32 tmp = readl(host->base + DAVINCI_MMCST0); if (tmp & MMCST0_RSPDNE) { lose = false; break; } cpu_relax(); } if (lose) dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); } /* FIXME on power OFF, reset things ... */ } static void mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) { host->data = NULL; if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { /* * SDIO Interrupt Detection work-around as suggested by * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata * 2.1.6): Signal SDIO interrupt only if it is enabled by core */ if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); mmc_signal_sdio_irq(host->mmc); } } if (host->do_dma) { davinci_abort_dma(host); dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, (data->flags & MMC_DATA_WRITE) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); host->do_dma = false; } host->data_dir = DAVINCI_MMC_DATADIR_NONE; if (!data->stop || (host->cmd && host->cmd->error)) { mmc_request_done(host->mmc, data->mrq); writel(0, host->base + DAVINCI_MMCIM); host->active_request = false; } else mmc_davinci_start_command(host, data->stop); } static void mmc_davinci_cmd_done(struct mmc_davinci_host *host, struct mmc_command *cmd) { host->cmd = NULL; if (cmd->flags & MMC_RSP_PRESENT) { if (cmd->flags & MMC_RSP_136) { /* response type 2 */ cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01); cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23); cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45); cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); } else { /* response types 1, 1b, 3, 4, 5, 6 */ cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67); } } if (host->data == NULL || cmd->error) { if (cmd->error == -ETIMEDOUT) cmd->mrq->cmd->retries = 0; mmc_request_done(host->mmc, cmd->mrq); writel(0, host->base + DAVINCI_MMCIM); host->active_request = false; } } static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host, int val) { u32 temp; temp = readl(host->base + DAVINCI_MMCCTL); if (val) /* reset */ temp |= MMCCTL_CMDRST | MMCCTL_DATRST; else /* enable */ temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST); writel(temp, host->base + DAVINCI_MMCCTL); udelay(10); } static void davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data) { mmc_davinci_reset_ctrl(host, 1); mmc_davinci_reset_ctrl(host, 0); } static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id) { struct mmc_davinci_host *host = dev_id; unsigned int status; status = readl(host->base + DAVINCI_SDIOIST); if (status & SDIOIST_IOINT) { dev_dbg(mmc_dev(host->mmc), "SDIO interrupt status %x\n", status); writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); mmc_signal_sdio_irq(host->mmc); } return IRQ_HANDLED; } static irqreturn_t mmc_davinci_irq(int irq, void *dev_id) { struct mmc_davinci_host *host = (struct 
mmc_davinci_host *)dev_id; unsigned int status, qstatus; int end_command = 0; int end_transfer = 0; struct mmc_data *data = host->data; if (host->cmd == NULL && host->data == NULL) { status = readl(host->base + DAVINCI_MMCST0); dev_dbg(mmc_dev(host->mmc), "Spurious interrupt 0x%04x\n", status); /* Disable the interrupt from mmcsd */ writel(0, host->base + DAVINCI_MMCIM); return IRQ_NONE; } status = readl(host->base + DAVINCI_MMCST0); qstatus = status; /* handle FIFO first when using PIO for data. * bytes_left will decrease to zero as I/O progress and status will * read zero over iteration because this controller status * register(MMCST0) reports any status only once and it is cleared * by read. So, it is not unbouned loop even in the case of * non-dma. */ if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) { unsigned long im_val; /* * If interrupts fire during the following loop, they will be * handled by the handler, but the PIC will still buffer these. * As a result, the handler will be called again to serve these * needlessly. In order to avoid these spurious interrupts, * keep interrupts masked during the loop. */ im_val = readl(host->base + DAVINCI_MMCIM); writel(0, host->base + DAVINCI_MMCIM); do { davinci_fifo_data_trans(host, rw_threshold); status = readl(host->base + DAVINCI_MMCST0); qstatus |= status; } while (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))); /* * If an interrupt is pending, it is assumed it will fire when * it is unmasked. This assumption is also taken when the MMCIM * is first set. Otherwise, writing to MMCIM after reading the * status is race-prone. 
*/ writel(im_val, host->base + DAVINCI_MMCIM); } if (qstatus & MMCST0_DATDNE) { /* All blocks sent/received, and CRC checks passed */ if (data != NULL) { if ((host->do_dma == 0) && (host->bytes_left > 0)) { /* if datasize < rw_threshold * no RX ints are generated */ davinci_fifo_data_trans(host, host->bytes_left); } end_transfer = 1; data->bytes_xfered = data->blocks * data->blksz; } else { dev_err(mmc_dev(host->mmc), "DATDNE with no host->data\n"); } } if (qstatus & MMCST0_TOUTRD) { /* Read data timeout */ data->error = -ETIMEDOUT; end_transfer = 1; dev_dbg(mmc_dev(host->mmc), "read data timeout, status %x\n", qstatus); davinci_abort_data(host, data); } if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) { /* Data CRC error */ data->error = -EILSEQ; end_transfer = 1; /* NOTE: this controller uses CRCWR to report both CRC * errors and timeouts (on writes). MMCDRSP values are * only weakly documented, but 0x9f was clearly a timeout * case and the two three-bit patterns in various SD specs * (101, 010) aren't part of it ... */ if (qstatus & MMCST0_CRCWR) { u32 temp = readb(host->base + DAVINCI_MMCDRSP); if (temp == 0x9f) data->error = -ETIMEDOUT; } dev_dbg(mmc_dev(host->mmc), "data %s %s error\n", (qstatus & MMCST0_CRCWR) ? "write" : "read", (data->error == -ETIMEDOUT) ? 
"timeout" : "CRC"); davinci_abort_data(host, data); } if (qstatus & MMCST0_TOUTRS) { /* Command timeout */ if (host->cmd) { dev_dbg(mmc_dev(host->mmc), "CMD%d timeout, status %x\n", host->cmd->opcode, qstatus); host->cmd->error = -ETIMEDOUT; if (data) { end_transfer = 1; davinci_abort_data(host, data); } else end_command = 1; } } if (qstatus & MMCST0_CRCRS) { /* Command CRC error */ dev_dbg(mmc_dev(host->mmc), "Command CRC error\n"); if (host->cmd) { host->cmd->error = -EILSEQ; end_command = 1; } } if (qstatus & MMCST0_RSPDNE) { /* End of command phase */ end_command = (int) host->cmd; } if (end_command) mmc_davinci_cmd_done(host, host->cmd); if (end_transfer) mmc_davinci_xfer_done(host, data); return IRQ_HANDLED; } static int mmc_davinci_get_cd(struct mmc_host *mmc) { struct platform_device *pdev = to_platform_device(mmc->parent); struct davinci_mmc_config *config = pdev->dev.platform_data; if (!config || !config->get_cd) return -ENOSYS; return config->get_cd(pdev->id); } static int mmc_davinci_get_ro(struct mmc_host *mmc) { struct platform_device *pdev = to_platform_device(mmc->parent); struct davinci_mmc_config *config = pdev->dev.platform_data; if (!config || !config->get_ro) return -ENOSYS; return config->get_ro(pdev->id); } static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable) { struct mmc_davinci_host *host = mmc_priv(mmc); if (enable) { if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); mmc_signal_sdio_irq(host->mmc); } else { host->sdio_int = true; writel(readl(host->base + DAVINCI_SDIOIEN) | SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN); } } else { host->sdio_int = false; writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN); } } static struct mmc_host_ops mmc_davinci_ops = { .request = mmc_davinci_request, .set_ios = mmc_davinci_set_ios, .get_cd = mmc_davinci_get_cd, .get_ro = mmc_davinci_get_ro, .enable_sdio_irq = 
mmc_davinci_enable_sdio_irq, }; /*----------------------------------------------------------------------*/ #ifdef CONFIG_CPU_FREQ static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct mmc_davinci_host *host; unsigned int mmc_pclk; struct mmc_host *mmc; unsigned long flags; host = container_of(nb, struct mmc_davinci_host, freq_transition); mmc = host->mmc; mmc_pclk = clk_get_rate(host->clk); if (val == CPUFREQ_POSTCHANGE) { spin_lock_irqsave(&mmc->lock, flags); host->mmc_input_clk = mmc_pclk; calculate_clk_divider(mmc, &mmc->ios); spin_unlock_irqrestore(&mmc->lock, flags); } return 0; } static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) { host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; return cpufreq_register_notifier(&host->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) { cpufreq_unregister_notifier(&host->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } #else static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) { return 0; } static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) { } #endif static void __init init_mmcsd_host(struct mmc_davinci_host *host) { mmc_davinci_reset_ctrl(host, 1); writel(0, host->base + DAVINCI_MMCCLK); writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); writel(0x1FFF, host->base + DAVINCI_MMCTOR); writel(0xFFFF, host->base + DAVINCI_MMCTOD); mmc_davinci_reset_ctrl(host, 0); } static int __init davinci_mmcsd_probe(struct platform_device *pdev) { struct davinci_mmc_config *pdata = pdev->dev.platform_data; struct mmc_davinci_host *host = NULL; struct mmc_host *mmc = NULL; struct resource *r, *mem = NULL; int ret = 0, irq = 0; size_t mem_size; /* REVISIT: when we're fully converted, fail if pdata is NULL */ ret = -ENODEV; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if 
(!r || irq == NO_IRQ) goto out; ret = -EBUSY; mem_size = resource_size(r); mem = request_mem_region(r->start, mem_size, pdev->name); if (!mem) goto out; ret = -ENOMEM; mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); if (!mmc) goto out; host = mmc_priv(mmc); host->mmc = mmc; /* Important */ r = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!r) goto out; host->rxdma = r->start; r = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!r) goto out; host->txdma = r->start; host->mem_res = mem; host->base = ioremap(mem->start, mem_size); if (!host->base) goto out; ret = -ENXIO; host->clk = clk_get(&pdev->dev, "MMCSDCLK"); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); goto out; } clk_enable(host->clk); host->mmc_input_clk = clk_get_rate(host->clk); init_mmcsd_host(host); if (pdata->nr_sg) host->nr_sg = pdata->nr_sg - 1; if (host->nr_sg > MAX_NR_SG || !host->nr_sg) host->nr_sg = MAX_NR_SG; host->use_dma = use_dma; host->mmc_irq = irq; host->sdio_irq = platform_get_irq(pdev, 1); if (host->use_dma && davinci_acquire_dma_channels(host) != 0) host->use_dma = 0; /* REVISIT: someday, support IRQ-driven card detection. */ mmc->caps |= MMC_CAP_NEEDS_POLL; mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; if (pdata && (pdata->wires == 4 || pdata->wires == 0)) mmc->caps |= MMC_CAP_4_BIT_DATA; if (pdata && (pdata->wires == 8)) mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); host->version = pdata->version; mmc->ops = &mmc_davinci_ops; mmc->f_min = 312500; mmc->f_max = 25000000; if (pdata && pdata->max_freq) mmc->f_max = pdata->max_freq; if (pdata && pdata->caps) mmc->caps |= pdata->caps; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; /* With no iommu coalescing pages, each phys_seg is a hw_seg. * Each hw_seg uses one EDMA parameter RAM slot, always one * channel and then usually some linked slots. 
*/ mmc->max_segs = 1 + host->n_link; /* EDMA limit per hw segment (one or two MBytes) */ mmc->max_seg_size = MAX_CCNT * rw_threshold; /* MMC/SD controller limits for multiblock requests */ mmc->max_blk_size = 4095; /* BLEN is 12 bits */ mmc->max_blk_count = 65535; /* NBLK is 16 bits */ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs); dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); platform_set_drvdata(pdev, host); ret = mmc_davinci_cpufreq_register(host); if (ret) { dev_err(&pdev->dev, "failed to register cpufreq\n"); goto cpu_freq_fail; } ret = mmc_add_host(mmc); if (ret < 0) goto out; ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host); if (ret) goto out; if (host->sdio_irq >= 0) { ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0, mmc_hostname(mmc), host); if (!ret) mmc->caps |= MMC_CAP_SDIO_IRQ; } rename_region(mem, mmc_hostname(mmc)); dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", host->use_dma ? "DMA" : "PIO", (mmc->caps & MMC_CAP_4_BIT_DATA) ? 
4 : 1); return 0; out: mmc_davinci_cpufreq_deregister(host); cpu_freq_fail: if (host) { davinci_release_dma_channels(host); if (host->clk) { clk_disable(host->clk); clk_put(host->clk); } if (host->base) iounmap(host->base); } if (mmc) mmc_free_host(mmc); if (mem) release_resource(mem); dev_dbg(&pdev->dev, "probe err %d\n", ret); return ret; } static int __exit davinci_mmcsd_remove(struct platform_device *pdev) { struct mmc_davinci_host *host = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); if (host) { mmc_davinci_cpufreq_deregister(host); mmc_remove_host(host->mmc); free_irq(host->mmc_irq, host); if (host->mmc->caps & MMC_CAP_SDIO_IRQ) free_irq(host->sdio_irq, host); davinci_release_dma_channels(host); clk_disable(host->clk); clk_put(host->clk); iounmap(host->base); release_resource(host->mem_res); mmc_free_host(host->mmc); } return 0; } #ifdef CONFIG_PM static int davinci_mmcsd_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); int ret; ret = mmc_suspend_host(host->mmc); if (!ret) { writel(0, host->base + DAVINCI_MMCIM); mmc_davinci_reset_ctrl(host, 1); clk_disable(host->clk); host->suspended = 1; } else { host->suspended = 0; } return ret; } static int davinci_mmcsd_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); int ret; if (!host->suspended) return 0; clk_enable(host->clk); mmc_davinci_reset_ctrl(host, 0); ret = mmc_resume_host(host->mmc); if (!ret) host->suspended = 0; return ret; } static const struct dev_pm_ops davinci_mmcsd_pm = { .suspend = davinci_mmcsd_suspend, .resume = davinci_mmcsd_resume, }; #define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm) #else #define davinci_mmcsd_pm_ops NULL #endif static struct platform_driver davinci_mmcsd_driver = { .driver = { .name = "davinci_mmc", .owner = THIS_MODULE, .pm = davinci_mmcsd_pm_ops, }, .remove = 
__exit_p(davinci_mmcsd_remove), }; static int __init davinci_mmcsd_init(void) { return platform_driver_probe(&davinci_mmcsd_driver, davinci_mmcsd_probe); } module_init(davinci_mmcsd_init); static void __exit davinci_mmcsd_exit(void) { platform_driver_unregister(&davinci_mmcsd_driver); } module_exit(davinci_mmcsd_exit); MODULE_AUTHOR("Texas Instruments India"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
gpl-2.0
slayher/android_kernel_samsung_hlte
drivers/mmc/host/davinci_mmc.c
4797
42925
/* * davinci_mmc.c - TI DaVinci MMC/SD/SDIO driver * * Copyright (C) 2006 Texas Instruments. * Original author: Purushotam Kumar * Copyright (C) 2009 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/ioport.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/cpufreq.h> #include <linux/mmc/host.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/mmc/mmc.h> #include <mach/mmc.h> #include <mach/edma.h> /* * Register Definitions */ #define DAVINCI_MMCCTL 0x00 /* Control Register */ #define DAVINCI_MMCCLK 0x04 /* Memory Clock Control Register */ #define DAVINCI_MMCST0 0x08 /* Status Register 0 */ #define DAVINCI_MMCST1 0x0C /* Status Register 1 */ #define DAVINCI_MMCIM 0x10 /* Interrupt Mask Register */ #define DAVINCI_MMCTOR 0x14 /* Response Time-Out Register */ #define DAVINCI_MMCTOD 0x18 /* Data Read Time-Out Register */ #define DAVINCI_MMCBLEN 0x1C /* Block Length Register */ #define DAVINCI_MMCNBLK 0x20 /* Number of Blocks Register */ #define DAVINCI_MMCNBLC 0x24 /* Number of Blocks Counter Register */ #define DAVINCI_MMCDRR 0x28 /* Data Receive Register */ #define DAVINCI_MMCDXR 0x2C /* Data Transmit Register */ #define DAVINCI_MMCCMD 0x30 /* Command Register */ 
#define DAVINCI_MMCARGHL   0x34 /* Argument Register */
#define DAVINCI_MMCRSP01   0x38 /* Response Register 0 and 1 */
#define DAVINCI_MMCRSP23   0x3C /* Response Register 2 and 3 */
#define DAVINCI_MMCRSP45   0x40 /* Response Register 4 and 5 */
#define DAVINCI_MMCRSP67   0x44 /* Response Register 6 and 7 */
#define DAVINCI_MMCDRSP    0x48 /* Data Response Register */
/* 0x4C..0x60: defined for completeness; not referenced by this driver */
#define DAVINCI_MMCETOK    0x4C
#define DAVINCI_MMCCIDX    0x50 /* Command Index Register */
#define DAVINCI_MMCCKC     0x54
#define DAVINCI_MMCTORC    0x58
#define DAVINCI_MMCTODC    0x5C
#define DAVINCI_MMCBLNC    0x60
#define DAVINCI_SDIOCTL    0x64
#define DAVINCI_SDIOST0    0x68
#define DAVINCI_SDIOIEN    0x6C
#define DAVINCI_SDIOIST    0x70
#define DAVINCI_MMCFIFOCTL 0x74 /* FIFO Control Register */

/* DAVINCI_MMCCTL definitions */
#define MMCCTL_DATRST         (1 << 0)
#define MMCCTL_CMDRST         (1 << 1)
#define MMCCTL_WIDTH_8_BIT    (1 << 8)
#define MMCCTL_WIDTH_4_BIT    (1 << 2)
#define MMCCTL_DATEG_DISABLED (0 << 6)
#define MMCCTL_DATEG_RISING   (1 << 6)
#define MMCCTL_DATEG_FALLING  (2 << 6)
#define MMCCTL_DATEG_BOTH     (3 << 6)
#define MMCCTL_PERMDR_LE      (0 << 9)
#define MMCCTL_PERMDR_BE      (1 << 9)
#define MMCCTL_PERMDX_LE      (0 << 10)
#define MMCCTL_PERMDX_BE      (1 << 10)

/* DAVINCI_MMCCLK definitions */
#define MMCCLK_CLKEN          (1 << 8)
#define MMCCLK_CLKRT_MASK     (0xFF << 0)

/* IRQ bit definitions, for DAVINCI_MMCST0 and DAVINCI_MMCIM */
#define MMCST0_DATDNE         BIT(0)	/* data done */
#define MMCST0_BSYDNE         BIT(1)	/* busy done */
#define MMCST0_RSPDNE         BIT(2)	/* command done */
#define MMCST0_TOUTRD         BIT(3)	/* data read timeout */
#define MMCST0_TOUTRS         BIT(4)	/* command response timeout */
#define MMCST0_CRCWR          BIT(5)	/* data write CRC error */
#define MMCST0_CRCRD          BIT(6)	/* data read CRC error */
#define MMCST0_CRCRS          BIT(7)	/* command response CRC error */
#define MMCST0_DXRDY          BIT(9)	/* data transmit ready (fifo empty) */
#define MMCST0_DRRDY          BIT(10)	/* data receive ready (data in fifo) */
#define MMCST0_DATED          BIT(11)	/* DAT3 edge detect */
#define MMCST0_TRNDNE         BIT(12)	/*
transfer done */ /* DAVINCI_MMCST1 definitions */ #define MMCST1_BUSY (1 << 0) /* DAVINCI_MMCCMD definitions */ #define MMCCMD_CMD_MASK (0x3F << 0) #define MMCCMD_PPLEN (1 << 7) #define MMCCMD_BSYEXP (1 << 8) #define MMCCMD_RSPFMT_MASK (3 << 9) #define MMCCMD_RSPFMT_NONE (0 << 9) #define MMCCMD_RSPFMT_R1456 (1 << 9) #define MMCCMD_RSPFMT_R2 (2 << 9) #define MMCCMD_RSPFMT_R3 (3 << 9) #define MMCCMD_DTRW (1 << 11) #define MMCCMD_STRMTP (1 << 12) #define MMCCMD_WDATX (1 << 13) #define MMCCMD_INITCK (1 << 14) #define MMCCMD_DCLR (1 << 15) #define MMCCMD_DMATRIG (1 << 16) /* DAVINCI_MMCFIFOCTL definitions */ #define MMCFIFOCTL_FIFORST (1 << 0) #define MMCFIFOCTL_FIFODIR_WR (1 << 1) #define MMCFIFOCTL_FIFODIR_RD (0 << 1) #define MMCFIFOCTL_FIFOLEV (1 << 2) /* 0 = 128 bits, 1 = 256 bits */ #define MMCFIFOCTL_ACCWD_4 (0 << 3) /* access width of 4 bytes */ #define MMCFIFOCTL_ACCWD_3 (1 << 3) /* access width of 3 bytes */ #define MMCFIFOCTL_ACCWD_2 (2 << 3) /* access width of 2 bytes */ #define MMCFIFOCTL_ACCWD_1 (3 << 3) /* access width of 1 byte */ /* DAVINCI_SDIOST0 definitions */ #define SDIOST0_DAT1_HI BIT(0) /* DAVINCI_SDIOIEN definitions */ #define SDIOIEN_IOINTEN BIT(0) /* DAVINCI_SDIOIST definitions */ #define SDIOIST_IOINT BIT(0) /* MMCSD Init clock in Hz in opendrain mode */ #define MMCSD_INIT_CLOCK 200000 /* * One scatterlist dma "segment" is at most MAX_CCNT rw_threshold units, * and we handle up to MAX_NR_SG segments. MMC_BLOCK_BOUNCE kicks in only * for drivers with max_segs == 1, making the segments bigger (64KB) * than the page or two that's otherwise typical. nr_sg (passed from * platform data) == 16 gives at least the same throughput boost, using * EDMA transfer linkage instead of spending CPU time copying pages. */ #define MAX_CCNT ((1 << 16) - 1) #define MAX_NR_SG 16 static unsigned rw_threshold = 32; module_param(rw_threshold, uint, S_IRUGO); MODULE_PARM_DESC(rw_threshold, "Read/Write threshold. 
Default = 32"); static unsigned poll_threshold = 128; module_param(poll_threshold, uint, S_IRUGO); MODULE_PARM_DESC(poll_threshold, "Polling transaction size threshold. Default = 128"); static unsigned poll_loopcount = 32; module_param(poll_loopcount, uint, S_IRUGO); MODULE_PARM_DESC(poll_loopcount, "Maximum polling loop count. Default = 32"); static unsigned __initdata use_dma = 1; module_param(use_dma, uint, 0); MODULE_PARM_DESC(use_dma, "Whether to use DMA or not. Default = 1"); struct mmc_davinci_host { struct mmc_command *cmd; struct mmc_data *data; struct mmc_host *mmc; struct clk *clk; unsigned int mmc_input_clk; void __iomem *base; struct resource *mem_res; int mmc_irq, sdio_irq; unsigned char bus_mode; #define DAVINCI_MMC_DATADIR_NONE 0 #define DAVINCI_MMC_DATADIR_READ 1 #define DAVINCI_MMC_DATADIR_WRITE 2 unsigned char data_dir; unsigned char suspended; /* buffer is used during PIO of one scatterlist segment, and * is updated along with buffer_bytes_left. bytes_left applies * to all N blocks of the PIO transfer. */ u8 *buffer; u32 buffer_bytes_left; u32 bytes_left; u32 rxdma, txdma; bool use_dma; bool do_dma; bool sdio_int; bool active_request; /* Scatterlist DMA uses one or more parameter RAM entries: * the main one (associated with rxdma or txdma) plus zero or * more links. The entries for a given transfer differ only * by memory buffer (address, length) and link field. */ struct edmacc_param tx_template; struct edmacc_param rx_template; unsigned n_link; u32 links[MAX_NR_SG - 1]; /* For PIO we walk scatterlists one segment at a time. 
 */
	unsigned int sg_len;
	struct scatterlist *sg;

	/* Version of the MMC/SD controller */
	u8 version;
	/* for ns in one cycle calculation */
	unsigned ns_in_one_cycle;
	/* Number of sg segments */
	u8 nr_sg;
#ifdef CONFIG_CPU_FREQ
	struct notifier_block	freq_transition;
#endif
};

/* forward: also called synchronously by the PIO polling path below */
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id);

/* PIO only */
/*
 * Point host->buffer at the current scatterlist segment and cap
 * buffer_bytes_left at the bytes remaining in the whole transfer.
 *
 * NOTE(review): this uses sg_dma_len() even though the PIO path never
 * calls dma_map_sg() -- presumably it relies on sg_dma_len(sg) equaling
 * sg->length on this platform; confirm for the target config.
 */
static void mmc_davinci_sg_to_buf(struct mmc_davinci_host *host)
{
	host->buffer_bytes_left = sg_dma_len(host->sg);
	host->buffer = sg_virt(host->sg);
	if (host->buffer_bytes_left > host->bytes_left)
		host->buffer_bytes_left = host->bytes_left;
}

/*
 * Move up to @n bytes between the current PIO buffer and the controller
 * FIFO (DAVINCI_MMCDXR on write, DAVINCI_MMCDRR on read), advancing the
 * scatterlist segment when the current one is exhausted.
 */
static void davinci_fifo_data_trans(struct mmc_davinci_host *host,
					unsigned int n)
{
	u8 *p;
	unsigned int i;

	if (host->buffer_bytes_left == 0) {
		/*
		 * NOTE(review): sg_next() is applied to the head of the
		 * list (data->sg), not to host->sg, so this always yields
		 * the *second* segment -- looks wrong for PIO transfers
		 * spanning more than two segments; confirm against intended
		 * sg walking.
		 */
		host->sg = sg_next(host->data->sg);
		mmc_davinci_sg_to_buf(host);
	}

	p = host->buffer;
	if (n > host->buffer_bytes_left)
		n = host->buffer_bytes_left;
	host->buffer_bytes_left -= n;
	host->bytes_left -= n;

	/* NOTE: we never transfer more than rw_threshold bytes
	 * to/from the fifo here; there's no I/O overlap.
	 * This also assumes that access width (i.e.
ACCWD) is 4 bytes */ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { for (i = 0; i < (n >> 2); i++) { writel(*((u32 *)p), host->base + DAVINCI_MMCDXR); p = p + 4; } if (n & 3) { iowrite8_rep(host->base + DAVINCI_MMCDXR, p, (n & 3)); p = p + (n & 3); } } else { for (i = 0; i < (n >> 2); i++) { *((u32 *)p) = readl(host->base + DAVINCI_MMCDRR); p = p + 4; } if (n & 3) { ioread8_rep(host->base + DAVINCI_MMCDRR, p, (n & 3)); p = p + (n & 3); } } host->buffer = p; } static void mmc_davinci_start_command(struct mmc_davinci_host *host, struct mmc_command *cmd) { u32 cmd_reg = 0; u32 im_val; dev_dbg(mmc_dev(host->mmc), "CMD%d, arg 0x%08x%s\n", cmd->opcode, cmd->arg, ({ char *s; switch (mmc_resp_type(cmd)) { case MMC_RSP_R1: s = ", R1/R5/R6/R7 response"; break; case MMC_RSP_R1B: s = ", R1b response"; break; case MMC_RSP_R2: s = ", R2 response"; break; case MMC_RSP_R3: s = ", R3/R4 response"; break; default: s = ", (R? response)"; break; }; s; })); host->cmd = cmd; switch (mmc_resp_type(cmd)) { case MMC_RSP_R1B: /* There's some spec confusion about when R1B is * allowed, but if the card doesn't issue a BUSY * then it's harmless for us to allow it. 
*/ cmd_reg |= MMCCMD_BSYEXP; /* FALLTHROUGH */ case MMC_RSP_R1: /* 48 bits, CRC */ cmd_reg |= MMCCMD_RSPFMT_R1456; break; case MMC_RSP_R2: /* 136 bits, CRC */ cmd_reg |= MMCCMD_RSPFMT_R2; break; case MMC_RSP_R3: /* 48 bits, no CRC */ cmd_reg |= MMCCMD_RSPFMT_R3; break; default: cmd_reg |= MMCCMD_RSPFMT_NONE; dev_dbg(mmc_dev(host->mmc), "unknown resp_type %04x\n", mmc_resp_type(cmd)); break; } /* Set command index */ cmd_reg |= cmd->opcode; /* Enable EDMA transfer triggers */ if (host->do_dma) cmd_reg |= MMCCMD_DMATRIG; if (host->version == MMC_CTLR_VERSION_2 && host->data != NULL && host->data_dir == DAVINCI_MMC_DATADIR_READ) cmd_reg |= MMCCMD_DMATRIG; /* Setting whether command involves data transfer or not */ if (cmd->data) cmd_reg |= MMCCMD_WDATX; /* Setting whether stream or block transfer */ if (cmd->flags & MMC_DATA_STREAM) cmd_reg |= MMCCMD_STRMTP; /* Setting whether data read or write */ if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) cmd_reg |= MMCCMD_DTRW; if (host->bus_mode == MMC_BUSMODE_PUSHPULL) cmd_reg |= MMCCMD_PPLEN; /* set Command timeout */ writel(0x1FFF, host->base + DAVINCI_MMCTOR); /* Enable interrupt (calculate here, defer until FIFO is stuffed). */ im_val = MMCST0_RSPDNE | MMCST0_CRCRS | MMCST0_TOUTRS; if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { im_val |= MMCST0_DATDNE | MMCST0_CRCWR; if (!host->do_dma) im_val |= MMCST0_DXRDY; } else if (host->data_dir == DAVINCI_MMC_DATADIR_READ) { im_val |= MMCST0_DATDNE | MMCST0_CRCRD | MMCST0_TOUTRD; if (!host->do_dma) im_val |= MMCST0_DRRDY; } /* * Before non-DMA WRITE commands the controller needs priming: * FIFO should be populated with 32 bytes i.e. 
whatever is the FIFO size */ if (!host->do_dma && (host->data_dir == DAVINCI_MMC_DATADIR_WRITE)) davinci_fifo_data_trans(host, rw_threshold); writel(cmd->arg, host->base + DAVINCI_MMCARGHL); writel(cmd_reg, host->base + DAVINCI_MMCCMD); host->active_request = true; if (!host->do_dma && host->bytes_left <= poll_threshold) { u32 count = poll_loopcount; while (host->active_request && count--) { mmc_davinci_irq(0, host); cpu_relax(); } } if (host->active_request) writel(im_val, host->base + DAVINCI_MMCIM); } /*----------------------------------------------------------------------*/ /* DMA infrastructure */ static void davinci_abort_dma(struct mmc_davinci_host *host) { int sync_dev; if (host->data_dir == DAVINCI_MMC_DATADIR_READ) sync_dev = host->rxdma; else sync_dev = host->txdma; edma_stop(sync_dev); edma_clean_channel(sync_dev); } static void mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data); static void mmc_davinci_dma_cb(unsigned channel, u16 ch_status, void *data) { if (DMA_COMPLETE != ch_status) { struct mmc_davinci_host *host = data; /* Currently means: DMA Event Missed, or "null" transfer * request was seen. In the future, TC errors (like bad * addresses) might be presented too. */ dev_warn(mmc_dev(host->mmc), "DMA %s error\n", (host->data->flags & MMC_DATA_WRITE) ? "write" : "read"); host->data->error = -EIO; mmc_davinci_xfer_done(host, host->data); } } /* Set up tx or rx template, to be modified and updated later */ static void __init mmc_davinci_dma_setup(struct mmc_davinci_host *host, bool tx, struct edmacc_param *template) { unsigned sync_dev; const u16 acnt = 4; const u16 bcnt = rw_threshold >> 2; const u16 ccnt = 0; u32 src_port = 0; u32 dst_port = 0; s16 src_bidx, dst_bidx; s16 src_cidx, dst_cidx; /* * A-B Sync transfer: each DMA request is for one "frame" of * rw_threshold bytes, broken into "acnt"-size chunks repeated * "bcnt" times. 
Each segment needs "ccnt" such frames; since * we tell the block layer our mmc->max_seg_size limit, we can * trust (later) that it's within bounds. * * The FIFOs are read/written in 4-byte chunks (acnt == 4) and * EDMA will optimize memory operations to use larger bursts. */ if (tx) { sync_dev = host->txdma; /* src_prt, ccnt, and link to be set up later */ src_bidx = acnt; src_cidx = acnt * bcnt; dst_port = host->mem_res->start + DAVINCI_MMCDXR; dst_bidx = 0; dst_cidx = 0; } else { sync_dev = host->rxdma; src_port = host->mem_res->start + DAVINCI_MMCDRR; src_bidx = 0; src_cidx = 0; /* dst_prt, ccnt, and link to be set up later */ dst_bidx = acnt; dst_cidx = acnt * bcnt; } /* * We can't use FIFO mode for the FIFOs because MMC FIFO addresses * are not 256-bit (32-byte) aligned. So we use INCR, and the W8BIT * parameter is ignored. */ edma_set_src(sync_dev, src_port, INCR, W8BIT); edma_set_dest(sync_dev, dst_port, INCR, W8BIT); edma_set_src_index(sync_dev, src_bidx, src_cidx); edma_set_dest_index(sync_dev, dst_bidx, dst_cidx); edma_set_transfer_params(sync_dev, acnt, bcnt, ccnt, 8, ABSYNC); edma_read_slot(sync_dev, template); /* don't bother with irqs or chaining */ template->opt |= EDMA_CHAN_SLOT(sync_dev) << 12; } static void mmc_davinci_send_dma_request(struct mmc_davinci_host *host, struct mmc_data *data) { struct edmacc_param *template; int channel, slot; unsigned link; struct scatterlist *sg; unsigned sg_len; unsigned bytes_left = host->bytes_left; const unsigned shift = ffs(rw_threshold) - 1; if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) { template = &host->tx_template; channel = host->txdma; } else { template = &host->rx_template; channel = host->rxdma; } /* We know sg_len and ccnt will never be out of range because * we told the mmc layer which in turn tells the block layer * to ensure that it only hands us one scatterlist segment * per EDMA PARAM entry. Update the PARAM * entries needed for each segment of this scatterlist. 
*/ for (slot = channel, link = 0, sg = data->sg, sg_len = host->sg_len; sg_len-- != 0 && bytes_left; sg = sg_next(sg), slot = host->links[link++]) { u32 buf = sg_dma_address(sg); unsigned count = sg_dma_len(sg); template->link_bcntrld = sg_len ? (EDMA_CHAN_SLOT(host->links[link]) << 5) : 0xffff; if (count > bytes_left) count = bytes_left; bytes_left -= count; if (host->data_dir == DAVINCI_MMC_DATADIR_WRITE) template->src = buf; else template->dst = buf; template->ccnt = count >> shift; edma_write_slot(slot, template); } if (host->version == MMC_CTLR_VERSION_2) edma_clear_event(channel); edma_start(channel); } static int mmc_davinci_start_dma_transfer(struct mmc_davinci_host *host, struct mmc_data *data) { int i; int mask = rw_threshold - 1; host->sg_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, ((data->flags & MMC_DATA_WRITE) ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); /* no individual DMA segment should need a partial FIFO */ for (i = 0; i < host->sg_len; i++) { if (sg_dma_len(data->sg + i) & mask) { dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, (data->flags & MMC_DATA_WRITE) ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE); return -1; } } host->do_dma = 1; mmc_davinci_send_dma_request(host, data); return 0; } static void __init_or_module davinci_release_dma_channels(struct mmc_davinci_host *host) { unsigned i; if (!host->use_dma) return; for (i = 0; i < host->n_link; i++) edma_free_slot(host->links[i]); edma_free_channel(host->txdma); edma_free_channel(host->rxdma); } static int __init davinci_acquire_dma_channels(struct mmc_davinci_host *host) { u32 link_size; int r, i; /* Acquire master DMA write channel */ r = edma_alloc_channel(host->txdma, mmc_davinci_dma_cb, host, EVENTQ_DEFAULT); if (r < 0) { dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", "tx", r); return r; } mmc_davinci_dma_setup(host, true, &host->tx_template); /* Acquire master DMA read channel */ r = edma_alloc_channel(host->rxdma, mmc_davinci_dma_cb, host, EVENTQ_DEFAULT); if (r < 0) { dev_warn(mmc_dev(host->mmc), "alloc %s channel err %d\n", "rx", r); goto free_master_write; } mmc_davinci_dma_setup(host, false, &host->rx_template); /* Allocate parameter RAM slots, which will later be bound to a * channel as needed to handle a scatterlist. */ link_size = min_t(unsigned, host->nr_sg, ARRAY_SIZE(host->links)); for (i = 0; i < link_size; i++) { r = edma_alloc_slot(EDMA_CTLR(host->txdma), EDMA_SLOT_ANY); if (r < 0) { dev_dbg(mmc_dev(host->mmc), "dma PaRAM alloc --> %d\n", r); break; } host->links[i] = r; } host->n_link = i; return 0; free_master_write: edma_free_channel(host->txdma); return r; } /*----------------------------------------------------------------------*/ static void mmc_davinci_prepare_data(struct mmc_davinci_host *host, struct mmc_request *req) { int fifo_lev = (rw_threshold == 32) ? MMCFIFOCTL_FIFOLEV : 0; int timeout; struct mmc_data *data = req->data; if (host->version == MMC_CTLR_VERSION_2) fifo_lev = (rw_threshold == 64) ? 
MMCFIFOCTL_FIFOLEV : 0; host->data = data; if (data == NULL) { host->data_dir = DAVINCI_MMC_DATADIR_NONE; writel(0, host->base + DAVINCI_MMCBLEN); writel(0, host->base + DAVINCI_MMCNBLK); return; } dev_dbg(mmc_dev(host->mmc), "%s %s, %d blocks of %d bytes\n", (data->flags & MMC_DATA_STREAM) ? "stream" : "block", (data->flags & MMC_DATA_WRITE) ? "write" : "read", data->blocks, data->blksz); dev_dbg(mmc_dev(host->mmc), " DTO %d cycles + %d ns\n", data->timeout_clks, data->timeout_ns); timeout = data->timeout_clks + (data->timeout_ns / host->ns_in_one_cycle); if (timeout > 0xffff) timeout = 0xffff; writel(timeout, host->base + DAVINCI_MMCTOD); writel(data->blocks, host->base + DAVINCI_MMCNBLK); writel(data->blksz, host->base + DAVINCI_MMCBLEN); /* Configure the FIFO */ switch (data->flags & MMC_DATA_WRITE) { case MMC_DATA_WRITE: host->data_dir = DAVINCI_MMC_DATADIR_WRITE; writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR | MMCFIFOCTL_FIFORST, host->base + DAVINCI_MMCFIFOCTL); writel(fifo_lev | MMCFIFOCTL_FIFODIR_WR, host->base + DAVINCI_MMCFIFOCTL); break; default: host->data_dir = DAVINCI_MMC_DATADIR_READ; writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD | MMCFIFOCTL_FIFORST, host->base + DAVINCI_MMCFIFOCTL); writel(fifo_lev | MMCFIFOCTL_FIFODIR_RD, host->base + DAVINCI_MMCFIFOCTL); break; } host->buffer = NULL; host->bytes_left = data->blocks * data->blksz; /* For now we try to use DMA whenever we won't need partial FIFO * reads or writes, either for the whole transfer (as tested here) * or for any individual scatterlist segment (tested when we call * start_dma_transfer). * * While we *could* change that, unusual block sizes are rarely * used. The occasional fallback to PIO should't hurt. 
*/ if (host->use_dma && (host->bytes_left & (rw_threshold - 1)) == 0 && mmc_davinci_start_dma_transfer(host, data) == 0) { /* zero this to ensure we take no PIO paths */ host->bytes_left = 0; } else { /* Revert to CPU Copy */ host->sg_len = data->sg_len; host->sg = host->data->sg; mmc_davinci_sg_to_buf(host); } } static void mmc_davinci_request(struct mmc_host *mmc, struct mmc_request *req) { struct mmc_davinci_host *host = mmc_priv(mmc); unsigned long timeout = jiffies + msecs_to_jiffies(900); u32 mmcst1 = 0; /* Card may still be sending BUSY after a previous operation, * typically some kind of write. If so, we can't proceed yet. */ while (time_before(jiffies, timeout)) { mmcst1 = readl(host->base + DAVINCI_MMCST1); if (!(mmcst1 & MMCST1_BUSY)) break; cpu_relax(); } if (mmcst1 & MMCST1_BUSY) { dev_err(mmc_dev(host->mmc), "still BUSY? bad ... \n"); req->cmd->error = -ETIMEDOUT; mmc_request_done(mmc, req); return; } host->do_dma = 0; mmc_davinci_prepare_data(host, req); mmc_davinci_start_command(host, req->cmd); } static unsigned int calculate_freq_for_card(struct mmc_davinci_host *host, unsigned int mmc_req_freq) { unsigned int mmc_freq = 0, mmc_pclk = 0, mmc_push_pull_divisor = 0; mmc_pclk = host->mmc_input_clk; if (mmc_req_freq && mmc_pclk > (2 * mmc_req_freq)) mmc_push_pull_divisor = ((unsigned int)mmc_pclk / (2 * mmc_req_freq)) - 1; else mmc_push_pull_divisor = 0; mmc_freq = (unsigned int)mmc_pclk / (2 * (mmc_push_pull_divisor + 1)); if (mmc_freq > mmc_req_freq) mmc_push_pull_divisor = mmc_push_pull_divisor + 1; /* Convert ns to clock cycles */ if (mmc_req_freq <= 400000) host->ns_in_one_cycle = (1000000) / (((mmc_pclk / (2 * (mmc_push_pull_divisor + 1)))/1000)); else host->ns_in_one_cycle = (1000000) / (((mmc_pclk / (2 * (mmc_push_pull_divisor + 1)))/1000000)); return mmc_push_pull_divisor; } static void calculate_clk_divider(struct mmc_host *mmc, struct mmc_ios *ios) { unsigned int open_drain_freq = 0, mmc_pclk = 0; unsigned int mmc_push_pull_freq = 0; struct 
mmc_davinci_host *host = mmc_priv(mmc); if (ios->bus_mode == MMC_BUSMODE_OPENDRAIN) { u32 temp; /* Ignoring the init clock value passed for fixing the inter * operability with different cards. */ open_drain_freq = ((unsigned int)mmc_pclk / (2 * MMCSD_INIT_CLOCK)) - 1; if (open_drain_freq > 0xFF) open_drain_freq = 0xFF; temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; temp |= open_drain_freq; writel(temp, host->base + DAVINCI_MMCCLK); /* Convert ns to clock cycles */ host->ns_in_one_cycle = (1000000) / (MMCSD_INIT_CLOCK/1000); } else { u32 temp; mmc_push_pull_freq = calculate_freq_for_card(host, ios->clock); if (mmc_push_pull_freq > 0xFF) mmc_push_pull_freq = 0xFF; temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKEN; writel(temp, host->base + DAVINCI_MMCCLK); udelay(10); temp = readl(host->base + DAVINCI_MMCCLK) & ~MMCCLK_CLKRT_MASK; temp |= mmc_push_pull_freq; writel(temp, host->base + DAVINCI_MMCCLK); writel(temp | MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); udelay(10); } } static void mmc_davinci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) { struct mmc_davinci_host *host = mmc_priv(mmc); struct platform_device *pdev = to_platform_device(mmc->parent); struct davinci_mmc_config *config = pdev->dev.platform_data; dev_dbg(mmc_dev(host->mmc), "clock %dHz busmode %d powermode %d Vdd %04x\n", ios->clock, ios->bus_mode, ios->power_mode, ios->vdd); switch (ios->power_mode) { case MMC_POWER_OFF: if (config && config->set_power) config->set_power(pdev->id, false); break; case MMC_POWER_UP: if (config && config->set_power) config->set_power(pdev->id, true); break; } switch (ios->bus_width) { case MMC_BUS_WIDTH_8: dev_dbg(mmc_dev(host->mmc), "Enabling 8 bit mode\n"); writel((readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT) | MMCCTL_WIDTH_8_BIT, host->base + DAVINCI_MMCCTL); break; case MMC_BUS_WIDTH_4: dev_dbg(mmc_dev(host->mmc), "Enabling 4 bit mode\n"); if (host->version == MMC_CTLR_VERSION_2) writel((readl(host->base + DAVINCI_MMCCTL) & 
~MMCCTL_WIDTH_8_BIT) | MMCCTL_WIDTH_4_BIT, host->base + DAVINCI_MMCCTL); else writel(readl(host->base + DAVINCI_MMCCTL) | MMCCTL_WIDTH_4_BIT, host->base + DAVINCI_MMCCTL); break; case MMC_BUS_WIDTH_1: dev_dbg(mmc_dev(host->mmc), "Enabling 1 bit mode\n"); if (host->version == MMC_CTLR_VERSION_2) writel(readl(host->base + DAVINCI_MMCCTL) & ~(MMCCTL_WIDTH_8_BIT | MMCCTL_WIDTH_4_BIT), host->base + DAVINCI_MMCCTL); else writel(readl(host->base + DAVINCI_MMCCTL) & ~MMCCTL_WIDTH_4_BIT, host->base + DAVINCI_MMCCTL); break; } calculate_clk_divider(mmc, ios); host->bus_mode = ios->bus_mode; if (ios->power_mode == MMC_POWER_UP) { unsigned long timeout = jiffies + msecs_to_jiffies(50); bool lose = true; /* Send clock cycles, poll completion */ writel(0, host->base + DAVINCI_MMCARGHL); writel(MMCCMD_INITCK, host->base + DAVINCI_MMCCMD); while (time_before(jiffies, timeout)) { u32 tmp = readl(host->base + DAVINCI_MMCST0); if (tmp & MMCST0_RSPDNE) { lose = false; break; } cpu_relax(); } if (lose) dev_warn(mmc_dev(host->mmc), "powerup timeout\n"); } /* FIXME on power OFF, reset things ... */ } static void mmc_davinci_xfer_done(struct mmc_davinci_host *host, struct mmc_data *data) { host->data = NULL; if (host->mmc->caps & MMC_CAP_SDIO_IRQ) { /* * SDIO Interrupt Detection work-around as suggested by * Davinci Errata (TMS320DM355 Silicon Revision 1.1 Errata * 2.1.6): Signal SDIO interrupt only if it is enabled by core */ if (host->sdio_int && !(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) { writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST); mmc_signal_sdio_irq(host->mmc); } } if (host->do_dma) { davinci_abort_dma(host); dma_unmap_sg(mmc_dev(host->mmc), data->sg, data->sg_len, (data->flags & MMC_DATA_WRITE) ? 
			DMA_TO_DEVICE : DMA_FROM_DEVICE);
		host->do_dma = false;
	}
	host->data_dir = DAVINCI_MMC_DATADIR_NONE;

	/* Without a stop command (or after a command error), the request
	 * is finished now; otherwise issue the stop command.
	 */
	if (!data->stop || (host->cmd && host->cmd->error)) {
		mmc_request_done(host->mmc, data->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	} else
		mmc_davinci_start_command(host, data->stop);
}

/*
 * Command phase completion: latch the response registers into cmd->resp
 * and, when there is no data phase (or the command failed), finish the
 * request and mask further controller interrupts.
 */
static void mmc_davinci_cmd_done(struct mmc_davinci_host *host,
				 struct mmc_command *cmd)
{
	host->cmd = NULL;

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* response type 2: 128 bits across RSP01..RSP67 */
			cmd->resp[3] = readl(host->base + DAVINCI_MMCRSP01);
			cmd->resp[2] = readl(host->base + DAVINCI_MMCRSP23);
			cmd->resp[1] = readl(host->base + DAVINCI_MMCRSP45);
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		} else {
			/* response types 1, 1b, 3, 4, 5, 6 */
			cmd->resp[0] = readl(host->base + DAVINCI_MMCRSP67);
		}
	}

	if (host->data == NULL || cmd->error) {
		/* a timed-out command is not worth retrying */
		if (cmd->error == -ETIMEDOUT)
			cmd->mrq->cmd->retries = 0;
		mmc_request_done(host->mmc, cmd->mrq);
		writel(0, host->base + DAVINCI_MMCIM);
		host->active_request = false;
	}
}

/* Assert (val != 0) or release (val == 0) the CMD and DAT line resets. */
static inline void mmc_davinci_reset_ctrl(struct mmc_davinci_host *host,
								int val)
{
	u32 temp;

	temp = readl(host->base + DAVINCI_MMCCTL);
	if (val)	/* reset */
		temp |= MMCCTL_CMDRST | MMCCTL_DATRST;
	else		/* enable */
		temp &= ~(MMCCTL_CMDRST | MMCCTL_DATRST);

	writel(temp, host->base + DAVINCI_MMCCTL);
	udelay(10);
}

/*
 * Abort an in-progress data phase by pulsing the controller resets.
 * NOTE(review): the @data argument is unused here; kept for symmetry
 * with the call sites.
 */
static void
davinci_abort_data(struct mmc_davinci_host *host, struct mmc_data *data)
{
	mmc_davinci_reset_ctrl(host, 1);
	mmc_davinci_reset_ctrl(host, 0);
}

/* SDIO card interrupt: acknowledge in SDIOIST and forward to the core. */
static irqreturn_t mmc_davinci_sdio_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = dev_id;
	unsigned int status;

	status = readl(host->base + DAVINCI_SDIOIST);
	if (status & SDIOIST_IOINT) {
		dev_dbg(mmc_dev(host->mmc),
			"SDIO interrupt status %x\n", status);
		writel(status | SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
		mmc_signal_sdio_irq(host->mmc);
	}
	return IRQ_HANDLED;
}

/*
 * Main MMC interrupt handler; also invoked directly (irq == 0) by the
 * polling fast path in mmc_davinci_start_command().
 */
static irqreturn_t mmc_davinci_irq(int irq, void *dev_id)
{
	struct mmc_davinci_host *host = (struct
mmc_davinci_host *)dev_id;
	unsigned int status, qstatus;
	int end_command = 0;
	int end_transfer = 0;
	struct mmc_data *data = host->data;

	/* Nothing outstanding: treat as spurious and mask the controller. */
	if (host->cmd == NULL && host->data == NULL) {
		status = readl(host->base + DAVINCI_MMCST0);
		dev_dbg(mmc_dev(host->mmc),
			"Spurious interrupt 0x%04x\n", status);
		/* Disable the interrupt from mmcsd */
		writel(0, host->base + DAVINCI_MMCIM);
		return IRQ_NONE;
	}

	status = readl(host->base + DAVINCI_MMCST0);
	qstatus = status;

	/* handle FIFO first when using PIO for data.
	 * bytes_left will decrease to zero as I/O progresses and status will
	 * read zero over iteration because this controller status
	 * register (MMCST0) reports any status only once and it is cleared
	 * by read. So, it is not an unbounded loop even in the case of
	 * non-dma.
	 */
	if (host->bytes_left && (status & (MMCST0_DXRDY | MMCST0_DRRDY))) {
		unsigned long im_val;

		/*
		 * If interrupts fire during the following loop, they will be
		 * handled by the handler, but the PIC will still buffer these.
		 * As a result, the handler will be called again to serve these
		 * needlessly. In order to avoid these spurious interrupts,
		 * keep interrupts masked during the loop.
		 */
		im_val = readl(host->base + DAVINCI_MMCIM);
		writel(0, host->base + DAVINCI_MMCIM);

		do {
			davinci_fifo_data_trans(host, rw_threshold);
			status = readl(host->base + DAVINCI_MMCST0);
			qstatus |= status;
		} while (host->bytes_left &&
			 (status & (MMCST0_DXRDY | MMCST0_DRRDY)));

		/*
		 * If an interrupt is pending, it is assumed it will fire when
		 * it is unmasked. This assumption is also taken when the MMCIM
		 * is first set. Otherwise, writing to MMCIM after reading the
		 * status is race-prone.
		 */
		writel(im_val, host->base + DAVINCI_MMCIM);
	}

	if (qstatus & MMCST0_DATDNE) {
		/* All blocks sent/received, and CRC checks passed */
		if (data != NULL) {
			if ((host->do_dma == 0) && (host->bytes_left > 0)) {
				/* if datasize < rw_threshold
				 * no RX ints are generated
				 */
				davinci_fifo_data_trans(host, host->bytes_left);
			}
			end_transfer = 1;
			data->bytes_xfered = data->blocks * data->blksz;
		} else {
			dev_err(mmc_dev(host->mmc),
					"DATDNE with no host->data\n");
		}
	}

	/*
	 * NOTE(review): unlike the DATDNE path above, the TOUTRD and CRC
	 * paths below dereference @data without a NULL check -- presumably
	 * these bits can only be set while a data transfer is outstanding;
	 * confirm that guarantee.
	 */
	if (qstatus & MMCST0_TOUTRD) {
		/* Read data timeout */
		data->error = -ETIMEDOUT;
		end_transfer = 1;

		dev_dbg(mmc_dev(host->mmc),
			"read data timeout, status %x\n",
			qstatus);

		davinci_abort_data(host, data);
	}

	if (qstatus & (MMCST0_CRCWR | MMCST0_CRCRD)) {
		/* Data CRC error */
		data->error = -EILSEQ;
		end_transfer = 1;

		/* NOTE: this controller uses CRCWR to report both CRC
		 * errors and timeouts (on writes).  MMCDRSP values are
		 * only weakly documented, but 0x9f was clearly a timeout
		 * case and the two three-bit patterns in various SD specs
		 * (101, 010) aren't part of it ...
		 */
		if (qstatus & MMCST0_CRCWR) {
			u32 temp = readb(host->base + DAVINCI_MMCDRSP);

			if (temp == 0x9f)
				data->error = -ETIMEDOUT;
		}
		dev_dbg(mmc_dev(host->mmc), "data %s %s error\n",
			(qstatus & MMCST0_CRCWR) ? "write" : "read",
			(data->error == -ETIMEDOUT) ?
			"timeout" : "CRC");

		davinci_abort_data(host, data);
	}

	if (qstatus & MMCST0_TOUTRS) {
		/* Command timeout */
		if (host->cmd) {
			dev_dbg(mmc_dev(host->mmc),
				"CMD%d timeout, status %x\n",
				host->cmd->opcode, qstatus);
			host->cmd->error = -ETIMEDOUT;
			/* with a data phase pending, abort it too */
			if (data) {
				end_transfer = 1;
				davinci_abort_data(host, data);
			} else
				end_command = 1;
		}
	}

	if (qstatus & MMCST0_CRCRS) {
		/* Command CRC error */
		dev_dbg(mmc_dev(host->mmc), "Command CRC error\n");
		if (host->cmd) {
			host->cmd->error = -EILSEQ;
			end_command = 1;
		}
	}

	if (qstatus & MMCST0_RSPDNE) {
		/* End of command phase */
		/*
		 * NOTE(review): casting a pointer to int truncates on 64-bit
		 * builds; harmless as a boolean only because any non-NULL
		 * host->cmd here is what matters -- confirm if this driver
		 * is ever built 64-bit.
		 */
		end_command = (int) host->cmd;
	}

	if (end_command)
		mmc_davinci_cmd_done(host, host->cmd);
	if (end_transfer)
		mmc_davinci_xfer_done(host, data);
	return IRQ_HANDLED;
}

/* Card-detect hook: defer to platform data; -ENOSYS means "poll". */
static int mmc_davinci_get_cd(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (!config || !config->get_cd)
		return -ENOSYS;
	return config->get_cd(pdev->id);
}

/* Write-protect hook: defer to platform data; -ENOSYS means "unknown". */
static int mmc_davinci_get_ro(struct mmc_host *mmc)
{
	struct platform_device *pdev = to_platform_device(mmc->parent);
	struct davinci_mmc_config *config = pdev->dev.platform_data;

	if (!config || !config->get_ro)
		return -ENOSYS;
	return config->get_ro(pdev->id);
}

/*
 * Enable/disable SDIO card interrupt reporting.  When enabling while
 * DAT1 is already low, an interrupt is pending: ack it and signal the
 * core immediately instead of arming SDIOIEN.
 */
static void mmc_davinci_enable_sdio_irq(struct mmc_host *mmc, int enable)
{
	struct mmc_davinci_host *host = mmc_priv(mmc);

	if (enable) {
		if (!(readl(host->base + DAVINCI_SDIOST0) & SDIOST0_DAT1_HI)) {
			writel(SDIOIST_IOINT, host->base + DAVINCI_SDIOIST);
			mmc_signal_sdio_irq(host->mmc);
		} else {
			host->sdio_int = true;
			writel(readl(host->base + DAVINCI_SDIOIEN) |
			       SDIOIEN_IOINTEN, host->base + DAVINCI_SDIOIEN);
		}
	} else {
		host->sdio_int = false;
		writel(readl(host->base + DAVINCI_SDIOIEN) & ~SDIOIEN_IOINTEN,
		       host->base + DAVINCI_SDIOIEN);
	}
}

/* mmc core callbacks for this host controller */
static struct mmc_host_ops mmc_davinci_ops = {
	.request	 = mmc_davinci_request,
	.set_ios	 = mmc_davinci_set_ios,
	.get_cd		 = mmc_davinci_get_cd,
	.get_ro		 = mmc_davinci_get_ro,
	.enable_sdio_irq =
mmc_davinci_enable_sdio_irq, }; /*----------------------------------------------------------------------*/ #ifdef CONFIG_CPU_FREQ static int mmc_davinci_cpufreq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct mmc_davinci_host *host; unsigned int mmc_pclk; struct mmc_host *mmc; unsigned long flags; host = container_of(nb, struct mmc_davinci_host, freq_transition); mmc = host->mmc; mmc_pclk = clk_get_rate(host->clk); if (val == CPUFREQ_POSTCHANGE) { spin_lock_irqsave(&mmc->lock, flags); host->mmc_input_clk = mmc_pclk; calculate_clk_divider(mmc, &mmc->ios); spin_unlock_irqrestore(&mmc->lock, flags); } return 0; } static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) { host->freq_transition.notifier_call = mmc_davinci_cpufreq_transition; return cpufreq_register_notifier(&host->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) { cpufreq_unregister_notifier(&host->freq_transition, CPUFREQ_TRANSITION_NOTIFIER); } #else static inline int mmc_davinci_cpufreq_register(struct mmc_davinci_host *host) { return 0; } static inline void mmc_davinci_cpufreq_deregister(struct mmc_davinci_host *host) { } #endif static void __init init_mmcsd_host(struct mmc_davinci_host *host) { mmc_davinci_reset_ctrl(host, 1); writel(0, host->base + DAVINCI_MMCCLK); writel(MMCCLK_CLKEN, host->base + DAVINCI_MMCCLK); writel(0x1FFF, host->base + DAVINCI_MMCTOR); writel(0xFFFF, host->base + DAVINCI_MMCTOD); mmc_davinci_reset_ctrl(host, 0); } static int __init davinci_mmcsd_probe(struct platform_device *pdev) { struct davinci_mmc_config *pdata = pdev->dev.platform_data; struct mmc_davinci_host *host = NULL; struct mmc_host *mmc = NULL; struct resource *r, *mem = NULL; int ret = 0, irq = 0; size_t mem_size; /* REVISIT: when we're fully converted, fail if pdata is NULL */ ret = -ENODEV; r = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if 
(!r || irq == NO_IRQ) goto out; ret = -EBUSY; mem_size = resource_size(r); mem = request_mem_region(r->start, mem_size, pdev->name); if (!mem) goto out; ret = -ENOMEM; mmc = mmc_alloc_host(sizeof(struct mmc_davinci_host), &pdev->dev); if (!mmc) goto out; host = mmc_priv(mmc); host->mmc = mmc; /* Important */ r = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (!r) goto out; host->rxdma = r->start; r = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (!r) goto out; host->txdma = r->start; host->mem_res = mem; host->base = ioremap(mem->start, mem_size); if (!host->base) goto out; ret = -ENXIO; host->clk = clk_get(&pdev->dev, "MMCSDCLK"); if (IS_ERR(host->clk)) { ret = PTR_ERR(host->clk); goto out; } clk_enable(host->clk); host->mmc_input_clk = clk_get_rate(host->clk); init_mmcsd_host(host); if (pdata->nr_sg) host->nr_sg = pdata->nr_sg - 1; if (host->nr_sg > MAX_NR_SG || !host->nr_sg) host->nr_sg = MAX_NR_SG; host->use_dma = use_dma; host->mmc_irq = irq; host->sdio_irq = platform_get_irq(pdev, 1); if (host->use_dma && davinci_acquire_dma_channels(host) != 0) host->use_dma = 0; /* REVISIT: someday, support IRQ-driven card detection. */ mmc->caps |= MMC_CAP_NEEDS_POLL; mmc->caps |= MMC_CAP_WAIT_WHILE_BUSY; if (pdata && (pdata->wires == 4 || pdata->wires == 0)) mmc->caps |= MMC_CAP_4_BIT_DATA; if (pdata && (pdata->wires == 8)) mmc->caps |= (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA); host->version = pdata->version; mmc->ops = &mmc_davinci_ops; mmc->f_min = 312500; mmc->f_max = 25000000; if (pdata && pdata->max_freq) mmc->f_max = pdata->max_freq; if (pdata && pdata->caps) mmc->caps |= pdata->caps; mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; /* With no iommu coalescing pages, each phys_seg is a hw_seg. * Each hw_seg uses one EDMA parameter RAM slot, always one * channel and then usually some linked slots. 
*/ mmc->max_segs = 1 + host->n_link; /* EDMA limit per hw segment (one or two MBytes) */ mmc->max_seg_size = MAX_CCNT * rw_threshold; /* MMC/SD controller limits for multiblock requests */ mmc->max_blk_size = 4095; /* BLEN is 12 bits */ mmc->max_blk_count = 65535; /* NBLK is 16 bits */ mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count; dev_dbg(mmc_dev(host->mmc), "max_segs=%d\n", mmc->max_segs); dev_dbg(mmc_dev(host->mmc), "max_blk_size=%d\n", mmc->max_blk_size); dev_dbg(mmc_dev(host->mmc), "max_req_size=%d\n", mmc->max_req_size); dev_dbg(mmc_dev(host->mmc), "max_seg_size=%d\n", mmc->max_seg_size); platform_set_drvdata(pdev, host); ret = mmc_davinci_cpufreq_register(host); if (ret) { dev_err(&pdev->dev, "failed to register cpufreq\n"); goto cpu_freq_fail; } ret = mmc_add_host(mmc); if (ret < 0) goto out; ret = request_irq(irq, mmc_davinci_irq, 0, mmc_hostname(mmc), host); if (ret) goto out; if (host->sdio_irq >= 0) { ret = request_irq(host->sdio_irq, mmc_davinci_sdio_irq, 0, mmc_hostname(mmc), host); if (!ret) mmc->caps |= MMC_CAP_SDIO_IRQ; } rename_region(mem, mmc_hostname(mmc)); dev_info(mmc_dev(host->mmc), "Using %s, %d-bit mode\n", host->use_dma ? "DMA" : "PIO", (mmc->caps & MMC_CAP_4_BIT_DATA) ? 
4 : 1); return 0; out: mmc_davinci_cpufreq_deregister(host); cpu_freq_fail: if (host) { davinci_release_dma_channels(host); if (host->clk) { clk_disable(host->clk); clk_put(host->clk); } if (host->base) iounmap(host->base); } if (mmc) mmc_free_host(mmc); if (mem) release_resource(mem); dev_dbg(&pdev->dev, "probe err %d\n", ret); return ret; } static int __exit davinci_mmcsd_remove(struct platform_device *pdev) { struct mmc_davinci_host *host = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); if (host) { mmc_davinci_cpufreq_deregister(host); mmc_remove_host(host->mmc); free_irq(host->mmc_irq, host); if (host->mmc->caps & MMC_CAP_SDIO_IRQ) free_irq(host->sdio_irq, host); davinci_release_dma_channels(host); clk_disable(host->clk); clk_put(host->clk); iounmap(host->base); release_resource(host->mem_res); mmc_free_host(host->mmc); } return 0; } #ifdef CONFIG_PM static int davinci_mmcsd_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); int ret; ret = mmc_suspend_host(host->mmc); if (!ret) { writel(0, host->base + DAVINCI_MMCIM); mmc_davinci_reset_ctrl(host, 1); clk_disable(host->clk); host->suspended = 1; } else { host->suspended = 0; } return ret; } static int davinci_mmcsd_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mmc_davinci_host *host = platform_get_drvdata(pdev); int ret; if (!host->suspended) return 0; clk_enable(host->clk); mmc_davinci_reset_ctrl(host, 0); ret = mmc_resume_host(host->mmc); if (!ret) host->suspended = 0; return ret; } static const struct dev_pm_ops davinci_mmcsd_pm = { .suspend = davinci_mmcsd_suspend, .resume = davinci_mmcsd_resume, }; #define davinci_mmcsd_pm_ops (&davinci_mmcsd_pm) #else #define davinci_mmcsd_pm_ops NULL #endif static struct platform_driver davinci_mmcsd_driver = { .driver = { .name = "davinci_mmc", .owner = THIS_MODULE, .pm = davinci_mmcsd_pm_ops, }, .remove = 
__exit_p(davinci_mmcsd_remove),
};

/*
 * platform_driver_probe() is used (rather than .probe in the driver
 * struct) because the probe routine lives in __init memory.
 */
static int __init davinci_mmcsd_init(void)
{
    return platform_driver_probe(&davinci_mmcsd_driver,
                     davinci_mmcsd_probe);
}
module_init(davinci_mmcsd_init);

static void __exit davinci_mmcsd_exit(void)
{
    platform_driver_unregister(&davinci_mmcsd_driver);
}
module_exit(davinci_mmcsd_exit);

MODULE_AUTHOR("Texas Instruments India");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
gpl-2.0
ench0/kernel_samsung_hlte
drivers/scsi/pm8001/pm8001_hwi.c
4797
154311
/* * PMC-Sierra SPC 8001 SAS/SATA based host adapters driver * * Copyright (c) 2008-2009 USI Co., Ltd. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
 *
 */
#include <linux/slab.h>
#include "pm8001_sas.h"
#include "pm8001_hwi.h"
#include "pm8001_chips.h"
#include "pm8001_ctl.h"

/**
 * read_main_config_table - read the configure table and save it.
 * @pm8001_ha: our hba card information
 *
 * Snapshots the firmware's MPI main configuration table (BAR-mapped MSGU
 * memory) into the driver-private copy used for later programming.
 */
static void __devinit read_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
    void __iomem *address = pm8001_ha->main_cfg_tbl_addr;

    pm8001_ha->main_cfg_tbl.signature = pm8001_mr32(address, 0x00);
    pm8001_ha->main_cfg_tbl.interface_rev = pm8001_mr32(address, 0x04);
    pm8001_ha->main_cfg_tbl.firmware_rev = pm8001_mr32(address, 0x08);
    pm8001_ha->main_cfg_tbl.max_out_io = pm8001_mr32(address, 0x0C);
    pm8001_ha->main_cfg_tbl.max_sgl = pm8001_mr32(address, 0x10);
    pm8001_ha->main_cfg_tbl.ctrl_cap_flag = pm8001_mr32(address, 0x14);
    pm8001_ha->main_cfg_tbl.gst_offset = pm8001_mr32(address, 0x18);
    pm8001_ha->main_cfg_tbl.inbound_queue_offset =
        pm8001_mr32(address, MAIN_IBQ_OFFSET);
    pm8001_ha->main_cfg_tbl.outbound_queue_offset =
        pm8001_mr32(address, MAIN_OBQ_OFFSET);
    pm8001_ha->main_cfg_tbl.hda_mode_flag =
        pm8001_mr32(address, MAIN_HDA_FLAGS_OFFSET);

    /* read analog Setting offset from the configuration table */
    pm8001_ha->main_cfg_tbl.anolog_setup_table_offset =
        pm8001_mr32(address, MAIN_ANALOG_SETUP_OFFSET);

    /* read Error Dump Offset and Length */
    pm8001_ha->main_cfg_tbl.fatal_err_dump_offset0 =
        pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_OFFSET);
    pm8001_ha->main_cfg_tbl.fatal_err_dump_length0 =
        pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP0_LENGTH);
    pm8001_ha->main_cfg_tbl.fatal_err_dump_offset1 =
        pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_OFFSET);
    pm8001_ha->main_cfg_tbl.fatal_err_dump_length1 =
        pm8001_mr32(address, MAIN_FATAL_ERROR_RDUMP1_LENGTH);
}

/**
 * read_general_status_table - read the general status table and save it.
* @pm8001_ha: our hba card information */ static void __devinit read_general_status_table(struct pm8001_hba_info *pm8001_ha) { void __iomem *address = pm8001_ha->general_stat_tbl_addr; pm8001_ha->gs_tbl.gst_len_mpistate = pm8001_mr32(address, 0x00); pm8001_ha->gs_tbl.iq_freeze_state0 = pm8001_mr32(address, 0x04); pm8001_ha->gs_tbl.iq_freeze_state1 = pm8001_mr32(address, 0x08); pm8001_ha->gs_tbl.msgu_tcnt = pm8001_mr32(address, 0x0C); pm8001_ha->gs_tbl.iop_tcnt = pm8001_mr32(address, 0x10); pm8001_ha->gs_tbl.reserved = pm8001_mr32(address, 0x14); pm8001_ha->gs_tbl.phy_state[0] = pm8001_mr32(address, 0x18); pm8001_ha->gs_tbl.phy_state[1] = pm8001_mr32(address, 0x1C); pm8001_ha->gs_tbl.phy_state[2] = pm8001_mr32(address, 0x20); pm8001_ha->gs_tbl.phy_state[3] = pm8001_mr32(address, 0x24); pm8001_ha->gs_tbl.phy_state[4] = pm8001_mr32(address, 0x28); pm8001_ha->gs_tbl.phy_state[5] = pm8001_mr32(address, 0x2C); pm8001_ha->gs_tbl.phy_state[6] = pm8001_mr32(address, 0x30); pm8001_ha->gs_tbl.phy_state[7] = pm8001_mr32(address, 0x34); pm8001_ha->gs_tbl.reserved1 = pm8001_mr32(address, 0x38); pm8001_ha->gs_tbl.reserved2 = pm8001_mr32(address, 0x3C); pm8001_ha->gs_tbl.reserved3 = pm8001_mr32(address, 0x40); pm8001_ha->gs_tbl.recover_err_info[0] = pm8001_mr32(address, 0x44); pm8001_ha->gs_tbl.recover_err_info[1] = pm8001_mr32(address, 0x48); pm8001_ha->gs_tbl.recover_err_info[2] = pm8001_mr32(address, 0x4C); pm8001_ha->gs_tbl.recover_err_info[3] = pm8001_mr32(address, 0x50); pm8001_ha->gs_tbl.recover_err_info[4] = pm8001_mr32(address, 0x54); pm8001_ha->gs_tbl.recover_err_info[5] = pm8001_mr32(address, 0x58); pm8001_ha->gs_tbl.recover_err_info[6] = pm8001_mr32(address, 0x5C); pm8001_ha->gs_tbl.recover_err_info[7] = pm8001_mr32(address, 0x60); } /** * read_inbnd_queue_table - read the inbound queue table and save it. 
* @pm8001_ha: our hba card information */ static void __devinit read_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { int inbQ_num = 1; int i; void __iomem *address = pm8001_ha->inbnd_q_tbl_addr; for (i = 0; i < inbQ_num; i++) { u32 offset = i * 0x20; pm8001_ha->inbnd_q_tbl[i].pi_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); pm8001_ha->inbnd_q_tbl[i].pi_offset = pm8001_mr32(address, (offset + 0x18)); } } /** * read_outbnd_queue_table - read the outbound queue table and save it. * @pm8001_ha: our hba card information */ static void __devinit read_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha) { int outbQ_num = 1; int i; void __iomem *address = pm8001_ha->outbnd_q_tbl_addr; for (i = 0; i < outbQ_num; i++) { u32 offset = i * 0x24; pm8001_ha->outbnd_q_tbl[i].ci_pci_bar = get_pci_bar_index(pm8001_mr32(address, (offset + 0x14))); pm8001_ha->outbnd_q_tbl[i].ci_offset = pm8001_mr32(address, (offset + 0x18)); } } /** * init_default_table_values - init the default table. 
 * @pm8001_ha: our hba card information
 *
 * Fills the driver-private copies of the main configuration table and the
 * inbound/outbound queue tables with their power-on defaults, pointing the
 * event logs and queues at the DMA regions allocated in memoryMap.
 */
static void __devinit
init_default_table_values(struct pm8001_hba_info *pm8001_ha)
{
    int qn = 1;
    int i;
    u32 offsetib, offsetob;
    void __iomem *addressib = pm8001_ha->inbnd_q_tbl_addr;
    void __iomem *addressob = pm8001_ha->outbnd_q_tbl_addr;

    /* All outbound event producer-queue-ID routing disabled by default. */
    pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd = 0;
    pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3 = 0;
    pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7 = 0;
    pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3 = 0;
    pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7 = 0;
    pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3 = 0;
    pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7 = 0;
    pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3 = 0;
    pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7 = 0;
    pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3 = 0;
    pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7 = 0;

    /* AAP1 and IOP event logs live in the AAP1/IOP DMA regions. */
    pm8001_ha->main_cfg_tbl.upper_event_log_addr =
        pm8001_ha->memoryMap.region[AAP1].phys_addr_hi;
    pm8001_ha->main_cfg_tbl.lower_event_log_addr =
        pm8001_ha->memoryMap.region[AAP1].phys_addr_lo;
    pm8001_ha->main_cfg_tbl.event_log_size = PM8001_EVENT_LOG_SIZE;
    pm8001_ha->main_cfg_tbl.event_log_option = 0x01;
    pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr =
        pm8001_ha->memoryMap.region[IOP].phys_addr_hi;
    pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr =
        pm8001_ha->memoryMap.region[IOP].phys_addr_lo;
    pm8001_ha->main_cfg_tbl.iop_event_log_size = PM8001_EVENT_LOG_SIZE;
    pm8001_ha->main_cfg_tbl.iop_event_log_option = 0x01;
    pm8001_ha->main_cfg_tbl.fatal_err_interrupt = 0x01;

    for (i = 0; i < qn; i++) {
        /* 256 elements of 64 bytes, normal priority. */
        pm8001_ha->inbnd_q_tbl[i].element_pri_size_cnt =
            0x00000100 | (0x00000040 << 16) | (0x00<<30);
        pm8001_ha->inbnd_q_tbl[i].upper_base_addr =
            pm8001_ha->memoryMap.region[IB].phys_addr_hi;
        pm8001_ha->inbnd_q_tbl[i].lower_base_addr =
            pm8001_ha->memoryMap.region[IB].phys_addr_lo;
        pm8001_ha->inbnd_q_tbl[i].base_virt =
            (u8 *)pm8001_ha->memoryMap.region[IB].virt_ptr;
        pm8001_ha->inbnd_q_tbl[i].total_length =
            pm8001_ha->memoryMap.region[IB].total_len;
        pm8001_ha->inbnd_q_tbl[i].ci_upper_base_addr =
            pm8001_ha->memoryMap.region[CI].phys_addr_hi;
        pm8001_ha->inbnd_q_tbl[i].ci_lower_base_addr =
            pm8001_ha->memoryMap.region[CI].phys_addr_lo;
        pm8001_ha->inbnd_q_tbl[i].ci_virt =
            pm8001_ha->memoryMap.region[CI].virt_ptr;
        offsetib = i * 0x20;
        pm8001_ha->inbnd_q_tbl[i].pi_pci_bar =
            get_pci_bar_index(pm8001_mr32(addressib,
                (offsetib + 0x14)));
        pm8001_ha->inbnd_q_tbl[i].pi_offset =
            pm8001_mr32(addressib, (offsetib + 0x18));
        pm8001_ha->inbnd_q_tbl[i].producer_idx = 0;
        pm8001_ha->inbnd_q_tbl[i].consumer_index = 0;
    }
    for (i = 0; i < qn; i++) {
        /* 256 elements of 64 bytes, interrupt enabled (bit 30). */
        pm8001_ha->outbnd_q_tbl[i].element_size_cnt =
            256 | (64 << 16) | (1<<30);
        pm8001_ha->outbnd_q_tbl[i].upper_base_addr =
            pm8001_ha->memoryMap.region[OB].phys_addr_hi;
        pm8001_ha->outbnd_q_tbl[i].lower_base_addr =
            pm8001_ha->memoryMap.region[OB].phys_addr_lo;
        pm8001_ha->outbnd_q_tbl[i].base_virt =
            (u8 *)pm8001_ha->memoryMap.region[OB].virt_ptr;
        pm8001_ha->outbnd_q_tbl[i].total_length =
            pm8001_ha->memoryMap.region[OB].total_len;
        pm8001_ha->outbnd_q_tbl[i].pi_upper_base_addr =
            pm8001_ha->memoryMap.region[PI].phys_addr_hi;
        pm8001_ha->outbnd_q_tbl[i].pi_lower_base_addr =
            pm8001_ha->memoryMap.region[PI].phys_addr_lo;
        /* Interrupt vector 0, coalescing count 10, no delay. */
        pm8001_ha->outbnd_q_tbl[i].interrup_vec_cnt_delay =
            0 | (10 << 16) | (0 << 24);
        pm8001_ha->outbnd_q_tbl[i].pi_virt =
            pm8001_ha->memoryMap.region[PI].virt_ptr;
        offsetob = i * 0x24;
        pm8001_ha->outbnd_q_tbl[i].ci_pci_bar =
            get_pci_bar_index(pm8001_mr32(addressob,
            offsetob + 0x14));
        pm8001_ha->outbnd_q_tbl[i].ci_offset =
            pm8001_mr32(addressob, (offsetob + 0x18));
        pm8001_ha->outbnd_q_tbl[i].consumer_idx = 0;
        pm8001_ha->outbnd_q_tbl[i].producer_index = 0;
    }
}

/**
 * update_main_config_table - update the main default table to the HBA.
 * @pm8001_ha: our hba card information
 *
 * Writes the driver-private main configuration table values back into the
 * firmware's BAR-mapped table (offsets 0x24 .. 0x70).
 */
static void __devinit
update_main_config_table(struct pm8001_hba_info *pm8001_ha)
{
    void __iomem *address = pm8001_ha->main_cfg_tbl_addr;

    pm8001_mw32(address, 0x24,
        pm8001_ha->main_cfg_tbl.inbound_q_nppd_hppd);
    pm8001_mw32(address, 0x28,
        pm8001_ha->main_cfg_tbl.outbound_hw_event_pid0_3);
    pm8001_mw32(address, 0x2C,
        pm8001_ha->main_cfg_tbl.outbound_hw_event_pid4_7);
    pm8001_mw32(address, 0x30,
        pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid0_3);
    pm8001_mw32(address, 0x34,
        pm8001_ha->main_cfg_tbl.outbound_ncq_event_pid4_7);
    pm8001_mw32(address, 0x38,
        pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid0_3);
    pm8001_mw32(address, 0x3C,
        pm8001_ha->main_cfg_tbl.outbound_tgt_ITNexus_event_pid4_7);
    pm8001_mw32(address, 0x40,
        pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid0_3);
    pm8001_mw32(address, 0x44,
        pm8001_ha->main_cfg_tbl.outbound_tgt_ssp_event_pid4_7);
    pm8001_mw32(address, 0x48,
        pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid0_3);
    pm8001_mw32(address, 0x4C,
        pm8001_ha->main_cfg_tbl.outbound_tgt_smp_event_pid4_7);
    pm8001_mw32(address, 0x50,
        pm8001_ha->main_cfg_tbl.upper_event_log_addr);
    pm8001_mw32(address, 0x54,
        pm8001_ha->main_cfg_tbl.lower_event_log_addr);
    pm8001_mw32(address, 0x58, pm8001_ha->main_cfg_tbl.event_log_size);
    pm8001_mw32(address, 0x5C, pm8001_ha->main_cfg_tbl.event_log_option);
    pm8001_mw32(address, 0x60,
        pm8001_ha->main_cfg_tbl.upper_iop_event_log_addr);
    pm8001_mw32(address, 0x64,
        pm8001_ha->main_cfg_tbl.lower_iop_event_log_addr);
    pm8001_mw32(address, 0x68, pm8001_ha->main_cfg_tbl.iop_event_log_size);
    pm8001_mw32(address, 0x6C,
        pm8001_ha->main_cfg_tbl.iop_event_log_option);
    pm8001_mw32(address, 0x70,
        pm8001_ha->main_cfg_tbl.fatal_err_interrupt);
}

/**
 * update_inbnd_queue_table - update the inbound queue table to the HBA.
 * @pm8001_ha: our hba card information
 *
 * Writes queue @number's inbound-queue descriptor (element count/size,
 * base address, CI address) into the firmware table; entries are 0x20
 * bytes apart.
 */
static void __devinit
update_inbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
{
    void __iomem *address = pm8001_ha->inbnd_q_tbl_addr;
    u16 offset = number * 0x20;

    pm8001_mw32(address, offset + 0x00,
        pm8001_ha->inbnd_q_tbl[number].element_pri_size_cnt);
    pm8001_mw32(address, offset + 0x04,
        pm8001_ha->inbnd_q_tbl[number].upper_base_addr);
    pm8001_mw32(address, offset + 0x08,
        pm8001_ha->inbnd_q_tbl[number].lower_base_addr);
    pm8001_mw32(address, offset + 0x0C,
        pm8001_ha->inbnd_q_tbl[number].ci_upper_base_addr);
    pm8001_mw32(address, offset + 0x10,
        pm8001_ha->inbnd_q_tbl[number].ci_lower_base_addr);
}

/**
 * update_outbnd_queue_table - update the outbound queue table to the HBA.
 * @pm8001_ha: our hba card information
 *
 * Writes queue @number's outbound-queue descriptor (element count/size,
 * base address, PI address, interrupt coalescing) into the firmware
 * table; entries are 0x24 bytes apart.
 */
static void __devinit
update_outbnd_queue_table(struct pm8001_hba_info *pm8001_ha, int number)
{
    void __iomem *address = pm8001_ha->outbnd_q_tbl_addr;
    u16 offset = number * 0x24;

    pm8001_mw32(address, offset + 0x00,
        pm8001_ha->outbnd_q_tbl[number].element_size_cnt);
    pm8001_mw32(address, offset + 0x04,
        pm8001_ha->outbnd_q_tbl[number].upper_base_addr);
    pm8001_mw32(address, offset + 0x08,
        pm8001_ha->outbnd_q_tbl[number].lower_base_addr);
    pm8001_mw32(address, offset + 0x0C,
        pm8001_ha->outbnd_q_tbl[number].pi_upper_base_addr);
    pm8001_mw32(address, offset + 0x10,
        pm8001_ha->outbnd_q_tbl[number].pi_lower_base_addr);
    pm8001_mw32(address, offset + 0x1C,
        pm8001_ha->outbnd_q_tbl[number].interrup_vec_cnt_delay);
}

/**
 * pm8001_bar4_shift - function is called to shift BAR base address
 * @pm8001_ha : our hba card information
 * @shiftValue : shifting value in memory bar.
 *
 * Programs the inbound AXI translation register so that BAR4 windows onto
 * a different region of chip memory, then polls (up to 1 s) until the
 * write is reflected back.  Returns 0 on success, -1 on timeout.
 */
int pm8001_bar4_shift(struct pm8001_hba_info *pm8001_ha, u32 shiftValue)
{
    u32 regVal;
    unsigned long start;

    /* program the inbound AXI translation Lower Address */
    pm8001_cw32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW, shiftValue);

    /* confirm the setting is written */
    start = jiffies + HZ; /* 1 sec */
    do {
        regVal = pm8001_cr32(pm8001_ha, 1, SPC_IBW_AXI_TRANSLATION_LOW);
    } while ((regVal != shiftValue) && time_before(jiffies, start));

    if (regVal != shiftValue) {
        PM8001_INIT_DBG(pm8001_ha,
            pm8001_printk("TIMEOUT:SPC_IBW_AXI_TRANSLATION_LOW"
            " = 0x%x\n", regVal));
        return -1;
    }
    return 0;
}

/**
 * mpi_set_phys_g3_with_ssc
 * @pm8001_ha: our hba card information
 * @SSCbit: set SSCbit to 0 to disable all phys ssc; 1 to enable all phys ssc.
 *
 * NOTE(review): @SSCbit is never read in this version — the same value
 * (0x80001501) is written for every phy regardless.  Confirm against the
 * intended SSC policy before relying on the parameter.
 */
static void __devinit
mpi_set_phys_g3_with_ssc(struct pm8001_hba_info *pm8001_ha, u32 SSCbit)
{
    u32 value, offset, i;
    unsigned long flags;

#define SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR 0x00040000
#define SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET 0x1074
#define SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET 0x1074
#define PHY_G3_WITHOUT_SSC_BIT_SHIFT 12
#define PHY_G3_WITH_SSC_BIT_SHIFT 13
#define SNW3_PHY_CAPABILITIES_PARITY 31

   /*
    * Using shifted destination address 0x3_0000:0x1074 + 0x4000*N (N=0:3)
    * Using shifted destination address 0x4_0000:0x1074 + 0x4000*(N-4) (N=4:7)
    */
    spin_lock_irqsave(&pm8001_ha->lock, flags);
    if (-1 == pm8001_bar4_shift(pm8001_ha,
        SAS2_SETTINGS_LOCAL_PHY_0_3_SHIFT_ADDR)) {
        spin_unlock_irqrestore(&pm8001_ha->lock, flags);
        return;
    }

    for (i = 0; i < 4; i++) {
        offset = SAS2_SETTINGS_LOCAL_PHY_0_3_OFFSET + 0x4000 * i;
        pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
    }
    /* shift membase 3 for SAS2_SETTINGS_LOCAL_PHY 4 - 7 */
    if (-1 == pm8001_bar4_shift(pm8001_ha,
        SAS2_SETTINGS_LOCAL_PHY_4_7_SHIFT_ADDR)) {
        spin_unlock_irqrestore(&pm8001_ha->lock, flags);
        return;
    }
    for (i = 4; i < 8; i++) {
        offset = SAS2_SETTINGS_LOCAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
        pm8001_cw32(pm8001_ha, 2, offset, 0x80001501);
    }
    /*************************************************************
    Change the SSC upspreading value to 0x0 so that upspreading is disabled.
    Device MABC SMOD0 Controls
    Address: (via MEMBASE-III):
    Using shifted destination address 0x0_0000: with Offset 0xD8

    31:28 R/W Reserved Do not change
    27:24 R/W SAS_SMOD_SPRDUP 0000
    23:20 R/W SAS_SMOD_SPRDDN 0000
    19:0  R/W  Reserved Do not change
    Upon power-up this register will read as 0x8990c016,
    and I would like you to change the SAS_SMOD_SPRDUP bits to 0b0000
    so that the written value will be 0x8090c016.
    This will ensure only down-spreading SSC is enabled on the SPC.
    *************************************************************/
    /* NOTE(review): the read result is discarded and the write below is
     * the hard-coded constant 0x8000C016, which does not match the
     * 0x8090c016 described above — confirm against the hardware errata. */
    value = pm8001_cr32(pm8001_ha, 2, 0xd8);
    pm8001_cw32(pm8001_ha, 2, 0xd8, 0x8000C016);

    /*set the shifted destination address to 0x0 to avoid error operation */
    pm8001_bar4_shift(pm8001_ha, 0x0);
    spin_unlock_irqrestore(&pm8001_ha->lock, flags);
    return;
}

/**
 * mpi_set_open_retry_interval_reg
 * @pm8001_ha: our hba card information
 * @interval - interval time for each OPEN_REJECT (RETRY). The units are in 1us.
 *
 * Programs the lower 16 bits of @interval into the per-phy OPEN_REJECT
 * (RETRY) interval register of all 8 phys, via two BAR4 window shifts
 * (phys 0-3, then 4-7), all under the HBA lock.
 */
static void __devinit
mpi_set_open_retry_interval_reg(struct pm8001_hba_info *pm8001_ha,
                u32 interval)
{
    u32 offset;
    u32 value;
    u32 i;
    unsigned long flags;

#define OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR 0x00030000
#define OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR 0x00040000
#define OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET 0x30B4
#define OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET 0x30B4
#define OPEN_RETRY_INTERVAL_REG_MASK 0x0000FFFF

    value = interval & OPEN_RETRY_INTERVAL_REG_MASK;
    spin_lock_irqsave(&pm8001_ha->lock, flags);
    /* shift bar and set the OPEN_REJECT(RETRY) interval time of PHY 0 -3.*/
    if (-1 == pm8001_bar4_shift(pm8001_ha,
                 OPEN_RETRY_INTERVAL_PHY_0_3_SHIFT_ADDR)) {
        spin_unlock_irqrestore(&pm8001_ha->lock, flags);
        return;
    }
    for (i = 0; i < 4; i++) {
        offset = OPEN_RETRY_INTERVAL_PHY_0_3_OFFSET + 0x4000 * i;
        pm8001_cw32(pm8001_ha, 2, offset, value);
    }

    if (-1 == pm8001_bar4_shift(pm8001_ha,
                 OPEN_RETRY_INTERVAL_PHY_4_7_SHIFT_ADDR)) {
        spin_unlock_irqrestore(&pm8001_ha->lock, flags);
        return;
    }
    for (i = 4; i < 8; i++) {
        offset = OPEN_RETRY_INTERVAL_PHY_4_7_OFFSET + 0x4000 * (i-4);
        pm8001_cw32(pm8001_ha, 2, offset, value);
    }
    /*set the shifted destination address to 0x0 to avoid error operation */
    pm8001_bar4_shift(pm8001_ha, 0x0);
    spin_unlock_irqrestore(&pm8001_ha->lock, flags);
    return;
}

/**
 * mpi_init_check - check firmware initialization status.
 * @pm8001_ha: our hba card information
 *
 * Rings the inbound doorbell to tell the SPC firmware the configuration
 * tables were updated, busy-waits (up to ~1 s) for the firmware to clear
 * the doorbell bit, then verifies the MPI state reports INIT with no
 * error code in the upper half-word.  Returns 0 on success, -1 otherwise.
 */
static int mpi_init_check(struct pm8001_hba_info *pm8001_ha)
{
    u32 max_wait_count;
    u32 value;
    u32 gst_len_mpistate;

    /* Write bit0=1 to Inbound DoorBell Register to tell the SPC FW the
    table is updated */
    pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_UPDATE);
    /* wait until Inbound DoorBell Clear Register toggled */
    max_wait_count = 1 * 1000 * 1000;/* 1 sec */
    do {
        udelay(1);
        value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
        value &= SPC_MSGU_CFG_TABLE_UPDATE;
    } while ((value != 0) && (--max_wait_count));

    if (!max_wait_count)
        return -1;
    /* check the MPI-State for initialization */
    gst_len_mpistate =
        pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
        GST_GSTLEN_MPIS_OFFSET);
    if (GST_MPI_STATE_INIT != (gst_len_mpistate & GST_MPI_STATE_MASK))
        return -1;
    /* check MPI Initialization error */
    gst_len_mpistate = gst_len_mpistate >> 16;
    if (0x0000 != gst_len_mpistate)
        return -1;
    return 0;
}

/**
 * check_fw_ready - The LLDD check if the FW is ready, if not, return error.
 * @pm8001_ha: our hba card information
 *
 * Inspects scratch pads 1/2 for AAP/IOP error states, then polls (up to
 * ~1 s) for both to report ready.  Returns 0 when ready, -1 on any error
 * or timeout.
 */
static int check_fw_ready(struct pm8001_hba_info *pm8001_ha)
{
    u32 value, value1;
    u32 max_wait_count;

    /* check error state */
    value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
    value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
    /* check AAP error */
    if (SCRATCH_PAD1_ERR == (value & SCRATCH_PAD_STATE_MASK)) {
        /* error state */
        value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
        return -1;
    }

    /* check IOP error */
    if (SCRATCH_PAD2_ERR == (value1 & SCRATCH_PAD_STATE_MASK)) {
        /* error state */
        value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3);
        return -1;
    }

    /* bit 4-31 of scratch pad1 should be zeros if it is not
    in error state*/
    if (value & SCRATCH_PAD1_STATE_MASK) {
        /* error case */
        pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0);
        return -1;
    }

    /* bit 2, 4-31 of scratch pad2 should be zeros if it is not
    in error state */
    if (value1 & SCRATCH_PAD2_STATE_MASK) {
        /* error case */
        return -1;
    }

    max_wait_count = 1 * 1000 * 1000;/* 1 sec timeout */

    /* wait until scratch pad 1 and 2 registers in ready state  */
    do {
        udelay(1);
        value = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
            & SCRATCH_PAD1_RDY;
        value1 = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
            & SCRATCH_PAD2_RDY;
        if ((--max_wait_count) == 0)
            return -1;
    } while ((value != SCRATCH_PAD1_RDY) || (value1 != SCRATCH_PAD2_RDY));
    return 0;
}

/*
 * Decode scratch pad 0 (BAR index + offset of the main config table) and
 * derive the ioremapped addresses of the main config, general status and
 * in/outbound queue tables.
 */
static void init_pci_device_addresses(struct pm8001_hba_info *pm8001_ha)
{
    void __iomem *base_addr;
    u32 value;
    u32 offset;
    u32 pcibar;
    u32 pcilogic;

    value = pm8001_cr32(pm8001_ha, 0, 0x44);
    offset = value & 0x03FFFFFF;
    PM8001_INIT_DBG(pm8001_ha,
        pm8001_printk("Scratchpad 0 Offset: %x\n", offset));
    pcilogic = (value & 0xFC000000) >> 26;
    pcibar = get_pci_bar_index(pcilogic);
    PM8001_INIT_DBG(pm8001_ha,
        pm8001_printk("Scratchpad 0 PCI BAR: %d\n", pcibar));
    pm8001_ha->main_cfg_tbl_addr = base_addr =
        pm8001_ha->io_mem[pcibar].memvirtaddr + offset;
    pm8001_ha->general_stat_tbl_addr =
        base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x18);
    pm8001_ha->inbnd_q_tbl_addr =
        base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x1C);
    pm8001_ha->outbnd_q_tbl_addr =
        base_addr + pm8001_cr32(pm8001_ha, pcibar, offset + 0x20);
}

/**
 * pm8001_chip_init - the main init function that initialize whole PM8001 chip.
 * @pm8001_ha: our hba card information
 *
 * Return: 0 on success, -EBUSY if the firmware is not ready or MPI
 * initialization fails.
 */
static int __devinit pm8001_chip_init(struct pm8001_hba_info *pm8001_ha)
{
    /* check the firmware status */
    if (-1 == check_fw_ready(pm8001_ha)) {
        PM8001_FAIL_DBG(pm8001_ha,
            pm8001_printk("Firmware is not ready!\n"));
        return -EBUSY;
    }

    /* Initialize pci space address eg: mpi offset */
    init_pci_device_addresses(pm8001_ha);
    init_default_table_values(pm8001_ha);
    read_main_config_table(pm8001_ha);
    read_general_status_table(pm8001_ha);
    read_inbnd_queue_table(pm8001_ha);
    read_outbnd_queue_table(pm8001_ha);

    /* update main config table ,inbound table and outbound table */
    update_main_config_table(pm8001_ha);
    update_inbnd_queue_table(pm8001_ha, 0);
    update_outbnd_queue_table(pm8001_ha, 0);

    mpi_set_phys_g3_with_ssc(pm8001_ha, 0);

    /* 7->130ms, 34->500ms, 119->1.5s */
    mpi_set_open_retry_interval_reg(pm8001_ha, 119);

    /* notify firmware update finished and check initialization status */
    if (0 == mpi_init_check(pm8001_ha)) {
        PM8001_INIT_DBG(pm8001_ha,
            pm8001_printk("MPI initialize successful!\n"));
    } else
        return -EBUSY;

    /*This register is a 16-bit timer with a resolution of 1us. This is the
    timer used for interrupt delay/coalescing in the PCIe Application Layer.
    Zero is not a valid value. A value of 1 in the register will cause the
    interrupts to be normal. A value greater than 1 will cause coalescing
    delays.*/
    pm8001_cw32(pm8001_ha, 1, 0x0033c0, 0x1);
    pm8001_cw32(pm8001_ha, 1, 0x0033c4, 0x0);
    return 0;
}

/*
 * Tell the firmware to stop (table reset doorbell), then wait (up to ~1 s
 * each) for the doorbell to clear and for the MPI state to reach UNINIT.
 * Returns 0 on success, -1 on timeout.
 */
static int mpi_uninit_check(struct pm8001_hba_info *pm8001_ha)
{
    u32 max_wait_count;
    u32 value;
    u32 gst_len_mpistate;

    init_pci_device_addresses(pm8001_ha);
    /* Write bit1=1 to Inbound DoorBell Register to tell the SPC FW the
    table is stop */
    pm8001_cw32(pm8001_ha, 0, MSGU_IBDB_SET, SPC_MSGU_CFG_TABLE_RESET);

    /* wait until Inbound DoorBell Clear Register toggled */
    max_wait_count = 1 * 1000 * 1000;/* 1 sec */
    do {
        udelay(1);
        value = pm8001_cr32(pm8001_ha, 0, MSGU_IBDB_SET);
        value &= SPC_MSGU_CFG_TABLE_RESET;
    } while ((value != 0) && (--max_wait_count));

    if (!max_wait_count) {
        PM8001_FAIL_DBG(pm8001_ha,
            pm8001_printk("TIMEOUT:IBDB value/=0x%x\n", value));
        return -1;
    }

    /* check the MPI-State for termination in progress */
    /* wait until Inbound DoorBell Clear Register toggled */
    max_wait_count = 1 * 1000 * 1000;  /* 1 sec */
    do {
        udelay(1);
        gst_len_mpistate =
            pm8001_mr32(pm8001_ha->general_stat_tbl_addr,
            GST_GSTLEN_MPIS_OFFSET);
        if (GST_MPI_STATE_UNINIT ==
            (gst_len_mpistate & GST_MPI_STATE_MASK))
            break;
    } while (--max_wait_count);
    if (!max_wait_count) {
        PM8001_FAIL_DBG(pm8001_ha,
            pm8001_printk(" TIME OUT MPI State = 0x%x\n",
                gst_len_mpistate & GST_MPI_STATE_MASK));
        return -1;
    }
    return 0;
}

/**
 * soft_reset_ready_check - Function to check FW is ready for soft reset.
 * @pm8001_ha: our hba card information
 *
 * After shutting the MPI down, checks the FW-ready-for-reset flag in
 * scratch pad 2; if not set, triggers an NMI twice through the RB6 window
 * and re-checks after 100 ms.  Returns 0 when ready, -1 otherwise.
 */
static u32 soft_reset_ready_check(struct pm8001_hba_info *pm8001_ha)
{
    u32 regVal, regVal1, regVal2;

    if (mpi_uninit_check(pm8001_ha) != 0) {
        PM8001_FAIL_DBG(pm8001_ha,
            pm8001_printk("MPI state is not ready\n"));
        return -1;
    }

    /* read the scratch pad 2 register bit 2 */
    regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)
        & SCRATCH_PAD2_FWRDY_RST;
    if (regVal == SCRATCH_PAD2_FWRDY_RST) {
        PM8001_INIT_DBG(pm8001_ha,
            pm8001_printk("Firmware is ready for reset .\n"));
    } else {
        unsigned long flags;
        /* Trigger NMI twice via RB6 */
        spin_lock_irqsave(&pm8001_ha->lock, flags);
        if (-1 == pm8001_bar4_shift(pm8001_ha, RB6_ACCESS_REG)) {
            spin_unlock_irqrestore(&pm8001_ha->lock, flags);
            PM8001_FAIL_DBG(pm8001_ha,
                pm8001_printk("Shift Bar4 to 0x%x failed\n",
                    RB6_ACCESS_REG));
            return -1;
        }
        pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET,
            RB6_MAGIC_NUMBER_RST);
        pm8001_cw32(pm8001_ha, 2, SPC_RB6_OFFSET, RB6_MAGIC_NUMBER_RST);
        /* wait for 100 ms */
        mdelay(100);
        regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2) &
            SCRATCH_PAD2_FWRDY_RST;
        if (regVal != SCRATCH_PAD2_FWRDY_RST) {
            regVal1 = pm8001_cr32(pm8001_ha, 0,
                MSGU_SCRATCH_PAD_1);
            regVal2 = pm8001_cr32(pm8001_ha, 0,
                MSGU_SCRATCH_PAD_2);
            PM8001_FAIL_DBG(pm8001_ha,
                pm8001_printk("TIMEOUT:MSGU_SCRATCH_PAD1"
                "=0x%x, MSGU_SCRATCH_PAD2=0x%x\n",
                regVal1, regVal2));
            PM8001_FAIL_DBG(pm8001_ha,
                pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
                pm8001_cr32(pm8001_ha, 0,
                    MSGU_SCRATCH_PAD_0)));
            PM8001_FAIL_DBG(pm8001_ha,
                pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
                pm8001_cr32(pm8001_ha, 0,
                    MSGU_SCRATCH_PAD_3)));
            spin_unlock_irqrestore(&pm8001_ha->lock, flags);
            return -1;
        }
        spin_unlock_irqrestore(&pm8001_ha->lock, flags);
    }
    return 0;
}

/**
 * pm8001_chip_soft_rst - soft reset the PM8001 chip, so that the clear all
 * the FW register status to the originated status.
 * @pm8001_ha: our hba card information
 * @signature: signature in host scratch pad0 register.
 */
/*
 * Follows the SPC documented soft-reset sequence (steps 1-17).
 * Runs with pm8001_ha->lock held across the whole register sequence and
 * moves the BAR4 window several times; on every early exit the lock is
 * dropped first.  Returns 0 on success, -1 on any failure.
 */
static int
pm8001_chip_soft_rst(struct pm8001_hba_info *pm8001_ha, u32 signature)
{
	u32 regVal, toggleVal;
	u32 max_wait_count;
	u32 regVal1, regVal2, regVal3;
	unsigned long flags;

	/* step1: Check FW is ready for soft reset */
	if (soft_reset_ready_check(pm8001_ha) != 0) {
		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("FW is not ready\n"));
		return -1;
	}

	/* step 2: clear NMI status register on AAP1 and IOP, write the same
	value to clear */
	/* map 0x60000 to BAR4(0x20), BAR2(win) */
	spin_lock_irqsave(&pm8001_ha->lock, flags);
	if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_AAP1_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("Shift Bar4 to 0x%x failed\n",
			MBIC_AAP1_ADDR_BASE));
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("MBIC - NMI Enable VPE0 (IOP)= 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_IOP, 0x0);
	/* map 0x70000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, MBIC_IOP_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("Shift Bar4 to 0x%x failed\n",
			MBIC_IOP_ADDR_BASE));
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("MBIC - NMI Enable VPE0 (AAP1)= 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 2, MBIC_NMI_ENABLE_VPE0_AAP1, 0x0);

	/* mask and acknowledge pending PCIe event/error interrupts
	 * (interrupt registers are write-1-to-clear, hence regVal
	 * is written back) */
	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("PCIE -Event Interrupt Enable = 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT_ENABLE, 0x0);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("PCIE - Event Interrupt = 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 1, PCIE_EVENT_INTERRUPT, regVal);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("PCIE -Error Interrupt Enable = 0x%x\n",
		regVal));
	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT_ENABLE, 0x0);

	regVal = pm8001_cr32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("PCIE - Error Interrupt = 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 1, PCIE_ERROR_INTERRUPT, regVal);

	/* read the scratch pad 1 register bit 2; the firmware signals
	 * reset completion by toggling this bit (checked in step 15) */
	regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1)
		& SCRATCH_PAD1_RST;
	toggleVal = regVal ^ SCRATCH_PAD1_RST;

	/* set signature in host scratch pad0 register to tell SPC that the
	host performs the soft reset */
	pm8001_cw32(pm8001_ha, 0, MSGU_HOST_SCRATCH_PAD_0, signature);

	/* read required registers for confirmming */
	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("Shift Bar4 to 0x%x failed\n",
			GSM_ADDR_BASE));
		return -1;
	}

	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x0(0x00007b88)-GSM Configuration and"
		" Reset = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));

	/* step 3: host read GSM Configuration and Reset register */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
	/* Put those bits to low */
	/* GSM XCBI offset = 0x70 0000
	0x00 Bit 13 COM_SLV_SW_RSTB 1
	0x00 Bit 12 QSSP_SW_RSTB 1
	0x00 Bit 11 RAAE_SW_RSTB 1
	0x00 Bit 9 RB_1_SW_RSTB 1
	0x00 Bit 8 SM_SW_RSTB 1
	*/
	regVal &= ~(0x00003b00);
	/* host write GSM Configuration and Reset register */
	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x0 (0x00007b88 ==> 0x00004088) - GSM "
		"Configuration and Reset is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));

	/* step 4: disable the three GSM parity checks, saving the current
	 * enable values in regVal1/2/3 so step 12 can restore them */
	/* disable GSM - Read Address Parity Check */
	regVal1 = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700038 - Read Address Parity Check "
		"Enable = 0x%x\n", regVal1));
	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, 0x0);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
		"is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));

	/* disable GSM - Write Address Parity Check */
	regVal2 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700040 - Write Address Parity Check"
		" Enable = 0x%x\n", regVal2));
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, 0x0);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700040 - Write Address Parity Check "
		"Enable is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));

	/* disable GSM - Write Data Parity Check */
	regVal3 = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x300048 - Write Data Parity Check"
		" Enable = 0x%x\n", regVal3));
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, 0x0);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x300048 - Write Data Parity Check Enable"
		"is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));
	/* step 5: delay 10 usec */
	udelay(10);
	/* step 5-b: set GPIO-0 output control to tristate anyway */
	if (-1 == pm8001_bar4_shift(pm8001_ha, GPIO_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		PM8001_INIT_DBG(pm8001_ha,
				pm8001_printk("Shift Bar4 to 0x%x failed\n",
				GPIO_ADDR_BASE));
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET);
	PM8001_INIT_DBG(pm8001_ha,
			pm8001_printk("GPIO Output Control Register:"
			" = 0x%x\n", regVal));
	/* set GPIO-0 output control to tri-state */
	regVal &= 0xFFFFFFFC;
	pm8001_cw32(pm8001_ha, 2, GPIO_GPIO_0_0UTPUT_CTL_OFFSET, regVal);

	/* Step 6: Reset the IOP and AAP1 */
	/* map 0x00000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
			SPC_TOP_LEVEL_ADDR_BASE));
		return -1;
	}
	regVal =
	pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("Top Register before resetting IOP/AAP1"
		":= 0x%x\n", regVal));
	regVal &= ~(SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 7: Reset the BDMA/OSSP */
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("Top Register before resetting BDMA/OSSP"
		": = 0x%x\n", regVal));
	regVal &= ~(SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 8: delay 10 usec */
	udelay(10);

	/* step 9: bring the BDMA and OSSP out of reset */
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("Top Register before bringing up BDMA/OSSP"
		":= 0x%x\n", regVal));
	regVal |= (SPC_REG_RESET_BDMA_CORE | SPC_REG_RESET_OSSP);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 10: delay 10 usec */
	udelay(10);

	/* step 11: reads and sets the GSM Configuration and Reset Register */
	/* map 0x0700000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, GSM_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("SPC Shift Bar4 to 0x%x failed\n",
			GSM_ADDR_BASE));
		return -1;
	}
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x0 (0x00007b88)-GSM Configuration and "
		"Reset = 0x%x\n", pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET);
	/* Put those bits to high */
	/* GSM XCBI offset = 0x70 0000
	0x00 Bit 13 COM_SLV_SW_RSTB 1
	0x00 Bit 12 QSSP_SW_RSTB 1
	0x00 Bit 11 RAAE_SW_RSTB 1
	0x00 Bit 9 RB_1_SW_RSTB 1
	0x00 Bit 8 SM_SW_RSTB 1
	*/
	regVal |= (GSM_CONFIG_RESET_VALUE);
	pm8001_cw32(pm8001_ha, 2, GSM_CONFIG_RESET, regVal);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM (0x00004088 ==> 0x00007b88) - GSM"
		" Configuration and Reset is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_CONFIG_RESET)));

	/* step 12: Restore GSM - Read Address Parity Check */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK);
	/* just for debugging */
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700038 - Read Address Parity Check Enable"
		" = 0x%x\n", regVal));
	pm8001_cw32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK, regVal1);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700038 - Read Address Parity"
		" Check Enable is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_READ_ADDR_PARITY_CHECK)));
	/* Restore GSM - Write Address Parity Check */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK, regVal2);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700040 - Write Address Parity Check"
		" Enable is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_ADDR_PARITY_CHECK)));
	/* Restore GSM - Write Data Parity Check */
	regVal = pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK);
	pm8001_cw32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK, regVal3);
	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("GSM 0x700048 - Write Data Parity Check Enable"
		"is set to = 0x%x\n",
		pm8001_cr32(pm8001_ha, 2, GSM_WRITE_DATA_PARITY_CHECK)));

	/* step 13: bring the IOP and AAP1 out of reset */
	/* map 0x00000 to BAR4(0x20), BAR2(win) */
	if (-1 == pm8001_bar4_shift(pm8001_ha, SPC_TOP_LEVEL_ADDR_BASE)) {
		spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("Shift Bar4 to 0x%x failed\n",
			SPC_TOP_LEVEL_ADDR_BASE));
		return -1;
	}
	regVal = pm8001_cr32(pm8001_ha, 2, SPC_REG_RESET);
	regVal |= (SPC_REG_RESET_PCS_IOP_SS | SPC_REG_RESET_PCS_AAP1_SS);
	pm8001_cw32(pm8001_ha, 2, SPC_REG_RESET, regVal);

	/* step 14: delay 10 usec - Normal Mode */
	udelay(10);
	/* check Soft Reset Normal mode or Soft Reset HDA mode */
	if (signature == SPC_SOFT_RESET_SIGNATURE) {
		/* step 15 (Normal Mode): wait until scratch pad1 register
		bit 2 toggled (busy-poll with 1 us steps, ~2 s budget) */
		max_wait_count = 2 * 1000 * 1000;/* 2 sec */
		do {
			udelay(1);
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1) &
				SCRATCH_PAD1_RST;
		} while ((regVal != toggleVal) && (--max_wait_count));

		if (!max_wait_count) {
			regVal = pm8001_cr32(pm8001_ha,
				0, MSGU_SCRATCH_PAD_1);
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("TIMEOUT : ToggleVal 0x%x,"
				"MSGU_SCRATCH_PAD1 = 0x%x\n",
				toggleVal, regVal));
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD2 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2)));
			PM8001_FAIL_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			return -1;
		}

		/* step 16 (Normal) - Clear ODMR and ODCR */
		pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
		pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);

		/* step 17 (Normal Mode): wait for the FW and IOP to get
		ready - 1 sec timeout */
		/* Wait for the SPC Configuration Table to be ready */
		if (check_fw_ready(pm8001_ha) == -1) {
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_1);
			/* return error if MPI Configuration Table not ready */
			PM8001_INIT_DBG(pm8001_ha,
				pm8001_printk("FW not ready SCRATCH_PAD1"
				" = 0x%x\n", regVal));
			regVal = pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_2);
			/* return error if MPI Configuration Table not ready */
			PM8001_INIT_DBG(pm8001_ha,
				pm8001_printk("FW not ready SCRATCH_PAD2"
				" = 0x%x\n", regVal));
			PM8001_INIT_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD0 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_0)));
			PM8001_INIT_DBG(pm8001_ha,
				pm8001_printk("SCRATCH_PAD3 value = 0x%x\n",
				pm8001_cr32(pm8001_ha, 0, MSGU_SCRATCH_PAD_3)));
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			return -1;
		}
	}
	/* restore the BAR4 window to offset 0 before releasing the lock */
	pm8001_bar4_shift(pm8001_ha, 0);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);

	PM8001_INIT_DBG(pm8001_ha,
		pm8001_printk("SPC soft reset Complete\n"));
	return 0;
}

/*
 * pm8001_hw_chip_rst - hard reset of the whole SPC device (see body below
 * for the reset / un-reset / firmware-reload delay sequence).
 */
static void pm8001_hw_chip_rst(struct pm8001_hba_info *pm8001_ha)
{
	u32 i;
	u32 regVal;
	PM8001_INIT_DBG(pm8001_ha, pm8001_printk("chip reset start\n"));

	/* do SPC chip reset: pull the device reset line low */
	regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
	regVal &= ~(SPC_REG_RESET_DEVICE);
	pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);

	/* delay 10 usec */
	udelay(10);

	/* bring chip reset out of reset */
	regVal = pm8001_cr32(pm8001_ha, 1, SPC_REG_RESET);
	regVal |= SPC_REG_RESET_DEVICE;
	pm8001_cw32(pm8001_ha, 1, SPC_REG_RESET, regVal);

	/* delay 10 usec */
	udelay(10);

	/* wait for 20 msec until the firmware gets reloaded */
	i = 20;
	do {
		mdelay(1);
	} while ((--i) != 0);

	PM8001_INIT_DBG(pm8001_ha, pm8001_printk("chip reset finished\n"));
}

/**
 * pm8001_chip_iounmap - unmap the I/O memory regions mapped at init time.
 * @pm8001_ha: our hba card information
 */
static void pm8001_chip_iounmap(struct pm8001_hba_info *pm8001_ha)
{
	s8 bar, logical = 0;
	for (bar = 0; bar < 6; bar++) {
		/*
		** logical BARs for SPC:
		** bar 0 and 1 - logical BAR0
		** bar 2 and 3 - logical BAR1
		** bar4 - logical BAR2
		** bar5 - logical BAR3
		** Skip the appropriate assignments:
		*/
		if ((bar == 1) || (bar == 3))
			continue;
		if (pm8001_ha->io_mem[logical].memvirtaddr) {
			iounmap(pm8001_ha->io_mem[logical].memvirtaddr);
			logical++;
		}
	}
}

/**
 * pm8001_chip_intx_interrupt_enable - enable PM8001 chip interrupt (INTx)
 * @pm8001_ha: our hba card information
 *
 * Unmasks the outbound doorbell and acknowledges any pending doorbell state.
 */
static void
pm8001_chip_intx_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
{
	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_CLEAR_ALL);
	pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, ODCR_CLEAR_ALL);
}

/**
 * pm8001_chip_intx_interrupt_disable - disable PM8001 chip interrupt (INTx)
 * @pm8001_ha: our hba card information
 *
 * Masks all outbound doorbell interrupts.
 */
static void
pm8001_chip_intx_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
{
	pm8001_cw32(pm8001_ha, 0, MSGU_ODMR, ODMR_MASK_ALL);
}

/**
 * pm8001_chip_msix_interrupt_enable - enable one PM8001 MSI-X vector
 * @pm8001_ha: our hba card information
 * @int_vec_idx: index of the MSI-X vector to enable
 */
static void
pm8001_chip_msix_interrupt_enable(struct pm8001_hba_info *pm8001_ha,
	u32 int_vec_idx)
{
	u32 msi_index;
	u32 value;
	/* each vector has its own enable register in the MSI-X table */
	msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
	msi_index += MSIX_TABLE_BASE;
	pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_ENABLE);
	/* clear any doorbell state pending for this vector */
	value = (1 << int_vec_idx);
	pm8001_cw32(pm8001_ha, 0, MSGU_ODCR, value);
}

/**
 * pm8001_chip_msix_interrupt_disable - disable one PM8001 MSI-X vector
 * @pm8001_ha: our hba card information
 * @int_vec_idx: index of the MSI-X vector to disable
 */
static void
pm8001_chip_msix_interrupt_disable(struct pm8001_hba_info *pm8001_ha,
	u32 int_vec_idx)
{
	u32 msi_index;
	msi_index = int_vec_idx * MSIX_TABLE_ELEMENT_SIZE;
	msi_index += MSIX_TABLE_BASE;
	pm8001_cw32(pm8001_ha, 0, msi_index, MSIX_INTERRUPT_DISABLE);
}

/**
 * pm8001_chip_interrupt_enable - enable PM8001 chip interrupt
 * @pm8001_ha: our hba card information
 *
 * Dispatches to the MSI-X or INTx variant depending on the build-time
 * PM8001_USE_MSIX configuration.
 */
static void
pm8001_chip_interrupt_enable(struct pm8001_hba_info *pm8001_ha)
{
#ifdef PM8001_USE_MSIX
	pm8001_chip_msix_interrupt_enable(pm8001_ha, 0);
	return;
#endif
	pm8001_chip_intx_interrupt_enable(pm8001_ha);
}

/**
 * pm8001_chip_interrupt_disable - disable PM8001 chip interrupt
 * @pm8001_ha: our hba card information
 *
 * Dispatches to the MSI-X or INTx variant depending on the build-time
 * PM8001_USE_MSIX configuration.
 */
static void
pm8001_chip_interrupt_disable(struct pm8001_hba_info *pm8001_ha)
{
#ifdef PM8001_USE_MSIX
	pm8001_chip_msix_interrupt_disable(pm8001_ha, 0);
	return;
#endif
	pm8001_chip_intx_interrupt_disable(pm8001_ha);
}

/**
 * mpi_msg_free_get- get the free message buffer for transfer inbound queue.
 * @circularQ: the inbound queue we want to transfer to HBA.
 * @messageSize: the message size of this transfer, normally it is 64 bytes
 * @messagePtr: the pointer to message.
*/ static int mpi_msg_free_get(struct inbound_queue_table *circularQ, u16 messageSize, void **messagePtr) { u32 offset, consumer_index; struct mpi_msg_hdr *msgHeader; u8 bcCount = 1; /* only support single buffer */ /* Checks is the requested message size can be allocated in this queue*/ if (messageSize > 64) { *messagePtr = NULL; return -1; } /* Stores the new consumer index */ consumer_index = pm8001_read_32(circularQ->ci_virt); circularQ->consumer_index = cpu_to_le32(consumer_index); if (((circularQ->producer_idx + bcCount) % 256) == le32_to_cpu(circularQ->consumer_index)) { *messagePtr = NULL; return -1; } /* get memory IOMB buffer address */ offset = circularQ->producer_idx * 64; /* increment to next bcCount element */ circularQ->producer_idx = (circularQ->producer_idx + bcCount) % 256; /* Adds that distance to the base of the region virtual address plus the message header size*/ msgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt + offset); *messagePtr = ((void *)msgHeader) + sizeof(struct mpi_msg_hdr); return 0; } /** * mpi_build_cmd- build the message queue for transfer, update the PI to FW * to tell the fw to get this message from IOMB. * @pm8001_ha: our hba card information * @circularQ: the inbound queue we want to transfer to HBA. * @opCode: the operation code represents commands which LLDD and fw recognized. * @payload: the command payload of each operation command. 
 *
 * Return: 0 on success; -1 if no free inbound slot is available.
 */
static int mpi_build_cmd(struct pm8001_hba_info *pm8001_ha,
			 struct inbound_queue_table *circularQ,
			 u32 opCode, void *payload)
{
	u32 Header = 0, hpriority = 0, bc = 1, category = 0x02;
	u32 responseQueue = 0;
	void *pMessage;

	if (mpi_msg_free_get(circularQ, 64, &pMessage) < 0) {
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("No free mpi buffer\n"));
		return -1;
	}
	BUG_ON(!payload);
	/*Copy to the payload*/
	memcpy(pMessage, payload, (64 - sizeof(struct mpi_msg_hdr)));

	/*Build the header: valid bit | priority | buffer count |
	  response queue | category | opcode */
	Header = ((1 << 31) | (hpriority << 30) | ((bc & 0x1f) << 24)
		| ((responseQueue & 0x3F) << 16)
		| ((category & 0xF) << 12) | (opCode & 0xFFF));

	/* the header lives in the 4 bytes just before the payload */
	pm8001_write_32((pMessage - 4), 0, cpu_to_le32(Header));
	/*Update the PI to the firmware*/
	pm8001_cw32(pm8001_ha, circularQ->pi_pci_bar,
		circularQ->pi_offset, circularQ->producer_idx);
	PM8001_IO_DBG(pm8001_ha,
		pm8001_printk("after PI= %d CI= %d\n", circularQ->producer_idx,
		circularQ->consumer_index));
	return 0;
}

/*
 * mpi_msg_free_set - return a consumed outbound message slot to the firmware
 * by advancing the consumer index by @bc elements.  If @pMsg is not the
 * message at the current consumer index, the slot is NOT freed (only a
 * diagnostic is logged).  Always returns 0.
 */
static u32 mpi_msg_free_set(struct pm8001_hba_info *pm8001_ha, void *pMsg,
			    struct outbound_queue_table *circularQ, u8 bc)
{
	u32 producer_index;
	struct mpi_msg_hdr *msgHeader;
	struct mpi_msg_hdr *pOutBoundMsgHeader;

	msgHeader = (struct mpi_msg_hdr *)(pMsg - sizeof(struct mpi_msg_hdr));
	pOutBoundMsgHeader = (struct mpi_msg_hdr *)(circularQ->base_virt +
				circularQ->consumer_idx * 64);
	if (pOutBoundMsgHeader != msgHeader) {
		/* out-of-order free attempt: log state and bail out */
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("consumer_idx = %d msgHeader = %p\n",
			circularQ->consumer_idx, msgHeader));

		/* Update the producer index from SPC */
		producer_index = pm8001_read_32(circularQ->pi_virt);
		circularQ->producer_index = cpu_to_le32(producer_index);
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("consumer_idx = %d producer_index = %d"
			"msgHeader = %p\n", circularQ->consumer_idx,
			circularQ->producer_index, msgHeader));
		return 0;
	}
	/* free the circular queue buffer elements associated with the message*/
	circularQ->consumer_idx = (circularQ->consumer_idx + bc) % 256;

	/* update the CI of outbound queue */
	pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar, circularQ->ci_offset,
		circularQ->consumer_idx);
	/* Update the producer index from SPC*/
	producer_index = pm8001_read_32(circularQ->pi_virt);
	circularQ->producer_index = cpu_to_le32(producer_index);
	PM8001_IO_DBG(pm8001_ha,
		pm8001_printk(" CI=%d PI=%d\n", circularQ->consumer_idx,
		circularQ->producer_index));
	return 0;
}

/**
 * mpi_msg_consume- get the MPI message from outbound queue message table.
 * @pm8001_ha: our hba card information
 * @circularQ: the outbound queue  table.
 * @messagePtr1: the message contents of this outbound message.
 * @pBC: the message size.
 *
 * Return: MPI_IO_STATUS_SUCCESS when a valid message was consumed,
 * MPI_IO_STATUS_FAIL when an entry without the valid bit was found,
 * MPI_IO_STATUS_BUSY when the queue is empty.
 */
static u32 mpi_msg_consume(struct pm8001_hba_info *pm8001_ha,
			   struct outbound_queue_table *circularQ,
			   void **messagePtr1, u8 *pBC)
{
	struct mpi_msg_hdr *msgHeader;
	__le32 msgHeader_tmp;
	u32 header_tmp;

	do {
		/* If there are not-yet-delivered messages ... */
		if (le32_to_cpu(circularQ->producer_index)
			!= circularQ->consumer_idx) {
			/*Get the pointer to the circular queue buffer element*/
			msgHeader = (struct mpi_msg_hdr *)
				(circularQ->base_virt +
				circularQ->consumer_idx * 64);
			/* read header */
			header_tmp = pm8001_read_32(msgHeader);
			msgHeader_tmp = cpu_to_le32(header_tmp);
			/* bit 31 is the "entry valid" flag */
			if (0 != (le32_to_cpu(msgHeader_tmp) & 0x80000000)) {
				if (OPC_OUB_SKIP_ENTRY !=
					(le32_to_cpu(msgHeader_tmp) & 0xfff)) {
					/* real message: hand payload and
					 * buffer count back to the caller */
					*messagePtr1 =
						((u8 *)msgHeader) +
						sizeof(struct mpi_msg_hdr);
					*pBC = (u8)((le32_to_cpu(msgHeader_tmp)
						>> 24) & 0x1f);
					PM8001_IO_DBG(pm8001_ha,
						pm8001_printk(": CI=%d PI=%d "
						"msgHeader=%x\n",
						circularQ->consumer_idx,
						circularQ->producer_index,
						msgHeader_tmp));
					return MPI_IO_STATUS_SUCCESS;
				} else {
					/* skip entry: advance CI past it
					 * and keep scanning */
					circularQ->consumer_idx =
						(circularQ->consumer_idx +
						((le32_to_cpu(msgHeader_tmp)
						 >> 24) & 0x1f)) % 256;
					msgHeader_tmp = 0;
					pm8001_write_32(msgHeader, 0, 0);
					/* update the CI of outbound queue */
					pm8001_cw32(pm8001_ha,
						circularQ->ci_pci_bar,
						circularQ->ci_offset,
						circularQ->consumer_idx);
				}
			} else {
				/* entry without valid bit: consume and
				 * report failure */
				circularQ->consumer_idx =
					(circularQ->consumer_idx +
					((le32_to_cpu(msgHeader_tmp) >>
					24) & 0x1f)) % 256;
				msgHeader_tmp = 0;
				pm8001_write_32(msgHeader, 0, 0);
				/* update the CI of outbound queue */
				pm8001_cw32(pm8001_ha, circularQ->ci_pci_bar,
					circularQ->ci_offset,
					circularQ->consumer_idx);
				return MPI_IO_STATUS_FAIL;
			}
		} else {
			u32 producer_index;
			void *pi_virt = circularQ->pi_virt;
			/* Update the producer index from SPC */
			producer_index = pm8001_read_32(pi_virt);
			circularQ->producer_index = cpu_to_le32(producer_index);
		}
	} while (le32_to_cpu(circularQ->producer_index) !=
		circularQ->consumer_idx);
	/* while we don't have any more not-yet-delivered message */
	/* report empty */
	return MPI_IO_STATUS_BUSY;
}

/*
 * pm8001_work_fn - deferred-work handler for events queued by
 * pm8001_handle_event(); pw->data stashes either a device or a sas_task
 * depending on pw->handler.
 */
static void pm8001_work_fn(struct work_struct *work)
{
	struct pm8001_work *pw = container_of(work, struct pm8001_work, work);
	struct pm8001_device *pm8001_dev;
	struct domain_device *dev;

	/*
	 * So far, all users of this stash an associated structure here.
	 * If we get here, and this pointer is null, then the action
	 * was cancelled. This nullification happens when the device
	 * goes away.
	 */
	pm8001_dev = pw->data; /* Most stash device structure */
	if ((pm8001_dev == NULL)
	 || ((pw->handler != IO_XFER_ERROR_BREAK)
	  && (pm8001_dev->dev_type == NO_DEVICE))) {
		kfree(pw);
		return;
	}

	switch (pw->handler) {
	case IO_XFER_ERROR_BREAK:
	{	/* This one stashes the sas_task instead */
		struct sas_task *t = (struct sas_task *)pm8001_dev;
		u32 tag;
		struct pm8001_ccb_info *ccb;
		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
		unsigned long flags, flags1;
		struct task_status_struct *ts;
		int i;

		if (pm8001_query_task(t) == TMF_RESP_FUNC_SUCC)
			break; /* Task still on lu */
		spin_lock_irqsave(&pm8001_ha->lock, flags);

		spin_lock_irqsave(&t->task_state_lock, flags1);
		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			break; /* Task got completed by another */
		}
		spin_unlock_irqrestore(&t->task_state_lock, flags1);

		/* Search for a possible ccb that matches the task.
		 * Note the comma operator: ccb is reset to NULL before each
		 * condition test, so ccb is NULL after a full (no-match)
		 * scan, and points at the match when we break. */
		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
			ccb = &pm8001_ha->ccb_info[i];
			tag = ccb->ccb_tag;
			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
				break;
		}
		if (!ccb) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			break; /* Task got freed by another */
		}
		ts = &t->task_status;
		ts->resp = SAS_TASK_COMPLETE;
		/* Force the midlayer to retry */
		ts->stat = SAS_QUEUE_FULL;
		pm8001_dev = ccb->device;
		if (pm8001_dev)
			pm8001_dev->running_req--;
		spin_lock_irqsave(&t->task_state_lock, flags1);
		t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
		t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
		t->task_state_flags |= SAS_TASK_STATE_DONE;
		if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p"
				" done with event 0x%x resp 0x%x stat 0x%x but"
				" aborted by upper layer!\n",
				t, pw->handler, ts->resp, ts->stat));
			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
		} else {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
			mb();/* in order to force CPU ordering */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			t->task_done(t);
		}
	}	break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
	{	/* This one stashes the sas_task instead */
		struct sas_task *t = (struct sas_task *)pm8001_dev;
		u32 tag;
		struct pm8001_ccb_info *ccb;
		struct pm8001_hba_info *pm8001_ha = pw->pm8001_ha;
		unsigned long flags, flags1;
		int i, ret = 0;

		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));

		ret = pm8001_query_task(t);

		/* whole switch is a macro argument: it only runs when the
		 * I/O debug level is enabled */
		PM8001_IO_DBG(pm8001_ha,
			switch (ret) {
			case TMF_RESP_FUNC_SUCC:
				pm8001_printk("...Task on lu\n");
				break;
			case TMF_RESP_FUNC_COMPLETE:
				pm8001_printk("...Task NOT on lu\n");
				break;
			default:
				pm8001_printk("...query task failed!!!\n");
				break;
			});

		spin_lock_irqsave(&pm8001_ha->lock, flags);

		spin_lock_irqsave(&t->task_state_lock, flags1);

		if (unlikely((t->task_state_flags & SAS_TASK_STATE_DONE))) {
			spin_unlock_irqrestore(&t->task_state_lock, flags1);
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
				(void)pm8001_abort_task(t);
			break; /* Task got completed by another */
		}

		spin_unlock_irqrestore(&t->task_state_lock, flags1);

		/* Search for a possible ccb that matches the task
		 * (comma-operator scan, see IO_XFER_ERROR_BREAK above) */
		for (i = 0; ccb = NULL, i < PM8001_MAX_CCB; i++) {
			ccb = &pm8001_ha->ccb_info[i];
			tag = ccb->ccb_tag;
			if ((tag != 0xFFFFFFFF) && (ccb->task == t))
				break;
		}
		if (!ccb) {
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			if (ret == TMF_RESP_FUNC_SUCC) /* task on lu */
				(void)pm8001_abort_task(t);
			break; /* Task got freed by another */
		}

		pm8001_dev = ccb->device;
		dev = pm8001_dev->sas_device;

		switch (ret) {
		case TMF_RESP_FUNC_SUCC: /* task on lu */
			ccb->open_retry = 1; /* Snub completion */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			ret = pm8001_abort_task(t);
			ccb->open_retry = 0;
			switch (ret) {
			case TMF_RESP_FUNC_SUCC:
			case TMF_RESP_FUNC_COMPLETE:
				break;
			default: /* device misbehavior */
				ret = TMF_RESP_FUNC_FAILED;
				PM8001_IO_DBG(pm8001_ha,
					pm8001_printk("...Reset phy\n"));
				pm8001_I_T_nexus_reset(dev);
				break;
			}
			break;
		case TMF_RESP_FUNC_COMPLETE: /* task not on lu */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			/* Do we need to abort the task locally? */
			break;
		default: /* device misbehavior */
			spin_unlock_irqrestore(&pm8001_ha->lock, flags);
			ret = TMF_RESP_FUNC_FAILED;
			PM8001_IO_DBG(pm8001_ha,
				pm8001_printk("...Reset phy\n"));
			pm8001_I_T_nexus_reset(dev);
		}

		if (ret == TMF_RESP_FUNC_FAILED)
			t = NULL;
		pm8001_open_reject_retry(pm8001_ha, t, pm8001_dev);
		PM8001_IO_DBG(pm8001_ha, pm8001_printk("...Complete\n"));
	}	break;
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_reset(dev);
		break;
	case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_reset(dev);
		break;
	case IO_DS_IN_ERROR:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_reset(dev);
		break;
	case IO_DS_NON_OPERATIONAL:
		dev = pm8001_dev->sas_device;
		pm8001_I_T_nexus_reset(dev);
		break;
	}
	kfree(pw);
}

/*
 * pm8001_handle_event - queue deferred handling of a firmware event onto
 * the driver workqueue; @data is stashed for pm8001_work_fn.
 * Returns 0 on success, -ENOMEM if the work item cannot be allocated.
 */
static int pm8001_handle_event(struct pm8001_hba_info *pm8001_ha, void *data,
			       int handler)
{
	struct pm8001_work *pw;
	int ret = 0;
	pw = kmalloc(sizeof(struct pm8001_work), GFP_ATOMIC);
	if (pw) {
		pw->pm8001_ha = pm8001_ha;
		pw->data = data;
		pw->handler = handler;
		INIT_WORK(&pw->work, pm8001_work_fn);
		queue_work(pm8001_wq, &pw->work);
	} else
		ret = -ENOMEM;
	return ret;
}

/**
 * mpi_ssp_completion- process the event that FW response to the SSP request.
 * @pm8001_ha: our hba card information
 * @piomb: the message contents of this outbound message.
 *
 * When FW has completed a ssp request for example a IO request, after it has
 * filled the SG data with the data, it will trigger this event represent
 * that he has finished the job,please check the coresponding buffer.
 * So we will tell the caller who maybe waiting the result to tell upper layer
 * that the task has been finished.
 */
static void
mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha , void *piomb)
{
	struct sas_task *t;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 status;
	u32 param;
	u32 tag;
	struct ssp_completion_resp *psspPayload;
	struct task_status_struct *ts;
	struct ssp_response_iu *iu;
	struct pm8001_device *pm8001_dev;
	/* IOMB payload starts 4 bytes past the header word */
	psspPayload = (struct ssp_completion_resp *)(piomb + 4);
	status = le32_to_cpu(psspPayload->status);
	tag = le32_to_cpu(psspPayload->tag);
	ccb = &pm8001_ha->ccb_info[tag];
	if ((status == IO_ABORTED) && ccb->open_retry) {
		/* Being completed by another */
		ccb->open_retry = 0;
		return;
	}
	pm8001_dev = ccb->device;
	param = le32_to_cpu(psspPayload->param);

	t = ccb->task;

	if (status && status != IO_UNDERFLOW)
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("sas IO status 0x%x\n", status));
	if (unlikely(!t || !t->lldd_task || !t->dev))
		return;
	ts = &t->task_status;
	/* translate the firmware status into libsas resp/stat values */
	switch (status) {
	case IO_SUCCESS:
		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS"
			",param = %d\n", param));
		if (param == 0) {
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAM_STAT_GOOD;
		} else {
			/* nonzero param: SSP response IU carries the
			 * detailed status */
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_PROTO_RESPONSE;
			ts->residual = param;
			iu = &psspPayload->ssp_resp_iu;
			sas_ssp_task_response(pm8001_ha->dev, t, iu);
		}
		if (pm8001_dev)
			pm8001_dev->running_req--;
		break;
	case IO_ABORTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_ABORTED IOMB Tag\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_ABORTED_TASK;
		break;
	case IO_UNDERFLOW:
		/* SSP Completion with error */
		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW"
			",param = %d\n", param));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_UNDERRUN;
		ts->residual = param;
		if (pm8001_dev)
			pm8001_dev->running_req--;
		break;
	case IO_NO_DEVICE:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_NO_DEVICE\n"));
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_PHY_DOWN;
		break;
	case IO_XFER_ERROR_BREAK:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		/* Force the midlayer to retry */
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_XFER_ERROR_PHY_NOT_READY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_EPROTO;
		break;
	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		break;
	case IO_OPEN_CNX_ERROR_BREAK:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		/* kick off deferred nexus recovery unless the upper layer
		 * owns the task */
		if (!t->uldd_task)
			pm8001_handle_event(pm8001_ha,
				pm8001_dev,
				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
		break;
	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
		break;
	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
			"NOT_SUPPORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
		break;
	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
		break;
	case IO_XFER_ERROR_NAK_RECEIVED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_XFER_ERROR_ACK_NAK_TIMEOUT:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case IO_XFER_ERROR_DMA:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_DMA\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_XFER_ERROR_OFFSET_MISMATCH:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_PORT_IN_RESET:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_PORT_IN_RESET\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_DS_NON_OPERATIONAL:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		if (!t->uldd_task)
			pm8001_handle_event(pm8001_ha,
				pm8001_dev,
				IO_DS_NON_OPERATIONAL);
		break;
	case IO_DS_IN_RECOVERY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_DS_IN_RECOVERY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_TM_TAG_NOT_FOUND:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_TM_TAG_NOT_FOUND\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_SSP_EXT_IU_ZERO_LEN_ERROR:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_SSP_EXT_IU_ZERO_LEN_ERROR\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"))
;
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	default:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("Unknown status 0x%x\n", status));
		/* not allowed case. Therefore, return failed status */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		break;
	}
	PM8001_IO_DBG(pm8001_ha,
		pm8001_printk("scsi_status = %x \n ",
		psspPayload->ssp_resp_iu.status));
	spin_lock_irqsave(&t->task_state_lock, flags);
	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
	t->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
			" io_status 0x%x resp 0x%x "
			"stat 0x%x but aborted by upper layer!\n",
			t, status, ts->resp, ts->stat));
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	} else {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
		mb();/* in order to force CPU ordering */
		t->task_done(t);
	}
}

/*See the comments for mpi_ssp_completion */
static void mpi_ssp_event(struct pm8001_hba_info *pm8001_ha , void *piomb)
{
	struct sas_task *t;
	unsigned long flags;
	struct task_status_struct *ts;
	struct pm8001_ccb_info *ccb;
	struct pm8001_device *pm8001_dev;
	struct ssp_event_resp *psspPayload =
		(struct ssp_event_resp *)(piomb + 4);
	u32 event = le32_to_cpu(psspPayload->event);
	u32 tag = le32_to_cpu(psspPayload->tag);
	u32 port_id = le32_to_cpu(psspPayload->port_id);
	u32 dev_id = le32_to_cpu(psspPayload->device_id);

	ccb = &pm8001_ha->ccb_info[tag];
	t = ccb->task;
	pm8001_dev = ccb->device;
	if (event)
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("sas IO status 0x%x\n", event));
	if (unlikely(!t || !t->lldd_task || !t->dev))
		return;
	ts = &t->task_status;
	PM8001_IO_DBG(pm8001_ha,
		pm8001_printk("port_id = %x,device_id = %x\n",
		port_id, dev_id));
	switch (event) {
	case IO_OVERFLOW:
		/* NOTE(review): the debug string says "IO_UNDERFLOW" but this
		 * is the IO_OVERFLOW case; message text looks wrong. */
		PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n");)
		ts->resp =
SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; if (pm8001_dev) pm8001_dev->running_req--; break; case IO_XFER_ERROR_BREAK: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_BREAK\n")); pm8001_handle_event(pm8001_ha, t, IO_XFER_ERROR_BREAK); return; case IO_XFER_ERROR_PHY_NOT_READY: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT" "_SUPPORTED\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_EPROTO; break; case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_UNKNOWN; break; case IO_OPEN_CNX_ERROR_BREAK: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_UNKNOWN; if (!t->uldd_task) pm8001_handle_event(pm8001_ha, pm8001_dev, IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); break; case IO_OPEN_CNX_ERROR_BAD_DESTINATION: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_BAD_DEST; break; case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" "NOT_SUPPORTED\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_CONN_RATE; break; case 
IO_OPEN_CNX_ERROR_WRONG_DESTINATION: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_WRONG_DEST; break; case IO_XFER_ERROR_NAK_RECEIVED: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; case IO_XFER_ERROR_ACK_NAK_TIMEOUT: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_NAK_R_ERR; break; case IO_XFER_OPEN_RETRY_TIMEOUT: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); pm8001_handle_event(pm8001_ha, t, IO_XFER_OPEN_RETRY_TIMEOUT); return; case IO_XFER_ERROR_UNEXPECTED_PHASE: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; break; case IO_XFER_ERROR_XFER_RDY_OVERRUN: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; break; case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; break; case IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_CMD_ISSUE_ACK_NAK_TIMEOUT\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; break; case IO_XFER_ERROR_OFFSET_MISMATCH: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; break; case IO_XFER_ERROR_XFER_ZERO_DATA_LEN: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; break; case IO_XFER_CMD_FRAME_ISSUED: PM8001_IO_DBG(pm8001_ha, pm8001_printk(" 
IO_XFER_CMD_FRAME_ISSUED\n")); return; default: PM8001_IO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", event)); /* not allowed case. Therefore, return failed status */ ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; break; } spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; t->task_state_flags |= SAS_TASK_STATE_DONE; if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { spin_unlock_irqrestore(&t->task_state_lock, flags); PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with" " event 0x%x resp 0x%x " "stat 0x%x but aborted by upper layer!\n", t, event, ts->resp, ts->stat)); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/* in order to force CPU ordering */ t->task_done(t); } } /*See the comments for mpi_ssp_completion */ static void mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct sas_task *t; struct pm8001_ccb_info *ccb; u32 param; u32 status; u32 tag; struct sata_completion_resp *psataPayload; struct task_status_struct *ts; struct ata_task_resp *resp ; u32 *sata_resp; struct pm8001_device *pm8001_dev; unsigned long flags; psataPayload = (struct sata_completion_resp *)(piomb + 4); status = le32_to_cpu(psataPayload->status); tag = le32_to_cpu(psataPayload->tag); ccb = &pm8001_ha->ccb_info[tag]; param = le32_to_cpu(psataPayload->param); t = ccb->task; ts = &t->task_status; pm8001_dev = ccb->device; if (status) PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("sata IO status 0x%x\n", status)); if (unlikely(!t || !t->lldd_task || !t->dev)) return; switch (status) { case IO_SUCCESS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); if (param == 0) { ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_GOOD; } else { u8 len; ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_PROTO_RESPONSE; ts->residual = param; 
PM8001_IO_DBG(pm8001_ha, pm8001_printk("SAS_PROTO_RESPONSE len = %d\n", param)); sata_resp = &psataPayload->sata_resp[0]; resp = (struct ata_task_resp *)ts->buf; if (t->ata_task.dma_xfer == 0 && t->data_dir == PCI_DMA_FROMDEVICE) { len = sizeof(struct pio_setup_fis); PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO read len = %d\n", len)); } else if (t->ata_task.use_ncq) { len = sizeof(struct set_dev_bits_fis); PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA len = %d\n", len)); } else { len = sizeof(struct dev_to_host_fis); PM8001_IO_DBG(pm8001_ha, pm8001_printk("other len = %d\n", len)); } if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { resp->frame_len = len; memcpy(&resp->ending_fis[0], sata_resp, len); ts->buf_valid_size = sizeof(*resp); } else PM8001_IO_DBG(pm8001_ha, pm8001_printk("response to large\n")); } if (pm8001_dev) pm8001_dev->running_req--; break; case IO_ABORTED: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_ABORTED IOMB Tag\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_ABORTED_TASK; if (pm8001_dev) pm8001_dev->running_req--; break; /* following cases are to do cases */ case IO_UNDERFLOW: /* SATA Completion with error */ PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW param = %d\n", param)); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_UNDERRUN; ts->residual = param; if (pm8001_dev) pm8001_dev->running_req--; break; case IO_NO_DEVICE: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n")); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; break; case IO_XFER_ERROR_BREAK: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_BREAK\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_INTERRUPTED; break; case IO_XFER_ERROR_PHY_NOT_READY: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT" 
"_SUPPORTED\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_EPROTO; break; case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_UNKNOWN; break; case IO_OPEN_CNX_ERROR_BREAK: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; break; case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DEV_NO_RESPONSE; if (!t->uldd_task) { pm8001_handle_event(pm8001_ha, pm8001_dev, IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_QUEUE_FULL; pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*in order to force CPU ordering*/ spin_unlock_irq(&pm8001_ha->lock); t->task_done(t); spin_lock_irq(&pm8001_ha->lock); return; } break; case IO_OPEN_CNX_ERROR_BAD_DESTINATION: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_BAD_DEST; if (!t->uldd_task) { pm8001_handle_event(pm8001_ha, pm8001_dev, IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_QUEUE_FULL; pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*ditto*/ spin_unlock_irq(&pm8001_ha->lock); t->task_done(t); spin_lock_irq(&pm8001_ha->lock); return; } break; case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" "NOT_SUPPORTED\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_CONN_RATE; break; case IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_STP_RESOURCES" 
"_BUSY\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DEV_NO_RESPONSE; if (!t->uldd_task) { pm8001_handle_event(pm8001_ha, pm8001_dev, IO_OPEN_CNX_ERROR_STP_RESOURCES_BUSY); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_QUEUE_FULL; pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/* ditto*/ spin_unlock_irq(&pm8001_ha->lock); t->task_done(t); spin_lock_irq(&pm8001_ha->lock); return; } break; case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_WRONG_DEST; break; case IO_XFER_ERROR_NAK_RECEIVED: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_NAK_R_ERR; break; case IO_XFER_ERROR_ACK_NAK_TIMEOUT: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_ACK_NAK_TIMEOUT\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_NAK_R_ERR; break; case IO_XFER_ERROR_DMA: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_DMA\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_ABORTED_TASK; break; case IO_XFER_ERROR_SATA_LINK_TIMEOUT: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_SATA_LINK_TIMEOUT\n")); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_DEV_NO_RESPONSE; break; case IO_XFER_ERROR_REJECTED_NCQ_MODE: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_UNDERRUN; break; case IO_XFER_OPEN_RETRY_TIMEOUT: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_TO; break; case IO_PORT_IN_RESET: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_PORT_IN_RESET\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DEV_NO_RESPONSE; break; case IO_DS_NON_OPERATIONAL: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_DS_NON_OPERATIONAL\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DEV_NO_RESPONSE; if 
(!t->uldd_task) { pm8001_handle_event(pm8001_ha, pm8001_dev, IO_DS_NON_OPERATIONAL); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_QUEUE_FULL; pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*ditto*/ spin_unlock_irq(&pm8001_ha->lock); t->task_done(t); spin_lock_irq(&pm8001_ha->lock); return; } break; case IO_DS_IN_RECOVERY: PM8001_IO_DBG(pm8001_ha, pm8001_printk(" IO_DS_IN_RECOVERY\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DEV_NO_RESPONSE; break; case IO_DS_IN_ERROR: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_DS_IN_ERROR\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DEV_NO_RESPONSE; if (!t->uldd_task) { pm8001_handle_event(pm8001_ha, pm8001_dev, IO_DS_IN_ERROR); ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_QUEUE_FULL; pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*ditto*/ spin_unlock_irq(&pm8001_ha->lock); t->task_done(t); spin_lock_irq(&pm8001_ha->lock); return; } break; case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; default: PM8001_IO_DBG(pm8001_ha, pm8001_printk("Unknown status 0x%x\n", status)); /* not allowed case. 
Therefore, return failed status */ ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DEV_NO_RESPONSE; break; } spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; t->task_state_flags |= SAS_TASK_STATE_DONE; if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { spin_unlock_irqrestore(&t->task_state_lock, flags); PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with io_status 0x%x" " resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat)); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else if (t->uldd_task) { spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/* ditto */ spin_unlock_irq(&pm8001_ha->lock); t->task_done(t); spin_lock_irq(&pm8001_ha->lock); } else if (!t->uldd_task) { spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb();/*ditto*/ spin_unlock_irq(&pm8001_ha->lock); t->task_done(t); spin_lock_irq(&pm8001_ha->lock); } } /*See the comments for mpi_ssp_completion */ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha , void *piomb) { struct sas_task *t; struct task_status_struct *ts; struct pm8001_ccb_info *ccb; struct pm8001_device *pm8001_dev; struct sata_event_resp *psataPayload = (struct sata_event_resp *)(piomb + 4); u32 event = le32_to_cpu(psataPayload->event); u32 tag = le32_to_cpu(psataPayload->tag); u32 port_id = le32_to_cpu(psataPayload->port_id); u32 dev_id = le32_to_cpu(psataPayload->device_id); unsigned long flags; ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; pm8001_dev = ccb->device; if (event) PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("sata IO status 0x%x\n", event)); if (unlikely(!t || !t->lldd_task || !t->dev)) return; ts = &t->task_status; PM8001_IO_DBG(pm8001_ha, pm8001_printk("port_id = %x,device_id = %x\n", port_id, dev_id)); switch (event) { case IO_OVERFLOW: PM8001_IO_DBG(pm8001_ha, 
pm8001_printk("IO_UNDERFLOW\n"));
		/* historical message text; this is the OVERFLOW case */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_OVERRUN;
		ts->residual = 0;
		if (pm8001_dev)
			pm8001_dev->running_req--;
		break;
	case IO_XFER_ERROR_BREAK:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_BREAK\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_INTERRUPTED;
		break;
	case IO_XFER_ERROR_PHY_NOT_READY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT"
			"_SUPPORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_EPROTO;
		break;
	case IO_OPEN_CNX_ERROR_ZONE_VIOLATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_UNKNOWN;
		break;
	case IO_OPEN_CNX_ERROR_BREAK:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_CONT0;
		break;
	case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n"));
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_DEV_NO_RESPONSE;
		/* internal command: queue recovery and complete here,
		 * dropping the HBA lock around task_done() */
		if (!t->uldd_task) {
			pm8001_handle_event(pm8001_ha,
				pm8001_dev,
				IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS);
			ts->resp = SAS_TASK_COMPLETE;
			ts->stat = SAS_QUEUE_FULL;
			pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
			mb();/*ditto*/
			spin_unlock_irq(&pm8001_ha->lock);
			t->task_done(t);
			spin_lock_irq(&pm8001_ha->lock);
			return;
		}
		break;
	case IO_OPEN_CNX_ERROR_BAD_DESTINATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n"));
		ts->resp = SAS_TASK_UNDELIVERED;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_BAD_DEST;
		break;
	case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_"
			"NOT_SUPPORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_CONN_RATE;
		break;
	case IO_OPEN_CNX_ERROR_WRONG_DESTINATION:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_WRONG_DEST;
		break;
	case IO_XFER_ERROR_NAK_RECEIVED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_NAK_RECEIVED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case IO_XFER_ERROR_PEER_ABORTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_PEER_ABORTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_NAK_R_ERR;
		break;
	case IO_XFER_ERROR_REJECTED_NCQ_MODE:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_REJECTED_NCQ_MODE\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DATA_UNDERRUN;
		break;
	case IO_XFER_OPEN_RETRY_TIMEOUT:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_UNEXPECTED_PHASE:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_UNEXPECTED_PHASE\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_XFER_RDY_OVERRUN:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_XFER_RDY_OVERRUN\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_XFER_RDY_NOT_EXPECTED\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_OFFSET_MISMATCH:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_OFFSET_MISMATCH\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_ERROR_XFER_ZERO_DATA_LEN:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_ERROR_XFER_ZERO_DATA_LEN\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	case IO_XFER_CMD_FRAME_ISSUED:
		/* informational only — fall out and complete normally */
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_CMD_FRAME_ISSUED\n"));
		break;
	case IO_XFER_PIO_SETUP_ERROR:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_XFER_PIO_SETUP_ERROR\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	default:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("Unknown status 0x%x\n", event));
		/* not allowed case. Therefore, return failed status */
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_TO;
		break;
	}
	spin_lock_irqsave(&t->task_state_lock, flags);
	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
	t->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("task 0x%p done with io_status 0x%x"
			" resp 0x%x stat 0x%x but aborted by upper layer!\n",
			t, event, ts->resp, ts->stat));
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	} else if (t->uldd_task) {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
		mb();/* ditto */
		/* task_done is called without the HBA lock held */
		spin_unlock_irq(&pm8001_ha->lock);
		t->task_done(t);
		spin_lock_irq(&pm8001_ha->lock);
	} else if (!t->uldd_task) {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
		mb();/*ditto*/
		spin_unlock_irq(&pm8001_ha->lock);
		t->task_done(t);
		spin_lock_irq(&pm8001_ha->lock);
	}
}

/*See the comments for mpi_ssp_completion */
/* mpi_smp_completion - complete an SMP request from its completion IOMB. */
static void
mpi_smp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	u32 param;
	struct sas_task *t;
	struct pm8001_ccb_info *ccb;
	unsigned long flags;
	u32 status;
	u32 tag;
	struct smp_completion_resp *psmpPayload;
	struct task_status_struct *ts;
	struct pm8001_device *pm8001_dev;

	psmpPayload = (struct smp_completion_resp *)(piomb + 4);
	status = le32_to_cpu(psmpPayload->status);
	tag = le32_to_cpu(psmpPayload->tag);

	ccb = &pm8001_ha->ccb_info[tag];
	param =
le32_to_cpu(psmpPayload->param); t = ccb->task; ts = &t->task_status; pm8001_dev = ccb->device; if (status) PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("smp IO status 0x%x\n", status)); if (unlikely(!t || !t->lldd_task || !t->dev)) return; switch (status) { case IO_SUCCESS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_GOOD; if (pm8001_dev) pm8001_dev->running_req--; break; case IO_ABORTED: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_ABORTED IOMB\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_ABORTED_TASK; if (pm8001_dev) pm8001_dev->running_req--; break; case IO_OVERFLOW: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_UNDERFLOW\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; if (pm8001_dev) pm8001_dev->running_req--; break; case IO_NO_DEVICE: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_NO_DEVICE\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_PHY_DOWN; break; case IO_ERROR_HW_TIMEOUT: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_ERROR_HW_TIMEOUT\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_BUSY; break; case IO_XFER_ERROR_BREAK: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_BREAK\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_BUSY; break; case IO_XFER_ERROR_PHY_NOT_READY: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_PHY_NOT_READY\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_BUSY; break; case IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_PROTOCOL_NOT_SUPPORTED\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_UNKNOWN; break; case IO_OPEN_CNX_ERROR_ZONE_VIOLATION: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_ZONE_VIOLATION\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_UNKNOWN; break; case IO_OPEN_CNX_ERROR_BREAK: PM8001_IO_DBG(pm8001_ha, 
pm8001_printk("IO_OPEN_CNX_ERROR_BREAK\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_CONT0; break; case IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_UNKNOWN; pm8001_handle_event(pm8001_ha, pm8001_dev, IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); break; case IO_OPEN_CNX_ERROR_BAD_DESTINATION: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_BAD_DESTINATION\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_BAD_DEST; break; case IO_OPEN_CNX_ERROR_CONNECTION_RATE_NOT_SUPPORTED: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_CONNECTION_RATE_" "NOT_SUPPORTED\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_CONN_RATE; break; case IO_OPEN_CNX_ERROR_WRONG_DESTINATION: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_OPEN_CNX_ERROR_WRONG_DESTINATION\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_WRONG_DEST; break; case IO_XFER_ERROR_RX_FRAME: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_ERROR_RX_FRAME\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DEV_NO_RESPONSE; break; case IO_XFER_OPEN_RETRY_TIMEOUT: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_XFER_OPEN_RETRY_TIMEOUT\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; case IO_ERROR_INTERNAL_SMP_RESOURCE: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_ERROR_INTERNAL_SMP_RESOURCE\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_QUEUE_FULL; break; case IO_PORT_IN_RESET: PM8001_IO_DBG(pm8001_ha, pm8001_printk("IO_PORT_IN_RESET\n")); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_OPEN_REJECT; ts->open_rej_reason = SAS_OREJ_RSVD_RETRY; break; case IO_DS_NON_OPERATIONAL: PM8001_IO_DBG(pm8001_ha, 
pm8001_printk("IO_DS_NON_OPERATIONAL\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		break;
	case IO_DS_IN_RECOVERY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_DS_IN_RECOVERY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	case IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("IO_OPEN_CNX_ERROR_HW_RESOURCE_BUSY\n"));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_OPEN_REJECT;
		ts->open_rej_reason = SAS_OREJ_RSVD_RETRY;
		break;
	default:
		PM8001_IO_DBG(pm8001_ha,
			pm8001_printk("Unknown status 0x%x\n", status));
		ts->resp = SAS_TASK_COMPLETE;
		ts->stat = SAS_DEV_NO_RESPONSE;
		/* not allowed case. Therefore, return failed status */
		break;
	}
	spin_lock_irqsave(&t->task_state_lock, flags);
	t->task_state_flags &= ~SAS_TASK_STATE_PENDING;
	t->task_state_flags &= ~SAS_TASK_AT_INITIATOR;
	t->task_state_flags |= SAS_TASK_STATE_DONE;
	if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task 0x%p done with"
			" io_status 0x%x resp 0x%x "
			"stat 0x%x but aborted by upper layer!\n",
			t, status, ts->resp, ts->stat));
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
	} else {
		spin_unlock_irqrestore(&t->task_state_lock, flags);
		pm8001_ccb_task_free(pm8001_ha, t, ccb, tag);
		mb();/* in order to force CPU ordering */
		t->task_done(t);
	}
}

/* mpi_set_dev_state_resp - acknowledge a SET_DEVICE_STATE response:
 * log the transition, wake the waiter and release the ccb/tag. */
static void mpi_set_dev_state_resp(struct pm8001_hba_info *pm8001_ha,
	void *piomb)
{
	struct set_dev_state_resp *pPayload =
		(struct set_dev_state_resp *)(piomb + 4);
	u32 tag = le32_to_cpu(pPayload->tag);
	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
	struct pm8001_device *pm8001_dev = ccb->device;
	u32 status = le32_to_cpu(pPayload->status);
	u32 device_id = le32_to_cpu(pPayload->device_id);
	/* NOTE(review): '| PDS_BITS' / '| NDS_BITS' look like they should
	 * be '&' to extract the previous/new device-state fields — as
	 * written the OR sets the mask bits unconditionally. Verify
	 * against the PM8001 programmer's manual before changing. */
	u8 pds = le32_to_cpu(pPayload->pds_nds) | PDS_BITS;
	u8 nds = le32_to_cpu(pPayload->pds_nds) | NDS_BITS;
	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set device id = 0x%x state "
		"from 0x%x to 0x%x status = 0x%x!\n",
		device_id, pds, nds, status));
	complete(pm8001_dev->setds_completion);
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	pm8001_ccb_free(pm8001_ha, tag);
}

/* mpi_set_nvmd_resp - completion of a SET_NVMD (write) request; the
 * ccb/tag are only released when the firmware reported success. */
static void mpi_set_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct get_nvm_data_resp *pPayload =
		(struct get_nvm_data_resp *)(piomb + 4);
	u32 tag = le32_to_cpu(pPayload->tag);
	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
	u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
	complete(pm8001_ha->nvmd_completion);
	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Set nvm data complete!\n"));
	if ((dlen_status & NVMD_STAT) != 0) {
		/* NOTE(review): on error the ccb/tag are never freed —
		 * looks like a tag leak; confirm against the ccb
		 * ownership rules of this driver. */
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("Set nvm data error!\n"));
		return;
	}
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	pm8001_ccb_free(pm8001_ha, tag);
}

/* mpi_get_nvmd_resp - completion of a GET_NVMD (read) request: copy the
 * data DMA'd into the NVMD region to the caller's buffer and, for a TWI
 * VPD read, latch the adapter SAS address. */
static void
mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct fw_control_ex	*fw_control_context;
	struct get_nvm_data_resp *pPayload =
		(struct get_nvm_data_resp *)(piomb + 4);
	u32 tag = le32_to_cpu(pPayload->tag);
	struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag];
	u32 dlen_status = le32_to_cpu(pPayload->dlen_status);
	/* field name in the response struct is spelled "ir_tda..." */
	u32 ir_tds_bn_dps_das_nvm =
		le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm);
	void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
	fw_control_context = ccb->fw_control_context;

	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n"));
	if ((dlen_status & NVMD_STAT) != 0) {
		PM8001_FAIL_DBG(pm8001_ha,
			pm8001_printk("Get nvm data error!\n"));
		complete(pm8001_ha->nvmd_completion);
		return;
	}

	if (ir_tds_bn_dps_das_nvm & IPMode) {
		/* indirect mode - IR bit set */
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("Get NVMD success, IR=1\n"));
		if ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == TWI_DEVICE) {
			/* 0x80a80200: magic selector for the VPD page that
			 * carries the SAS address at offset 4 — presumably
			 * firmware-defined; TODO confirm in the FW spec */
			if (ir_tds_bn_dps_das_nvm == 0x80a80200) {
				memcpy(pm8001_ha->sas_addr,
					((u8 *)virt_addr + 4),
					SAS_ADDR_SIZE);
				PM8001_MSG_DBG(pm8001_ha,
					pm8001_printk("Get SAS address"
					" from VPD successfully!\n"));
			}
		} else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == C_SEEPROM)
			|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == VPD_FLASH) ||
			((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == EXPAN_ROM)) {
				;
		} else if (((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == AAP1_RDUMP)
			|| ((ir_tds_bn_dps_das_nvm & NVMD_TYPE) == IOP_RDUMP)) {
				;
		} else {
			/* Should not be happened*/
			PM8001_MSG_DBG(pm8001_ha,
				pm8001_printk("(IR=1)Wrong Device type 0x%x\n",
				ir_tds_bn_dps_das_nvm));
		}
	} else /* direct mode */{
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
			(dlen_status & NVMD_LEN) >> 24));
	}
	/* hand the DMA'd data back to the requester's buffer */
	memcpy(fw_control_context->usrAddr,
		pm8001_ha->memoryMap.region[NVMD].virt_ptr,
		fw_control_context->len);
	complete(pm8001_ha->nvmd_completion);
	ccb->task = NULL;
	ccb->ccb_tag = 0xFFFFFFFF;
	pm8001_ccb_free(pm8001_ha, tag);
}

/* mpi_local_phy_ctl - log the outcome of a LOCAL_PHY_CONTROL request. */
static int mpi_local_phy_ctl(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct local_phy_ctl_resp *pPayload =
		(struct local_phy_ctl_resp *)(piomb + 4);
	u32 status = le32_to_cpu(pPayload->status);
	u32 phy_id = le32_to_cpu(pPayload->phyop_phyid) & ID_BITS;
	u32 phy_op = le32_to_cpu(pPayload->phyop_phyid) & OP_BITS;
	if (status != 0) {
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("%x phy execute %x phy op failed!\n",
			phy_id, phy_op));
	} else
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("%x phy execute %x phy op success!\n",
			phy_id, phy_op));
	return 0;
}

/**
 * pm8001_bytes_dmaed - one of the interface functions communicating with libsas
 * @pm8001_ha: our hba card information
 * @i: which phy received the event.
 *
 * When the HBA driver receives the identify-done event or an initial FIS
 * received event (for SATA), it invokes this function to notify the sas
 * layer that the sas topology has formed, asking it to discover the whole
 * sas domain; on receiving a broadcast(change) primitive the sas layer is
 * instead told to discover only the changed domain rather than the whole domain.
*/ static void pm8001_bytes_dmaed(struct pm8001_hba_info *pm8001_ha, int i) { struct pm8001_phy *phy = &pm8001_ha->phy[i]; struct asd_sas_phy *sas_phy = &phy->sas_phy; struct sas_ha_struct *sas_ha; if (!phy->phy_attached) return; sas_ha = pm8001_ha->sas; if (sas_phy->phy) { struct sas_phy *sphy = sas_phy->phy; sphy->negotiated_linkrate = sas_phy->linkrate; sphy->minimum_linkrate = phy->minimum_linkrate; sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; sphy->maximum_linkrate = phy->maximum_linkrate; sphy->maximum_linkrate_hw = phy->maximum_linkrate; } if (phy->phy_type & PORT_TYPE_SAS) { struct sas_identify_frame *id; id = (struct sas_identify_frame *)phy->frame_rcvd; id->dev_type = phy->identify.device_type; id->initiator_bits = SAS_PROTOCOL_ALL; id->target_bits = phy->identify.target_port_protocols; } else if (phy->phy_type & PORT_TYPE_SATA) { /*Nothing*/ } PM8001_MSG_DBG(pm8001_ha, pm8001_printk("phy %d byte dmaded.\n", i)); sas_phy->frame_rcvd_size = phy->frame_rcvd_size; pm8001_ha->sas->notify_port_event(sas_phy, PORTE_BYTES_DMAED); } /* Get the link rate speed */ static void get_lrate_mode(struct pm8001_phy *phy, u8 link_rate) { struct sas_phy *sas_phy = phy->sas_phy.phy; switch (link_rate) { case PHY_SPEED_60: phy->sas_phy.linkrate = SAS_LINK_RATE_6_0_GBPS; phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_6_0_GBPS; break; case PHY_SPEED_30: phy->sas_phy.linkrate = SAS_LINK_RATE_3_0_GBPS; phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_3_0_GBPS; break; case PHY_SPEED_15: phy->sas_phy.linkrate = SAS_LINK_RATE_1_5_GBPS; phy->sas_phy.phy->negotiated_linkrate = SAS_LINK_RATE_1_5_GBPS; break; } sas_phy->negotiated_linkrate = phy->sas_phy.linkrate; sas_phy->maximum_linkrate_hw = SAS_LINK_RATE_6_0_GBPS; sas_phy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS; sas_phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS; sas_phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS; } /** * asd_get_attached_sas_addr -- extract/generate attached SAS address * @phy: pointer to 
asd_phy
 * @sas_addr: pointer to buffer where the SAS address is to be written
 *
 * This function extracts the SAS address from an IDENTIFY frame
 * received. If OOB is SATA, then a SAS address is generated from the
 * HA tables.
 *
 * LOCKING: the frame_rcvd_lock needs to be held since this parses the frame
 * buffer.
 */
static void pm8001_get_attached_sas_addr(struct pm8001_phy *phy,
	u8 *sas_addr)
{
	if (phy->sas_phy.frame_rcvd[0] == 0x34
		&& phy->sas_phy.oob_mode == SATA_OOB_MODE) {
		struct pm8001_hba_info *pm8001_ha = phy->sas_phy.ha->lldd_ha;
		/* FIS device-to-host: a SATA device has no IDENTIFY frame,
		 * so synthesize a per-phy address from the HBA's own SAS
		 * address plus the phy id. */
		u64 addr = be64_to_cpu(*(__be64 *)pm8001_ha->sas_addr);
		addr += phy->sas_phy.id;
		*(__be64 *)sas_addr = cpu_to_be64(addr);
	} else {
		/* SAS: the attached address is carried in the IDENTIFY
		 * frame we stashed in frame_rcvd. */
		struct sas_identify_frame *idframe =
			(void *) phy->sas_phy.frame_rcvd;
		memcpy(sas_addr, idframe->sas_addr, SAS_ADDR_SIZE);
	}
}

/**
 * pm8001_hw_event_ack_req- For PM8001,some events need to acknowage to FW.
 * @pm8001_ha: our hba card information
 * @Qnum: the outbound queue message number.
 * @SEA: source of event to ack
 * @port_id: port id.
 * @phyId: phy id.
 * @param0: parameter 0.
 * @param1: parameter 1.
 */
static void pm8001_hw_event_ack_req(struct pm8001_hba_info *pm8001_ha,
	u32 Qnum, u32 SEA, u32 port_id, u32 phyId, u32 param0, u32 param1)
{
	struct hw_event_ack_req payload;
	u32 opc = OPC_INB_SAS_HW_EVENT_ACK;
	struct inbound_queue_table *circularQ;

	memset((u8 *)&payload, 0, sizeof(payload));
	circularQ = &pm8001_ha->inbnd_q_tbl[Qnum];
	/* Hard-coded tag: this ACK is not allocated from the ccb tag pool. */
	payload.tag = cpu_to_le32(1);
	/* Pack event source, phy id and port id into one LE32 field. */
	payload.sea_phyid_portid = cpu_to_le32(((SEA & 0xFFFF) << 8) |
		((phyId & 0x0F) << 4) | (port_id & 0x0F));
	payload.param0 = cpu_to_le32(param0);
	payload.param1 = cpu_to_le32(param1);
	mpi_build_cmd(pm8001_ha, circularQ, opc, &payload);
}

/* Forward declaration: needed by hw_event_sas_phy_up() below. */
static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha,
	u32 phyId, u32 phy_op);

/**
 * hw_event_sas_phy_up -FW tells me a SAS phy up event.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 */
static void
hw_event_sas_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	/* The IOMB payload proper starts 4 bytes into the message. */
	struct hw_event_resp *pPayload =
		(struct hw_event_resp *)(piomb + 4);
	u32 lr_evt_status_phyid_portid =
		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
	/* Bit layout: [31:28] link rate, [27:24] status (unused here),
	 * [7:4] phy id, [3:0] port id. */
	u8 link_rate =
		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
	u8 phy_id =
		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
	u8 portstate = (u8)(npip_portstate & 0x0000000F);
	struct pm8001_port *port = &pm8001_ha->port[port_id];
	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	unsigned long flags;
	u8 deviceType = pPayload->sas_identify.dev_type;

	port->port_state = portstate;
	PM8001_MSG_DBG(pm8001_ha,
		pm8001_printk("HW_EVENT_SAS_PHY_UP port id = %d, phy id = %d\n",
		port_id, phy_id));

	switch (deviceType) {
	case SAS_PHY_UNUSED:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("device type no device.\n"));
		break;
	case SAS_END_DEVICE:
		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("end device.\n"));
		/* End devices may need a spin-up notify before I/O. */
		pm8001_chip_phy_ctl_req(pm8001_ha, phy_id,
			PHY_NOTIFY_ENABLE_SPINUP);
		port->port_attached = 1;
		get_lrate_mode(phy, link_rate);
		break;
	case SAS_EDGE_EXPANDER_DEVICE:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("expander device.\n"));
		port->port_attached = 1;
		get_lrate_mode(phy, link_rate);
		break;
	case SAS_FANOUT_EXPANDER_DEVICE:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("fanout expander device.\n"));
		port->port_attached = 1;
		get_lrate_mode(phy, link_rate);
		break;
	default:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("unknown device type(%x)\n", deviceType));
		break;
	}
	phy->phy_type |= PORT_TYPE_SAS;
	phy->identify.device_type = deviceType;
	phy->phy_attached = 1;
	if (phy->identify.device_type == SAS_END_DEVICE)
		phy->identify.target_port_protocols = SAS_PROTOCOL_SSP;
	else if (phy->identify.device_type != SAS_PHY_UNUSED)
		phy->identify.target_port_protocols = SAS_PROTOCOL_SMP;
	phy->sas_phy.oob_mode = SAS_OOB_MODE;
	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
	/* Stash the IDENTIFY frame under frame_rcvd_lock; the trailing 4
	 * bytes of sas_identify_frame are excluded (presumably the CRC
	 * field -- TODO confirm against struct sas_identify_frame). */
	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
	memcpy(phy->frame_rcvd, &pPayload->sas_identify,
		sizeof(struct sas_identify_frame)-4);
	phy->frame_rcvd_size = sizeof(struct sas_identify_frame) - 4;
	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
	if (pm8001_ha->flags == PM8001F_RUN_TIME)
		mdelay(200);/*delay a moment to wait disk to spinup*/
	pm8001_bytes_dmaed(pm8001_ha, phy_id);
}

/**
 * hw_event_sata_phy_up -FW tells me a SATA phy up event.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 */
static void
hw_event_sata_phy_up(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct hw_event_resp *pPayload =
		(struct hw_event_resp *)(piomb + 4);
	u32 lr_evt_status_phyid_portid =
		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
	u8 link_rate =
		(u8)((lr_evt_status_phyid_portid & 0xF0000000) >> 28);
	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
	u8 phy_id =
		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
	u8 portstate = (u8)(npip_portstate & 0x0000000F);
	struct pm8001_port *port = &pm8001_ha->port[port_id];
	struct sas_ha_struct *sas_ha = pm8001_ha->sas;
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];
	unsigned long flags;

	PM8001_MSG_DBG(pm8001_ha,
		pm8001_printk("HW_EVENT_SATA_PHY_UP port id = %d,"
		" phy id = %d\n", port_id, phy_id));
	port->port_state = portstate;
	port->port_attached = 1;
	get_lrate_mode(phy, link_rate);
	phy->phy_type |= PORT_TYPE_SATA;
	phy->phy_attached = 1;
	phy->sas_phy.oob_mode = SATA_OOB_MODE;
	sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_DONE);
	/* Save the initial D2H FIS for libsas; the FIS starts 4 bytes
	 * before the sata_fis member in the IOMB layout. */
	spin_lock_irqsave(&phy->sas_phy.frame_rcvd_lock, flags);
	memcpy(phy->frame_rcvd, ((u8 *)&pPayload->sata_fis - 4),
		sizeof(struct dev_to_host_fis));
	phy->frame_rcvd_size =
		sizeof(struct dev_to_host_fis);
	phy->identify.target_port_protocols = SAS_PROTOCOL_SATA;
	phy->identify.device_type = SATA_DEV;
	pm8001_get_attached_sas_addr(phy, phy->sas_phy.attached_sas_addr);
	spin_unlock_irqrestore(&phy->sas_phy.frame_rcvd_lock, flags);
	pm8001_bytes_dmaed(pm8001_ha, phy_id);
}

/**
 * hw_event_phy_down -we should notify the libsas the phy is down.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 */
static void
hw_event_phy_down(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	struct hw_event_resp *pPayload =
		(struct hw_event_resp *)(piomb + 4);
	u32 lr_evt_status_phyid_portid =
		le32_to_cpu(pPayload->lr_evt_status_phyid_portid);
	u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F);
	u8 phy_id =
		(u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4);
	u32 npip_portstate = le32_to_cpu(pPayload->npip_portstate);
	u8 portstate = (u8)(npip_portstate & 0x0000000F);
	struct pm8001_port *port = &pm8001_ha->port[port_id];
	struct pm8001_phy *phy = &pm8001_ha->phy[phy_id];

	port->port_state = portstate;
	/* Forget everything we knew about the attached device. */
	phy->phy_type = 0;
	phy->identify.device_type = 0;
	phy->phy_attached = 0;
	memset(&phy->dev_sas_addr, 0, SAS_ADDR_SIZE);
	switch (portstate) {
	case PORT_VALID:
		break;
	case PORT_INVALID:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" PortInvalid portID %d\n", port_id));
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" Last phy Down and port invalid\n"));
		port->port_attached = 0;
		/* Firmware requires PHY_DOWN events on an invalid port to
		 * be acknowledged explicitly. */
		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
			port_id, phy_id, 0, 0);
		break;
	case PORT_IN_RESET:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" Port In Reset portID %d\n", port_id));
		break;
	case PORT_NOT_ESTABLISHED:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" phy Down and PORT_NOT_ESTABLISHED\n"));
		port->port_attached = 0;
		break;
	case PORT_LOSTCOMM:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" phy Down and PORT_LOSTCOMM\n"));
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" Last phy Down and port invalid\n"));
		port->port_attached = 0;
		pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_PHY_DOWN,
			port_id, phy_id, 0, 0);
		break;
	default:
		port->port_attached = 0;
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk(" phy Down and(default) = %x\n",
			portstate));
		break;
	}
}

/**
 * mpi_reg_resp -process register device ID response.
 * @pm8001_ha: our hba card information
 * @piomb: IO message buffer
 *
 * when sas layer find a device it will notify LLDD, then the driver register
 * the domain device to FW, this event is the return device ID which the FW
 * has assigned, from now,inter-communication with FW is no longer using the
 * SAS address, use device ID which FW assigned.
 */
static int mpi_reg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
{
	u32 status;
	u32 device_id;
	u32 htag;
	struct pm8001_ccb_info *ccb;
	struct pm8001_device *pm8001_dev;
	struct dev_reg_resp *registerRespPayload =
		(struct dev_reg_resp *)(piomb + 4);

	/* Tag matches the request ccb that issued the registration. */
	htag = le32_to_cpu(registerRespPayload->tag);
	ccb = &pm8001_ha->ccb_info[htag];
	pm8001_dev = ccb->device;
	status = le32_to_cpu(registerRespPayload->status);
	device_id = le32_to_cpu(registerRespPayload->device_id);
	PM8001_MSG_DBG(pm8001_ha,
		pm8001_printk(" register device is status = %d\n", status));
	switch (status) {
	case DEVREG_SUCCESS:
		PM8001_MSG_DBG(pm8001_ha, pm8001_printk("DEVREG_SUCCESS\n"));
		/* From here on the FW-assigned id names the device. */
		pm8001_dev->device_id = device_id;
		break;
	case DEVREG_FAILURE_OUT_OF_RESOURCE:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("DEVREG_FAILURE_OUT_OF_RESOURCE\n"));
		break;
	case DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("DEVREG_FAILURE_DEVICE_ALREADY_REGISTERED\n"));
		break;
	case DEVREG_FAILURE_INVALID_PHY_ID:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("DEVREG_FAILURE_INVALID_PHY_ID\n"));
		break;
	case DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("DEVREG_FAILURE_PHY_ID_ALREADY_REGISTERED\n"));
		break;
	case DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE:
		PM8001_MSG_DBG(pm8001_ha,
			pm8001_printk("DEVREG_FAILURE_PORT_ID_OUT_OF_RANGE\n"));
		break;
	case DEVREG_FAILURE_PORT_NOT_VALID_STATE:
		PM8001_MSG_DBG(pm8001_ha,
pm8001_printk("DEVREG_FAILURE_PORT_NOT_VALID_STATE\n")); break; case DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_VALID\n")); break; default: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("DEVREG_FAILURE_DEVICE_TYPE_NOT_UNSORPORTED\n")); break; } complete(pm8001_dev->dcompletion); ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; pm8001_ccb_free(pm8001_ha, htag); return 0; } static int mpi_dereg_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; u32 device_id; struct dev_reg_resp *registerRespPayload = (struct dev_reg_resp *)(piomb + 4); status = le32_to_cpu(registerRespPayload->status); device_id = le32_to_cpu(registerRespPayload->device_id); if (status != 0) PM8001_MSG_DBG(pm8001_ha, pm8001_printk(" deregister device failed ,status = %x" ", device_id = %x\n", status, device_id)); return 0; } static int mpi_fw_flash_update_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 status; struct fw_control_ex fw_control_context; struct fw_flash_Update_resp *ppayload = (struct fw_flash_Update_resp *)(piomb + 4); u32 tag = ppayload->tag; struct pm8001_ccb_info *ccb = &pm8001_ha->ccb_info[tag]; status = le32_to_cpu(ppayload->status); memcpy(&fw_control_context, ccb->fw_control_context, sizeof(fw_control_context)); switch (status) { case FLASH_UPDATE_COMPLETE_PENDING_REBOOT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk(": FLASH_UPDATE_COMPLETE_PENDING_REBOOT\n")); break; case FLASH_UPDATE_IN_PROGRESS: PM8001_MSG_DBG(pm8001_ha, pm8001_printk(": FLASH_UPDATE_IN_PROGRESS\n")); break; case FLASH_UPDATE_HDR_ERR: PM8001_MSG_DBG(pm8001_ha, pm8001_printk(": FLASH_UPDATE_HDR_ERR\n")); break; case FLASH_UPDATE_OFFSET_ERR: PM8001_MSG_DBG(pm8001_ha, pm8001_printk(": FLASH_UPDATE_OFFSET_ERR\n")); break; case FLASH_UPDATE_CRC_ERR: PM8001_MSG_DBG(pm8001_ha, pm8001_printk(": FLASH_UPDATE_CRC_ERR\n")); break; case FLASH_UPDATE_LENGTH_ERR: PM8001_MSG_DBG(pm8001_ha, pm8001_printk(": FLASH_UPDATE_LENGTH_ERR\n")); break; 
case FLASH_UPDATE_HW_ERR: PM8001_MSG_DBG(pm8001_ha, pm8001_printk(": FLASH_UPDATE_HW_ERR\n")); break; case FLASH_UPDATE_DNLD_NOT_SUPPORTED: PM8001_MSG_DBG(pm8001_ha, pm8001_printk(": FLASH_UPDATE_DNLD_NOT_SUPPORTED\n")); break; case FLASH_UPDATE_DISABLED: PM8001_MSG_DBG(pm8001_ha, pm8001_printk(": FLASH_UPDATE_DISABLED\n")); break; default: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("No matched status = %d\n", status)); break; } ccb->fw_control_context->fw_control->retcode = status; pci_free_consistent(pm8001_ha->pdev, fw_control_context.len, fw_control_context.virtAddr, fw_control_context.phys_addr); complete(pm8001_ha->nvmd_completion); ccb->task = NULL; ccb->ccb_tag = 0xFFFFFFFF; pm8001_ccb_free(pm8001_ha, tag); return 0; } static int mpi_general_event(struct pm8001_hba_info *pm8001_ha , void *piomb) { u32 status; int i; struct general_event_resp *pPayload = (struct general_event_resp *)(piomb + 4); status = le32_to_cpu(pPayload->status); PM8001_MSG_DBG(pm8001_ha, pm8001_printk(" status = 0x%x\n", status)); for (i = 0; i < GENERAL_EVENT_PAYLOAD; i++) PM8001_MSG_DBG(pm8001_ha, pm8001_printk("inb_IOMB_payload[0x%x] 0x%x,\n", i, pPayload->inb_IOMB_payload[i])); return 0; } static int mpi_task_abort_resp(struct pm8001_hba_info *pm8001_ha, void *piomb) { struct sas_task *t; struct pm8001_ccb_info *ccb; unsigned long flags; u32 status ; u32 tag, scp; struct task_status_struct *ts; struct task_abort_resp *pPayload = (struct task_abort_resp *)(piomb + 4); status = le32_to_cpu(pPayload->status); tag = le32_to_cpu(pPayload->tag); scp = le32_to_cpu(pPayload->scp); ccb = &pm8001_ha->ccb_info[tag]; t = ccb->task; PM8001_IO_DBG(pm8001_ha, pm8001_printk(" status = 0x%x\n", status)); if (t == NULL) return -1; ts = &t->task_status; if (status != 0) PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("task abort failed status 0x%x ," "tag = 0x%x, scp= 0x%x\n", status, tag, scp)); switch (status) { case IO_SUCCESS: PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_SUCCESS\n")); ts->resp = 
SAS_TASK_COMPLETE; ts->stat = SAM_STAT_GOOD; break; case IO_NOT_VALID: PM8001_EH_DBG(pm8001_ha, pm8001_printk("IO_NOT_VALID\n")); ts->resp = TMF_RESP_FUNC_FAILED; break; } spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_STATE_PENDING; t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; t->task_state_flags |= SAS_TASK_STATE_DONE; spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); mb(); t->task_done(t); return 0; } /** * mpi_hw_event -The hw event has come. * @pm8001_ha: our hba card information * @piomb: IO message buffer */ static int mpi_hw_event(struct pm8001_hba_info *pm8001_ha, void* piomb) { unsigned long flags; struct hw_event_resp *pPayload = (struct hw_event_resp *)(piomb + 4); u32 lr_evt_status_phyid_portid = le32_to_cpu(pPayload->lr_evt_status_phyid_portid); u8 port_id = (u8)(lr_evt_status_phyid_portid & 0x0000000F); u8 phy_id = (u8)((lr_evt_status_phyid_portid & 0x000000F0) >> 4); u16 eventType = (u16)((lr_evt_status_phyid_portid & 0x00FFFF00) >> 8); u8 status = (u8)((lr_evt_status_phyid_portid & 0x0F000000) >> 24); struct sas_ha_struct *sas_ha = pm8001_ha->sas; struct pm8001_phy *phy = &pm8001_ha->phy[phy_id]; struct asd_sas_phy *sas_phy = sas_ha->sas_phy[phy_id]; PM8001_MSG_DBG(pm8001_ha, pm8001_printk("outbound queue HW event & event type : ")); switch (eventType) { case HW_EVENT_PHY_START_STATUS: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_PHY_START_STATUS" " status = %x\n", status)); if (status == 0) { phy->phy_state = 1; if (pm8001_ha->flags == PM8001F_RUN_TIME) complete(phy->enable_completion); } break; case HW_EVENT_SAS_PHY_UP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_PHY_START_STATUS\n")); hw_event_sas_phy_up(pm8001_ha, piomb); break; case HW_EVENT_SATA_PHY_UP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_SATA_PHY_UP\n")); hw_event_sata_phy_up(pm8001_ha, piomb); break; case HW_EVENT_PHY_STOP_STATUS: PM8001_MSG_DBG(pm8001_ha, 
pm8001_printk("HW_EVENT_PHY_STOP_STATUS " "status = %x\n", status)); if (status == 0) phy->phy_state = 0; break; case HW_EVENT_SATA_SPINUP_HOLD: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_SATA_SPINUP_HOLD\n")); sas_ha->notify_phy_event(&phy->sas_phy, PHYE_SPINUP_HOLD); break; case HW_EVENT_PHY_DOWN: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_PHY_DOWN\n")); sas_ha->notify_phy_event(&phy->sas_phy, PHYE_LOSS_OF_SIGNAL); phy->phy_attached = 0; phy->phy_state = 0; hw_event_phy_down(pm8001_ha, piomb); break; case HW_EVENT_PORT_INVALID: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_PORT_INVALID\n")); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); break; /* the broadcast change primitive received, tell the LIBSAS this event to revalidate the sas domain*/ case HW_EVENT_BROADCAST_CHANGE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_BROADCAST_CHANGE\n")); pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_BROADCAST_CHANGE, port_id, phy_id, 1, 0); spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); sas_phy->sas_prim = HW_EVENT_BROADCAST_CHANGE; spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); break; case HW_EVENT_PHY_ERROR: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_PHY_ERROR\n")); sas_phy_disconnected(&phy->sas_phy); phy->phy_attached = 0; sas_ha->notify_phy_event(&phy->sas_phy, PHYE_OOB_ERROR); break; case HW_EVENT_BROADCAST_EXP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_BROADCAST_EXP\n")); spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); sas_phy->sas_prim = HW_EVENT_BROADCAST_EXP; spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); break; case HW_EVENT_LINK_ERR_INVALID_DWORD: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_LINK_ERR_INVALID_DWORD\n")); pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_LINK_ERR_INVALID_DWORD, port_id, phy_id, 0, 0); 
sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); break; case HW_EVENT_LINK_ERR_DISPARITY_ERROR: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_LINK_ERR_DISPARITY_ERROR\n")); pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_LINK_ERR_DISPARITY_ERROR, port_id, phy_id, 0, 0); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); break; case HW_EVENT_LINK_ERR_CODE_VIOLATION: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_LINK_ERR_CODE_VIOLATION\n")); pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_LINK_ERR_CODE_VIOLATION, port_id, phy_id, 0, 0); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); break; case HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH\n")); pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_LINK_ERR_LOSS_OF_DWORD_SYNCH, port_id, phy_id, 0, 0); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); break; case HW_EVENT_MALFUNCTION: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_MALFUNCTION\n")); break; case HW_EVENT_BROADCAST_SES: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_BROADCAST_SES\n")); spin_lock_irqsave(&sas_phy->sas_prim_lock, flags); sas_phy->sas_prim = HW_EVENT_BROADCAST_SES; spin_unlock_irqrestore(&sas_phy->sas_prim_lock, flags); sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD); break; case HW_EVENT_INBOUND_CRC_ERROR: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_INBOUND_CRC_ERROR\n")); pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_INBOUND_CRC_ERROR, port_id, phy_id, 0, 0); break; case HW_EVENT_HARD_RESET_RECEIVED: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_HARD_RESET_RECEIVED\n")); sas_ha->notify_port_event(sas_phy, PORTE_HARD_RESET); break; case HW_EVENT_ID_FRAME_TIMEOUT: PM8001_MSG_DBG(pm8001_ha, 
pm8001_printk("HW_EVENT_ID_FRAME_TIMEOUT\n")); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); break; case HW_EVENT_LINK_ERR_PHY_RESET_FAILED: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_LINK_ERR_PHY_RESET_FAILED\n")); pm8001_hw_event_ack_req(pm8001_ha, 0, HW_EVENT_LINK_ERR_PHY_RESET_FAILED, port_id, phy_id, 0, 0); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); break; case HW_EVENT_PORT_RESET_TIMER_TMO: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_PORT_RESET_TIMER_TMO\n")); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); break; case HW_EVENT_PORT_RECOVERY_TIMER_TMO: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_PORT_RECOVERY_TIMER_TMO\n")); sas_phy_disconnected(sas_phy); phy->phy_attached = 0; sas_ha->notify_port_event(sas_phy, PORTE_LINK_RESET_ERR); break; case HW_EVENT_PORT_RECOVER: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_PORT_RECOVER\n")); break; case HW_EVENT_PORT_RESET_COMPLETE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("HW_EVENT_PORT_RESET_COMPLETE\n")); break; case EVENT_BROADCAST_ASYNCH_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("EVENT_BROADCAST_ASYNCH_EVENT\n")); break; default: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Unknown event type = %x\n", eventType)); break; } return 0; } /** * process_one_iomb - process one outbound Queue memory block * @pm8001_ha: our hba card information * @piomb: IO message buffer */ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, void *piomb) { u32 pHeader = (u32)*(u32 *)piomb; u8 opc = (u8)(pHeader & 0xFFF); PM8001_MSG_DBG(pm8001_ha, pm8001_printk("process_one_iomb:")); switch (opc) { case OPC_OUB_ECHO: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_ECHO\n")); break; case OPC_OUB_HW_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_HW_EVENT\n")); mpi_hw_event(pm8001_ha, piomb); break; 
case OPC_OUB_SSP_COMP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_COMP\n")); mpi_ssp_completion(pm8001_ha, piomb); break; case OPC_OUB_SMP_COMP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SMP_COMP\n")); mpi_smp_completion(pm8001_ha, piomb); break; case OPC_OUB_LOCAL_PHY_CNTRL: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_LOCAL_PHY_CNTRL\n")); mpi_local_phy_ctl(pm8001_ha, piomb); break; case OPC_OUB_DEV_REGIST: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_DEV_REGIST\n")); mpi_reg_resp(pm8001_ha, piomb); break; case OPC_OUB_DEREG_DEV: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister the device\n")); mpi_dereg_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEV_HANDLE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GET_DEV_HANDLE\n")); break; case OPC_OUB_SATA_COMP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SATA_COMP\n")); mpi_sata_completion(pm8001_ha, piomb); break; case OPC_OUB_SATA_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SATA_EVENT\n")); mpi_sata_event(pm8001_ha, piomb); break; case OPC_OUB_SSP_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_EVENT\n")); mpi_ssp_event(pm8001_ha, piomb); break; case OPC_OUB_DEV_HANDLE_ARRIV: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_DEV_HANDLE_ARRIV\n")); /*This is for target*/ break; case OPC_OUB_SSP_RECV_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_RECV_EVENT\n")); /*This is for target*/ break; case OPC_OUB_DEV_INFO: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_DEV_INFO\n")); break; case OPC_OUB_FW_FLASH_UPDATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_FW_FLASH_UPDATE\n")); mpi_fw_flash_update_resp(pm8001_ha, piomb); break; case OPC_OUB_GPIO_RESPONSE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GPIO_RESPONSE\n")); break; case OPC_OUB_GPIO_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GPIO_EVENT\n")); break; case OPC_OUB_GENERAL_EVENT: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GENERAL_EVENT\n")); 
mpi_general_event(pm8001_ha, piomb); break; case OPC_OUB_SSP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SSP_ABORT_RSP\n")); mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SATA_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SATA_ABORT_RSP\n")); mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_SAS_DIAG_MODE_START_END: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SAS_DIAG_MODE_START_END\n")); break; case OPC_OUB_SAS_DIAG_EXECUTE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SAS_DIAG_EXECUTE\n")); break; case OPC_OUB_GET_TIME_STAMP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GET_TIME_STAMP\n")); break; case OPC_OUB_SAS_HW_EVENT_ACK: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SAS_HW_EVENT_ACK\n")); break; case OPC_OUB_PORT_CONTROL: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_PORT_CONTROL\n")); break; case OPC_OUB_SMP_ABORT_RSP: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SMP_ABORT_RSP\n")); mpi_task_abort_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GET_NVMD_DATA\n")); mpi_get_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_SET_NVMD_DATA: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_NVMD_DATA\n")); mpi_set_nvmd_resp(pm8001_ha, piomb); break; case OPC_OUB_DEVICE_HANDLE_REMOVAL: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_DEVICE_HANDLE_REMOVAL\n")); break; case OPC_OUB_SET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_DEVICE_STATE\n")); mpi_set_dev_state_resp(pm8001_ha, piomb); break; case OPC_OUB_GET_DEVICE_STATE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_GET_DEVICE_STATE\n")); break; case OPC_OUB_SET_DEV_INFO: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SET_DEV_INFO\n")); break; case OPC_OUB_SAS_RE_INITIALIZE: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("OPC_OUB_SAS_RE_INITIALIZE\n")); break; default: PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Unknown outbound Queue IOMB OPC = %x\n", 
			opc));
		break;
	}
}

/* Drain the outbound queue: consume and dispatch IOMBs until the
 * firmware-side producer index shows the queue is empty. Runs under
 * the HBA lock. Returns the last mpi_msg_consume() status. */
static int process_oq(struct pm8001_hba_info *pm8001_ha)
{
	struct outbound_queue_table *circularQ;
	void *pMsg1 = NULL;
	u8 uninitialized_var(bc);
	u32 ret = MPI_IO_STATUS_FAIL;
	unsigned long flags;

	spin_lock_irqsave(&pm8001_ha->lock, flags);
	circularQ = &pm8001_ha->outbnd_q_tbl[0];
	do {
		ret = mpi_msg_consume(pm8001_ha, circularQ, &pMsg1, &bc);
		if (MPI_IO_STATUS_SUCCESS == ret) {
			/* process the outbound message */
			process_one_iomb(pm8001_ha, (void *)(pMsg1 - 4));
			/* free the message from the outbound circular buffer */
			mpi_msg_free_set(pm8001_ha, pMsg1,
					circularQ, bc);
		}
		if (MPI_IO_STATUS_BUSY == ret) {
			/* Update the producer index from SPC */
			circularQ->producer_index =
				cpu_to_le32(pm8001_read_32(circularQ->pi_virt));
			if (le32_to_cpu(circularQ->producer_index) ==
				circularQ->consumer_idx)
				/* OQ is empty */
				break;
		}
	} while (1);
	spin_unlock_irqrestore(&pm8001_ha->lock, flags);
	return ret;
}

/* PCI_DMA_... to our direction translation. */
static const u8 data_dir_flags[] = {
	[PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT,/* UNSPECIFIED */
	[PCI_DMA_TODEVICE]	= DATA_DIR_OUT,/* OUTBOUND */
	[PCI_DMA_FROMDEVICE]	= DATA_DIR_IN,/* INBOUND */
	[PCI_DMA_NONE]		= DATA_DIR_NONE,/* NO TRANSFER */
};

/* Fill the hardware PRD (scatter/gather) table at @prd from the mapped
 * scatterlist @scatter with @nr entries; addresses and lengths are
 * stored little-endian, extension bit cleared. */
static void
pm8001_chip_make_sg(struct scatterlist *scatter, int nr, void *prd)
{
	int i;
	struct scatterlist *sg;
	struct pm8001_prd *buf_prd = prd;

	for_each_sg(scatter, sg, nr, i) {
		buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
		buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
		buf_prd->im_len.e = 0;
		buf_prd++;
	}
}

/* Fill the fixed part of an SMP request IOMB: tag (already LE), the
 * FW device id, and the indirect-request/indirect-response flag bits. */
static void build_smp_cmd(u32 deviceID, __le32 hTag, struct smp_req *psmp_cmd)
{
	psmp_cmd->tag = hTag;
	psmp_cmd->device_id = cpu_to_le32(deviceID);
	psmp_cmd->len_ip_ir = cpu_to_le32(1|(1 << 1));
}

/**
 * pm8001_chip_smp_req - send a SMP task to FW
 * @pm8001_ha: our hba card information.
 * @ccb: the ccb information this request used.
 */
static int pm8001_chip_smp_req(struct pm8001_hba_info *pm8001_ha,
	struct pm8001_ccb_info *ccb)
{
	int elem, rc;
	struct sas_task *task = ccb->task;
	struct domain_device *dev = task->dev;
	struct pm8001_device *pm8001_dev = dev->lldd_dev;
	struct scatterlist *sg_req, *sg_resp;
	u32 req_len, resp_len;
	struct smp_req smp_cmd;
	u32 opc;
	struct inbound_queue_table *circularQ;

	memset(&smp_cmd, 0, sizeof(smp_cmd));
	/*
	 * DMA-map SMP request, response buffers
	 */
	sg_req = &task->smp_task.smp_req;
	elem = dma_map_sg(pm8001_ha->dev, sg_req, 1, PCI_DMA_TODEVICE);
	if (!elem)
		return -ENOMEM;
	req_len = sg_dma_len(sg_req);

	sg_resp = &task->smp_task.smp_resp;
	elem = dma_map_sg(pm8001_ha->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
	if (!elem) {
		rc = -ENOMEM;
		goto err_out;	/* undo the request-buffer mapping */
	}
	resp_len = sg_dma_len(sg_resp);
	/* must be in dwords */
	if ((req_len & 0x3) || (resp_len & 0x3)) {
		rc = -EINVAL;
		goto err_out_2;	/* undo both mappings */
	}

	opc = OPC_INB_SMP_REQUEST;
	circularQ = &pm8001_ha->inbnd_q_tbl[0];
	smp_cmd.tag = cpu_to_le32(ccb->ccb_tag);
	/* Indirect mode: pass DMA addresses of request/response buffers;
	 * sizes exclude the trailing 4 bytes (CRC dword). */
	smp_cmd.long_smp_req.long_req_addr =
		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req));
	smp_cmd.long_smp_req.long_req_size =
		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4);
	smp_cmd.long_smp_req.long_resp_addr =
		cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp));
	smp_cmd.long_smp_req.long_resp_size =
		cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4);
	build_smp_cmd(pm8001_dev->device_id, smp_cmd.tag, &smp_cmd);
	mpi_build_cmd(pm8001_ha, circularQ, opc, (u32 *)&smp_cmd);
	return 0;

err_out_2:
	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_resp, 1,
			PCI_DMA_FROMDEVICE);
err_out:
	dma_unmap_sg(pm8001_ha->dev, &ccb->task->smp_task.smp_req, 1,
			PCI_DMA_TODEVICE);
	return rc;
}

/**
 * pm8001_chip_ssp_io_req - send a SSP task to FW
 * @pm8001_ha: our hba card information.
 * @ccb: the ccb information this request used.
*/ static int pm8001_chip_ssp_io_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb) { struct sas_task *task = ccb->task; struct domain_device *dev = task->dev; struct pm8001_device *pm8001_dev = dev->lldd_dev; struct ssp_ini_io_start_req ssp_cmd; u32 tag = ccb->ccb_tag; int ret; u64 phys_addr; struct inbound_queue_table *circularQ; u32 opc = OPC_INB_SSPINIIOSTART; memset(&ssp_cmd, 0, sizeof(ssp_cmd)); memcpy(ssp_cmd.ssp_iu.lun, task->ssp_task.LUN, 8); ssp_cmd.dir_m_tlr = cpu_to_le32(data_dir_flags[task->data_dir] << 8 | 0x0);/*0 for SAS 1.1 compatible TLR*/ ssp_cmd.data_len = cpu_to_le32(task->total_xfer_len); ssp_cmd.device_id = cpu_to_le32(pm8001_dev->device_id); ssp_cmd.tag = cpu_to_le32(tag); if (task->ssp_task.enable_first_burst) ssp_cmd.ssp_iu.efb_prio_attr |= 0x80; ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_prio << 3); ssp_cmd.ssp_iu.efb_prio_attr |= (task->ssp_task.task_attr & 7); memcpy(ssp_cmd.ssp_iu.cdb, task->ssp_task.cdb, 16); circularQ = &pm8001_ha->inbnd_q_tbl[0]; /* fill in PRD (scatter/gather) table, if any */ if (task->num_scatter > 1) { pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); phys_addr = ccb->ccb_dma_handle + offsetof(struct pm8001_ccb_info, buf_prd[0]); ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(phys_addr)); ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(phys_addr)); ssp_cmd.esgl = cpu_to_le32(1<<31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); ssp_cmd.addr_low = cpu_to_le32(lower_32_bits(dma_addr)); ssp_cmd.addr_high = cpu_to_le32(upper_32_bits(dma_addr)); ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; } else if (task->num_scatter == 0) { ssp_cmd.addr_low = 0; ssp_cmd.addr_high = 0; ssp_cmd.len = cpu_to_le32(task->total_xfer_len); ssp_cmd.esgl = 0; } ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &ssp_cmd); return ret; } static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb) { struct sas_task 
*task = ccb->task; struct domain_device *dev = task->dev; struct pm8001_device *pm8001_ha_dev = dev->lldd_dev; u32 tag = ccb->ccb_tag; int ret; struct sata_start_req sata_cmd; u32 hdr_tag, ncg_tag = 0; u64 phys_addr; u32 ATAP = 0x0; u32 dir; struct inbound_queue_table *circularQ; u32 opc = OPC_INB_SATA_HOST_OPSTART; memset(&sata_cmd, 0, sizeof(sata_cmd)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; if (task->data_dir == PCI_DMA_NONE) { ATAP = 0x04; /* no data*/ PM8001_IO_DBG(pm8001_ha, pm8001_printk("no data\n")); } else if (likely(!task->ata_task.device_control_reg_update)) { if (task->ata_task.dma_xfer) { ATAP = 0x06; /* DMA */ PM8001_IO_DBG(pm8001_ha, pm8001_printk("DMA\n")); } else { ATAP = 0x05; /* PIO*/ PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n")); } if (task->ata_task.use_ncq && dev->sata_dev.command_set != ATAPI_COMMAND_SET) { ATAP = 0x07; /* FPDMA */ PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); } } if (task->ata_task.use_ncq && pm8001_get_ncq_tag(task, &hdr_tag)) ncg_tag = hdr_tag; dir = data_dir_flags[task->data_dir] << 8; sata_cmd.tag = cpu_to_le32(tag); sata_cmd.device_id = cpu_to_le32(pm8001_ha_dev->device_id); sata_cmd.data_len = cpu_to_le32(task->total_xfer_len); sata_cmd.ncqtag_atap_dir_m = cpu_to_le32(((ncg_tag & 0xff)<<16)|((ATAP & 0x3f) << 10) | dir); sata_cmd.sata_fis = task->ata_task.fis; if (likely(!task->ata_task.device_control_reg_update)) sata_cmd.sata_fis.flags |= 0x80;/* C=1: update ATA cmd reg */ sata_cmd.sata_fis.flags &= 0xF0;/* PM_PORT field shall be 0 */ /* fill in PRD (scatter/gather) table, if any */ if (task->num_scatter > 1) { pm8001_chip_make_sg(task->scatter, ccb->n_elem, ccb->buf_prd); phys_addr = ccb->ccb_dma_handle + offsetof(struct pm8001_ccb_info, buf_prd[0]); sata_cmd.addr_low = lower_32_bits(phys_addr); sata_cmd.addr_high = upper_32_bits(phys_addr); sata_cmd.esgl = cpu_to_le32(1 << 31); } else if (task->num_scatter == 1) { u64 dma_addr = sg_dma_address(task->scatter); sata_cmd.addr_low = lower_32_bits(dma_addr); 
sata_cmd.addr_high = upper_32_bits(dma_addr); sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; } else if (task->num_scatter == 0) { sata_cmd.addr_low = 0; sata_cmd.addr_high = 0; sata_cmd.len = cpu_to_le32(task->total_xfer_len); sata_cmd.esgl = 0; } ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sata_cmd); return ret; } /** * pm8001_chip_phy_start_req - start phy via PHY_START COMMAND * @pm8001_ha: our hba card information. * @num: the inbound queue number * @phy_id: the phy id which we wanted to start up. */ static int pm8001_chip_phy_start_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_start_req payload; struct inbound_queue_table *circularQ; int ret; u32 tag = 0x01; u32 opcode = OPC_INB_PHYSTART; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); /* ** [0:7] PHY Identifier ** [8:11] link rate 1.5G, 3G, 6G ** [12:13] link mode 01b SAS mode; 10b SATA mode; 11b both ** [14] 0b disable spin up hold; 1b enable spin up hold */ payload.ase_sh_lm_slr_phyid = cpu_to_le32(SPINHOLD_DISABLE | LINKMODE_AUTO | LINKRATE_15 | LINKRATE_30 | LINKRATE_60 | phy_id); payload.sas_identify.dev_type = SAS_END_DEV; payload.sas_identify.initiator_bits = SAS_PROTOCOL_ALL; memcpy(payload.sas_identify.sas_addr, pm8001_ha->sas_addr, SAS_ADDR_SIZE); payload.sas_identify.phy_id = phy_id; ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); return ret; } /** * pm8001_chip_phy_stop_req - start phy via PHY_STOP COMMAND * @pm8001_ha: our hba card information. * @num: the inbound queue number * @phy_id: the phy id which we wanted to start up. 
* Review notes (pm8001_chip_phy_stop_req, pm8001_chip_reg_dev_req,
* pm8001_chip_dereg_dev_req below):
* - phy_stop_req posts an OPC_INB_PHYSTOP request with a fixed tag of 1;
*   presumably the fixed tag is safe for untracked management commands --
*   TODO confirm against the firmware spec.
* - reg_dev_req allocates a driver tag + ccb and builds an OPC_INB_REG_DEV
*   request: device class (stp=0x00 / ssp-smp=0x01 / direct-attached
*   sata=0x02), attached phy (parent expander phy if behind an expander),
*   negotiated link rate clamped to the port rate, retry flag, first-burst
*   size and a 2000ms I_T nexus timeout.
*   NOTE(review): if mpi_build_cmd() fails the allocated tag/ccb is not
*   released here -- verify whether the completion path reclaims it.
* - dereg_dev_req posts OPC_INB_DEREG_DEV_HANDLE with fixed tag 1 to
*   unregister a firmware device handle.
*/ static int pm8001_chip_phy_stop_req(struct pm8001_hba_info *pm8001_ha, u8 phy_id) { struct phy_stop_req payload; struct inbound_queue_table *circularQ; int ret; u32 tag = 0x01; u32 opcode = OPC_INB_PHYSTOP; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(tag); payload.phy_id = cpu_to_le32(phy_id); ret = mpi_build_cmd(pm8001_ha, circularQ, opcode, &payload); return ret; } /** * see comments on mpi_reg_resp. */ static int pm8001_chip_reg_dev_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 flag) { struct reg_dev_req payload; u32 opc; u32 stp_sspsmp_sata = 0x4; struct inbound_queue_table *circularQ; u32 linkrate, phy_id; int rc, tag = 0xdeadbeef; struct pm8001_ccb_info *ccb; u8 retryFlag = 0x1; u16 firstBurstSize = 0; u16 ITNT = 2000; struct domain_device *dev = pm8001_dev->sas_device; struct domain_device *parent_dev = dev->parent; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&payload, 0, sizeof(payload)); rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) return rc; ccb = &pm8001_ha->ccb_info[tag]; ccb->device = pm8001_dev; ccb->ccb_tag = tag; payload.tag = cpu_to_le32(tag); if (flag == 1) stp_sspsmp_sata = 0x02; /*direct attached sata */ else { if (pm8001_dev->dev_type == SATA_DEV) stp_sspsmp_sata = 0x00; /* stp*/ else if (pm8001_dev->dev_type == SAS_END_DEV || pm8001_dev->dev_type == EDGE_DEV || pm8001_dev->dev_type == FANOUT_DEV) stp_sspsmp_sata = 0x01; /*ssp or smp*/ } if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) phy_id = parent_dev->ex_dev.ex_phy->phy_id; else phy_id = pm8001_dev->attached_phy; opc = OPC_INB_REG_DEV; linkrate = (pm8001_dev->sas_device->linkrate < dev->port->linkrate) ?
pm8001_dev->sas_device->linkrate : dev->port->linkrate; payload.phyid_portid = cpu_to_le32(((pm8001_dev->sas_device->port->id) & 0x0F) | ((phy_id & 0x0F) << 4)); payload.dtype_dlr_retry = cpu_to_le32((retryFlag & 0x01) | ((linkrate & 0x0F) * 0x1000000) | ((stp_sspsmp_sata & 0x03) * 0x10000000)); payload.firstburstsize_ITNexustimeout = cpu_to_le32(ITNT | (firstBurstSize * 0x10000)); memcpy(payload.sas_addr, pm8001_dev->sas_device->sas_addr, SAS_ADDR_SIZE); rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); return rc; } /** * see comments on mpi_reg_resp. */ static int pm8001_chip_dereg_dev_req(struct pm8001_hba_info *pm8001_ha, u32 device_id) { struct dereg_dev_req payload; u32 opc = OPC_INB_DEREG_DEV_HANDLE; int ret; struct inbound_queue_table *circularQ; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&payload, 0, sizeof(payload)); payload.tag = cpu_to_le32(1); payload.device_id = cpu_to_le32(device_id); PM8001_MSG_DBG(pm8001_ha, pm8001_printk("unregister device device_id = %d\n", device_id)); ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); return ret; } /** * pm8001_chip_phy_ctl_req - support the local phy operation * @pm8001_ha: our hba card information.
* Review notes for the functions below:
* - pm8001_chip_phy_ctl_req: posts OPC_INB_LOCAL_PHY_CONTROL with the op
*   code in bits [8:15] and the phy id in bits [0:3], fixed tag 1.
* - pm8001_chip_is_our_interupt [sic]: under PM8001_USE_MSIX it returns 1
*   unconditionally, leaving "value" set-but-unused (compiler warning);
*   otherwise it reads the MSGU outbound doorbell register.
* - pm8001_chip_isr: masks chip interrupts, drains the outbound queue via
*   process_oq(), re-enables interrupts, returns IRQ_HANDLED.
* - send_task_abort: builds OPC_INB_*_ABORT for a single tag
*   (ABORT_SINGLE) or a whole device (ABORT_ALL).
* @num: the inbound queue number * @phy_id: the phy id which we wanted to operate * @phy_op: */ static int pm8001_chip_phy_ctl_req(struct pm8001_hba_info *pm8001_ha, u32 phyId, u32 phy_op) { struct local_phy_ctl_req payload; struct inbound_queue_table *circularQ; int ret; u32 opc = OPC_INB_LOCAL_PHY_CONTROL; memset(&payload, 0, sizeof(payload)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; payload.tag = cpu_to_le32(1); payload.phyop_phyid = cpu_to_le32(((phy_op & 0xff) << 8) | (phyId & 0x0F)); ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); return ret; } static u32 pm8001_chip_is_our_interupt(struct pm8001_hba_info *pm8001_ha) { u32 value; #ifdef PM8001_USE_MSIX return 1; #endif value = pm8001_cr32(pm8001_ha, 0, MSGU_ODR); if (value) return 1; return 0; } /** * pm8001_chip_isr - PM8001 isr handler. * @pm8001_ha: our hba card information. * @irq: irq number. * @stat: stat. */ static irqreturn_t pm8001_chip_isr(struct pm8001_hba_info *pm8001_ha) { pm8001_chip_interrupt_disable(pm8001_ha); process_oq(pm8001_ha); pm8001_chip_interrupt_enable(pm8001_ha); return IRQ_HANDLED; } static int send_task_abort(struct pm8001_hba_info *pm8001_ha, u32 opc, u32 dev_id, u8 flag, u32 task_tag, u32 cmd_tag) { struct task_abort_req task_abort; struct inbound_queue_table *circularQ; int ret; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&task_abort, 0, sizeof(task_abort)); if (ABORT_SINGLE == (flag & ABORT_MASK)) { task_abort.abort_all = 0; task_abort.device_id = cpu_to_le32(dev_id); task_abort.tag_to_abort = cpu_to_le32(task_tag); task_abort.tag = cpu_to_le32(cmd_tag); } else if (ABORT_ALL == (flag & ABORT_MASK)) { task_abort.abort_all = cpu_to_le32(1); task_abort.device_id = cpu_to_le32(dev_id); task_abort.tag = cpu_to_le32(cmd_tag); } ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &task_abort); return ret; } /** * pm8001_chip_abort_task - SAS abort task when error or exception happened. * @task: the task we wanted to aborted. * @flag: the abort flag.
* Review notes for the remaining builders and the dispatch table below:
* - pm8001_chip_abort_task: picks the abort opcode by device type
*   (SSP / SATA / SMP) and delegates to send_task_abort().
* - pm8001_chip_ssp_tm_req: builds an OPC_INB_SSPINITMSTART task
*   management request from the tmf descriptor and the task's LUN.
* - pm8001_chip_get_nvmd_req / pm8001_chip_set_nvmd_req: read/write NVM
*   data (TWI EEPROM, C_SEEPROM, VPD flash, expansion ROM) through the
*   preallocated NVMD DMA region; both allocate a tag + ccb and stash a
*   kzalloc'ed fw_control_ex in ccb->fw_control_context (freed by the
*   completion path, presumably -- TODO confirm).
*   NOTE(review): an unrecognised nvmd_type falls through "default: break"
*   and still posts a request with zeroed len_ir_vpdd/resp fields.
* - pm8001_chip_fw_flash_update_req: NOTE(review): when fw_control->len
*   is 0, "buffer" stays NULL and "phys_addr" is never initialised, yet
*   memcpy(buffer, ..., 0) and cpu_to_le64(phys_addr) still execute --
*   the copy and SGL setup look like they belong inside the
*   "if (fw_control->len != 0)" branch; confirm against newer upstream.
* - pm8001_chip_set_dev_state_req / pm8001_chip_sas_re_initialization:
*   tag-allocated single-payload commands (OPC_INB_SET_DEVICE_STATE,
*   OPC_INB_SAS_RE_INITIALIZE); both return -1 rather than the
*   pm8001_tag_alloc() error code on allocation failure.
* - pm8001_8001_dispatch: the chip-ops vtable wiring all of the above.
*/ static int pm8001_chip_abort_task(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u8 flag, u32 task_tag, u32 cmd_tag) { u32 opc, device_id; int rc = TMF_RESP_FUNC_FAILED; PM8001_EH_DBG(pm8001_ha, pm8001_printk("cmd_tag = %x, abort task tag" " = %x", cmd_tag, task_tag)); if (pm8001_dev->dev_type == SAS_END_DEV) opc = OPC_INB_SSP_ABORT; else if (pm8001_dev->dev_type == SATA_DEV) opc = OPC_INB_SATA_ABORT; else opc = OPC_INB_SMP_ABORT;/* SMP */ device_id = pm8001_dev->device_id; rc = send_task_abort(pm8001_ha, opc, device_id, flag, task_tag, cmd_tag); if (rc != TMF_RESP_FUNC_COMPLETE) PM8001_EH_DBG(pm8001_ha, pm8001_printk("rc= %d\n", rc)); return rc; } /** * pm8001_chip_ssp_tm_req - built the task management command. * @pm8001_ha: our hba card information. * @ccb: the ccb information. * @tmf: task management function. */ static int pm8001_chip_ssp_tm_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_ccb_info *ccb, struct pm8001_tmf_task *tmf) { struct sas_task *task = ccb->task; struct domain_device *dev = task->dev; struct pm8001_device *pm8001_dev = dev->lldd_dev; u32 opc = OPC_INB_SSPINITMSTART; struct inbound_queue_table *circularQ; struct ssp_ini_tm_start_req sspTMCmd; int ret; memset(&sspTMCmd, 0, sizeof(sspTMCmd)); sspTMCmd.device_id = cpu_to_le32(pm8001_dev->device_id); sspTMCmd.relate_tag = cpu_to_le32(tmf->tag_of_task_to_be_managed); sspTMCmd.tmf = cpu_to_le32(tmf->tmf); memcpy(sspTMCmd.lun, task->ssp_task.LUN, 8); sspTMCmd.tag = cpu_to_le32(ccb->ccb_tag); circularQ = &pm8001_ha->inbnd_q_tbl[0]; ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &sspTMCmd); return ret; } static int pm8001_chip_get_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_GET_NVMD_DATA; u32 nvmd_type; int rc; u32 tag; struct pm8001_ccb_info *ccb; struct inbound_queue_table *circularQ; struct get_nvm_data_req nvmd_req; struct fw_control_ex *fw_control_context; struct pm8001_ioctl_payload *ioctl_payload = payload; nvmd_type =
ioctl_payload->minor_function; fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; fw_control_context->usrAddr = (u8 *)&ioctl_payload->func_specific[0]; fw_control_context->len = ioctl_payload->length; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memset(&nvmd_req, 0, sizeof(nvmd_req)); rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) { kfree(fw_control_context); return rc; } ccb = &pm8001_ha->ccb_info[tag]; ccb->ccb_tag = tag; ccb->fw_control_context = fw_control_context; nvmd_req.tag = cpu_to_le32(tag); switch (nvmd_type) { case TWI_DEVICE: { u32 twi_addr, twi_page_size; twi_addr = 0xa8; twi_page_size = 2; nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 | twi_page_size << 8 | TWI_DEVICE); nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); nvmd_req.resp_addr_hi = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); nvmd_req.resp_addr_lo = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); break; } case C_SEEPROM: { nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM); nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); nvmd_req.resp_addr_hi = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); nvmd_req.resp_addr_lo = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); break; } case VPD_FLASH: { nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH); nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); nvmd_req.resp_addr_hi = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); nvmd_req.resp_addr_lo = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); break; } case EXPAN_ROM: { nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM); nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); nvmd_req.resp_addr_hi = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); nvmd_req.resp_addr_lo = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); break; } default: break; } rc = mpi_build_cmd(pm8001_ha, circularQ, opc,
&nvmd_req); return rc; } static int pm8001_chip_set_nvmd_req(struct pm8001_hba_info *pm8001_ha, void *payload) { u32 opc = OPC_INB_SET_NVMD_DATA; u32 nvmd_type; int rc; u32 tag; struct pm8001_ccb_info *ccb; struct inbound_queue_table *circularQ; struct set_nvm_data_req nvmd_req; struct fw_control_ex *fw_control_context; struct pm8001_ioctl_payload *ioctl_payload = payload; nvmd_type = ioctl_payload->minor_function; fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; circularQ = &pm8001_ha->inbnd_q_tbl[0]; memcpy(pm8001_ha->memoryMap.region[NVMD].virt_ptr, ioctl_payload->func_specific, ioctl_payload->length); memset(&nvmd_req, 0, sizeof(nvmd_req)); rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) { kfree(fw_control_context); return rc; } ccb = &pm8001_ha->ccb_info[tag]; ccb->fw_control_context = fw_control_context; ccb->ccb_tag = tag; nvmd_req.tag = cpu_to_le32(tag); switch (nvmd_type) { case TWI_DEVICE: { u32 twi_addr, twi_page_size; twi_addr = 0xa8; twi_page_size = 2; nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | twi_addr << 16 | twi_page_size << 8 | TWI_DEVICE); nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); nvmd_req.resp_addr_hi = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); nvmd_req.resp_addr_lo = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); break; } case C_SEEPROM: nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | C_SEEPROM); nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); nvmd_req.resp_addr_hi = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); nvmd_req.resp_addr_lo = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); break; case VPD_FLASH: nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | VPD_FLASH); nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); nvmd_req.resp_addr_hi =
cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); nvmd_req.resp_addr_lo = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); break; case EXPAN_ROM: nvmd_req.len_ir_vpdd = cpu_to_le32(IPMode | EXPAN_ROM); nvmd_req.resp_len = cpu_to_le32(ioctl_payload->length); nvmd_req.reserved[0] = cpu_to_le32(0xFEDCBA98); nvmd_req.resp_addr_hi = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_hi); nvmd_req.resp_addr_lo = cpu_to_le32(pm8001_ha->memoryMap.region[NVMD].phys_addr_lo); break; default: break; } rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &nvmd_req); return rc; } /** * pm8001_chip_fw_flash_update_build - support the firmware update operation * @pm8001_ha: our hba card information. * @fw_flash_updata_info: firmware flash update param */ static int pm8001_chip_fw_flash_update_build(struct pm8001_hba_info *pm8001_ha, void *fw_flash_updata_info, u32 tag) { struct fw_flash_Update_req payload; struct fw_flash_updata_info *info; struct inbound_queue_table *circularQ; int ret; u32 opc = OPC_INB_FW_FLASH_UPDATE; memset(&payload, 0, sizeof(struct fw_flash_Update_req)); circularQ = &pm8001_ha->inbnd_q_tbl[0]; info = fw_flash_updata_info; payload.tag = cpu_to_le32(tag); payload.cur_image_len = cpu_to_le32(info->cur_image_len); payload.cur_image_offset = cpu_to_le32(info->cur_image_offset); payload.total_image_len = cpu_to_le32(info->total_image_len); payload.len = info->sgl.im_len.len ; payload.sgl_addr_lo = cpu_to_le32(lower_32_bits(le64_to_cpu(info->sgl.addr))); payload.sgl_addr_hi = cpu_to_le32(upper_32_bits(le64_to_cpu(info->sgl.addr))); ret = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); return ret; } static int pm8001_chip_fw_flash_update_req(struct pm8001_hba_info *pm8001_ha, void *payload) { struct fw_flash_updata_info flash_update_info; struct fw_control_info *fw_control; struct fw_control_ex *fw_control_context; int rc; u32 tag; struct pm8001_ccb_info *ccb; void *buffer = NULL; dma_addr_t phys_addr; u32 phys_addr_hi; u32 phys_addr_lo;
struct pm8001_ioctl_payload *ioctl_payload = payload; fw_control_context = kzalloc(sizeof(struct fw_control_ex), GFP_KERNEL); if (!fw_control_context) return -ENOMEM; fw_control = (struct fw_control_info *)&ioctl_payload->func_specific[0]; if (fw_control->len != 0) { if (pm8001_mem_alloc(pm8001_ha->pdev, (void **)&buffer, &phys_addr, &phys_addr_hi, &phys_addr_lo, fw_control->len, 0) != 0) { PM8001_FAIL_DBG(pm8001_ha, pm8001_printk("Mem alloc failure\n")); kfree(fw_control_context); return -ENOMEM; } } memcpy(buffer, fw_control->buffer, fw_control->len); flash_update_info.sgl.addr = cpu_to_le64(phys_addr); flash_update_info.sgl.im_len.len = cpu_to_le32(fw_control->len); flash_update_info.sgl.im_len.e = 0; flash_update_info.cur_image_offset = fw_control->offset; flash_update_info.cur_image_len = fw_control->len; flash_update_info.total_image_len = fw_control->size; fw_control_context->fw_control = fw_control; fw_control_context->virtAddr = buffer; fw_control_context->len = fw_control->len; rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) { kfree(fw_control_context); return rc; } ccb = &pm8001_ha->ccb_info[tag]; ccb->fw_control_context = fw_control_context; ccb->ccb_tag = tag; rc = pm8001_chip_fw_flash_update_build(pm8001_ha, &flash_update_info, tag); return rc; } static int pm8001_chip_set_dev_state_req(struct pm8001_hba_info *pm8001_ha, struct pm8001_device *pm8001_dev, u32 state) { struct set_dev_state_req payload; struct inbound_queue_table *circularQ; struct pm8001_ccb_info *ccb; int rc; u32 tag; u32 opc = OPC_INB_SET_DEVICE_STATE; memset(&payload, 0, sizeof(payload)); rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) return -1; ccb = &pm8001_ha->ccb_info[tag]; ccb->ccb_tag = tag; ccb->device = pm8001_dev; circularQ = &pm8001_ha->inbnd_q_tbl[0]; payload.tag = cpu_to_le32(tag); payload.device_id = cpu_to_le32(pm8001_dev->device_id); payload.nds = cpu_to_le32(state); rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); return rc; } static int
pm8001_chip_sas_re_initialization(struct pm8001_hba_info *pm8001_ha) { struct sas_re_initialization_req payload; struct inbound_queue_table *circularQ; struct pm8001_ccb_info *ccb; int rc; u32 tag; u32 opc = OPC_INB_SAS_RE_INITIALIZE; memset(&payload, 0, sizeof(payload)); rc = pm8001_tag_alloc(pm8001_ha, &tag); if (rc) return -1; ccb = &pm8001_ha->ccb_info[tag]; ccb->ccb_tag = tag; circularQ = &pm8001_ha->inbnd_q_tbl[0]; payload.tag = cpu_to_le32(tag); payload.SSAHOLT = cpu_to_le32(0xd << 25); payload.sata_hol_tmo = cpu_to_le32(80); payload.open_reject_cmdretries_data_retries = cpu_to_le32(0xff00ff); rc = mpi_build_cmd(pm8001_ha, circularQ, opc, &payload); return rc; } const struct pm8001_dispatch pm8001_8001_dispatch = { .name = "pmc8001", .chip_init = pm8001_chip_init, .chip_soft_rst = pm8001_chip_soft_rst, .chip_rst = pm8001_hw_chip_rst, .chip_iounmap = pm8001_chip_iounmap, .isr = pm8001_chip_isr, .is_our_interupt = pm8001_chip_is_our_interupt, .isr_process_oq = process_oq, .interrupt_enable = pm8001_chip_interrupt_enable, .interrupt_disable = pm8001_chip_interrupt_disable, .make_prd = pm8001_chip_make_sg, .smp_req = pm8001_chip_smp_req, .ssp_io_req = pm8001_chip_ssp_io_req, .sata_req = pm8001_chip_sata_req, .phy_start_req = pm8001_chip_phy_start_req, .phy_stop_req = pm8001_chip_phy_stop_req, .reg_dev_req = pm8001_chip_reg_dev_req, .dereg_dev_req = pm8001_chip_dereg_dev_req, .phy_ctl_req = pm8001_chip_phy_ctl_req, .task_abort = pm8001_chip_abort_task, .ssp_tm_req = pm8001_chip_ssp_tm_req, .get_nvmd_req = pm8001_chip_get_nvmd_req, .set_nvmd_req = pm8001_chip_set_nvmd_req, .fw_flash_update_req = pm8001_chip_fw_flash_update_req, .set_dev_state_req = pm8001_chip_set_dev_state_req, .sas_re_init_req = pm8001_chip_sas_re_initialization, };
gpl-2.0
DooMLoRD/android_kernel_sony_msm8974
arch/alpha/kernel/pc873xx.c
12989
1696
/*
 * Probe/configuration helpers for the National Semiconductor PC873xx
 * Super I/O chips found on some Alpha systems.
 */
#include <linux/ioport.h>
#include <asm/io.h>

#include "pc873xx.h"

/* I/O base addresses to probe, terminated by 0. */
static unsigned pc873xx_probelist[] = {0x398, 0x26e, 0};

static char *pc873xx_names[] = {
	"PC87303", "PC87306", "PC87312", "PC87332", "PC87334"
};

/* Filled in by pc873xx_probe(): detected I/O base (0 = none) and model. */
static unsigned int base, model;

/*
 * pc873xx_get_base - return the I/O base found by pc873xx_probe(),
 * or 0 if no chip was detected.
 * FIX: declared (void) -- the original used a K&R empty parameter list,
 * which declares an unspecified-arguments function in C89/C99.
 */
unsigned int __init pc873xx_get_base(void)
{
	return base;
}

/* pc873xx_get_model - return the name of the detected chip. */
char *__init pc873xx_get_model(void)
{
	return pc873xx_names[model];
}

/* Read configuration register @reg of the chip at I/O @base. */
static unsigned char __init pc873xx_read(unsigned int base, int reg)
{
	outb(reg, base);
	return inb(base + 1);
}

/* Write @data to configuration register @reg, IRQ-safe (index and data
 * ports must not be interleaved with another access).
 */
static void __init pc873xx_write(unsigned int base, int reg, unsigned char data)
{
	unsigned long flags;

	local_irq_save(flags);
	outb(reg, base);
	outb(data, base + 1);
	outb(data, base + 1);		/* Must be written twice */
	local_irq_restore(flags);
}

/*
 * pc873xx_probe - look for a PC873xx at the known I/O bases.
 * Identifies the model from the SID register and keeps the 2-byte I/O
 * region reserved on success.  Returns 1 if found, -1 otherwise.
 */
int __init pc873xx_probe(void)
{
	int val, index = 0;

	while ((base = pc873xx_probelist[index++])) {

		if (request_region(base, 2, "Super IO PC873xx") == NULL)
			continue;

		val = pc873xx_read(base, REG_SID);
		if ((val & 0xf0) == 0x10) {
			model = PC87332;
			break;
		} else if ((val & 0xf8) == 0x70) {
			model = PC87306;
			break;
		} else if ((val & 0xf8) == 0x50) {
			model = PC87334;
			break;
		} else if ((val & 0xf8) == 0x40) {
			model = PC87303;
			break;
		}

		release_region(base, 2);
	}

	return (base == 0) ? -1 : 1;
}

/* Enable EPP v1.9 parallel port mode (PCR bits [1:0] = 10b). */
void __init pc873xx_enable_epp19(void)
{
	unsigned char data;

	printk(KERN_INFO "PC873xx enabling EPP v1.9\n");
	data = pc873xx_read(base, REG_PCR);
	pc873xx_write(base, REG_PCR, (data & 0xFC) | 0x02);
}

/* Enable the IDE interrupt (FER bit 6). */
void __init pc873xx_enable_ide(void)
{
	unsigned char data;

	printk(KERN_INFO "PC873xx enabling IDE interrupt\n");
	data = pc873xx_read(base, REG_FER);
	pc873xx_write(base, REG_FER, data | 0x40);
}
gpl-2.0
TeskeVirtualSystem/odroid_mptcp
arch/mips/dec/prom/identify.c
13757
4690
/*
 * Review notes: this is the DECstation PROM machine-identification unit.
 * get_system_type() caches the formatted name on first call;
 * prom_identify_arch() decodes the PROM system id (cpu / systype /
 * firmware rev / etc bytes) and maps the systype byte to mips_machtype,
 * calling the matching prom_init_kn*() helper to set the slot base/size,
 * RTC and (where present) the IOASIC base addresses.
 * NOTE(review): prom_init_kn01() and prom_init_kn230() are identical;
 * dec_cpunum, dec_firmrev and dec_etc are decoded but never used --
 * presumably kept for debugging; confirm before removing.
 */
/* * identify.c: machine identification code. * * Copyright (C) 1998 Harald Koerfgen and Paul M. Antoine * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/mc146818rtc.h> #include <linux/module.h> #include <linux/string.h> #include <linux/types.h> #include <asm/bootinfo.h> #include <asm/dec/ioasic.h> #include <asm/dec/ioasic_addrs.h> #include <asm/dec/kn01.h> #include <asm/dec/kn02.h> #include <asm/dec/kn02ba.h> #include <asm/dec/kn02ca.h> #include <asm/dec/kn03.h> #include <asm/dec/kn230.h> #include <asm/dec/prom.h> #include <asm/dec/system.h> #include "dectypes.h" static const char *dec_system_strings[] = { [MACH_DSUNKNOWN] "unknown DECstation", [MACH_DS23100] "DECstation 2100/3100", [MACH_DS5100] "DECsystem 5100", [MACH_DS5000_200] "DECstation 5000/200", [MACH_DS5000_1XX] "DECstation 5000/1xx", [MACH_DS5000_XX] "Personal DECstation 5000/xx", [MACH_DS5000_2X0] "DECstation 5000/2x0", [MACH_DS5400] "DECsystem 5400", [MACH_DS5500] "DECsystem 5500", [MACH_DS5800] "DECsystem 5800", [MACH_DS5900] "DECsystem 5900", }; const char *get_system_type(void) { #define STR_BUF_LEN 64 static char system[STR_BUF_LEN]; static int called = 0; if (called == 0) { called = 1; snprintf(system, STR_BUF_LEN, "Digital %s", dec_system_strings[mips_machtype]); } return system; } /* * Setup essential system-specific memory addresses. We need them * early. Semantically the functions belong to prom/init.c, but they * are compact enough we want them inlined.
--macro */ volatile u8 *dec_rtc_base; EXPORT_SYMBOL(dec_rtc_base); static inline void prom_init_kn01(void) { dec_kn_slot_base = KN01_SLOT_BASE; dec_kn_slot_size = KN01_SLOT_SIZE; dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN01_RTC); } static inline void prom_init_kn230(void) { dec_kn_slot_base = KN01_SLOT_BASE; dec_kn_slot_size = KN01_SLOT_SIZE; dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN01_RTC); } static inline void prom_init_kn02(void) { dec_kn_slot_base = KN02_SLOT_BASE; dec_kn_slot_size = KN02_SLOT_SIZE; dec_tc_bus = 1; dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + KN02_RTC); } static inline void prom_init_kn02xa(void) { dec_kn_slot_base = KN02XA_SLOT_BASE; dec_kn_slot_size = IOASIC_SLOT_SIZE; dec_tc_bus = 1; ioasic_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_IOCTL); dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_TOY); } static inline void prom_init_kn03(void) { dec_kn_slot_base = KN03_SLOT_BASE; dec_kn_slot_size = IOASIC_SLOT_SIZE; dec_tc_bus = 1; ioasic_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_IOCTL); dec_rtc_base = (void *)CKSEG1ADDR(dec_kn_slot_base + IOASIC_TOY); } void __init prom_identify_arch(u32 magic) { unsigned char dec_cpunum, dec_firmrev, dec_etc, dec_systype; u32 dec_sysid; if (!prom_is_rex(magic)) { dec_sysid = simple_strtoul(prom_getenv("systype"), (char **)0, 0); } else { dec_sysid = rex_getsysid(); if (dec_sysid == 0) { printk("Zero sysid returned from PROM! " "Assuming a PMAX-like machine.\n"); dec_sysid = 1; } } dec_cpunum = (dec_sysid & 0xff000000) >> 24; dec_systype = (dec_sysid & 0xff0000) >> 16; dec_firmrev = (dec_sysid & 0xff00) >> 8; dec_etc = dec_sysid & 0xff; /* * FIXME: This may not be an exhaustive list of DECStations/Servers! * Put all model-specific initialisation calls here.
*/ switch (dec_systype) { case DS2100_3100: mips_machtype = MACH_DS23100; prom_init_kn01(); break; case DS5100: /* DS5100 MIPSMATE */ mips_machtype = MACH_DS5100; prom_init_kn230(); break; case DS5000_200: /* DS5000 3max */ mips_machtype = MACH_DS5000_200; prom_init_kn02(); break; case DS5000_1XX: /* DS5000/100 3min */ mips_machtype = MACH_DS5000_1XX; prom_init_kn02xa(); break; case DS5000_2X0: /* DS5000/240 3max+ or DS5900 bigmax */ mips_machtype = MACH_DS5000_2X0; prom_init_kn03(); if (!(ioasic_read(IO_REG_SIR) & KN03_IO_INR_3MAXP)) mips_machtype = MACH_DS5900; break; case DS5000_XX: /* Personal DS5000/xx maxine */ mips_machtype = MACH_DS5000_XX; prom_init_kn02xa(); break; case DS5800: /* DS5800 Isis */ mips_machtype = MACH_DS5800; break; case DS5400: /* DS5400 MIPSfair */ mips_machtype = MACH_DS5400; break; case DS5500: /* DS5500 MIPSfair-2 */ mips_machtype = MACH_DS5500; break; default: mips_machtype = MACH_DSUNKNOWN; break; } if (mips_machtype == MACH_DSUNKNOWN) printk("This is an %s, id is %x\n", dec_system_strings[mips_machtype], dec_systype); else printk("This is a %s\n", dec_system_strings[mips_machtype]); }
gpl-2.0
j-r0dd/motus_kernel
arch/x86/kernel/cpu/cpufreq/gx-suspmod.c
190
14953
/*
 * Review notes: Geode GX1/MediaGX "suspend modulation" cpufreq driver
 * (visible portion; the file continues past this chunk).  Effective
 * frequency is stock_freq * off / (on + off) with 8-bit on/off counters
 * in 32us units.  gx_validate_speed() searches all total durations up to
 * max_duration for the on/off split whose resulting frequency is closest
 * to the request; gx_set_cpuspeed() programs the CS55x0 companion chip
 * over PCI config space, disabling modulation entirely when the request
 * equals stock_freq.  cpufreq_gx_verify()/cpufreq_gx_target() clamp
 * requests to a representable frequency.
 * NOTE(review): gx_get_cpuspeed() divides by (on + off); presumably both
 * counters are never simultaneously zero while SUSMOD is set -- confirm.
 */
/* * Cyrix MediaGX and NatSemi Geode Suspend Modulation * (C) 2002 Zwane Mwaikambo <zwane@commfireservices.com> * (C) 2002 Hiroshi Miura <miura@da-cha.org> * All Rights Reserved * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation * * The author(s) of this software shall not be held liable for damages * of any nature resulting due to the use of this software. This * software is provided AS-IS with no warranties. * * Theoretical note: * * (see Geode(tm) CS5530 manual (rev.4.1) page.56) * * CPU frequency control on NatSemi Geode GX1/GXLV processor and CS55x0 * are based on Suspend Modulation. * * Suspend Modulation works by asserting and de-asserting the SUSP# pin * to CPU(GX1/GXLV) for configurable durations. When asserting SUSP# * the CPU enters an idle state. GX1 stops its core clock when SUSP# is * asserted then power consumption is reduced. * * Suspend Modulation's OFF/ON duration are configurable * with 'Suspend Modulation OFF Count Register' * and 'Suspend Modulation ON Count Register'. * These registers are 8bit counters that represent the number of * 32us intervals which the SUSP# pin is asserted(ON)/de-asserted(OFF) * to the processor. * * These counters define a ratio which is the effective frequency * of operation of the system. * * OFF Count * F_eff = Fgx * ---------------------- * OFF Count + ON Count * * 0 <= On Count, Off Count <= 255 * * From these limits, we can get register values * * off_duration + on_duration <= MAX_DURATION * on_duration = off_duration * (stock_freq - freq) / freq * * off_duration = (freq * DURATION) / stock_freq * on_duration = DURATION - off_duration * * *--------------------------------------------------------------------------- * * ChangeLog: * Dec. 12, 2003 Hiroshi Miura <miura@da-cha.org> * - fix on/off register mistake * - fix cpu_khz calc when it stops cpu modulation. * * Dec.
11, 2002 Hiroshi Miura <miura@da-cha.org> * - rewrite for Cyrix MediaGX Cx5510/5520 and * NatSemi Geode Cs5530(A). * * Jul. ??, 2002 Zwane Mwaikambo <zwane@commfireservices.com> * - cs5530_mod patch for 2.4.19-rc1. * *--------------------------------------------------------------------------- * * Todo * Test on machines with 5510, 5530, 5530A */ /************************************************************************ * Suspend Modulation - Definitions * ************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/smp.h> #include <linux/cpufreq.h> #include <linux/pci.h> #include <asm/processor-cyrix.h> #include <asm/errno.h> /* PCI config registers, all at F0 */ #define PCI_PMER1 0x80 /* power management enable register 1 */ #define PCI_PMER2 0x81 /* power management enable register 2 */ #define PCI_PMER3 0x82 /* power management enable register 3 */ #define PCI_IRQTC 0x8c /* irq speedup timer counter register:typical 2 to 4ms */ #define PCI_VIDTC 0x8d /* video speedup timer counter register: typical 50 to 100ms */ #define PCI_MODOFF 0x94 /* suspend modulation OFF counter register, 1 = 32us */ #define PCI_MODON 0x95 /* suspend modulation ON counter register */ #define PCI_SUSCFG 0x96 /* suspend configuration register */ /* PMER1 bits */ #define GPM (1<<0) /* global power management */ #define GIT (1<<1) /* globally enable PM device idle timers */ #define GTR (1<<2) /* globally enable IO traps */ #define IRQ_SPDUP (1<<3) /* disable clock throttle during interrupt handling */ #define VID_SPDUP (1<<4) /* disable clock throttle during vga video handling */ /* SUSCFG bits */ #define SUSMOD (1<<0) /* enable/disable suspend modulation */ /* the below is supported only with cs5530 (after rev.1.2)/cs5530A */ #define SMISPDUP (1<<1) /* select how SMI re-enable suspend modulation: */ /* IRQTC timer or read SMI speedup disable reg.(F1BAR[08-09h]) */ #define SUSCFG (1<<2) /*
enable powering down a GXLV processor. "Special 3Volt Suspend" mode */ /* the below is supported only with cs5530A */ #define PWRSVE_ISA (1<<3) /* stop ISA clock */ #define PWRSVE (1<<4) /* active idle */ struct gxfreq_params { u8 on_duration; u8 off_duration; u8 pci_suscfg; u8 pci_pmer1; u8 pci_pmer2; struct pci_dev *cs55x0; }; static struct gxfreq_params *gx_params; static int stock_freq; /* PCI bus clock - defaults to 30.000 if cpu_khz is not available */ static int pci_busclk = 0; module_param (pci_busclk, int, 0444); /* maximum duration for which the cpu may be suspended * (32us * MAX_DURATION). If no parameter is given, this defaults * to 255. * Note that this leads to a maximum of 8 ms(!) where the CPU clock * is suspended -- processing power is just 0.39% of what it used to be, * though. 781.25 kHz(!) for a 200 MHz processor -- wow. */ static int max_duration = 255; module_param (max_duration, int, 0444); /* For the default policy, we want at least some processing power * - let's say 5%. (min = maxfreq / POLICY_MIN_DIV) */ #define POLICY_MIN_DIV 20 #define dprintk(msg...)
cpufreq_debug_printk(CPUFREQ_DEBUG_DRIVER, "gx-suspmod", msg) /** * we can detect a core multipiler from dir0_lsb * from GX1 datasheet p.56, * MULT[3:0]: * 0000 = SYSCLK multiplied by 4 (test only) * 0001 = SYSCLK multiplied by 10 * 0010 = SYSCLK multiplied by 4 * 0011 = SYSCLK multiplied by 6 * 0100 = SYSCLK multiplied by 9 * 0101 = SYSCLK multiplied by 5 * 0110 = SYSCLK multiplied by 7 * 0111 = SYSCLK multiplied by 8 * of 33.3MHz **/ static int gx_freq_mult[16] = { 4, 10, 4, 6, 9, 5, 7, 8, 0, 0, 0, 0, 0, 0, 0, 0 }; /**************************************************************** * Low Level chipset interface * ****************************************************************/ static struct pci_device_id gx_chipset_tbl[] __initdata = { { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5530_LEGACY, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5520, PCI_ANY_ID, PCI_ANY_ID }, { PCI_VENDOR_ID_CYRIX, PCI_DEVICE_ID_CYRIX_5510, PCI_ANY_ID, PCI_ANY_ID }, { 0, }, }; /** * gx_detect_chipset: * **/ static __init struct pci_dev *gx_detect_chipset(void) { struct pci_dev *gx_pci = NULL; /* check if CPU is a MediaGX or a Geode. */ if ((boot_cpu_data.x86_vendor != X86_VENDOR_NSC) && (boot_cpu_data.x86_vendor != X86_VENDOR_CYRIX)) { dprintk("error: no MediaGX/Geode processor found!\n"); return NULL; } /* detect which companion chip is used */ while ((gx_pci = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) { if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL) return gx_pci; } dprintk("error: no supported chipset found!\n"); return NULL; } /** * gx_get_cpuspeed: * * Finds out at which efficient frequency the Cyrix MediaGX/NatSemi Geode CPU runs.
*/ static unsigned int gx_get_cpuspeed(unsigned int cpu) { if ((gx_params->pci_suscfg & SUSMOD) == 0) return stock_freq; return (stock_freq * gx_params->off_duration) / (gx_params->on_duration + gx_params->off_duration); } /** * gx_validate_speed: * determine current cpu speed * **/ static unsigned int gx_validate_speed(unsigned int khz, u8 *on_duration, u8 *off_duration) { unsigned int i; u8 tmp_on, tmp_off; int old_tmp_freq = stock_freq; int tmp_freq; *off_duration=1; *on_duration=0; for (i=max_duration; i>0; i--) { tmp_off = ((khz * i) / stock_freq) & 0xff; tmp_on = i - tmp_off; tmp_freq = (stock_freq * tmp_off) / i; /* if this relation is closer to khz, use this. If it's equal, * prefer it, too - lower latency */ if (abs(tmp_freq - khz) <= abs(old_tmp_freq - khz)) { *on_duration = tmp_on; *off_duration = tmp_off; old_tmp_freq = tmp_freq; } } return old_tmp_freq; } /** * gx_set_cpuspeed: * set cpu speed in khz. **/ static void gx_set_cpuspeed(unsigned int khz) { u8 suscfg, pmer1; unsigned int new_khz; unsigned long flags; struct cpufreq_freqs freqs; freqs.cpu = 0; freqs.old = gx_get_cpuspeed(0); new_khz = gx_validate_speed(khz, &gx_params->on_duration, &gx_params->off_duration); freqs.new = new_khz; cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); local_irq_save(flags); if (new_khz != stock_freq) { /* if new khz == 100% of CPU speed, it is special case */ switch (gx_params->cs55x0->device) { case PCI_DEVICE_ID_CYRIX_5530_LEGACY: pmer1 = gx_params->pci_pmer1 | IRQ_SPDUP | VID_SPDUP; /* FIXME: need to test other values -- Zwane,Miura */ pci_write_config_byte(gx_params->cs55x0, PCI_IRQTC, 4); /* typical 2 to 4ms */ pci_write_config_byte(gx_params->cs55x0, PCI_VIDTC, 100);/* typical 50 to 100ms */ pci_write_config_byte(gx_params->cs55x0, PCI_PMER1, pmer1); if (gx_params->cs55x0->revision < 0x10) { /* CS5530(rev 1.2, 1.3) */ suscfg = gx_params->pci_suscfg | SUSMOD; } else { /* CS5530A,B..
*/ suscfg = gx_params->pci_suscfg | SUSMOD | PWRSVE; } break; case PCI_DEVICE_ID_CYRIX_5520: case PCI_DEVICE_ID_CYRIX_5510: suscfg = gx_params->pci_suscfg | SUSMOD; break; default: local_irq_restore(flags); dprintk("fatal: try to set unknown chipset.\n"); return; } } else { suscfg = gx_params->pci_suscfg & ~(SUSMOD); gx_params->off_duration = 0; gx_params->on_duration = 0; dprintk("suspend modulation disabled: cpu runs 100 percent speed.\n"); } pci_write_config_byte(gx_params->cs55x0, PCI_MODOFF, gx_params->off_duration); pci_write_config_byte(gx_params->cs55x0, PCI_MODON, gx_params->on_duration); pci_write_config_byte(gx_params->cs55x0, PCI_SUSCFG, suscfg); pci_read_config_byte(gx_params->cs55x0, PCI_SUSCFG, &suscfg); local_irq_restore(flags); gx_params->pci_suscfg = suscfg; cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); dprintk("suspend modulation w/ duration of ON:%d us, OFF:%d us\n", gx_params->on_duration * 32, gx_params->off_duration * 32); dprintk("suspend modulation w/ clock speed: %d kHz.\n", freqs.new); } /**************************************************************** * High level functions * ****************************************************************/ /* * cpufreq_gx_verify: test if frequency range is valid * * This function checks if a given frequency range in kHz is valid * for the hardware supported by the driver. */ static int cpufreq_gx_verify(struct cpufreq_policy *policy) { unsigned int tmp_freq = 0; u8 tmp1, tmp2; if (!stock_freq || !policy) return -EINVAL; policy->cpu = 0; cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); /* it needs to be assured that at least one supported frequency is * within policy->min and policy->max. If it is not, policy->max * needs to be increased until one freuqency is supported. * policy->min may not be decreased, though. This way we guarantee a * specific processing capacity.
*/ tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2); if (tmp_freq < policy->min) tmp_freq += stock_freq / max_duration; policy->min = tmp_freq; if (policy->min > policy->max) policy->max = tmp_freq; tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2); if (tmp_freq > policy->max) tmp_freq -= stock_freq / max_duration; policy->max = tmp_freq; if (policy->max < policy->min) policy->max = policy->min; cpufreq_verify_within_limits(policy, (stock_freq / max_duration), stock_freq); return 0; } /* * cpufreq_gx_target: * */ static int cpufreq_gx_target(struct cpufreq_policy *policy, unsigned int target_freq, unsigned int relation) { u8 tmp1, tmp2; unsigned int tmp_freq; if (!stock_freq || !policy) return -EINVAL; policy->cpu = 0; tmp_freq = gx_validate_speed(target_freq, &tmp1, &tmp2); while (tmp_freq < policy->min) { tmp_freq += stock_freq / max_duration; tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); } while (tmp_freq > policy->max) { tmp_freq -= stock_freq / max_duration; tmp_freq = gx_validate_speed(tmp_freq, &tmp1, &tmp2); } gx_set_cpuspeed(tmp_freq); return 0; } static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy) { unsigned int maxfreq, curfreq; if (!policy || policy->cpu != 0) return -ENODEV; /* determine maximum frequency */ if (pci_busclk) { maxfreq = pci_busclk * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; } else if (cpu_khz) { maxfreq = cpu_khz; } else { maxfreq = 30000 * gx_freq_mult[getCx86(CX86_DIR1) & 0x0f]; } stock_freq = maxfreq; curfreq = gx_get_cpuspeed(0); dprintk("cpu max frequency is %d.\n", maxfreq); dprintk("cpu current frequency is %dkHz.\n",curfreq); /* setup basic struct for cpufreq API */ policy->cpu = 0; if (max_duration < POLICY_MIN_DIV) policy->min = maxfreq / max_duration; else policy->min = maxfreq / POLICY_MIN_DIV; policy->max = maxfreq; policy->cur = curfreq; policy->cpuinfo.min_freq = maxfreq / max_duration; policy->cpuinfo.max_freq = maxfreq; policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; return 0; } /* *
cpufreq_gx_init: * MediaGX/Geode GX initialize cpufreq driver */ static struct cpufreq_driver gx_suspmod_driver = { .get = gx_get_cpuspeed, .verify = cpufreq_gx_verify, .target = cpufreq_gx_target, .init = cpufreq_gx_cpu_init, .name = "gx-suspmod", .owner = THIS_MODULE, }; static int __init cpufreq_gx_init(void) { int ret; struct gxfreq_params *params; struct pci_dev *gx_pci; /* Test if we have the right hardware */ if ((gx_pci = gx_detect_chipset()) == NULL) return -ENODEV; /* check whether module parameters are sane */ if (max_duration > 0xff) max_duration = 0xff; dprintk("geode suspend modulation available.\n"); params = kzalloc(sizeof(struct gxfreq_params), GFP_KERNEL); if (params == NULL) return -ENOMEM; params->cs55x0 = gx_pci; gx_params = params; /* keep cs55x0 configurations */ pci_read_config_byte(params->cs55x0, PCI_SUSCFG, &(params->pci_suscfg)); pci_read_config_byte(params->cs55x0, PCI_PMER1, &(params->pci_pmer1)); pci_read_config_byte(params->cs55x0, PCI_PMER2, &(params->pci_pmer2)); pci_read_config_byte(params->cs55x0, PCI_MODON, &(params->on_duration)); pci_read_config_byte(params->cs55x0, PCI_MODOFF, &(params->off_duration)); if ((ret = cpufreq_register_driver(&gx_suspmod_driver))) { kfree(params); return ret; /* register error! */ } return 0; } static void __exit cpufreq_gx_exit(void) { cpufreq_unregister_driver(&gx_suspmod_driver); pci_dev_put(gx_params->cs55x0); kfree(gx_params); } MODULE_AUTHOR ("Hiroshi Miura <miura@da-cha.org>"); MODULE_DESCRIPTION ("Cpufreq driver for Cyrix MediaGX and NatSemi Geode"); MODULE_LICENSE ("GPL"); module_init(cpufreq_gx_init); module_exit(cpufreq_gx_exit);
gpl-2.0
pershoot/vision-2635
sound/soc/omap/omap3pandora.c
702
9944
/* * omap3pandora.c -- SoC audio for Pandora Handheld Console * * Author: Gražvydas Ignotas <notasas@gmail.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA * */ #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/regulator/consumer.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <sound/soc-dapm.h> #include <asm/mach-types.h> #include "omap-mcbsp.h" #include "omap-pcm.h" #include "../codecs/twl4030.h" #define OMAP3_PANDORA_DAC_POWER_GPIO 118 #define OMAP3_PANDORA_AMP_POWER_GPIO 14 #define PREFIX "ASoC omap3pandora: " static struct regulator *omap3pandora_dac_reg; static int omap3pandora_cmn_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, unsigned int fmt) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->dai->codec_dai; struct snd_soc_dai *cpu_dai = rtd->dai->cpu_dai; int ret; /* Set codec DAI configuration */ ret = snd_soc_dai_set_fmt(codec_dai, fmt); if (ret < 0) { pr_err(PREFIX "can't set codec DAI configuration\n"); return ret; } /* Set cpu DAI configuration */ ret = snd_soc_dai_set_fmt(cpu_dai, fmt); if (ret < 0) { pr_err(PREFIX "can't set cpu DAI configuration\n"); return ret; } /* Set the codec system clock for DAC and ADC */ ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000, 
SND_SOC_CLOCK_IN); if (ret < 0) { pr_err(PREFIX "can't set codec system clock\n"); return ret; } /* Set McBSP clock to external */ ret = snd_soc_dai_set_sysclk(cpu_dai, OMAP_MCBSP_SYSCLK_CLKS_EXT, 256 * params_rate(params), SND_SOC_CLOCK_IN); if (ret < 0) { pr_err(PREFIX "can't set cpu system clock\n"); return ret; } ret = snd_soc_dai_set_clkdiv(cpu_dai, OMAP_MCBSP_CLKGDV, 8); if (ret < 0) { pr_err(PREFIX "can't set SRG clock divider\n"); return ret; } return 0; } static int omap3pandora_out_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { return omap3pandora_cmn_hw_params(substream, params, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_IB_NF | SND_SOC_DAIFMT_CBS_CFS); } static int omap3pandora_in_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { return omap3pandora_cmn_hw_params(substream, params, SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS); } static int omap3pandora_dac_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { /* * The PCM1773 DAC datasheet requires 1ms delay between switching * VCC power on/off and /PD pin high/low */ if (SND_SOC_DAPM_EVENT_ON(event)) { regulator_enable(omap3pandora_dac_reg); mdelay(1); gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 1); } else { gpio_set_value(OMAP3_PANDORA_DAC_POWER_GPIO, 0); mdelay(1); regulator_disable(omap3pandora_dac_reg); } return 0; } static int omap3pandora_hp_event(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { if (SND_SOC_DAPM_EVENT_ON(event)) gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 1); else gpio_set_value(OMAP3_PANDORA_AMP_POWER_GPIO, 0); return 0; } /* * Audio paths on Pandora board: * * |O| ---> PCM DAC +-> AMP -> Headphone Jack * |M| A +--------> Line Out * |A| <~~clk~~+ * |P| <--- TWL4030 <--------- Line In and MICs */ static const struct snd_soc_dapm_widget omap3pandora_out_dapm_widgets[] = { SND_SOC_DAPM_DAC_E("PCM DAC", "HiFi Playback", SND_SOC_NOPM, 0, 0, 
omap3pandora_dac_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_PGA_E("Headphone Amplifier", SND_SOC_NOPM, 0, 0, NULL, 0, omap3pandora_hp_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD), SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_LINE("Line Out", NULL), }; static const struct snd_soc_dapm_widget omap3pandora_in_dapm_widgets[] = { SND_SOC_DAPM_MIC("Mic (internal)", NULL), SND_SOC_DAPM_MIC("Mic (external)", NULL), SND_SOC_DAPM_LINE("Line In", NULL), }; static const struct snd_soc_dapm_route omap3pandora_out_map[] = { {"PCM DAC", NULL, "APLL Enable"}, {"Headphone Amplifier", NULL, "PCM DAC"}, {"Line Out", NULL, "PCM DAC"}, {"Headphone Jack", NULL, "Headphone Amplifier"}, }; static const struct snd_soc_dapm_route omap3pandora_in_map[] = { {"AUXL", NULL, "Line In"}, {"AUXR", NULL, "Line In"}, {"MAINMIC", NULL, "Mic Bias 1"}, {"Mic Bias 1", NULL, "Mic (internal)"}, {"SUBMIC", NULL, "Mic Bias 2"}, {"Mic Bias 2", NULL, "Mic (external)"}, }; static int omap3pandora_out_init(struct snd_soc_codec *codec) { int ret; /* All TWL4030 output pins are floating */ snd_soc_dapm_nc_pin(codec, "EARPIECE"); snd_soc_dapm_nc_pin(codec, "PREDRIVEL"); snd_soc_dapm_nc_pin(codec, "PREDRIVER"); snd_soc_dapm_nc_pin(codec, "HSOL"); snd_soc_dapm_nc_pin(codec, "HSOR"); snd_soc_dapm_nc_pin(codec, "CARKITL"); snd_soc_dapm_nc_pin(codec, "CARKITR"); snd_soc_dapm_nc_pin(codec, "HFL"); snd_soc_dapm_nc_pin(codec, "HFR"); snd_soc_dapm_nc_pin(codec, "VIBRA"); ret = snd_soc_dapm_new_controls(codec, omap3pandora_out_dapm_widgets, ARRAY_SIZE(omap3pandora_out_dapm_widgets)); if (ret < 0) return ret; snd_soc_dapm_add_routes(codec, omap3pandora_out_map, ARRAY_SIZE(omap3pandora_out_map)); return snd_soc_dapm_sync(codec); } static int omap3pandora_in_init(struct snd_soc_codec *codec) { int ret; /* Not comnnected */ snd_soc_dapm_nc_pin(codec, "HSMIC"); snd_soc_dapm_nc_pin(codec, "CARKITMIC"); snd_soc_dapm_nc_pin(codec, "DIGIMIC0"); snd_soc_dapm_nc_pin(codec, "DIGIMIC1"); ret = 
snd_soc_dapm_new_controls(codec, omap3pandora_in_dapm_widgets, ARRAY_SIZE(omap3pandora_in_dapm_widgets)); if (ret < 0) return ret; snd_soc_dapm_add_routes(codec, omap3pandora_in_map, ARRAY_SIZE(omap3pandora_in_map)); return snd_soc_dapm_sync(codec); } static struct snd_soc_ops omap3pandora_out_ops = { .hw_params = omap3pandora_out_hw_params, }; static struct snd_soc_ops omap3pandora_in_ops = { .hw_params = omap3pandora_in_hw_params, }; /* Digital audio interface glue - connects codec <--> CPU */ static struct snd_soc_dai_link omap3pandora_dai[] = { { .name = "PCM1773", .stream_name = "HiFi Out", .cpu_dai = &omap_mcbsp_dai[0], .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI], .ops = &omap3pandora_out_ops, .init = omap3pandora_out_init, }, { .name = "TWL4030", .stream_name = "Line/Mic In", .cpu_dai = &omap_mcbsp_dai[1], .codec_dai = &twl4030_dai[TWL4030_DAI_HIFI], .ops = &omap3pandora_in_ops, .init = omap3pandora_in_init, } }; /* SoC card */ static struct snd_soc_card snd_soc_card_omap3pandora = { .name = "omap3pandora", .platform = &omap_soc_platform, .dai_link = omap3pandora_dai, .num_links = ARRAY_SIZE(omap3pandora_dai), }; /* Audio subsystem */ static struct snd_soc_device omap3pandora_snd_data = { .card = &snd_soc_card_omap3pandora, .codec_dev = &soc_codec_dev_twl4030, }; static struct platform_device *omap3pandora_snd_device; static int __init omap3pandora_soc_init(void) { int ret; if (!machine_is_omap3_pandora()) return -ENODEV; pr_info("OMAP3 Pandora SoC init\n"); ret = gpio_request(OMAP3_PANDORA_DAC_POWER_GPIO, "dac_power"); if (ret) { pr_err(PREFIX "Failed to get DAC power GPIO\n"); return ret; } ret = gpio_direction_output(OMAP3_PANDORA_DAC_POWER_GPIO, 0); if (ret) { pr_err(PREFIX "Failed to set DAC power GPIO direction\n"); goto fail0; } ret = gpio_request(OMAP3_PANDORA_AMP_POWER_GPIO, "amp_power"); if (ret) { pr_err(PREFIX "Failed to get amp power GPIO\n"); goto fail0; } ret = gpio_direction_output(OMAP3_PANDORA_AMP_POWER_GPIO, 0); if (ret) { pr_err(PREFIX 
"Failed to set amp power GPIO direction\n"); goto fail1; } omap3pandora_snd_device = platform_device_alloc("soc-audio", -1); if (omap3pandora_snd_device == NULL) { pr_err(PREFIX "Platform device allocation failed\n"); ret = -ENOMEM; goto fail1; } platform_set_drvdata(omap3pandora_snd_device, &omap3pandora_snd_data); omap3pandora_snd_data.dev = &omap3pandora_snd_device->dev; *(unsigned int *)omap_mcbsp_dai[0].private_data = 1; /* McBSP2 */ *(unsigned int *)omap_mcbsp_dai[1].private_data = 3; /* McBSP4 */ ret = platform_device_add(omap3pandora_snd_device); if (ret) { pr_err(PREFIX "Unable to add platform device\n"); goto fail2; } omap3pandora_dac_reg = regulator_get(&omap3pandora_snd_device->dev, "vcc"); if (IS_ERR(omap3pandora_dac_reg)) { pr_err(PREFIX "Failed to get DAC regulator from %s: %ld\n", dev_name(&omap3pandora_snd_device->dev), PTR_ERR(omap3pandora_dac_reg)); goto fail3; } return 0; fail3: platform_device_del(omap3pandora_snd_device); fail2: platform_device_put(omap3pandora_snd_device); fail1: gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO); fail0: gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO); return ret; } module_init(omap3pandora_soc_init); static void __exit omap3pandora_soc_exit(void) { regulator_put(omap3pandora_dac_reg); platform_device_unregister(omap3pandora_snd_device); gpio_free(OMAP3_PANDORA_AMP_POWER_GPIO); gpio_free(OMAP3_PANDORA_DAC_POWER_GPIO); } module_exit(omap3pandora_soc_exit); MODULE_AUTHOR("Grazvydas Ignotas <notasas@gmail.com>"); MODULE_DESCRIPTION("ALSA SoC OMAP3 Pandora"); MODULE_LICENSE("GPL");
gpl-2.0
hejiann/android_kernel_huawei_u8860
drivers/media/rc/nuvoton-cir.c
1214
34725
/* * Driver for Nuvoton Technology Corporation w83667hg/w83677hg-i CIR * * Copyright (C) 2010 Jarod Wilson <jarod@redhat.com> * Copyright (C) 2009 Nuvoton PS Team * * Special thanks to Nuvoton for providing hardware, spec sheets and * sample code upon which portions of this driver are based. Indirect * thanks also to Maxim Levitsky, whose ene_ir driver this driver is * modeled after. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 * USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pnp.h> #include <linux/io.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/slab.h> #include <media/rc-core.h> #include <linux/pci_ids.h> #include "nuvoton-cir.h" /* write val to config reg */ static inline void nvt_cr_write(struct nvt_dev *nvt, u8 val, u8 reg) { outb(reg, nvt->cr_efir); outb(val, nvt->cr_efdr); } /* read val from config reg */ static inline u8 nvt_cr_read(struct nvt_dev *nvt, u8 reg) { outb(reg, nvt->cr_efir); return inb(nvt->cr_efdr); } /* update config register bit without changing other bits */ static inline void nvt_set_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg) { u8 tmp = nvt_cr_read(nvt, reg) | val; nvt_cr_write(nvt, tmp, reg); } /* clear config register bit without changing other bits */ static inline void nvt_clear_reg_bit(struct nvt_dev *nvt, u8 val, u8 reg) { u8 tmp = 
nvt_cr_read(nvt, reg) & ~val; nvt_cr_write(nvt, tmp, reg); } /* enter extended function mode */ static inline void nvt_efm_enable(struct nvt_dev *nvt) { /* Enabling Extended Function Mode explicitly requires writing 2x */ outb(EFER_EFM_ENABLE, nvt->cr_efir); outb(EFER_EFM_ENABLE, nvt->cr_efir); } /* exit extended function mode */ static inline void nvt_efm_disable(struct nvt_dev *nvt) { outb(EFER_EFM_DISABLE, nvt->cr_efir); } /* * When you want to address a specific logical device, write its logical * device number to CR_LOGICAL_DEV_SEL, then enable/disable by writing * 0x1/0x0 respectively to CR_LOGICAL_DEV_EN. */ static inline void nvt_select_logical_dev(struct nvt_dev *nvt, u8 ldev) { outb(CR_LOGICAL_DEV_SEL, nvt->cr_efir); outb(ldev, nvt->cr_efdr); } /* write val to cir config register */ static inline void nvt_cir_reg_write(struct nvt_dev *nvt, u8 val, u8 offset) { outb(val, nvt->cir_addr + offset); } /* read val from cir config register */ static u8 nvt_cir_reg_read(struct nvt_dev *nvt, u8 offset) { u8 val; val = inb(nvt->cir_addr + offset); return val; } /* write val to cir wake register */ static inline void nvt_cir_wake_reg_write(struct nvt_dev *nvt, u8 val, u8 offset) { outb(val, nvt->cir_wake_addr + offset); } /* read val from cir wake config register */ static u8 nvt_cir_wake_reg_read(struct nvt_dev *nvt, u8 offset) { u8 val; val = inb(nvt->cir_wake_addr + offset); return val; } #define pr_reg(text, ...) 
\ printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__) /* dump current cir register contents */ static void cir_dump_regs(struct nvt_dev *nvt) { nvt_efm_enable(nvt); nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); pr_reg("%s: Dump CIR logical device registers:\n", NVT_DRIVER_NAME); pr_reg(" * CR CIR ACTIVE : 0x%x\n", nvt_cr_read(nvt, CR_LOGICAL_DEV_EN)); pr_reg(" * CR CIR BASE ADDR: 0x%x\n", (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) | nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO)); pr_reg(" * CR CIR IRQ NUM: 0x%x\n", nvt_cr_read(nvt, CR_CIR_IRQ_RSRC)); nvt_efm_disable(nvt); pr_reg("%s: Dump CIR registers:\n", NVT_DRIVER_NAME); pr_reg(" * IRCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRCON)); pr_reg(" * IRSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRSTS)); pr_reg(" * IREN: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IREN)); pr_reg(" * RXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_RXFCONT)); pr_reg(" * CP: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP)); pr_reg(" * CC: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CC)); pr_reg(" * SLCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCH)); pr_reg(" * SLCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SLCL)); pr_reg(" * FIFOCON: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FIFOCON)); pr_reg(" * IRFIFOSTS: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFIFOSTS)); pr_reg(" * SRXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_SRXFIFO)); pr_reg(" * TXFCONT: 0x%x\n", nvt_cir_reg_read(nvt, CIR_TXFCONT)); pr_reg(" * STXFIFO: 0x%x\n", nvt_cir_reg_read(nvt, CIR_STXFIFO)); pr_reg(" * FCCH: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCH)); pr_reg(" * FCCL: 0x%x\n", nvt_cir_reg_read(nvt, CIR_FCCL)); pr_reg(" * IRFSM: 0x%x\n", nvt_cir_reg_read(nvt, CIR_IRFSM)); } /* dump current cir wake register contents */ static void cir_wake_dump_regs(struct nvt_dev *nvt) { u8 i, fifo_len; nvt_efm_enable(nvt); nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE); pr_reg("%s: Dump CIR WAKE logical device registers:\n", NVT_DRIVER_NAME); pr_reg(" * CR CIR WAKE ACTIVE : 0x%x\n", nvt_cr_read(nvt, CR_LOGICAL_DEV_EN)); pr_reg(" * CR CIR WAKE 
BASE ADDR: 0x%x\n", (nvt_cr_read(nvt, CR_CIR_BASE_ADDR_HI) << 8) | nvt_cr_read(nvt, CR_CIR_BASE_ADDR_LO)); pr_reg(" * CR CIR WAKE IRQ NUM: 0x%x\n", nvt_cr_read(nvt, CR_CIR_IRQ_RSRC)); nvt_efm_disable(nvt); pr_reg("%s: Dump CIR WAKE registers\n", NVT_DRIVER_NAME); pr_reg(" * IRCON: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRCON)); pr_reg(" * IRSTS: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS)); pr_reg(" * IREN: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN)); pr_reg(" * FIFO CMP DEEP: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_DEEP)); pr_reg(" * FIFO CMP TOL: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_CMP_TOL)); pr_reg(" * FIFO COUNT: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT)); pr_reg(" * SLCH: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCH)); pr_reg(" * SLCL: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_SLCL)); pr_reg(" * FIFOCON: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON)); pr_reg(" * SRXFSTS: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_SRXFSTS)); pr_reg(" * SAMPLE RX FIFO: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_SAMPLE_RX_FIFO)); pr_reg(" * WR FIFO DATA: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_WR_FIFO_DATA)); pr_reg(" * RD FIFO ONLY: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY)); pr_reg(" * RD FIFO ONLY IDX: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)); pr_reg(" * FIFO IGNORE: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_IGNORE)); pr_reg(" * IRFSM: 0x%x\n", nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRFSM)); fifo_len = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFO_COUNT); pr_reg("%s: Dump CIR WAKE FIFO (len %d)\n", NVT_DRIVER_NAME, fifo_len); pr_reg("* Contents = "); for (i = 0; i < fifo_len; i++) printk(KERN_CONT "%02x ", nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY)); printk(KERN_CONT "\n"); } /* detect hardware features */ static int nvt_hw_detect(struct nvt_dev *nvt) { unsigned long flags; u8 chip_major, chip_minor; int ret = 0; char chip_id[12]; bool 
chip_unknown = false; nvt_efm_enable(nvt); /* Check if we're wired for the alternate EFER setup */ chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI); if (chip_major == 0xff) { nvt->cr_efir = CR_EFIR2; nvt->cr_efdr = CR_EFDR2; nvt_efm_enable(nvt); chip_major = nvt_cr_read(nvt, CR_CHIP_ID_HI); } chip_minor = nvt_cr_read(nvt, CR_CHIP_ID_LO); /* these are the known working chip revisions... */ switch (chip_major) { case CHIP_ID_HIGH_667: strcpy(chip_id, "w83667hg\0"); if (chip_minor != CHIP_ID_LOW_667) chip_unknown = true; break; case CHIP_ID_HIGH_677B: strcpy(chip_id, "w83677hg\0"); if (chip_minor != CHIP_ID_LOW_677B2 && chip_minor != CHIP_ID_LOW_677B3) chip_unknown = true; break; case CHIP_ID_HIGH_677C: strcpy(chip_id, "w83677hg-c\0"); if (chip_minor != CHIP_ID_LOW_677C) chip_unknown = true; break; default: strcpy(chip_id, "w836x7hg\0"); chip_unknown = true; break; } /* warn, but still let the driver load, if we don't know this chip */ if (chip_unknown) nvt_pr(KERN_WARNING, "%s: unknown chip, id: 0x%02x 0x%02x, " "it may not work...", chip_id, chip_major, chip_minor); else nvt_dbg("%s: chip id: 0x%02x 0x%02x", chip_id, chip_major, chip_minor); nvt_efm_disable(nvt); spin_lock_irqsave(&nvt->nvt_lock, flags); nvt->chip_major = chip_major; nvt->chip_minor = chip_minor; spin_unlock_irqrestore(&nvt->nvt_lock, flags); return ret; } static void nvt_cir_ldev_init(struct nvt_dev *nvt) { u8 val, psreg, psmask, psval; if (nvt->chip_major == CHIP_ID_HIGH_667) { psreg = CR_MULTIFUNC_PIN_SEL; psmask = MULTIFUNC_PIN_SEL_MASK; psval = MULTIFUNC_ENABLE_CIR | MULTIFUNC_ENABLE_CIRWB; } else { psreg = CR_OUTPUT_PIN_SEL; psmask = OUTPUT_PIN_SEL_MASK; psval = OUTPUT_ENABLE_CIR | OUTPUT_ENABLE_CIRWB; } /* output pin selection: enable CIR, with WB sensor enabled */ val = nvt_cr_read(nvt, psreg); val &= psmask; val |= psval; nvt_cr_write(nvt, val, psreg); /* Select CIR logical device and enable */ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, 
CR_LOGICAL_DEV_EN); nvt_cr_write(nvt, nvt->cir_addr >> 8, CR_CIR_BASE_ADDR_HI); nvt_cr_write(nvt, nvt->cir_addr & 0xff, CR_CIR_BASE_ADDR_LO); nvt_cr_write(nvt, nvt->cir_irq, CR_CIR_IRQ_RSRC); nvt_dbg("CIR initialized, base io port address: 0x%lx, irq: %d", nvt->cir_addr, nvt->cir_irq); } static void nvt_cir_wake_ldev_init(struct nvt_dev *nvt) { /* Select ACPI logical device, enable it and CIR Wake */ nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI); nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN); /* Enable CIR Wake via PSOUT# (Pin60) */ nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE); /* enable cir interrupt of mouse/keyboard IRQ event */ nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS); /* enable pme interrupt of cir wakeup event */ nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2); /* Select CIR Wake logical device and enable */ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE); nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN); nvt_cr_write(nvt, nvt->cir_wake_addr >> 8, CR_CIR_BASE_ADDR_HI); nvt_cr_write(nvt, nvt->cir_wake_addr & 0xff, CR_CIR_BASE_ADDR_LO); nvt_cr_write(nvt, nvt->cir_wake_irq, CR_CIR_IRQ_RSRC); nvt_dbg("CIR Wake initialized, base io port address: 0x%lx, irq: %d", nvt->cir_wake_addr, nvt->cir_wake_irq); } /* clear out the hardware's cir rx fifo */ static void nvt_clear_cir_fifo(struct nvt_dev *nvt) { u8 val; val = nvt_cir_reg_read(nvt, CIR_FIFOCON); nvt_cir_reg_write(nvt, val | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON); } /* clear out the hardware's cir wake rx fifo */ static void nvt_clear_cir_wake_fifo(struct nvt_dev *nvt) { u8 val; val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_FIFOCON); nvt_cir_wake_reg_write(nvt, val | CIR_WAKE_FIFOCON_RXFIFOCLR, CIR_WAKE_FIFOCON); } /* clear out the hardware's cir tx fifo */ static void nvt_clear_tx_fifo(struct nvt_dev *nvt) { u8 val; val = nvt_cir_reg_read(nvt, CIR_FIFOCON); nvt_cir_reg_write(nvt, val | CIR_FIFOCON_TXFIFOCLR, CIR_FIFOCON); } /* enable RX 
Trigger Level Reach and Packet End interrupts */ static void nvt_set_cir_iren(struct nvt_dev *nvt) { u8 iren; iren = CIR_IREN_RTR | CIR_IREN_PE; nvt_cir_reg_write(nvt, iren, CIR_IREN); } static void nvt_cir_regs_init(struct nvt_dev *nvt) { /* set sample limit count (PE interrupt raised when reached) */ nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_SLCH); nvt_cir_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_SLCL); /* set fifo irq trigger levels */ nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV | CIR_FIFOCON_RX_TRIGGER_LEV, CIR_FIFOCON); /* * Enable TX and RX, specify carrier on = low, off = high, and set * sample period (currently 50us) */ nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN | CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL, CIR_IRCON); /* clear hardware rx and tx fifos */ nvt_clear_cir_fifo(nvt); nvt_clear_tx_fifo(nvt); /* clear any and all stray interrupts */ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS); /* and finally, enable interrupts */ nvt_set_cir_iren(nvt); } static void nvt_cir_wake_regs_init(struct nvt_dev *nvt) { /* set number of bytes needed for wake from s3 (default 65) */ nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFO_CMP_BYTES, CIR_WAKE_FIFO_CMP_DEEP); /* set tolerance/variance allowed per byte during wake compare */ nvt_cir_wake_reg_write(nvt, CIR_WAKE_CMP_TOLERANCE, CIR_WAKE_FIFO_CMP_TOL); /* set sample limit count (PE interrupt raised when reached) */ nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT >> 8, CIR_WAKE_SLCH); nvt_cir_wake_reg_write(nvt, CIR_RX_LIMIT_COUNT & 0xff, CIR_WAKE_SLCL); /* set cir wake fifo rx trigger level (currently 67) */ nvt_cir_wake_reg_write(nvt, CIR_WAKE_FIFOCON_RX_TRIGGER_LEV, CIR_WAKE_FIFOCON); /* * Enable TX and RX, specific carrier on = low, off = high, and set * sample period (currently 50us) */ nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN | CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV | CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL, CIR_WAKE_IRCON); /* clear cir wake rx fifo */ 
nvt_clear_cir_wake_fifo(nvt); /* clear any and all stray interrupts */ nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS); } static void nvt_enable_wake(struct nvt_dev *nvt) { nvt_efm_enable(nvt); nvt_select_logical_dev(nvt, LOGICAL_DEV_ACPI); nvt_set_reg_bit(nvt, CIR_WAKE_ENABLE_BIT, CR_ACPI_CIR_WAKE); nvt_set_reg_bit(nvt, CIR_INTR_MOUSE_IRQ_BIT, CR_ACPI_IRQ_EVENTS); nvt_set_reg_bit(nvt, PME_INTR_CIR_PASS_BIT, CR_ACPI_IRQ_EVENTS2); nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR_WAKE); nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN); nvt_efm_disable(nvt); nvt_cir_wake_reg_write(nvt, CIR_WAKE_IRCON_MODE0 | CIR_WAKE_IRCON_RXEN | CIR_WAKE_IRCON_R | CIR_WAKE_IRCON_RXINV | CIR_WAKE_IRCON_SAMPLE_PERIOD_SEL, CIR_WAKE_IRCON); nvt_cir_wake_reg_write(nvt, 0xff, CIR_WAKE_IRSTS); nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN); } /* rx carrier detect only works in learning mode, must be called w/nvt_lock */ static u32 nvt_rx_carrier_detect(struct nvt_dev *nvt) { u32 count, carrier, duration = 0; int i; count = nvt_cir_reg_read(nvt, CIR_FCCL) | nvt_cir_reg_read(nvt, CIR_FCCH) << 8; for (i = 0; i < nvt->pkts; i++) { if (nvt->buf[i] & BUF_PULSE_BIT) duration += nvt->buf[i] & BUF_LEN_MASK; } duration *= SAMPLE_PERIOD; if (!count || !duration) { nvt_pr(KERN_NOTICE, "Unable to determine carrier! (c:%u, d:%u)", count, duration); return 0; } carrier = MS_TO_NS(count) / duration; if ((carrier > MAX_CARRIER) || (carrier < MIN_CARRIER)) nvt_dbg("WTF? 
Carrier frequency out of range!"); nvt_dbg("Carrier frequency: %u (count %u, duration %u)", carrier, count, duration); return carrier; } /* * set carrier frequency * * set carrier on 2 registers: CP & CC * always set CP as 0x81 * set CC by SPEC, CC = 3MHz/carrier - 1 */ static int nvt_set_tx_carrier(struct rc_dev *dev, u32 carrier) { struct nvt_dev *nvt = dev->priv; u16 val; nvt_cir_reg_write(nvt, 1, CIR_CP); val = 3000000 / (carrier) - 1; nvt_cir_reg_write(nvt, val & 0xff, CIR_CC); nvt_dbg("cp: 0x%x cc: 0x%x\n", nvt_cir_reg_read(nvt, CIR_CP), nvt_cir_reg_read(nvt, CIR_CC)); return 0; } /* * nvt_tx_ir * * 1) clean TX fifo first (handled by AP) * 2) copy data from user space * 3) disable RX interrupts, enable TX interrupts: TTR & TFU * 4) send 9 packets to TX FIFO to open TTR * in interrupt_handler: * 5) send all data out * go back to write(): * 6) disable TX interrupts, re-enable RX interupts * * The key problem of this function is user space data may larger than * driver's data buf length. So nvt_tx_ir() will only copy TX_BUF_LEN data to * buf, and keep current copied data buf num in cur_buf_num. But driver's buf * number may larger than TXFCONT (0xff). So in interrupt_handler, it has to * set TXFCONT as 0xff, until buf_count less than 0xff. 
*/ static int nvt_tx_ir(struct rc_dev *dev, int *txbuf, u32 n) { struct nvt_dev *nvt = dev->priv; unsigned long flags; size_t cur_count; unsigned int i; u8 iren; int ret; spin_lock_irqsave(&nvt->tx.lock, flags); if (n >= TX_BUF_LEN) { nvt->tx.buf_count = cur_count = TX_BUF_LEN; ret = TX_BUF_LEN; } else { nvt->tx.buf_count = cur_count = n; ret = n; } memcpy(nvt->tx.buf, txbuf, nvt->tx.buf_count); nvt->tx.cur_buf_num = 0; /* save currently enabled interrupts */ iren = nvt_cir_reg_read(nvt, CIR_IREN); /* now disable all interrupts, save TFU & TTR */ nvt_cir_reg_write(nvt, CIR_IREN_TFU | CIR_IREN_TTR, CIR_IREN); nvt->tx.tx_state = ST_TX_REPLY; nvt_cir_reg_write(nvt, CIR_FIFOCON_TX_TRIGGER_LEV_8 | CIR_FIFOCON_RXFIFOCLR, CIR_FIFOCON); /* trigger TTR interrupt by writing out ones, (yes, it's ugly) */ for (i = 0; i < 9; i++) nvt_cir_reg_write(nvt, 0x01, CIR_STXFIFO); spin_unlock_irqrestore(&nvt->tx.lock, flags); wait_event(nvt->tx.queue, nvt->tx.tx_state == ST_TX_REQUEST); spin_lock_irqsave(&nvt->tx.lock, flags); nvt->tx.tx_state = ST_TX_NONE; spin_unlock_irqrestore(&nvt->tx.lock, flags); /* restore enabled interrupts to prior state */ nvt_cir_reg_write(nvt, iren, CIR_IREN); return ret; } /* dump contents of the last rx buffer we got from the hw rx fifo */ static void nvt_dump_rx_buf(struct nvt_dev *nvt) { int i; printk(KERN_DEBUG "%s (len %d): ", __func__, nvt->pkts); for (i = 0; (i < nvt->pkts) && (i < RX_BUF_LEN); i++) printk(KERN_CONT "0x%02x ", nvt->buf[i]); printk(KERN_CONT "\n"); } /* * Process raw data in rx driver buffer, store it in raw IR event kfifo, * trigger decode when appropriate. * * We get IR data samples one byte at a time. If the msb is set, its a pulse, * otherwise its a space. The lower 7 bits are the count of SAMPLE_PERIOD * (default 50us) intervals for that pulse/space. A discrete signal is * followed by a series of 0x7f packets, then either 0x7<something> or 0x80 * to signal more IR coming (repeats) or end of IR, respectively. 
We store * sample data in the raw event kfifo until we see 0x7<something> (except f) * or 0x80, at which time, we trigger a decode operation. */ static void nvt_process_rx_ir_data(struct nvt_dev *nvt) { DEFINE_IR_RAW_EVENT(rawir); u32 carrier; u8 sample; int i; nvt_dbg_verbose("%s firing", __func__); if (debug) nvt_dump_rx_buf(nvt); if (nvt->carrier_detect_enabled) carrier = nvt_rx_carrier_detect(nvt); nvt_dbg_verbose("Processing buffer of len %d", nvt->pkts); init_ir_raw_event(&rawir); for (i = 0; i < nvt->pkts; i++) { sample = nvt->buf[i]; rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); rawir.duration = US_TO_NS((sample & BUF_LEN_MASK) * SAMPLE_PERIOD); nvt_dbg("Storing %s with duration %d", rawir.pulse ? "pulse" : "space", rawir.duration); ir_raw_event_store_with_filter(nvt->rdev, &rawir); /* * BUF_PULSE_BIT indicates end of IR data, BUF_REPEAT_BYTE * indicates end of IR signal, but new data incoming. In both * cases, it means we're ready to call ir_raw_event_handle */ if ((sample == BUF_PULSE_BIT) && (i + 1 < nvt->pkts)) { nvt_dbg("Calling ir_raw_event_handle (signal end)\n"); ir_raw_event_handle(nvt->rdev); } } nvt->pkts = 0; nvt_dbg("Calling ir_raw_event_handle (buffer empty)\n"); ir_raw_event_handle(nvt->rdev); nvt_dbg_verbose("%s done", __func__); } static void nvt_handle_rx_fifo_overrun(struct nvt_dev *nvt) { nvt_pr(KERN_WARNING, "RX FIFO overrun detected, flushing data!"); nvt->pkts = 0; nvt_clear_cir_fifo(nvt); ir_raw_event_reset(nvt->rdev); } /* copy data from hardware rx fifo into driver buffer */ static void nvt_get_rx_ir_data(struct nvt_dev *nvt) { unsigned long flags; u8 fifocount, val; unsigned int b_idx; bool overrun = false; int i; /* Get count of how many bytes to read from RX FIFO */ fifocount = nvt_cir_reg_read(nvt, CIR_RXFCONT); /* if we get 0xff, probably means the logical dev is disabled */ if (fifocount == 0xff) return; /* watch out for a fifo overrun condition */ else if (fifocount > RX_BUF_LEN) { overrun = true; fifocount = RX_BUF_LEN; } 
nvt_dbg("attempting to fetch %u bytes from hw rx fifo", fifocount); spin_lock_irqsave(&nvt->nvt_lock, flags); b_idx = nvt->pkts; /* This should never happen, but lets check anyway... */ if (b_idx + fifocount > RX_BUF_LEN) { nvt_process_rx_ir_data(nvt); b_idx = 0; } /* Read fifocount bytes from CIR Sample RX FIFO register */ for (i = 0; i < fifocount; i++) { val = nvt_cir_reg_read(nvt, CIR_SRXFIFO); nvt->buf[b_idx + i] = val; } nvt->pkts += fifocount; nvt_dbg("%s: pkts now %d", __func__, nvt->pkts); nvt_process_rx_ir_data(nvt); if (overrun) nvt_handle_rx_fifo_overrun(nvt); spin_unlock_irqrestore(&nvt->nvt_lock, flags); } static void nvt_cir_log_irqs(u8 status, u8 iren) { nvt_pr(KERN_INFO, "IRQ 0x%02x (IREN 0x%02x) :%s%s%s%s%s%s%s%s%s", status, iren, status & CIR_IRSTS_RDR ? " RDR" : "", status & CIR_IRSTS_RTR ? " RTR" : "", status & CIR_IRSTS_PE ? " PE" : "", status & CIR_IRSTS_RFO ? " RFO" : "", status & CIR_IRSTS_TE ? " TE" : "", status & CIR_IRSTS_TTR ? " TTR" : "", status & CIR_IRSTS_TFU ? " TFU" : "", status & CIR_IRSTS_GH ? " GH" : "", status & ~(CIR_IRSTS_RDR | CIR_IRSTS_RTR | CIR_IRSTS_PE | CIR_IRSTS_RFO | CIR_IRSTS_TE | CIR_IRSTS_TTR | CIR_IRSTS_TFU | CIR_IRSTS_GH) ? " ?" : ""); } static bool nvt_cir_tx_inactive(struct nvt_dev *nvt) { unsigned long flags; bool tx_inactive; u8 tx_state; spin_lock_irqsave(&nvt->tx.lock, flags); tx_state = nvt->tx.tx_state; spin_unlock_irqrestore(&nvt->tx.lock, flags); tx_inactive = (tx_state == ST_TX_NONE); return tx_inactive; } /* interrupt service routine for incoming and outgoing CIR data */ static irqreturn_t nvt_cir_isr(int irq, void *data) { struct nvt_dev *nvt = data; u8 status, iren, cur_state; unsigned long flags; nvt_dbg_verbose("%s firing", __func__); nvt_efm_enable(nvt); nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); nvt_efm_disable(nvt); /* * Get IR Status register contents. 
Write 1 to ack/clear * * bit: reg name - description * 7: CIR_IRSTS_RDR - RX Data Ready * 6: CIR_IRSTS_RTR - RX FIFO Trigger Level Reach * 5: CIR_IRSTS_PE - Packet End * 4: CIR_IRSTS_RFO - RX FIFO Overrun (RDR will also be set) * 3: CIR_IRSTS_TE - TX FIFO Empty * 2: CIR_IRSTS_TTR - TX FIFO Trigger Level Reach * 1: CIR_IRSTS_TFU - TX FIFO Underrun * 0: CIR_IRSTS_GH - Min Length Detected */ status = nvt_cir_reg_read(nvt, CIR_IRSTS); if (!status) { nvt_dbg_verbose("%s exiting, IRSTS 0x0", __func__); nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS); return IRQ_RETVAL(IRQ_NONE); } /* ack/clear all irq flags we've got */ nvt_cir_reg_write(nvt, status, CIR_IRSTS); nvt_cir_reg_write(nvt, 0, CIR_IRSTS); /* Interrupt may be shared with CIR Wake, bail if CIR not enabled */ iren = nvt_cir_reg_read(nvt, CIR_IREN); if (!iren) { nvt_dbg_verbose("%s exiting, CIR not enabled", __func__); return IRQ_RETVAL(IRQ_NONE); } if (debug) nvt_cir_log_irqs(status, iren); if (status & CIR_IRSTS_RTR) { /* FIXME: add code for study/learn mode */ /* We only do rx if not tx'ing */ if (nvt_cir_tx_inactive(nvt)) nvt_get_rx_ir_data(nvt); } if (status & CIR_IRSTS_PE) { if (nvt_cir_tx_inactive(nvt)) nvt_get_rx_ir_data(nvt); spin_lock_irqsave(&nvt->nvt_lock, flags); cur_state = nvt->study_state; spin_unlock_irqrestore(&nvt->nvt_lock, flags); if (cur_state == ST_STUDY_NONE) nvt_clear_cir_fifo(nvt); } if (status & CIR_IRSTS_TE) nvt_clear_tx_fifo(nvt); if (status & CIR_IRSTS_TTR) { unsigned int pos, count; u8 tmp; spin_lock_irqsave(&nvt->tx.lock, flags); pos = nvt->tx.cur_buf_num; count = nvt->tx.buf_count; /* Write data into the hardware tx fifo while pos < count */ if (pos < count) { nvt_cir_reg_write(nvt, nvt->tx.buf[pos], CIR_STXFIFO); nvt->tx.cur_buf_num++; /* Disable TX FIFO Trigger Level Reach (TTR) interrupt */ } else { tmp = nvt_cir_reg_read(nvt, CIR_IREN); nvt_cir_reg_write(nvt, tmp & ~CIR_IREN_TTR, CIR_IREN); } spin_unlock_irqrestore(&nvt->tx.lock, flags); } if (status & CIR_IRSTS_TFU) { 
spin_lock_irqsave(&nvt->tx.lock, flags); if (nvt->tx.tx_state == ST_TX_REPLY) { nvt->tx.tx_state = ST_TX_REQUEST; wake_up(&nvt->tx.queue); } spin_unlock_irqrestore(&nvt->tx.lock, flags); } nvt_dbg_verbose("%s done", __func__); return IRQ_RETVAL(IRQ_HANDLED); } /* Interrupt service routine for CIR Wake */ static irqreturn_t nvt_cir_wake_isr(int irq, void *data) { u8 status, iren, val; struct nvt_dev *nvt = data; unsigned long flags; nvt_dbg_wake("%s firing", __func__); status = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IRSTS); if (!status) return IRQ_RETVAL(IRQ_NONE); if (status & CIR_WAKE_IRSTS_IR_PENDING) nvt_clear_cir_wake_fifo(nvt); nvt_cir_wake_reg_write(nvt, status, CIR_WAKE_IRSTS); nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IRSTS); /* Interrupt may be shared with CIR, bail if Wake not enabled */ iren = nvt_cir_wake_reg_read(nvt, CIR_WAKE_IREN); if (!iren) { nvt_dbg_wake("%s exiting, wake not enabled", __func__); return IRQ_RETVAL(IRQ_HANDLED); } if ((status & CIR_WAKE_IRSTS_PE) && (nvt->wake_state == ST_WAKE_START)) { while (nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY_IDX)) { val = nvt_cir_wake_reg_read(nvt, CIR_WAKE_RD_FIFO_ONLY); nvt_dbg("setting wake up key: 0x%x", val); } nvt_cir_wake_reg_write(nvt, 0, CIR_WAKE_IREN); spin_lock_irqsave(&nvt->nvt_lock, flags); nvt->wake_state = ST_WAKE_FINISH; spin_unlock_irqrestore(&nvt->nvt_lock, flags); } nvt_dbg_wake("%s done", __func__); return IRQ_RETVAL(IRQ_HANDLED); } static void nvt_enable_cir(struct nvt_dev *nvt) { /* set function enable flags */ nvt_cir_reg_write(nvt, CIR_IRCON_TXEN | CIR_IRCON_RXEN | CIR_IRCON_RXINV | CIR_IRCON_SAMPLE_PERIOD_SEL, CIR_IRCON); nvt_efm_enable(nvt); /* enable the CIR logical device */ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN); nvt_efm_disable(nvt); /* clear all pending interrupts */ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS); /* enable interrupts */ nvt_set_cir_iren(nvt); } static void nvt_disable_cir(struct nvt_dev *nvt) { /* 
disable CIR interrupts */ nvt_cir_reg_write(nvt, 0, CIR_IREN); /* clear any and all pending interrupts */ nvt_cir_reg_write(nvt, 0xff, CIR_IRSTS); /* clear all function enable flags */ nvt_cir_reg_write(nvt, 0, CIR_IRCON); /* clear hardware rx and tx fifos */ nvt_clear_cir_fifo(nvt); nvt_clear_tx_fifo(nvt); nvt_efm_enable(nvt); /* disable the CIR logical device */ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN); nvt_efm_disable(nvt); } static int nvt_open(struct rc_dev *dev) { struct nvt_dev *nvt = dev->priv; unsigned long flags; spin_lock_irqsave(&nvt->nvt_lock, flags); nvt_enable_cir(nvt); spin_unlock_irqrestore(&nvt->nvt_lock, flags); return 0; } static void nvt_close(struct rc_dev *dev) { struct nvt_dev *nvt = dev->priv; unsigned long flags; spin_lock_irqsave(&nvt->nvt_lock, flags); nvt_disable_cir(nvt); spin_unlock_irqrestore(&nvt->nvt_lock, flags); } /* Allocate memory, probe hardware, and initialize everything */ static int nvt_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) { struct nvt_dev *nvt; struct rc_dev *rdev; int ret = -ENOMEM; nvt = kzalloc(sizeof(struct nvt_dev), GFP_KERNEL); if (!nvt) return ret; /* input device for IR remote (and tx) */ rdev = rc_allocate_device(); if (!rdev) goto failure; ret = -ENODEV; /* validate pnp resources */ if (!pnp_port_valid(pdev, 0) || pnp_port_len(pdev, 0) < CIR_IOREG_LENGTH) { dev_err(&pdev->dev, "IR PNP Port not valid!\n"); goto failure; } if (!pnp_irq_valid(pdev, 0)) { dev_err(&pdev->dev, "PNP IRQ not valid!\n"); goto failure; } if (!pnp_port_valid(pdev, 1) || pnp_port_len(pdev, 1) < CIR_IOREG_LENGTH) { dev_err(&pdev->dev, "Wake PNP Port not valid!\n"); goto failure; } nvt->cir_addr = pnp_port_start(pdev, 0); nvt->cir_irq = pnp_irq(pdev, 0); nvt->cir_wake_addr = pnp_port_start(pdev, 1); /* irq is always shared between cir and cir wake */ nvt->cir_wake_irq = nvt->cir_irq; nvt->cr_efir = CR_EFIR; nvt->cr_efdr = CR_EFDR; 
spin_lock_init(&nvt->nvt_lock); spin_lock_init(&nvt->tx.lock); ret = -EBUSY; /* now claim resources */ if (!request_region(nvt->cir_addr, CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) goto failure; if (request_irq(nvt->cir_irq, nvt_cir_isr, IRQF_SHARED, NVT_DRIVER_NAME, (void *)nvt)) goto failure; if (!request_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH, NVT_DRIVER_NAME)) goto failure; if (request_irq(nvt->cir_wake_irq, nvt_cir_wake_isr, IRQF_SHARED, NVT_DRIVER_NAME, (void *)nvt)) goto failure; pnp_set_drvdata(pdev, nvt); nvt->pdev = pdev; init_waitqueue_head(&nvt->tx.queue); ret = nvt_hw_detect(nvt); if (ret) goto failure; /* Initialize CIR & CIR Wake Logical Devices */ nvt_efm_enable(nvt); nvt_cir_ldev_init(nvt); nvt_cir_wake_ldev_init(nvt); nvt_efm_disable(nvt); /* Initialize CIR & CIR Wake Config Registers */ nvt_cir_regs_init(nvt); nvt_cir_wake_regs_init(nvt); /* Set up the rc device */ rdev->priv = nvt; rdev->driver_type = RC_DRIVER_IR_RAW; rdev->allowed_protos = RC_TYPE_ALL; rdev->open = nvt_open; rdev->close = nvt_close; rdev->tx_ir = nvt_tx_ir; rdev->s_tx_carrier = nvt_set_tx_carrier; rdev->input_name = "Nuvoton w836x7hg Infrared Remote Transceiver"; rdev->input_phys = "nuvoton/cir0"; rdev->input_id.bustype = BUS_HOST; rdev->input_id.vendor = PCI_VENDOR_ID_WINBOND2; rdev->input_id.product = nvt->chip_major; rdev->input_id.version = nvt->chip_minor; rdev->dev.parent = &pdev->dev; rdev->driver_name = NVT_DRIVER_NAME; rdev->map_name = RC_MAP_RC6_MCE; rdev->timeout = MS_TO_NS(100); /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */ rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD); #if 0 rdev->min_timeout = XYZ; rdev->max_timeout = XYZ; /* tx bits */ rdev->tx_resolution = XYZ; #endif ret = rc_register_device(rdev); if (ret) goto failure; device_init_wakeup(&pdev->dev, true); nvt->rdev = rdev; nvt_pr(KERN_NOTICE, "driver has been successfully loaded\n"); if (debug) { cir_dump_regs(nvt); cir_wake_dump_regs(nvt); } return 0; failure: if (nvt->cir_irq) 
free_irq(nvt->cir_irq, nvt); if (nvt->cir_addr) release_region(nvt->cir_addr, CIR_IOREG_LENGTH); if (nvt->cir_wake_irq) free_irq(nvt->cir_wake_irq, nvt); if (nvt->cir_wake_addr) release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH); rc_free_device(rdev); kfree(nvt); return ret; } static void __devexit nvt_remove(struct pnp_dev *pdev) { struct nvt_dev *nvt = pnp_get_drvdata(pdev); unsigned long flags; spin_lock_irqsave(&nvt->nvt_lock, flags); /* disable CIR */ nvt_cir_reg_write(nvt, 0, CIR_IREN); nvt_disable_cir(nvt); /* enable CIR Wake (for IR power-on) */ nvt_enable_wake(nvt); spin_unlock_irqrestore(&nvt->nvt_lock, flags); /* free resources */ free_irq(nvt->cir_irq, nvt); free_irq(nvt->cir_wake_irq, nvt); release_region(nvt->cir_addr, CIR_IOREG_LENGTH); release_region(nvt->cir_wake_addr, CIR_IOREG_LENGTH); rc_unregister_device(nvt->rdev); kfree(nvt); } static int nvt_suspend(struct pnp_dev *pdev, pm_message_t state) { struct nvt_dev *nvt = pnp_get_drvdata(pdev); unsigned long flags; nvt_dbg("%s called", __func__); /* zero out misc state tracking */ spin_lock_irqsave(&nvt->nvt_lock, flags); nvt->study_state = ST_STUDY_NONE; nvt->wake_state = ST_WAKE_NONE; spin_unlock_irqrestore(&nvt->nvt_lock, flags); spin_lock_irqsave(&nvt->tx.lock, flags); nvt->tx.tx_state = ST_TX_NONE; spin_unlock_irqrestore(&nvt->tx.lock, flags); /* disable all CIR interrupts */ nvt_cir_reg_write(nvt, 0, CIR_IREN); nvt_efm_enable(nvt); /* disable cir logical dev */ nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); nvt_cr_write(nvt, LOGICAL_DEV_DISABLE, CR_LOGICAL_DEV_EN); nvt_efm_disable(nvt); /* make sure wake is enabled */ nvt_enable_wake(nvt); return 0; } static int nvt_resume(struct pnp_dev *pdev) { int ret = 0; struct nvt_dev *nvt = pnp_get_drvdata(pdev); nvt_dbg("%s called", __func__); /* open interrupt */ nvt_set_cir_iren(nvt); /* Enable CIR logical device */ nvt_efm_enable(nvt); nvt_select_logical_dev(nvt, LOGICAL_DEV_CIR); nvt_cr_write(nvt, LOGICAL_DEV_ENABLE, CR_LOGICAL_DEV_EN); 
nvt_efm_disable(nvt);

	/* reprogram CIR and CIR Wake config registers lost across suspend */
	nvt_cir_regs_init(nvt);
	nvt_cir_wake_regs_init(nvt);

	return ret;
}

/* on shutdown, leave the CIR Wake logical device armed for IR power-on */
static void nvt_shutdown(struct pnp_dev *pdev)
{
	struct nvt_dev *nvt = pnp_get_drvdata(pdev);
	nvt_enable_wake(nvt);
}

static const struct pnp_device_id nvt_ids[] = {
	{ "WEC0530", 0 },   /* CIR */
	{ "NTN0530", 0 },   /* CIR for new chip's pnp id */
	{ "", 0 },
};

static struct pnp_driver nvt_driver = {
	.name		= NVT_DRIVER_NAME,
	.id_table	= nvt_ids,
	.flags		= PNP_DRIVER_RES_DO_NOT_CHANGE,
	.probe		= nvt_probe,
	.remove		= __devexit_p(nvt_remove),
	.suspend	= nvt_suspend,
	.resume		= nvt_resume,
	.shutdown	= nvt_shutdown,
};

/* module entry point: register the PNP driver with the kernel */
int nvt_init(void)
{
	return pnp_register_driver(&nvt_driver);
}

/* module exit point: unregister the PNP driver */
void nvt_exit(void)
{
	pnp_unregister_driver(&nvt_driver);
}

module_param(debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(debug, "Enable debugging output");

MODULE_DEVICE_TABLE(pnp, nvt_ids);
MODULE_DESCRIPTION("Nuvoton W83667HG-A & W83677HG-I CIR driver");
MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>");
MODULE_LICENSE("GPL");

module_init(nvt_init);
module_exit(nvt_exit);
gpl-2.0
arnavgosain/tomato
drivers/net/ethernet/sis/sis190.c
2238
47273
/* sis190.c: Silicon Integrated Systems SiS190 ethernet driver Copyright (c) 2003 K.M. Liu <kmliu@sis.com> Copyright (c) 2003, 2004 Jeff Garzik <jgarzik@pobox.com> Copyright (c) 2003, 2004, 2005 Francois Romieu <romieu@fr.zoreil.com> Based on r8169.c, tg3.c, 8139cp.c, skge.c, epic100.c and SiS 190/191 genuine driver. This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. See the file COPYING in this distribution for more information. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/interrupt.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/rtnetlink.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> #include <linux/mii.h> #include <linux/delay.h> #include <linux/crc32.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <asm/irq.h> #define PHY_MAX_ADDR 32 #define PHY_ID_ANY 0x1f #define MII_REG_ANY 0x1f #define DRV_VERSION "1.4" #define DRV_NAME "sis190" #define SIS190_DRIVER_NAME DRV_NAME " Gigabit Ethernet driver " DRV_VERSION #define sis190_rx_skb netif_rx #define sis190_rx_quota(count, quota) count #define NUM_TX_DESC 64 /* [8..1024] */ #define NUM_RX_DESC 64 /* [8..8192] */ #define TX_RING_BYTES (NUM_TX_DESC * sizeof(struct TxDesc)) #define RX_RING_BYTES (NUM_RX_DESC * sizeof(struct RxDesc)) #define RX_BUF_SIZE 1536 #define RX_BUF_MASK 0xfff8 #define SIS190_REGS_SIZE 0x80 #define SIS190_TX_TIMEOUT (6*HZ) #define SIS190_PHY_TIMEOUT (10*HZ) #define SIS190_MSG_DEFAULT (NETIF_MSG_DRV | NETIF_MSG_PROBE | \ NETIF_MSG_LINK | NETIF_MSG_IFUP | \ NETIF_MSG_IFDOWN) /* Enhanced PHY access register bit definitions */ #define EhnMIIread 
0x0000 #define EhnMIIwrite 0x0020 #define EhnMIIdataShift 16 #define EhnMIIpmdShift 6 /* 7016 only */ #define EhnMIIregShift 11 #define EhnMIIreq 0x0010 #define EhnMIInotDone 0x0010 /* Write/read MMIO register */ #define SIS_W8(reg, val) writeb ((val), ioaddr + (reg)) #define SIS_W16(reg, val) writew ((val), ioaddr + (reg)) #define SIS_W32(reg, val) writel ((val), ioaddr + (reg)) #define SIS_R8(reg) readb (ioaddr + (reg)) #define SIS_R16(reg) readw (ioaddr + (reg)) #define SIS_R32(reg) readl (ioaddr + (reg)) #define SIS_PCI_COMMIT() SIS_R32(IntrControl) enum sis190_registers { TxControl = 0x00, TxDescStartAddr = 0x04, rsv0 = 0x08, // reserved TxSts = 0x0c, // unused (Control/Status) RxControl = 0x10, RxDescStartAddr = 0x14, rsv1 = 0x18, // reserved RxSts = 0x1c, // unused IntrStatus = 0x20, IntrMask = 0x24, IntrControl = 0x28, IntrTimer = 0x2c, // unused (Interrupt Timer) PMControl = 0x30, // unused (Power Mgmt Control/Status) rsv2 = 0x34, // reserved ROMControl = 0x38, ROMInterface = 0x3c, StationControl = 0x40, GMIIControl = 0x44, GIoCR = 0x48, // unused (GMAC IO Compensation) GIoCtrl = 0x4c, // unused (GMAC IO Control) TxMacControl = 0x50, TxLimit = 0x54, // unused (Tx MAC Timer/TryLimit) RGDelay = 0x58, // unused (RGMII Tx Internal Delay) rsv3 = 0x5c, // reserved RxMacControl = 0x60, RxMacAddr = 0x62, RxHashTable = 0x68, // Undocumented = 0x6c, RxWolCtrl = 0x70, RxWolData = 0x74, // unused (Rx WOL Data Access) RxMPSControl = 0x78, // unused (Rx MPS Control) rsv4 = 0x7c, // reserved }; enum sis190_register_content { /* IntrStatus */ SoftInt = 0x40000000, // unused Timeup = 0x20000000, // unused PauseFrame = 0x00080000, // unused MagicPacket = 0x00040000, // unused WakeupFrame = 0x00020000, // unused LinkChange = 0x00010000, RxQEmpty = 0x00000080, RxQInt = 0x00000040, TxQ1Empty = 0x00000020, // unused TxQ1Int = 0x00000010, TxQ0Empty = 0x00000008, // unused TxQ0Int = 0x00000004, RxHalt = 0x00000002, TxHalt = 0x00000001, /* {Rx/Tx}CmdBits */ CmdReset = 0x10, 
CmdRxEnb = 0x08, // unused CmdTxEnb = 0x01, RxBufEmpty = 0x01, // unused /* Cfg9346Bits */ Cfg9346_Lock = 0x00, // unused Cfg9346_Unlock = 0xc0, // unused /* RxMacControl */ AcceptErr = 0x20, // unused AcceptRunt = 0x10, // unused AcceptBroadcast = 0x0800, AcceptMulticast = 0x0400, AcceptMyPhys = 0x0200, AcceptAllPhys = 0x0100, /* RxConfigBits */ RxCfgFIFOShift = 13, RxCfgDMAShift = 8, // 0x1a in RxControl ? /* TxConfigBits */ TxInterFrameGapShift = 24, TxDMAShift = 8, /* DMA burst value (0-7) is shift this many bits */ LinkStatus = 0x02, // unused FullDup = 0x01, // unused /* TBICSRBit */ TBILinkOK = 0x02000000, // unused }; struct TxDesc { __le32 PSize; __le32 status; __le32 addr; __le32 size; }; struct RxDesc { __le32 PSize; __le32 status; __le32 addr; __le32 size; }; enum _DescStatusBit { /* _Desc.status */ OWNbit = 0x80000000, // RXOWN/TXOWN INTbit = 0x40000000, // RXINT/TXINT CRCbit = 0x00020000, // CRCOFF/CRCEN PADbit = 0x00010000, // PREADD/PADEN /* _Desc.size */ RingEnd = 0x80000000, /* TxDesc.status */ LSEN = 0x08000000, // TSO ? -- FR IPCS = 0x04000000, TCPCS = 0x02000000, UDPCS = 0x01000000, BSTEN = 0x00800000, EXTEN = 0x00400000, DEFEN = 0x00200000, BKFEN = 0x00100000, CRSEN = 0x00080000, COLEN = 0x00040000, THOL3 = 0x30000000, THOL2 = 0x20000000, THOL1 = 0x10000000, THOL0 = 0x00000000, WND = 0x00080000, TABRT = 0x00040000, FIFO = 0x00020000, LINK = 0x00010000, ColCountMask = 0x0000ffff, /* RxDesc.status */ IPON = 0x20000000, TCPON = 0x10000000, UDPON = 0x08000000, Wakup = 0x00400000, Magic = 0x00200000, Pause = 0x00100000, DEFbit = 0x00200000, BCAST = 0x000c0000, MCAST = 0x00080000, UCAST = 0x00040000, /* RxDesc.PSize */ TAGON = 0x80000000, RxDescCountMask = 0x7f000000, // multi-desc pkt when > 1 ? 
-- FR ABORT = 0x00800000, SHORT = 0x00400000, LIMIT = 0x00200000, MIIER = 0x00100000, OVRUN = 0x00080000, NIBON = 0x00040000, COLON = 0x00020000, CRCOK = 0x00010000, RxSizeMask = 0x0000ffff /* * The asic could apparently do vlan, TSO, jumbo (sis191 only) and * provide two (unused with Linux) Tx queues. No publicly * available documentation alas. */ }; enum sis190_eeprom_access_register_bits { EECS = 0x00000001, // unused EECLK = 0x00000002, // unused EEDO = 0x00000008, // unused EEDI = 0x00000004, // unused EEREQ = 0x00000080, EEROP = 0x00000200, EEWOP = 0x00000100 // unused }; /* EEPROM Addresses */ enum sis190_eeprom_address { EEPROMSignature = 0x00, EEPROMCLK = 0x01, // unused EEPROMInfo = 0x02, EEPROMMACAddr = 0x03 }; enum sis190_feature { F_HAS_RGMII = 1, F_PHY_88E1111 = 2, F_PHY_BCM5461 = 4 }; struct sis190_private { void __iomem *mmio_addr; struct pci_dev *pci_dev; struct net_device *dev; spinlock_t lock; u32 rx_buf_sz; u32 cur_rx; u32 cur_tx; u32 dirty_rx; u32 dirty_tx; dma_addr_t rx_dma; dma_addr_t tx_dma; struct RxDesc *RxDescRing; struct TxDesc *TxDescRing; struct sk_buff *Rx_skbuff[NUM_RX_DESC]; struct sk_buff *Tx_skbuff[NUM_TX_DESC]; struct work_struct phy_task; struct timer_list timer; u32 msg_enable; struct mii_if_info mii_if; struct list_head first_phy; u32 features; u32 negotiated_lpa; enum { LNK_OFF, LNK_ON, LNK_AUTONEG, } link_status; }; struct sis190_phy { struct list_head list; int phy_id; u16 id[2]; u16 status; u8 type; }; enum sis190_phy_type { UNKNOWN = 0x00, HOME = 0x01, LAN = 0x02, MIX = 0x03 }; static struct mii_chip_info { const char *name; u16 id[2]; unsigned int type; u32 feature; } mii_chip_table[] = { { "Atheros PHY", { 0x004d, 0xd010 }, LAN, 0 }, { "Atheros PHY AR8012", { 0x004d, 0xd020 }, LAN, 0 }, { "Broadcom PHY BCM5461", { 0x0020, 0x60c0 }, LAN, F_PHY_BCM5461 }, { "Broadcom PHY AC131", { 0x0143, 0xbc70 }, LAN, 0 }, { "Agere PHY ET1101B", { 0x0282, 0xf010 }, LAN, 0 }, { "Marvell PHY 88E1111", { 0x0141, 0x0cc0 }, LAN, 
F_PHY_88E1111 }, { "Realtek PHY RTL8201", { 0x0000, 0x8200 }, LAN, 0 }, { NULL, } }; static const struct { const char *name; } sis_chip_info[] = { { "SiS 190 PCI Fast Ethernet adapter" }, { "SiS 191 PCI Gigabit Ethernet adapter" }, }; static DEFINE_PCI_DEVICE_TABLE(sis190_pci_tbl) = { { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0190), 0, 0, 0 }, { PCI_DEVICE(PCI_VENDOR_ID_SI, 0x0191), 0, 0, 1 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, sis190_pci_tbl); static int rx_copybreak = 200; static struct { u32 msg_enable; } debug = { -1 }; MODULE_DESCRIPTION("SiS sis190/191 Gigabit Ethernet driver"); module_param(rx_copybreak, int, 0); MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); module_param_named(debug, debug.msg_enable, int, 0); MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); MODULE_AUTHOR("K.M. Liu <kmliu@sis.com>, Ueimor <romieu@fr.zoreil.com>"); MODULE_VERSION(DRV_VERSION); MODULE_LICENSE("GPL"); static const u32 sis190_intr_mask = RxQEmpty | RxQInt | TxQ1Int | TxQ0Int | RxHalt | TxHalt | LinkChange; /* * Maximum number of multicast addresses to filter (vs. Rx-all-multicast). * The chips use a 64 element hash table based on the Ethernet CRC. 
*/ static const int multicast_filter_limit = 32; static void __mdio_cmd(void __iomem *ioaddr, u32 ctl) { unsigned int i; SIS_W32(GMIIControl, ctl); msleep(1); for (i = 0; i < 100; i++) { if (!(SIS_R32(GMIIControl) & EhnMIInotDone)) break; msleep(1); } if (i > 99) pr_err("PHY command failed !\n"); } static void mdio_write(void __iomem *ioaddr, int phy_id, int reg, int val) { __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIwrite | (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift) | (((u32) val) << EhnMIIdataShift)); } static int mdio_read(void __iomem *ioaddr, int phy_id, int reg) { __mdio_cmd(ioaddr, EhnMIIreq | EhnMIIread | (((u32) reg) << EhnMIIregShift) | (phy_id << EhnMIIpmdShift)); return (u16) (SIS_R32(GMIIControl) >> EhnMIIdataShift); } static void __mdio_write(struct net_device *dev, int phy_id, int reg, int val) { struct sis190_private *tp = netdev_priv(dev); mdio_write(tp->mmio_addr, phy_id, reg, val); } static int __mdio_read(struct net_device *dev, int phy_id, int reg) { struct sis190_private *tp = netdev_priv(dev); return mdio_read(tp->mmio_addr, phy_id, reg); } static u16 mdio_read_latched(void __iomem *ioaddr, int phy_id, int reg) { mdio_read(ioaddr, phy_id, reg); return mdio_read(ioaddr, phy_id, reg); } static u16 sis190_read_eeprom(void __iomem *ioaddr, u32 reg) { u16 data = 0xffff; unsigned int i; if (!(SIS_R32(ROMControl) & 0x0002)) return 0; SIS_W32(ROMInterface, EEREQ | EEROP | (reg << 10)); for (i = 0; i < 200; i++) { if (!(SIS_R32(ROMInterface) & EEREQ)) { data = (SIS_R32(ROMInterface) & 0xffff0000) >> 16; break; } msleep(1); } return data; } static void sis190_irq_mask_and_ack(void __iomem *ioaddr) { SIS_W32(IntrMask, 0x00); SIS_W32(IntrStatus, 0xffffffff); SIS_PCI_COMMIT(); } static void sis190_asic_down(void __iomem *ioaddr) { /* Stop the chip's Tx and Rx DMA processes. 
*/ SIS_W32(TxControl, 0x1a00); SIS_W32(RxControl, 0x1a00); sis190_irq_mask_and_ack(ioaddr); } static void sis190_mark_as_last_descriptor(struct RxDesc *desc) { desc->size |= cpu_to_le32(RingEnd); } static inline void sis190_give_to_asic(struct RxDesc *desc, u32 rx_buf_sz) { u32 eor = le32_to_cpu(desc->size) & RingEnd; desc->PSize = 0x0; desc->size = cpu_to_le32((rx_buf_sz & RX_BUF_MASK) | eor); wmb(); desc->status = cpu_to_le32(OWNbit | INTbit); } static inline void sis190_map_to_asic(struct RxDesc *desc, dma_addr_t mapping, u32 rx_buf_sz) { desc->addr = cpu_to_le32(mapping); sis190_give_to_asic(desc, rx_buf_sz); } static inline void sis190_make_unusable_by_asic(struct RxDesc *desc) { desc->PSize = 0x0; desc->addr = cpu_to_le32(0xdeadbeef); desc->size &= cpu_to_le32(RingEnd); wmb(); desc->status = 0x0; } static struct sk_buff *sis190_alloc_rx_skb(struct sis190_private *tp, struct RxDesc *desc) { u32 rx_buf_sz = tp->rx_buf_sz; struct sk_buff *skb; dma_addr_t mapping; skb = netdev_alloc_skb(tp->dev, rx_buf_sz); if (unlikely(!skb)) goto skb_alloc_failed; mapping = pci_map_single(tp->pci_dev, skb->data, tp->rx_buf_sz, PCI_DMA_FROMDEVICE); if (pci_dma_mapping_error(tp->pci_dev, mapping)) goto out; sis190_map_to_asic(desc, mapping, rx_buf_sz); return skb; out: dev_kfree_skb_any(skb); skb_alloc_failed: sis190_make_unusable_by_asic(desc); return NULL; } static u32 sis190_rx_fill(struct sis190_private *tp, struct net_device *dev, u32 start, u32 end) { u32 cur; for (cur = start; cur < end; cur++) { unsigned int i = cur % NUM_RX_DESC; if (tp->Rx_skbuff[i]) continue; tp->Rx_skbuff[i] = sis190_alloc_rx_skb(tp, tp->RxDescRing + i); if (!tp->Rx_skbuff[i]) break; } return cur - start; } static bool sis190_try_rx_copy(struct sis190_private *tp, struct sk_buff **sk_buff, int pkt_size, dma_addr_t addr) { struct sk_buff *skb; bool done = false; if (pkt_size >= rx_copybreak) goto out; skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size); if (!skb) goto out; 
pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz, PCI_DMA_FROMDEVICE); skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); *sk_buff = skb; done = true; out: return done; } static inline int sis190_rx_pkt_err(u32 status, struct net_device_stats *stats) { #define ErrMask (OVRUN | SHORT | LIMIT | MIIER | NIBON | COLON | ABORT) if ((status & CRCOK) && !(status & ErrMask)) return 0; if (!(status & CRCOK)) stats->rx_crc_errors++; else if (status & OVRUN) stats->rx_over_errors++; else if (status & (SHORT | LIMIT)) stats->rx_length_errors++; else if (status & (MIIER | NIBON | COLON)) stats->rx_frame_errors++; stats->rx_errors++; return -1; } static int sis190_rx_interrupt(struct net_device *dev, struct sis190_private *tp, void __iomem *ioaddr) { struct net_device_stats *stats = &dev->stats; u32 rx_left, cur_rx = tp->cur_rx; u32 delta, count; rx_left = NUM_RX_DESC + tp->dirty_rx - cur_rx; rx_left = sis190_rx_quota(rx_left, (u32) dev->quota); for (; rx_left > 0; rx_left--, cur_rx++) { unsigned int entry = cur_rx % NUM_RX_DESC; struct RxDesc *desc = tp->RxDescRing + entry; u32 status; if (le32_to_cpu(desc->status) & OWNbit) break; status = le32_to_cpu(desc->PSize); //netif_info(tp, intr, dev, "Rx PSize = %08x\n", status); if (sis190_rx_pkt_err(status, stats) < 0) sis190_give_to_asic(desc, tp->rx_buf_sz); else { struct sk_buff *skb = tp->Rx_skbuff[entry]; dma_addr_t addr = le32_to_cpu(desc->addr); int pkt_size = (status & RxSizeMask) - 4; struct pci_dev *pdev = tp->pci_dev; if (unlikely(pkt_size > tp->rx_buf_sz)) { netif_info(tp, intr, dev, "(frag) status = %08x\n", status); stats->rx_dropped++; stats->rx_length_errors++; sis190_give_to_asic(desc, tp->rx_buf_sz); continue; } if (sis190_try_rx_copy(tp, &skb, pkt_size, addr)) { pci_dma_sync_single_for_device(pdev, addr, tp->rx_buf_sz, PCI_DMA_FROMDEVICE); sis190_give_to_asic(desc, tp->rx_buf_sz); } else { pci_unmap_single(pdev, addr, tp->rx_buf_sz, PCI_DMA_FROMDEVICE); tp->Rx_skbuff[entry] = NULL; 
sis190_make_unusable_by_asic(desc); } skb_put(skb, pkt_size); skb->protocol = eth_type_trans(skb, dev); sis190_rx_skb(skb); stats->rx_packets++; stats->rx_bytes += pkt_size; if ((status & BCAST) == MCAST) stats->multicast++; } } count = cur_rx - tp->cur_rx; tp->cur_rx = cur_rx; delta = sis190_rx_fill(tp, dev, tp->dirty_rx, tp->cur_rx); if (!delta && count) netif_info(tp, intr, dev, "no Rx buffer allocated\n"); tp->dirty_rx += delta; if ((tp->dirty_rx + NUM_RX_DESC) == tp->cur_rx) netif_emerg(tp, intr, dev, "Rx buffers exhausted\n"); return count; } static void sis190_unmap_tx_skb(struct pci_dev *pdev, struct sk_buff *skb, struct TxDesc *desc) { unsigned int len; len = skb->len < ETH_ZLEN ? ETH_ZLEN : skb->len; pci_unmap_single(pdev, le32_to_cpu(desc->addr), len, PCI_DMA_TODEVICE); memset(desc, 0x00, sizeof(*desc)); } static inline int sis190_tx_pkt_err(u32 status, struct net_device_stats *stats) { #define TxErrMask (WND | TABRT | FIFO | LINK) if (!unlikely(status & TxErrMask)) return 0; if (status & WND) stats->tx_window_errors++; if (status & TABRT) stats->tx_aborted_errors++; if (status & FIFO) stats->tx_fifo_errors++; if (status & LINK) stats->tx_carrier_errors++; stats->tx_errors++; return -1; } static void sis190_tx_interrupt(struct net_device *dev, struct sis190_private *tp, void __iomem *ioaddr) { struct net_device_stats *stats = &dev->stats; u32 pending, dirty_tx = tp->dirty_tx; /* * It would not be needed if queueing was allowed to be enabled * again too early (hint: think preempt and unclocked smp systems). 
*/ unsigned int queue_stopped; smp_rmb(); pending = tp->cur_tx - dirty_tx; queue_stopped = (pending == NUM_TX_DESC); for (; pending; pending--, dirty_tx++) { unsigned int entry = dirty_tx % NUM_TX_DESC; struct TxDesc *txd = tp->TxDescRing + entry; u32 status = le32_to_cpu(txd->status); struct sk_buff *skb; if (status & OWNbit) break; skb = tp->Tx_skbuff[entry]; if (likely(sis190_tx_pkt_err(status, stats) == 0)) { stats->tx_packets++; stats->tx_bytes += skb->len; stats->collisions += ((status & ColCountMask) - 1); } sis190_unmap_tx_skb(tp->pci_dev, skb, txd); tp->Tx_skbuff[entry] = NULL; dev_kfree_skb_irq(skb); } if (tp->dirty_tx != dirty_tx) { tp->dirty_tx = dirty_tx; smp_wmb(); if (queue_stopped) netif_wake_queue(dev); } } /* * The interrupt handler does all of the Rx thread work and cleans up after * the Tx thread. */ static irqreturn_t sis190_irq(int irq, void *__dev) { struct net_device *dev = __dev; struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned int handled = 0; u32 status; status = SIS_R32(IntrStatus); if ((status == 0xffffffff) || !status) goto out; handled = 1; if (unlikely(!netif_running(dev))) { sis190_asic_down(ioaddr); goto out; } SIS_W32(IntrStatus, status); // netif_info(tp, intr, dev, "status = %08x\n", status); if (status & LinkChange) { netif_info(tp, intr, dev, "link change\n"); del_timer(&tp->timer); schedule_work(&tp->phy_task); } if (status & RxQInt) sis190_rx_interrupt(dev, tp, ioaddr); if (status & TxQ0Int) sis190_tx_interrupt(dev, tp, ioaddr); out: return IRQ_RETVAL(handled); } #ifdef CONFIG_NET_POLL_CONTROLLER static void sis190_netpoll(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); const int irq = tp->pci_dev->irq; disable_irq(irq); sis190_irq(irq, dev); enable_irq(irq); } #endif static void sis190_free_rx_skb(struct sis190_private *tp, struct sk_buff **sk_buff, struct RxDesc *desc) { struct pci_dev *pdev = tp->pci_dev; pci_unmap_single(pdev, le32_to_cpu(desc->addr), 
			 tp->rx_buf_sz, PCI_DMA_FROMDEVICE);

	dev_kfree_skb(*sk_buff);
	*sk_buff = NULL;
	/* Make sure the hardware can no longer use this descriptor. */
	sis190_make_unusable_by_asic(desc);
}

/* Release every allocated Rx buffer in the ring. */
static void sis190_rx_clear(struct sis190_private *tp)
{
	unsigned int i;

	for (i = 0; i < NUM_RX_DESC; i++) {
		if (!tp->Rx_skbuff[i])
			continue;
		sis190_free_rx_skb(tp, tp->Rx_skbuff + i, tp->RxDescRing + i);
	}
}

/* Reset all ring producer/consumer indexes. */
static void sis190_init_ring_indexes(struct sis190_private *tp)
{
	tp->dirty_tx = tp->dirty_rx = tp->cur_tx = tp->cur_rx = 0;
}

/*
 * (Re)initialize both descriptor rings and fully populate the Rx ring.
 * Returns 0 on success, -ENOMEM if the Rx ring could not be filled.
 */
static int sis190_init_ring(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);

	sis190_init_ring_indexes(tp);

	memset(tp->Tx_skbuff, 0x0, NUM_TX_DESC * sizeof(struct sk_buff *));
	memset(tp->Rx_skbuff, 0x0, NUM_RX_DESC * sizeof(struct sk_buff *));

	if (sis190_rx_fill(tp, dev, 0, NUM_RX_DESC) != NUM_RX_DESC)
		goto err_rx_clear;

	sis190_mark_as_last_descriptor(tp->RxDescRing + NUM_RX_DESC - 1);

	return 0;

err_rx_clear:
	sis190_rx_clear(tp);
	return -ENOMEM;
}

/* Program the Rx filter according to the interface flags and mc list. */
static void sis190_set_rx_mode(struct net_device *dev)
{
	struct sis190_private *tp = netdev_priv(dev);
	void __iomem *ioaddr = tp->mmio_addr;
	unsigned long flags;
	u32 mc_filter[2];	/* Multicast hash filter */
	u16 rx_mode;

	if (dev->flags & IFF_PROMISC) {
		rx_mode =
			AcceptBroadcast | AcceptMulticast | AcceptMyPhys |
			AcceptAllPhys;
		mc_filter[1] = mc_filter[0] = 0xffffffff;
	} else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
		   (dev->flags & IFF_ALLMULTI)) {
		/* Too many to filter perfectly -- accept all multicasts.
*/ rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0xffffffff; } else { struct netdev_hw_addr *ha; rx_mode = AcceptBroadcast | AcceptMyPhys; mc_filter[1] = mc_filter[0] = 0; netdev_for_each_mc_addr(ha, dev) { int bit_nr = ether_crc(ETH_ALEN, ha->addr) & 0x3f; mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31); rx_mode |= AcceptMulticast; } } spin_lock_irqsave(&tp->lock, flags); SIS_W16(RxMacControl, rx_mode | 0x2); SIS_W32(RxHashTable, mc_filter[0]); SIS_W32(RxHashTable + 4, mc_filter[1]); spin_unlock_irqrestore(&tp->lock, flags); } static void sis190_soft_reset(void __iomem *ioaddr) { SIS_W32(IntrControl, 0x8000); SIS_PCI_COMMIT(); SIS_W32(IntrControl, 0x0); sis190_asic_down(ioaddr); } static void sis190_hw_start(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; sis190_soft_reset(ioaddr); SIS_W32(TxDescStartAddr, tp->tx_dma); SIS_W32(RxDescStartAddr, tp->rx_dma); SIS_W32(IntrStatus, 0xffffffff); SIS_W32(IntrMask, 0x0); SIS_W32(GMIIControl, 0x0); SIS_W32(TxMacControl, 0x60); SIS_W16(RxMacControl, 0x02); SIS_W32(RxHashTable, 0x0); SIS_W32(0x6c, 0x0); SIS_W32(RxWolCtrl, 0x0); SIS_W32(RxWolData, 0x0); SIS_PCI_COMMIT(); sis190_set_rx_mode(dev); /* Enable all known interrupts by setting the interrupt mask. */ SIS_W32(IntrMask, sis190_intr_mask); SIS_W32(TxControl, 0x1a00 | CmdTxEnb); SIS_W32(RxControl, 0x1a1d); netif_start_queue(dev); } static void sis190_phy_task(struct work_struct *work) { struct sis190_private *tp = container_of(work, struct sis190_private, phy_task); struct net_device *dev = tp->dev; void __iomem *ioaddr = tp->mmio_addr; int phy_id = tp->mii_if.phy_id; u16 val; rtnl_lock(); if (!netif_running(dev)) goto out_unlock; val = mdio_read(ioaddr, phy_id, MII_BMCR); if (val & BMCR_RESET) { // FIXME: needlessly high ? 
-- FR 02/07/2005 mod_timer(&tp->timer, jiffies + HZ/10); goto out_unlock; } val = mdio_read_latched(ioaddr, phy_id, MII_BMSR); if (!(val & BMSR_ANEGCOMPLETE) && tp->link_status != LNK_AUTONEG) { netif_carrier_off(dev); netif_warn(tp, link, dev, "auto-negotiating...\n"); tp->link_status = LNK_AUTONEG; } else if ((val & BMSR_LSTATUS) && tp->link_status != LNK_ON) { /* Rejoice ! */ struct { int val; u32 ctl; const char *msg; } reg31[] = { { LPA_1000FULL, 0x07000c00 | 0x00001000, "1000 Mbps Full Duplex" }, { LPA_1000HALF, 0x07000c00, "1000 Mbps Half Duplex" }, { LPA_100FULL, 0x04000800 | 0x00001000, "100 Mbps Full Duplex" }, { LPA_100HALF, 0x04000800, "100 Mbps Half Duplex" }, { LPA_10FULL, 0x04000400 | 0x00001000, "10 Mbps Full Duplex" }, { LPA_10HALF, 0x04000400, "10 Mbps Half Duplex" }, { 0, 0x04000400, "unknown" } }, *p = NULL; u16 adv, autoexp, gigadv, gigrec; val = mdio_read(ioaddr, phy_id, 0x1f); netif_info(tp, link, dev, "mii ext = %04x\n", val); val = mdio_read(ioaddr, phy_id, MII_LPA); adv = mdio_read(ioaddr, phy_id, MII_ADVERTISE); autoexp = mdio_read(ioaddr, phy_id, MII_EXPANSION); netif_info(tp, link, dev, "mii lpa=%04x adv=%04x exp=%04x\n", val, adv, autoexp); if (val & LPA_NPAGE && autoexp & EXPANSION_NWAY) { /* check for gigabit speed */ gigadv = mdio_read(ioaddr, phy_id, MII_CTRL1000); gigrec = mdio_read(ioaddr, phy_id, MII_STAT1000); val = (gigadv & (gigrec >> 2)); if (val & ADVERTISE_1000FULL) p = reg31; else if (val & ADVERTISE_1000HALF) p = reg31 + 1; } if (!p) { val &= adv; for (p = reg31; p->val; p++) { if ((val & p->val) == p->val) break; } } p->ctl |= SIS_R32(StationControl) & ~0x0f001c00; if ((tp->features & F_HAS_RGMII) && (tp->features & F_PHY_BCM5461)) { // Set Tx Delay in RGMII mode. 
mdio_write(ioaddr, phy_id, 0x18, 0xf1c7); udelay(200); mdio_write(ioaddr, phy_id, 0x1c, 0x8c00); p->ctl |= 0x03000000; } SIS_W32(StationControl, p->ctl); if (tp->features & F_HAS_RGMII) { SIS_W32(RGDelay, 0x0441); SIS_W32(RGDelay, 0x0440); } tp->negotiated_lpa = p->val; netif_info(tp, link, dev, "link on %s mode\n", p->msg); netif_carrier_on(dev); tp->link_status = LNK_ON; } else if (!(val & BMSR_LSTATUS) && tp->link_status != LNK_AUTONEG) tp->link_status = LNK_OFF; mod_timer(&tp->timer, jiffies + SIS190_PHY_TIMEOUT); out_unlock: rtnl_unlock(); } static void sis190_phy_timer(unsigned long __opaque) { struct net_device *dev = (struct net_device *)__opaque; struct sis190_private *tp = netdev_priv(dev); if (likely(netif_running(dev))) schedule_work(&tp->phy_task); } static inline void sis190_delete_timer(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); del_timer_sync(&tp->timer); } static inline void sis190_request_timer(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); struct timer_list *timer = &tp->timer; init_timer(timer); timer->expires = jiffies + SIS190_PHY_TIMEOUT; timer->data = (unsigned long)dev; timer->function = sis190_phy_timer; add_timer(timer); } static void sis190_set_rxbufsize(struct sis190_private *tp, struct net_device *dev) { unsigned int mtu = dev->mtu; tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE; /* RxDesc->size has a licence to kill the lower bits */ if (tp->rx_buf_sz & 0x07) { tp->rx_buf_sz += 8; tp->rx_buf_sz &= RX_BUF_MASK; } } static int sis190_open(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; int rc = -ENOMEM; sis190_set_rxbufsize(tp, dev); /* * Rx and Tx descriptors need 256 bytes alignment. * pci_alloc_consistent() guarantees a stronger alignment. 
*/ tp->TxDescRing = pci_alloc_consistent(pdev, TX_RING_BYTES, &tp->tx_dma); if (!tp->TxDescRing) goto out; tp->RxDescRing = pci_alloc_consistent(pdev, RX_RING_BYTES, &tp->rx_dma); if (!tp->RxDescRing) goto err_free_tx_0; rc = sis190_init_ring(dev); if (rc < 0) goto err_free_rx_1; sis190_request_timer(dev); rc = request_irq(pdev->irq, sis190_irq, IRQF_SHARED, dev->name, dev); if (rc < 0) goto err_release_timer_2; sis190_hw_start(dev); out: return rc; err_release_timer_2: sis190_delete_timer(dev); sis190_rx_clear(tp); err_free_rx_1: pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); err_free_tx_0: pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); goto out; } static void sis190_tx_clear(struct sis190_private *tp) { unsigned int i; for (i = 0; i < NUM_TX_DESC; i++) { struct sk_buff *skb = tp->Tx_skbuff[i]; if (!skb) continue; sis190_unmap_tx_skb(tp->pci_dev, skb, tp->TxDescRing + i); tp->Tx_skbuff[i] = NULL; dev_kfree_skb(skb); tp->dev->stats.tx_dropped++; } tp->cur_tx = tp->dirty_tx = 0; } static void sis190_down(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; unsigned int poll_locked = 0; sis190_delete_timer(dev); netif_stop_queue(dev); do { spin_lock_irq(&tp->lock); sis190_asic_down(ioaddr); spin_unlock_irq(&tp->lock); synchronize_irq(tp->pci_dev->irq); if (!poll_locked) poll_locked++; synchronize_sched(); } while (SIS_R32(IntrMask)); sis190_tx_clear(tp); sis190_rx_clear(tp); } static int sis190_close(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); struct pci_dev *pdev = tp->pci_dev; sis190_down(dev); free_irq(pdev->irq, dev); pci_free_consistent(pdev, TX_RING_BYTES, tp->TxDescRing, tp->tx_dma); pci_free_consistent(pdev, RX_RING_BYTES, tp->RxDescRing, tp->rx_dma); tp->TxDescRing = NULL; tp->RxDescRing = NULL; return 0; } static netdev_tx_t sis190_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sis190_private *tp = 
netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u32 len, entry, dirty_tx; struct TxDesc *desc; dma_addr_t mapping; if (unlikely(skb->len < ETH_ZLEN)) { if (skb_padto(skb, ETH_ZLEN)) { dev->stats.tx_dropped++; goto out; } len = ETH_ZLEN; } else { len = skb->len; } entry = tp->cur_tx % NUM_TX_DESC; desc = tp->TxDescRing + entry; if (unlikely(le32_to_cpu(desc->status) & OWNbit)) { netif_stop_queue(dev); netif_err(tp, tx_err, dev, "BUG! Tx Ring full when queue awake!\n"); return NETDEV_TX_BUSY; } mapping = pci_map_single(tp->pci_dev, skb->data, len, PCI_DMA_TODEVICE); if (pci_dma_mapping_error(tp->pci_dev, mapping)) { netif_err(tp, tx_err, dev, "PCI mapping failed, dropping packet"); return NETDEV_TX_BUSY; } tp->Tx_skbuff[entry] = skb; desc->PSize = cpu_to_le32(len); desc->addr = cpu_to_le32(mapping); desc->size = cpu_to_le32(len); if (entry == (NUM_TX_DESC - 1)) desc->size |= cpu_to_le32(RingEnd); wmb(); desc->status = cpu_to_le32(OWNbit | INTbit | DEFbit | CRCbit | PADbit); if (tp->negotiated_lpa & (LPA_1000HALF | LPA_100HALF | LPA_10HALF)) { /* Half Duplex */ desc->status |= cpu_to_le32(COLEN | CRSEN | BKFEN); if (tp->negotiated_lpa & (LPA_1000HALF | LPA_1000FULL)) desc->status |= cpu_to_le32(EXTEN | BSTEN); /* gigabit HD */ } tp->cur_tx++; smp_wmb(); SIS_W32(TxControl, 0x1a00 | CmdReset | CmdTxEnb); dirty_tx = tp->dirty_tx; if ((tp->cur_tx - NUM_TX_DESC) == dirty_tx) { netif_stop_queue(dev); smp_rmb(); if (dirty_tx != tp->dirty_tx) netif_wake_queue(dev); } out: return NETDEV_TX_OK; } static void sis190_free_phy(struct list_head *first_phy) { struct sis190_phy *cur, *next; list_for_each_entry_safe(cur, next, first_phy, list) { kfree(cur); } } /** * sis190_default_phy - Select default PHY for sis190 mac. * @dev: the net device to probe for * * Select first detected PHY with link as default. * If no one is link on, select PHY whose types is HOME as default. * If HOME doesn't exist, select LAN. 
*/ static u16 sis190_default_phy(struct net_device *dev) { struct sis190_phy *phy, *phy_home, *phy_default, *phy_lan; struct sis190_private *tp = netdev_priv(dev); struct mii_if_info *mii_if = &tp->mii_if; void __iomem *ioaddr = tp->mmio_addr; u16 status; phy_home = phy_default = phy_lan = NULL; list_for_each_entry(phy, &tp->first_phy, list) { status = mdio_read_latched(ioaddr, phy->phy_id, MII_BMSR); // Link ON & Not select default PHY & not ghost PHY. if ((status & BMSR_LSTATUS) && !phy_default && (phy->type != UNKNOWN)) { phy_default = phy; } else { status = mdio_read(ioaddr, phy->phy_id, MII_BMCR); mdio_write(ioaddr, phy->phy_id, MII_BMCR, status | BMCR_ANENABLE | BMCR_ISOLATE); if (phy->type == HOME) phy_home = phy; else if (phy->type == LAN) phy_lan = phy; } } if (!phy_default) { if (phy_home) phy_default = phy_home; else if (phy_lan) phy_default = phy_lan; else phy_default = list_first_entry(&tp->first_phy, struct sis190_phy, list); } if (mii_if->phy_id != phy_default->phy_id) { mii_if->phy_id = phy_default->phy_id; if (netif_msg_probe(tp)) pr_info("%s: Using transceiver at address %d as default\n", pci_name(tp->pci_dev), mii_if->phy_id); } status = mdio_read(ioaddr, mii_if->phy_id, MII_BMCR); status &= (~BMCR_ISOLATE); mdio_write(ioaddr, mii_if->phy_id, MII_BMCR, status); status = mdio_read_latched(ioaddr, mii_if->phy_id, MII_BMSR); return status; } static void sis190_init_phy(struct net_device *dev, struct sis190_private *tp, struct sis190_phy *phy, unsigned int phy_id, u16 mii_status) { void __iomem *ioaddr = tp->mmio_addr; struct mii_chip_info *p; INIT_LIST_HEAD(&phy->list); phy->status = mii_status; phy->phy_id = phy_id; phy->id[0] = mdio_read(ioaddr, phy_id, MII_PHYSID1); phy->id[1] = mdio_read(ioaddr, phy_id, MII_PHYSID2); for (p = mii_chip_table; p->type; p++) { if ((p->id[0] == phy->id[0]) && (p->id[1] == (phy->id[1] & 0xfff0))) { break; } } if (p->id[1]) { phy->type = (p->type == MIX) ? ((mii_status & (BMSR_100FULL | BMSR_100HALF)) ? 
LAN : HOME) : p->type; tp->features |= p->feature; if (netif_msg_probe(tp)) pr_info("%s: %s transceiver at address %d\n", pci_name(tp->pci_dev), p->name, phy_id); } else { phy->type = UNKNOWN; if (netif_msg_probe(tp)) pr_info("%s: unknown PHY 0x%x:0x%x transceiver at address %d\n", pci_name(tp->pci_dev), phy->id[0], (phy->id[1] & 0xfff0), phy_id); } } static void sis190_mii_probe_88e1111_fixup(struct sis190_private *tp) { if (tp->features & F_PHY_88E1111) { void __iomem *ioaddr = tp->mmio_addr; int phy_id = tp->mii_if.phy_id; u16 reg[2][2] = { { 0x808b, 0x0ce1 }, { 0x808f, 0x0c60 } }, *p; p = (tp->features & F_HAS_RGMII) ? reg[0] : reg[1]; mdio_write(ioaddr, phy_id, 0x1b, p[0]); udelay(200); mdio_write(ioaddr, phy_id, 0x14, p[1]); udelay(200); } } /** * sis190_mii_probe - Probe MII PHY for sis190 * @dev: the net device to probe for * * Search for total of 32 possible mii phy addresses. * Identify and set current phy if found one, * return error if it failed to found. */ static int sis190_mii_probe(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); struct mii_if_info *mii_if = &tp->mii_if; void __iomem *ioaddr = tp->mmio_addr; int phy_id; int rc = 0; INIT_LIST_HEAD(&tp->first_phy); for (phy_id = 0; phy_id < PHY_MAX_ADDR; phy_id++) { struct sis190_phy *phy; u16 status; status = mdio_read_latched(ioaddr, phy_id, MII_BMSR); // Try next mii if the current one is not accessible. 
if (status == 0xffff || status == 0x0000) continue; phy = kmalloc(sizeof(*phy), GFP_KERNEL); if (!phy) { sis190_free_phy(&tp->first_phy); rc = -ENOMEM; goto out; } sis190_init_phy(dev, tp, phy, phy_id, status); list_add(&tp->first_phy, &phy->list); } if (list_empty(&tp->first_phy)) { if (netif_msg_probe(tp)) pr_info("%s: No MII transceivers found!\n", pci_name(tp->pci_dev)); rc = -EIO; goto out; } /* Select default PHY for mac */ sis190_default_phy(dev); sis190_mii_probe_88e1111_fixup(tp); mii_if->dev = dev; mii_if->mdio_read = __mdio_read; mii_if->mdio_write = __mdio_write; mii_if->phy_id_mask = PHY_ID_ANY; mii_if->reg_num_mask = MII_REG_ANY; out: return rc; } static void sis190_mii_remove(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); sis190_free_phy(&tp->first_phy); } static void sis190_release_board(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct sis190_private *tp = netdev_priv(dev); iounmap(tp->mmio_addr); pci_release_regions(pdev); pci_disable_device(pdev); free_netdev(dev); } static struct net_device *sis190_init_board(struct pci_dev *pdev) { struct sis190_private *tp; struct net_device *dev; void __iomem *ioaddr; int rc; dev = alloc_etherdev(sizeof(*tp)); if (!dev) { rc = -ENOMEM; goto err_out_0; } SET_NETDEV_DEV(dev, &pdev->dev); tp = netdev_priv(dev); tp->dev = dev; tp->msg_enable = netif_msg_init(debug.msg_enable, SIS190_MSG_DEFAULT); rc = pci_enable_device(pdev); if (rc < 0) { if (netif_msg_probe(tp)) pr_err("%s: enable failure\n", pci_name(pdev)); goto err_free_dev_1; } rc = -ENODEV; if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { if (netif_msg_probe(tp)) pr_err("%s: region #0 is no MMIO resource\n", pci_name(pdev)); goto err_pci_disable_2; } if (pci_resource_len(pdev, 0) < SIS190_REGS_SIZE) { if (netif_msg_probe(tp)) pr_err("%s: invalid PCI region size(s)\n", pci_name(pdev)); goto err_pci_disable_2; } rc = pci_request_regions(pdev, DRV_NAME); if (rc < 0) { if (netif_msg_probe(tp)) 
pr_err("%s: could not request regions\n", pci_name(pdev)); goto err_pci_disable_2; } rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (rc < 0) { if (netif_msg_probe(tp)) pr_err("%s: DMA configuration failed\n", pci_name(pdev)); goto err_free_res_3; } pci_set_master(pdev); ioaddr = ioremap(pci_resource_start(pdev, 0), SIS190_REGS_SIZE); if (!ioaddr) { if (netif_msg_probe(tp)) pr_err("%s: cannot remap MMIO, aborting\n", pci_name(pdev)); rc = -EIO; goto err_free_res_3; } tp->pci_dev = pdev; tp->mmio_addr = ioaddr; tp->link_status = LNK_OFF; sis190_irq_mask_and_ack(ioaddr); sis190_soft_reset(ioaddr); out: return dev; err_free_res_3: pci_release_regions(pdev); err_pci_disable_2: pci_disable_device(pdev); err_free_dev_1: free_netdev(dev); err_out_0: dev = ERR_PTR(rc); goto out; } static void sis190_tx_timeout(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u8 tmp8; /* Disable Tx, if not already */ tmp8 = SIS_R8(TxControl); if (tmp8 & CmdTxEnb) SIS_W8(TxControl, tmp8 & ~CmdTxEnb); netif_info(tp, tx_err, dev, "Transmit timeout, status %08x %08x\n", SIS_R32(TxControl), SIS_R32(TxSts)); /* Disable interrupts by clearing the interrupt mask. */ SIS_W32(IntrMask, 0x0000); /* Stop a shared interrupt from scavenging while we are. */ spin_lock_irq(&tp->lock); sis190_tx_clear(tp); spin_unlock_irq(&tp->lock); /* ...and finally, reset everything. */ sis190_hw_start(dev); netif_wake_queue(dev); } static void sis190_set_rgmii(struct sis190_private *tp, u8 reg) { tp->features |= (reg & 0x80) ? 
F_HAS_RGMII : 0; } static int sis190_get_mac_addr_from_eeprom(struct pci_dev *pdev, struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u16 sig; int i; if (netif_msg_probe(tp)) pr_info("%s: Read MAC address from EEPROM\n", pci_name(pdev)); /* Check to see if there is a sane EEPROM */ sig = (u16) sis190_read_eeprom(ioaddr, EEPROMSignature); if ((sig == 0xffff) || (sig == 0x0000)) { if (netif_msg_probe(tp)) pr_info("%s: Error EEPROM read %x\n", pci_name(pdev), sig); return -EIO; } /* Get MAC address from EEPROM */ for (i = 0; i < ETH_ALEN / 2; i++) { u16 w = sis190_read_eeprom(ioaddr, EEPROMMACAddr + i); ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(w); } sis190_set_rgmii(tp, sis190_read_eeprom(ioaddr, EEPROMInfo)); return 0; } /** * sis190_get_mac_addr_from_apc - Get MAC address for SiS96x model * @pdev: PCI device * @dev: network device to get address for * * SiS96x model, use APC CMOS RAM to store MAC address. * APC CMOS RAM is accessed through ISA bridge. * MAC address is read into @net_dev->dev_addr. */ static int sis190_get_mac_addr_from_apc(struct pci_dev *pdev, struct net_device *dev) { static const u16 ids[] = { 0x0965, 0x0966, 0x0968 }; struct sis190_private *tp = netdev_priv(dev); struct pci_dev *isa_bridge; u8 reg, tmp8; unsigned int i; if (netif_msg_probe(tp)) pr_info("%s: Read MAC address from APC\n", pci_name(pdev)); for (i = 0; i < ARRAY_SIZE(ids); i++) { isa_bridge = pci_get_device(PCI_VENDOR_ID_SI, ids[i], NULL); if (isa_bridge) break; } if (!isa_bridge) { if (netif_msg_probe(tp)) pr_info("%s: Can not find ISA bridge\n", pci_name(pdev)); return -EIO; } /* Enable port 78h & 79h to access APC Registers. 
*/ pci_read_config_byte(isa_bridge, 0x48, &tmp8); reg = (tmp8 & ~0x02); pci_write_config_byte(isa_bridge, 0x48, reg); udelay(50); pci_read_config_byte(isa_bridge, 0x48, &reg); for (i = 0; i < ETH_ALEN; i++) { outb(0x9 + i, 0x78); dev->dev_addr[i] = inb(0x79); } outb(0x12, 0x78); reg = inb(0x79); sis190_set_rgmii(tp, reg); /* Restore the value to ISA Bridge */ pci_write_config_byte(isa_bridge, 0x48, tmp8); pci_dev_put(isa_bridge); return 0; } /** * sis190_init_rxfilter - Initialize the Rx filter * @dev: network device to initialize * * Set receive filter address to our MAC address * and enable packet filtering. */ static inline void sis190_init_rxfilter(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; u16 ctl; int i; ctl = SIS_R16(RxMacControl); /* * Disable packet filtering before setting filter. * Note: SiS's driver writes 32 bits but RxMacControl is 16 bits * only and followed by RxMacAddr (6 bytes). Strange. -- FR */ SIS_W16(RxMacControl, ctl & ~0x0f00); for (i = 0; i < ETH_ALEN; i++) SIS_W8(RxMacAddr + i, dev->dev_addr[i]); SIS_W16(RxMacControl, ctl); SIS_PCI_COMMIT(); } static int sis190_get_mac_addr(struct pci_dev *pdev, struct net_device *dev) { int rc; rc = sis190_get_mac_addr_from_eeprom(pdev, dev); if (rc < 0) { u8 reg; pci_read_config_byte(pdev, 0x73, &reg); if (reg & 0x00000001) rc = sis190_get_mac_addr_from_apc(pdev, dev); } return rc; } static void sis190_set_speed_auto(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); void __iomem *ioaddr = tp->mmio_addr; int phy_id = tp->mii_if.phy_id; int val; netif_info(tp, link, dev, "Enabling Auto-negotiation\n"); val = mdio_read(ioaddr, phy_id, MII_ADVERTISE); // Enable 10/100 Full/Half Mode, leave MII_ADVERTISE bit4:0 // unchanged. mdio_write(ioaddr, phy_id, MII_ADVERTISE, (val & ADVERTISE_SLCT) | ADVERTISE_100FULL | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_10HALF); // Enable 1000 Full Mode. 
mdio_write(ioaddr, phy_id, MII_CTRL1000, ADVERTISE_1000FULL); // Enable auto-negotiation and restart auto-negotiation. mdio_write(ioaddr, phy_id, MII_BMCR, BMCR_ANENABLE | BMCR_ANRESTART | BMCR_RESET); } static int sis190_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct sis190_private *tp = netdev_priv(dev); return mii_ethtool_gset(&tp->mii_if, cmd); } static int sis190_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct sis190_private *tp = netdev_priv(dev); return mii_ethtool_sset(&tp->mii_if, cmd); } static void sis190_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct sis190_private *tp = netdev_priv(dev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(tp->pci_dev), sizeof(info->bus_info)); } static int sis190_get_regs_len(struct net_device *dev) { return SIS190_REGS_SIZE; } static void sis190_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct sis190_private *tp = netdev_priv(dev); unsigned long flags; if (regs->len > SIS190_REGS_SIZE) regs->len = SIS190_REGS_SIZE; spin_lock_irqsave(&tp->lock, flags); memcpy_fromio(p, tp->mmio_addr, regs->len); spin_unlock_irqrestore(&tp->lock, flags); } static int sis190_nway_reset(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); return mii_nway_restart(&tp->mii_if); } static u32 sis190_get_msglevel(struct net_device *dev) { struct sis190_private *tp = netdev_priv(dev); return tp->msg_enable; } static void sis190_set_msglevel(struct net_device *dev, u32 value) { struct sis190_private *tp = netdev_priv(dev); tp->msg_enable = value; } static const struct ethtool_ops sis190_ethtool_ops = { .get_settings = sis190_get_settings, .set_settings = sis190_set_settings, .get_drvinfo = sis190_get_drvinfo, .get_regs_len = sis190_get_regs_len, .get_regs = sis190_get_regs, .get_link = ethtool_op_get_link, .get_msglevel = 
sis190_get_msglevel, .set_msglevel = sis190_set_msglevel, .nway_reset = sis190_nway_reset, }; static int sis190_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct sis190_private *tp = netdev_priv(dev); return !netif_running(dev) ? -EINVAL : generic_mii_ioctl(&tp->mii_if, if_mii(ifr), cmd, NULL); } static int sis190_mac_addr(struct net_device *dev, void *p) { int rc; rc = eth_mac_addr(dev, p); if (!rc) sis190_init_rxfilter(dev); return rc; } static const struct net_device_ops sis190_netdev_ops = { .ndo_open = sis190_open, .ndo_stop = sis190_close, .ndo_do_ioctl = sis190_ioctl, .ndo_start_xmit = sis190_start_xmit, .ndo_tx_timeout = sis190_tx_timeout, .ndo_set_rx_mode = sis190_set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = sis190_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = sis190_netpoll, #endif }; static int sis190_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version = 0; struct sis190_private *tp; struct net_device *dev; void __iomem *ioaddr; int rc; if (!printed_version) { if (netif_msg_drv(&debug)) pr_info(SIS190_DRIVER_NAME " loaded\n"); printed_version = 1; } dev = sis190_init_board(pdev); if (IS_ERR(dev)) { rc = PTR_ERR(dev); goto out; } pci_set_drvdata(pdev, dev); tp = netdev_priv(dev); ioaddr = tp->mmio_addr; rc = sis190_get_mac_addr(pdev, dev); if (rc < 0) goto err_release_board; sis190_init_rxfilter(dev); INIT_WORK(&tp->phy_task, sis190_phy_task); dev->netdev_ops = &sis190_netdev_ops; SET_ETHTOOL_OPS(dev, &sis190_ethtool_ops); dev->watchdog_timeo = SIS190_TX_TIMEOUT; spin_lock_init(&tp->lock); rc = sis190_mii_probe(dev); if (rc < 0) goto err_release_board; rc = register_netdev(dev); if (rc < 0) goto err_remove_mii; if (netif_msg_probe(tp)) { netdev_info(dev, "%s: %s at %p (IRQ: %d), %pM\n", pci_name(pdev), sis_chip_info[ent->driver_data].name, ioaddr, pdev->irq, dev->dev_addr); netdev_info(dev, "%s mode.\n", 
(tp->features & F_HAS_RGMII) ? "RGMII" : "GMII"); } netif_carrier_off(dev); sis190_set_speed_auto(dev); out: return rc; err_remove_mii: sis190_mii_remove(dev); err_release_board: sis190_release_board(pdev); goto out; } static void sis190_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct sis190_private *tp = netdev_priv(dev); sis190_mii_remove(dev); cancel_work_sync(&tp->phy_task); unregister_netdev(dev); sis190_release_board(pdev); pci_set_drvdata(pdev, NULL); } static struct pci_driver sis190_pci_driver = { .name = DRV_NAME, .id_table = sis190_pci_tbl, .probe = sis190_init_one, .remove = sis190_remove_one, }; static int __init sis190_init_module(void) { return pci_register_driver(&sis190_pci_driver); } static void __exit sis190_cleanup_module(void) { pci_unregister_driver(&sis190_pci_driver); } module_init(sis190_init_module); module_exit(sis190_cleanup_module);
gpl-2.0
pierdebeer/AudaxNote_M_Kernel
drivers/video/exynos/exynos_mipi_dsi.c
2238
13510
/* linux/drivers/video/exynos/exynos_mipi_dsi.c * * Samsung SoC MIPI-DSIM driver. * * Copyright (c) 2012 Samsung Electronics Co., Ltd * * InKi Dae, <inki.dae@samsung.com> * Donghwa Lee, <dh09.lee@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/mutex.h> #include <linux/wait.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/fb.h> #include <linux/ctype.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/memory.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/kthread.h> #include <linux/notifier.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> #include <linux/err.h> #include <video/exynos_mipi_dsim.h> #include "exynos_mipi_dsi_common.h" #include "exynos_mipi_dsi_lowlevel.h" struct mipi_dsim_ddi { int bus_id; struct list_head list; struct mipi_dsim_lcd_device *dsim_lcd_dev; struct mipi_dsim_lcd_driver *dsim_lcd_drv; }; static LIST_HEAD(dsim_ddi_list); static DEFINE_MUTEX(mipi_dsim_lock); static struct mipi_dsim_platform_data *to_dsim_plat(struct platform_device *pdev) { return pdev->dev.platform_data; } static struct regulator_bulk_data supplies[] = { { .supply = "vdd11", }, { .supply = "vdd18", }, }; static int exynos_mipi_regulator_enable(struct mipi_dsim_device *dsim) { int ret; mutex_lock(&dsim->lock); ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies); mutex_unlock(&dsim->lock); return ret; } static int exynos_mipi_regulator_disable(struct mipi_dsim_device *dsim) { int ret; mutex_lock(&dsim->lock); ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies); mutex_unlock(&dsim->lock); return ret; } /* update all register settings to MIPI DSI controller. 
*/ static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim) { /* * data from Display controller(FIMD) is not transferred in video mode * but in case of command mode, all settings is not updated to * registers. */ exynos_mipi_dsi_stand_by(dsim, 0); exynos_mipi_dsi_init_dsim(dsim); exynos_mipi_dsi_init_link(dsim); exynos_mipi_dsi_set_hs_enable(dsim); /* set display timing. */ exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config); exynos_mipi_dsi_init_interrupt(dsim); /* * data from Display controller(FIMD) is transferred in video mode * but in case of command mode, all settings are updated to registers. */ exynos_mipi_dsi_stand_by(dsim, 1); } static int exynos_mipi_dsi_early_blank_mode(struct mipi_dsim_device *dsim, int power) { struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; switch (power) { case FB_BLANK_POWERDOWN: if (dsim->suspended) return 0; if (client_drv && client_drv->suspend) client_drv->suspend(client_dev); clk_disable(dsim->clock); exynos_mipi_regulator_disable(dsim); dsim->suspended = true; break; default: break; } return 0; } static int exynos_mipi_dsi_blank_mode(struct mipi_dsim_device *dsim, int power) { struct platform_device *pdev = to_platform_device(dsim->dev); struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; switch (power) { case FB_BLANK_UNBLANK: if (!dsim->suspended) return 0; /* lcd panel power on. */ if (client_drv && client_drv->power_on) client_drv->power_on(client_dev, 1); exynos_mipi_regulator_enable(dsim); /* enable MIPI-DSI PHY. */ if (dsim->pd->phy_enable) dsim->pd->phy_enable(pdev, true); clk_enable(dsim->clock); exynos_mipi_update_cfg(dsim); /* set lcd panel sequence commands. */ if (client_drv && client_drv->set_sequence) client_drv->set_sequence(client_dev); dsim->suspended = false; break; case FB_BLANK_NORMAL: /* TODO. 
*/ break; default: break; } return 0; } int exynos_mipi_dsi_register_lcd_device(struct mipi_dsim_lcd_device *lcd_dev) { struct mipi_dsim_ddi *dsim_ddi; if (!lcd_dev->name) { pr_err("dsim_lcd_device name is NULL.\n"); return -EFAULT; } dsim_ddi = kzalloc(sizeof(struct mipi_dsim_ddi), GFP_KERNEL); if (!dsim_ddi) { pr_err("failed to allocate dsim_ddi object.\n"); return -ENOMEM; } dsim_ddi->dsim_lcd_dev = lcd_dev; mutex_lock(&mipi_dsim_lock); list_add_tail(&dsim_ddi->list, &dsim_ddi_list); mutex_unlock(&mipi_dsim_lock); return 0; } static struct mipi_dsim_ddi *exynos_mipi_dsi_find_lcd_device( struct mipi_dsim_lcd_driver *lcd_drv) { struct mipi_dsim_ddi *dsim_ddi, *next; struct mipi_dsim_lcd_device *lcd_dev; mutex_lock(&mipi_dsim_lock); list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) { if (!dsim_ddi) goto out; lcd_dev = dsim_ddi->dsim_lcd_dev; if (!lcd_dev) continue; if ((strcmp(lcd_drv->name, lcd_dev->name)) == 0) { /** * bus_id would be used to identify * connected bus. */ dsim_ddi->bus_id = lcd_dev->bus_id; mutex_unlock(&mipi_dsim_lock); return dsim_ddi; } list_del(&dsim_ddi->list); kfree(dsim_ddi); } out: mutex_unlock(&mipi_dsim_lock); return NULL; } int exynos_mipi_dsi_register_lcd_driver(struct mipi_dsim_lcd_driver *lcd_drv) { struct mipi_dsim_ddi *dsim_ddi; if (!lcd_drv->name) { pr_err("dsim_lcd_driver name is NULL.\n"); return -EFAULT; } dsim_ddi = exynos_mipi_dsi_find_lcd_device(lcd_drv); if (!dsim_ddi) { pr_err("mipi_dsim_ddi object not found.\n"); return -EFAULT; } dsim_ddi->dsim_lcd_drv = lcd_drv; pr_info("registered panel driver(%s) to mipi-dsi driver.\n", lcd_drv->name); return 0; } static struct mipi_dsim_ddi *exynos_mipi_dsi_bind_lcd_ddi( struct mipi_dsim_device *dsim, const char *name) { struct mipi_dsim_ddi *dsim_ddi, *next; struct mipi_dsim_lcd_driver *lcd_drv; struct mipi_dsim_lcd_device *lcd_dev; int ret; mutex_lock(&dsim->lock); list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) { lcd_drv = dsim_ddi->dsim_lcd_drv; 
lcd_dev = dsim_ddi->dsim_lcd_dev; if (!lcd_drv || !lcd_dev || (dsim->id != dsim_ddi->bus_id)) continue; dev_dbg(dsim->dev, "lcd_drv->id = %d, lcd_dev->id = %d\n", lcd_drv->id, lcd_dev->id); dev_dbg(dsim->dev, "lcd_dev->bus_id = %d, dsim->id = %d\n", lcd_dev->bus_id, dsim->id); if ((strcmp(lcd_drv->name, name) == 0)) { lcd_dev->master = dsim; lcd_dev->dev.parent = dsim->dev; dev_set_name(&lcd_dev->dev, "%s", lcd_drv->name); ret = device_register(&lcd_dev->dev); if (ret < 0) { dev_err(dsim->dev, "can't register %s, status %d\n", dev_name(&lcd_dev->dev), ret); mutex_unlock(&dsim->lock); return NULL; } dsim->dsim_lcd_dev = lcd_dev; dsim->dsim_lcd_drv = lcd_drv; mutex_unlock(&dsim->lock); return dsim_ddi; } } mutex_unlock(&dsim->lock); return NULL; } /* define MIPI-DSI Master operations. */ static struct mipi_dsim_master_ops master_ops = { .cmd_read = exynos_mipi_dsi_rd_data, .cmd_write = exynos_mipi_dsi_wr_data, .get_dsim_frame_done = exynos_mipi_dsi_get_frame_done_status, .clear_dsim_frame_done = exynos_mipi_dsi_clear_frame_done, .set_early_blank_mode = exynos_mipi_dsi_early_blank_mode, .set_blank_mode = exynos_mipi_dsi_blank_mode, }; static int exynos_mipi_dsi_probe(struct platform_device *pdev) { struct resource *res; struct mipi_dsim_device *dsim; struct mipi_dsim_config *dsim_config; struct mipi_dsim_platform_data *dsim_pd; struct mipi_dsim_ddi *dsim_ddi; int ret = -EINVAL; dsim = devm_kzalloc(&pdev->dev, sizeof(struct mipi_dsim_device), GFP_KERNEL); if (!dsim) { dev_err(&pdev->dev, "failed to allocate dsim object.\n"); return -ENOMEM; } dsim->pd = to_dsim_plat(pdev); dsim->dev = &pdev->dev; dsim->id = pdev->id; /* get mipi_dsim_platform_data. */ dsim_pd = (struct mipi_dsim_platform_data *)dsim->pd; if (dsim_pd == NULL) { dev_err(&pdev->dev, "failed to get platform data for dsim.\n"); return -EINVAL; } /* get mipi_dsim_config. 
*/ dsim_config = dsim_pd->dsim_config; if (dsim_config == NULL) { dev_err(&pdev->dev, "failed to get dsim config data.\n"); return -EINVAL; } dsim->dsim_config = dsim_config; dsim->master_ops = &master_ops; mutex_init(&dsim->lock); ret = devm_regulator_bulk_get(&pdev->dev, ARRAY_SIZE(supplies), supplies); if (ret) { dev_err(&pdev->dev, "Failed to get regulators: %d\n", ret); return ret; } dsim->clock = devm_clk_get(&pdev->dev, "dsim0"); if (IS_ERR(dsim->clock)) { dev_err(&pdev->dev, "failed to get dsim clock source\n"); return -ENODEV; } clk_enable(dsim->clock); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); dsim->reg_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(dsim->reg_base)) { ret = PTR_ERR(dsim->reg_base); goto error; } mutex_init(&dsim->lock); /* bind lcd ddi matched with panel name. */ dsim_ddi = exynos_mipi_dsi_bind_lcd_ddi(dsim, dsim_pd->lcd_panel_name); if (!dsim_ddi) { dev_err(&pdev->dev, "mipi_dsim_ddi object not found.\n"); ret = -EINVAL; goto error; } dsim->irq = platform_get_irq(pdev, 0); if (IS_ERR_VALUE(dsim->irq)) { dev_err(&pdev->dev, "failed to request dsim irq resource\n"); ret = -EINVAL; goto error; } init_completion(&dsim_wr_comp); init_completion(&dsim_rd_comp); platform_set_drvdata(pdev, dsim); ret = devm_request_irq(&pdev->dev, dsim->irq, exynos_mipi_dsi_interrupt_handler, IRQF_SHARED, dev_name(&pdev->dev), dsim); if (ret != 0) { dev_err(&pdev->dev, "failed to request dsim irq\n"); ret = -EINVAL; goto error; } /* enable interrupts */ exynos_mipi_dsi_init_interrupt(dsim); /* initialize mipi-dsi client(lcd panel). */ if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe) dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev); /* in case mipi-dsi has been enabled by bootloader */ if (dsim_pd->enabled) { exynos_mipi_regulator_enable(dsim); goto done; } /* lcd panel power on. 
*/ if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on) dsim_ddi->dsim_lcd_drv->power_on(dsim_ddi->dsim_lcd_dev, 1); exynos_mipi_regulator_enable(dsim); /* enable MIPI-DSI PHY. */ if (dsim->pd->phy_enable) dsim->pd->phy_enable(pdev, true); exynos_mipi_update_cfg(dsim); /* set lcd panel sequence commands. */ if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->set_sequence) dsim_ddi->dsim_lcd_drv->set_sequence(dsim_ddi->dsim_lcd_dev); dsim->suspended = false; done: platform_set_drvdata(pdev, dsim); dev_dbg(&pdev->dev, "%s() completed successfully (%s mode)\n", __func__, dsim_config->e_interface == DSIM_COMMAND ? "CPU" : "RGB"); return 0; error: clk_disable(dsim->clock); return ret; } static int exynos_mipi_dsi_remove(struct platform_device *pdev) { struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); struct mipi_dsim_ddi *dsim_ddi, *next; struct mipi_dsim_lcd_driver *dsim_lcd_drv; clk_disable(dsim->clock); list_for_each_entry_safe(dsim_ddi, next, &dsim_ddi_list, list) { if (dsim_ddi) { if (dsim->id != dsim_ddi->bus_id) continue; dsim_lcd_drv = dsim_ddi->dsim_lcd_drv; if (dsim_lcd_drv->remove) dsim_lcd_drv->remove(dsim_ddi->dsim_lcd_dev); kfree(dsim_ddi); } } return 0; } #ifdef CONFIG_PM_SLEEP static int exynos_mipi_dsi_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; disable_irq(dsim->irq); if (dsim->suspended) return 0; if (client_drv && client_drv->suspend) client_drv->suspend(client_dev); /* enable MIPI-DSI PHY. 
*/ if (dsim->pd->phy_enable) dsim->pd->phy_enable(pdev, false); clk_disable(dsim->clock); exynos_mipi_regulator_disable(dsim); dsim->suspended = true; return 0; } static int exynos_mipi_dsi_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; enable_irq(dsim->irq); if (!dsim->suspended) return 0; /* lcd panel power on. */ if (client_drv && client_drv->power_on) client_drv->power_on(client_dev, 1); exynos_mipi_regulator_enable(dsim); /* enable MIPI-DSI PHY. */ if (dsim->pd->phy_enable) dsim->pd->phy_enable(pdev, true); clk_enable(dsim->clock); exynos_mipi_update_cfg(dsim); /* set lcd panel sequence commands. */ if (client_drv && client_drv->set_sequence) client_drv->set_sequence(client_dev); dsim->suspended = false; return 0; } #endif static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume) }; static struct platform_driver exynos_mipi_dsi_driver = { .probe = exynos_mipi_dsi_probe, .remove = exynos_mipi_dsi_remove, .driver = { .name = "exynos-mipi-dsim", .owner = THIS_MODULE, .pm = &exynos_mipi_dsi_pm_ops, }, }; module_platform_driver(exynos_mipi_dsi_driver); MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>"); MODULE_DESCRIPTION("Samusung SoC MIPI-DSI driver"); MODULE_LICENSE("GPL");
gpl-2.0
squllcx/Axon7
arch/mips/pnx833x/stb22x/board.c
4542
3849
/* * board.c: STB225 board support. * * Copyright 2008 NXP Semiconductors * Chris Steel <chris.steel@nxp.com> * Daniel Laird <daniel.j.laird@nxp.com> * * Based on software written by: * Nikita Youshchenko <yoush@debian.org>, based on PNX8550 code. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <asm/bootinfo.h> #include <linux/mm.h> #include <pnx833x.h> #include <gpio.h> /* endianess twiddlers */ #define PNX8335_DEBUG0 0x4400 #define PNX8335_DEBUG1 0x4404 #define PNX8335_DEBUG2 0x4408 #define PNX8335_DEBUG3 0x440c #define PNX8335_DEBUG4 0x4410 #define PNX8335_DEBUG5 0x4414 #define PNX8335_DEBUG6 0x4418 #define PNX8335_DEBUG7 0x441c int prom_argc; char **prom_argv, **prom_envp; extern void prom_init_cmdline(void); extern char *prom_getenv(char *envname); const char *get_system_type(void) { return "NXP STB22x"; } static inline unsigned long env_or_default(char *env, unsigned long dfl) { char *str = prom_getenv(env); return str ? 
simple_strtol(str, 0, 0) : dfl; } void __init prom_init(void) { unsigned long memsize; prom_argc = fw_arg0; prom_argv = (char **)fw_arg1; prom_envp = (char **)fw_arg2; prom_init_cmdline(); memsize = env_or_default("memsize", 0x02000000); add_memory_region(0, memsize, BOOT_MEM_RAM); } void __init pnx833x_board_setup(void) { pnx833x_gpio_select_function_alt(4); pnx833x_gpio_select_output(4); pnx833x_gpio_select_function_alt(5); pnx833x_gpio_select_input(5); pnx833x_gpio_select_function_alt(6); pnx833x_gpio_select_input(6); pnx833x_gpio_select_function_alt(7); pnx833x_gpio_select_output(7); pnx833x_gpio_select_function_alt(25); pnx833x_gpio_select_function_alt(26); pnx833x_gpio_select_function_alt(27); pnx833x_gpio_select_function_alt(28); pnx833x_gpio_select_function_alt(29); pnx833x_gpio_select_function_alt(30); pnx833x_gpio_select_function_alt(31); pnx833x_gpio_select_function_alt(32); pnx833x_gpio_select_function_alt(33); #if IS_ENABLED(CONFIG_MTD_NAND_PLATFORM) /* Setup MIU for NAND access on CS0... * * (it seems that we must also configure CS1 for reliable operation, * otherwise the first read ID command will fail if it's read as 4 bytes * but pass if it's read as 1 word.) */ /* Setup MIU CS0 & CS1 timing */ PNX833X_MIU_SEL0 = 0; PNX833X_MIU_SEL1 = 0; PNX833X_MIU_SEL0_TIMING = 0x50003081; PNX833X_MIU_SEL1_TIMING = 0x50003081; /* Setup GPIO 00 for use as MIU CS1 (CS0 is not multiplexed, so does not need this) */ pnx833x_gpio_select_function_alt(0); /* Setup GPIO 04 to input NAND read/busy signal */ pnx833x_gpio_select_function_io(4); pnx833x_gpio_select_input(4); /* Setup GPIO 05 to disable NAND write protect */ pnx833x_gpio_select_function_io(5); pnx833x_gpio_select_output(5); pnx833x_gpio_write(1, 5); #elif IS_ENABLED(CONFIG_MTD_CFI) /* Set up MIU for 16-bit NOR access on CS0 and CS1... 
*/ /* Setup MIU CS0 & CS1 timing */ PNX833X_MIU_SEL0 = 1; PNX833X_MIU_SEL1 = 1; PNX833X_MIU_SEL0_TIMING = 0x6A08D082; PNX833X_MIU_SEL1_TIMING = 0x6A08D082; /* Setup GPIO 00 for use as MIU CS1 (CS0 is not multiplexed, so does not need this) */ pnx833x_gpio_select_function_alt(0); #endif }
gpl-2.0
MattCrystal/drunken-avenger
drivers/net/wireless/mwifiex/scan.c
4798
61162
/* * Marvell Wireless LAN device driver: scan ioctl and command handling * * Copyright (C) 2011, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include "decl.h" #include "ioctl.h" #include "util.h" #include "fw.h" #include "main.h" #include "11n.h" #include "cfg80211.h" /* The maximum number of channels the firmware can scan per command */ #define MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN 14 #define MWIFIEX_CHANNELS_PER_SCAN_CMD 4 /* Memory needed to store a max sized Channel List TLV for a firmware scan */ #define CHAN_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_header) \ + (MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN \ *sizeof(struct mwifiex_chan_scan_param_set))) /* Memory needed to store supported rate */ #define RATE_TLV_MAX_SIZE (sizeof(struct mwifiex_ie_types_rates_param_set) \ + HOSTCMD_SUPPORTED_RATES) /* Memory needed to store a max number/size WildCard SSID TLV for a firmware scan */ #define WILDCARD_SSID_TLV_MAX_SIZE \ (MWIFIEX_MAX_SSID_LIST_LENGTH * \ (sizeof(struct mwifiex_ie_types_wildcard_ssid_params) \ + IEEE80211_MAX_SSID_LEN)) /* Maximum memory needed for a mwifiex_scan_cmd_config with all TLVs at max */ #define MAX_SCAN_CFG_ALLOC (sizeof(struct mwifiex_scan_cmd_config) \ + sizeof(struct mwifiex_ie_types_num_probes) \ + 
sizeof(struct mwifiex_ie_types_htcap) \ + CHAN_TLV_MAX_SIZE \ + RATE_TLV_MAX_SIZE \ + WILDCARD_SSID_TLV_MAX_SIZE) union mwifiex_scan_cmd_config_tlv { /* Scan configuration (variable length) */ struct mwifiex_scan_cmd_config config; /* Max allocated block */ u8 config_alloc_buf[MAX_SCAN_CFG_ALLOC]; }; enum cipher_suite { CIPHER_SUITE_TKIP, CIPHER_SUITE_CCMP, CIPHER_SUITE_MAX }; static u8 mwifiex_wpa_oui[CIPHER_SUITE_MAX][4] = { { 0x00, 0x50, 0xf2, 0x02 }, /* TKIP */ { 0x00, 0x50, 0xf2, 0x04 }, /* AES */ }; static u8 mwifiex_rsn_oui[CIPHER_SUITE_MAX][4] = { { 0x00, 0x0f, 0xac, 0x02 }, /* TKIP */ { 0x00, 0x0f, 0xac, 0x04 }, /* AES */ }; /* * This function parses a given IE for a given OUI. * * This is used to parse a WPA/RSN IE to find if it has * a given oui in PTK. */ static u8 mwifiex_search_oui_in_ie(struct ie_body *iebody, u8 *oui) { u8 count; count = iebody->ptk_cnt[0]; /* There could be multiple OUIs for PTK hence 1) Take the length. 2) Check all the OUIs for AES. 3) If one of them is AES then pass success. */ while (count) { if (!memcmp(iebody->ptk_body, oui, sizeof(iebody->ptk_body))) return MWIFIEX_OUI_PRESENT; --count; if (count) iebody = (struct ie_body *) ((u8 *) iebody + sizeof(iebody->ptk_body)); } pr_debug("info: %s: OUI is not found in PTK\n", __func__); return MWIFIEX_OUI_NOT_PRESENT; } /* * This function checks if a given OUI is present in a RSN IE. * * The function first checks if a RSN IE is present or not in the * BSS descriptor. It tries to locate the OUI only if such an IE is * present. */ static u8 mwifiex_is_rsn_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher) { u8 *oui; struct ie_body *iebody; u8 ret = MWIFIEX_OUI_NOT_PRESENT; if (((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)). 
ieee_hdr.element_id == WLAN_EID_RSN))) { iebody = (struct ie_body *) (((u8 *) bss_desc->bcn_rsn_ie->data) + RSN_GTK_OUI_OFFSET); oui = &mwifiex_rsn_oui[cipher][0]; ret = mwifiex_search_oui_in_ie(iebody, oui); if (ret) return ret; } return ret; } /* * This function checks if a given OUI is present in a WPA IE. * * The function first checks if a WPA IE is present or not in the * BSS descriptor. It tries to locate the OUI only if such an IE is * present. */ static u8 mwifiex_is_wpa_oui_present(struct mwifiex_bssdescriptor *bss_desc, u32 cipher) { u8 *oui; struct ie_body *iebody; u8 ret = MWIFIEX_OUI_NOT_PRESENT; if (((bss_desc->bcn_wpa_ie) && ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id == WLAN_EID_WPA))) { iebody = (struct ie_body *) bss_desc->bcn_wpa_ie->data; oui = &mwifiex_wpa_oui[cipher][0]; ret = mwifiex_search_oui_in_ie(iebody, oui); if (ret) return ret; } return ret; } /* * This function compares two SSIDs and checks if they match. */ s32 mwifiex_ssid_cmp(struct cfg80211_ssid *ssid1, struct cfg80211_ssid *ssid2) { if (!ssid1 || !ssid2 || (ssid1->ssid_len != ssid2->ssid_len)) return -1; return memcmp(ssid1->ssid, ssid2->ssid, ssid1->ssid_len); } /* * This function checks if wapi is enabled in driver and scanned network is * compatible with it. */ static bool mwifiex_is_bss_wapi(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { if (priv->sec_info.wapi_enabled && (bss_desc->bcn_wapi_ie && ((*(bss_desc->bcn_wapi_ie)).ieee_hdr.element_id == WLAN_EID_BSS_AC_ACCESS_DELAY))) { return true; } return false; } /* * This function checks if driver is configured with no security mode and * scanned network is compatible with it. 
*/ static bool mwifiex_is_bss_no_sec(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) || ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != WLAN_EID_WPA)) && ((!bss_desc->bcn_rsn_ie) || ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) && !priv->sec_info.encryption_mode && !bss_desc->privacy) { return true; } return false; } /* * This function checks if static WEP is enabled in driver and scanned network * is compatible with it. */ static bool mwifiex_is_bss_static_wep(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { if (priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled && bss_desc->privacy) { return true; } return false; } /* * This function checks if wpa is enabled in driver and scanned network is * compatible with it. */ static bool mwifiex_is_bss_wpa(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { if (!priv->sec_info.wep_enabled && priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled && ((bss_desc->bcn_wpa_ie) && ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id == WLAN_EID_WPA)) /* * Privacy bit may NOT be set in some APs like * LinkSys WRT54G && bss_desc->privacy */ ) { dev_dbg(priv->adapter->dev, "info: %s: WPA:" " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s " "EncMode=%#x privacy=%#x\n", __func__, (bss_desc->bcn_wpa_ie) ? (*(bss_desc->bcn_wpa_ie)). vend_hdr.element_id : 0, (bss_desc->bcn_rsn_ie) ? (*(bss_desc->bcn_rsn_ie)). ieee_hdr.element_id : 0, (priv->sec_info.wep_enabled) ? "e" : "d", (priv->sec_info.wpa_enabled) ? "e" : "d", (priv->sec_info.wpa2_enabled) ? "e" : "d", priv->sec_info.encryption_mode, bss_desc->privacy); return true; } return false; } /* * This function checks if wpa2 is enabled in driver and scanned network is * compatible with it. 
*/ static bool mwifiex_is_bss_wpa2(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && priv->sec_info.wpa2_enabled && ((bss_desc->bcn_rsn_ie) && ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id == WLAN_EID_RSN))) { /* * Privacy bit may NOT be set in some APs like * LinkSys WRT54G && bss_desc->privacy */ dev_dbg(priv->adapter->dev, "info: %s: WPA2: " " wpa_ie=%#x wpa2_ie=%#x WEP=%s WPA=%s WPA2=%s " "EncMode=%#x privacy=%#x\n", __func__, (bss_desc->bcn_wpa_ie) ? (*(bss_desc->bcn_wpa_ie)). vend_hdr.element_id : 0, (bss_desc->bcn_rsn_ie) ? (*(bss_desc->bcn_rsn_ie)). ieee_hdr.element_id : 0, (priv->sec_info.wep_enabled) ? "e" : "d", (priv->sec_info.wpa_enabled) ? "e" : "d", (priv->sec_info.wpa2_enabled) ? "e" : "d", priv->sec_info.encryption_mode, bss_desc->privacy); return true; } return false; } /* * This function checks if adhoc AES is enabled in driver and scanned network is * compatible with it. */ static bool mwifiex_is_bss_adhoc_aes(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) || ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != WLAN_EID_WPA)) && ((!bss_desc->bcn_rsn_ie) || ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) && !priv->sec_info.encryption_mode && bss_desc->privacy) { return true; } return false; } /* * This function checks if dynamic WEP is enabled in driver and scanned network * is compatible with it. 
*/ static bool mwifiex_is_bss_dynamic_wep(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { if (!priv->sec_info.wep_enabled && !priv->sec_info.wpa_enabled && !priv->sec_info.wpa2_enabled && ((!bss_desc->bcn_wpa_ie) || ((*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id != WLAN_EID_WPA)) && ((!bss_desc->bcn_rsn_ie) || ((*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id != WLAN_EID_RSN)) && priv->sec_info.encryption_mode && bss_desc->privacy) { dev_dbg(priv->adapter->dev, "info: %s: dynamic " "WEP: wpa_ie=%#x wpa2_ie=%#x " "EncMode=%#x privacy=%#x\n", __func__, (bss_desc->bcn_wpa_ie) ? (*(bss_desc->bcn_wpa_ie)). vend_hdr.element_id : 0, (bss_desc->bcn_rsn_ie) ? (*(bss_desc->bcn_rsn_ie)). ieee_hdr.element_id : 0, priv->sec_info.encryption_mode, bss_desc->privacy); return true; } return false; } /* * This function checks if a scanned network is compatible with the driver * settings. * * WEP WPA WPA2 ad-hoc encrypt Network * enabled enabled enabled AES mode Privacy WPA WPA2 Compatible * 0 0 0 0 NONE 0 0 0 yes No security * 0 1 0 0 x 1x 1 x yes WPA (disable * HT if no AES) * 0 0 1 0 x 1x x 1 yes WPA2 (disable * HT if no AES) * 0 0 0 1 NONE 1 0 0 yes Ad-hoc AES * 1 0 0 0 NONE 1 0 0 yes Static WEP * (disable HT) * 0 0 0 0 !=NONE 1 0 0 yes Dynamic WEP * * Compatibility is not matched while roaming, except for mode. 
*/ static s32 mwifiex_is_network_compatible(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc, u32 mode) { struct mwifiex_adapter *adapter = priv->adapter; bss_desc->disable_11n = false; /* Don't check for compatibility if roaming */ if (priv->media_connected && (priv->bss_mode == NL80211_IFTYPE_STATION) && (bss_desc->bss_mode == NL80211_IFTYPE_STATION)) return 0; if (priv->wps.session_enable) { dev_dbg(adapter->dev, "info: return success directly in WPS period\n"); return 0; } if (mwifiex_is_bss_wapi(priv, bss_desc)) { dev_dbg(adapter->dev, "info: return success for WAPI AP\n"); return 0; } if (bss_desc->bss_mode == mode) { if (mwifiex_is_bss_no_sec(priv, bss_desc)) { /* No security */ return 0; } else if (mwifiex_is_bss_static_wep(priv, bss_desc)) { /* Static WEP enabled */ dev_dbg(adapter->dev, "info: Disable 11n in WEP mode.\n"); bss_desc->disable_11n = true; return 0; } else if (mwifiex_is_bss_wpa(priv, bss_desc)) { /* WPA enabled */ if (((priv->adapter->config_bands & BAND_GN || priv->adapter->config_bands & BAND_AN) && bss_desc->bcn_ht_cap) && !mwifiex_is_wpa_oui_present(bss_desc, CIPHER_SUITE_CCMP)) { if (mwifiex_is_wpa_oui_present (bss_desc, CIPHER_SUITE_TKIP)) { dev_dbg(adapter->dev, "info: Disable 11n if AES " "is not supported by AP\n"); bss_desc->disable_11n = true; } else { return -1; } } return 0; } else if (mwifiex_is_bss_wpa2(priv, bss_desc)) { /* WPA2 enabled */ if (((priv->adapter->config_bands & BAND_GN || priv->adapter->config_bands & BAND_AN) && bss_desc->bcn_ht_cap) && !mwifiex_is_rsn_oui_present(bss_desc, CIPHER_SUITE_CCMP)) { if (mwifiex_is_rsn_oui_present (bss_desc, CIPHER_SUITE_TKIP)) { dev_dbg(adapter->dev, "info: Disable 11n if AES " "is not supported by AP\n"); bss_desc->disable_11n = true; } else { return -1; } } return 0; } else if (mwifiex_is_bss_adhoc_aes(priv, bss_desc)) { /* Ad-hoc AES enabled */ return 0; } else if (mwifiex_is_bss_dynamic_wep(priv, bss_desc)) { /* Dynamic WEP enabled */ return 0; } /* Security 
doesn't match */ dev_dbg(adapter->dev, "info: %s: failed: wpa_ie=%#x wpa2_ie=%#x WEP=%s " "WPA=%s WPA2=%s EncMode=%#x privacy=%#x\n", __func__, (bss_desc->bcn_wpa_ie) ? (*(bss_desc->bcn_wpa_ie)).vend_hdr.element_id : 0, (bss_desc->bcn_rsn_ie) ? (*(bss_desc->bcn_rsn_ie)).ieee_hdr.element_id : 0, (priv->sec_info.wep_enabled) ? "e" : "d", (priv->sec_info.wpa_enabled) ? "e" : "d", (priv->sec_info.wpa2_enabled) ? "e" : "d", priv->sec_info.encryption_mode, bss_desc->privacy); return -1; } /* Mode doesn't match */ return -1; } /* * This function creates a channel list for the driver to scan, based * on region/band information. * * This routine is used for any scan that is not provided with a * specific channel list to scan. */ static void mwifiex_scan_create_channel_list(struct mwifiex_private *priv, const struct mwifiex_user_scan_cfg *user_scan_in, struct mwifiex_chan_scan_param_set *scan_chan_list, u8 filtered_scan) { enum ieee80211_band band; struct ieee80211_supported_band *sband; struct ieee80211_channel *ch; struct mwifiex_adapter *adapter = priv->adapter; int chan_idx = 0, i; for (band = 0; (band < IEEE80211_NUM_BANDS) ; band++) { if (!priv->wdev->wiphy->bands[band]) continue; sband = priv->wdev->wiphy->bands[band]; for (i = 0; (i < sband->n_channels) ; i++) { ch = &sband->channels[i]; if (ch->flags & IEEE80211_CHAN_DISABLED) continue; scan_chan_list[chan_idx].radio_type = band; if (user_scan_in && user_scan_in->chan_list[0].scan_time) scan_chan_list[chan_idx].max_scan_time = cpu_to_le16((u16) user_scan_in-> chan_list[0].scan_time); else if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->passive_scan_time); else scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->active_scan_time); if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN) scan_chan_list[chan_idx].chan_scan_mode_bitmap |= MWIFIEX_PASSIVE_SCAN; else scan_chan_list[chan_idx].chan_scan_mode_bitmap &= ~MWIFIEX_PASSIVE_SCAN; 
scan_chan_list[chan_idx].chan_number = (u32) ch->hw_value; if (filtered_scan) { scan_chan_list[chan_idx].max_scan_time = cpu_to_le16(adapter->specific_scan_time); scan_chan_list[chan_idx].chan_scan_mode_bitmap |= MWIFIEX_DISABLE_CHAN_FILT; } chan_idx++; } } } /* * This function constructs and sends multiple scan config commands to * the firmware. * * Previous routines in the code flow have created a scan command configuration * with any requested TLVs. This function splits the channel TLV into maximum * channels supported per scan lists and sends the portion of the channel TLV, * along with the other TLVs, to the firmware. */ static int mwifiex_scan_channel_list(struct mwifiex_private *priv, u32 max_chan_per_scan, u8 filtered_scan, struct mwifiex_scan_cmd_config *scan_cfg_out, struct mwifiex_ie_types_chan_list_param_set *chan_tlv_out, struct mwifiex_chan_scan_param_set *scan_chan_list) { int ret = 0; struct mwifiex_chan_scan_param_set *tmp_chan_list; struct mwifiex_chan_scan_param_set *start_chan; u32 tlv_idx; u32 total_scan_time; u32 done_early; if (!scan_cfg_out || !chan_tlv_out || !scan_chan_list) { dev_dbg(priv->adapter->dev, "info: Scan: Null detect: %p, %p, %p\n", scan_cfg_out, chan_tlv_out, scan_chan_list); return -1; } chan_tlv_out->header.type = cpu_to_le16(TLV_TYPE_CHANLIST); /* Set the temp channel struct pointer to the start of the desired list */ tmp_chan_list = scan_chan_list; /* Loop through the desired channel list, sending a new firmware scan commands for each max_chan_per_scan channels (or for 1,6,11 individually if configured accordingly) */ while (tmp_chan_list->chan_number) { tlv_idx = 0; total_scan_time = 0; chan_tlv_out->header.len = 0; start_chan = tmp_chan_list; done_early = false; /* * Construct the Channel TLV for the scan command. 
Continue to * insert channel TLVs until: * - the tlv_idx hits the maximum configured per scan command * - the next channel to insert is 0 (end of desired channel * list) * - done_early is set (controlling individual scanning of * 1,6,11) */ while (tlv_idx < max_chan_per_scan && tmp_chan_list->chan_number && !done_early) { dev_dbg(priv->adapter->dev, "info: Scan: Chan(%3d), Radio(%d)," " Mode(%d, %d), Dur(%d)\n", tmp_chan_list->chan_number, tmp_chan_list->radio_type, tmp_chan_list->chan_scan_mode_bitmap & MWIFIEX_PASSIVE_SCAN, (tmp_chan_list->chan_scan_mode_bitmap & MWIFIEX_DISABLE_CHAN_FILT) >> 1, le16_to_cpu(tmp_chan_list->max_scan_time)); /* Copy the current channel TLV to the command being prepared */ memcpy(chan_tlv_out->chan_scan_param + tlv_idx, tmp_chan_list, sizeof(chan_tlv_out->chan_scan_param)); /* Increment the TLV header length by the size appended */ chan_tlv_out->header.len = cpu_to_le16(le16_to_cpu(chan_tlv_out->header.len) + (sizeof(chan_tlv_out->chan_scan_param))); /* * The tlv buffer length is set to the number of bytes * of the between the channel tlv pointer and the start * of the tlv buffer. This compensates for any TLVs * that were appended before the channel list. */ scan_cfg_out->tlv_buf_len = (u32) ((u8 *) chan_tlv_out - scan_cfg_out->tlv_buf); /* Add the size of the channel tlv header and the data length */ scan_cfg_out->tlv_buf_len += (sizeof(chan_tlv_out->header) + le16_to_cpu(chan_tlv_out->header.len)); /* Increment the index to the channel tlv we are constructing */ tlv_idx++; /* Count the total scan time per command */ total_scan_time += le16_to_cpu(tmp_chan_list->max_scan_time); done_early = false; /* Stop the loop if the *current* channel is in the 1,6,11 set and we are not filtering on a BSSID or SSID. 
*/ if (!filtered_scan && (tmp_chan_list->chan_number == 1 || tmp_chan_list->chan_number == 6 || tmp_chan_list->chan_number == 11)) done_early = true; /* Increment the tmp pointer to the next channel to be scanned */ tmp_chan_list++; /* Stop the loop if the *next* channel is in the 1,6,11 set. This will cause it to be the only channel scanned on the next interation */ if (!filtered_scan && (tmp_chan_list->chan_number == 1 || tmp_chan_list->chan_number == 6 || tmp_chan_list->chan_number == 11)) done_early = true; } /* The total scan time should be less than scan command timeout value */ if (total_scan_time > MWIFIEX_MAX_TOTAL_SCAN_TIME) { dev_err(priv->adapter->dev, "total scan time %dms" " is over limit (%dms), scan skipped\n", total_scan_time, MWIFIEX_MAX_TOTAL_SCAN_TIME); ret = -1; break; } priv->adapter->scan_channels = start_chan; /* Send the scan command to the firmware with the specified cfg */ ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_SCAN, HostCmd_ACT_GEN_SET, 0, scan_cfg_out); if (ret) break; } if (ret) return -1; return 0; } /* * This function constructs a scan command configuration structure to use * in scan commands. * * Application layer or other functions can invoke network scanning * with a scan configuration supplied in a user scan configuration structure. * This structure is used as the basis of one or many scan command configuration * commands that are sent to the command processing module and eventually to the * firmware. * * This function creates a scan command configuration structure based on the * following user supplied parameters (if present): * - SSID filter * - BSSID filter * - Number of Probes to be sent * - Channel list * * If the SSID or BSSID filter is not present, the filter is disabled/cleared. * If the number of probes is not set, adapter default setting is used. 
 */
static void
mwifiex_config_scan(struct mwifiex_private *priv,
		    const struct mwifiex_user_scan_cfg *user_scan_in,
		    struct mwifiex_scan_cmd_config *scan_cfg_out,
		    struct mwifiex_ie_types_chan_list_param_set **chan_list_out,
		    struct mwifiex_chan_scan_param_set *scan_chan_list,
		    u8 *max_chan_per_scan, u8 *filtered_scan,
		    u8 *scan_current_only)
{
	struct mwifiex_adapter *adapter = priv->adapter;
	struct mwifiex_ie_types_num_probes *num_probes_tlv;
	struct mwifiex_ie_types_wildcard_ssid_params *wildcard_ssid_tlv;
	struct mwifiex_ie_types_rates_param_set *rates_tlv;
	const u8 zero_mac[ETH_ALEN] = { 0, 0, 0, 0, 0, 0 };
	u8 *tlv_pos;
	u32 num_probes;
	u32 ssid_len;
	u32 chan_idx;
	u32 scan_type;
	u16 scan_dur;
	u8 channel;
	u8 radio_type;
	int i;
	u8 ssid_filter;
	u8 rates[MWIFIEX_SUPPORTED_RATES];
	u32 rates_size;
	struct mwifiex_ie_types_htcap *ht_cap;

	/*
	 * The tlv_buf_len is calculated for each scan command.  The TLVs
	 * added in this routine will be preserved since the routine that
	 * sends the command will append channel TLVs at *chan_list_out.
	 * The difference between *chan_list_out and the tlv_buf start is
	 * used to size anything we add here.
	 */
	scan_cfg_out->tlv_buf_len = 0;

	/*
	 * Running TLV pointer.  Assigned to chan_list_out at the end of
	 * the function so later routines know where channel TLVs can be
	 * appended to the command buffer.
	 *
	 * NOTE(review): writes through tlv_pos below are not bounds-checked
	 * against the size of scan_cfg_out->tlv_buf — presumably the buffer
	 * is sized for the worst case (MWIFIEX_MAX_SSID_LIST etc.); confirm.
	 */
	tlv_pos = scan_cfg_out->tlv_buf;

	/* Start unfiltered; set true below if an SSID or BSSID filter
	 * is placed in the command. */
	*filtered_scan = false;

	/* Start as "not current-channel-only"; set true below when the
	 * caller supplied exactly one channel and it is the associated
	 * channel, so data flow need not be halted during the scan. */
	*scan_current_only = false;

	if (user_scan_in) {

		/* Default the ssid_filter flag to TRUE; cleared below for
		 * wildcard entries, and only meaningful if an SSID list
		 * was actually supplied (see "i &&" test after the loop). */
		ssid_filter = true;

		/* BSS type filter: caller's value, or adapter default */
		scan_cfg_out->bss_mode =
			(user_scan_in->bss_mode ? (u8) user_scan_in->
			 bss_mode : (u8) adapter->scan_mode);

		/* Probe count: caller's value, or adapter default */
		num_probes =
			(user_scan_in->num_probes ? user_scan_in->
			 num_probes : adapter->scan_probes);

		/*
		 * BSSID filter from the incoming configuration, if non-zero.
		 * If not set it stays disabled (all zeros).
		 */
		memcpy(scan_cfg_out->specific_bssid,
		       user_scan_in->specific_bssid,
		       sizeof(scan_cfg_out->specific_bssid));

		/* One wildcard-SSID TLV per requested SSID */
		for (i = 0; i < user_scan_in->num_ssids; i++) {
			ssid_len = user_scan_in->ssid_list[i].ssid_len;

			wildcard_ssid_tlv =
				(struct mwifiex_ie_types_wildcard_ssid_params *)
				tlv_pos;
			wildcard_ssid_tlv->header.type =
				cpu_to_le16(TLV_TYPE_WILDCARDSSID);
			wildcard_ssid_tlv->header.len = cpu_to_le16(
				(u16) (ssid_len + sizeof(wildcard_ssid_tlv->
							 max_ssid_length)));

			/*
			 * max_ssid_length = 0 tells firmware to perform a
			 * specific scan for the SSID filled, whereas
			 * max_ssid_length = IEEE80211_MAX_SSID_LEN requests
			 * a wildcard scan.
			 */
			if (ssid_len)
				wildcard_ssid_tlv->max_ssid_length = 0;
			else
				wildcard_ssid_tlv->max_ssid_length =
							IEEE80211_MAX_SSID_LEN;

			memcpy(wildcard_ssid_tlv->ssid,
			       user_scan_in->ssid_list[i].ssid, ssid_len);

			tlv_pos += (sizeof(wildcard_ssid_tlv->header)
				    + le16_to_cpu(wildcard_ssid_tlv->header.len));

			dev_dbg(adapter->dev, "info: scan: ssid[%d]: %s, %d\n",
				i, wildcard_ssid_tlv->ssid,
				wildcard_ssid_tlv->max_ssid_length);

			/* An empty wildcard SSID with a maxlen matches many
			 * or all SSIDs (maxlen == 32): not a filtered scan. */
			if (!ssid_len && wildcard_ssid_tlv->max_ssid_length)
				ssid_filter = false;
		}

		/*
		 * The default number of channels sent per command is low to
		 * keep the firmware response buffer from truncating results;
		 * that is not an issue once an SSID or BSSID filter is
		 * applied in the firmware, so mark the scan filtered.
		 */
		if ((i && ssid_filter) ||
		    memcmp(scan_cfg_out->specific_bssid, &zero_mac,
			   sizeof(zero_mac)))
			*filtered_scan = true;
	} else {
		/* No user configuration: use adapter defaults */
		scan_cfg_out->bss_mode = (u8) adapter->scan_mode;
		num_probes = adapter->scan_probes;
	}

	/*
	 * A filtered (specific BSSID/SSID) scan can pack the maximum number
	 * of channels per command; an unfiltered scan uses the smaller
	 * default to avoid response truncation.
	 */
	if (*filtered_scan)
		*max_chan_per_scan = MWIFIEX_MAX_CHANNELS_PER_SPECIFIC_SCAN;
	else
		*max_chan_per_scan = MWIFIEX_CHANNELS_PER_SCAN_CMD;

	/* Append num-probes TLV if configured (user or adapter) */
	if (num_probes) {

		dev_dbg(adapter->dev, "info: scan: num_probes = %d\n",
			num_probes);

		num_probes_tlv = (struct mwifiex_ie_types_num_probes *) tlv_pos;
		num_probes_tlv->header.type = cpu_to_le16(TLV_TYPE_NUMPROBES);
		num_probes_tlv->header.len =
			cpu_to_le16(sizeof(num_probes_tlv->num_probes));
		num_probes_tlv->num_probes = cpu_to_le16((u16) num_probes);

		tlv_pos += sizeof(num_probes_tlv->header) +
			le16_to_cpu(num_probes_tlv->header.len);
	}

	/* Append rates TLV (currently supported rates for this adapter) */
	memset(rates, 0, sizeof(rates));

	rates_size = mwifiex_get_supported_rates(priv, rates);

	rates_tlv = (struct mwifiex_ie_types_rates_param_set *) tlv_pos;
	rates_tlv->header.type = cpu_to_le16(WLAN_EID_SUPP_RATES);
	rates_tlv->header.len = cpu_to_le16((u16) rates_size);
	memcpy(rates_tlv->rates, rates, rates_size);
	tlv_pos += sizeof(rates_tlv->header) + rates_size;

	dev_dbg(adapter->dev, "info: SCAN_CMD: Rates size = %d\n", rates_size);

	/* Append HT capability TLV when 11n is enabled on a GN/AN band */
	if (ISSUPP_11NENABLED(priv->adapter->fw_cap_info) &&
	    (priv->adapter->config_bands & BAND_GN ||
	     priv->adapter->config_bands & BAND_AN)) {
		ht_cap = (struct mwifiex_ie_types_htcap *) tlv_pos;
		memset(ht_cap, 0,
		       sizeof(struct mwifiex_ie_types_htcap));
		ht_cap->header.type = cpu_to_le16(WLAN_EID_HT_CAPABILITY);
		ht_cap->header.len =
				cpu_to_le16(sizeof(struct ieee80211_ht_cap));
		radio_type =
			mwifiex_band_to_radio_type(priv->adapter->config_bands);
		mwifiex_fill_cap_info(priv, radio_type, ht_cap);
		tlv_pos += sizeof(struct mwifiex_ie_types_htcap);
	}

	/* Append vendor specific IE TLV */
	mwifiex_cmd_append_vsie_tlv(priv, MWIFIEX_VSIE_MASK_SCAN, &tlv_pos);

	/*
	 * Point the channel TLV output at the first byte past everything
	 * appended above (SSID, num_probes, rates, HT cap, VSIE); channel
	 * TLVs are appended here per scan command, preserving these TLVs.
	 */
	*chan_list_out =
		(struct mwifiex_ie_types_chan_list_param_set *) tlv_pos;

	if (user_scan_in && user_scan_in->chan_list[0].chan_number) {

		dev_dbg(adapter->dev,
			"info: Scan: Using supplied channel list\n");

		/* Copy each caller-supplied channel entry, filling in scan
		 * type, and per-channel dwell time (explicit or adapter
		 * default by scan type / filtering). */
		for (chan_idx = 0;
		     chan_idx < MWIFIEX_USER_SCAN_CHAN_MAX &&
		     user_scan_in->chan_list[chan_idx].chan_number;
		     chan_idx++) {

			channel = user_scan_in->chan_list[chan_idx].chan_number;
			(scan_chan_list + chan_idx)->chan_number = channel;

			radio_type =
				user_scan_in->chan_list[chan_idx].radio_type;
			(scan_chan_list + chan_idx)->radio_type = radio_type;

			scan_type = user_scan_in->chan_list[chan_idx].scan_type;

			if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
				(scan_chan_list +
				 chan_idx)->chan_scan_mode_bitmap
					|= MWIFIEX_PASSIVE_SCAN;
			else
				(scan_chan_list +
				 chan_idx)->chan_scan_mode_bitmap
					&= ~MWIFIEX_PASSIVE_SCAN;

			if (user_scan_in->chan_list[chan_idx].scan_time) {
				scan_dur = (u16) user_scan_in->
					chan_list[chan_idx].scan_time;
			} else {
				if (scan_type == MWIFIEX_SCAN_TYPE_PASSIVE)
					scan_dur = adapter->passive_scan_time;
				else if (*filtered_scan)
					scan_dur = adapter->specific_scan_time;
				else
					scan_dur = adapter->active_scan_time;
			}

			(scan_chan_list + chan_idx)->min_scan_time =
				cpu_to_le16(scan_dur);
			(scan_chan_list + chan_idx)->max_scan_time =
				cpu_to_le16(scan_dur);
		}

		/* Exactly one channel supplied and it is the associated
		 * channel: flag so data flow is not halted during the scan */
		if ((chan_idx == 1) &&
		    (user_scan_in->chan_list[0].chan_number ==
		     priv->curr_bss_params.bss_descriptor.channel)) {
			*scan_current_only = true;
			dev_dbg(adapter->dev,
				"info: Scan: Scanning current channel only\n");
		}

	} else {
		dev_dbg(adapter->dev,
			"info: Scan: Creating full region channel list\n");
		mwifiex_scan_create_channel_list(priv, user_scan_in,
						 scan_chan_list,
						 *filtered_scan);
	}
}

/*
 * This function inspects the scan response buffer for pointers to
 * expected TLVs.
 *
 * TLVs can be included at the end of the scan response BSS information.
 * On return, *tlv_data is either NULL or points to the first TLV of
 * type req_tlv_type found within tlv_buf_size bytes of tlv.
 */
static void
mwifiex_ret_802_11_scan_get_tlv_ptrs(struct mwifiex_adapter *adapter,
				     struct mwifiex_ie_types_data *tlv,
				     u32 tlv_buf_size, u32 req_tlv_type,
				     struct mwifiex_ie_types_data **tlv_data)
{
	struct mwifiex_ie_types_data *current_tlv;
	u32 tlv_buf_left;
	u32 tlv_type;
	u32 tlv_len;

	current_tlv = tlv;
	tlv_buf_left = tlv_buf_size;
	*tlv_data = NULL;

	dev_dbg(adapter->dev, "info: SCAN_RESP: tlv_buf_size = %d\n",
		tlv_buf_size);

	/* Walk TLVs while at least one full header remains */
	while (tlv_buf_left >= sizeof(struct mwifiex_ie_types_header)) {

		tlv_type = le16_to_cpu(current_tlv->header.type);
		tlv_len = le16_to_cpu(current_tlv->header.len);

		/* Reject a TLV whose declared length overruns the buffer */
		if (sizeof(tlv->header) + tlv_len > tlv_buf_left) {
			dev_err(adapter->dev, "SCAN_RESP: TLV buffer corrupt\n");
			break;
		}

		if (req_tlv_type == tlv_type) {
			switch (tlv_type) {
			case TLV_TYPE_TSFTIMESTAMP:
				dev_dbg(adapter->dev, "info: SCAN_RESP: TSF "
					"timestamp TLV, len = %d\n", tlv_len);
				*tlv_data = (struct mwifiex_ie_types_data *)
					current_tlv;
				break;
			case TLV_TYPE_CHANNELBANDLIST:
				dev_dbg(adapter->dev, "info: SCAN_RESP: channel"
					" band list TLV, len = %d\n", tlv_len);
				*tlv_data = (struct mwifiex_ie_types_data *)
					current_tlv;
				break;
			default:
				dev_err(adapter->dev,
					"SCAN_RESP: unhandled TLV = %d\n",
					tlv_type);
				/* Give up, this seems corrupted */
				return;
			}
		}

		/* Requested TLV found: done */
		if (*tlv_data)
			break;


		tlv_buf_left -= (sizeof(tlv->header) + tlv_len);
		current_tlv =
			(struct mwifiex_ie_types_data *) (current_tlv->data +
							  tlv_len);

	}			/* while */
}

/*
 * This function parses the provided beacon buffer and updates the
 * respective fields in the bss descriptor structure.
 *
 * bss_entry->beacon_buf is pointed at ie_buf (NOT copied), and the
 * bcn_* pointers/offsets cached in bss_entry point into that buffer;
 * the caller owns ie_buf and must keep it alive for the descriptor's
 * lifetime (see mwifiex_save_curr_bcn for the re-basing on copy).
 *
 * Returns 0 on success, -1 if an IE length overruns the buffer.
 */
int
mwifiex_update_bss_desc_with_ie(struct mwifiex_adapter *adapter,
				struct mwifiex_bssdescriptor *bss_entry,
				u8 *ie_buf, u32 ie_len)
{
	int ret = 0;
	u8 element_id;
	struct ieee_types_fh_param_set *fh_param_set;
	struct ieee_types_ds_param_set *ds_param_set;
	struct ieee_types_cf_param_set *cf_param_set;
	struct ieee_types_ibss_param_set *ibss_param_set;
	u8 *current_ptr;
	u8 *rate;
	u8 element_len;
	u16 total_ie_len;
	u8 bytes_to_copy;
	u8 rate_size;
	u8 found_data_rate_ie;
	u32 bytes_left;
	struct ieee_types_vendor_specific *vendor_ie;
	const u8 wpa_oui[4] = { 0x00, 0x50, 0xf2, 0x01 };
	const u8 wmm_oui[4] = { 0x00, 0x50, 0xf2, 0x02 };

	found_data_rate_ie = false;
	rate_size = 0;
	current_ptr = ie_buf;
	bytes_left = ie_len;
	bss_entry->beacon_buf = ie_buf;
	bss_entry->beacon_buf_size = ie_len;

	/* Process variable IE */
	while (bytes_left >= 2) {
		element_id = *current_ptr;
		element_len = *(current_ptr + 1);
		total_ie_len = element_len + sizeof(struct ieee_types_header);

		if (bytes_left < total_ie_len) {
			dev_err(adapter->dev, "err: InterpretIE: in processing"
				" IE, bytes left < IE length\n");
			return -1;
		}
		switch (element_id) {
		case WLAN_EID_SSID:
			/* NOTE(review): element_len is not clamped to
			 * sizeof(bss_entry->ssid.ssid); presumably the
			 * firmware never reports SSIDs longer than
			 * IEEE80211_MAX_SSID_LEN — TODO confirm. */
			bss_entry->ssid.ssid_len = element_len;
			memcpy(bss_entry->ssid.ssid, (current_ptr + 2),
			       element_len);
			dev_dbg(adapter->dev,
				"info: InterpretIE: ssid: %-32s\n",
				bss_entry->ssid.ssid);
			break;

		case WLAN_EID_SUPP_RATES:
			memcpy(bss_entry->data_rates, current_ptr + 2,
			       element_len);
			memcpy(bss_entry->supported_rates, current_ptr + 2,
			       element_len);
			rate_size = element_len;
			found_data_rate_ie = true;
			break;

		case WLAN_EID_FH_PARAMS:
			fh_param_set =
				(struct ieee_types_fh_param_set *) current_ptr;
			memcpy(&bss_entry->phy_param_set.fh_param_set,
			       fh_param_set,
			       sizeof(struct ieee_types_fh_param_set));
			break;

		case WLAN_EID_DS_PARAMS:
			ds_param_set =
				(struct ieee_types_ds_param_set *) current_ptr;

			bss_entry->channel = ds_param_set->current_chan;

			memcpy(&bss_entry->phy_param_set.ds_param_set,
			       ds_param_set,
			       sizeof(struct ieee_types_ds_param_set));
			break;

		case WLAN_EID_CF_PARAMS:
			cf_param_set =
				(struct ieee_types_cf_param_set *) current_ptr;
			memcpy(&bss_entry->ss_param_set.cf_param_set,
			       cf_param_set,
			       sizeof(struct ieee_types_cf_param_set));
			break;

		case WLAN_EID_IBSS_PARAMS:
			ibss_param_set =
				(struct ieee_types_ibss_param_set *)
				current_ptr;
			memcpy(&bss_entry->ss_param_set.ibss_param_set,
			       ibss_param_set,
			       sizeof(struct ieee_types_ibss_param_set));
			break;

		case WLAN_EID_ERP_INFO:
			bss_entry->erp_flags = *(current_ptr + 2);
			break;

		case WLAN_EID_EXT_SUPP_RATES:
			/*
			 * Only process the extended supported rates if the
			 * data rates were already found; the data rate IE
			 * should come before the extended supported rate IE.
			 */
			if (found_data_rate_ie) {
				if ((element_len + rate_size) >
				    MWIFIEX_SUPPORTED_RATES)
					bytes_to_copy =
						(MWIFIEX_SUPPORTED_RATES -
						 rate_size);
				else
					bytes_to_copy = element_len;

				/* Append to the existing rates arrays */
				rate = (u8 *) bss_entry->data_rates;
				rate += rate_size;
				memcpy(rate, current_ptr + 2, bytes_to_copy);

				rate = (u8 *) bss_entry->supported_rates;
				rate += rate_size;
				memcpy(rate, current_ptr + 2, bytes_to_copy);
			}
			break;

		case WLAN_EID_VENDOR_SPECIFIC:
			vendor_ie = (struct ieee_types_vendor_specific *)
				current_ptr;

			if (!memcmp
			    (vendor_ie->vend_hdr.oui, wpa_oui,
			     sizeof(wpa_oui))) {
				/* WPA vendor IE: cache pointer and offset */
				bss_entry->bcn_wpa_ie =
					(struct ieee_types_vendor_specific *)
					current_ptr;
				bss_entry->wpa_offset = (u16) (current_ptr -
							bss_entry->beacon_buf);
			} else if (!memcmp(vendor_ie->vend_hdr.oui, wmm_oui,
					   sizeof(wmm_oui))) {
				if (total_ie_len ==
				    sizeof(struct ieee_types_wmm_parameter) ||
				    total_ie_len ==
				    sizeof(struct ieee_types_wmm_info))
					/*
					 * Only accept and copy the WMM IE if
					 * it matches the size expected for
					 * the WMM Info IE or the WMM
					 * Parameter IE.
					 */
					memcpy((u8 *) &bss_entry->wmm_ie,
					       current_ptr, total_ie_len);
			}
			break;
		case WLAN_EID_RSN:
			bss_entry->bcn_rsn_ie =
				(struct ieee_types_generic *) current_ptr;
			bss_entry->rsn_offset = (u16) (current_ptr -
						       bss_entry->beacon_buf);
			break;
		case WLAN_EID_BSS_AC_ACCESS_DELAY:
			/* Same element ID value as the WAPI IE; treated as
			 * WAPI here — presumably intentional, verify against
			 * firmware behaviour */
			bss_entry->bcn_wapi_ie =
				(struct ieee_types_generic *) current_ptr;
			bss_entry->wapi_offset = (u16) (current_ptr -
							bss_entry->beacon_buf);
			break;
		case WLAN_EID_HT_CAPABILITY:
			bss_entry->bcn_ht_cap = (struct ieee80211_ht_cap *)
					(current_ptr +
					 sizeof(struct ieee_types_header));
			bss_entry->ht_cap_offset = (u16) (current_ptr +
					sizeof(struct ieee_types_header) -
					bss_entry->beacon_buf);
			break;
		case WLAN_EID_HT_INFORMATION:
			bss_entry->bcn_ht_info = (struct ieee80211_ht_info *)
					(current_ptr +
					 sizeof(struct ieee_types_header));
			bss_entry->ht_info_offset = (u16) (current_ptr +
					sizeof(struct ieee_types_header) -
					bss_entry->beacon_buf);
			break;
		case WLAN_EID_BSS_COEX_2040:
			bss_entry->bcn_bss_co_2040 = (u8 *) (current_ptr +
					sizeof(struct ieee_types_header));
			bss_entry->bss_co_2040_offset = (u16) (current_ptr +
					sizeof(struct ieee_types_header) -
					bss_entry->beacon_buf);
			break;
		case WLAN_EID_EXT_CAPABILITY:
			bss_entry->bcn_ext_cap = (u8 *) (current_ptr +
					sizeof(struct ieee_types_header));
			bss_entry->ext_cap_offset = (u16) (current_ptr +
					sizeof(struct ieee_types_header) -
					bss_entry->beacon_buf);
			break;
		default:
			break;
		}

		current_ptr += element_len + 2;

		/* Need to account for IE ID and IE Len */
		bytes_left -= (element_len + 2);

	}	/* while (bytes_left > 2) */
	return ret;
}

/*
 * This function converts a radio type scan parameter to a band
 * configuration to be used in the join command.
 */
static u8
mwifiex_radio_type_to_band(u8 radio_type)
{
	switch (radio_type) {
	case HostCmd_SCAN_RADIO_TYPE_A:
		return BAND_A;
	case HostCmd_SCAN_RADIO_TYPE_BG:
	default:
		return BAND_G;
	}
}

/*
 * This is an internal function used to start a scan based on an input
 * configuration.
* * This uses the input user scan configuration information when provided in * order to send the appropriate scan commands to firmware to populate or * update the internal driver scan table. */ static int mwifiex_scan_networks(struct mwifiex_private *priv, const struct mwifiex_user_scan_cfg *user_scan_in) { int ret = 0; struct mwifiex_adapter *adapter = priv->adapter; struct cmd_ctrl_node *cmd_node; union mwifiex_scan_cmd_config_tlv *scan_cfg_out; struct mwifiex_ie_types_chan_list_param_set *chan_list_out; u32 buf_size; struct mwifiex_chan_scan_param_set *scan_chan_list; u8 filtered_scan; u8 scan_current_chan_only; u8 max_chan_per_scan; unsigned long flags; if (adapter->scan_processing) { dev_dbg(adapter->dev, "cmd: Scan already in process...\n"); return ret; } spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->scan_processing = true; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); if (priv->scan_block) { dev_dbg(adapter->dev, "cmd: Scan is blocked during association...\n"); return ret; } scan_cfg_out = kzalloc(sizeof(union mwifiex_scan_cmd_config_tlv), GFP_KERNEL); if (!scan_cfg_out) { dev_err(adapter->dev, "failed to alloc scan_cfg_out\n"); return -ENOMEM; } buf_size = sizeof(struct mwifiex_chan_scan_param_set) * MWIFIEX_USER_SCAN_CHAN_MAX; scan_chan_list = kzalloc(buf_size, GFP_KERNEL); if (!scan_chan_list) { dev_err(adapter->dev, "failed to alloc scan_chan_list\n"); kfree(scan_cfg_out); return -ENOMEM; } mwifiex_config_scan(priv, user_scan_in, &scan_cfg_out->config, &chan_list_out, scan_chan_list, &max_chan_per_scan, &filtered_scan, &scan_current_chan_only); ret = mwifiex_scan_channel_list(priv, max_chan_per_scan, filtered_scan, &scan_cfg_out->config, chan_list_out, scan_chan_list); /* Get scan command from scan_pending_q and put to cmd_pending_q */ if (!ret) { spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); if (!list_empty(&adapter->scan_pending_q)) { cmd_node = list_first_entry(&adapter->scan_pending_q, struct cmd_ctrl_node, 
list); list_del(&cmd_node->list); spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); adapter->cmd_queued = cmd_node; mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true); } else { spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); } } else { spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags); adapter->scan_processing = true; spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags); } kfree(scan_cfg_out); kfree(scan_chan_list); return ret; } /* * Sends IOCTL request to start a scan with user configurations. * * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. * * Upon completion, it also generates a wireless event to notify * applications. */ int mwifiex_set_user_scan_ioctl(struct mwifiex_private *priv, struct mwifiex_user_scan_cfg *scan_req) { int status; status = mwifiex_scan_networks(priv, scan_req); queue_work(priv->adapter->workqueue, &priv->adapter->main_work); return status; } /* * This function prepares a scan command to be sent to the firmware. * * This uses the scan command configuration sent to the command processing * module in command preparation stage to configure a scan command structure * to send to firmware. * * The fixed fields specifying the BSS type and BSSID filters as well as a * variable number/length of TLVs are sent in the command to firmware. 
* * Preparation also includes - * - Setting command ID, and proper size * - Ensuring correct endian-ness */ int mwifiex_cmd_802_11_scan(struct host_cmd_ds_command *cmd, struct mwifiex_scan_cmd_config *scan_cfg) { struct host_cmd_ds_802_11_scan *scan_cmd = &cmd->params.scan; /* Set fixed field variables in scan command */ scan_cmd->bss_mode = scan_cfg->bss_mode; memcpy(scan_cmd->bssid, scan_cfg->specific_bssid, sizeof(scan_cmd->bssid)); memcpy(scan_cmd->tlv_buffer, scan_cfg->tlv_buf, scan_cfg->tlv_buf_len); cmd->command = cpu_to_le16(HostCmd_CMD_802_11_SCAN); /* Size is equal to the sizeof(fixed portions) + the TLV len + header */ cmd->size = cpu_to_le16((u16) (sizeof(scan_cmd->bss_mode) + sizeof(scan_cmd->bssid) + scan_cfg->tlv_buf_len + S_DS_GEN)); return 0; } /* * This function checks compatibility of requested network with current * driver settings. */ int mwifiex_check_network_compatibility(struct mwifiex_private *priv, struct mwifiex_bssdescriptor *bss_desc) { int ret = -1; if (!bss_desc) return -1; if ((mwifiex_get_cfp(priv, (u8) bss_desc->bss_band, (u16) bss_desc->channel, 0))) { switch (priv->bss_mode) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: ret = mwifiex_is_network_compatible(priv, bss_desc, priv->bss_mode); if (ret) dev_err(priv->adapter->dev, "cannot find ssid " "%s\n", bss_desc->ssid.ssid); break; default: ret = 0; } } return ret; } static int mwifiex_update_curr_bss_params(struct mwifiex_private *priv, u8 *bssid, s32 rssi, const u8 *ie_buf, size_t ie_len, u16 beacon_period, u16 cap_info_bitmap, u8 band) { struct mwifiex_bssdescriptor *bss_desc; int ret; unsigned long flags; u8 *beacon_ie; /* Allocate and fill new bss descriptor */ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), GFP_KERNEL); if (!bss_desc) { dev_err(priv->adapter->dev, " failed to alloc bss_desc\n"); return -ENOMEM; } beacon_ie = kmemdup(ie_buf, ie_len, GFP_KERNEL); if (!beacon_ie) { kfree(bss_desc); dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); 
return -ENOMEM; } ret = mwifiex_fill_new_bss_desc(priv, bssid, rssi, beacon_ie, ie_len, beacon_period, cap_info_bitmap, band, bss_desc); if (ret) goto done; ret = mwifiex_check_network_compatibility(priv, bss_desc); if (ret) goto done; /* Update current bss descriptor parameters */ spin_lock_irqsave(&priv->curr_bcn_buf_lock, flags); priv->curr_bss_params.bss_descriptor.bcn_wpa_ie = NULL; priv->curr_bss_params.bss_descriptor.wpa_offset = 0; priv->curr_bss_params.bss_descriptor.bcn_rsn_ie = NULL; priv->curr_bss_params.bss_descriptor.rsn_offset = 0; priv->curr_bss_params.bss_descriptor.bcn_wapi_ie = NULL; priv->curr_bss_params.bss_descriptor.wapi_offset = 0; priv->curr_bss_params.bss_descriptor.bcn_ht_cap = NULL; priv->curr_bss_params.bss_descriptor.ht_cap_offset = 0; priv->curr_bss_params.bss_descriptor.bcn_ht_info = NULL; priv->curr_bss_params.bss_descriptor.ht_info_offset = 0; priv->curr_bss_params.bss_descriptor.bcn_bss_co_2040 = NULL; priv->curr_bss_params.bss_descriptor. bss_co_2040_offset = 0; priv->curr_bss_params.bss_descriptor.bcn_ext_cap = NULL; priv->curr_bss_params.bss_descriptor.ext_cap_offset = 0; priv->curr_bss_params.bss_descriptor.beacon_buf = NULL; priv->curr_bss_params.bss_descriptor.beacon_buf_size = 0; /* Make a copy of current BSSID descriptor */ memcpy(&priv->curr_bss_params.bss_descriptor, bss_desc, sizeof(priv->curr_bss_params.bss_descriptor)); mwifiex_save_curr_bcn(priv); spin_unlock_irqrestore(&priv->curr_bcn_buf_lock, flags); done: kfree(bss_desc); kfree(beacon_ie); return 0; } /* * This function handles the command response of scan. * * The response buffer for the scan command has the following * memory layout: * * .-------------------------------------------------------------. * | Header (4 * sizeof(t_u16)): Standard command response hdr | * .-------------------------------------------------------------. * | BufSize (t_u16) : sizeof the BSS Description data | * .-------------------------------------------------------------. 
 * | NumOfSet (t_u8) : Number of BSS Descs returned              |
 * .-------------------------------------------------------------.
 * |  BSSDescription data (variable, size given in BufSize)      |
 * .-------------------------------------------------------------.
 * |  TLV data (variable, size calculated using Header->Size,    |
 * |  BufSize and sizeof the fixed fields above)                 |
 * .-------------------------------------------------------------.
 */
int mwifiex_ret_802_11_scan(struct mwifiex_private *priv,
			    struct host_cmd_ds_command *resp)
{
	int ret = 0;
	struct mwifiex_adapter *adapter = priv->adapter;
	struct cmd_ctrl_node *cmd_node;
	struct host_cmd_ds_802_11_scan_rsp *scan_rsp;
	struct mwifiex_ie_types_data *tlv_data;
	struct mwifiex_ie_types_tsf_timestamp *tsf_tlv;
	u8 *bss_info;
	u32 scan_resp_size;
	u32 bytes_left;
	u32 idx;
	u32 tlv_buf_size;
	struct mwifiex_chan_freq_power *cfp;
	struct mwifiex_ie_types_chan_band_list_param_set *chan_band_tlv;
	struct chan_band_param_set *chan_band;
	u8 is_bgscan_resp;
	unsigned long flags;
	struct cfg80211_bss *bss;

	/* Both the plain scan and the BG scan query share this handler;
	 * they differ only in where the scan response sits in the union */
	is_bgscan_resp = (le16_to_cpu(resp->command)
			  == HostCmd_CMD_802_11_BG_SCAN_QUERY);
	if (is_bgscan_resp)
		scan_rsp = &resp->params.bg_scan_query_resp.scan_resp;
	else
		scan_rsp = &resp->params.scan_resp;


	if (scan_rsp->number_of_sets > MWIFIEX_MAX_AP) {
		dev_err(adapter->dev, "SCAN_RESP: too many AP returned (%d)\n",
			scan_rsp->number_of_sets);
		ret = -1;
		goto done;
	}

	bytes_left = le16_to_cpu(scan_rsp->bss_descript_size);
	dev_dbg(adapter->dev, "info: SCAN_RESP: bss_descript_size %d\n",
		bytes_left);

	scan_resp_size = le16_to_cpu(resp->size);

	dev_dbg(adapter->dev,
		"info: SCAN_RESP: returned %d APs before parsing\n",
		scan_rsp->number_of_sets);

	bss_info = scan_rsp->bss_desc_and_tlv_buffer;

	/*
	 * The size of the TLV buffer is equal to the entire command response
	 * size (scan_resp_size) minus the fixed fields (sizeof()'s), the
	 * BSS Descriptions (bss_descript_size as bytesLef) and the command
	 * response header (S_DS_GEN)
	 */
	tlv_buf_size = scan_resp_size - (bytes_left
					 + sizeof(scan_rsp->bss_descript_size)
					 + sizeof(scan_rsp->number_of_sets)
					 + S_DS_GEN);

	tlv_data = (struct mwifiex_ie_types_data *) (scan_rsp->
						     bss_desc_and_tlv_buffer +
						     bytes_left);

	/* Search the TLV buffer space in the scan response
	   for any valid TLVs */
	mwifiex_ret_802_11_scan_get_tlv_ptrs(adapter, tlv_data, tlv_buf_size,
					     TLV_TYPE_TSFTIMESTAMP,
					     (struct mwifiex_ie_types_data **)
					     &tsf_tlv);

	/* Search the TLV buffer space in the scan response
	   for any valid TLVs */
	mwifiex_ret_802_11_scan_get_tlv_ptrs(adapter, tlv_data, tlv_buf_size,
					     TLV_TYPE_CHANNELBANDLIST,
					     (struct mwifiex_ie_types_data **)
					     &chan_band_tlv);

	/* One iteration per BSS entry in the response buffer */
	for (idx = 0; idx < scan_rsp->number_of_sets && bytes_left; idx++) {
		u8 bssid[ETH_ALEN];
		s32 rssi;
		const u8 *ie_buf;
		size_t ie_len;
		u16 channel = 0;
		u64 network_tsf = 0;
		u16 beacon_size = 0;
		u32 curr_bcn_bytes;
		u32 freq;
		u16 beacon_period;
		u16 cap_info_bitmap;
		u8 *current_ptr;
		struct mwifiex_bcn_param *bcn_param;

		if (bytes_left >= sizeof(beacon_size)) {
			/* Extract & convert beacon size from command buffer */
			memcpy(&beacon_size, bss_info, sizeof(beacon_size));
			bytes_left -= sizeof(beacon_size);
			bss_info += sizeof(beacon_size);
		}

		if (!beacon_size || beacon_size > bytes_left) {
			/* NOTE(review): this returns from mid-loop WITHOUT
			 * running the scan_pending_q / scan_processing
			 * cleanup at the bottom — presumably the scan state
			 * remains stuck after a corrupt response; verify. */
			bss_info += bytes_left;
			bytes_left = 0;
			return -1;
		}

		/* Initialize the current working beacon pointer for this BSS
		 * iteration */
		current_ptr = bss_info;

		/* Advance the return beacon pointer past the current beacon */
		bss_info += beacon_size;
		bytes_left -= beacon_size;

		curr_bcn_bytes = beacon_size;

		/*
		 * First 5 fields are bssid, RSSI, time stamp, beacon interval,
		 * and capability information
		 */
		if (curr_bcn_bytes < sizeof(struct mwifiex_bcn_param)) {
			dev_err(adapter->dev,
				"InterpretIE: not enough bytes left\n");
			continue;
		}
		bcn_param = (struct mwifiex_bcn_param *)current_ptr;
		current_ptr += sizeof(*bcn_param);
		curr_bcn_bytes -= sizeof(*bcn_param);

		memcpy(bssid, bcn_param->bssid, ETH_ALEN);

		rssi = (s32) (bcn_param->rssi);
		dev_dbg(adapter->dev, "info: InterpretIE: RSSI=%02X\n", rssi);

		beacon_period = le16_to_cpu(bcn_param->beacon_period);

		cap_info_bitmap = le16_to_cpu(bcn_param->cap_info_bitmap);
		dev_dbg(adapter->dev, "info: InterpretIE: capabilities=0x%X\n",
			cap_info_bitmap);

		/* Rest of the current buffer are IE's */
		ie_buf = current_ptr;
		ie_len = curr_bcn_bytes;
		dev_dbg(adapter->dev,
			"info: InterpretIE: IELength for this AP = %d\n",
			curr_bcn_bytes);

		/* Walk the IEs only far enough to find the DS Params IE,
		 * which carries the channel number */
		while (curr_bcn_bytes >= sizeof(struct ieee_types_header)) {
			u8 element_id, element_len;

			element_id = *current_ptr;
			element_len = *(current_ptr + 1);
			if (curr_bcn_bytes < element_len +
					sizeof(struct ieee_types_header)) {
				dev_err(priv->adapter->dev,
					"%s: bytes left < IE length\n",
					__func__);
				goto done;
			}
			if (element_id == WLAN_EID_DS_PARAMS) {
				channel = *(u8 *) (current_ptr +
					sizeof(struct ieee_types_header));
				break;
			}

			current_ptr += element_len +
					sizeof(struct ieee_types_header);
			curr_bcn_bytes -= element_len +
					sizeof(struct ieee_types_header);
		}

		/*
		 * If the TSF TLV was appended to the scan results, save this
		 * entry's TSF value in the networkTSF field.The networkTSF is
		 * the firmware's TSF value at the time the beacon or probe
		 * response was received.
		 */
		if (tsf_tlv)
			memcpy(&network_tsf,
			       &tsf_tlv->tsf_data[idx * TSF_DATA_SIZE],
			       sizeof(network_tsf));

		if (channel) {
			struct ieee80211_channel *chan;
			u8 band;

			band = BAND_G;
			if (chan_band_tlv) {
				chan_band =
					&chan_band_tlv->chan_band_param[idx];
				/* Low two bits of radio_type select the band */
				band = mwifiex_radio_type_to_band(
						chan_band->radio_type
						& (BIT(0) | BIT(1)));
			}

			cfp = mwifiex_get_cfp(priv, band, channel, 0);

			freq = cfp ? cfp->freq : 0;

			chan = ieee80211_get_channel(priv->wdev->wiphy, freq);

			if (chan && !(chan->flags & IEEE80211_CHAN_DISABLED)) {
				bss = cfg80211_inform_bss(priv->wdev->wiphy,
					      chan, bssid, network_tsf,
					      cap_info_bitmap, beacon_period,
					      ie_buf, ie_len, rssi, GFP_KERNEL);
				/* NOTE(review): cfg80211_inform_bss can
				 * return NULL (allocation failure); bss is
				 * dereferenced without a check — verify. */
				*(u8 *)bss->priv = band;
				cfg80211_put_bss(bss);

				/* Refresh the cached descriptor when this
				 * entry is the BSS we are associated with */
				if (priv->media_connected &&
				    !memcmp(bssid, priv->curr_bss_params.
					    bss_descriptor.mac_address,
					    ETH_ALEN))
					mwifiex_update_curr_bss_params(priv,
								bssid, rssi,
								ie_buf, ie_len,
								beacon_period,
								cap_info_bitmap,
								band);
			}
		} else {
			dev_dbg(adapter->dev, "missing BSS channel IE\n");
		}
	}

	spin_lock_irqsave(&adapter->scan_pending_q_lock, flags);
	if (list_empty(&adapter->scan_pending_q)) {
		/* No more scan commands pending: the whole scan is done */
		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);
		spin_lock_irqsave(&adapter->mwifiex_cmd_lock, flags);
		adapter->scan_processing = false;
		spin_unlock_irqrestore(&adapter->mwifiex_cmd_lock, flags);

		/* Need to indicate IOCTL complete */
		/* NOTE(review): adapter->curr_cmd is dereferenced without a
		 * NULL check — presumably guaranteed non-NULL while a command
		 * response is being handled; confirm. */
		if (adapter->curr_cmd->wait_q_enabled) {
			adapter->cmd_wait_q.status = 0;
			mwifiex_complete_cmd(adapter, adapter->curr_cmd);
		}
		if (priv->report_scan_result)
			priv->report_scan_result = false;
		if (priv->scan_pending_on_block) {
			priv->scan_pending_on_block = false;
			up(&priv->async_sem);
		}

		if (priv->user_scan_cfg) {
			dev_dbg(priv->adapter->dev,
				"info: %s: sending scan results\n", __func__);
			cfg80211_scan_done(priv->scan_request, 0);
			priv->scan_request = NULL;
			kfree(priv->user_scan_cfg);
			priv->user_scan_cfg = NULL;
		}
	} else {
		/* Get scan command from scan_pending_q and put to
		   cmd_pending_q */
		cmd_node = list_first_entry(&adapter->scan_pending_q,
					    struct cmd_ctrl_node, list);
		list_del(&cmd_node->list);
		spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags);

		mwifiex_insert_cmd_to_pending_q(adapter, cmd_node, true);
	}

done:
	return ret;
}

/*
 * This function prepares command for background scan query.
* * Preparation includes - * - Setting command ID and proper size * - Setting background scan flush parameter * - Ensuring correct endian-ness */ int mwifiex_cmd_802_11_bg_scan_query(struct host_cmd_ds_command *cmd) { struct host_cmd_ds_802_11_bg_scan_query *bg_query = &cmd->params.bg_scan_query; cmd->command = cpu_to_le16(HostCmd_CMD_802_11_BG_SCAN_QUERY); cmd->size = cpu_to_le16(sizeof(struct host_cmd_ds_802_11_bg_scan_query) + S_DS_GEN); bg_query->flush = 1; return 0; } /* * This function inserts scan command node to the scan pending queue. */ void mwifiex_queue_scan_cmd(struct mwifiex_private *priv, struct cmd_ctrl_node *cmd_node) { struct mwifiex_adapter *adapter = priv->adapter; unsigned long flags; cmd_node->wait_q_enabled = true; cmd_node->condition = &adapter->scan_wait_q_woken; spin_lock_irqsave(&adapter->scan_pending_q_lock, flags); list_add_tail(&cmd_node->list, &adapter->scan_pending_q); spin_unlock_irqrestore(&adapter->scan_pending_q_lock, flags); } /* * This function sends a scan command for all available channels to the * firmware, filtered on a specific SSID. */ static int mwifiex_scan_specific_ssid(struct mwifiex_private *priv, struct cfg80211_ssid *req_ssid) { struct mwifiex_adapter *adapter = priv->adapter; int ret = 0; struct mwifiex_user_scan_cfg *scan_cfg; if (!req_ssid) return -1; if (adapter->scan_processing) { dev_dbg(adapter->dev, "cmd: Scan already in process...\n"); return ret; } if (priv->scan_block) { dev_dbg(adapter->dev, "cmd: Scan is blocked during association...\n"); return ret; } scan_cfg = kzalloc(sizeof(struct mwifiex_user_scan_cfg), GFP_KERNEL); if (!scan_cfg) { dev_err(adapter->dev, "failed to alloc scan_cfg\n"); return -ENOMEM; } scan_cfg->ssid_list = req_ssid; scan_cfg->num_ssids = 1; ret = mwifiex_scan_networks(priv, scan_cfg); kfree(scan_cfg); return ret; } /* * Sends IOCTL request to start a scan. * * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. 
* * Scan command can be issued for both normal scan and specific SSID * scan, depending upon whether an SSID is provided or not. */ int mwifiex_request_scan(struct mwifiex_private *priv, struct cfg80211_ssid *req_ssid) { int ret; if (down_interruptible(&priv->async_sem)) { dev_err(priv->adapter->dev, "%s: acquire semaphore\n", __func__); return -1; } priv->scan_pending_on_block = true; priv->adapter->scan_wait_q_woken = false; if (req_ssid && req_ssid->ssid_len != 0) /* Specific SSID scan */ ret = mwifiex_scan_specific_ssid(priv, req_ssid); else /* Normal scan */ ret = mwifiex_scan_networks(priv, NULL); if (!ret) ret = mwifiex_wait_queue_complete(priv->adapter); if (ret == -1) { priv->scan_pending_on_block = false; up(&priv->async_sem); } return ret; } /* * This function appends the vendor specific IE TLV to a buffer. */ int mwifiex_cmd_append_vsie_tlv(struct mwifiex_private *priv, u16 vsie_mask, u8 **buffer) { int id, ret_len = 0; struct mwifiex_ie_types_vendor_param_set *vs_param_set; if (!buffer) return 0; if (!(*buffer)) return 0; /* * Traverse through the saved vendor specific IE array and append * the selected(scan/assoc/adhoc) IE as TLV to the command */ for (id = 0; id < MWIFIEX_MAX_VSIE_NUM; id++) { if (priv->vs_ie[id].mask & vsie_mask) { vs_param_set = (struct mwifiex_ie_types_vendor_param_set *) *buffer; vs_param_set->header.type = cpu_to_le16(TLV_TYPE_PASSTHROUGH); vs_param_set->header.len = cpu_to_le16((((u16) priv->vs_ie[id].ie[1]) & 0x00FF) + 2); memcpy(vs_param_set->ie, priv->vs_ie[id].ie, le16_to_cpu(vs_param_set->header.len)); *buffer += le16_to_cpu(vs_param_set->header.len) + sizeof(struct mwifiex_ie_types_header); ret_len += le16_to_cpu(vs_param_set->header.len) + sizeof(struct mwifiex_ie_types_header); } } return ret_len; } /* * This function saves a beacon buffer of the current BSS descriptor. 
* * The current beacon buffer is saved so that it can be restored in the * following cases that makes the beacon buffer not to contain the current * ssid's beacon buffer. * - The current ssid was not found somehow in the last scan. * - The current ssid was the last entry of the scan table and overloaded. */ void mwifiex_save_curr_bcn(struct mwifiex_private *priv) { struct mwifiex_bssdescriptor *curr_bss = &priv->curr_bss_params.bss_descriptor; if (!curr_bss->beacon_buf_size) return; /* allocate beacon buffer at 1st time; or if it's size has changed */ if (!priv->curr_bcn_buf || priv->curr_bcn_size != curr_bss->beacon_buf_size) { priv->curr_bcn_size = curr_bss->beacon_buf_size; kfree(priv->curr_bcn_buf); priv->curr_bcn_buf = kmalloc(curr_bss->beacon_buf_size, GFP_ATOMIC); if (!priv->curr_bcn_buf) { dev_err(priv->adapter->dev, "failed to alloc curr_bcn_buf\n"); return; } } memcpy(priv->curr_bcn_buf, curr_bss->beacon_buf, curr_bss->beacon_buf_size); dev_dbg(priv->adapter->dev, "info: current beacon saved %d\n", priv->curr_bcn_size); curr_bss->beacon_buf = priv->curr_bcn_buf; /* adjust the pointers in the current BSS descriptor */ if (curr_bss->bcn_wpa_ie) curr_bss->bcn_wpa_ie = (struct ieee_types_vendor_specific *) (curr_bss->beacon_buf + curr_bss->wpa_offset); if (curr_bss->bcn_rsn_ie) curr_bss->bcn_rsn_ie = (struct ieee_types_generic *) (curr_bss->beacon_buf + curr_bss->rsn_offset); if (curr_bss->bcn_ht_cap) curr_bss->bcn_ht_cap = (struct ieee80211_ht_cap *) (curr_bss->beacon_buf + curr_bss->ht_cap_offset); if (curr_bss->bcn_ht_info) curr_bss->bcn_ht_info = (struct ieee80211_ht_info *) (curr_bss->beacon_buf + curr_bss->ht_info_offset); if (curr_bss->bcn_bss_co_2040) curr_bss->bcn_bss_co_2040 = (u8 *) (curr_bss->beacon_buf + curr_bss->bss_co_2040_offset); if (curr_bss->bcn_ext_cap) curr_bss->bcn_ext_cap = (u8 *) (curr_bss->beacon_buf + curr_bss->ext_cap_offset); } /* * This function frees the current BSS descriptor beacon buffer. 
*/ void mwifiex_free_curr_bcn(struct mwifiex_private *priv) { kfree(priv->curr_bcn_buf); priv->curr_bcn_buf = NULL; }
gpl-2.0
ShinySide/HispAsian_Kernel_NH7
drivers/mtd/nand/h1910.c
4798
4046
/*
 *  drivers/mtd/nand/h1910.c
 *
 *  Copyright (C) 2003 Joshua Wise (joshua@joshuawise.com)
 *
 *  Derived from drivers/mtd/nand/edb7312.c
 *       Copyright (C) 2002 Marius Gröger (mag@sysgo.de)
 *       Copyright (c) 2001 Thomas Gleixner (gleixner@autronix.de)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Overview:
 *   This is a device driver for the NAND flash device found on the
 *   iPAQ h1910 board which utilizes the Samsung K9F2808 part. This is
 *   a 128Mibit (16MiB x 8 bits) NAND flash device.
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <asm/io.h>
#include <mach/hardware.h>	/* for CLPS7111_VIRT_BASE */
#include <asm/sizes.h>
#include <mach/h1900-gpio.h>
#include <mach/ipaq.h>

/*
 * MTD structure for the h1910 board
 * (the original comment said "EDB7312" — copy/paste from the driver
 * this one was derived from)
 */
static struct mtd_info *h1910_nand_mtd = NULL;

/*
 * Module stuff
 */

/*
 * Define static partitions for flash device.
 * NOTE(review): uses the obsolete GCC "label:" initializer syntax;
 * kept byte-for-byte as in the original.
 */
static struct mtd_partition partition_info[] = {
	{name:"h1910 NAND Flash", offset:0, size:16 * 1024 * 1024}
};

#define NUM_PARTITIONS 1

/*
 * hardware specific access to control-lines
 *
 * NAND_NCE: bit 0 - don't care
 * NAND_CLE: bit 1 - address bit 2
 * NAND_ALE: bit 2 - address bit 3
 */
static void h1910_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl)
{
	struct nand_chip *chip = mtd->priv;

	if (cmd != NAND_CMD_NONE)
		/*
		 * NOTE(review): bitwise OR between an __iomem pointer and an
		 * integer is not valid C and should not compile as written —
		 * presumably an integer cast of IO_ADDR_W was intended;
		 * confirm against a buildable tree before relying on this.
		 */
		writeb(cmd, chip->IO_ADDR_W | ((ctrl & 0x6) << 1));
}

/*
 * read device ready pin
 */
#if 0
static int h1910_device_ready(struct mtd_info *mtd)
{
	return (GPLR(55) & GPIO_bit(55));
}
#endif

/*
 * Main initialization routine
 */
static int __init h1910_init(void)
{
	struct nand_chip *this;
	void __iomem *nandaddr;

	if (!machine_is_h1900())
		return -ENODEV;

	/* Map the NAND controller window: 4 KiB at physical 0x08000000 */
	nandaddr = ioremap(0x08000000, 0x1000);
	if (!nandaddr) {
		printk("Failed to ioremap nand flash.\n");
		return -ENOMEM;
	}

	/*
	   Allocate memory for MTD device structure and private data */
	h1910_nand_mtd = kmalloc(sizeof(struct mtd_info) +
				 sizeof(struct nand_chip), GFP_KERNEL);
	if (!h1910_nand_mtd) {
		printk("Unable to allocate h1910 NAND MTD device structure.\n");
		iounmap((void *)nandaddr);
		return -ENOMEM;
	}

	/* Get pointer to private data (carved out right behind mtd_info) */
	this = (struct nand_chip *)(&h1910_nand_mtd[1]);

	/* Initialize structures */
	memset(h1910_nand_mtd, 0, sizeof(struct mtd_info));
	memset(this, 0, sizeof(struct nand_chip));

	/* Link the private data with the MTD structure */
	h1910_nand_mtd->priv = this;
	h1910_nand_mtd->owner = THIS_MODULE;

	/*
	 * Enable VPEN (programming voltage) via GPIO 37
	 */
	GPSR(37) = GPIO_bit(37);

	/* insert callbacks */
	this->IO_ADDR_R = nandaddr;
	this->IO_ADDR_W = nandaddr;
	this->cmd_ctrl = h1910_hwcontrol;
	this->dev_ready = NULL;	/* unknown whether that was correct or not so we will just do it like this */
	/*
	 * Command delay in microseconds.  NOTE(review): the original comment
	 * said "15 us" but the value actually used is 50 — confirm which
	 * delay the hardware requires.
	 */
	this->chip_delay = 50;
	this->ecc.mode = NAND_ECC_SOFT;
	this->options = NAND_NO_AUTOINCR;

	/* Scan to find existence of the device */
	if (nand_scan(h1910_nand_mtd, 1)) {
		printk(KERN_NOTICE "No NAND device - returning -ENXIO\n");
		kfree(h1910_nand_mtd);
		iounmap((void *)nandaddr);
		return -ENXIO;
	}

	/* Register the partitions */
	mtd_device_parse_register(h1910_nand_mtd, NULL, NULL,
				  partition_info, NUM_PARTITIONS);

	/* Return happy */
	return 0;
}

module_init(h1910_init);

/*
 * Clean up routine: unregister the MTD device and release the mapping.
 */
static void __exit h1910_cleanup(void)
{
	struct nand_chip *this = (struct nand_chip *)&h1910_nand_mtd[1];

	/* Release resources, unregister device */
	nand_release(h1910_nand_mtd);

	/* Release io resource */
	iounmap((void *)this->IO_ADDR_W);

	/* Free the MTD device structure */
	kfree(h1910_nand_mtd);
}

module_exit(h1910_cleanup);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Joshua Wise <joshua at joshuawise dot com>");
MODULE_DESCRIPTION("NAND flash driver for iPAQ h1910");
gpl-2.0
jrior001/android_kernel_htc_msm8960
sound/core/device.c
5054
6751
/* * Device management routines * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/slab.h> #include <linux/time.h> #include <linux/export.h> #include <linux/errno.h> #include <sound/core.h> /** * snd_device_new - create an ALSA device component * @card: the card instance * @type: the device type, SNDRV_DEV_XXX * @device_data: the data pointer of this device * @ops: the operator table * * Creates a new device component for the given data pointer. * The device will be assigned to the card and managed together * by the card. * * The data pointer plays a role as the identifier, too, so the * pointer address must be unique and unchanged. * * Returns zero if successful, or a negative error code on failure. 
*/ int snd_device_new(struct snd_card *card, snd_device_type_t type, void *device_data, struct snd_device_ops *ops) { struct snd_device *dev; if (snd_BUG_ON(!card || !device_data || !ops)) return -ENXIO; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { snd_printk(KERN_ERR "Cannot allocate device\n"); return -ENOMEM; } dev->card = card; dev->type = type; dev->state = SNDRV_DEV_BUILD; dev->device_data = device_data; dev->ops = ops; list_add(&dev->list, &card->devices); /* add to the head of list */ return 0; } EXPORT_SYMBOL(snd_device_new); /** * snd_device_free - release the device from the card * @card: the card instance * @device_data: the data pointer to release * * Removes the device from the list on the card and invokes the * callbacks, dev_disconnect and dev_free, corresponding to the state. * Then release the device. * * Returns zero if successful, or a negative error code on failure or if the * device not found. */ int snd_device_free(struct snd_card *card, void *device_data) { struct snd_device *dev; if (snd_BUG_ON(!card || !device_data)) return -ENXIO; list_for_each_entry(dev, &card->devices, list) { if (dev->device_data != device_data) continue; /* unlink */ list_del(&dev->list); if (dev->state == SNDRV_DEV_REGISTERED && dev->ops->dev_disconnect) if (dev->ops->dev_disconnect(dev)) snd_printk(KERN_ERR "device disconnect failure\n"); if (dev->ops->dev_free) { if (dev->ops->dev_free(dev)) snd_printk(KERN_ERR "device free failure\n"); } kfree(dev); return 0; } snd_printd("device free %p (from %pF), not found\n", device_data, __builtin_return_address(0)); return -ENXIO; } EXPORT_SYMBOL(snd_device_free); /** * snd_device_disconnect - disconnect the device * @card: the card instance * @device_data: the data pointer to disconnect * * Turns the device into the disconnection state, invoking * dev_disconnect callback, if the device was already registered. * * Usually called from snd_card_disconnect(). 
* * Returns zero if successful, or a negative error code on failure or if the * device not found. */ int snd_device_disconnect(struct snd_card *card, void *device_data) { struct snd_device *dev; if (snd_BUG_ON(!card || !device_data)) return -ENXIO; list_for_each_entry(dev, &card->devices, list) { if (dev->device_data != device_data) continue; if (dev->state == SNDRV_DEV_REGISTERED && dev->ops->dev_disconnect) { if (dev->ops->dev_disconnect(dev)) snd_printk(KERN_ERR "device disconnect failure\n"); dev->state = SNDRV_DEV_DISCONNECTED; } return 0; } snd_printd("device disconnect %p (from %pF), not found\n", device_data, __builtin_return_address(0)); return -ENXIO; } /** * snd_device_register - register the device * @card: the card instance * @device_data: the data pointer to register * * Registers the device which was already created via * snd_device_new(). Usually this is called from snd_card_register(), * but it can be called later if any new devices are created after * invocation of snd_card_register(). * * Returns zero if successful, or a negative error code on failure or if the * device not found. */ int snd_device_register(struct snd_card *card, void *device_data) { struct snd_device *dev; int err; if (snd_BUG_ON(!card || !device_data)) return -ENXIO; list_for_each_entry(dev, &card->devices, list) { if (dev->device_data != device_data) continue; if (dev->state == SNDRV_DEV_BUILD && dev->ops->dev_register) { if ((err = dev->ops->dev_register(dev)) < 0) return err; dev->state = SNDRV_DEV_REGISTERED; return 0; } snd_printd("snd_device_register busy\n"); return -EBUSY; } snd_BUG(); return -ENXIO; } EXPORT_SYMBOL(snd_device_register); /* * register all the devices on the card. 
* called from init.c */ int snd_device_register_all(struct snd_card *card) { struct snd_device *dev; int err; if (snd_BUG_ON(!card)) return -ENXIO; list_for_each_entry(dev, &card->devices, list) { if (dev->state == SNDRV_DEV_BUILD && dev->ops->dev_register) { if ((err = dev->ops->dev_register(dev)) < 0) return err; dev->state = SNDRV_DEV_REGISTERED; } } return 0; } /* * disconnect all the devices on the card. * called from init.c */ int snd_device_disconnect_all(struct snd_card *card) { struct snd_device *dev; int err = 0; if (snd_BUG_ON(!card)) return -ENXIO; list_for_each_entry(dev, &card->devices, list) { if (snd_device_disconnect(card, dev->device_data) < 0) err = -ENXIO; } return err; } /* * release all the devices on the card. * called from init.c */ int snd_device_free_all(struct snd_card *card, snd_device_cmd_t cmd) { struct snd_device *dev; int err; unsigned int range_low, range_high, type; if (snd_BUG_ON(!card)) return -ENXIO; range_low = (__force unsigned int)cmd * SNDRV_DEV_TYPE_RANGE_SIZE; range_high = range_low + SNDRV_DEV_TYPE_RANGE_SIZE - 1; __again: list_for_each_entry(dev, &card->devices, list) { type = (__force unsigned int)dev->type; if (type >= range_low && type <= range_high) { if ((err = snd_device_free(card, dev->device_data)) < 0) return err; goto __again; } } return 0; }
gpl-2.0
MoKee/android_kernel_motorola_otus
drivers/media/video/sh_vou.c
5054
40042
/* * SuperH Video Output Unit (VOU) driver * * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/module.h> #include <media/sh_vou.h> #include <media/v4l2-common.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mediabus.h> #include <media/videobuf-dma-contig.h> /* Mirror addresses are not available for all registers */ #define VOUER 0 #define VOUCR 4 #define VOUSTR 8 #define VOUVCR 0xc #define VOUISR 0x10 #define VOUBCR 0x14 #define VOUDPR 0x18 #define VOUDSR 0x1c #define VOUVPR 0x20 #define VOUIR 0x24 #define VOUSRR 0x28 #define VOUMSR 0x2c #define VOUHIR 0x30 #define VOUDFR 0x34 #define VOUAD1R 0x38 #define VOUAD2R 0x3c #define VOUAIR 0x40 #define VOUSWR 0x44 #define VOURCR 0x48 #define VOURPR 0x50 enum sh_vou_status { SH_VOU_IDLE, SH_VOU_INITIALISING, SH_VOU_RUNNING, }; #define VOU_MAX_IMAGE_WIDTH 720 #define VOU_MAX_IMAGE_HEIGHT 576 struct sh_vou_device { struct v4l2_device v4l2_dev; struct video_device *vdev; atomic_t use_count; struct sh_vou_pdata *pdata; spinlock_t lock; void __iomem *base; /* State information */ struct v4l2_pix_format pix; struct v4l2_rect rect; struct list_head queue; v4l2_std_id std; int pix_idx; struct videobuf_buffer *active; enum sh_vou_status status; struct mutex fop_lock; }; struct sh_vou_file { struct videobuf_queue vbq; }; /* Register access routines for sides A, B and mirror addresses */ static void sh_vou_reg_a_write(struct sh_vou_device *vou_dev, unsigned int 
reg, u32 value) { __raw_writel(value, vou_dev->base + reg); } static void sh_vou_reg_ab_write(struct sh_vou_device *vou_dev, unsigned int reg, u32 value) { __raw_writel(value, vou_dev->base + reg); __raw_writel(value, vou_dev->base + reg + 0x1000); } static void sh_vou_reg_m_write(struct sh_vou_device *vou_dev, unsigned int reg, u32 value) { __raw_writel(value, vou_dev->base + reg + 0x2000); } static u32 sh_vou_reg_a_read(struct sh_vou_device *vou_dev, unsigned int reg) { return __raw_readl(vou_dev->base + reg); } static void sh_vou_reg_a_set(struct sh_vou_device *vou_dev, unsigned int reg, u32 value, u32 mask) { u32 old = __raw_readl(vou_dev->base + reg); value = (value & mask) | (old & ~mask); __raw_writel(value, vou_dev->base + reg); } static void sh_vou_reg_b_set(struct sh_vou_device *vou_dev, unsigned int reg, u32 value, u32 mask) { sh_vou_reg_a_set(vou_dev, reg + 0x1000, value, mask); } static void sh_vou_reg_ab_set(struct sh_vou_device *vou_dev, unsigned int reg, u32 value, u32 mask) { sh_vou_reg_a_set(vou_dev, reg, value, mask); sh_vou_reg_b_set(vou_dev, reg, value, mask); } struct sh_vou_fmt { u32 pfmt; char *desc; unsigned char bpp; unsigned char rgb; unsigned char yf; unsigned char pkf; }; /* Further pixel formats can be added */ static struct sh_vou_fmt vou_fmt[] = { { .pfmt = V4L2_PIX_FMT_NV12, .bpp = 12, .desc = "YVU420 planar", .yf = 0, .rgb = 0, }, { .pfmt = V4L2_PIX_FMT_NV16, .bpp = 16, .desc = "YVYU planar", .yf = 1, .rgb = 0, }, { .pfmt = V4L2_PIX_FMT_RGB24, .bpp = 24, .desc = "RGB24", .pkf = 2, .rgb = 1, }, { .pfmt = V4L2_PIX_FMT_RGB565, .bpp = 16, .desc = "RGB565", .pkf = 3, .rgb = 1, }, { .pfmt = V4L2_PIX_FMT_RGB565X, .bpp = 16, .desc = "RGB565 byteswapped", .pkf = 3, .rgb = 1, }, }; static void sh_vou_schedule_next(struct sh_vou_device *vou_dev, struct videobuf_buffer *vb) { dma_addr_t addr1, addr2; addr1 = videobuf_to_dma_contig(vb); switch (vou_dev->pix.pixelformat) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV16: addr2 = addr1 + 
vou_dev->pix.width * vou_dev->pix.height; break; default: addr2 = 0; } sh_vou_reg_m_write(vou_dev, VOUAD1R, addr1); sh_vou_reg_m_write(vou_dev, VOUAD2R, addr2); } static void sh_vou_stream_start(struct sh_vou_device *vou_dev, struct videobuf_buffer *vb) { unsigned int row_coeff; #ifdef __LITTLE_ENDIAN u32 dataswap = 7; #else u32 dataswap = 0; #endif switch (vou_dev->pix.pixelformat) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV16: row_coeff = 1; break; case V4L2_PIX_FMT_RGB565: dataswap ^= 1; case V4L2_PIX_FMT_RGB565X: row_coeff = 2; break; case V4L2_PIX_FMT_RGB24: row_coeff = 3; break; } sh_vou_reg_a_write(vou_dev, VOUSWR, dataswap); sh_vou_reg_ab_write(vou_dev, VOUAIR, vou_dev->pix.width * row_coeff); sh_vou_schedule_next(vou_dev, vb); } static void free_buffer(struct videobuf_queue *vq, struct videobuf_buffer *vb) { BUG_ON(in_interrupt()); /* Wait until this buffer is no longer in STATE_QUEUED or STATE_ACTIVE */ videobuf_waiton(vq, vb, 0, 0); videobuf_dma_contig_free(vq, vb); vb->state = VIDEOBUF_NEEDS_INIT; } /* Locking: caller holds fop_lock mutex */ static int sh_vou_buf_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { struct video_device *vdev = vq->priv_data; struct sh_vou_device *vou_dev = video_get_drvdata(vdev); *size = vou_fmt[vou_dev->pix_idx].bpp * vou_dev->pix.width * vou_dev->pix.height / 8; if (*count < 2) *count = 2; /* Taking into account maximum frame size, *count will stay >= 2 */ if (PAGE_ALIGN(*size) * *count > 4 * 1024 * 1024) *count = 4 * 1024 * 1024 / PAGE_ALIGN(*size); dev_dbg(vq->dev, "%s(): count=%d, size=%d\n", __func__, *count, *size); return 0; } /* Locking: caller holds fop_lock mutex */ static int sh_vou_buf_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, enum v4l2_field field) { struct video_device *vdev = vq->priv_data; struct sh_vou_device *vou_dev = video_get_drvdata(vdev); struct v4l2_pix_format *pix = &vou_dev->pix; int bytes_per_line = vou_fmt[vou_dev->pix_idx].bpp * pix->width / 8; 
int ret; dev_dbg(vq->dev, "%s()\n", __func__); if (vb->width != pix->width || vb->height != pix->height || vb->field != pix->field) { vb->width = pix->width; vb->height = pix->height; vb->field = field; if (vb->state != VIDEOBUF_NEEDS_INIT) free_buffer(vq, vb); } vb->size = vb->height * bytes_per_line; if (vb->baddr && vb->bsize < vb->size) { /* User buffer too small */ dev_warn(vq->dev, "User buffer too small: [%u] @ %lx\n", vb->bsize, vb->baddr); return -EINVAL; } if (vb->state == VIDEOBUF_NEEDS_INIT) { ret = videobuf_iolock(vq, vb, NULL); if (ret < 0) { dev_warn(vq->dev, "IOLOCK buf-type %d: %d\n", vb->memory, ret); return ret; } vb->state = VIDEOBUF_PREPARED; } dev_dbg(vq->dev, "%s(): fmt #%d, %u bytes per line, phys 0x%x, type %d, state %d\n", __func__, vou_dev->pix_idx, bytes_per_line, videobuf_to_dma_contig(vb), vb->memory, vb->state); return 0; } /* Locking: caller holds fop_lock mutex and vq->irqlock spinlock */ static void sh_vou_buf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct video_device *vdev = vq->priv_data; struct sh_vou_device *vou_dev = video_get_drvdata(vdev); dev_dbg(vq->dev, "%s()\n", __func__); vb->state = VIDEOBUF_QUEUED; list_add_tail(&vb->queue, &vou_dev->queue); if (vou_dev->status == SH_VOU_RUNNING) { return; } else if (!vou_dev->active) { vou_dev->active = vb; /* Start from side A: we use mirror addresses, so, set B */ sh_vou_reg_a_write(vou_dev, VOURPR, 1); dev_dbg(vq->dev, "%s: first buffer status 0x%x\n", __func__, sh_vou_reg_a_read(vou_dev, VOUSTR)); sh_vou_schedule_next(vou_dev, vb); /* Only activate VOU after the second buffer */ } else if (vou_dev->active->queue.next == &vb->queue) { /* Second buffer - initialise register side B */ sh_vou_reg_a_write(vou_dev, VOURPR, 0); sh_vou_stream_start(vou_dev, vb); /* Register side switching with frame VSYNC */ sh_vou_reg_a_write(vou_dev, VOURCR, 5); dev_dbg(vq->dev, "%s: second buffer status 0x%x\n", __func__, sh_vou_reg_a_read(vou_dev, VOUSTR)); /* Enable 
End-of-Frame (VSYNC) interrupts */ sh_vou_reg_a_write(vou_dev, VOUIR, 0x10004); /* Two buffers on the queue - activate the hardware */ vou_dev->status = SH_VOU_RUNNING; sh_vou_reg_a_write(vou_dev, VOUER, 0x107); } } static void sh_vou_buf_release(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct video_device *vdev = vq->priv_data; struct sh_vou_device *vou_dev = video_get_drvdata(vdev); unsigned long flags; dev_dbg(vq->dev, "%s()\n", __func__); spin_lock_irqsave(&vou_dev->lock, flags); if (vou_dev->active == vb) { /* disable output */ sh_vou_reg_a_set(vou_dev, VOUER, 0, 1); /* ...but the current frame will complete */ sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000); vou_dev->active = NULL; } if ((vb->state == VIDEOBUF_ACTIVE || vb->state == VIDEOBUF_QUEUED)) { vb->state = VIDEOBUF_ERROR; list_del(&vb->queue); } spin_unlock_irqrestore(&vou_dev->lock, flags); free_buffer(vq, vb); } static struct videobuf_queue_ops sh_vou_video_qops = { .buf_setup = sh_vou_buf_setup, .buf_prepare = sh_vou_buf_prepare, .buf_queue = sh_vou_buf_queue, .buf_release = sh_vou_buf_release, }; /* Video IOCTLs */ static int sh_vou_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct sh_vou_file *vou_file = priv; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); strlcpy(cap->card, "SuperH VOU", sizeof(cap->card)); cap->capabilities = V4L2_CAP_VIDEO_OUTPUT | V4L2_CAP_STREAMING; return 0; } /* Enumerate formats, that the device can accept from the user */ static int sh_vou_enum_fmt_vid_out(struct file *file, void *priv, struct v4l2_fmtdesc *fmt) { struct sh_vou_file *vou_file = priv; if (fmt->index >= ARRAY_SIZE(vou_fmt)) return -EINVAL; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; strlcpy(fmt->description, vou_fmt[fmt->index].desc, sizeof(fmt->description)); fmt->pixelformat = vou_fmt[fmt->index].pfmt; return 0; } static int sh_vou_g_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *fmt) { struct 
video_device *vdev = video_devdata(file); struct sh_vou_device *vou_dev = video_get_drvdata(vdev); dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__); fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; fmt->fmt.pix = vou_dev->pix; return 0; } static const unsigned char vou_scale_h_num[] = {1, 9, 2, 9, 4}; static const unsigned char vou_scale_h_den[] = {1, 8, 1, 4, 1}; static const unsigned char vou_scale_h_fld[] = {0, 2, 1, 3}; static const unsigned char vou_scale_v_num[] = {1, 2, 4}; static const unsigned char vou_scale_v_den[] = {1, 1, 1}; static const unsigned char vou_scale_v_fld[] = {0, 1}; static void sh_vou_configure_geometry(struct sh_vou_device *vou_dev, int pix_idx, int w_idx, int h_idx) { struct sh_vou_fmt *fmt = vou_fmt + pix_idx; unsigned int black_left, black_top, width_max, height_max, frame_in_height, frame_out_height, frame_out_top; struct v4l2_rect *rect = &vou_dev->rect; struct v4l2_pix_format *pix = &vou_dev->pix; u32 vouvcr = 0, dsr_h, dsr_v; if (vou_dev->std & V4L2_STD_525_60) { width_max = 858; height_max = 262; } else { width_max = 864; height_max = 312; } frame_in_height = pix->height / 2; frame_out_height = rect->height / 2; frame_out_top = rect->top / 2; /* * Cropping scheme: max useful image is 720x480, and the total video * area is 858x525 (NTSC) or 864x625 (PAL). AK8813 / 8814 starts * sampling data beginning with fixed 276th (NTSC) / 288th (PAL) clock, * of which the first 33 / 25 clocks HSYNC must be held active. This * has to be configured in CR[HW]. 1 pixel equals 2 clock periods. * This gives CR[HW] = 16 / 12, VPR[HVP] = 138 / 144, which gives * exactly 858 - 138 = 864 - 144 = 720! We call the out-of-display area, * beyond DSR, specified on the left and top by the VPR register "black * pixels" and out-of-image area (DPR) "background pixels." 
We fix VPR * at 138 / 144 : 20, because that's the HSYNC timing, that our first * client requires, and that's exactly what leaves us 720 pixels for the * image; we leave VPR[VVP] at default 20 for now, because the client * doesn't seem to have any special requirements for it. Otherwise we * could also set it to max - 240 = 22 / 72. Thus VPR depends only on * the selected standard, and DPR and DSR are selected according to * cropping. Q: how does the client detect the first valid line? Does * HSYNC stay inactive during invalid (black) lines? */ black_left = width_max - VOU_MAX_IMAGE_WIDTH; black_top = 20; dsr_h = rect->width + rect->left; dsr_v = frame_out_height + frame_out_top; dev_dbg(vou_dev->v4l2_dev.dev, "image %ux%u, black %u:%u, offset %u:%u, display %ux%u\n", pix->width, frame_in_height, black_left, black_top, rect->left, frame_out_top, dsr_h, dsr_v); /* VOUISR height - half of a frame height in frame mode */ sh_vou_reg_ab_write(vou_dev, VOUISR, (pix->width << 16) | frame_in_height); sh_vou_reg_ab_write(vou_dev, VOUVPR, (black_left << 16) | black_top); sh_vou_reg_ab_write(vou_dev, VOUDPR, (rect->left << 16) | frame_out_top); sh_vou_reg_ab_write(vou_dev, VOUDSR, (dsr_h << 16) | dsr_v); /* * if necessary, we could set VOUHIR to * max(black_left + dsr_h, width_max) here */ if (w_idx) vouvcr |= (1 << 15) | (vou_scale_h_fld[w_idx - 1] << 4); if (h_idx) vouvcr |= (1 << 14) | vou_scale_v_fld[h_idx - 1]; dev_dbg(vou_dev->v4l2_dev.dev, "%s: scaling 0x%x\n", fmt->desc, vouvcr); /* To produce a colour bar for testing set bit 23 of VOUVCR */ sh_vou_reg_ab_write(vou_dev, VOUVCR, vouvcr); sh_vou_reg_ab_write(vou_dev, VOUDFR, fmt->pkf | (fmt->yf << 8) | (fmt->rgb << 16)); } struct sh_vou_geometry { struct v4l2_rect output; unsigned int in_width; unsigned int in_height; int scale_idx_h; int scale_idx_v; }; /* * Find input geometry, that we can use to produce output, closest to the * requested rectangle, using VOU scaling */ static void vou_adjust_input(struct 
sh_vou_geometry *geo, v4l2_std_id std) { /* The compiler cannot know, that best and idx will indeed be set */ unsigned int best_err = UINT_MAX, best = 0, img_height_max; int i, idx = 0; if (std & V4L2_STD_525_60) img_height_max = 480; else img_height_max = 576; /* Image width must be a multiple of 4 */ v4l_bound_align_image(&geo->in_width, 0, VOU_MAX_IMAGE_WIDTH, 2, &geo->in_height, 0, img_height_max, 1, 0); /* Select scales to come as close as possible to the output image */ for (i = ARRAY_SIZE(vou_scale_h_num) - 1; i >= 0; i--) { unsigned int err; unsigned int found = geo->output.width * vou_scale_h_den[i] / vou_scale_h_num[i]; if (found > VOU_MAX_IMAGE_WIDTH) /* scales increase */ break; err = abs(found - geo->in_width); if (err < best_err) { best_err = err; idx = i; best = found; } if (!err) break; } geo->in_width = best; geo->scale_idx_h = idx; best_err = UINT_MAX; /* This loop can be replaced with one division */ for (i = ARRAY_SIZE(vou_scale_v_num) - 1; i >= 0; i--) { unsigned int err; unsigned int found = geo->output.height * vou_scale_v_den[i] / vou_scale_v_num[i]; if (found > img_height_max) /* scales increase */ break; err = abs(found - geo->in_height); if (err < best_err) { best_err = err; idx = i; best = found; } if (!err) break; } geo->in_height = best; geo->scale_idx_v = idx; } /* * Find output geometry, that we can produce, using VOU scaling, closest to * the requested rectangle */ static void vou_adjust_output(struct sh_vou_geometry *geo, v4l2_std_id std) { unsigned int best_err = UINT_MAX, best, width_max, height_max, img_height_max; int i, idx; if (std & V4L2_STD_525_60) { width_max = 858; height_max = 262 * 2; img_height_max = 480; } else { width_max = 864; height_max = 312 * 2; img_height_max = 576; } /* Select scales to come as close as possible to the output image */ for (i = 0; i < ARRAY_SIZE(vou_scale_h_num); i++) { unsigned int err; unsigned int found = geo->in_width * vou_scale_h_num[i] / vou_scale_h_den[i]; if (found > 
VOU_MAX_IMAGE_WIDTH) /* scales increase */ break; err = abs(found - geo->output.width); if (err < best_err) { best_err = err; idx = i; best = found; } if (!err) break; } geo->output.width = best; geo->scale_idx_h = idx; if (geo->output.left + best > width_max) geo->output.left = width_max - best; pr_debug("%s(): W %u * %u/%u = %u\n", __func__, geo->in_width, vou_scale_h_num[idx], vou_scale_h_den[idx], best); best_err = UINT_MAX; /* This loop can be replaced with one division */ for (i = 0; i < ARRAY_SIZE(vou_scale_v_num); i++) { unsigned int err; unsigned int found = geo->in_height * vou_scale_v_num[i] / vou_scale_v_den[i]; if (found > img_height_max) /* scales increase */ break; err = abs(found - geo->output.height); if (err < best_err) { best_err = err; idx = i; best = found; } if (!err) break; } geo->output.height = best; geo->scale_idx_v = idx; if (geo->output.top + best > height_max) geo->output.top = height_max - best; pr_debug("%s(): H %u * %u/%u = %u\n", __func__, geo->in_height, vou_scale_v_num[idx], vou_scale_v_den[idx], best); } static int sh_vou_s_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *fmt) { struct video_device *vdev = video_devdata(file); struct sh_vou_device *vou_dev = video_get_drvdata(vdev); struct v4l2_pix_format *pix = &fmt->fmt.pix; unsigned int img_height_max; int pix_idx; struct sh_vou_geometry geo; struct v4l2_mbus_framefmt mbfmt = { /* Revisit: is this the correct code? 
*/ .code = V4L2_MBUS_FMT_YUYV8_2X8, .field = V4L2_FIELD_INTERLACED, .colorspace = V4L2_COLORSPACE_SMPTE170M, }; int ret; dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__, vou_dev->rect.width, vou_dev->rect.height, pix->width, pix->height); if (pix->field == V4L2_FIELD_ANY) pix->field = V4L2_FIELD_NONE; if (fmt->type != V4L2_BUF_TYPE_VIDEO_OUTPUT || pix->field != V4L2_FIELD_NONE) return -EINVAL; for (pix_idx = 0; pix_idx < ARRAY_SIZE(vou_fmt); pix_idx++) if (vou_fmt[pix_idx].pfmt == pix->pixelformat) break; if (pix_idx == ARRAY_SIZE(vou_fmt)) return -EINVAL; if (vou_dev->std & V4L2_STD_525_60) img_height_max = 480; else img_height_max = 576; /* Image width must be a multiple of 4 */ v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 2, &pix->height, 0, img_height_max, 1, 0); geo.in_width = pix->width; geo.in_height = pix->height; geo.output = vou_dev->rect; vou_adjust_output(&geo, vou_dev->std); mbfmt.width = geo.output.width; mbfmt.height = geo.output.height; ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video, s_mbus_fmt, &mbfmt); /* Must be implemented, so, don't check for -ENOIOCTLCMD */ if (ret < 0) return ret; dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u -> %ux%u\n", __func__, geo.output.width, geo.output.height, mbfmt.width, mbfmt.height); /* Sanity checks */ if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH || (unsigned)mbfmt.height > img_height_max || mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8) return -EIO; if (mbfmt.width != geo.output.width || mbfmt.height != geo.output.height) { geo.output.width = mbfmt.width; geo.output.height = mbfmt.height; vou_adjust_input(&geo, vou_dev->std); } /* We tried to preserve output rectangle, but it could have changed */ vou_dev->rect = geo.output; pix->width = geo.in_width; pix->height = geo.in_height; dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u\n", __func__, pix->width, pix->height); vou_dev->pix_idx = pix_idx; vou_dev->pix = *pix; sh_vou_configure_geometry(vou_dev, pix_idx, geo.scale_idx_h, 
geo.scale_idx_v); return 0; } static int sh_vou_try_fmt_vid_out(struct file *file, void *priv, struct v4l2_format *fmt) { struct sh_vou_file *vou_file = priv; struct v4l2_pix_format *pix = &fmt->fmt.pix; int i; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); fmt->type = V4L2_BUF_TYPE_VIDEO_OUTPUT; pix->field = V4L2_FIELD_NONE; v4l_bound_align_image(&pix->width, 0, VOU_MAX_IMAGE_WIDTH, 1, &pix->height, 0, VOU_MAX_IMAGE_HEIGHT, 1, 0); for (i = 0; ARRAY_SIZE(vou_fmt); i++) if (vou_fmt[i].pfmt == pix->pixelformat) return 0; pix->pixelformat = vou_fmt[0].pfmt; return 0; } static int sh_vou_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *req) { struct sh_vou_file *vou_file = priv; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); if (req->type != V4L2_BUF_TYPE_VIDEO_OUTPUT) return -EINVAL; return videobuf_reqbufs(&vou_file->vbq, req); } static int sh_vou_querybuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct sh_vou_file *vou_file = priv; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); return videobuf_querybuf(&vou_file->vbq, b); } static int sh_vou_qbuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct sh_vou_file *vou_file = priv; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); return videobuf_qbuf(&vou_file->vbq, b); } static int sh_vou_dqbuf(struct file *file, void *priv, struct v4l2_buffer *b) { struct sh_vou_file *vou_file = priv; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); return videobuf_dqbuf(&vou_file->vbq, b, file->f_flags & O_NONBLOCK); } static int sh_vou_streamon(struct file *file, void *priv, enum v4l2_buf_type buftype) { struct video_device *vdev = video_devdata(file); struct sh_vou_device *vou_dev = video_get_drvdata(vdev); struct sh_vou_file *vou_file = priv; int ret; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video, s_stream, 1); if (ret < 0 && ret != -ENOIOCTLCMD) return ret; /* This calls our .buf_queue() (== sh_vou_buf_queue) */ return 
videobuf_streamon(&vou_file->vbq); } static int sh_vou_streamoff(struct file *file, void *priv, enum v4l2_buf_type buftype) { struct video_device *vdev = video_devdata(file); struct sh_vou_device *vou_dev = video_get_drvdata(vdev); struct sh_vou_file *vou_file = priv; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); /* * This calls buf_release from host driver's videobuf_queue_ops for all * remaining buffers. When the last buffer is freed, stop streaming */ videobuf_streamoff(&vou_file->vbq); v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video, s_stream, 0); return 0; } static u32 sh_vou_ntsc_mode(enum sh_vou_bus_fmt bus_fmt) { switch (bus_fmt) { default: pr_warning("%s(): Invalid bus-format code %d, using default 8-bit\n", __func__, bus_fmt); case SH_VOU_BUS_8BIT: return 1; case SH_VOU_BUS_16BIT: return 0; case SH_VOU_BUS_BT656: return 3; } } static int sh_vou_s_std(struct file *file, void *priv, v4l2_std_id *std_id) { struct video_device *vdev = video_devdata(file); struct sh_vou_device *vou_dev = video_get_drvdata(vdev); int ret; dev_dbg(vou_dev->v4l2_dev.dev, "%s(): 0x%llx\n", __func__, *std_id); if (*std_id & ~vdev->tvnorms) return -EINVAL; ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video, s_std_output, *std_id); /* Shall we continue, if the subdev doesn't support .s_std_output()? 
*/
	if (ret < 0 && ret != -ENOIOCTLCMD)
		return ret;

	/* Program the VOUCR standard bits: NTSC mode for 525/60, PAL otherwise */
	if (*std_id & V4L2_STD_525_60)
		sh_vou_reg_ab_set(vou_dev, VOUCR,
			sh_vou_ntsc_mode(vou_dev->pdata->bus_fmt) << 29,
			7 << 29);
	else
		sh_vou_reg_ab_set(vou_dev, VOUCR, 5 << 29, 7 << 29);

	vou_dev->std = *std_id;

	return 0;
}

/* Report the currently configured TV standard */
static int sh_vou_g_std(struct file *file, void *priv, v4l2_std_id *std)
{
	struct video_device *vdev = video_devdata(file);
	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);

	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);

	*std = vou_dev->std;

	return 0;
}

/* Report the current output rectangle */
static int sh_vou_g_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
	struct video_device *vdev = video_devdata(file);
	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);

	dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__);

	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->c = vou_dev->rect;

	return 0;
}

/* Assume a dull encoder, do all the work ourselves. */
static int sh_vou_s_crop(struct file *file, void *fh, struct v4l2_crop *a)
{
	struct video_device *vdev = video_devdata(file);
	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);
	struct v4l2_rect *rect = &a->c;
	struct v4l2_crop sd_crop = {.type = V4L2_BUF_TYPE_VIDEO_OUTPUT};
	struct v4l2_pix_format *pix = &vou_dev->pix;
	struct sh_vou_geometry geo;
	struct v4l2_mbus_framefmt mbfmt = {
		/* Revisit: is this the correct code? */
		.code = V4L2_MBUS_FMT_YUYV8_2X8,
		.field = V4L2_FIELD_INTERLACED,
		.colorspace = V4L2_COLORSPACE_SMPTE170M,
	};
	unsigned int img_height_max;
	int ret;

	dev_dbg(vou_dev->v4l2_dev.dev, "%s(): %ux%u@%u:%u\n", __func__,
		rect->width, rect->height, rect->left, rect->top);

	if (a->type != V4L2_BUF_TYPE_VIDEO_OUTPUT)
		return -EINVAL;

	/* Field height limit depends on the configured standard */
	if (vou_dev->std & V4L2_STD_525_60)
		img_height_max = 480;
	else
		img_height_max = 576;

	v4l_bound_align_image(&rect->width, 0, VOU_MAX_IMAGE_WIDTH, 1,
			      &rect->height, 0, img_height_max, 1, 0);

	/* Keep the rectangle inside the display: shift instead of shrinking */
	if (rect->width + rect->left > VOU_MAX_IMAGE_WIDTH)
		rect->left = VOU_MAX_IMAGE_WIDTH - rect->width;

	if (rect->height + rect->top > img_height_max)
		rect->top = img_height_max - rect->height;

	geo.output = *rect;
	geo.in_width = pix->width;
	geo.in_height = pix->height;

	/* Configure the encoder one-to-one, position at 0, ignore errors */
	sd_crop.c.width = geo.output.width;
	sd_crop.c.height = geo.output.height;

	/*
	 * We first issue a S_CROP, so that the subsequent S_FMT delivers the
	 * final encoder configuration.
	 */
	v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
				   s_crop, &sd_crop);

	mbfmt.width = geo.output.width;
	mbfmt.height = geo.output.height;
	ret = v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, video,
					 s_mbus_fmt, &mbfmt);
	/* Must be implemented, so, don't check for -ENOIOCTLCMD */
	if (ret < 0)
		return ret;

	/* Sanity checks */
	if ((unsigned)mbfmt.width > VOU_MAX_IMAGE_WIDTH ||
	    (unsigned)mbfmt.height > img_height_max ||
	    mbfmt.code != V4L2_MBUS_FMT_YUYV8_2X8)
		return -EIO;

	geo.output.width = mbfmt.width;
	geo.output.height = mbfmt.height;

	/*
	 * No down-scaling. According to the API, current call has precedence:
	 * http://v4l2spec.bytesex.org/spec/x1904.htm#AEN1954 paragraph two.
	 */
	vou_adjust_input(&geo, vou_dev->std);

	/* We tried to preserve output rectangle, but it could have changed */
	vou_dev->rect = geo.output;
	pix->width = geo.in_width;
	pix->height = geo.in_height;

	sh_vou_configure_geometry(vou_dev, vou_dev->pix_idx,
				  geo.scale_idx_h, geo.scale_idx_v);

	return 0;
}

/*
 * Total field: NTSC 858 x 2 * 262/263, PAL 864 x 2 * 312/313, default rectangle
 * is the initial register values, height takes the interlaced format into
 * account. The actual image can only go up to 720 x 2 * 240, So, VOUVPR can
 * actually only meaningfully contain values <= 720 and <= 240 respectively, and
 * not <= 864 and <= 312.
 */
static int sh_vou_cropcap(struct file *file, void *priv,
			  struct v4l2_cropcap *a)
{
	struct sh_vou_file *vou_file = priv;

	dev_dbg(vou_file->vbq.dev, "%s()\n", __func__);

	a->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->bounds.left = 0;
	a->bounds.top = 0;
	a->bounds.width = VOU_MAX_IMAGE_WIDTH;
	a->bounds.height = VOU_MAX_IMAGE_HEIGHT;
	/* Default = max, set VOUDPR = 0, which is not hardware default */
	a->defrect.left = 0;
	a->defrect.top = 0;
	a->defrect.width = VOU_MAX_IMAGE_WIDTH;
	a->defrect.height = VOU_MAX_IMAGE_HEIGHT;
	a->pixelaspect.numerator = 1;
	a->pixelaspect.denominator = 1;

	return 0;
}

/* End-of-frame interrupt handler: complete the active buffer, queue the next */
static irqreturn_t sh_vou_isr(int irq, void *dev_id)
{
	struct sh_vou_device *vou_dev = dev_id;
	static unsigned long j;	/* printk_timed_ratelimit() state */
	struct videobuf_buffer *vb;
	static int cnt;
	static int side;
	u32 irq_status = sh_vou_reg_a_read(vou_dev, VOUIR), masked;
	u32 vou_status = sh_vou_reg_a_read(vou_dev, VOUSTR);

	/* Not one of our frame interrupts (bits 8/9)? */
	if (!(irq_status & 0x300)) {
		if (printk_timed_ratelimit(&j, 500))
			dev_warn(vou_dev->v4l2_dev.dev, "IRQ status 0x%x!\n",
				 irq_status);
		return IRQ_NONE;
	}

	spin_lock(&vou_dev->lock);
	if (!vou_dev->active || list_empty(&vou_dev->queue)) {
		if (printk_timed_ratelimit(&j, 500))
			dev_warn(vou_dev->v4l2_dev.dev,
				 "IRQ without active buffer: %x!\n",
				 irq_status);
		/* Just ack: buf_release will disable further interrupts */
		sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x300);
spin_unlock(&vou_dev->lock);
		return IRQ_HANDLED;
	}

	masked = ~(0x300 & irq_status) & irq_status & 0x30304;
	dev_dbg(vou_dev->v4l2_dev.dev,
		"IRQ status 0x%x -> 0x%x, VOU status 0x%x, cnt %d\n",
		irq_status, masked, vou_status, cnt);
	cnt++;
	side = vou_status & 0x10000;

	/* Clear only set interrupts */
	sh_vou_reg_a_write(vou_dev, VOUIR, masked);

	/* Complete the buffer that just finished displaying */
	vb = vou_dev->active;
	list_del(&vb->queue);

	vb->state = VIDEOBUF_DONE;
	do_gettimeofday(&vb->ts);
	vb->field_count++;
	wake_up(&vb->done);

	if (list_empty(&vou_dev->queue)) {
		/* Stop VOU */
		dev_dbg(vou_dev->v4l2_dev.dev, "%s: queue empty after %d\n",
			__func__, cnt);
		sh_vou_reg_a_set(vou_dev, VOUER, 0, 1);
		vou_dev->active = NULL;
		vou_dev->status = SH_VOU_INITIALISING;
		/* Disable End-of-Frame (VSYNC) interrupts */
		sh_vou_reg_a_set(vou_dev, VOUIR, 0, 0x30000);
		spin_unlock(&vou_dev->lock);
		return IRQ_HANDLED;
	}

	/* Promote the next queued buffer; pre-program the one after it */
	vou_dev->active = list_entry(vou_dev->queue.next,
				     struct videobuf_buffer, queue);

	if (vou_dev->active->queue.next != &vou_dev->queue) {
		struct videobuf_buffer *new = list_entry(vou_dev->active->queue.next,
						struct videobuf_buffer, queue);
		sh_vou_schedule_next(vou_dev, new);
	}

	spin_unlock(&vou_dev->lock);

	return IRQ_HANDLED;
}

/*
 * One-time hardware bring-up: reset the VOU interfaces and program the
 * bus-format / clock / sync polarity bits of VOUCR from platform data.
 * Returns 0 or -ETIMEDOUT if the reset does not complete within ~100us.
 */
static int sh_vou_hw_init(struct sh_vou_device *vou_dev)
{
	struct sh_vou_pdata *pdata = vou_dev->pdata;
	u32 voucr = sh_vou_ntsc_mode(pdata->bus_fmt) << 29;
	int i = 100;	/* reset-completion poll budget in microseconds */

	/* Disable all IRQs */
	sh_vou_reg_a_write(vou_dev, VOUIR, 0);

	/* Reset VOU interfaces - registers unaffected */
	sh_vou_reg_a_write(vou_dev, VOUSRR, 0x101);
	while (--i && (sh_vou_reg_a_read(vou_dev, VOUSRR) & 0x101))
		udelay(1);

	if (!i)
		return -ETIMEDOUT;

	dev_dbg(vou_dev->v4l2_dev.dev, "Reset took %dus\n", 100 - i);

	if (pdata->flags & SH_VOU_PCLK_FALLING)
		voucr |= 1 << 28;
	if (pdata->flags & SH_VOU_HSYNC_LOW)
		voucr |= 1 << 27;
	if (pdata->flags & SH_VOU_VSYNC_LOW)
		voucr |= 1 << 26;
	sh_vou_reg_ab_set(vou_dev, VOUCR, voucr, 0xfc000000);

	/* Manual register side switching at first */
	sh_vou_reg_a_write(vou_dev, VOURCR, 4);
	/* Default - fixed HSYNC
length, can be made configurable is required */ sh_vou_reg_ab_write(vou_dev, VOUMSR, 0x800000); return 0; } /* File operations */ static int sh_vou_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct sh_vou_device *vou_dev = video_get_drvdata(vdev); struct sh_vou_file *vou_file = kzalloc(sizeof(struct sh_vou_file), GFP_KERNEL); if (!vou_file) return -ENOMEM; dev_dbg(vou_dev->v4l2_dev.dev, "%s()\n", __func__); file->private_data = vou_file; if (atomic_inc_return(&vou_dev->use_count) == 1) { int ret; /* First open */ vou_dev->status = SH_VOU_INITIALISING; pm_runtime_get_sync(vdev->v4l2_dev->dev); ret = sh_vou_hw_init(vou_dev); if (ret < 0) { atomic_dec(&vou_dev->use_count); pm_runtime_put(vdev->v4l2_dev->dev); vou_dev->status = SH_VOU_IDLE; return ret; } } videobuf_queue_dma_contig_init(&vou_file->vbq, &sh_vou_video_qops, vou_dev->v4l2_dev.dev, &vou_dev->lock, V4L2_BUF_TYPE_VIDEO_OUTPUT, V4L2_FIELD_NONE, sizeof(struct videobuf_buffer), vdev, &vou_dev->fop_lock); return 0; } static int sh_vou_release(struct file *file) { struct video_device *vdev = video_devdata(file); struct sh_vou_device *vou_dev = video_get_drvdata(vdev); struct sh_vou_file *vou_file = file->private_data; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); if (!atomic_dec_return(&vou_dev->use_count)) { /* Last close */ vou_dev->status = SH_VOU_IDLE; sh_vou_reg_a_set(vou_dev, VOUER, 0, 0x101); pm_runtime_put(vdev->v4l2_dev->dev); } file->private_data = NULL; kfree(vou_file); return 0; } static int sh_vou_mmap(struct file *file, struct vm_area_struct *vma) { struct sh_vou_file *vou_file = file->private_data; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); return videobuf_mmap_mapper(&vou_file->vbq, vma); } static unsigned int sh_vou_poll(struct file *file, poll_table *wait) { struct sh_vou_file *vou_file = file->private_data; dev_dbg(vou_file->vbq.dev, "%s()\n", __func__); return videobuf_poll_stream(file, &vou_file->vbq, wait); } static int sh_vou_g_chip_ident(struct file 
*file, void *fh,
			       struct v4l2_dbg_chip_ident *id)
{
	struct video_device *vdev = video_devdata(file);
	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);

	/* Forward the chip-ident query to the encoder subdevice */
	return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core,
					  g_chip_ident, id);
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug register access: pass straight through to the subdevice */
static int sh_vou_g_register(struct file *file, void *fh,
			     struct v4l2_dbg_register *reg)
{
	struct video_device *vdev = video_devdata(file);
	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);

	return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core,
					  g_register, reg);
}

static int sh_vou_s_register(struct file *file, void *fh,
			     struct v4l2_dbg_register *reg)
{
	struct video_device *vdev = video_devdata(file);
	struct sh_vou_device *vou_dev = video_get_drvdata(vdev);

	return v4l2_device_call_until_err(&vou_dev->v4l2_dev, 0, core,
					  s_register, reg);
}
#endif

/* sh_vou display ioctl operations */
static const struct v4l2_ioctl_ops sh_vou_ioctl_ops = {
	.vidioc_querycap		= sh_vou_querycap,
	.vidioc_enum_fmt_vid_out	= sh_vou_enum_fmt_vid_out,
	.vidioc_g_fmt_vid_out		= sh_vou_g_fmt_vid_out,
	.vidioc_s_fmt_vid_out		= sh_vou_s_fmt_vid_out,
	.vidioc_try_fmt_vid_out		= sh_vou_try_fmt_vid_out,
	.vidioc_reqbufs			= sh_vou_reqbufs,
	.vidioc_querybuf		= sh_vou_querybuf,
	.vidioc_qbuf			= sh_vou_qbuf,
	.vidioc_dqbuf			= sh_vou_dqbuf,
	.vidioc_streamon		= sh_vou_streamon,
	.vidioc_streamoff		= sh_vou_streamoff,
	.vidioc_s_std			= sh_vou_s_std,
	.vidioc_g_std			= sh_vou_g_std,
	.vidioc_cropcap			= sh_vou_cropcap,
	.vidioc_g_crop			= sh_vou_g_crop,
	.vidioc_s_crop			= sh_vou_s_crop,
	.vidioc_g_chip_ident		= sh_vou_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.vidioc_g_register		= sh_vou_g_register,
	.vidioc_s_register		= sh_vou_s_register,
#endif
};

static const struct v4l2_file_operations sh_vou_fops = {
	.owner		= THIS_MODULE,
	.open		= sh_vou_open,
	.release	= sh_vou_release,
	.unlocked_ioctl	= video_ioctl2,
	.mmap		= sh_vou_mmap,
	.poll		= sh_vou_poll,
};

static const struct video_device sh_vou_video_template = {
	.name		= "sh_vou",
	.fops		= &sh_vou_fops,
	.ioctl_ops
= &sh_vou_ioctl_ops,
	.tvnorms	= V4L2_STD_525_60, /* PAL only supported in 8-bit non-bt656 mode */
	.current_norm	= V4L2_STD_NTSC_M,
};

/*
 * Probe: claim and map the register window, install the interrupt handler,
 * register the V4L2 device and video node, and attach the I2C encoder
 * subdevice described by the platform data.
 */
static int __devinit sh_vou_probe(struct platform_device *pdev)
{
	struct sh_vou_pdata *vou_pdata = pdev->dev.platform_data;
	struct v4l2_rect *rect;
	struct v4l2_pix_format *pix;
	struct i2c_adapter *i2c_adap;
	struct video_device *vdev;
	struct sh_vou_device *vou_dev;
	struct resource *reg_res, *region;
	struct v4l2_subdev *subdev;
	int irq, ret;

	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);

	if (!vou_pdata || !reg_res || irq <= 0) {
		dev_err(&pdev->dev, "Insufficient VOU platform information.\n");
		return -ENODEV;
	}

	vou_dev = kzalloc(sizeof(*vou_dev), GFP_KERNEL);
	if (!vou_dev)
		return -ENOMEM;

	INIT_LIST_HEAD(&vou_dev->queue);
	spin_lock_init(&vou_dev->lock);
	mutex_init(&vou_dev->fop_lock);
	atomic_set(&vou_dev->use_count, 0);
	vou_dev->pdata = vou_pdata;
	vou_dev->status = SH_VOU_IDLE;

	rect = &vou_dev->rect;
	pix = &vou_dev->pix;

	/* Fill in defaults */
	vou_dev->std		= sh_vou_video_template.current_norm;
	rect->left		= 0;
	rect->top		= 0;
	rect->width		= VOU_MAX_IMAGE_WIDTH;
	rect->height		= 480;
	pix->width		= VOU_MAX_IMAGE_WIDTH;
	pix->height		= 480;
	pix->pixelformat	= V4L2_PIX_FMT_YVYU;
	pix->field		= V4L2_FIELD_NONE;
	pix->bytesperline	= VOU_MAX_IMAGE_WIDTH * 2;
	pix->sizeimage		= VOU_MAX_IMAGE_WIDTH * 2 * 480;
	pix->colorspace		= V4L2_COLORSPACE_SMPTE170M;

	region = request_mem_region(reg_res->start, resource_size(reg_res),
				    pdev->name);
	if (!region) {
		dev_err(&pdev->dev, "VOU region already claimed\n");
		ret = -EBUSY;
		goto ereqmemreg;
	}

	vou_dev->base = ioremap(reg_res->start, resource_size(reg_res));
	if (!vou_dev->base) {
		ret = -ENOMEM;
		goto emap;
	}

	ret = request_irq(irq, sh_vou_isr, 0, "vou", vou_dev);
	if (ret < 0)
		goto ereqirq;

	ret = v4l2_device_register(&pdev->dev, &vou_dev->v4l2_dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "Error registering v4l2 device\n");
		goto ev4l2devreg;
	}

	/* Allocate memory for video device */
	vdev
= video_device_alloc();
	if (vdev == NULL) {
		ret = -ENOMEM;
		goto evdevalloc;
	}

	*vdev = sh_vou_video_template;
	/* Only the 8-bit bus can also drive PAL */
	if (vou_pdata->bus_fmt == SH_VOU_BUS_8BIT)
		vdev->tvnorms |= V4L2_STD_PAL;
	vdev->v4l2_dev = &vou_dev->v4l2_dev;
	vdev->release = video_device_release;
	vdev->lock = &vou_dev->fop_lock;

	vou_dev->vdev = vdev;
	video_set_drvdata(vdev, vou_dev);

	pm_runtime_enable(&pdev->dev);
	pm_runtime_resume(&pdev->dev);

	i2c_adap = i2c_get_adapter(vou_pdata->i2c_adap);
	if (!i2c_adap) {
		ret = -ENODEV;
		goto ei2cgadap;
	}

	ret = sh_vou_hw_init(vou_dev);
	if (ret < 0)
		goto ereset;

	subdev = v4l2_i2c_new_subdev_board(&vou_dev->v4l2_dev, i2c_adap,
					   vou_pdata->board_info, NULL);
	if (!subdev) {
		ret = -ENOMEM;
		goto ei2cnd;
	}

	ret = video_register_device(vdev, VFL_TYPE_GRABBER, -1);
	if (ret < 0)
		goto evregdev;

	return 0;

	/* Error unwinding: release resources in reverse order of acquisition */
evregdev:
ei2cnd:
ereset:
	i2c_put_adapter(i2c_adap);
ei2cgadap:
	video_device_release(vdev);
	pm_runtime_disable(&pdev->dev);
evdevalloc:
	v4l2_device_unregister(&vou_dev->v4l2_dev);
ev4l2devreg:
	free_irq(irq, vou_dev);
ereqirq:
	iounmap(vou_dev->base);
emap:
	release_mem_region(reg_res->start, resource_size(reg_res));
ereqmemreg:
	kfree(vou_dev);
	return ret;
}

/*
 * Remove: tear everything down in reverse order; the encoder's I2C client
 * is recovered from the v4l2_device's subdevice list.
 */
static int __devexit sh_vou_remove(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	struct v4l2_device *v4l2_dev = platform_get_drvdata(pdev);
	struct sh_vou_device *vou_dev = container_of(v4l2_dev,
						struct sh_vou_device, v4l2_dev);
	struct v4l2_subdev *sd = list_entry(v4l2_dev->subdevs.next,
					    struct v4l2_subdev, list);
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct resource *reg_res;

	if (irq > 0)
		free_irq(irq, vou_dev);
	pm_runtime_disable(&pdev->dev);
	video_unregister_device(vou_dev->vdev);
	i2c_put_adapter(client->adapter);
	v4l2_device_unregister(&vou_dev->v4l2_dev);
	iounmap(vou_dev->base);
	reg_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (reg_res)
		release_mem_region(reg_res->start, resource_size(reg_res));
	kfree(vou_dev);
	return 0;
}

static struct platform_driver __refdata sh_vou = {
.remove = __devexit_p(sh_vou_remove),
	.driver = {
		.name	= "sh-vou",
		.owner	= THIS_MODULE,
	},
};

/*
 * The device is expected to be present at boot, hence
 * platform_driver_probe() instead of platform_driver_register().
 */
static int __init sh_vou_init(void)
{
	return platform_driver_probe(&sh_vou, sh_vou_probe);
}

static void __exit sh_vou_exit(void)
{
	platform_driver_unregister(&sh_vou);
}

module_init(sh_vou_init);
module_exit(sh_vou_exit);

MODULE_DESCRIPTION("SuperH VOU driver");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");
MODULE_VERSION("0.1.0");
MODULE_ALIAS("platform:sh-vou");
gpl-2.0
cm12-g600/chil360-kernel
drivers/scsi/constants.c
7614
52104
/*
 * ASCII values for a number of symbolic constants, printing functions,
 * etc.
 * Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422)
 * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002)
 *   by D. Gilbert and aeb (20020609)
 * Update to SPC-4 T10/1713-D Rev 20, 22 May 2009, D. Gilbert 20090624
 */

#include <linux/blkdev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

/* Commands with service actions that change the command name */
#define MAINTENANCE_IN 0xa3
#define MAINTENANCE_OUT 0xa4
#define SERVICE_ACTION_IN_12 0xab
#define SERVICE_ACTION_OUT_12 0xa9
#define SERVICE_ACTION_IN_16 0x9e
#define SERVICE_ACTION_OUT_16 0x9f


#ifdef CONFIG_SCSI_CONSTANTS
/* SCSI opcode names, indexed by CDB byte 0; NULL = reserved/unnamed */
static const char * cdb_byte0_names[] = {
/* 00-03 */ "Test Unit Ready", "Rezero Unit/Rewind", NULL, "Request Sense",
/* 04-07 */ "Format Unit/Medium", "Read Block Limits", NULL, "Reassign Blocks",
/* 08-0d */ "Read(6)", NULL, "Write(6)", "Seek(6)", NULL, NULL,
/* 0e-12 */ NULL, "Read Reverse", "Write Filemarks", "Space", "Inquiry",
/* 13-16 */ "Verify(6)", "Recover Buffered Data", "Mode Select(6)",
	    "Reserve(6)",
/* 17-1a */ "Release(6)", "Copy", "Erase", "Mode Sense(6)",
/* 1b-1d */ "Start/Stop Unit", "Receive Diagnostic", "Send Diagnostic",
/* 1e-1f */ "Prevent/Allow Medium Removal", NULL,
/* 20-22 */ NULL, NULL, NULL,
/* 23-28 */ "Read Format Capacities", "Set Window",
	    "Read Capacity(10)", NULL, NULL, "Read(10)",
/* 29-2d */ "Read Generation", "Write(10)", "Seek(10)", "Erase(10)",
	    "Read updated block",
/* 2e-31 */ "Write Verify(10)", "Verify(10)", "Search High", "Search Equal",
/* 32-34 */ "Search Low", "Set Limits", "Prefetch/Read Position",
/* 35-37 */ "Synchronize Cache(10)", "Lock/Unlock Cache(10)",
	    "Read Defect Data(10)",
/* 38-3c */ "Medium Scan", "Compare", "Copy Verify", "Write Buffer",
	    "Read Buffer",
/* 3d-3f */ "Update Block",
"Read Long(10)", "Write Long(10)",
/* 40-41 */ "Change Definition", "Write Same(10)",
/* 42-48 */ "Unmap/Read sub-channel", "Read TOC/PMA/ATIP",
	    "Read density support", "Play audio(10)", "Get configuration",
	    "Play audio msf", "Play audio track/index",
/* 49-4f */ "Play track relative(10)", "Get event status notification",
	    "Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
	    NULL,
/* 50-55 */ "Xdwrite", "Xpwrite, Read disk info", "Xdread, Read track info",
	    "Reserve track", "Send OPC info", "Mode Select(10)",
/* 56-5b */ "Reserve(10)", "Release(10)", "Repair track", "Read master cue",
	    "Mode Sense(10)", "Close track/session",
/* 5c-5f */ "Read buffer capacity", "Send cue sheet", "Persistent reserve in",
	    "Persistent reserve out",
/* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, "Extended CDB",
	    "Variable length",
/* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy",
	    "Receive copy results",
/* 85-89 */ "ATA command pass through(16)", "Access control in",
	    "Access control out", "Read(16)", "Memory Export Out(16)",
/* 8a-8f */ "Write(16)", "ORWrite", "Read attributes", "Write attributes",
	    "Write and verify(16)", "Verify(16)",
/* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)",
	    "Lock/unlock cache(16)", "Write same(16)", NULL,
/* 95-99 */ NULL, NULL, NULL, NULL, NULL,
/* 9a-9f */ NULL, NULL, NULL, NULL, "Service action in(16)",
	    "Service action out(16)",
/* a0-a5 */ "Report luns", "ATA command pass through(12)/Blank",
	    "Security protocol in", "Maintenance in", "Maintenance out",
	    "Move medium/play audio(12)",
/* a6-a9 */ "Exchange medium", "Move medium attached", "Read(12)",
	    "Play track relative(12)",
/* aa-ae */ "Write(12)", NULL, "Erase(12), Get Performance",
	    "Read DVD structure", "Write and verify(12)",
/* af-b1 */ "Verify(12)", "Search data high(12)", "Search data 
equal(12)", /* b2-b4 */ "Search data low(12)", "Set limits(12)", "Read element status attached", /* b5-b6 */ "Security protocol out", "Send volume tag, set streaming", /* b7-b9 */ "Read defect data(12)", "Read element status", "Read CD msf", /* ba-bc */ "Redundancy group (in), Scan", "Redundancy group (out), Set cd-rom speed", "Spare (in), Play cd", /* bd-bf */ "Spare (out), Mechanism status", "Volume set (in), Read cd", "Volume set (out), Send DVD structure", }; struct value_name_pair { int value; const char * name; }; static const struct value_name_pair maint_in_arr[] = { {0x5, "Report identifying information"}, {0xa, "Report target port groups"}, {0xb, "Report aliases"}, {0xc, "Report supported operation codes"}, {0xd, "Report supported task management functions"}, {0xe, "Report priority"}, {0xf, "Report timestamp"}, {0x10, "Management protocol in"}, }; #define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr) static const struct value_name_pair maint_out_arr[] = { {0x6, "Set identifying information"}, {0xa, "Set target port groups"}, {0xb, "Change aliases"}, {0xe, "Set priority"}, {0xf, "Set timestamp"}, {0x10, "Management protocol out"}, }; #define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr) static const struct value_name_pair serv_in12_arr[] = { {0x1, "Read media serial number"}, }; #define SERV_IN12_SZ ARRAY_SIZE(serv_in12_arr) static const struct value_name_pair serv_out12_arr[] = { {-1, "dummy entry"}, }; #define SERV_OUT12_SZ ARRAY_SIZE(serv_out12_arr) static const struct value_name_pair serv_in16_arr[] = { {0x10, "Read capacity(16)"}, {0x11, "Read long(16)"}, {0x12, "Get LBA status"}, }; #define SERV_IN16_SZ ARRAY_SIZE(serv_in16_arr) static const struct value_name_pair serv_out16_arr[] = { {0x11, "Write long(16)"}, {0x1f, "Notify data transfer device(16)"}, }; #define SERV_OUT16_SZ ARRAY_SIZE(serv_out16_arr) static const struct value_name_pair variable_length_arr[] = { {0x1, "Rebuild(32)"}, {0x2, "Regenerate(32)"}, {0x3, "Xdread(32)"}, {0x4, "Xdwrite(32)"}, {0x5, "Xdwrite 
extended(32)"}, {0x6, "Xpwrite(32)"}, {0x7, "Xdwriteread(32)"}, {0x8, "Xdwrite extended(64)"}, {0x9, "Read(32)"}, {0xa, "Verify(32)"}, {0xb, "Write(32)"}, {0xc, "Write an verify(32)"}, {0xd, "Write same(32)"}, {0x8801, "Format OSD"}, {0x8802, "Create (osd)"}, {0x8803, "List (osd)"}, {0x8805, "Read (osd)"}, {0x8806, "Write (osd)"}, {0x8807, "Append (osd)"}, {0x8808, "Flush (osd)"}, {0x880a, "Remove (osd)"}, {0x880b, "Create partition (osd)"}, {0x880c, "Remove partition (osd)"}, {0x880e, "Get attributes (osd)"}, {0x880f, "Set attributes (osd)"}, {0x8812, "Create and write (osd)"}, {0x8815, "Create collection (osd)"}, {0x8816, "Remove collection (osd)"}, {0x8817, "List collection (osd)"}, {0x8818, "Set key (osd)"}, {0x8819, "Set master key (osd)"}, {0x881a, "Flush collection (osd)"}, {0x881b, "Flush partition (osd)"}, {0x881c, "Flush OSD"}, {0x8f7e, "Perform SCSI command (osd)"}, {0x8f7f, "Perform task management function (osd)"}, }; #define VARIABLE_LENGTH_SZ ARRAY_SIZE(variable_length_arr) static const char * get_sa_name(const struct value_name_pair * arr, int arr_sz, int service_action) { int k; for (k = 0; k < arr_sz; ++k, ++arr) { if (service_action == arr->value) break; } return (k < arr_sz) ? arr->name : NULL; } /* attempt to guess cdb length if cdb_len==0 . No trailing linefeed. 
*/
static void print_opcode_name(unsigned char * cdbp, int cdb_len)
{
	int sa, len, cdb0;
	const char * name;

	cdb0 = cdbp[0];
	switch(cdb0) {
	case VARIABLE_LENGTH_CMD:
		len = scsi_varlen_cdb_length(cdbp);
		if (len < 10) {
			printk("short variable length command, "
			       "len=%d ext_len=%d", len, cdb_len);
			break;
		}
		/* Variable-length CDBs carry the service action in bytes 8-9 */
		sa = (cdbp[8] << 8) + cdbp[9];
		name = get_sa_name(variable_length_arr, VARIABLE_LENGTH_SZ,
				   sa);
		if (name)
			printk("%s", name);
		else
			printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);

		if ((cdb_len > 0) && (len != cdb_len))
			printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len);

		break;
	/* For the remaining cases the service action is in cdb[1] bits 0-4 */
	case MAINTENANCE_IN:
		sa = cdbp[1] & 0x1f;
		name = get_sa_name(maint_in_arr, MAINT_IN_SZ, sa);
		if (name)
			printk("%s", name);
		else
			printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
		break;
	case MAINTENANCE_OUT:
		sa = cdbp[1] & 0x1f;
		name = get_sa_name(maint_out_arr, MAINT_OUT_SZ, sa);
		if (name)
			printk("%s", name);
		else
			printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
		break;
	case SERVICE_ACTION_IN_12:
		sa = cdbp[1] & 0x1f;
		name = get_sa_name(serv_in12_arr, SERV_IN12_SZ, sa);
		if (name)
			printk("%s", name);
		else
			printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
		break;
	case SERVICE_ACTION_OUT_12:
		sa = cdbp[1] & 0x1f;
		name = get_sa_name(serv_out12_arr, SERV_OUT12_SZ, sa);
		if (name)
			printk("%s", name);
		else
			printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
		break;
	case SERVICE_ACTION_IN_16:
		sa = cdbp[1] & 0x1f;
		name = get_sa_name(serv_in16_arr, SERV_IN16_SZ, sa);
		if (name)
			printk("%s", name);
		else
			printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
		break;
	case SERVICE_ACTION_OUT_16:
		sa = cdbp[1] & 0x1f;
		name = get_sa_name(serv_out16_arr, SERV_OUT16_SZ, sa);
		if (name)
			printk("%s", name);
		else
			printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
		break;
	default:
		if (cdb0 < 0xc0) {
			name = cdb_byte0_names[cdb0];
			if (name)
				printk("%s", name);
			else
				printk("cdb[0]=0x%x (reserved)", cdb0);
		} else
			printk("cdb[0]=0x%x (vendor)", cdb0);
		break;
	}
}

#else /* ifndef CONFIG_SCSI_CONSTANTS */

/* Minimal variant: print numeric opcode/service-action only */
static void print_opcode_name(unsigned char * cdbp, int cdb_len)
{
	int sa, len, cdb0;

	cdb0
= cdbp[0];
	switch(cdb0) {
	case VARIABLE_LENGTH_CMD:
		len = scsi_varlen_cdb_length(cdbp);
		if (len < 10) {
			printk("short opcode=0x%x command, len=%d "
			       "ext_len=%d", cdb0, len, cdb_len);
			break;
		}
		/* Variable-length CDBs carry the service action in bytes 8-9 */
		sa = (cdbp[8] << 8) + cdbp[9];
		printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
		if (len != cdb_len)
			printk(", in_cdb_len=%d, ext_len=%d", len, cdb_len);
		break;
	case MAINTENANCE_IN:
	case MAINTENANCE_OUT:
	case SERVICE_ACTION_IN_12:
	case SERVICE_ACTION_OUT_12:
	case SERVICE_ACTION_IN_16:
	case SERVICE_ACTION_OUT_16:
		sa = cdbp[1] & 0x1f;
		printk("cdb[0]=0x%x, sa=0x%x", cdb0, sa);
		break;
	default:
		if (cdb0 < 0xc0)
			printk("cdb[0]=0x%x", cdb0);
		else
			printk("cdb[0]=0x%x (vendor)", cdb0);
		break;
	}
}
#endif

/* Print the opcode name and every CDB byte; length is guessed from byte 0 */
void __scsi_print_command(unsigned char *cdb)
{
	int k, len;

	print_opcode_name(cdb, 0);
	len = scsi_command_size(cdb);
	/* print out all bytes in cdb */
	for (k = 0; k < len; ++k)
		printk(" %02x", cdb[k]);
	printk("\n");
}
EXPORT_SYMBOL(__scsi_print_command);

/* Like __scsi_print_command(), but for a scsi_cmnd with a known length */
void scsi_print_command(struct scsi_cmnd *cmd)
{
	int k;

	if (cmd->cmnd == NULL)
		return;

	scmd_printk(KERN_INFO, cmd, "CDB: ");
	print_opcode_name(cmd->cmnd, cmd->cmd_len);

	/* print out all bytes in cdb */
	printk(":");
	for (k = 0; k < cmd->cmd_len; ++k)
		printk(" %02x", cmd->cmnd[k]);
	printk("\n");
}
EXPORT_SYMBOL(scsi_print_command);

/**
 * scsi_print_status - print scsi status description
 * @scsi_status: scsi status value
 *
 * If the status is recognized, the description is printed.
 * Otherwise "Unknown status" is output. No trailing space.
 * If CONFIG_SCSI_CONSTANTS is not set, then print status in hex
 * (e.g. "0x2" for Check Condition).
**/
void scsi_print_status(unsigned char scsi_status)
{
#ifdef CONFIG_SCSI_CONSTANTS
	const char * ccp;

	switch (scsi_status) {
	case 0:    ccp = "Good";		   break;
	case 0x2:  ccp = "Check Condition";	   break;
	case 0x4:  ccp = "Condition Met";	   break;
	case 0x8:  ccp = "Busy";		   break;
	case 0x10: ccp = "Intermediate";	   break;
	case 0x14: ccp = "Intermediate-Condition Met"; break;
	case 0x18: ccp = "Reservation Conflict";   break;
	case 0x22: ccp = "Command Terminated";	   break;	/* obsolete */
	case 0x28: ccp = "Task set Full";	   break;	/* was: Queue Full */
	case 0x30: ccp = "ACA Active";		   break;
	case 0x40: ccp = "Task Aborted";	   break;
	default:   ccp = "Unknown status";
	}
	printk(KERN_INFO "%s", ccp);
#else
	printk(KERN_INFO "0x%0x", scsi_status);
#endif
}
EXPORT_SYMBOL(scsi_print_status);

#ifdef CONFIG_SCSI_CONSTANTS

struct error_info {
	unsigned short code12;	/* 0x0302 looks better than 0x03,0x02 */
	const char * text;
};

/*
 * The canonical list of T10 Additional Sense Codes is available at:
 * http://www.t10.org/lists/asc-num.txt
 */
/* ASC/ASCQ pairs packed as 0xAASS and their descriptions */
static const struct error_info additional[] =
{
	{0x0000, "No additional sense information"},
	{0x0001, "Filemark detected"},
	{0x0002, "End-of-partition/medium detected"},
	{0x0003, "Setmark detected"},
	{0x0004, "Beginning-of-partition/medium detected"},
	{0x0005, "End-of-data detected"},
	{0x0006, "I/O process terminated"},
	{0x0007, "Programmable early warning detected"},
	{0x0011, "Audio play operation in progress"},
	{0x0012, "Audio play operation paused"},
	{0x0013, "Audio play operation successfully completed"},
	{0x0014, "Audio play operation stopped due to error"},
	{0x0015, "No current audio status to return"},
	{0x0016, "Operation in progress"},
	{0x0017, "Cleaning requested"},
	{0x0018, "Erase operation in progress"},
	{0x0019, "Locate operation in progress"},
	{0x001A, "Rewind operation in progress"},
	{0x001B, "Set capacity operation in progress"},
	{0x001C, "Verify operation in progress"},
	{0x001D, "ATA pass through information available"},
	{0x001E, "Conflicting SA creation 
request"}, {0x0100, "No index/sector signal"}, {0x0200, "No seek complete"}, {0x0300, "Peripheral device write fault"}, {0x0301, "No write current"}, {0x0302, "Excessive write errors"}, {0x0400, "Logical unit not ready, cause not reportable"}, {0x0401, "Logical unit is in process of becoming ready"}, {0x0402, "Logical unit not ready, initializing command required"}, {0x0403, "Logical unit not ready, manual intervention required"}, {0x0404, "Logical unit not ready, format in progress"}, {0x0405, "Logical unit not ready, rebuild in progress"}, {0x0406, "Logical unit not ready, recalculation in progress"}, {0x0407, "Logical unit not ready, operation in progress"}, {0x0408, "Logical unit not ready, long write in progress"}, {0x0409, "Logical unit not ready, self-test in progress"}, {0x040A, "Logical unit not accessible, asymmetric access state " "transition"}, {0x040B, "Logical unit not accessible, target port in standby state"}, {0x040C, "Logical unit not accessible, target port in unavailable " "state"}, {0x040D, "Logical unit not ready, structure check required"}, {0x0410, "Logical unit not ready, auxiliary memory not accessible"}, {0x0411, "Logical unit not ready, notify (enable spinup) required"}, {0x0412, "Logical unit not ready, offline"}, {0x0413, "Logical unit not ready, SA creation in progress"}, {0x0414, "Logical unit not ready, space allocation in progress"}, {0x0500, "Logical unit does not respond to selection"}, {0x0600, "No reference position found"}, {0x0700, "Multiple peripheral devices selected"}, {0x0800, "Logical unit communication failure"}, {0x0801, "Logical unit communication time-out"}, {0x0802, "Logical unit communication parity error"}, {0x0803, "Logical unit communication CRC error (Ultra-DMA/32)"}, {0x0804, "Unreachable copy target"}, {0x0900, "Track following error"}, {0x0901, "Tracking servo failure"}, {0x0902, "Focus servo failure"}, {0x0903, "Spindle servo failure"}, {0x0904, "Head select fault"}, {0x0A00, "Error log overflow"}, {0x0B00, 
"Warning"}, {0x0B01, "Warning - specified temperature exceeded"}, {0x0B02, "Warning - enclosure degraded"}, {0x0B03, "Warning - background self-test failed"}, {0x0B04, "Warning - background pre-scan detected medium error"}, {0x0B05, "Warning - background medium scan detected medium error"}, {0x0B06, "Warning - non-volatile cache now volatile"}, {0x0B07, "Warning - degraded power to non-volatile cache"}, {0x0B08, "Warning - power loss expected"}, {0x0C00, "Write error"}, {0x0C01, "Write error - recovered with auto reallocation"}, {0x0C02, "Write error - auto reallocation failed"}, {0x0C03, "Write error - recommend reassignment"}, {0x0C04, "Compression check miscompare error"}, {0x0C05, "Data expansion occurred during compression"}, {0x0C06, "Block not compressible"}, {0x0C07, "Write error - recovery needed"}, {0x0C08, "Write error - recovery failed"}, {0x0C09, "Write error - loss of streaming"}, {0x0C0A, "Write error - padding blocks added"}, {0x0C0B, "Auxiliary memory write error"}, {0x0C0C, "Write error - unexpected unsolicited data"}, {0x0C0D, "Write error - not enough unsolicited data"}, {0x0C0F, "Defects in error window"}, {0x0D00, "Error detected by third party temporary initiator"}, {0x0D01, "Third party device failure"}, {0x0D02, "Copy target device not reachable"}, {0x0D03, "Incorrect copy target device type"}, {0x0D04, "Copy target device data underrun"}, {0x0D05, "Copy target device data overrun"}, {0x0E00, "Invalid information unit"}, {0x0E01, "Information unit too short"}, {0x0E02, "Information unit too long"}, {0x0E03, "Invalid field in command information unit"}, {0x1000, "Id CRC or ECC error"}, {0x1001, "Logical block guard check failed"}, {0x1002, "Logical block application tag check failed"}, {0x1003, "Logical block reference tag check failed"}, {0x1100, "Unrecovered read error"}, {0x1101, "Read retries exhausted"}, {0x1102, "Error too long to correct"}, {0x1103, "Multiple read errors"}, {0x1104, "Unrecovered read error - auto reallocate failed"}, 
{0x1105, "L-EC uncorrectable error"}, {0x1106, "CIRC unrecovered error"}, {0x1107, "Data re-synchronization error"}, {0x1108, "Incomplete block read"}, {0x1109, "No gap found"}, {0x110A, "Miscorrected error"}, {0x110B, "Unrecovered read error - recommend reassignment"}, {0x110C, "Unrecovered read error - recommend rewrite the data"}, {0x110D, "De-compression CRC error"}, {0x110E, "Cannot decompress using declared algorithm"}, {0x110F, "Error reading UPC/EAN number"}, {0x1110, "Error reading ISRC number"}, {0x1111, "Read error - loss of streaming"}, {0x1112, "Auxiliary memory read error"}, {0x1113, "Read error - failed retransmission request"}, {0x1114, "Read error - lba marked bad by application client"}, {0x1200, "Address mark not found for id field"}, {0x1300, "Address mark not found for data field"}, {0x1400, "Recorded entity not found"}, {0x1401, "Record not found"}, {0x1402, "Filemark or setmark not found"}, {0x1403, "End-of-data not found"}, {0x1404, "Block sequence error"}, {0x1405, "Record not found - recommend reassignment"}, {0x1406, "Record not found - data auto-reallocated"}, {0x1407, "Locate operation failure"}, {0x1500, "Random positioning error"}, {0x1501, "Mechanical positioning error"}, {0x1502, "Positioning error detected by read of medium"}, {0x1600, "Data synchronization mark error"}, {0x1601, "Data sync error - data rewritten"}, {0x1602, "Data sync error - recommend rewrite"}, {0x1603, "Data sync error - data auto-reallocated"}, {0x1604, "Data sync error - recommend reassignment"}, {0x1700, "Recovered data with no error correction applied"}, {0x1701, "Recovered data with retries"}, {0x1702, "Recovered data with positive head offset"}, {0x1703, "Recovered data with negative head offset"}, {0x1704, "Recovered data with retries and/or circ applied"}, {0x1705, "Recovered data using previous sector id"}, {0x1706, "Recovered data without ECC - data auto-reallocated"}, {0x1707, "Recovered data without ECC - recommend reassignment"}, {0x1708, 
"Recovered data without ECC - recommend rewrite"}, {0x1709, "Recovered data without ECC - data rewritten"}, {0x1800, "Recovered data with error correction applied"}, {0x1801, "Recovered data with error corr. & retries applied"}, {0x1802, "Recovered data - data auto-reallocated"}, {0x1803, "Recovered data with CIRC"}, {0x1804, "Recovered data with L-EC"}, {0x1805, "Recovered data - recommend reassignment"}, {0x1806, "Recovered data - recommend rewrite"}, {0x1807, "Recovered data with ECC - data rewritten"}, {0x1808, "Recovered data with linking"}, {0x1900, "Defect list error"}, {0x1901, "Defect list not available"}, {0x1902, "Defect list error in primary list"}, {0x1903, "Defect list error in grown list"}, {0x1A00, "Parameter list length error"}, {0x1B00, "Synchronous data transfer error"}, {0x1C00, "Defect list not found"}, {0x1C01, "Primary defect list not found"}, {0x1C02, "Grown defect list not found"}, {0x1D00, "Miscompare during verify operation"}, {0x1D01, "Miscompare verify of unmapped LBA"}, {0x1E00, "Recovered id with ECC correction"}, {0x1F00, "Partial defect list transfer"}, {0x2000, "Invalid command operation code"}, {0x2001, "Access denied - initiator pending-enrolled"}, {0x2002, "Access denied - no access rights"}, {0x2003, "Access denied - invalid mgmt id key"}, {0x2004, "Illegal command while in write capable state"}, {0x2005, "Obsolete"}, {0x2006, "Illegal command while in explicit address mode"}, {0x2007, "Illegal command while in implicit address mode"}, {0x2008, "Access denied - enrollment conflict"}, {0x2009, "Access denied - invalid LU identifier"}, {0x200A, "Access denied - invalid proxy token"}, {0x200B, "Access denied - ACL LUN conflict"}, {0x2100, "Logical block address out of range"}, {0x2101, "Invalid element address"}, {0x2102, "Invalid address for write"}, {0x2103, "Invalid write crossing layer jump"}, {0x2200, "Illegal function (use 20 00, 24 00, or 26 00)"}, {0x2400, "Invalid field in cdb"}, {0x2401, "CDB decryption error"}, {0x2402, 
"Obsolete"}, {0x2403, "Obsolete"}, {0x2404, "Security audit value frozen"}, {0x2405, "Security working key frozen"}, {0x2406, "Nonce not unique"}, {0x2407, "Nonce timestamp out of range"}, {0x2408, "Invalid XCDB"}, {0x2500, "Logical unit not supported"}, {0x2600, "Invalid field in parameter list"}, {0x2601, "Parameter not supported"}, {0x2602, "Parameter value invalid"}, {0x2603, "Threshold parameters not supported"}, {0x2604, "Invalid release of persistent reservation"}, {0x2605, "Data decryption error"}, {0x2606, "Too many target descriptors"}, {0x2607, "Unsupported target descriptor type code"}, {0x2608, "Too many segment descriptors"}, {0x2609, "Unsupported segment descriptor type code"}, {0x260A, "Unexpected inexact segment"}, {0x260B, "Inline data length exceeded"}, {0x260C, "Invalid operation for copy source or destination"}, {0x260D, "Copy segment granularity violation"}, {0x260E, "Invalid parameter while port is enabled"}, {0x260F, "Invalid data-out buffer integrity check value"}, {0x2610, "Data decryption key fail limit reached"}, {0x2611, "Incomplete key-associated data set"}, {0x2612, "Vendor specific key reference not found"}, {0x2700, "Write protected"}, {0x2701, "Hardware write protected"}, {0x2702, "Logical unit software write protected"}, {0x2703, "Associated write protect"}, {0x2704, "Persistent write protect"}, {0x2705, "Permanent write protect"}, {0x2706, "Conditional write protect"}, {0x2707, "Space allocation failed write protect"}, {0x2800, "Not ready to ready change, medium may have changed"}, {0x2801, "Import or export element accessed"}, {0x2802, "Format-layer may have changed"}, {0x2803, "Import/export element accessed, medium changed"}, {0x2900, "Power on, reset, or bus device reset occurred"}, {0x2901, "Power on occurred"}, {0x2902, "Scsi bus reset occurred"}, {0x2903, "Bus device reset function occurred"}, {0x2904, "Device internal reset"}, {0x2905, "Transceiver mode changed to single-ended"}, {0x2906, "Transceiver mode changed to 
lvd"}, {0x2907, "I_T nexus loss occurred"}, {0x2A00, "Parameters changed"}, {0x2A01, "Mode parameters changed"}, {0x2A02, "Log parameters changed"}, {0x2A03, "Reservations preempted"}, {0x2A04, "Reservations released"}, {0x2A05, "Registrations preempted"}, {0x2A06, "Asymmetric access state changed"}, {0x2A07, "Implicit asymmetric access state transition failed"}, {0x2A08, "Priority changed"}, {0x2A09, "Capacity data has changed"}, {0x2A0A, "Error history I_T nexus cleared"}, {0x2A0B, "Error history snapshot released"}, {0x2A0C, "Error recovery attributes have changed"}, {0x2A0D, "Data encryption capabilities changed"}, {0x2A10, "Timestamp changed"}, {0x2A11, "Data encryption parameters changed by another i_t nexus"}, {0x2A12, "Data encryption parameters changed by vendor specific " "event"}, {0x2A13, "Data encryption key instance counter has changed"}, {0x2A14, "SA creation capabilities data has changed"}, {0x2B00, "Copy cannot execute since host cannot disconnect"}, {0x2C00, "Command sequence error"}, {0x2C01, "Too many windows specified"}, {0x2C02, "Invalid combination of windows specified"}, {0x2C03, "Current program area is not empty"}, {0x2C04, "Current program area is empty"}, {0x2C05, "Illegal power condition request"}, {0x2C06, "Persistent prevent conflict"}, {0x2C07, "Previous busy status"}, {0x2C08, "Previous task set full status"}, {0x2C09, "Previous reservation conflict status"}, {0x2C0A, "Partition or collection contains user objects"}, {0x2C0B, "Not reserved"}, {0x2D00, "Overwrite error on update in place"}, {0x2E00, "Insufficient time for operation"}, {0x2F00, "Commands cleared by another initiator"}, {0x2F01, "Commands cleared by power loss notification"}, {0x2F02, "Commands cleared by device server"}, {0x3000, "Incompatible medium installed"}, {0x3001, "Cannot read medium - unknown format"}, {0x3002, "Cannot read medium - incompatible format"}, {0x3003, "Cleaning cartridge installed"}, {0x3004, "Cannot write medium - unknown format"}, {0x3005, 
"Cannot write medium - incompatible format"}, {0x3006, "Cannot format medium - incompatible medium"}, {0x3007, "Cleaning failure"}, {0x3008, "Cannot write - application code mismatch"}, {0x3009, "Current session not fixated for append"}, {0x300A, "Cleaning request rejected"}, {0x300C, "WORM medium - overwrite attempted"}, {0x300D, "WORM medium - integrity check"}, {0x3010, "Medium not formatted"}, {0x3011, "Incompatible volume type"}, {0x3012, "Incompatible volume qualifier"}, {0x3100, "Medium format corrupted"}, {0x3101, "Format command failed"}, {0x3102, "Zoned formatting failed due to spare linking"}, {0x3200, "No defect spare location available"}, {0x3201, "Defect list update failure"}, {0x3300, "Tape length error"}, {0x3400, "Enclosure failure"}, {0x3500, "Enclosure services failure"}, {0x3501, "Unsupported enclosure function"}, {0x3502, "Enclosure services unavailable"}, {0x3503, "Enclosure services transfer failure"}, {0x3504, "Enclosure services transfer refused"}, {0x3505, "Enclosure services checksum error"}, {0x3600, "Ribbon, ink, or toner failure"}, {0x3700, "Rounded parameter"}, {0x3800, "Event status notification"}, {0x3802, "Esn - power management class event"}, {0x3804, "Esn - media class event"}, {0x3806, "Esn - device busy class event"}, {0x3807, "Thin Provisioning soft threshold reached"}, {0x3900, "Saving parameters not supported"}, {0x3A00, "Medium not present"}, {0x3A01, "Medium not present - tray closed"}, {0x3A02, "Medium not present - tray open"}, {0x3A03, "Medium not present - loadable"}, {0x3A04, "Medium not present - medium auxiliary memory accessible"}, {0x3B00, "Sequential positioning error"}, {0x3B01, "Tape position error at beginning-of-medium"}, {0x3B02, "Tape position error at end-of-medium"}, {0x3B03, "Tape or electronic vertical forms unit not ready"}, {0x3B04, "Slew failure"}, {0x3B05, "Paper jam"}, {0x3B06, "Failed to sense top-of-form"}, {0x3B07, "Failed to sense bottom-of-form"}, {0x3B08, "Reposition error"}, {0x3B09, "Read 
past end of medium"}, {0x3B0A, "Read past beginning of medium"}, {0x3B0B, "Position past end of medium"}, {0x3B0C, "Position past beginning of medium"}, {0x3B0D, "Medium destination element full"}, {0x3B0E, "Medium source element empty"}, {0x3B0F, "End of medium reached"}, {0x3B11, "Medium magazine not accessible"}, {0x3B12, "Medium magazine removed"}, {0x3B13, "Medium magazine inserted"}, {0x3B14, "Medium magazine locked"}, {0x3B15, "Medium magazine unlocked"}, {0x3B16, "Mechanical positioning or changer error"}, {0x3B17, "Read past end of user object"}, {0x3B18, "Element disabled"}, {0x3B19, "Element enabled"}, {0x3B1A, "Data transfer device removed"}, {0x3B1B, "Data transfer device inserted"}, {0x3D00, "Invalid bits in identify message"}, {0x3E00, "Logical unit has not self-configured yet"}, {0x3E01, "Logical unit failure"}, {0x3E02, "Timeout on logical unit"}, {0x3E03, "Logical unit failed self-test"}, {0x3E04, "Logical unit unable to update self-test log"}, {0x3F00, "Target operating conditions have changed"}, {0x3F01, "Microcode has been changed"}, {0x3F02, "Changed operating definition"}, {0x3F03, "Inquiry data has changed"}, {0x3F04, "Component device attached"}, {0x3F05, "Device identifier changed"}, {0x3F06, "Redundancy group created or modified"}, {0x3F07, "Redundancy group deleted"}, {0x3F08, "Spare created or modified"}, {0x3F09, "Spare deleted"}, {0x3F0A, "Volume set created or modified"}, {0x3F0B, "Volume set deleted"}, {0x3F0C, "Volume set deassigned"}, {0x3F0D, "Volume set reassigned"}, {0x3F0E, "Reported luns data has changed"}, {0x3F0F, "Echo buffer overwritten"}, {0x3F10, "Medium loadable"}, {0x3F11, "Medium auxiliary memory accessible"}, {0x3F12, "iSCSI IP address added"}, {0x3F13, "iSCSI IP address removed"}, {0x3F14, "iSCSI IP address changed"}, /* * {0x40NN, "Ram failure"}, * {0x40NN, "Diagnostic failure on component nn"}, * {0x41NN, "Data path failure"}, * {0x42NN, "Power-on or self-test failure"}, */ {0x4300, "Message error"}, {0x4400, 
"Internal target failure"}, {0x4471, "ATA device failed set features"}, {0x4500, "Select or reselect failure"}, {0x4600, "Unsuccessful soft reset"}, {0x4700, "Scsi parity error"}, {0x4701, "Data phase CRC error detected"}, {0x4702, "Scsi parity error detected during st data phase"}, {0x4703, "Information unit iuCRC error detected"}, {0x4704, "Asynchronous information protection error detected"}, {0x4705, "Protocol service CRC error"}, {0x4706, "Phy test function in progress"}, {0x477f, "Some commands cleared by iSCSI Protocol event"}, {0x4800, "Initiator detected error message received"}, {0x4900, "Invalid message error"}, {0x4A00, "Command phase error"}, {0x4B00, "Data phase error"}, {0x4B01, "Invalid target port transfer tag received"}, {0x4B02, "Too much write data"}, {0x4B03, "Ack/nak timeout"}, {0x4B04, "Nak received"}, {0x4B05, "Data offset error"}, {0x4B06, "Initiator response timeout"}, {0x4C00, "Logical unit failed self-configuration"}, /* * {0x4DNN, "Tagged overlapped commands (nn = queue tag)"}, */ {0x4E00, "Overlapped commands attempted"}, {0x5000, "Write append error"}, {0x5001, "Write append position error"}, {0x5002, "Position error related to timing"}, {0x5100, "Erase failure"}, {0x5101, "Erase failure - incomplete erase operation detected"}, {0x5200, "Cartridge fault"}, {0x5300, "Media load or eject failed"}, {0x5301, "Unload tape failure"}, {0x5302, "Medium removal prevented"}, {0x5303, "Medium removal prevented by data transfer element"}, {0x5304, "Medium thread or unthread failure"}, {0x5400, "Scsi to host system interface failure"}, {0x5500, "System resource failure"}, {0x5501, "System buffer full"}, {0x5502, "Insufficient reservation resources"}, {0x5503, "Insufficient resources"}, {0x5504, "Insufficient registration resources"}, {0x5505, "Insufficient access control resources"}, {0x5506, "Auxiliary memory out of space"}, {0x5507, "Quota error"}, {0x5508, "Maximum number of supplemental decryption keys exceeded"}, {0x5509, "Medium auxiliary 
memory not accessible"}, {0x550A, "Data currently unavailable"}, {0x5700, "Unable to recover table-of-contents"}, {0x5800, "Generation does not exist"}, {0x5900, "Updated block read"}, {0x5A00, "Operator request or state change input"}, {0x5A01, "Operator medium removal request"}, {0x5A02, "Operator selected write protect"}, {0x5A03, "Operator selected write permit"}, {0x5B00, "Log exception"}, {0x5B01, "Threshold condition met"}, {0x5B02, "Log counter at maximum"}, {0x5B03, "Log list codes exhausted"}, {0x5C00, "Rpl status change"}, {0x5C01, "Spindles synchronized"}, {0x5C02, "Spindles not synchronized"}, {0x5D00, "Failure prediction threshold exceeded"}, {0x5D01, "Media failure prediction threshold exceeded"}, {0x5D02, "Logical unit failure prediction threshold exceeded"}, {0x5D03, "Spare area exhaustion prediction threshold exceeded"}, {0x5D10, "Hardware impending failure general hard drive failure"}, {0x5D11, "Hardware impending failure drive error rate too high"}, {0x5D12, "Hardware impending failure data error rate too high"}, {0x5D13, "Hardware impending failure seek error rate too high"}, {0x5D14, "Hardware impending failure too many block reassigns"}, {0x5D15, "Hardware impending failure access times too high"}, {0x5D16, "Hardware impending failure start unit times too high"}, {0x5D17, "Hardware impending failure channel parametrics"}, {0x5D18, "Hardware impending failure controller detected"}, {0x5D19, "Hardware impending failure throughput performance"}, {0x5D1A, "Hardware impending failure seek time performance"}, {0x5D1B, "Hardware impending failure spin-up retry count"}, {0x5D1C, "Hardware impending failure drive calibration retry count"}, {0x5D20, "Controller impending failure general hard drive failure"}, {0x5D21, "Controller impending failure drive error rate too high"}, {0x5D22, "Controller impending failure data error rate too high"}, {0x5D23, "Controller impending failure seek error rate too high"}, {0x5D24, "Controller impending failure too 
many block reassigns"}, {0x5D25, "Controller impending failure access times too high"}, {0x5D26, "Controller impending failure start unit times too high"}, {0x5D27, "Controller impending failure channel parametrics"}, {0x5D28, "Controller impending failure controller detected"}, {0x5D29, "Controller impending failure throughput performance"}, {0x5D2A, "Controller impending failure seek time performance"}, {0x5D2B, "Controller impending failure spin-up retry count"}, {0x5D2C, "Controller impending failure drive calibration retry count"}, {0x5D30, "Data channel impending failure general hard drive failure"}, {0x5D31, "Data channel impending failure drive error rate too high"}, {0x5D32, "Data channel impending failure data error rate too high"}, {0x5D33, "Data channel impending failure seek error rate too high"}, {0x5D34, "Data channel impending failure too many block reassigns"}, {0x5D35, "Data channel impending failure access times too high"}, {0x5D36, "Data channel impending failure start unit times too high"}, {0x5D37, "Data channel impending failure channel parametrics"}, {0x5D38, "Data channel impending failure controller detected"}, {0x5D39, "Data channel impending failure throughput performance"}, {0x5D3A, "Data channel impending failure seek time performance"}, {0x5D3B, "Data channel impending failure spin-up retry count"}, {0x5D3C, "Data channel impending failure drive calibration retry " "count"}, {0x5D40, "Servo impending failure general hard drive failure"}, {0x5D41, "Servo impending failure drive error rate too high"}, {0x5D42, "Servo impending failure data error rate too high"}, {0x5D43, "Servo impending failure seek error rate too high"}, {0x5D44, "Servo impending failure too many block reassigns"}, {0x5D45, "Servo impending failure access times too high"}, {0x5D46, "Servo impending failure start unit times too high"}, {0x5D47, "Servo impending failure channel parametrics"}, {0x5D48, "Servo impending failure controller detected"}, {0x5D49, "Servo 
impending failure throughput performance"}, {0x5D4A, "Servo impending failure seek time performance"}, {0x5D4B, "Servo impending failure spin-up retry count"}, {0x5D4C, "Servo impending failure drive calibration retry count"}, {0x5D50, "Spindle impending failure general hard drive failure"}, {0x5D51, "Spindle impending failure drive error rate too high"}, {0x5D52, "Spindle impending failure data error rate too high"}, {0x5D53, "Spindle impending failure seek error rate too high"}, {0x5D54, "Spindle impending failure too many block reassigns"}, {0x5D55, "Spindle impending failure access times too high"}, {0x5D56, "Spindle impending failure start unit times too high"}, {0x5D57, "Spindle impending failure channel parametrics"}, {0x5D58, "Spindle impending failure controller detected"}, {0x5D59, "Spindle impending failure throughput performance"}, {0x5D5A, "Spindle impending failure seek time performance"}, {0x5D5B, "Spindle impending failure spin-up retry count"}, {0x5D5C, "Spindle impending failure drive calibration retry count"}, {0x5D60, "Firmware impending failure general hard drive failure"}, {0x5D61, "Firmware impending failure drive error rate too high"}, {0x5D62, "Firmware impending failure data error rate too high"}, {0x5D63, "Firmware impending failure seek error rate too high"}, {0x5D64, "Firmware impending failure too many block reassigns"}, {0x5D65, "Firmware impending failure access times too high"}, {0x5D66, "Firmware impending failure start unit times too high"}, {0x5D67, "Firmware impending failure channel parametrics"}, {0x5D68, "Firmware impending failure controller detected"}, {0x5D69, "Firmware impending failure throughput performance"}, {0x5D6A, "Firmware impending failure seek time performance"}, {0x5D6B, "Firmware impending failure spin-up retry count"}, {0x5D6C, "Firmware impending failure drive calibration retry count"}, {0x5DFF, "Failure prediction threshold exceeded (false)"}, {0x5E00, "Low power condition on"}, {0x5E01, "Idle condition 
activated by timer"}, {0x5E02, "Standby condition activated by timer"}, {0x5E03, "Idle condition activated by command"}, {0x5E04, "Standby condition activated by command"}, {0x5E05, "Idle_b condition activated by timer"}, {0x5E06, "Idle_b condition activated by command"}, {0x5E07, "Idle_c condition activated by timer"}, {0x5E08, "Idle_c condition activated by command"}, {0x5E09, "Standby_y condition activated by timer"}, {0x5E0A, "Standby_y condition activated by command"}, {0x5E41, "Power state change to active"}, {0x5E42, "Power state change to idle"}, {0x5E43, "Power state change to standby"}, {0x5E45, "Power state change to sleep"}, {0x5E47, "Power state change to device control"}, {0x6000, "Lamp failure"}, {0x6100, "Video acquisition error"}, {0x6101, "Unable to acquire video"}, {0x6102, "Out of focus"}, {0x6200, "Scan head positioning error"}, {0x6300, "End of user area encountered on this track"}, {0x6301, "Packet does not fit in available space"}, {0x6400, "Illegal mode for this track"}, {0x6401, "Invalid packet size"}, {0x6500, "Voltage fault"}, {0x6600, "Automatic document feeder cover up"}, {0x6601, "Automatic document feeder lift up"}, {0x6602, "Document jam in automatic document feeder"}, {0x6603, "Document miss feed automatic in document feeder"}, {0x6700, "Configuration failure"}, {0x6701, "Configuration of incapable logical units failed"}, {0x6702, "Add logical unit failed"}, {0x6703, "Modification of logical unit failed"}, {0x6704, "Exchange of logical unit failed"}, {0x6705, "Remove of logical unit failed"}, {0x6706, "Attachment of logical unit failed"}, {0x6707, "Creation of logical unit failed"}, {0x6708, "Assign failure occurred"}, {0x6709, "Multiply assigned logical unit"}, {0x670A, "Set target port groups command failed"}, {0x670B, "ATA device feature not enabled"}, {0x6800, "Logical unit not configured"}, {0x6900, "Data loss on logical unit"}, {0x6901, "Multiple logical unit failures"}, {0x6902, "Parity/data mismatch"}, {0x6A00, 
"Informational, refer to log"}, {0x6B00, "State change has occurred"}, {0x6B01, "Redundancy level got better"}, {0x6B02, "Redundancy level got worse"}, {0x6C00, "Rebuild failure occurred"}, {0x6D00, "Recalculate failure occurred"}, {0x6E00, "Command to logical unit failed"}, {0x6F00, "Copy protection key exchange failure - authentication " "failure"}, {0x6F01, "Copy protection key exchange failure - key not present"}, {0x6F02, "Copy protection key exchange failure - key not established"}, {0x6F03, "Read of scrambled sector without authentication"}, {0x6F04, "Media region code is mismatched to logical unit region"}, {0x6F05, "Drive region must be permanent/region reset count error"}, {0x6F06, "Insufficient block count for binding nonce recording"}, {0x6F07, "Conflict in binding nonce recording"}, /* * {0x70NN, "Decompression exception short algorithm id of nn"}, */ {0x7100, "Decompression exception long algorithm id"}, {0x7200, "Session fixation error"}, {0x7201, "Session fixation error writing lead-in"}, {0x7202, "Session fixation error writing lead-out"}, {0x7203, "Session fixation error - incomplete track in session"}, {0x7204, "Empty or partially written reserved track"}, {0x7205, "No more track reservations allowed"}, {0x7206, "RMZ extension is not allowed"}, {0x7207, "No more test zone extensions are allowed"}, {0x7300, "Cd control error"}, {0x7301, "Power calibration area almost full"}, {0x7302, "Power calibration area is full"}, {0x7303, "Power calibration area error"}, {0x7304, "Program memory area update failure"}, {0x7305, "Program memory area is full"}, {0x7306, "RMA/PMA is almost full"}, {0x7310, "Current power calibration area almost full"}, {0x7311, "Current power calibration area is full"}, {0x7317, "RDZ is full"}, {0x7400, "Security error"}, {0x7401, "Unable to decrypt data"}, {0x7402, "Unencrypted data encountered while decrypting"}, {0x7403, "Incorrect data encryption key"}, {0x7404, "Cryptographic integrity validation failed"}, {0x7405, "Error 
decrypting data"}, {0x7406, "Unknown signature verification key"},
	{0x7407, "Encryption parameters not useable"},
	{0x7408, "Digital signature validation failure"},
	{0x7409, "Encryption mode mismatch on read"},
	{0x740A, "Encrypted block not raw read enabled"},
	{0x740B, "Incorrect Encryption parameters"},
	{0x740C, "Unable to decrypt parameter list"},
	{0x740D, "Encryption algorithm disabled"},
	{0x7410, "SA creation parameter value invalid"},
	{0x7411, "SA creation parameter value rejected"},
	{0x7412, "Invalid SA usage"},
	{0x7421, "Data Encryption configuration prevented"},
	{0x7430, "SA creation parameter not supported"},
	{0x7440, "Authentication failed"},
	{0x7461, "External data encryption key manager access error"},
	{0x7462, "External data encryption key manager error"},
	{0x7463, "External data encryption key not found"},
	{0x7464, "External data encryption request not authorized"},
	{0x746E, "External data encryption control timeout"},
	{0x746F, "External data encryption control error"},
	{0x7471, "Logical unit access not authorized"},
	{0x7479, "Security conflict in translated device"},
	{0, NULL}	/* terminator for the linear scan in scsi_extd_sense_format() */
};

/*
 * Range entry for ASC codes whose ASCQ byte is a parameter (for example a
 * component number): an entry matches when asc == code1 and
 * code2_min <= ascq <= code2_max.  fmt may contain a "%x" which callers
 * print with ascq as the argument.
 */
struct error_info2 {
	unsigned char code1, code2_min, code2_max;
	const char * fmt;
};

/* ASC/ASCQ-range descriptions; terminated by a NULL fmt */
static const struct error_info2 additional2[] = {
	{0x40, 0x00, 0x7f, "Ram failure (%x)"},
	{0x40, 0x80, 0xff, "Diagnostic failure on component (%x)"},
	{0x41, 0x00, 0xff, "Data path failure (%x)"},
	{0x42, 0x00, 0xff, "Power-on or self-test failure (%x)"},
	{0x4D, 0x00, 0xff, "Tagged overlapped commands (task tag %x)"},
	{0x70, 0x00, 0xff, "Decompression exception short algorithm id of %x"},
	{0, 0, 0, NULL}
};

/* description of the sense key values */
static const char * const snstext[] = {
	"No Sense",	    /* 0: There is no sense information */
	"Recovered Error",  /* 1: The last command completed successfully
			       but used error correction */
	"Not Ready",	    /* 2: The addressed target is not ready */
	"Medium Error",	    /* 3: Data error detected on the medium */
	"Hardware Error",   /* 4: Controller or device failure 
*/
	"Illegal Request",  /* 5: Error in request */
	"Unit Attention",   /* 6: Removable medium was changed, or
			       the target has been reset, or ... */
	"Data Protect",	    /* 7: Access to the data is blocked */
	"Blank Check",	    /* 8: Reached unexpected written or unwritten
			       region of the medium */
	"Vendor Specific(9)",
	"Copy Aborted",	    /* A: COPY or COMPARE was aborted */
	"Aborted Command",  /* B: The target aborted the command */
	"Equal",	    /* C: A SEARCH DATA command found data equal */
	"Volume Overflow",  /* D: Medium full with still data to be written */
	"Miscompare",	    /* E: Source data and data on the medium
			       do not agree */
};
#endif

/* Get sense key string or NULL if not available */
const char *
scsi_sense_key_string(unsigned char key)
{
#ifdef CONFIG_SCSI_CONSTANTS
	/* snstext has entries for keys 0x0..0xE only */
	if (key <= 0xE)
		return snstext[key];
#endif
	return NULL;
}
EXPORT_SYMBOL(scsi_sense_key_string);

/*
 * Get additional sense code string or NULL if not available.
 * This string may contain a "%x" and should be printed with ascq as arg.
 */
const char *
scsi_extd_sense_format(unsigned char asc, unsigned char ascq)
{
#ifdef CONFIG_SCSI_CONSTANTS
	int i;
	unsigned short code = ((asc << 8) | ascq);

	/* exact (asc,ascq) pairs first ... */
	for (i = 0; additional[i].text; i++)
		if (additional[i].code12 == code)
			return additional[i].text;
	/* ... then the (asc, ascq-range) parameterized table */
	for (i = 0; additional2[i].fmt; i++) {
		if (additional2[i].code1 == asc &&
		    ascq >= additional2[i].code2_min &&
		    ascq <= additional2[i].code2_max)
			return additional2[i].fmt;
	}
#endif
	return NULL;
}
EXPORT_SYMBOL(scsi_extd_sense_format);

/* Print the textual description (or raw codes) for an ASC/ASCQ pair */
void
scsi_show_extd_sense(unsigned char asc, unsigned char ascq)
{
	const char *extd_sense_fmt = scsi_extd_sense_format(asc, ascq);

	if (extd_sense_fmt) {
		if (strstr(extd_sense_fmt, "%x")) {
			/* range entry: format string consumes ascq */
			printk("Add. Sense: ");
			printk(extd_sense_fmt, ascq);
		} else
			printk("Add. 
Sense: %s", extd_sense_fmt); } else { if (asc >= 0x80) printk("<<vendor>> ASC=0x%x ASCQ=0x%x", asc, ascq); if (ascq >= 0x80) printk("ASC=0x%x <<vendor>> ASCQ=0x%x", asc, ascq); else printk("ASC=0x%x ASCQ=0x%x", asc, ascq); } printk("\n"); } EXPORT_SYMBOL(scsi_show_extd_sense); void scsi_show_sense_hdr(struct scsi_sense_hdr *sshdr) { const char *sense_txt; sense_txt = scsi_sense_key_string(sshdr->sense_key); if (sense_txt) printk("Sense Key : %s ", sense_txt); else printk("Sense Key : 0x%x ", sshdr->sense_key); printk("%s", scsi_sense_is_deferred(sshdr) ? "[deferred] " : "[current] "); if (sshdr->response_code >= 0x72) printk("[descriptor]"); printk("\n"); } EXPORT_SYMBOL(scsi_show_sense_hdr); /* * Print normalized SCSI sense header with a prefix. */ void scsi_print_sense_hdr(const char *name, struct scsi_sense_hdr *sshdr) { printk(KERN_INFO "%s: ", name); scsi_show_sense_hdr(sshdr); printk(KERN_INFO "%s: ", name); scsi_show_extd_sense(sshdr->asc, sshdr->ascq); } EXPORT_SYMBOL(scsi_print_sense_hdr); /* * Print normalized SCSI sense header with device information and a prefix. */ void scsi_cmd_print_sense_hdr(struct scsi_cmnd *scmd, const char *desc, struct scsi_sense_hdr *sshdr) { scmd_printk(KERN_INFO, scmd, "%s: ", desc); scsi_show_sense_hdr(sshdr); scmd_printk(KERN_INFO, scmd, "%s: ", desc); scsi_show_extd_sense(sshdr->asc, sshdr->ascq); } EXPORT_SYMBOL(scsi_cmd_print_sense_hdr); static void scsi_decode_sense_buffer(const unsigned char *sense_buffer, int sense_len, struct scsi_sense_hdr *sshdr) { int k, num, res; res = scsi_normalize_sense(sense_buffer, sense_len, sshdr); if (0 == res) { /* this may be SCSI-1 sense data */ num = (sense_len < 32) ? 
sense_len : 32;
		printk("Unrecognized sense data (in hex):");
		/* dump the first 32 bytes, 16 per output line */
		for (k = 0; k < num; ++k) {
			if (0 == (k % 16)) {
				printk("\n");
				printk(KERN_INFO " ");
			}
			printk("%02x ", sense_buffer[k]);
		}
		printk("\n");
		return;
	}
}

/*
 * Decode the parts of the sense data beyond the normalized header:
 * for fixed format, the information field and the FMK/EOM/ILI flags;
 * for descriptor format, a hex dump of the sense descriptors.
 */
static void
scsi_decode_sense_extras(const unsigned char *sense_buffer, int sense_len,
			 struct scsi_sense_hdr *sshdr)
{
	int k, num, res;

	if (sshdr->response_code < 0x72) {
		/* only decode extras for "fixed" format now */
		char buff[80];
		int blen, fixed_valid;
		unsigned int info;

		/* byte 0 bit 7: the information field below is valid */
		fixed_valid = sense_buffer[0] & 0x80;
		info = ((sense_buffer[3] << 24) | (sense_buffer[4] << 16) |
			(sense_buffer[5] << 8) | sense_buffer[6]);
		res = 0;
		memset(buff, 0, sizeof(buff));
		blen = sizeof(buff) - 1;
		/* accumulate comma-separated items into buff via res */
		if (fixed_valid)
			res += snprintf(buff + res, blen - res,
					"Info fld=0x%x", info);
		if (sense_buffer[2] & 0x80) {
			/* current command has read a filemark */
			if (res > 0)
				res += snprintf(buff + res, blen - res, ", ");
			res += snprintf(buff + res, blen - res, "FMK");
		}
		if (sense_buffer[2] & 0x40) {
			/* end-of-medium condition exists */
			if (res > 0)
				res += snprintf(buff + res, blen - res, ", ");
			res += snprintf(buff + res, blen - res, "EOM");
		}
		if (sense_buffer[2] & 0x20) {
			/* incorrect block length requested */
			if (res > 0)
				res += snprintf(buff + res, blen - res, ", ");
			res += snprintf(buff + res, blen - res, "ILI");
		}
		if (res > 0)
			printk("%s\n", buff);
	} else if (sshdr->additional_length > 0) {
		/* descriptor format with sense descriptors */
		num = 8 + sshdr->additional_length;
		num = (sense_len < num) ? 
sense_len : num;
		printk("Descriptor sense data with sense descriptors "
		       "(in hex):");
		/* dump header + descriptors, 16 bytes per output line */
		for (k = 0; k < num; ++k) {
			if (0 == (k % 16)) {
				printk("\n");
				printk(KERN_INFO " ");
			}
			printk("%02x ", sense_buffer[k]);
		}
		printk("\n");
	}
}

/* Normalize and print sense buffer with name prefix */
void __scsi_print_sense(const char *name, const unsigned char *sense_buffer,
			int sense_len)
{
	struct scsi_sense_hdr sshdr;

	printk(KERN_INFO "%s: ", name);
	scsi_decode_sense_buffer(sense_buffer, sense_len, &sshdr);
	scsi_show_sense_hdr(&sshdr);
	scsi_decode_sense_extras(sense_buffer, sense_len, &sshdr);
	printk(KERN_INFO "%s: ", name);
	scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
}
EXPORT_SYMBOL(__scsi_print_sense);

/* Normalize and print sense buffer in SCSI command */
/* NOTE(review): the name parameter is never referenced here -- the log
 * prefix comes from scmd_printk() instead. */
void scsi_print_sense(char *name, struct scsi_cmnd *cmd)
{
	struct scsi_sense_hdr sshdr;

	scmd_printk(KERN_INFO, cmd, " ");
	scsi_decode_sense_buffer(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
				 &sshdr);
	scsi_show_sense_hdr(&sshdr);
	scsi_decode_sense_extras(cmd->sense_buffer, SCSI_SENSE_BUFFERSIZE,
				 &sshdr);
	scmd_printk(KERN_INFO, cmd, " ");
	scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
}
EXPORT_SYMBOL(scsi_print_sense);

#ifdef CONFIG_SCSI_CONSTANTS
/* host_byte() names, indexed by the DID_* status value */
static const char * const hostbyte_table[]={
"DID_OK", "DID_NO_CONNECT", "DID_BUS_BUSY", "DID_TIME_OUT", "DID_BAD_TARGET",
"DID_ABORT", "DID_PARITY", "DID_ERROR", "DID_RESET", "DID_BAD_INTR",
"DID_PASSTHROUGH", "DID_SOFT_ERROR", "DID_IMM_RETRY", "DID_REQUEUE",
"DID_TRANSPORT_DISRUPTED", "DID_TRANSPORT_FAILFAST" };
#define NUM_HOSTBYTE_STRS ARRAY_SIZE(hostbyte_table)

/* driver_byte() names, indexed by the DRIVER_* status value */
static const char * const driverbyte_table[]={
"DRIVER_OK", "DRIVER_BUSY", "DRIVER_SOFT",  "DRIVER_MEDIA", "DRIVER_ERROR",
"DRIVER_INVALID", "DRIVER_TIMEOUT", "DRIVER_HARD", "DRIVER_SENSE"};
#define NUM_DRIVERBYTE_STRS ARRAY_SIZE(driverbyte_table)

/* Print symbolic host/driver status bytes of a SCSI result word */
void scsi_show_result(int result)
{
	int hb = host_byte(result);
	int db = driver_byte(result);

	printk("Result: hostbyte=%s driverbyte=%s\n",
	       (hb < NUM_HOSTBYTE_STRS ? 
hostbyte_table[hb] : "invalid"), (db < NUM_DRIVERBYTE_STRS ? driverbyte_table[db] : "invalid")); } #else void scsi_show_result(int result) { printk("Result: hostbyte=0x%02x driverbyte=0x%02x\n", host_byte(result), driver_byte(result)); } #endif EXPORT_SYMBOL(scsi_show_result); void scsi_print_result(struct scsi_cmnd *cmd) { scmd_printk(KERN_INFO, cmd, " "); scsi_show_result(cmd->result); } EXPORT_SYMBOL(scsi_print_result);
gpl-2.0
Kali-/tf101-kernel
drivers/infiniband/hw/ehca/ehca_cq.c
8382
11814
/* * IBM eServer eHCA Infiniband device driver for Linux on POWER * * Completion queue handling * * Authors: Waleri Fomin <fomin@de.ibm.com> * Khadija Souissi <souissi@de.ibm.com> * Reinhard Ernst <rernst@de.ibm.com> * Heiko J Schick <schickhj@de.ibm.com> * Hoang-Nam Nguyen <hnguyen@de.ibm.com> * * * Copyright (c) 2005 IBM Corporation * * All rights reserved. * * This source code is distributed under a dual license of GPL v2.0 and OpenIB * BSD. * * OpenIB BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/slab.h> #include "ehca_iverbs.h" #include "ehca_classes.h" #include "ehca_irq.h" #include "hcp_if.h" static struct kmem_cache *cq_cache; int ehca_cq_assign_qp(struct ehca_cq *cq, struct ehca_qp *qp) { unsigned int qp_num = qp->real_qp_num; unsigned int key = qp_num & (QP_HASHTAB_LEN-1); unsigned long flags; spin_lock_irqsave(&cq->spinlock, flags); hlist_add_head(&qp->list_entries, &cq->qp_hashtab[key]); spin_unlock_irqrestore(&cq->spinlock, flags); ehca_dbg(cq->ib_cq.device, "cq_num=%x real_qp_num=%x", cq->cq_number, qp_num); return 0; } int ehca_cq_unassign_qp(struct ehca_cq *cq, unsigned int real_qp_num) { int ret = -EINVAL; unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1); struct hlist_node *iter; struct ehca_qp *qp; unsigned long flags; spin_lock_irqsave(&cq->spinlock, flags); hlist_for_each(iter, &cq->qp_hashtab[key]) { qp = hlist_entry(iter, struct ehca_qp, list_entries); if (qp->real_qp_num == real_qp_num) { hlist_del(iter); ehca_dbg(cq->ib_cq.device, "removed qp from cq .cq_num=%x real_qp_num=%x", cq->cq_number, real_qp_num); ret = 0; break; } } spin_unlock_irqrestore(&cq->spinlock, flags); if (ret) ehca_err(cq->ib_cq.device, "qp not found cq_num=%x real_qp_num=%x", cq->cq_number, real_qp_num); return ret; } struct ehca_qp *ehca_cq_get_qp(struct ehca_cq *cq, int real_qp_num) { struct ehca_qp *ret = NULL; unsigned int key = real_qp_num & (QP_HASHTAB_LEN-1); struct hlist_node *iter; struct ehca_qp *qp; hlist_for_each(iter, &cq->qp_hashtab[key]) { qp = hlist_entry(iter, struct ehca_qp, list_entries); if (qp->real_qp_num == real_qp_num) { ret = qp; break; } } return ret; } struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector, struct ib_ucontext *context, struct ib_udata *udata) { static const u32 additional_cqe = 20; struct ib_cq *cq; struct ehca_cq *my_cq; struct ehca_shca *shca = container_of(device, struct ehca_shca, ib_device); struct ipz_adapter_handle adapter_handle; struct ehca_alloc_cq_parms param; /* 
h_call's out parameters */ struct h_galpa gal; void *vpage; u32 counter; u64 rpage, cqx_fec, h_ret; int ipz_rc, ret, i; unsigned long flags; if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) return ERR_PTR(-EINVAL); if (!atomic_add_unless(&shca->num_cqs, 1, shca->max_num_cqs)) { ehca_err(device, "Unable to create CQ, max number of %i " "CQs reached.", shca->max_num_cqs); ehca_err(device, "To increase the maximum number of CQs " "use the number_of_cqs module parameter.\n"); return ERR_PTR(-ENOSPC); } my_cq = kmem_cache_zalloc(cq_cache, GFP_KERNEL); if (!my_cq) { ehca_err(device, "Out of memory for ehca_cq struct device=%p", device); atomic_dec(&shca->num_cqs); return ERR_PTR(-ENOMEM); } memset(&param, 0, sizeof(struct ehca_alloc_cq_parms)); spin_lock_init(&my_cq->spinlock); spin_lock_init(&my_cq->cb_lock); spin_lock_init(&my_cq->task_lock); atomic_set(&my_cq->nr_events, 0); init_waitqueue_head(&my_cq->wait_completion); cq = &my_cq->ib_cq; adapter_handle = shca->ipz_hca_handle; param.eq_handle = shca->eq.ipz_eq_handle; do { if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) { cq = ERR_PTR(-ENOMEM); ehca_err(device, "Can't reserve idr nr. device=%p", device); goto create_cq_exit1; } write_lock_irqsave(&ehca_cq_idr_lock, flags); ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token); write_unlock_irqrestore(&ehca_cq_idr_lock, flags); } while (ret == -EAGAIN); if (ret) { cq = ERR_PTR(-ENOMEM); ehca_err(device, "Can't allocate new idr entry. device=%p", device); goto create_cq_exit1; } if (my_cq->token > 0x1FFFFFF) { cq = ERR_PTR(-ENOMEM); ehca_err(device, "Invalid number of cq. device=%p", device); goto create_cq_exit2; } /* * CQs maximum depth is 4GB-64, but we need additional 20 as buffer * for receiving errors CQEs. 
*/ param.nr_cqe = cqe + additional_cqe; h_ret = hipz_h_alloc_resource_cq(adapter_handle, my_cq, &param); if (h_ret != H_SUCCESS) { ehca_err(device, "hipz_h_alloc_resource_cq() failed " "h_ret=%lli device=%p", h_ret, device); cq = ERR_PTR(ehca2ib_return_code(h_ret)); goto create_cq_exit2; } ipz_rc = ipz_queue_ctor(NULL, &my_cq->ipz_queue, param.act_pages, EHCA_PAGESIZE, sizeof(struct ehca_cqe), 0, 0); if (!ipz_rc) { ehca_err(device, "ipz_queue_ctor() failed ipz_rc=%i device=%p", ipz_rc, device); cq = ERR_PTR(-EINVAL); goto create_cq_exit3; } for (counter = 0; counter < param.act_pages; counter++) { vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue); if (!vpage) { ehca_err(device, "ipz_qpageit_get_inc() " "returns NULL device=%p", device); cq = ERR_PTR(-EAGAIN); goto create_cq_exit4; } rpage = virt_to_abs(vpage); h_ret = hipz_h_register_rpage_cq(adapter_handle, my_cq->ipz_cq_handle, &my_cq->pf, 0, 0, rpage, 1, my_cq->galpas. kernel); if (h_ret < H_SUCCESS) { ehca_err(device, "hipz_h_register_rpage_cq() failed " "ehca_cq=%p cq_num=%x h_ret=%lli counter=%i " "act_pages=%i", my_cq, my_cq->cq_number, h_ret, counter, param.act_pages); cq = ERR_PTR(-EINVAL); goto create_cq_exit4; } if (counter == (param.act_pages - 1)) { vpage = ipz_qpageit_get_inc(&my_cq->ipz_queue); if ((h_ret != H_SUCCESS) || vpage) { ehca_err(device, "Registration of pages not " "complete ehca_cq=%p cq_num=%x " "h_ret=%lli", my_cq, my_cq->cq_number, h_ret); cq = ERR_PTR(-EAGAIN); goto create_cq_exit4; } } else { if (h_ret != H_PAGE_REGISTERED) { ehca_err(device, "Registration of page failed " "ehca_cq=%p cq_num=%x h_ret=%lli " "counter=%i act_pages=%i", my_cq, my_cq->cq_number, h_ret, counter, param.act_pages); cq = ERR_PTR(-ENOMEM); goto create_cq_exit4; } } } ipz_qeit_reset(&my_cq->ipz_queue); gal = my_cq->galpas.kernel; cqx_fec = hipz_galpa_load(gal, CQTEMM_OFFSET(cqx_fec)); ehca_dbg(device, "ehca_cq=%p cq_num=%x CQX_FEC=%llx", my_cq, my_cq->cq_number, cqx_fec); my_cq->ib_cq.cqe = my_cq->nr_of_entries = 
param.act_nr_of_entries - additional_cqe; my_cq->cq_number = (my_cq->ipz_cq_handle.handle) & 0xffff; for (i = 0; i < QP_HASHTAB_LEN; i++) INIT_HLIST_HEAD(&my_cq->qp_hashtab[i]); INIT_LIST_HEAD(&my_cq->sqp_err_list); INIT_LIST_HEAD(&my_cq->rqp_err_list); if (context) { struct ipz_queue *ipz_queue = &my_cq->ipz_queue; struct ehca_create_cq_resp resp; memset(&resp, 0, sizeof(resp)); resp.cq_number = my_cq->cq_number; resp.token = my_cq->token; resp.ipz_queue.qe_size = ipz_queue->qe_size; resp.ipz_queue.act_nr_of_sg = ipz_queue->act_nr_of_sg; resp.ipz_queue.queue_length = ipz_queue->queue_length; resp.ipz_queue.pagesize = ipz_queue->pagesize; resp.ipz_queue.toggle_state = ipz_queue->toggle_state; resp.fw_handle_ofs = (u32) (my_cq->galpas.user.fw_handle & (PAGE_SIZE - 1)); if (ib_copy_to_udata(udata, &resp, sizeof(resp))) { ehca_err(device, "Copy to udata failed."); goto create_cq_exit4; } } return cq; create_cq_exit4: ipz_queue_dtor(NULL, &my_cq->ipz_queue); create_cq_exit3: h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1); if (h_ret != H_SUCCESS) ehca_err(device, "hipz_h_destroy_cq() failed ehca_cq=%p " "cq_num=%x h_ret=%lli", my_cq, my_cq->cq_number, h_ret); create_cq_exit2: write_lock_irqsave(&ehca_cq_idr_lock, flags); idr_remove(&ehca_cq_idr, my_cq->token); write_unlock_irqrestore(&ehca_cq_idr_lock, flags); create_cq_exit1: kmem_cache_free(cq_cache, my_cq); atomic_dec(&shca->num_cqs); return cq; } int ehca_destroy_cq(struct ib_cq *cq) { u64 h_ret; struct ehca_cq *my_cq = container_of(cq, struct ehca_cq, ib_cq); int cq_num = my_cq->cq_number; struct ib_device *device = cq->device; struct ehca_shca *shca = container_of(device, struct ehca_shca, ib_device); struct ipz_adapter_handle adapter_handle = shca->ipz_hca_handle; unsigned long flags; if (cq->uobject) { if (my_cq->mm_count_galpa || my_cq->mm_count_queue) { ehca_err(device, "Resources still referenced in " "user space cq_num=%x", my_cq->cq_number); return -EINVAL; } } /* * remove the CQ from the idr first to 
make sure * no more interrupt tasklets will touch this CQ */ write_lock_irqsave(&ehca_cq_idr_lock, flags); idr_remove(&ehca_cq_idr, my_cq->token); write_unlock_irqrestore(&ehca_cq_idr_lock, flags); /* now wait until all pending events have completed */ wait_event(my_cq->wait_completion, !atomic_read(&my_cq->nr_events)); /* nobody's using our CQ any longer -- we can destroy it */ h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 0); if (h_ret == H_R_STATE) { /* cq in err: read err data and destroy it forcibly */ ehca_dbg(device, "ehca_cq=%p cq_num=%x resource=%llx in err " "state. Try to delete it forcibly.", my_cq, cq_num, my_cq->ipz_cq_handle.handle); ehca_error_data(shca, my_cq, my_cq->ipz_cq_handle.handle); h_ret = hipz_h_destroy_cq(adapter_handle, my_cq, 1); if (h_ret == H_SUCCESS) ehca_dbg(device, "cq_num=%x deleted successfully.", cq_num); } if (h_ret != H_SUCCESS) { ehca_err(device, "hipz_h_destroy_cq() failed h_ret=%lli " "ehca_cq=%p cq_num=%x", h_ret, my_cq, cq_num); return ehca2ib_return_code(h_ret); } ipz_queue_dtor(NULL, &my_cq->ipz_queue); kmem_cache_free(cq_cache, my_cq); atomic_dec(&shca->num_cqs); return 0; } int ehca_resize_cq(struct ib_cq *cq, int cqe, struct ib_udata *udata) { /* TODO: proper resize needs to be done */ ehca_err(cq->device, "not implemented yet"); return -EFAULT; } int ehca_init_cq_cache(void) { cq_cache = kmem_cache_create("ehca_cache_cq", sizeof(struct ehca_cq), 0, SLAB_HWCACHE_ALIGN, NULL); if (!cq_cache) return -ENOMEM; return 0; } void ehca_cleanup_cq_cache(void) { if (cq_cache) kmem_cache_destroy(cq_cache); }
gpl-2.0
Krabappel2548/tf300_jb_kernel
drivers/input/keyboard/newtonkbd.c
9918
4968
/* * Copyright (c) 2000 Justin Cormack */ /* * Newton keyboard driver for Linux */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <j.cormack@doc.ic.ac.uk>, or by paper mail: * Justin Cormack, 68 Dartmouth Park Road, London NW5 1SN, UK. */ #include <linux/slab.h> #include <linux/module.h> #include <linux/input.h> #include <linux/init.h> #include <linux/serio.h> #define DRIVER_DESC "Newton keyboard driver" MODULE_AUTHOR("Justin Cormack <j.cormack@doc.ic.ac.uk>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); #define NKBD_KEY 0x7f #define NKBD_PRESS 0x80 static unsigned char nkbd_keycode[128] = { KEY_A, KEY_S, KEY_D, KEY_F, KEY_H, KEY_G, KEY_Z, KEY_X, KEY_C, KEY_V, 0, KEY_B, KEY_Q, KEY_W, KEY_E, KEY_R, KEY_Y, KEY_T, KEY_1, KEY_2, KEY_3, KEY_4, KEY_6, KEY_5, KEY_EQUAL, KEY_9, KEY_7, KEY_MINUS, KEY_8, KEY_0, KEY_RIGHTBRACE, KEY_O, KEY_U, KEY_LEFTBRACE, KEY_I, KEY_P, KEY_ENTER, KEY_L, KEY_J, KEY_APOSTROPHE, KEY_K, KEY_SEMICOLON, KEY_BACKSLASH, KEY_COMMA, KEY_SLASH, KEY_N, KEY_M, KEY_DOT, KEY_TAB, KEY_SPACE, KEY_GRAVE, KEY_DELETE, 0, 0, 0, KEY_LEFTMETA, KEY_LEFTSHIFT, KEY_CAPSLOCK, KEY_LEFTALT, KEY_LEFTCTRL, KEY_RIGHTSHIFT, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, KEY_LEFT, KEY_RIGHT, KEY_DOWN, KEY_UP, 0 }; struct nkbd { unsigned char keycode[128]; struct input_dev *dev; struct serio *serio; char phys[32]; }; static irqreturn_t nkbd_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct nkbd *nkbd = serio_get_drvdata(serio); /* invalid scan codes are probably the init sequence, so we ignore them */ if (nkbd->keycode[data & NKBD_KEY]) { input_report_key(nkbd->dev, nkbd->keycode[data & NKBD_KEY], data & NKBD_PRESS); input_sync(nkbd->dev); } else if (data == 0xe7) /* end of init sequence */ printk(KERN_INFO "input: %s on %s\n", nkbd->dev->name, serio->phys); return IRQ_HANDLED; } static int nkbd_connect(struct serio *serio, struct serio_driver *drv) { struct nkbd *nkbd; struct input_dev *input_dev; int err = -ENOMEM; int i; nkbd = kzalloc(sizeof(struct nkbd), GFP_KERNEL); input_dev = input_allocate_device(); if (!nkbd || !input_dev) goto fail1; nkbd->serio = serio; nkbd->dev = input_dev; snprintf(nkbd->phys, sizeof(nkbd->phys), "%s/input0", serio->phys); memcpy(nkbd->keycode, nkbd_keycode, sizeof(nkbd->keycode)); input_dev->name = "Newton Keyboard"; input_dev->phys = nkbd->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_NEWTON; input_dev->id.product = 0x0001; input_dev->id.version = 0x0100; input_dev->dev.parent = &serio->dev; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REP); input_dev->keycode = nkbd->keycode; input_dev->keycodesize = sizeof(unsigned char); input_dev->keycodemax = ARRAY_SIZE(nkbd_keycode); for (i = 0; i < 128; i++) set_bit(nkbd->keycode[i], input_dev->keybit); clear_bit(0, input_dev->keybit); serio_set_drvdata(serio, nkbd); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(nkbd->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(nkbd); return err; 
} static void nkbd_disconnect(struct serio *serio) { struct nkbd *nkbd = serio_get_drvdata(serio); serio_close(serio); serio_set_drvdata(serio, NULL); input_unregister_device(nkbd->dev); kfree(nkbd); } static struct serio_device_id nkbd_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_NEWTON, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, nkbd_serio_ids); static struct serio_driver nkbd_drv = { .driver = { .name = "newtonkbd", }, .description = DRIVER_DESC, .id_table = nkbd_serio_ids, .interrupt = nkbd_interrupt, .connect = nkbd_connect, .disconnect = nkbd_disconnect, }; static int __init nkbd_init(void) { return serio_register_driver(&nkbd_drv); } static void __exit nkbd_exit(void) { serio_unregister_driver(&nkbd_drv); } module_init(nkbd_init); module_exit(nkbd_exit);
gpl-2.0
poitee/LTEOMAPNEX
drivers/i2c/busses/i2c-amd756-s4882.c
10174
7686
/* * i2c-amd756-s4882.c - i2c-amd756 extras for the Tyan S4882 motherboard * * Copyright (C) 2004, 2008 Jean Delvare <khali@linux-fr.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* * We select the channels by sending commands to the Philips * PCA9556 chip at I2C address 0x18. The main adapter is used for * the non-multiplexed part of the bus, and 4 virtual adapters * are defined for the multiplexed addresses: 0x50-0x53 (memory * module EEPROM) located on channels 1-4, and 0x4c (LM63) * located on multiplexed channels 0 and 5-7. 
We define one * virtual adapter per CPU, which corresponds to two multiplexed * channels: * CPU0: virtual adapter 1, channels 1 and 0 * CPU1: virtual adapter 2, channels 2 and 5 * CPU2: virtual adapter 3, channels 3 and 6 * CPU3: virtual adapter 4, channels 4 and 7 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/mutex.h> extern struct i2c_adapter amd756_smbus; static struct i2c_adapter *s4882_adapter; static struct i2c_algorithm *s4882_algo; /* Wrapper access functions for multiplexed SMBus */ static DEFINE_MUTEX(amd756_lock); static s32 amd756_access_virt0(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { int error; /* We exclude the multiplexed addresses */ if (addr == 0x4c || (addr & 0xfc) == 0x50 || (addr & 0xfc) == 0x30 || addr == 0x18) return -ENXIO; mutex_lock(&amd756_lock); error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write, command, size, data); mutex_unlock(&amd756_lock); return error; } /* We remember the last used channels combination so as to only switch channels when it is really needed. This greatly reduces the SMBus overhead, but also assumes that nobody will be writing to the PCA9556 in our back. 
*/ static u8 last_channels; static inline s32 amd756_access_channel(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data, u8 channels) { int error; /* We exclude the non-multiplexed addresses */ if (addr != 0x4c && (addr & 0xfc) != 0x50 && (addr & 0xfc) != 0x30) return -ENXIO; mutex_lock(&amd756_lock); if (last_channels != channels) { union i2c_smbus_data mplxdata; mplxdata.byte = channels; error = amd756_smbus.algo->smbus_xfer(adap, 0x18, 0, I2C_SMBUS_WRITE, 0x01, I2C_SMBUS_BYTE_DATA, &mplxdata); if (error) goto UNLOCK; last_channels = channels; } error = amd756_smbus.algo->smbus_xfer(adap, addr, flags, read_write, command, size, data); UNLOCK: mutex_unlock(&amd756_lock); return error; } static s32 amd756_access_virt1(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU0: channels 1 and 0 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x03); } static s32 amd756_access_virt2(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU1: channels 2 and 5 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x24); } static s32 amd756_access_virt3(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU2: channels 3 and 6 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x48); } static s32 amd756_access_virt4(struct i2c_adapter * adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data * data) { /* CPU3: channels 4 and 7 enabled */ return amd756_access_channel(adap, addr, flags, read_write, command, size, data, 0x90); } static int __init amd756_s4882_init(void) { int i, error; union i2c_smbus_data 
ioconfig; if (!amd756_smbus.dev.parent) return -ENODEV; /* Configure the PCA9556 multiplexer */ ioconfig.byte = 0x00; /* All I/O to output mode */ error = i2c_smbus_xfer(&amd756_smbus, 0x18, 0, I2C_SMBUS_WRITE, 0x03, I2C_SMBUS_BYTE_DATA, &ioconfig); if (error) { dev_err(&amd756_smbus.dev, "PCA9556 configuration failed\n"); error = -EIO; goto ERROR0; } /* Unregister physical bus */ error = i2c_del_adapter(&amd756_smbus); if (error) { dev_err(&amd756_smbus.dev, "Physical bus removal failed\n"); goto ERROR0; } printk(KERN_INFO "Enabling SMBus multiplexing for Tyan S4882\n"); /* Define the 5 virtual adapters and algorithms structures */ if (!(s4882_adapter = kzalloc(5 * sizeof(struct i2c_adapter), GFP_KERNEL))) { error = -ENOMEM; goto ERROR1; } if (!(s4882_algo = kzalloc(5 * sizeof(struct i2c_algorithm), GFP_KERNEL))) { error = -ENOMEM; goto ERROR2; } /* Fill in the new structures */ s4882_algo[0] = *(amd756_smbus.algo); s4882_algo[0].smbus_xfer = amd756_access_virt0; s4882_adapter[0] = amd756_smbus; s4882_adapter[0].algo = s4882_algo; s4882_adapter[0].dev.parent = amd756_smbus.dev.parent; for (i = 1; i < 5; i++) { s4882_algo[i] = *(amd756_smbus.algo); s4882_adapter[i] = amd756_smbus; snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name), "SMBus 8111 adapter (CPU%d)", i-1); s4882_adapter[i].algo = s4882_algo+i; s4882_adapter[i].dev.parent = amd756_smbus.dev.parent; } s4882_algo[1].smbus_xfer = amd756_access_virt1; s4882_algo[2].smbus_xfer = amd756_access_virt2; s4882_algo[3].smbus_xfer = amd756_access_virt3; s4882_algo[4].smbus_xfer = amd756_access_virt4; /* Register virtual adapters */ for (i = 0; i < 5; i++) { error = i2c_add_adapter(s4882_adapter+i); if (error) { printk(KERN_ERR "i2c-amd756-s4882: " "Virtual adapter %d registration " "failed, module not inserted\n", i); for (i--; i >= 0; i--) i2c_del_adapter(s4882_adapter+i); goto ERROR3; } } return 0; ERROR3: kfree(s4882_algo); s4882_algo = NULL; ERROR2: kfree(s4882_adapter); s4882_adapter = NULL; ERROR1: 
/* Restore physical bus */ i2c_add_adapter(&amd756_smbus); ERROR0: return error; } static void __exit amd756_s4882_exit(void) { if (s4882_adapter) { int i; for (i = 0; i < 5; i++) i2c_del_adapter(s4882_adapter+i); kfree(s4882_adapter); s4882_adapter = NULL; } kfree(s4882_algo); s4882_algo = NULL; /* Restore physical bus */ if (i2c_add_adapter(&amd756_smbus)) printk(KERN_ERR "i2c-amd756-s4882: " "Physical bus restoration failed\n"); } MODULE_AUTHOR("Jean Delvare <khali@linux-fr.org>"); MODULE_DESCRIPTION("S4882 SMBus multiplexing"); MODULE_LICENSE("GPL"); module_init(amd756_s4882_init); module_exit(amd756_s4882_exit);
gpl-2.0
hvaibhav/beagle-dev
lib/zlib_inflate/inflate.c
12478
31348
/* inflate.c -- zlib decompression * Copyright (C) 1995-2005 Mark Adler * For conditions of distribution and use, see copyright notice in zlib.h * * Based on zlib 1.2.3 but modified for the Linux Kernel by * Richard Purdie <richard@openedhand.com> * * Changes mainly for static instead of dynamic memory allocation * */ #include <linux/zutil.h> #include "inftrees.h" #include "inflate.h" #include "inffast.h" #include "infutil.h" int zlib_inflate_workspacesize(void) { return sizeof(struct inflate_workspace); } int zlib_inflateReset(z_streamp strm) { struct inflate_state *state; if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; state = (struct inflate_state *)strm->state; strm->total_in = strm->total_out = state->total = 0; strm->msg = NULL; strm->adler = 1; /* to support ill-conceived Java test suite */ state->mode = HEAD; state->last = 0; state->havedict = 0; state->dmax = 32768U; state->hold = 0; state->bits = 0; state->lencode = state->distcode = state->next = state->codes; /* Initialise Window */ state->wsize = 1U << state->wbits; state->write = 0; state->whave = 0; return Z_OK; } #if 0 int zlib_inflatePrime(z_streamp strm, int bits, int value) { struct inflate_state *state; if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; state = (struct inflate_state *)strm->state; if (bits > 16 || state->bits + bits > 32) return Z_STREAM_ERROR; value &= (1L << bits) - 1; state->hold += value << state->bits; state->bits += bits; return Z_OK; } #endif int zlib_inflateInit2(z_streamp strm, int windowBits) { struct inflate_state *state; if (strm == NULL) return Z_STREAM_ERROR; strm->msg = NULL; /* in case we return an error */ state = &WS(strm)->inflate_state; strm->state = (struct internal_state *)state; if (windowBits < 0) { state->wrap = 0; windowBits = -windowBits; } else { state->wrap = (windowBits >> 4) + 1; } if (windowBits < 8 || windowBits > 15) { return Z_STREAM_ERROR; } state->wbits = (unsigned)windowBits; state->window = 
&WS(strm)->working_window[0]; return zlib_inflateReset(strm); } /* Return state with length and distance decoding tables and index sizes set to fixed code decoding. This returns fixed tables from inffixed.h. */ static void zlib_fixedtables(struct inflate_state *state) { # include "inffixed.h" state->lencode = lenfix; state->lenbits = 9; state->distcode = distfix; state->distbits = 5; } /* Update the window with the last wsize (normally 32K) bytes written before returning. This is only called when a window is already in use, or when output has been written during this inflate call, but the end of the deflate stream has not been reached yet. It is also called to window dictionary data when a dictionary is loaded. Providing output buffers larger than 32K to inflate() should provide a speed advantage, since only the last 32K of output is copied to the sliding window upon return from inflate(), and since all distances after the first 32K of output will fall in the output data, making match copies simpler and faster. The advantage may be dependent on the size of the processor's data caches. 
*/ static void zlib_updatewindow(z_streamp strm, unsigned out) { struct inflate_state *state; unsigned copy, dist; state = (struct inflate_state *)strm->state; /* copy state->wsize or less output bytes into the circular window */ copy = out - strm->avail_out; if (copy >= state->wsize) { memcpy(state->window, strm->next_out - state->wsize, state->wsize); state->write = 0; state->whave = state->wsize; } else { dist = state->wsize - state->write; if (dist > copy) dist = copy; memcpy(state->window + state->write, strm->next_out - copy, dist); copy -= dist; if (copy) { memcpy(state->window, strm->next_out - copy, copy); state->write = copy; state->whave = state->wsize; } else { state->write += dist; if (state->write == state->wsize) state->write = 0; if (state->whave < state->wsize) state->whave += dist; } } } /* * At the end of a Deflate-compressed PPP packet, we expect to have seen * a `stored' block type value but not the (zero) length bytes. */ /* Returns true if inflate is currently at the end of a block generated by Z_SYNC_FLUSH or Z_FULL_FLUSH. This function is used by one PPP implementation to provide an additional safety check. PPP uses Z_SYNC_FLUSH but removes the length bytes of the resulting empty stored block. When decompressing, PPP checks that at the end of input packet, inflate is waiting for these length bytes. 
*/ static int zlib_inflateSyncPacket(z_streamp strm) { struct inflate_state *state; if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; state = (struct inflate_state *)strm->state; if (state->mode == STORED && state->bits == 0) { state->mode = TYPE; return Z_OK; } return Z_DATA_ERROR; } /* Macros for inflate(): */ /* check function to use adler32() for zlib or crc32() for gzip */ #define UPDATE(check, buf, len) zlib_adler32(check, buf, len) /* Load registers with state in inflate() for speed */ #define LOAD() \ do { \ put = strm->next_out; \ left = strm->avail_out; \ next = strm->next_in; \ have = strm->avail_in; \ hold = state->hold; \ bits = state->bits; \ } while (0) /* Restore state from registers in inflate() */ #define RESTORE() \ do { \ strm->next_out = put; \ strm->avail_out = left; \ strm->next_in = next; \ strm->avail_in = have; \ state->hold = hold; \ state->bits = bits; \ } while (0) /* Clear the input bit accumulator */ #define INITBITS() \ do { \ hold = 0; \ bits = 0; \ } while (0) /* Get a byte of input into the bit accumulator, or return from inflate() if there is no input available. */ #define PULLBYTE() \ do { \ if (have == 0) goto inf_leave; \ have--; \ hold += (unsigned long)(*next++) << bits; \ bits += 8; \ } while (0) /* Assure that there are at least n bits in the bit accumulator. If there is not enough available input to do that, then return from inflate(). 
*/ #define NEEDBITS(n) \ do { \ while (bits < (unsigned)(n)) \ PULLBYTE(); \ } while (0) /* Return the low n bits of the bit accumulator (n < 16) */ #define BITS(n) \ ((unsigned)hold & ((1U << (n)) - 1)) /* Remove n bits from the bit accumulator */ #define DROPBITS(n) \ do { \ hold >>= (n); \ bits -= (unsigned)(n); \ } while (0) /* Remove zero to seven bits as needed to go to a byte boundary */ #define BYTEBITS() \ do { \ hold >>= bits & 7; \ bits -= bits & 7; \ } while (0) /* Reverse the bytes in a 32-bit value */ #define REVERSE(q) \ ((((q) >> 24) & 0xff) + (((q) >> 8) & 0xff00) + \ (((q) & 0xff00) << 8) + (((q) & 0xff) << 24)) /* inflate() uses a state machine to process as much input data and generate as much output data as possible before returning. The state machine is structured roughly as follows: for (;;) switch (state) { ... case STATEn: if (not enough input data or output space to make progress) return; ... make progress ... state = STATEm; break; ... } so when inflate() is called again, the same case is attempted again, and if the appropriate resources are provided, the machine proceeds to the next state. The NEEDBITS() macro is usually the way the state evaluates whether it can proceed or should return. NEEDBITS() does the return if the requested bits are not available. The typical use of the BITS macros is: NEEDBITS(n); ... do something with BITS(n) ... DROPBITS(n); where NEEDBITS(n) either returns from inflate() if there isn't enough input left to load n bits into the accumulator, or it continues. BITS(n) gives the low n bits in the accumulator. When done, DROPBITS(n) drops the low n bits off the accumulator. INITBITS() clears the accumulator and sets the number of available bits to zero. BYTEBITS() discards just enough bits to put the accumulator on a byte boundary. After BYTEBITS() and a NEEDBITS(8), then BITS(8) would return the next byte in the stream. 
NEEDBITS(n) uses PULLBYTE() to get an available byte of input, or to return if there is no input available. The decoding of variable length codes uses PULLBYTE() directly in order to pull just enough bytes to decode the next code, and no more. Some states loop until they get enough input, making sure that enough state information is maintained to continue the loop where it left off if NEEDBITS() returns in the loop. For example, want, need, and keep would all have to actually be part of the saved state in case NEEDBITS() returns: case STATEw: while (want < need) { NEEDBITS(n); keep[want++] = BITS(n); DROPBITS(n); } state = STATEx; case STATEx: As shown above, if the next state is also the next case, then the break is omitted. A state may also return if there is not enough output space available to complete that state. Those states are copying stored data, writing a literal byte, and copying a matching string. When returning, a "goto inf_leave" is used to update the total counters, update the check value, and determine whether any progress has been made during that inflate() call in order to return the proper return code. Progress is defined as a change in either strm->avail_in or strm->avail_out. When there is a window, goto inf_leave will update the window with the last output written. If a goto inf_leave occurs in the middle of decompression and there is no window currently, goto inf_leave will create one and copy output to the window for the next call of inflate(). In this implementation, the flush parameter of inflate() only affects the return code (per zlib.h). inflate() always writes as much as possible to strm->next_out, given the space available and the provided input--the effect documented in zlib.h of Z_SYNC_FLUSH. Furthermore, inflate() always defers the allocation of and copying into a sliding window until necessary, which provides the effect documented in zlib.h for Z_FINISH when the entire input stream available. 
So the only thing the flush parameter actually does is: when flush is set to Z_FINISH, inflate() cannot return Z_OK. Instead it will return Z_BUF_ERROR if it has not reached the end of the stream. */ int zlib_inflate(z_streamp strm, int flush) { struct inflate_state *state; const unsigned char *next; /* next input */ unsigned char *put; /* next output */ unsigned have, left; /* available input and output */ unsigned long hold; /* bit buffer */ unsigned bits; /* bits in bit buffer */ unsigned in, out; /* save starting available input and output */ unsigned copy; /* number of stored or match bytes to copy */ unsigned char *from; /* where to copy match bytes from */ code this; /* current decoding table entry */ code last; /* parent table entry */ unsigned len; /* length to copy for repeats, bits to drop */ int ret; /* return code */ static const unsigned short order[19] = /* permutation of code lengths */ {16, 17, 18, 0, 8, 7, 9, 6, 10, 5, 11, 4, 12, 3, 13, 2, 14, 1, 15}; /* Do not check for strm->next_out == NULL here as ppc zImage inflates to strm->next_out = 0 */ if (strm == NULL || strm->state == NULL || (strm->next_in == NULL && strm->avail_in != 0)) return Z_STREAM_ERROR; state = (struct inflate_state *)strm->state; if (state->mode == TYPE) state->mode = TYPEDO; /* skip check */ LOAD(); in = have; out = left; ret = Z_OK; for (;;) switch (state->mode) { case HEAD: if (state->wrap == 0) { state->mode = TYPEDO; break; } NEEDBITS(16); if ( ((BITS(8) << 8) + (hold >> 8)) % 31) { strm->msg = (char *)"incorrect header check"; state->mode = BAD; break; } if (BITS(4) != Z_DEFLATED) { strm->msg = (char *)"unknown compression method"; state->mode = BAD; break; } DROPBITS(4); len = BITS(4) + 8; if (len > state->wbits) { strm->msg = (char *)"invalid window size"; state->mode = BAD; break; } state->dmax = 1U << len; strm->adler = state->check = zlib_adler32(0L, NULL, 0); state->mode = hold & 0x200 ? 
DICTID : TYPE; INITBITS(); break; case DICTID: NEEDBITS(32); strm->adler = state->check = REVERSE(hold); INITBITS(); state->mode = DICT; case DICT: if (state->havedict == 0) { RESTORE(); return Z_NEED_DICT; } strm->adler = state->check = zlib_adler32(0L, NULL, 0); state->mode = TYPE; case TYPE: if (flush == Z_BLOCK) goto inf_leave; case TYPEDO: if (state->last) { BYTEBITS(); state->mode = CHECK; break; } NEEDBITS(3); state->last = BITS(1); DROPBITS(1); switch (BITS(2)) { case 0: /* stored block */ state->mode = STORED; break; case 1: /* fixed block */ zlib_fixedtables(state); state->mode = LEN; /* decode codes */ break; case 2: /* dynamic block */ state->mode = TABLE; break; case 3: strm->msg = (char *)"invalid block type"; state->mode = BAD; } DROPBITS(2); break; case STORED: BYTEBITS(); /* go to byte boundary */ NEEDBITS(32); if ((hold & 0xffff) != ((hold >> 16) ^ 0xffff)) { strm->msg = (char *)"invalid stored block lengths"; state->mode = BAD; break; } state->length = (unsigned)hold & 0xffff; INITBITS(); state->mode = COPY; case COPY: copy = state->length; if (copy) { if (copy > have) copy = have; if (copy > left) copy = left; if (copy == 0) goto inf_leave; memcpy(put, next, copy); have -= copy; next += copy; left -= copy; put += copy; state->length -= copy; break; } state->mode = TYPE; break; case TABLE: NEEDBITS(14); state->nlen = BITS(5) + 257; DROPBITS(5); state->ndist = BITS(5) + 1; DROPBITS(5); state->ncode = BITS(4) + 4; DROPBITS(4); #ifndef PKZIP_BUG_WORKAROUND if (state->nlen > 286 || state->ndist > 30) { strm->msg = (char *)"too many length or distance symbols"; state->mode = BAD; break; } #endif state->have = 0; state->mode = LENLENS; case LENLENS: while (state->have < state->ncode) { NEEDBITS(3); state->lens[order[state->have++]] = (unsigned short)BITS(3); DROPBITS(3); } while (state->have < 19) state->lens[order[state->have++]] = 0; state->next = state->codes; state->lencode = (code const *)(state->next); state->lenbits = 7; ret = 
zlib_inflate_table(CODES, state->lens, 19, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid code lengths set"; state->mode = BAD; break; } state->have = 0; state->mode = CODELENS; case CODELENS: while (state->have < state->nlen + state->ndist) { for (;;) { this = state->lencode[BITS(state->lenbits)]; if ((unsigned)(this.bits) <= bits) break; PULLBYTE(); } if (this.val < 16) { NEEDBITS(this.bits); DROPBITS(this.bits); state->lens[state->have++] = this.val; } else { if (this.val == 16) { NEEDBITS(this.bits + 2); DROPBITS(this.bits); if (state->have == 0) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } len = state->lens[state->have - 1]; copy = 3 + BITS(2); DROPBITS(2); } else if (this.val == 17) { NEEDBITS(this.bits + 3); DROPBITS(this.bits); len = 0; copy = 3 + BITS(3); DROPBITS(3); } else { NEEDBITS(this.bits + 7); DROPBITS(this.bits); len = 0; copy = 11 + BITS(7); DROPBITS(7); } if (state->have + copy > state->nlen + state->ndist) { strm->msg = (char *)"invalid bit length repeat"; state->mode = BAD; break; } while (copy--) state->lens[state->have++] = (unsigned short)len; } } /* handle error breaks in while */ if (state->mode == BAD) break; /* build code tables */ state->next = state->codes; state->lencode = (code const *)(state->next); state->lenbits = 9; ret = zlib_inflate_table(LENS, state->lens, state->nlen, &(state->next), &(state->lenbits), state->work); if (ret) { strm->msg = (char *)"invalid literal/lengths set"; state->mode = BAD; break; } state->distcode = (code const *)(state->next); state->distbits = 6; ret = zlib_inflate_table(DISTS, state->lens + state->nlen, state->ndist, &(state->next), &(state->distbits), state->work); if (ret) { strm->msg = (char *)"invalid distances set"; state->mode = BAD; break; } state->mode = LEN; case LEN: if (have >= 6 && left >= 258) { RESTORE(); inflate_fast(strm, out); LOAD(); break; } for (;;) { this = state->lencode[BITS(state->lenbits)]; if 
((unsigned)(this.bits) <= bits) break; PULLBYTE(); } if (this.op && (this.op & 0xf0) == 0) { last = this; for (;;) { this = state->lencode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + this.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(this.bits); state->length = (unsigned)this.val; if ((int)(this.op) == 0) { state->mode = LIT; break; } if (this.op & 32) { state->mode = TYPE; break; } if (this.op & 64) { strm->msg = (char *)"invalid literal/length code"; state->mode = BAD; break; } state->extra = (unsigned)(this.op) & 15; state->mode = LENEXT; case LENEXT: if (state->extra) { NEEDBITS(state->extra); state->length += BITS(state->extra); DROPBITS(state->extra); } state->mode = DIST; case DIST: for (;;) { this = state->distcode[BITS(state->distbits)]; if ((unsigned)(this.bits) <= bits) break; PULLBYTE(); } if ((this.op & 0xf0) == 0) { last = this; for (;;) { this = state->distcode[last.val + (BITS(last.bits + last.op) >> last.bits)]; if ((unsigned)(last.bits + this.bits) <= bits) break; PULLBYTE(); } DROPBITS(last.bits); } DROPBITS(this.bits); if (this.op & 64) { strm->msg = (char *)"invalid distance code"; state->mode = BAD; break; } state->offset = (unsigned)this.val; state->extra = (unsigned)(this.op) & 15; state->mode = DISTEXT; case DISTEXT: if (state->extra) { NEEDBITS(state->extra); state->offset += BITS(state->extra); DROPBITS(state->extra); } #ifdef INFLATE_STRICT if (state->offset > state->dmax) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } #endif if (state->offset > state->whave + out - left) { strm->msg = (char *)"invalid distance too far back"; state->mode = BAD; break; } state->mode = MATCH; case MATCH: if (left == 0) goto inf_leave; copy = out - left; if (state->offset > copy) { /* copy from window */ copy = state->offset - copy; if (copy > state->write) { copy -= state->write; from = state->window + (state->wsize - copy); } else from = state->window + 
(state->write - copy); if (copy > state->length) copy = state->length; } else { /* copy from output */ from = put - state->offset; copy = state->length; } if (copy > left) copy = left; left -= copy; state->length -= copy; do { *put++ = *from++; } while (--copy); if (state->length == 0) state->mode = LEN; break; case LIT: if (left == 0) goto inf_leave; *put++ = (unsigned char)(state->length); left--; state->mode = LEN; break; case CHECK: if (state->wrap) { NEEDBITS(32); out -= left; strm->total_out += out; state->total += out; if (out) strm->adler = state->check = UPDATE(state->check, put - out, out); out = left; if (( REVERSE(hold)) != state->check) { strm->msg = (char *)"incorrect data check"; state->mode = BAD; break; } INITBITS(); } state->mode = DONE; case DONE: ret = Z_STREAM_END; goto inf_leave; case BAD: ret = Z_DATA_ERROR; goto inf_leave; case MEM: return Z_MEM_ERROR; case SYNC: default: return Z_STREAM_ERROR; } /* Return from inflate(), updating the total counts and the check value. If there was no progress during the inflate() call, return a buffer error. Call zlib_updatewindow() to create and/or update the window state. */ inf_leave: RESTORE(); if (state->wsize || (state->mode < CHECK && out != strm->avail_out)) zlib_updatewindow(strm, out); in -= strm->avail_in; out -= strm->avail_out; strm->total_in += in; strm->total_out += out; state->total += out; if (state->wrap && out) strm->adler = state->check = UPDATE(state->check, strm->next_out - out, out); strm->data_type = state->bits + (state->last ? 64 : 0) + (state->mode == TYPE ? 
128 : 0); if (flush == Z_PACKET_FLUSH && ret == Z_OK && strm->avail_out != 0 && strm->avail_in == 0) return zlib_inflateSyncPacket(strm); if (((in == 0 && out == 0) || flush == Z_FINISH) && ret == Z_OK) ret = Z_BUF_ERROR; return ret; } int zlib_inflateEnd(z_streamp strm) { if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; return Z_OK; } #if 0 int zlib_inflateSetDictionary(z_streamp strm, const Byte *dictionary, uInt dictLength) { struct inflate_state *state; unsigned long id; /* check state */ if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; state = (struct inflate_state *)strm->state; if (state->wrap != 0 && state->mode != DICT) return Z_STREAM_ERROR; /* check for correct dictionary id */ if (state->mode == DICT) { id = zlib_adler32(0L, NULL, 0); id = zlib_adler32(id, dictionary, dictLength); if (id != state->check) return Z_DATA_ERROR; } /* copy dictionary to window */ zlib_updatewindow(strm, strm->avail_out); if (dictLength > state->wsize) { memcpy(state->window, dictionary + dictLength - state->wsize, state->wsize); state->whave = state->wsize; } else { memcpy(state->window + state->wsize - dictLength, dictionary, dictLength); state->whave = dictLength; } state->havedict = 1; return Z_OK; } #endif #if 0 /* Search buf[0..len-1] for the pattern: 0, 0, 0xff, 0xff. Return when found or when out of input. When called, *have is the number of pattern bytes found in order so far, in 0..3. On return *have is updated to the new state. If on return *have equals four, then the pattern was found and the return value is how many bytes were read including the last byte of the pattern. If *have is less than four, then the pattern has not been found yet and the return value is len. In the latter case, zlib_syncsearch() can be called again with more data and the *have state. *have is initialized to zero for the first call. 
*/ static unsigned zlib_syncsearch(unsigned *have, unsigned char *buf, unsigned len) { unsigned got; unsigned next; got = *have; next = 0; while (next < len && got < 4) { if ((int)(buf[next]) == (got < 2 ? 0 : 0xff)) got++; else if (buf[next]) got = 0; else got = 4 - got; next++; } *have = got; return next; } #endif #if 0 int zlib_inflateSync(z_streamp strm) { unsigned len; /* number of bytes to look at or looked at */ unsigned long in, out; /* temporary to save total_in and total_out */ unsigned char buf[4]; /* to restore bit buffer to byte string */ struct inflate_state *state; /* check parameters */ if (strm == NULL || strm->state == NULL) return Z_STREAM_ERROR; state = (struct inflate_state *)strm->state; if (strm->avail_in == 0 && state->bits < 8) return Z_BUF_ERROR; /* if first time, start search in bit buffer */ if (state->mode != SYNC) { state->mode = SYNC; state->hold <<= state->bits & 7; state->bits -= state->bits & 7; len = 0; while (state->bits >= 8) { buf[len++] = (unsigned char)(state->hold); state->hold >>= 8; state->bits -= 8; } state->have = 0; zlib_syncsearch(&(state->have), buf, len); } /* search available input */ len = zlib_syncsearch(&(state->have), strm->next_in, strm->avail_in); strm->avail_in -= len; strm->next_in += len; strm->total_in += len; /* return no joy or set up to restart inflate() on a new block */ if (state->have != 4) return Z_DATA_ERROR; in = strm->total_in; out = strm->total_out; zlib_inflateReset(strm); strm->total_in = in; strm->total_out = out; state->mode = TYPE; return Z_OK; } #endif /* * This subroutine adds the data at next_in/avail_in to the output history * without performing any output. The output buffer must be "caught up"; * i.e. no pending output but this should always be the case. The state must * be waiting on the start of a block (i.e. mode == TYPE or HEAD). On exit, * the output will also be caught up, and the checksum will have been updated * if need be. 
*/ int zlib_inflateIncomp(z_stream *z) { struct inflate_state *state = (struct inflate_state *)z->state; Byte *saved_no = z->next_out; uInt saved_ao = z->avail_out; if (state->mode != TYPE && state->mode != HEAD) return Z_DATA_ERROR; /* Setup some variables to allow misuse of updateWindow */ z->avail_out = 0; z->next_out = (unsigned char*)z->next_in + z->avail_in; zlib_updatewindow(z, z->avail_in); /* Restore saved variables */ z->avail_out = saved_ao; z->next_out = saved_no; z->adler = state->check = UPDATE(state->check, z->next_in, z->avail_in); z->total_out += z->avail_in; z->total_in += z->avail_in; z->next_in += z->avail_in; state->total += z->avail_in; z->avail_in = 0; return Z_OK; }
gpl-2.0
nikitines/zte-kernel-roamer2
samples/kprobes/jprobe_example.c
12990
1815
/* * Here's a sample kernel module showing the use of jprobes to dump * the arguments of do_fork(). * * For more information on theory of operation of jprobes, see * Documentation/kprobes.txt * * Build and insert the kernel module as done in the kprobe example. * You will see the trace data in /var/log/messages and on the * console whenever do_fork() is invoked to create a new process. * (Some messages may be suppressed if syslogd is configured to * eliminate duplicate messages.) */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/kprobes.h> /* * Jumper probe for do_fork. * Mirror principle enables access to arguments of the probed routine * from the probe handler. */ /* Proxy routine having the same arguments as actual do_fork() routine */ static long jdo_fork(unsigned long clone_flags, unsigned long stack_start, struct pt_regs *regs, unsigned long stack_size, int __user *parent_tidptr, int __user *child_tidptr) { printk(KERN_INFO "jprobe: clone_flags = 0x%lx, stack_size = 0x%lx," " regs = 0x%p\n", clone_flags, stack_size, regs); /* Always end with a call to jprobe_return(). */ jprobe_return(); return 0; } static struct jprobe my_jprobe = { .entry = jdo_fork, .kp = { .symbol_name = "do_fork", }, }; static int __init jprobe_init(void) { int ret; ret = register_jprobe(&my_jprobe); if (ret < 0) { printk(KERN_INFO "register_jprobe failed, returned %d\n", ret); return -1; } printk(KERN_INFO "Planted jprobe at %p, handler addr %p\n", my_jprobe.kp.addr, my_jprobe.entry); return 0; } static void __exit jprobe_exit(void) { unregister_jprobe(&my_jprobe); printk(KERN_INFO "jprobe at %p unregistered\n", my_jprobe.kp.addr); } module_init(jprobe_init) module_exit(jprobe_exit) MODULE_LICENSE("GPL");
gpl-2.0
TheTypoMaster/ubuntu-utopic
arch/arm/mach-at91/at91sam926x_time.c
191
7248
/* * at91sam926x_time.c - Periodic Interval Timer (PIT) for at91sam926x * * Copyright (C) 2005-2006 M. Amine SAYA, ATMEL Rousset, France * Revision 2005 M. Nicolas Diremdjian, ATMEL Rousset, France * Converted to ClockSource/ClockEvents by David Brownell. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/clockchips.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <asm/mach/time.h> #include <mach/hardware.h> #define AT91_PIT_MR 0x00 /* Mode Register */ #define AT91_PIT_PITIEN (1 << 25) /* Timer Interrupt Enable */ #define AT91_PIT_PITEN (1 << 24) /* Timer Enabled */ #define AT91_PIT_PIV (0xfffff) /* Periodic Interval Value */ #define AT91_PIT_SR 0x04 /* Status Register */ #define AT91_PIT_PITS (1 << 0) /* Timer Status */ #define AT91_PIT_PIVR 0x08 /* Periodic Interval Value Register */ #define AT91_PIT_PIIR 0x0c /* Periodic Interval Image Register */ #define AT91_PIT_PICNT (0xfff << 20) /* Interval Counter */ #define AT91_PIT_CPIV (0xfffff) /* Inverval Value */ #define PIT_CPIV(x) ((x) & AT91_PIT_CPIV) #define PIT_PICNT(x) (((x) & AT91_PIT_PICNT) >> 20) static u32 pit_cycle; /* write-once */ static u32 pit_cnt; /* access only w/system irq blocked */ static void __iomem *pit_base_addr __read_mostly; static struct clk *mck; static inline unsigned int pit_read(unsigned int reg_offset) { return __raw_readl(pit_base_addr + reg_offset); } static inline void pit_write(unsigned int reg_offset, unsigned long value) { __raw_writel(value, pit_base_addr + reg_offset); } /* * Clocksource: just a monotonic counter of MCK/16 cycles. * We don't care whether or not PIT irqs are enabled. 
*/ static cycle_t read_pit_clk(struct clocksource *cs) { unsigned long flags; u32 elapsed; u32 t; raw_local_irq_save(flags); elapsed = pit_cnt; t = pit_read(AT91_PIT_PIIR); raw_local_irq_restore(flags); elapsed += PIT_PICNT(t) * pit_cycle; elapsed += PIT_CPIV(t); return elapsed; } static struct clocksource pit_clk = { .name = "pit", .rating = 175, .read = read_pit_clk, .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; /* * Clockevent device: interrupts every 1/HZ (== pit_cycles * MCK/16) */ static void pit_clkevt_mode(enum clock_event_mode mode, struct clock_event_device *dev) { switch (mode) { case CLOCK_EVT_MODE_PERIODIC: /* update clocksource counter */ pit_cnt += pit_cycle * PIT_PICNT(pit_read(AT91_PIT_PIVR)); pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN | AT91_PIT_PITIEN); break; case CLOCK_EVT_MODE_ONESHOT: BUG(); /* FALLTHROUGH */ case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: /* disable irq, leaving the clocksource active */ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); break; case CLOCK_EVT_MODE_RESUME: break; } } static void at91sam926x_pit_suspend(struct clock_event_device *cedev) { /* Disable timer */ pit_write(AT91_PIT_MR, 0); } static void at91sam926x_pit_reset(void) { /* Disable timer and irqs */ pit_write(AT91_PIT_MR, 0); /* Clear any pending interrupts, wait for PIT to stop counting */ while (PIT_CPIV(pit_read(AT91_PIT_PIVR)) != 0) cpu_relax(); /* Start PIT but don't enable IRQ */ pit_write(AT91_PIT_MR, (pit_cycle - 1) | AT91_PIT_PITEN); } static void at91sam926x_pit_resume(struct clock_event_device *cedev) { at91sam926x_pit_reset(); } static struct clock_event_device pit_clkevt = { .name = "pit", .features = CLOCK_EVT_FEAT_PERIODIC, .shift = 32, .rating = 100, .set_mode = pit_clkevt_mode, .suspend = at91sam926x_pit_suspend, .resume = at91sam926x_pit_resume, }; /* * IRQ handler for the timer. 
*/ static irqreturn_t at91sam926x_pit_interrupt(int irq, void *dev_id) { /* * irqs should be disabled here, but as the irq is shared they are only * guaranteed to be off if the timer irq is registered first. */ WARN_ON_ONCE(!irqs_disabled()); /* The PIT interrupt may be disabled, and is shared */ if ((pit_clkevt.mode == CLOCK_EVT_MODE_PERIODIC) && (pit_read(AT91_PIT_SR) & AT91_PIT_PITS)) { unsigned nr_ticks; /* Get number of ticks performed before irq, and ack it */ nr_ticks = PIT_PICNT(pit_read(AT91_PIT_PIVR)); do { pit_cnt += pit_cycle; pit_clkevt.event_handler(&pit_clkevt); nr_ticks--; } while (nr_ticks); return IRQ_HANDLED; } return IRQ_NONE; } static struct irqaction at91sam926x_pit_irq = { .name = "at91_tick", .flags = IRQF_SHARED | IRQF_TIMER | IRQF_IRQPOLL, .handler = at91sam926x_pit_interrupt, .irq = NR_IRQS_LEGACY + AT91_ID_SYS, }; #ifdef CONFIG_OF static struct of_device_id pit_timer_ids[] = { { .compatible = "atmel,at91sam9260-pit" }, { /* sentinel */ } }; static int __init of_at91sam926x_pit_init(void) { struct device_node *np; int ret; np = of_find_matching_node(NULL, pit_timer_ids); if (!np) goto err; pit_base_addr = of_iomap(np, 0); if (!pit_base_addr) goto node_err; mck = of_clk_get(np, 0); /* Get the interrupts property */ ret = irq_of_parse_and_map(np, 0); if (!ret) { pr_crit("AT91: PIT: Unable to get IRQ from DT\n"); if (!IS_ERR(mck)) clk_put(mck); goto ioremap_err; } at91sam926x_pit_irq.irq = ret; of_node_put(np); return 0; ioremap_err: iounmap(pit_base_addr); node_err: of_node_put(np); err: return -EINVAL; } #else static int __init of_at91sam926x_pit_init(void) { return -EINVAL; } #endif /* * Set up both clocksource and clockevent support. 
*/ void __init at91sam926x_pit_init(void) { unsigned long pit_rate; unsigned bits; int ret; mck = ERR_PTR(-ENOENT); /* For device tree enabled device: initialize here */ of_at91sam926x_pit_init(); /* * Use our actual MCK to figure out how many MCK/16 ticks per * 1/HZ period (instead of a compile-time constant LATCH). */ if (IS_ERR(mck)) mck = clk_get(NULL, "mck"); if (IS_ERR(mck)) panic("AT91: PIT: Unable to get mck clk\n"); pit_rate = clk_get_rate(mck) / 16; pit_cycle = (pit_rate + HZ/2) / HZ; WARN_ON(((pit_cycle - 1) & ~AT91_PIT_PIV) != 0); /* Initialize and enable the timer */ at91sam926x_pit_reset(); /* * Register clocksource. The high order bits of PIV are unused, * so this isn't a 32-bit counter unless we get clockevent irqs. */ bits = 12 /* PICNT */ + ilog2(pit_cycle) /* PIV */; pit_clk.mask = CLOCKSOURCE_MASK(bits); clocksource_register_hz(&pit_clk, pit_rate); /* Set up irq handler */ ret = setup_irq(at91sam926x_pit_irq.irq, &at91sam926x_pit_irq); if (ret) pr_crit("AT91: PIT: Unable to setup IRQ\n"); /* Set up and register clockevents */ pit_clkevt.mult = div_sc(pit_rate, NSEC_PER_SEC, pit_clkevt.shift); pit_clkevt.cpumask = cpumask_of(0); clockevents_register_device(&pit_clkevt); } void __init at91sam926x_ioremap_pit(u32 addr) { #if defined(CONFIG_OF) struct device_node *np = of_find_matching_node(NULL, pit_timer_ids); if (np) { of_node_put(np); return; } #endif pit_base_addr = ioremap(addr, 16); if (!pit_base_addr) panic("Impossible to ioremap PIT\n"); }
gpl-2.0
NXT-F1V3/kernel_dev
drivers/thunderbolt/nhi.c
191
17870
/* * Thunderbolt Cactus Ridge driver - NHI driver * * The NHI (native host interface) is the pci device that allows us to send and * receive frames from the thunderbolt bus. * * Copyright (c) 2014 Andreas Noever <andreas.noever@gmail.com> */ #include <linux/pm_runtime.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/dmi.h> #include "nhi.h" #include "nhi_regs.h" #include "tb.h" #define RING_TYPE(ring) ((ring)->is_tx ? "TX ring" : "RX ring") static int ring_interrupt_index(struct tb_ring *ring) { int bit = ring->hop; if (!ring->is_tx) bit += ring->nhi->hop_count; return bit; } /** * ring_interrupt_active() - activate/deactivate interrupts for a single ring * * ring->nhi->lock must be held. */ static void ring_interrupt_active(struct tb_ring *ring, bool active) { int reg = REG_RING_INTERRUPT_BASE + ring_interrupt_index(ring) / 32 * 4; int bit = ring_interrupt_index(ring) & 31; int mask = 1 << bit; u32 old, new; old = ioread32(ring->nhi->iobase + reg); if (active) new = old | mask; else new = old & ~mask; dev_info(&ring->nhi->pdev->dev, "%s interrupt at register %#x bit %d (%#x -> %#x)\n", active ? "enabling" : "disabling", reg, bit, old, new); if (new == old) dev_WARN(&ring->nhi->pdev->dev, "interrupt for %s %d is already %s\n", RING_TYPE(ring), ring->hop, active ? "enabled" : "disabled"); iowrite32(new, ring->nhi->iobase + reg); } /** * nhi_disable_interrupts() - disable interrupts for all rings * * Use only during init and shutdown. 
*/ static void nhi_disable_interrupts(struct tb_nhi *nhi) { int i = 0; /* disable interrupts */ for (i = 0; i < RING_INTERRUPT_REG_COUNT(nhi); i++) iowrite32(0, nhi->iobase + REG_RING_INTERRUPT_BASE + 4 * i); /* clear interrupt status bits */ for (i = 0; i < RING_NOTIFY_REG_COUNT(nhi); i++) ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * i); } /* ring helper methods */ static void __iomem *ring_desc_base(struct tb_ring *ring) { void __iomem *io = ring->nhi->iobase; io += ring->is_tx ? REG_TX_RING_BASE : REG_RX_RING_BASE; io += ring->hop * 16; return io; } static void __iomem *ring_options_base(struct tb_ring *ring) { void __iomem *io = ring->nhi->iobase; io += ring->is_tx ? REG_TX_OPTIONS_BASE : REG_RX_OPTIONS_BASE; io += ring->hop * 32; return io; } static void ring_iowrite16desc(struct tb_ring *ring, u32 value, u32 offset) { iowrite16(value, ring_desc_base(ring) + offset); } static void ring_iowrite32desc(struct tb_ring *ring, u32 value, u32 offset) { iowrite32(value, ring_desc_base(ring) + offset); } static void ring_iowrite64desc(struct tb_ring *ring, u64 value, u32 offset) { iowrite32(value, ring_desc_base(ring) + offset); iowrite32(value >> 32, ring_desc_base(ring) + offset + 4); } static void ring_iowrite32options(struct tb_ring *ring, u32 value, u32 offset) { iowrite32(value, ring_options_base(ring) + offset); } static bool ring_full(struct tb_ring *ring) { return ((ring->head + 1) % ring->size) == ring->tail; } static bool ring_empty(struct tb_ring *ring) { return ring->head == ring->tail; } /** * ring_write_descriptors() - post frames from ring->queue to the controller * * ring->lock is held. 
*/ static void ring_write_descriptors(struct tb_ring *ring) { struct ring_frame *frame, *n; struct ring_desc *descriptor; list_for_each_entry_safe(frame, n, &ring->queue, list) { if (ring_full(ring)) break; list_move_tail(&frame->list, &ring->in_flight); descriptor = &ring->descriptors[ring->head]; descriptor->phys = frame->buffer_phy; descriptor->time = 0; descriptor->flags = RING_DESC_POSTED | RING_DESC_INTERRUPT; if (ring->is_tx) { descriptor->length = frame->size; descriptor->eof = frame->eof; descriptor->sof = frame->sof; } ring->head = (ring->head + 1) % ring->size; ring_iowrite16desc(ring, ring->head, ring->is_tx ? 10 : 8); } } /** * ring_work() - progress completed frames * * If the ring is shutting down then all frames are marked as canceled and * their callbacks are invoked. * * Otherwise we collect all completed frame from the ring buffer, write new * frame to the ring buffer and invoke the callbacks for the completed frames. */ static void ring_work(struct work_struct *work) { struct tb_ring *ring = container_of(work, typeof(*ring), work); struct ring_frame *frame; bool canceled = false; LIST_HEAD(done); mutex_lock(&ring->lock); if (!ring->running) { /* Move all frames to done and mark them as canceled. 
*/ list_splice_tail_init(&ring->in_flight, &done); list_splice_tail_init(&ring->queue, &done); canceled = true; goto invoke_callback; } while (!ring_empty(ring)) { if (!(ring->descriptors[ring->tail].flags & RING_DESC_COMPLETED)) break; frame = list_first_entry(&ring->in_flight, typeof(*frame), list); list_move_tail(&frame->list, &done); if (!ring->is_tx) { frame->size = ring->descriptors[ring->tail].length; frame->eof = ring->descriptors[ring->tail].eof; frame->sof = ring->descriptors[ring->tail].sof; frame->flags = ring->descriptors[ring->tail].flags; if (frame->sof != 0) dev_WARN(&ring->nhi->pdev->dev, "%s %d got unexpected SOF: %#x\n", RING_TYPE(ring), ring->hop, frame->sof); /* * known flags: * raw not enabled, interupt not set: 0x2=0010 * raw enabled: 0xa=1010 * raw not enabled: 0xb=1011 * partial frame (>MAX_FRAME_SIZE): 0xe=1110 */ if (frame->flags != 0xa) dev_WARN(&ring->nhi->pdev->dev, "%s %d got unexpected flags: %#x\n", RING_TYPE(ring), ring->hop, frame->flags); } ring->tail = (ring->tail + 1) % ring->size; } ring_write_descriptors(ring); invoke_callback: mutex_unlock(&ring->lock); /* allow callbacks to schedule new work */ while (!list_empty(&done)) { frame = list_first_entry(&done, typeof(*frame), list); /* * The callback may reenqueue or delete frame. * Do not hold on to it. */ list_del_init(&frame->list); frame->callback(ring, frame, canceled); } } int __ring_enqueue(struct tb_ring *ring, struct ring_frame *frame) { int ret = 0; mutex_lock(&ring->lock); if (ring->running) { list_add_tail(&frame->list, &ring->queue); ring_write_descriptors(ring); } else { ret = -ESHUTDOWN; } mutex_unlock(&ring->lock); return ret; } static struct tb_ring *ring_alloc(struct tb_nhi *nhi, u32 hop, int size, bool transmit) { struct tb_ring *ring = NULL; dev_info(&nhi->pdev->dev, "allocating %s ring %d of size %d\n", transmit ? 
"TX" : "RX", hop, size); mutex_lock(&nhi->lock); if (hop >= nhi->hop_count) { dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop); goto err; } if (transmit && nhi->tx_rings[hop]) { dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop); goto err; } else if (!transmit && nhi->rx_rings[hop]) { dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop); goto err; } ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) goto err; mutex_init(&ring->lock); INIT_LIST_HEAD(&ring->queue); INIT_LIST_HEAD(&ring->in_flight); INIT_WORK(&ring->work, ring_work); ring->nhi = nhi; ring->hop = hop; ring->is_tx = transmit; ring->size = size; ring->head = 0; ring->tail = 0; ring->running = false; ring->descriptors = dma_alloc_coherent(&ring->nhi->pdev->dev, size * sizeof(*ring->descriptors), &ring->descriptors_dma, GFP_KERNEL | __GFP_ZERO); if (!ring->descriptors) goto err; if (transmit) nhi->tx_rings[hop] = ring; else nhi->rx_rings[hop] = ring; mutex_unlock(&nhi->lock); return ring; err: if (ring) mutex_destroy(&ring->lock); kfree(ring); mutex_unlock(&nhi->lock); return NULL; } struct tb_ring *ring_alloc_tx(struct tb_nhi *nhi, int hop, int size) { return ring_alloc(nhi, hop, size, true); } struct tb_ring *ring_alloc_rx(struct tb_nhi *nhi, int hop, int size) { return ring_alloc(nhi, hop, size, false); } /** * ring_start() - enable a ring * * Must not be invoked in parallel with ring_stop(). */ void ring_start(struct tb_ring *ring) { mutex_lock(&ring->nhi->lock); mutex_lock(&ring->lock); if (ring->running) { dev_WARN(&ring->nhi->pdev->dev, "ring already started\n"); goto err; } dev_info(&ring->nhi->pdev->dev, "starting %s %d\n", RING_TYPE(ring), ring->hop); ring_iowrite64desc(ring, ring->descriptors_dma, 0); if (ring->is_tx) { ring_iowrite32desc(ring, ring->size, 12); ring_iowrite32options(ring, 0, 4); /* time releated ? 
*/ ring_iowrite32options(ring, RING_FLAG_ENABLE | RING_FLAG_RAW, 0); } else { ring_iowrite32desc(ring, (TB_FRAME_SIZE << 16) | ring->size, 12); ring_iowrite32options(ring, 0xffffffff, 4); /* SOF EOF mask */ ring_iowrite32options(ring, RING_FLAG_ENABLE | RING_FLAG_RAW, 0); } ring_interrupt_active(ring, true); ring->running = true; err: mutex_unlock(&ring->lock); mutex_unlock(&ring->nhi->lock); } /** * ring_stop() - shutdown a ring * * Must not be invoked from a callback. * * This method will disable the ring. Further calls to ring_tx/ring_rx will * return -ESHUTDOWN until ring_stop has been called. * * All enqueued frames will be canceled and their callbacks will be executed * with frame->canceled set to true (on the callback thread). This method * returns only after all callback invocations have finished. */ void ring_stop(struct tb_ring *ring) { mutex_lock(&ring->nhi->lock); mutex_lock(&ring->lock); dev_info(&ring->nhi->pdev->dev, "stopping %s %d\n", RING_TYPE(ring), ring->hop); if (!ring->running) { dev_WARN(&ring->nhi->pdev->dev, "%s %d already stopped\n", RING_TYPE(ring), ring->hop); goto err; } ring_interrupt_active(ring, false); ring_iowrite32options(ring, 0, 0); ring_iowrite64desc(ring, 0, 0); ring_iowrite16desc(ring, 0, ring->is_tx ? 10 : 8); ring_iowrite32desc(ring, 0, 12); ring->head = 0; ring->tail = 0; ring->running = false; err: mutex_unlock(&ring->lock); mutex_unlock(&ring->nhi->lock); /* * schedule ring->work to invoke callbacks on all remaining frames. */ schedule_work(&ring->work); flush_work(&ring->work); } /* * ring_free() - free ring * * When this method returns all invocations of ring->callback will have * finished. * * Ring must be stopped. * * Must NOT be called from ring_frame->callback! */ void ring_free(struct tb_ring *ring) { mutex_lock(&ring->nhi->lock); /* * Dissociate the ring from the NHI. This also ensures that * nhi_interrupt_work cannot reschedule ring->work. 
*/ if (ring->is_tx) ring->nhi->tx_rings[ring->hop] = NULL; else ring->nhi->rx_rings[ring->hop] = NULL; if (ring->running) { dev_WARN(&ring->nhi->pdev->dev, "%s %d still running\n", RING_TYPE(ring), ring->hop); } dma_free_coherent(&ring->nhi->pdev->dev, ring->size * sizeof(*ring->descriptors), ring->descriptors, ring->descriptors_dma); ring->descriptors = NULL; ring->descriptors_dma = 0; dev_info(&ring->nhi->pdev->dev, "freeing %s %d\n", RING_TYPE(ring), ring->hop); mutex_unlock(&ring->nhi->lock); /** * ring->work can no longer be scheduled (it is scheduled only by * nhi_interrupt_work and ring_stop). Wait for it to finish before * freeing the ring. */ flush_work(&ring->work); mutex_destroy(&ring->lock); kfree(ring); } static void nhi_interrupt_work(struct work_struct *work) { struct tb_nhi *nhi = container_of(work, typeof(*nhi), interrupt_work); int value = 0; /* Suppress uninitialized usage warning. */ int bit; int hop = -1; int type = 0; /* current interrupt type 0: TX, 1: RX, 2: RX overflow */ struct tb_ring *ring; mutex_lock(&nhi->lock); /* * Starting at REG_RING_NOTIFY_BASE there are three status bitfields * (TX, RX, RX overflow). We iterate over the bits and read a new * dwords as required. The registers are cleared on read. */ for (bit = 0; bit < 3 * nhi->hop_count; bit++) { if (bit % 32 == 0) value = ioread32(nhi->iobase + REG_RING_NOTIFY_BASE + 4 * (bit / 32)); if (++hop == nhi->hop_count) { hop = 0; type++; } if ((value & (1 << (bit % 32))) == 0) continue; if (type == 2) { dev_warn(&nhi->pdev->dev, "RX overflow for ring %d\n", hop); continue; } if (type == 0) ring = nhi->tx_rings[hop]; else ring = nhi->rx_rings[hop]; if (ring == NULL) { dev_warn(&nhi->pdev->dev, "got interrupt for inactive %s ring %d\n", type ? 
"RX" : "TX", hop); continue; } /* we do not check ring->running, this is done in ring->work */ schedule_work(&ring->work); } mutex_unlock(&nhi->lock); } static irqreturn_t nhi_msi(int irq, void *data) { struct tb_nhi *nhi = data; schedule_work(&nhi->interrupt_work); return IRQ_HANDLED; } static int nhi_suspend_noirq(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); thunderbolt_suspend(tb); return 0; } static int nhi_resume_noirq(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct tb *tb = pci_get_drvdata(pdev); thunderbolt_resume(tb); return 0; } static void nhi_shutdown(struct tb_nhi *nhi) { int i; dev_info(&nhi->pdev->dev, "shutdown\n"); for (i = 0; i < nhi->hop_count; i++) { if (nhi->tx_rings[i]) dev_WARN(&nhi->pdev->dev, "TX ring %d is still active\n", i); if (nhi->rx_rings[i]) dev_WARN(&nhi->pdev->dev, "RX ring %d is still active\n", i); } nhi_disable_interrupts(nhi); /* * We have to release the irq before calling flush_work. Otherwise an * already executing IRQ handler could call schedule_work again. 
*/ devm_free_irq(&nhi->pdev->dev, nhi->pdev->irq, nhi); flush_work(&nhi->interrupt_work); mutex_destroy(&nhi->lock); } static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct tb_nhi *nhi; struct tb *tb; int res; res = pcim_enable_device(pdev); if (res) { dev_err(&pdev->dev, "cannot enable PCI device, aborting\n"); return res; } res = pci_enable_msi(pdev); if (res) { dev_err(&pdev->dev, "cannot enable MSI, aborting\n"); return res; } res = pcim_iomap_regions(pdev, 1 << 0, "thunderbolt"); if (res) { dev_err(&pdev->dev, "cannot obtain PCI resources, aborting\n"); return res; } nhi = devm_kzalloc(&pdev->dev, sizeof(*nhi), GFP_KERNEL); if (!nhi) return -ENOMEM; nhi->pdev = pdev; /* cannot fail - table is allocated bin pcim_iomap_regions */ nhi->iobase = pcim_iomap_table(pdev)[0]; nhi->hop_count = ioread32(nhi->iobase + REG_HOP_COUNT) & 0x3ff; if (nhi->hop_count != 12 && nhi->hop_count != 32) dev_warn(&pdev->dev, "unexpected hop count: %d\n", nhi->hop_count); INIT_WORK(&nhi->interrupt_work, nhi_interrupt_work); nhi->tx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, sizeof(*nhi->tx_rings), GFP_KERNEL); nhi->rx_rings = devm_kcalloc(&pdev->dev, nhi->hop_count, sizeof(*nhi->rx_rings), GFP_KERNEL); if (!nhi->tx_rings || !nhi->rx_rings) return -ENOMEM; nhi_disable_interrupts(nhi); /* In case someone left them on. */ res = devm_request_irq(&pdev->dev, pdev->irq, nhi_msi, IRQF_NO_SUSPEND, /* must work during _noirq */ "thunderbolt", nhi); if (res) { dev_err(&pdev->dev, "request_irq failed, aborting\n"); return res; } mutex_init(&nhi->lock); pci_set_master(pdev); /* magic value - clock related? */ iowrite32(3906250 / 10000, nhi->iobase + 0x38c00); dev_info(&nhi->pdev->dev, "NHI initialized, starting thunderbolt\n"); tb = thunderbolt_alloc_and_start(nhi); if (!tb) { /* * At this point the RX/TX rings might already have been * activated. Do a proper shutdown. 
*/ nhi_shutdown(nhi); return -EIO; } pci_set_drvdata(pdev, tb); return 0; } static void nhi_remove(struct pci_dev *pdev) { struct tb *tb = pci_get_drvdata(pdev); struct tb_nhi *nhi = tb->nhi; thunderbolt_shutdown_and_free(tb); nhi_shutdown(nhi); } /* * The tunneled pci bridges are siblings of us. Use resume_noirq to reenable * the tunnels asap. A corresponding pci quirk blocks the downstream bridges * resume_noirq until we are done. */ static const struct dev_pm_ops nhi_pm_ops = { .suspend_noirq = nhi_suspend_noirq, .resume_noirq = nhi_resume_noirq, .freeze_noirq = nhi_suspend_noirq, /* * we just disable hotplug, the * pci-tunnels stay alive. */ .restore_noirq = nhi_resume_noirq, }; static struct pci_device_id nhi_ids[] = { /* * We have to specify class, the TB bridges use the same device and * vendor (sub)id on gen 1 and gen 2 controllers. */ { .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, .subvendor = 0x2222, .subdevice = 0x1111, }, { .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, .subvendor = 0x2222, .subdevice = 0x1111, }, { .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { .class = PCI_CLASS_SYSTEM_OTHER << 8, .class_mask = ~0, .vendor = PCI_VENDOR_ID_INTEL, .device = PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI, .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, }, { 0,} }; MODULE_DEVICE_TABLE(pci, nhi_ids); MODULE_LICENSE("GPL"); static struct pci_driver nhi_driver = { .name = "thunderbolt", .id_table = nhi_ids, .probe = nhi_probe, .remove = nhi_remove, .driver.pm = &nhi_pm_ops, }; static int __init nhi_init(void) { if (!dmi_match(DMI_BOARD_VENDOR, "Apple Inc.")) return -ENOSYS; return pci_register_driver(&nhi_driver); } static void __exit nhi_unload(void) 
{ pci_unregister_driver(&nhi_driver); } module_init(nhi_init); module_exit(nhi_unload);
gpl-2.0
PyYoshi/b2g_kernel_sharp_is01
drivers/scsi/qla4xxx/ql4_init.c
703
40003
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c)  2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include <scsi/iscsi_if.h>
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

static struct ddb_entry *qla4xxx_alloc_ddb(struct scsi_qla_host *ha,
					   uint32_t fw_ddb_index);

/**
 * ql4xxx_set_mac_number - record which on-board MAC this function uses
 * @ha: pointer to host adapter structure.
 *
 * Reads the control/status register (under the hardware lock) and sets
 * ha->mac_index according to which SCSI function this adapter instance is.
 *
 * Fix: the original computed a func_number local from the register value
 * but never used it; the dead local has been removed.
 */
static void ql4xxx_set_mac_number(struct scsi_qla_host *ha)
{
	uint32_t value;
	unsigned long flags;

	/* Sample the control/status register under the hardware lock. */
	spin_lock_irqsave(&ha->hardware_lock, flags);
	value = readw(&ha->reg->ctrl_status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	switch (value & ISP_CONTROL_FN_MASK) {
	case ISP_CONTROL_FN0_SCSI:
		ha->mac_index = 1;
		break;
	case ISP_CONTROL_FN1_SCSI:
		ha->mac_index = 3;
		break;
	default:
		DEBUG2(printk("scsi%ld: %s: Invalid function number, "
			      "ispControlStatus = 0x%x\n", ha->host_no,
			      __func__, value));
		break;
	}
	DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__,
		      ha->mac_index));
}

/**
 * qla4xxx_free_ddb - deallocate ddb
 * @ha: pointer to host adapter structure.
 * @ddb_entry: pointer to device database entry
 *
 * This routine deallocates and unlinks the specified ddb_entry from the
 * adapter's ddb list and index map, then frees the associated session.
 **/
static void qla4xxx_free_ddb(struct scsi_qla_host *ha,
			     struct ddb_entry *ddb_entry)
{
	/* Remove device entry from list */
	list_del_init(&ddb_entry->list);

	/* Remove device pointer from index mapping arrays */
	ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] =
	    (struct ddb_entry *) INVALID_ENTRY;
	ha->tot_ddbs--;

	/* Free memory and scsi-ml struct for device entry */
	qla4xxx_destroy_sess(ddb_entry);
}

/**
 * qla4xxx_free_ddb_list - deallocate all ddbs
 * @ha: pointer to host adapter structure.
 *
 * This routine deallocates and removes all devices on the specified adapter.
**/ void qla4xxx_free_ddb_list(struct scsi_qla_host *ha) { struct list_head *ptr; struct ddb_entry *ddb_entry; while (!list_empty(&ha->ddb_list)) { ptr = ha->ddb_list.next; /* Free memory for device entry and remove */ ddb_entry = list_entry(ptr, struct ddb_entry, list); qla4xxx_free_ddb(ha, ddb_entry); } } /** * qla4xxx_init_rings - initialize hw queues * @ha: pointer to host adapter structure. * * This routine initializes the internal queues for the specified adapter. * The QLA4010 requires us to restart the queues at index 0. * The QLA4000 doesn't care, so just default to QLA4010's requirement. **/ int qla4xxx_init_rings(struct scsi_qla_host *ha) { unsigned long flags = 0; /* Initialize request queue. */ spin_lock_irqsave(&ha->hardware_lock, flags); ha->request_out = 0; ha->request_in = 0; ha->request_ptr = &ha->request_ring[ha->request_in]; ha->req_q_count = REQUEST_QUEUE_DEPTH; /* Initialize response queue. */ ha->response_in = 0; ha->response_out = 0; ha->response_ptr = &ha->response_ring[ha->response_out]; /* * Initialize DMA Shadow registers. The firmware is really supposed to * take care of this, but on some uniprocessor systems, the shadow * registers aren't cleared-- causing the interrupt_handler to think * there are responses to be processed when there aren't. */ ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0); ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0); wmb(); writel(0, &ha->reg->req_q_in); writel(0, &ha->reg->rsp_q_out); readl(&ha->reg->rsp_q_out); spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; } /** * qla4xxx_validate_mac_address - validate adapter MAC address(es) * @ha: pointer to host adapter structure. 
* **/ static int qla4xxx_validate_mac_address(struct scsi_qla_host *ha) { struct flash_sys_info *sys_info; dma_addr_t sys_info_dma; int status = QLA_ERROR; sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), &sys_info_dma, GFP_KERNEL); if (sys_info == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", ha->host_no, __func__)); goto exit_validate_mac_no_free; } memset(sys_info, 0, sizeof(*sys_info)); /* Get flash sys info */ if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO, sizeof(*sys_info)) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO " "failed\n", ha->host_no, __func__)); goto exit_validate_mac; } /* Save M.A.C. address & serial_number */ memcpy(ha->my_mac, &sys_info->physAddr[0].address[0], min(sizeof(ha->my_mac), sizeof(sys_info->physAddr[0].address))); memcpy(ha->serial_number, &sys_info->acSerialNumber, min(sizeof(ha->serial_number), sizeof(sys_info->acSerialNumber))); status = QLA_SUCCESS; exit_validate_mac: dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info, sys_info_dma); exit_validate_mac_no_free: return status; } /** * qla4xxx_init_local_data - initialize adapter specific local data * @ha: pointer to host adapter structure. * **/ static int qla4xxx_init_local_data(struct scsi_qla_host *ha) { /* Initilize aen queue */ ha->aen_q_count = MAX_AEN_ENTRIES; return qla4xxx_get_firmware_status(ha); } static int qla4xxx_fw_ready(struct scsi_qla_host *ha) { uint32_t timeout_count; int ready = 0; DEBUG2(dev_info(&ha->pdev->dev, "Waiting for Firmware Ready..\n")); for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0; timeout_count--) { if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) qla4xxx_get_dhcp_ip_address(ha); /* Get firmware state. 
*/ if (qla4xxx_get_firmware_state(ha) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: unable to get firmware " "state\n", ha->host_no, __func__)); break; } if (ha->firmware_state & FW_STATE_ERROR) { DEBUG2(printk("scsi%ld: %s: an unrecoverable error has" " occurred\n", ha->host_no, __func__)); break; } if (ha->firmware_state & FW_STATE_CONFIG_WAIT) { /* * The firmware has not yet been issued an Initialize * Firmware command, so issue it now. */ if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) break; /* Go back and test for ready state - no wait. */ continue; } if (ha->firmware_state == FW_STATE_READY) { DEBUG2(dev_info(&ha->pdev->dev, "Firmware Ready..\n")); /* The firmware is ready to process SCSI commands. */ DEBUG2(dev_info(&ha->pdev->dev, "scsi%ld: %s: MEDIA TYPE - %s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_OPTICAL_MEDIA) != 0 ? "OPTICAL" : "COPPER")); DEBUG2(dev_info(&ha->pdev->dev, "scsi%ld: %s: DHCP STATE Enabled " "%s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_DHCP_ENABLED) != 0 ? "YES" : "NO")); DEBUG2(dev_info(&ha->pdev->dev, "scsi%ld: %s: LINK %s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) != 0 ? "UP" : "DOWN")); DEBUG2(dev_info(&ha->pdev->dev, "scsi%ld: %s: iSNS Service " "Started %s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ? 
"YES" : "NO")); ready = 1; break; } DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - " "seconds expired= %d\n", ha->host_no, __func__, ha->firmware_state, ha->addl_fw_state, timeout_count)); if (is_qla4032(ha) && !(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) && (timeout_count < ADAPTER_INIT_TOV - 5)) { break; } msleep(1000); } /* end of for */ if (timeout_count == 0) DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n", ha->host_no, __func__)); if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) { DEBUG2(printk("scsi%ld: %s: FW is reporting its waiting to" " grab an IP address from DHCP server\n", ha->host_no, __func__)); ready = 1; } return ready; } /** * qla4xxx_init_firmware - initializes the firmware. * @ha: pointer to host adapter structure. * **/ static int qla4xxx_init_firmware(struct scsi_qla_host *ha) { int status = QLA_ERROR; dev_info(&ha->pdev->dev, "Initializing firmware..\n"); if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) { DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware " "control block\n", ha->host_no, __func__)); return status; } if (!qla4xxx_fw_ready(ha)) return status; return qla4xxx_get_firmware_status(ha); } static struct ddb_entry* qla4xxx_get_ddb_entry(struct scsi_qla_host *ha, uint32_t fw_ddb_index, uint32_t *new_tgt) { struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; struct ddb_entry *ddb_entry = NULL; int found = 0; uint32_t device_state; *new_tgt = 0; /* Make sure the dma buffer is valid */ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (fw_ddb_entry == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", ha->host_no, __func__)); return NULL; } if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, NULL, NULL, &device_state, NULL, NULL, NULL) == QLA_ERROR) { DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for " "fw_ddb_index %d\n", ha->host_no, __func__, fw_ddb_index)); return NULL; } /* 
Allocate DDB if not already allocated. */ DEBUG2(printk("scsi%ld: %s: Looking for ddb[%d]\n", ha->host_no, __func__, fw_ddb_index)); list_for_each_entry(ddb_entry, &ha->ddb_list, list) { if ((memcmp(ddb_entry->iscsi_name, fw_ddb_entry->iscsi_name, ISCSI_NAME_SIZE) == 0) && (ddb_entry->tpgt == le32_to_cpu(fw_ddb_entry->tgt_portal_grp)) && (memcmp(ddb_entry->isid, fw_ddb_entry->isid, sizeof(ddb_entry->isid)) == 0)) { found++; break; } } if (!found) { DEBUG2(printk("scsi%ld: %s: ddb[%d] not found - allocating " "new ddb\n", ha->host_no, __func__, fw_ddb_index)); *new_tgt = 1; ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index); } /* if not found allocate new ddb */ dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); return ddb_entry; } /** * qla4xxx_update_ddb_entry - update driver's internal ddb * @ha: pointer to host adapter structure. * @ddb_entry: pointer to device database structure to be filled * @fw_ddb_index: index of the ddb entry in fw ddb table * * This routine updates the driver's internal device database entry * with information retrieved from the firmware's device database * entry for the specified device. 
The ddb_entry->fw_ddb_index field * must be initialized prior to calling this routine * **/ static int qla4xxx_update_ddb_entry(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry, uint32_t fw_ddb_index) { struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_entry_dma; int status = QLA_ERROR; if (ddb_entry == NULL) { DEBUG2(printk("scsi%ld: %s: ddb_entry is NULL\n", ha->host_no, __func__)); goto exit_update_ddb; } /* Make sure the dma buffer is valid */ fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), &fw_ddb_entry_dma, GFP_KERNEL); if (fw_ddb_entry == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", ha->host_no, __func__)); goto exit_update_ddb; } if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, fw_ddb_entry, fw_ddb_entry_dma, NULL, NULL, &ddb_entry->fw_ddb_device_state, NULL, &ddb_entry->tcp_source_port_num, &ddb_entry->connection_id) == QLA_ERROR) { DEBUG2(printk("scsi%ld: %s: failed get_ddb_entry for " "fw_ddb_index %d\n", ha->host_no, __func__, fw_ddb_index)); goto exit_update_ddb; } status = QLA_SUCCESS; ddb_entry->target_session_id = le16_to_cpu(fw_ddb_entry->tsid); ddb_entry->task_mgmt_timeout = le16_to_cpu(fw_ddb_entry->def_timeout); ddb_entry->CmdSn = 0; ddb_entry->exe_throttle = le16_to_cpu(fw_ddb_entry->exec_throttle); ddb_entry->default_relogin_timeout = le16_to_cpu(fw_ddb_entry->def_timeout); ddb_entry->default_time2wait = le16_to_cpu(fw_ddb_entry->iscsi_def_time2wait); /* Update index in case it changed */ ddb_entry->fw_ddb_index = fw_ddb_index; ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry; ddb_entry->port = le16_to_cpu(fw_ddb_entry->port); ddb_entry->tpgt = le32_to_cpu(fw_ddb_entry->tgt_portal_grp); memcpy(ddb_entry->isid, fw_ddb_entry->isid, sizeof(ddb_entry->isid)); memcpy(&ddb_entry->iscsi_name[0], &fw_ddb_entry->iscsi_name[0], min(sizeof(ddb_entry->iscsi_name), sizeof(fw_ddb_entry->iscsi_name))); memcpy(&ddb_entry->ip_addr[0], &fw_ddb_entry->ip_addr[0], min(sizeof(ddb_entry->ip_addr), 
sizeof(fw_ddb_entry->ip_addr))); DEBUG2(printk("scsi%ld: %s: ddb[%d] - State= %x status= %d.\n", ha->host_no, __func__, fw_ddb_index, ddb_entry->fw_ddb_device_state, status)); exit_update_ddb: if (fw_ddb_entry) dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry), fw_ddb_entry, fw_ddb_entry_dma); return status; } /** * qla4xxx_alloc_ddb - allocate device database entry * @ha: Pointer to host adapter structure. * @fw_ddb_index: Firmware's device database index * * This routine allocates a ddb_entry, ititializes some values, and * inserts it into the ddb list. **/ static struct ddb_entry * qla4xxx_alloc_ddb(struct scsi_qla_host *ha, uint32_t fw_ddb_index) { struct ddb_entry *ddb_entry; DEBUG2(printk("scsi%ld: %s: fw_ddb_index [%d]\n", ha->host_no, __func__, fw_ddb_index)); ddb_entry = qla4xxx_alloc_sess(ha); if (ddb_entry == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to allocate memory " "to add fw_ddb_index [%d]\n", ha->host_no, __func__, fw_ddb_index)); return ddb_entry; } ddb_entry->fw_ddb_index = fw_ddb_index; atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count); atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY); atomic_set(&ddb_entry->relogin_timer, 0); atomic_set(&ddb_entry->relogin_retry_count, 0); atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); list_add_tail(&ddb_entry->list, &ha->ddb_list); ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry; ha->tot_ddbs++; return ddb_entry; } /** * qla4xxx_configure_ddbs - builds driver ddb list * @ha: Pointer to host adapter structure. * * This routine searches for all valid firmware ddb entries and builds * an internal ddb list. Ddbs that are considered valid are those with * a device state of SESSION_ACTIVE. 
**/ static int qla4xxx_build_ddb_list(struct scsi_qla_host *ha) { int status = QLA_SUCCESS; uint32_t fw_ddb_index = 0; uint32_t next_fw_ddb_index = 0; uint32_t ddb_state; uint32_t conn_err, err_code; struct ddb_entry *ddb_entry; uint32_t new_tgt; dev_info(&ha->pdev->dev, "Initializing DDBs ...\n"); for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index = next_fw_ddb_index) { /* First, let's see if a device exists here */ if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, NULL, 0, NULL, &next_fw_ddb_index, &ddb_state, &conn_err, NULL, NULL) == QLA_ERROR) { DEBUG2(printk("scsi%ld: %s: get_ddb_entry, " "fw_ddb_index %d failed", ha->host_no, __func__, fw_ddb_index)); return QLA_ERROR; } DEBUG2(printk("scsi%ld: %s: Getting DDB[%d] ddbstate=0x%x, " "next_fw_ddb_index=%d.\n", ha->host_no, __func__, fw_ddb_index, ddb_state, next_fw_ddb_index)); /* Issue relogin, if necessary. */ if (ddb_state == DDB_DS_SESSION_FAILED || ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) { /* Try and login to device */ DEBUG2(printk("scsi%ld: %s: Login to DDB[%d]\n", ha->host_no, __func__, fw_ddb_index)); err_code = ((conn_err & 0x00ff0000) >> 16); if (err_code == 0x1c || err_code == 0x06) { DEBUG2(printk("scsi%ld: %s send target " "completed " "or access denied failure\n", ha->host_no, __func__)); } else { qla4xxx_set_ddb_entry(ha, fw_ddb_index, 0); if (qla4xxx_get_fwddb_entry(ha, fw_ddb_index, NULL, 0, NULL, &next_fw_ddb_index, &ddb_state, &conn_err, NULL, NULL) == QLA_ERROR) { DEBUG2(printk("scsi%ld: %s:" "get_ddb_entry %d failed\n", ha->host_no, __func__, fw_ddb_index)); return QLA_ERROR; } } } if (ddb_state != DDB_DS_SESSION_ACTIVE) goto next_one; /* * if fw_ddb with session active state found, * add to ddb_list */ DEBUG2(printk("scsi%ld: %s: DDB[%d] added to list\n", ha->host_no, __func__, fw_ddb_index)); /* Add DDB to internal our ddb list. 
*/ ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt); if (ddb_entry == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to allocate memory " "for device at fw_ddb_index %d\n", ha->host_no, __func__, fw_ddb_index)); return QLA_ERROR; } /* Fill in the device structure */ if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) == QLA_ERROR) { ha->fw_ddb_index_map[fw_ddb_index] = (struct ddb_entry *)INVALID_ENTRY; DEBUG2(printk("scsi%ld: %s: update_ddb_entry failed " "for fw_ddb_index %d.\n", ha->host_no, __func__, fw_ddb_index)); return QLA_ERROR; } next_one: /* We know we've reached the last device when * next_fw_ddb_index is 0 */ if (next_fw_ddb_index == 0) break; } dev_info(&ha->pdev->dev, "DDB list done..\n"); return status; } struct qla4_relog_scan { int halt_wait; uint32_t conn_err; uint32_t err_code; uint32_t fw_ddb_index; uint32_t next_fw_ddb_index; uint32_t fw_ddb_device_state; }; static int qla4_test_rdy(struct scsi_qla_host *ha, struct qla4_relog_scan *rs) { struct ddb_entry *ddb_entry; /* * Don't want to do a relogin if connection * error is 0x1c. 
*/ rs->err_code = ((rs->conn_err & 0x00ff0000) >> 16); if (rs->err_code == 0x1c || rs->err_code == 0x06) { DEBUG2(printk( "scsi%ld: %s send target" " completed or " "access denied failure\n", ha->host_no, __func__)); } else { /* We either have a device that is in * the process of relogging in or a * device that is waiting to be * relogged in */ rs->halt_wait = 0; ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, rs->fw_ddb_index); if (ddb_entry == NULL) return QLA_ERROR; if (ddb_entry->dev_scan_wait_to_start_relogin != 0 && time_after_eq(jiffies, ddb_entry-> dev_scan_wait_to_start_relogin)) { ddb_entry->dev_scan_wait_to_start_relogin = 0; qla4xxx_set_ddb_entry(ha, rs->fw_ddb_index, 0); } } return QLA_SUCCESS; } static int qla4_scan_for_relogin(struct scsi_qla_host *ha, struct qla4_relog_scan *rs) { int error; /* scan for relogins * ----------------- */ for (rs->fw_ddb_index = 0; rs->fw_ddb_index < MAX_DDB_ENTRIES; rs->fw_ddb_index = rs->next_fw_ddb_index) { if (qla4xxx_get_fwddb_entry(ha, rs->fw_ddb_index, NULL, 0, NULL, &rs->next_fw_ddb_index, &rs->fw_ddb_device_state, &rs->conn_err, NULL, NULL) == QLA_ERROR) return QLA_ERROR; if (rs->fw_ddb_device_state == DDB_DS_LOGIN_IN_PROCESS) rs->halt_wait = 0; if (rs->fw_ddb_device_state == DDB_DS_SESSION_FAILED || rs->fw_ddb_device_state == DDB_DS_NO_CONNECTION_ACTIVE) { error = qla4_test_rdy(ha, rs); if (error) return error; } /* We know we've reached the last device when * next_fw_ddb_index is 0 */ if (rs->next_fw_ddb_index == 0) break; } return QLA_SUCCESS; } /** * qla4xxx_devices_ready - wait for target devices to be logged in * @ha: pointer to adapter structure * * This routine waits up to ql4xdiscoverywait seconds * F/W database during driver load time. 
**/ static int qla4xxx_devices_ready(struct scsi_qla_host *ha) { int error; unsigned long discovery_wtime; struct qla4_relog_scan rs; discovery_wtime = jiffies + (ql4xdiscoverywait * HZ); DEBUG(printk("Waiting (%d) for devices ...\n", ql4xdiscoverywait)); do { /* poll for AEN. */ qla4xxx_get_firmware_state(ha); if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) { /* Set time-between-relogin timer */ qla4xxx_process_aen(ha, RELOGIN_DDB_CHANGED_AENS); } /* if no relogins active or needed, halt discvery wait */ rs.halt_wait = 1; error = qla4_scan_for_relogin(ha, &rs); if (rs.halt_wait) { DEBUG2(printk("scsi%ld: %s: Delay halted. Devices " "Ready.\n", ha->host_no, __func__)); return QLA_SUCCESS; } msleep(2000); } while (!time_after_eq(jiffies, discovery_wtime)); DEBUG3(qla4xxx_get_conn_event_log(ha)); return QLA_SUCCESS; } static void qla4xxx_flush_AENS(struct scsi_qla_host *ha) { unsigned long wtime; /* Flush the 0x8014 AEN from the firmware as a result of * Auto connect. We are basically doing get_firmware_ddb() * to determine whether we need to log back in or not. * Trying to do a set ddb before we have processed 0x8014 * will result in another set_ddb() for the same ddb. In other * words there will be stale entries in the aen_q. 
*/ wtime = jiffies + (2 * HZ); do { if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) if (ha->firmware_state & (BIT_2 | BIT_0)) return; if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS); msleep(1000); } while (!time_after_eq(jiffies, wtime)); } static int qla4xxx_initialize_ddb_list(struct scsi_qla_host *ha) { uint16_t fw_ddb_index; int status = QLA_SUCCESS; /* free the ddb list if is not empty */ if (!list_empty(&ha->ddb_list)) qla4xxx_free_ddb_list(ha); for (fw_ddb_index = 0; fw_ddb_index < MAX_DDB_ENTRIES; fw_ddb_index++) ha->fw_ddb_index_map[fw_ddb_index] = (struct ddb_entry *)INVALID_ENTRY; ha->tot_ddbs = 0; qla4xxx_flush_AENS(ha); /* * First perform device discovery for active * fw ddb indexes and build * ddb list. */ if ((status = qla4xxx_build_ddb_list(ha)) == QLA_ERROR) return status; /* Wait for an AEN */ qla4xxx_devices_ready(ha); /* * Targets can come online after the inital discovery, so processing * the aens here will catch them. */ if (test_and_clear_bit(DPC_AEN, &ha->dpc_flags)) qla4xxx_process_aen(ha, PROCESS_ALL_AENS); return status; } /** * qla4xxx_update_ddb_list - update the driver ddb list * @ha: pointer to host adapter structure. * * This routine obtains device information from the F/W database after * firmware or adapter resets. The device table is preserved. **/ int qla4xxx_reinitialize_ddb_list(struct scsi_qla_host *ha) { int status = QLA_SUCCESS; struct ddb_entry *ddb_entry, *detemp; /* Update the device information for all devices. 
*/ list_for_each_entry_safe(ddb_entry, detemp, &ha->ddb_list, list) { qla4xxx_update_ddb_entry(ha, ddb_entry, ddb_entry->fw_ddb_index); if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); DEBUG2(printk ("scsi%ld: %s: ddb index [%d] marked " "ONLINE\n", ha->host_no, __func__, ddb_entry->fw_ddb_index)); } else if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) qla4xxx_mark_device_missing(ha, ddb_entry); } return status; } /** * qla4xxx_relogin_device - re-establish session * @ha: Pointer to host adapter structure. * @ddb_entry: Pointer to device database entry * * This routine does a session relogin with the specified device. * The ddb entry must be assigned prior to making this call. **/ int qla4xxx_relogin_device(struct scsi_qla_host *ha, struct ddb_entry * ddb_entry) { uint16_t relogin_timer; relogin_timer = max(ddb_entry->default_relogin_timeout, (uint16_t)RELOGIN_TOV); atomic_set(&ddb_entry->relogin_timer, relogin_timer); DEBUG2(printk("scsi%ld: Relogin index [%d]. TOV=%d\n", ha->host_no, ddb_entry->fw_ddb_index, relogin_timer)); qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, 0); return QLA_SUCCESS; } static int qla4xxx_config_nvram(struct scsi_qla_host *ha) { unsigned long flags; union external_hw_config_reg extHwConfig; DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no, __func__)); if (ql4xxx_lock_flash(ha) != QLA_SUCCESS) return (QLA_ERROR); if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) { ql4xxx_unlock_flash(ha); return (QLA_ERROR); } /* Get EEPRom Parameters from NVRAM and validate */ dev_info(&ha->pdev->dev, "Configuring NVRAM ...\n"); if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) { spin_lock_irqsave(&ha->hardware_lock, flags); extHwConfig.Asuint32_t = rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha)); spin_unlock_irqrestore(&ha->hardware_lock, flags); } else { /* * QLogic adapters should always have a valid NVRAM. * If not valid, do not load. 
*/ dev_warn(&ha->pdev->dev, "scsi%ld: %s: EEProm checksum invalid. " "Please update your EEPROM\n", ha->host_no, __func__); /* set defaults */ if (is_qla4010(ha)) extHwConfig.Asuint32_t = 0x1912; else if (is_qla4022(ha) | is_qla4032(ha)) extHwConfig.Asuint32_t = 0x0023; } DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n", ha->host_no, __func__, extHwConfig.Asuint32_t)); spin_lock_irqsave(&ha->hardware_lock, flags); writel((0xFFFF << 16) | extHwConfig.Asuint32_t, isp_ext_hw_conf(ha)); readl(isp_ext_hw_conf(ha)); spin_unlock_irqrestore(&ha->hardware_lock, flags); ql4xxx_unlock_nvram(ha); ql4xxx_unlock_flash(ha); return (QLA_SUCCESS); } static void qla4x00_pci_config(struct scsi_qla_host *ha) { uint16_t w; int status; dev_info(&ha->pdev->dev, "Configuring PCI space...\n"); pci_set_master(ha->pdev); status = pci_set_mwi(ha->pdev); /* * We want to respect framework's setting of PCI configuration space * command register and also want to make sure that all bits of * interest to us are properly set in command register. */ pci_read_config_word(ha->pdev, PCI_COMMAND, &w); w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; w &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(ha->pdev, PCI_COMMAND, w); } static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha) { int status = QLA_ERROR; uint32_t max_wait_time; unsigned long flags; uint32_t mbox_status; dev_info(&ha->pdev->dev, "Starting firmware ...\n"); /* * Start firmware from flash ROM * * WORKAROUND: Stuff a non-constant value that the firmware can * use as a seed for a random number generator in MB7 prior to * setting BOOT_ENABLE. Fixes problem where the TCP * connections use the same TCP ports after each reboot, * causing some connections to not get re-established. 
*/ DEBUG(printk("scsi%d: %s: Start firmware from flash ROM\n", ha->host_no, __func__)); spin_lock_irqsave(&ha->hardware_lock, flags); writel(jiffies, &ha->reg->mailbox[7]); if (is_qla4022(ha) | is_qla4032(ha)) writel(set_rmask(NVR_WRITE_ENABLE), &ha->reg->u1.isp4022.nvram); writel(2, &ha->reg->mailbox[6]); readl(&ha->reg->mailbox[6]); writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait for firmware to come UP. */ max_wait_time = FIRMWARE_UP_TOV * 4; do { uint32_t ctrl_status; spin_lock_irqsave(&ha->hardware_lock, flags); ctrl_status = readw(&ha->reg->ctrl_status); mbox_status = readw(&ha->reg->mailbox[0]); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (ctrl_status & set_rmask(CSR_SCSI_PROCESSOR_INTR)) break; if (mbox_status == MBOX_STS_COMMAND_COMPLETE) break; DEBUG2(printk("scsi%ld: %s: Waiting for boot firmware to " "complete... ctrl_sts=0x%x, remaining=%d\n", ha->host_no, __func__, ctrl_status, max_wait_time)); msleep(250); } while ((max_wait_time--)); if (mbox_status == MBOX_STS_COMMAND_COMPLETE) { DEBUG(printk("scsi%ld: %s: Firmware has started\n", ha->host_no, __func__)); spin_lock_irqsave(&ha->hardware_lock, flags); writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); status = QLA_SUCCESS; } else { printk(KERN_INFO "scsi%ld: %s: Boot firmware failed " "- mbox status 0x%x\n", ha->host_no, __func__, mbox_status); status = QLA_ERROR; } return status; } int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a) { #define QL4_LOCK_DRVR_WAIT 60 #define QL4_LOCK_DRVR_SLEEP 1 int drvr_wait = QL4_LOCK_DRVR_WAIT; while (drvr_wait) { if (ql4xxx_lock_drvr(a) == 0) { ssleep(QL4_LOCK_DRVR_SLEEP); if (drvr_wait) { DEBUG2(printk("scsi%ld: %s: Waiting for " "Global Init Semaphore(%d)...\n", a->host_no, __func__, drvr_wait)); } drvr_wait -= QL4_LOCK_DRVR_SLEEP; } else { 
DEBUG2(printk("scsi%ld: %s: Global Init Semaphore " "acquired\n", a->host_no, __func__)); return QLA_SUCCESS; } } return QLA_ERROR; } /** * qla4xxx_start_firmware - starts qla4xxx firmware * @ha: Pointer to host adapter structure. * * This routine performs the necessary steps to start the firmware for * the QLA4010 adapter. **/ static int qla4xxx_start_firmware(struct scsi_qla_host *ha) { unsigned long flags = 0; uint32_t mbox_status; int status = QLA_ERROR; int soft_reset = 1; int config_chip = 0; if (is_qla4022(ha) | is_qla4032(ha)) ql4xxx_set_mac_number(ha); if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) return QLA_ERROR; spin_lock_irqsave(&ha->hardware_lock, flags); DEBUG2(printk("scsi%ld: %s: port_ctrl = 0x%08X\n", ha->host_no, __func__, readw(isp_port_ctrl(ha)))); DEBUG(printk("scsi%ld: %s: port_status = 0x%08X\n", ha->host_no, __func__, readw(isp_port_status(ha)))); /* Is Hardware already initialized? */ if ((readw(isp_port_ctrl(ha)) & 0x8000) != 0) { DEBUG(printk("scsi%ld: %s: Hardware has already been " "initialized\n", ha->host_no, __func__)); /* Receive firmware boot acknowledgement */ mbox_status = readw(&ha->reg->mailbox[0]); DEBUG2(printk("scsi%ld: %s: H/W Config complete - mbox[0]= " "0x%x\n", ha->host_no, __func__, mbox_status)); /* Is firmware already booted? 
*/ if (mbox_status == 0) { /* F/W not running, must be config by net driver */ config_chip = 1; soft_reset = 0; } else { writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: Get firmware " "state -- state = 0x%x\n", ha->host_no, __func__, ha->firmware_state)); /* F/W is running */ if (ha->firmware_state & FW_STATE_CONFIG_WAIT) { DEBUG2(printk("scsi%ld: %s: Firmware " "in known state -- " "config and " "boot, state = 0x%x\n", ha->host_no, __func__, ha->firmware_state)); config_chip = 1; soft_reset = 0; } } else { DEBUG2(printk("scsi%ld: %s: Firmware in " "unknown state -- resetting," " state = " "0x%x\n", ha->host_no, __func__, ha->firmware_state)); } spin_lock_irqsave(&ha->hardware_lock, flags); } } else { DEBUG(printk("scsi%ld: %s: H/W initialization hasn't been " "started - resetting\n", ha->host_no, __func__)); } spin_unlock_irqrestore(&ha->hardware_lock, flags); DEBUG(printk("scsi%ld: %s: Flags soft_rest=%d, config= %d\n ", ha->host_no, __func__, soft_reset, config_chip)); if (soft_reset) { DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no, __func__)); status = qla4xxx_soft_reset(ha); if (status == QLA_ERROR) { DEBUG(printk("scsi%d: %s: Soft Reset failed!\n", ha->host_no, __func__)); ql4xxx_unlock_drvr(ha); return QLA_ERROR; } config_chip = 1; /* Reset clears the semaphore, so acquire again */ if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) return QLA_ERROR; } if (config_chip) { if ((status = qla4xxx_config_nvram(ha)) == QLA_SUCCESS) status = qla4xxx_start_firmware_from_flash(ha); } ql4xxx_unlock_drvr(ha); if (status == QLA_SUCCESS) { qla4xxx_get_fw_version(ha); if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags)) qla4xxx_get_crash_record(ha); } else { DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n", ha->host_no, __func__)); } return status; } /** * 
qla4xxx_initialize_adapter - initiailizes hba * @ha: Pointer to host adapter structure. * @renew_ddb_list: Indicates what to do with the adapter's ddb list * after adapter recovery has completed. * 0=preserve ddb list, 1=destroy and rebuild ddb list * * This routine parforms all of the steps necessary to initialize the adapter. * **/ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, uint8_t renew_ddb_list) { int status = QLA_ERROR; int8_t ip_address[IP_ADDR_LEN] = {0} ; ha->eeprom_cmd_data = 0; qla4x00_pci_config(ha); qla4xxx_disable_intrs(ha); /* Initialize the Host adapter request/response queues and firmware */ if (qla4xxx_start_firmware(ha) == QLA_ERROR) goto exit_init_hba; if (qla4xxx_validate_mac_address(ha) == QLA_ERROR) goto exit_init_hba; if (qla4xxx_init_local_data(ha) == QLA_ERROR) goto exit_init_hba; status = qla4xxx_init_firmware(ha); if (status == QLA_ERROR) goto exit_init_hba; /* * FW is waiting to get an IP address from DHCP server: Skip building * the ddb_list and wait for DHCP lease acquired aen to come in * followed by 0x8014 aen" to trigger the tgt discovery process. */ if (ha->firmware_state & FW_STATE_DHCP_IN_PROGRESS) goto exit_init_online; /* Skip device discovery if ip and subnet is zero */ if (memcmp(ha->ip_address, ip_address, IP_ADDR_LEN) == 0 || memcmp(ha->subnet_mask, ip_address, IP_ADDR_LEN) == 0) goto exit_init_online; if (renew_ddb_list == PRESERVE_DDB_LIST) { /* * We want to preserve lun states (i.e. suspended, etc.) * for recovery initiated by the driver. So just update * the device states for the existing ddb_list. */ qla4xxx_reinitialize_ddb_list(ha); } else if (renew_ddb_list == REBUILD_DDB_LIST) { /* * We want to build the ddb_list from scratch during * driver initialization and recovery initiated by the * INT_HBA_RESET IOCTL. 
*/ status = qla4xxx_initialize_ddb_list(ha); if (status == QLA_ERROR) { DEBUG2(printk("%s(%ld) Error occurred during build" "ddb list\n", __func__, ha->host_no)); goto exit_init_hba; } } if (!ha->tot_ddbs) { DEBUG2(printk("scsi%ld: Failed to initialize devices or none " "present in Firmware device database\n", ha->host_no)); } exit_init_online: set_bit(AF_ONLINE, &ha->flags); exit_init_hba: return status; } /** * qla4xxx_add_device_dynamically - ddb addition due to an AEN * @ha: Pointer to host adapter structure. * @fw_ddb_index: Firmware's device database index * * This routine processes adds a device as a result of an 8014h AEN. **/ static void qla4xxx_add_device_dynamically(struct scsi_qla_host *ha, uint32_t fw_ddb_index) { struct ddb_entry * ddb_entry; uint32_t new_tgt; /* First allocate a device structure */ ddb_entry = qla4xxx_get_ddb_entry(ha, fw_ddb_index, &new_tgt); if (ddb_entry == NULL) { DEBUG2(printk(KERN_WARNING "scsi%ld: Unable to allocate memory to add " "fw_ddb_index %d\n", ha->host_no, fw_ddb_index)); return; } if (!new_tgt && (ddb_entry->fw_ddb_index != fw_ddb_index)) { /* Target has been bound to a new fw_ddb_index */ qla4xxx_free_ddb(ha, ddb_entry); ddb_entry = qla4xxx_alloc_ddb(ha, fw_ddb_index); if (ddb_entry == NULL) { DEBUG2(printk(KERN_WARNING "scsi%ld: Unable to allocate memory" " to add fw_ddb_index %d\n", ha->host_no, fw_ddb_index)); return; } } if (qla4xxx_update_ddb_entry(ha, ddb_entry, fw_ddb_index) == QLA_ERROR) { ha->fw_ddb_index_map[fw_ddb_index] = (struct ddb_entry *)INVALID_ENTRY; DEBUG2(printk(KERN_WARNING "scsi%ld: failed to add new device at index " "[%d]\n Unable to retrieve fw ddb entry\n", ha->host_no, fw_ddb_index)); qla4xxx_free_ddb(ha, ddb_entry); return; } if (qla4xxx_add_sess(ddb_entry)) { DEBUG2(printk(KERN_WARNING "scsi%ld: failed to add new device at index " "[%d]\n Unable to add connection and session\n", ha->host_no, fw_ddb_index)); qla4xxx_free_ddb(ha, ddb_entry); } } /** * qla4xxx_process_ddb_changed - process 
ddb state change * @ha - Pointer to host adapter structure. * @fw_ddb_index - Firmware's device database index * @state - Device state * * This routine processes a Decive Database Changed AEN Event. **/ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, uint32_t state) { struct ddb_entry * ddb_entry; uint32_t old_fw_ddb_device_state; /* check for out of range index */ if (fw_ddb_index >= MAX_DDB_ENTRIES) return QLA_ERROR; /* Get the corresponging ddb entry */ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index); /* Device does not currently exist in our database. */ if (ddb_entry == NULL) { if (state == DDB_DS_SESSION_ACTIVE) qla4xxx_add_device_dynamically(ha, fw_ddb_index); return QLA_SUCCESS; } /* Device already exists in our database. */ old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; DEBUG2(printk("scsi%ld: %s DDB - old state= 0x%x, new state=0x%x for " "index [%d]\n", ha->host_no, __func__, ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); if (old_fw_ddb_device_state == state && state == DDB_DS_SESSION_ACTIVE) { /* Do nothing, state not changed. */ return QLA_SUCCESS; } ddb_entry->fw_ddb_device_state = state; /* Device is back online. */ if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_ACTIVE) { atomic_set(&ddb_entry->state, DDB_STATE_ONLINE); atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count); atomic_set(&ddb_entry->relogin_retry_count, 0); atomic_set(&ddb_entry->relogin_timer, 0); clear_bit(DF_RELOGIN, &ddb_entry->flags); clear_bit(DF_NO_RELOGIN, &ddb_entry->flags); iscsi_unblock_session(ddb_entry->sess); iscsi_session_event(ddb_entry->sess, ISCSI_KEVENT_CREATE_SESSION); /* * Change the lun state to READY in case the lun TIMEOUT before * the device came back. */ } else { /* Device went away, try to relogin. 
*/ /* Mark device missing */ if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE) qla4xxx_mark_device_missing(ha, ddb_entry); /* * Relogin if device state changed to a not active state. * However, do not relogin if this aen is a result of an IOCTL * logout (DF_NO_RELOGIN) or if this is a discovered device. */ if (ddb_entry->fw_ddb_device_state == DDB_DS_SESSION_FAILED && !test_bit(DF_RELOGIN, &ddb_entry->flags) && !test_bit(DF_NO_RELOGIN, &ddb_entry->flags) && !test_bit(DF_ISNS_DISCOVERED, &ddb_entry->flags)) { /* * This triggers a relogin. After the relogin_timer * expires, the relogin gets scheduled. We must wait a * minimum amount of time since receiving an 0x8014 AEN * with failed device_state or a logout response before * we can issue another relogin. */ /* Firmware padds this timeout: (time2wait +1). * Driver retry to login should be longer than F/W. * Otherwise F/W will fail * set_ddb() mbx cmd with 0x4005 since it still * counting down its time2wait. */ atomic_set(&ddb_entry->relogin_timer, 0); atomic_set(&ddb_entry->retry_relogin_timer, ddb_entry->default_time2wait + 4); } } return QLA_SUCCESS; }
gpl-2.0
Zkin/pf-kernel-updates
arch/mips/kernel/jump_label.c
703
2175
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 2010 Cavium Networks, Inc.
 */
#include <linux/jump_label.h>
#include <linux/kernel.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/types.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/inst.h>

#ifdef HAVE_JUMP_LABEL

/*
 * Define parameters for the standard MIPS and the microMIPS jump
 * instruction encoding respectively:
 *
 * - the ISA bit of the target, either 0 or 1 respectively,
 *
 * - the amount the jump target address is shifted right to fit in the
 *   immediate field of the machine instruction, either 2 or 1,
 *
 * - the mask determining the size of the jump region relative to the
 *   delay-slot instruction, either 256MB or 128MB,
 *
 * - the jump target alignment, either 4 or 2 bytes.
 */
#define J_ISA_BIT	IS_ENABLED(CONFIG_CPU_MICROMIPS)
#define J_RANGE_SHIFT	(2 - J_ISA_BIT)
#define J_RANGE_MASK	((1ul << (26 + J_RANGE_SHIFT)) - 1)
#define J_ALIGN_MASK	((1ul << J_RANGE_SHIFT) - 1)

/*
 * Patch the instruction at a jump-label site in place: either a
 * direct jump ('j'/'j32' for microMIPS) when the key is enabled,
 * or a nop when it is disabled.
 */
void arch_jump_label_transform(struct jump_entry *e,
			       enum jump_label_type type)
{
	union mips_instruction *insn_p;
	union mips_instruction insn;

	/* Strip the ISA (thumb-style) bit to get the real code address. */
	insn_p = (union mips_instruction *)msk_isa16_mode(e->code);

	/* Jump only works within an aligned region its delay slot is in. */
	BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK));

	/* Target must have the right alignment and ISA must be preserved. */
	BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);

	if (type == JUMP_LABEL_JMP) {
		/* Encode a direct jump to the target address. */
		insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
		insn.j_format.target = e->target >> J_RANGE_SHIFT;
	} else {
		insn.word = 0; /* nop */
	}

	/* Serialize against other code patchers while rewriting text. */
	get_online_cpus();
	mutex_lock(&text_mutex);
	if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
		/*
		 * microMIPS text is only halfword-aligned, so the 32-bit
		 * word must be stored as two halfword writes, most
		 * significant half first.
		 */
		insn_p->halfword[0] = insn.word >> 16;
		insn_p->halfword[1] = insn.word;
	} else
		*insn_p = insn;

	/* Make the new instruction visible to instruction fetch. */
	flush_icache_range((unsigned long)insn_p,
			   (unsigned long)insn_p + sizeof(*insn_p));

	mutex_unlock(&text_mutex);
	put_online_cpus();
}

#endif /* HAVE_JUMP_LABEL */
gpl-2.0
EPDCenterSpain/kernel_Archos_97_Titan
lib/bitmap.c
959
35325
/*
 * lib/bitmap.c
 * Helper functions for bitmap.h.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <asm/uaccess.h>

/*
 * bitmaps provide an array of bits, implemented using an
 * array of unsigned longs. The number of valid bits in a
 * given bitmap does _not_ need to be an exact multiple of
 * BITS_PER_LONG.
 *
 * The possible unused bits in the last, partially used word
 * of a bitmap are 'don't care'. The implementation makes
 * no particular effort to keep them zero. It ensures that
 * their value will not affect the results of any operation.
 * The bitmap operations that return Boolean (bitmap_empty,
 * for example) or scalar (bitmap_weight, for example) results
 * carefully filter out these unused bits from impacting their
 * results.
 *
 * These operations actually hold to a slightly stronger rule:
 * if you don't input any bitmaps to these ops that have some
 * unused bits set, then they won't output any set unused bits
 * in output bitmaps.
 *
 * The byte ordering of bitmaps is more natural on little
 * endian architectures. See the big-endian headers
 * include/asm-ppc64/bitops.h and include/asm-s390/bitops.h
 * for the best explanations of this ordering.
 */

/* Return 1 iff none of the first @bits bits of @bitmap are set. */
int __bitmap_empty(const unsigned long *bitmap, int bits)
{
	int k, lim = bits/BITS_PER_LONG;
	for (k = 0; k < lim; ++k)
		if (bitmap[k])
			return 0;

	/* Mask off the 'don't care' bits of a partial final word. */
	if (bits % BITS_PER_LONG)
		if (bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
			return 0;

	return 1;
}
EXPORT_SYMBOL(__bitmap_empty);

/* Return 1 iff all of the first @bits bits of @bitmap are set. */
int __bitmap_full(const unsigned long *bitmap, int bits)
{
	int k, lim = bits/BITS_PER_LONG;
	for (k = 0; k < lim; ++k)
		if (~bitmap[k])
			return 0;

	if (bits % BITS_PER_LONG)
		if (~bitmap[k] & BITMAP_LAST_WORD_MASK(bits))
			return 0;

	return 1;
}
EXPORT_SYMBOL(__bitmap_full);

/* Return 1 iff the first @bits bits of the two bitmaps are identical. */
int __bitmap_equal(const unsigned long *bitmap1,
		const unsigned long *bitmap2, int bits)
{
	int k, lim = bits/BITS_PER_LONG;
	for (k = 0; k < lim; ++k)
		if (bitmap1[k] != bitmap2[k])
			return 0;

	if (bits % BITS_PER_LONG)
		if ((bitmap1[k] ^ bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
			return 0;

	return 1;
}
EXPORT_SYMBOL(__bitmap_equal);

/* dst = ~src for the first @bits bits; trailing word is masked clean. */
void __bitmap_complement(unsigned long *dst, const unsigned long *src, int bits)
{
	int k, lim = bits/BITS_PER_LONG;
	for (k = 0; k < lim; ++k)
		dst[k] = ~src[k];

	if (bits % BITS_PER_LONG)
		dst[k] = ~src[k] & BITMAP_LAST_WORD_MASK(bits);
}
EXPORT_SYMBOL(__bitmap_complement);

/**
 * __bitmap_shift_right - logical right shift of the bits in a bitmap
 *   @dst : destination bitmap
 *   @src : source bitmap
 *   @shift : shift by this many bits
 *   @bits : bitmap size, in bits
 *
 * Shifting right (dividing) means moving bits in the MS -> LS bit
 * direction.  Zeros are fed into the vacated MS positions and the
 * LS bits shifted off the bottom are lost.
 */
void __bitmap_shift_right(unsigned long *dst,
			const unsigned long *src, int shift, int bits)
{
	int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
	int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
	unsigned long mask = (1UL << left) - 1;
	for (k = 0; off + k < lim; ++k) {
		unsigned long upper, lower;

		/*
		 * If shift is not word aligned, take lower rem bits of
		 * word above and make them the top rem bits of result.
		 */
		if (!rem || off + k + 1 >= lim)
			upper = 0;
		else {
			upper = src[off + k + 1];
			if (off + k + 1 == lim - 1 && left)
				upper &= mask;
		}
		lower = src[off + k];
		if (left && off + k == lim - 1)
			lower &= mask;
		/*
		 * NOTE(review): when rem == 0 this evaluates
		 * "upper << BITS_PER_LONG", a full-width shift; upper is 0
		 * then, but the shift count is formally out of range --
		 * kept as-is to match upstream behavior.
		 */
		dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem;
		if (left && k == lim - 1)
			dst[k] &= mask;
	}
	if (off)
		memset(&dst[lim - off], 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_right);

/**
 * __bitmap_shift_left - logical left shift of the bits in a bitmap
 *   @dst : destination bitmap
 *   @src : source bitmap
 *   @shift : shift by this many bits
 *   @bits : bitmap size, in bits
 *
 * Shifting left (multiplying) means moving bits in the LS -> MS
 * direction.  Zeros are fed into the vacated LS bit positions
 * and those MS bits shifted off the top are lost.
 */
void __bitmap_shift_left(unsigned long *dst,
			const unsigned long *src, int shift, int bits)
{
	int k, lim = BITS_TO_LONGS(bits), left = bits % BITS_PER_LONG;
	int off = shift/BITS_PER_LONG, rem = shift % BITS_PER_LONG;
	for (k = lim - off - 1; k >= 0; --k) {
		unsigned long upper, lower;

		/*
		 * If shift is not word aligned, take upper rem bits of
		 * word below and make them the bottom rem bits of result.
		 */
		if (rem && k > 0)
			lower = src[k - 1];
		else
			lower = 0;
		upper = src[k];
		if (left && k == lim - 1)
			upper &= (1UL << left) - 1;
		/* See NOTE in __bitmap_shift_right about rem == 0. */
		dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem;
		if (left && k + off == lim - 1)
			dst[k + off] &= (1UL << left) - 1;
	}
	if (off)
		memset(dst, 0, off*sizeof(unsigned long));
}
EXPORT_SYMBOL(__bitmap_shift_left);

/* dst = bitmap1 & bitmap2; returns nonzero iff the result is non-empty. */
int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
				const unsigned long *bitmap2, int bits)
{
	int k;
	int nr = BITS_TO_LONGS(bits);
	unsigned long result = 0;

	for (k = 0; k < nr; k++)
		result |= (dst[k] = bitmap1[k] & bitmap2[k]);
	return result != 0;
}
EXPORT_SYMBOL(__bitmap_and);

/* dst = bitmap1 | bitmap2. */
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
				const unsigned long *bitmap2, int bits)
{
	int k;
	int nr = BITS_TO_LONGS(bits);

	for (k = 0; k < nr; k++)
		dst[k] = bitmap1[k] | bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_or);

/* dst = bitmap1 ^ bitmap2. */
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
				const unsigned long *bitmap2, int bits)
{
	int k;
	int nr = BITS_TO_LONGS(bits);

	for (k = 0; k < nr; k++)
		dst[k] = bitmap1[k] ^ bitmap2[k];
}
EXPORT_SYMBOL(__bitmap_xor);

/* dst = bitmap1 & ~bitmap2; returns nonzero iff the result is non-empty. */
int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
				const unsigned long *bitmap2, int bits)
{
	int k;
	int nr = BITS_TO_LONGS(bits);
	unsigned long result = 0;

	for (k = 0; k < nr; k++)
		result |= (dst[k] = bitmap1[k] & ~bitmap2[k]);
	return result != 0;
}
EXPORT_SYMBOL(__bitmap_andnot);

/* Return 1 iff the two bitmaps share at least one set bit. */
int __bitmap_intersects(const unsigned long *bitmap1,
				const unsigned long *bitmap2, int bits)
{
	int k, lim = bits/BITS_PER_LONG;
	for (k = 0; k < lim; ++k)
		if (bitmap1[k] & bitmap2[k])
			return 1;

	if (bits % BITS_PER_LONG)
		if ((bitmap1[k] & bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
			return 1;
	return 0;
}
EXPORT_SYMBOL(__bitmap_intersects);

/* Return 1 iff every set bit of bitmap1 is also set in bitmap2. */
int __bitmap_subset(const unsigned long *bitmap1,
				const unsigned long *bitmap2, int bits)
{
	int k, lim = bits/BITS_PER_LONG;
	for (k = 0; k < lim; ++k)
		if (bitmap1[k] & ~bitmap2[k])
			return 0;

	if (bits % BITS_PER_LONG)
		if ((bitmap1[k] & ~bitmap2[k]) & BITMAP_LAST_WORD_MASK(bits))
			return 0;
	return 1;
}
EXPORT_SYMBOL(__bitmap_subset);

/* Count the set bits among the first @bits bits of @bitmap. */
int __bitmap_weight(const unsigned long *bitmap, int bits)
{
	int k, w = 0, lim = bits/BITS_PER_LONG;

	for (k = 0; k < lim; k++)
		w += hweight_long(bitmap[k]);

	if (bits % BITS_PER_LONG)
		w += hweight_long(bitmap[k] & BITMAP_LAST_WORD_MASK(bits));

	return w;
}
EXPORT_SYMBOL(__bitmap_weight);

/* Mask with all bits at or above (start % BITS_PER_LONG) set. */
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))

/* Set @nr consecutive bits of @map beginning at bit @start. */
void bitmap_set(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	/* Whole words first, then the partial trailing word. */
	while (nr - bits_to_set >= 0) {
		*p |= mask_to_set;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		*p |= mask_to_set;
	}
}
EXPORT_SYMBOL(bitmap_set);

/* Clear @nr consecutive bits of @map beginning at bit @start. */
void bitmap_clear(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		*p &= ~mask_to_clear;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		*p &= ~mask_to_clear;
	}
}
EXPORT_SYMBOL(bitmap_clear);

/*
 * bitmap_find_next_zero_area - find a contiguous aligned zero area
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @align_mask: Alignment mask for zero area
 *
 * The @align_mask should be one less than a power of 2; the effect is that
 * the bit offset of all zero areas this function finds is multiples of that
 * power of 2. A @align_mask of 0 means no alignment is required.
 */
unsigned long bitmap_find_next_zero_area(unsigned long *map,
					 unsigned long size,
					 unsigned long start,
					 unsigned int nr,
					 unsigned long align_mask)
{
	unsigned long index, end, i;
again:
	index = find_next_zero_bit(map, size, start);

	/* Align allocation */
	index = __ALIGN_MASK(index, align_mask);

	end = index + nr;
	if (end > size)
		return end;	/* caller detects failure via end > size */
	i = find_next_bit(map, end, index);
	if (i < end) {
		/* Candidate window contains a set bit; restart past it. */
		start = i + 1;
		goto again;
	}
	return index;
}
EXPORT_SYMBOL(bitmap_find_next_zero_area);

/*
 * Bitmap printing & parsing functions: first version by Bill Irwin,
 * second version by Paul Jackson, third by Joe Korty.
 */

#define CHUNKSZ				32
#define nbits_to_hold_value(val)	fls(val)
#define BASEDEC 10		/* fancier cpuset lists input in decimal */

/**
 * bitmap_scnprintf - convert bitmap to an ASCII hex string.
 * @buf: byte buffer into which string is placed
 * @buflen: reserved size of @buf, in bytes
 * @maskp: pointer to bitmap to convert
 * @nmaskbits: size of bitmap, in bits
 *
 * Exactly @nmaskbits bits are displayed.  Hex digits are grouped into
 * comma-separated sets of eight digits per set.
 */
int bitmap_scnprintf(char *buf, unsigned int buflen,
	const unsigned long *maskp, int nmaskbits)
{
	int i, word, bit, len = 0;
	unsigned long val;
	const char *sep = "";
	int chunksz;
	u32 chunkmask;

	/* First (most significant) chunk may hold fewer than 32 bits. */
	chunksz = nmaskbits & (CHUNKSZ - 1);
	if (chunksz == 0)
		chunksz = CHUNKSZ;

	i = ALIGN(nmaskbits, CHUNKSZ) - CHUNKSZ;
	for (; i >= 0; i -= CHUNKSZ) {
		chunkmask = ((1ULL << chunksz) - 1);
		word = i / BITS_PER_LONG;
		bit = i % BITS_PER_LONG;
		val = (maskp[word] >> bit) & chunkmask;
		len += scnprintf(buf+len, buflen-len, "%s%0*lx", sep,
			(chunksz+3)/4, val);
		chunksz = CHUNKSZ;
		sep = ",";
	}
	return len;
}
EXPORT_SYMBOL(bitmap_scnprintf);

/**
 * __bitmap_parse - convert an ASCII hex string into a bitmap.
 * @buf: pointer to buffer containing string.
 * @buflen: buffer size in bytes.  If string is smaller than this
 *    then it must be terminated with a \0.
 * @is_user: location of buffer, 0 indicates kernel space
 * @maskp: pointer to bitmap array that will contain result.
 * @nmaskbits: size of bitmap, in bits.
 *
 * Commas group hex digits into chunks.  Each chunk defines exactly 32
 * bits of the resultant bitmask.  No chunk may specify a value larger
 * than 32 bits (%-EOVERFLOW), and if a chunk specifies a smaller value
 * then leading 0-bits are prepended.  %-EINVAL is returned for illegal
 * characters and for grouping errors such as "1,,5", ",44", "," and "".
 * Leading and trailing whitespace accepted, but not embedded whitespace.
 */
int __bitmap_parse(const char *buf, unsigned int buflen,
		int is_user, unsigned long *maskp,
		int nmaskbits)
{
	int c, old_c, totaldigits, ndigits, nchunks, nbits;
	u32 chunk;
	const char __user *ubuf = buf;

	bitmap_zero(maskp, nmaskbits);

	nchunks = nbits = totaldigits = c = 0;
	do {
		chunk = ndigits = 0;

		/* Get the next chunk of the bitmap */
		while (buflen) {
			old_c = c;
			if (is_user) {
				if (__get_user(c, ubuf++))
					return -EFAULT;
			}
			else
				c = *buf++;
			buflen--;
			if (isspace(c))
				continue;

			/*
			 * If the last character was a space and the current
			 * character isn't '\0', we've got embedded whitespace.
			 * This is a no-no, so throw an error.
			 */
			if (totaldigits && c && isspace(old_c))
				return -EINVAL;

			/* A '\0' or a ',' signal the end of the chunk */
			if (c == '\0' || c == ',')
				break;

			if (!isxdigit(c))
				return -EINVAL;

			/*
			 * Make sure there are at least 4 free bits in 'chunk'.
			 * If not, this hexdigit will overflow 'chunk', so
			 * throw an error.
			 */
			if (chunk & ~((1UL << (CHUNKSZ - 4)) - 1))
				return -EOVERFLOW;

			chunk = (chunk << 4) | hex_to_bin(c);
			ndigits++; totaldigits++;
		}
		if (ndigits == 0)
			return -EINVAL;
		if (nchunks == 0 && chunk == 0)
			continue;	/* skip leading all-zero chunks */

		__bitmap_shift_left(maskp, maskp, CHUNKSZ, nmaskbits);
		*maskp |= chunk;
		nchunks++;
		nbits += (nchunks == 1) ? nbits_to_hold_value(chunk) : CHUNKSZ;
		if (nbits > nmaskbits)
			return -EOVERFLOW;
	} while (buflen && c == ',');

	return 0;
}
EXPORT_SYMBOL(__bitmap_parse);

/**
 * bitmap_parse_user - convert an ASCII hex string in a user buffer into a bitmap
 *
 * @ubuf: pointer to user buffer containing string.
 * @ulen: buffer size in bytes.  If string is smaller than this
 *    then it must be terminated with a \0.
 * @maskp: pointer to bitmap array that will contain result.
 * @nmaskbits: size of bitmap, in bits.
 *
 * Wrapper for __bitmap_parse(), providing it with user buffer.
 *
 * We cannot have this as an inline function in bitmap.h because it needs
 * linux/uaccess.h to get the access_ok() declaration and this causes
 * cyclic dependencies.
 */
int bitmap_parse_user(const char __user *ubuf,
			unsigned int ulen, unsigned long *maskp,
			int nmaskbits)
{
	if (!access_ok(VERIFY_READ, ubuf, ulen))
		return -EFAULT;
	return __bitmap_parse((const char *)ubuf, ulen, 1, maskp, nmaskbits);
}
EXPORT_SYMBOL(bitmap_parse_user);

/*
 * bscnl_emit(buf, buflen, rbot, rtop, bp)
 *
 * Helper routine for bitmap_scnlistprintf().  Write decimal number
 * or range to buf, suppressing output past buf+buflen, with optional
 * comma-prefix.  Return len of what would be written to buf, if it
 * all fit.
 */
static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
{
	if (len > 0)
		len += scnprintf(buf + len, buflen - len, ",");
	if (rbot == rtop)
		len += scnprintf(buf + len, buflen - len, "%d", rbot);
	else
		len += scnprintf(buf + len, buflen - len, "%d-%d", rbot, rtop);
	return len;
}

/**
 * bitmap_scnlistprintf - convert bitmap to list format ASCII string
 * @buf: byte buffer into which string is placed
 * @buflen: reserved size of @buf, in bytes
 * @maskp: pointer to bitmap to convert
 * @nmaskbits: size of bitmap, in bits
 *
 * Output format is a comma-separated list of decimal numbers and
 * ranges.  Consecutively set bits are shown as two hyphen-separated
 * decimal numbers, the smallest and largest bit numbers set in
 * the range.  Output format is compatible with the format
 * accepted as input by bitmap_parselist().
 *
 * The return value is the number of characters which would be
 * generated for the given input, excluding the trailing '\0', as
 * per ISO C99.
 */
int bitmap_scnlistprintf(char *buf, unsigned int buflen,
	const unsigned long *maskp, int nmaskbits)
{
	int len = 0;
	/* current bit is 'cur', most recently seen range is [rbot, rtop] */
	int cur, rbot, rtop;

	if (buflen == 0)
		return 0;
	buf[0] = 0;

	rbot = cur = find_first_bit(maskp, nmaskbits);
	while (cur < nmaskbits) {
		rtop = cur;
		cur = find_next_bit(maskp, nmaskbits, cur+1);
		if (cur >= nmaskbits || cur > rtop + 1) {
			/* Range [rbot, rtop] is complete; emit it. */
			len = bscnl_emit(buf, buflen, rbot, rtop, len);
			rbot = cur;
		}
	}
	return len;
}
EXPORT_SYMBOL(bitmap_scnlistprintf);

/**
 * __bitmap_parselist - convert list format ASCII string to bitmap
 * @buf: read nul-terminated user string from this buffer
 * @buflen: buffer size in bytes.  If string is smaller than this
 *    then it must be terminated with a \0.
 * @is_user: location of buffer, 0 indicates kernel space
 * @maskp: write resulting mask here
 * @nmaskbits: number of bits in mask to be written
 *
 * Input format is a comma-separated list of decimal numbers and
 * ranges.  Consecutively set bits are shown as two hyphen-separated
 * decimal numbers, the smallest and largest bit numbers set in
 * the range.
 *
 * Returns 0 on success, -errno on invalid input strings.
 * Error values:
 *    %-EINVAL: second number in range smaller than first
 *    %-EINVAL: invalid character in string
 *    %-ERANGE: bit number specified too large for mask
 */
static int __bitmap_parselist(const char *buf, unsigned int buflen,
		int is_user, unsigned long *maskp,
		int nmaskbits)
{
	unsigned a, b;
	int c, old_c, totaldigits;
	const char __user *ubuf = buf;
	int exp_digit, in_range;

	totaldigits = c = 0;
	bitmap_zero(maskp, nmaskbits);
	do {
		exp_digit = 1;
		in_range = 0;
		a = b = 0;

		/* Get the next cpu# or a range of cpu#'s */
		while (buflen) {
			old_c = c;
			if (is_user) {
				if (__get_user(c, ubuf++))
					return -EFAULT;
			} else
				c = *buf++;
			buflen--;
			if (isspace(c))
				continue;

			/*
			 * If the last character was a space and the current
			 * character isn't '\0', we've got embedded whitespace.
			 * This is a no-no, so throw an error.
			 */
			if (totaldigits && c && isspace(old_c))
				return -EINVAL;

			/* A '\0' or a ',' signal the end of a cpu# or range */
			if (c == '\0' || c == ',')
				break;

			if (c == '-') {
				/* A '-' must follow a digit and appear once. */
				if (exp_digit || in_range)
					return -EINVAL;
				b = 0;
				in_range = 1;
				exp_digit = 1;
				continue;
			}

			if (!isdigit(c))
				return -EINVAL;

			b = b * 10 + (c - '0');
			if (!in_range)
				a = b;
			exp_digit = 0;
			totaldigits++;
		}
		if (!(a <= b))
			return -EINVAL;
		if (b >= nmaskbits)
			return -ERANGE;
		while (a <= b) {
			set_bit(a, maskp);
			a++;
		}
	} while (buflen && c == ',');
	return 0;
}

/* Kernel-space entry point; trims at the first newline, if any. */
int bitmap_parselist(const char *bp, unsigned long *maskp, int nmaskbits)
{
	char *nl  = strchr(bp, '\n');
	int len;

	if (nl)
		len = nl - bp;
	else
		len = strlen(bp);

	return __bitmap_parselist(bp, len, 0, maskp, nmaskbits);
}
EXPORT_SYMBOL(bitmap_parselist);

/**
 * bitmap_parselist_user()
 *
 * @ubuf: pointer to user buffer containing string.
 * @ulen: buffer size in bytes.  If string is smaller than this
 *    then it must be terminated with a \0.
 * @maskp: pointer to bitmap array that will contain result.
 * @nmaskbits: size of bitmap, in bits.
 *
 * Wrapper for bitmap_parselist(), providing it with user buffer.
 *
 * We cannot have this as an inline function in bitmap.h because it needs
 * linux/uaccess.h to get the access_ok() declaration and this causes
 * cyclic dependencies.
 */
int bitmap_parselist_user(const char __user *ubuf,
			unsigned int ulen, unsigned long *maskp,
			int nmaskbits)
{
	if (!access_ok(VERIFY_READ, ubuf, ulen))
		return -EFAULT;
	return __bitmap_parselist((const char *)ubuf,
					ulen, 1, maskp, nmaskbits);
}
EXPORT_SYMBOL(bitmap_parselist_user);

/**
 * bitmap_pos_to_ord - find ordinal of set bit at given position in bitmap
 *	@buf: pointer to a bitmap
 *	@pos: a bit position in @buf (0 <= @pos < @bits)
 *	@bits: number of valid bit positions in @buf
 *
 * Map the bit at position @pos in @buf (of length @bits) to the
 * ordinal of which set bit it is.  If it is not set or if @pos
 * is not a valid bit position, map to -1.
 *
 * If for example, just bits 4 through 7 are set in @buf, then @pos
 * values 4 through 7 will get mapped to 0 through 3, respectively,
 * and other @pos values will get mapped to -1.  When @pos value 7
 * gets mapped to (returns) @ord value 3 in this example, that means
 * that bit 7 is the 3rd (starting with 0th) set bit in @buf.
 *
 * The bit positions 0 through @bits are valid positions in @buf.
 */
static int bitmap_pos_to_ord(const unsigned long *buf, int pos, int bits)
{
	int i, ord;

	if (pos < 0 || pos >= bits || !test_bit(pos, buf))
		return -1;

	/* Count the set bits strictly below @pos. */
	i = find_first_bit(buf, bits);
	ord = 0;
	while (i < pos) {
		i = find_next_bit(buf, bits, i + 1);
	     	ord++;
	}
	BUG_ON(i != pos);

	return ord;
}

/**
 * bitmap_ord_to_pos - find position of n-th set bit in bitmap
 *	@buf: pointer to bitmap
 *	@ord: ordinal bit position (n-th set bit, n >= 0)
 *	@bits: number of valid bit positions in @buf
 *
 * Map the ordinal offset of bit @ord in @buf to its position in @buf.
 * Value of @ord should be in range 0 <= @ord < weight(buf), else
 * results are undefined.
 *
 * If for example, just bits 4 through 7 are set in @buf, then @ord
 * values 0 through 3 will get mapped to 4 through 7, respectively,
 * and all other @ord values return undefined values.  When @ord value 3
 * gets mapped to (returns) @pos value 7 in this example, that means
 * that the 3rd set bit (starting with 0th) is at position 7 in @buf.
 *
 * The bit positions 0 through @bits are valid positions in @buf.
 */
static int bitmap_ord_to_pos(const unsigned long *buf, int ord, int bits)
{
	int pos = 0;

	if (ord >= 0 && ord < bits) {
		int i;

		for (i = find_first_bit(buf, bits);
		     i < bits && ord > 0;
		     i = find_next_bit(buf, bits, i + 1))
	     		ord--;
		if (i < bits && ord == 0)
			pos = i;
	}

	return pos;
}

/**
 * bitmap_remap - Apply map defined by a pair of bitmaps to another bitmap
 *	@dst: remapped result
 *	@src: subset to be remapped
 *	@old: defines domain of map
 *	@new: defines range of map
 *	@bits: number of bits in each of these bitmaps
 *
 * Let @old and @new define a mapping of bit positions, such that
 * whatever position is held by the n-th set bit in @old is mapped
 * to the n-th set bit in @new.  In the more general case, allowing
 * for the possibility that the weight 'w' of @new is less than the
 * weight of @old, map the position of the n-th set bit in @old to
 * the position of the m-th set bit in @new, where m == n % w.
 *
 * If either of the @old and @new bitmaps are empty, or if @src and
 * @dst point to the same location, then this routine copies @src
 * to @dst.
 *
 * The positions of unset bits in @old are mapped to themselves
 * (the identify map).
 *
 * Apply the above specified mapping to @src, placing the result in
 * @dst, clearing any bits previously set in @dst.
 *
 * For example, lets say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 * bit positions unchanged.  So if say @src comes into this routine
 * with bits 1, 5 and 7 set, then @dst should leave with bits 1,
 * 13 and 15 set.
 */
void bitmap_remap(unsigned long *dst, const unsigned long *src,
		const unsigned long *old, const unsigned long *new,
		int bits)
{
	int oldbit, w;

	if (dst == src)		/* following doesn't handle inplace remaps */
		return;
	bitmap_zero(dst, bits);

	w = bitmap_weight(new, bits);
	for_each_set_bit(oldbit, src, bits) {
		int n = bitmap_pos_to_ord(old, oldbit, bits);

		if (n < 0 || w == 0)
			set_bit(oldbit, dst);	/* identity map */
		else
			set_bit(bitmap_ord_to_pos(new, n % w, bits), dst);
	}
}
EXPORT_SYMBOL(bitmap_remap);

/**
 * bitmap_bitremap - Apply map defined by a pair of bitmaps to a single bit
 *	@oldbit: bit position to be mapped
 *	@old: defines domain of map
 *	@new: defines range of map
 *	@bits: number of bits in each of these bitmaps
 *
 * Let @old and @new define a mapping of bit positions, such that
 * whatever position is held by the n-th set bit in @old is mapped
 * to the n-th set bit in @new.  In the more general case, allowing
 * for the possibility that the weight 'w' of @new is less than the
 * weight of @old, map the position of the n-th set bit in @old to
 * the position of the m-th set bit in @new, where m == n % w.
 *
 * The positions of unset bits in @old are mapped to themselves
 * (the identify map).
 *
 * Apply the above specified mapping to bit position @oldbit, returning
 * the new bit position.
 *
 * For example, lets say that @old has bits 4 through 7 set, and
 * @new has bits 12 through 15 set.  This defines the mapping of bit
 * position 4 to 12, 5 to 13, 6 to 14 and 7 to 15, and of all other
 * bit positions unchanged.  So if say @oldbit is 5, then this routine
 * returns 13.
 */
int bitmap_bitremap(int oldbit, const unsigned long *old,
				const unsigned long *new, int bits)
{
	int w = bitmap_weight(new, bits);
	int n = bitmap_pos_to_ord(old, oldbit, bits);
	if (n < 0 || w == 0)
		return oldbit;
	else
		return bitmap_ord_to_pos(new, n % w, bits);
}
EXPORT_SYMBOL(bitmap_bitremap);

/**
 * bitmap_onto - translate one bitmap relative to another
 *	@dst: resulting translated bitmap
 * 	@orig: original untranslated bitmap
 * 	@relmap: bitmap relative to which translated
 *	@bits: number of bits in each of these bitmaps
 *
 * Set the n-th bit of @dst iff there exists some m such that the
 * n-th bit of @relmap is set, the m-th bit of @orig is set, and
 * the n-th bit of @relmap is also the m-th _set_ bit of @relmap.
 * (If you understood the previous sentence the first time your
 * read it, you're overqualified for your current job.)
 *
 * In other words, @orig is mapped onto (surjectively) @dst,
 * using the the map { <n, m> | the n-th bit of @relmap is the
 * m-th set bit of @relmap }.
 *
 * Any set bits in @orig above bit number W, where W is the
 * weight of (number of set bits in) @relmap are mapped nowhere.
 * In particular, if for all bits m set in @orig, m >= W, then
 * @dst will end up empty.  In situations where the possibility
 * of such an empty result is not desired, one way to avoid it is
 * to use the bitmap_fold() operator, below, to first fold the
 * @orig bitmap over itself so that all its set bits x are in the
 * range 0 <= x < W.  The bitmap_fold() operator does this by
 * setting the bit (m % W) in @dst, for each bit (m) set in @orig.
 *
 * Example [1] for bitmap_onto():
 *  Let's say @relmap has bits 30-39 set, and @orig has bits
 *  1, 3, 5, 7, 9 and 11 set.  Then on return from this routine,
 *  @dst will have bits 31, 33, 35, 37 and 39 set.
 *
 *  When bit 0 is set in @orig, it means turn on the bit in
 *  @dst corresponding to whatever is the first bit (if any)
 *  that is turned on in @relmap.
Since bit 0 was off in the
 *  above example, we leave off that bit (bit 30) in @dst.
 *
 *  When bit 1 is set in @orig (as in the above example), it
 *  means turn on the bit in @dst corresponding to whatever
 *  is the second bit that is turned on in @relmap.  The second
 *  bit in @relmap that was turned on in the above example was
 *  bit 31, so we turned on bit 31 in @dst.
 *
 *  Similarly, we turned on bits 33, 35, 37 and 39 in @dst,
 *  because they were the 4th, 6th, 8th and 10th set bits
 *  set in @relmap, and the 4th, 6th, 8th and 10th bits of
 *  @orig (i.e. bits 3, 5, 7 and 9) were also set.
 *
 *  When bit 11 is set in @orig, it means turn on the bit in
 *  @dst corresponding to whatever is the twelfth bit that is
 *  turned on in @relmap.  In the above example, there were
 *  only ten bits turned on in @relmap (30..39), so that bit
 *  11 was set in @orig had no effect on @dst.
 *
 * Example [2] for bitmap_fold() + bitmap_onto():
 *  Let's say @relmap has these ten bits set:
 *		40 41 42 43 45 48 53 61 74 95
 *  (for the curious, that's 40 plus the first ten terms of the
 *  Fibonacci sequence.)
 *
 *  Further lets say we use the following code, invoking
 *  bitmap_fold() then bitmap_onto, as suggested above to
 *  avoid the possibility of an empty @dst result:
 *
 *	unsigned long *tmp;	// a temporary bitmap's bits
 *
 *	bitmap_fold(tmp, orig, bitmap_weight(relmap, bits), bits);
 *	bitmap_onto(dst, tmp, relmap, bits);
 *
 *  Then this table shows what various values of @dst would be, for
 *  various @orig's.  I list the zero-based positions of each set bit.
 *  The tmp column shows the intermediate result, as computed by
 *  using bitmap_fold() to fold the @orig bitmap modulo ten
 *  (the weight of @relmap).
* * @orig tmp @dst * 0 0 40 * 1 1 41 * 9 9 95 * 10 0 40 (*) * 1 3 5 7 1 3 5 7 41 43 48 61 * 0 1 2 3 4 0 1 2 3 4 40 41 42 43 45 * 0 9 18 27 0 9 8 7 40 61 74 95 * 0 10 20 30 0 40 * 0 11 22 33 0 1 2 3 40 41 42 43 * 0 12 24 36 0 2 4 6 40 42 45 53 * 78 102 211 1 2 8 41 42 74 (*) * * (*) For these marked lines, if we hadn't first done bitmap_fold() * into tmp, then the @dst result would have been empty. * * If either of @orig or @relmap is empty (no set bits), then @dst * will be returned empty. * * If (as explained above) the only set bits in @orig are in positions * m where m >= W, (where W is the weight of @relmap) then @dst will * once again be returned empty. * * All bits in @dst not set by the above rule are cleared. */ void bitmap_onto(unsigned long *dst, const unsigned long *orig, const unsigned long *relmap, int bits) { int n, m; /* same meaning as in above comment */ if (dst == orig) /* following doesn't handle inplace mappings */ return; bitmap_zero(dst, bits); /* * The following code is a more efficient, but less * obvious, equivalent to the loop: * for (m = 0; m < bitmap_weight(relmap, bits); m++) { * n = bitmap_ord_to_pos(orig, m, bits); * if (test_bit(m, orig)) * set_bit(n, dst); * } */ m = 0; for_each_set_bit(n, relmap, bits) { /* m == bitmap_pos_to_ord(relmap, n, bits) */ if (test_bit(m, orig)) set_bit(n, dst); m++; } } EXPORT_SYMBOL(bitmap_onto); /** * bitmap_fold - fold larger bitmap into smaller, modulo specified size * @dst: resulting smaller bitmap * @orig: original larger bitmap * @sz: specified size * @bits: number of bits in each of these bitmaps * * For each bit oldbit in @orig, set bit oldbit mod @sz in @dst. * Clear all other bits in @dst. See further the comment and * Example [2] for bitmap_onto() for why and how to use this. 
 */
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
			int sz, int bits)
{
	int oldbit;

	if (dst == orig)	/* following doesn't handle inplace mappings */
		return;
	bitmap_zero(dst, bits);

	for_each_set_bit(oldbit, orig, bits)
		set_bit(oldbit % sz, dst);
}
EXPORT_SYMBOL(bitmap_fold);

/*
 * Common code for bitmap_*_region() routines.
 *	bitmap: array of unsigned longs corresponding to the bitmap
 *	pos: the beginning of the region
 *	order: region size (log base 2 of number of bits)
 *	reg_op: operation(s) to perform on that region of bitmap
 *
 * Can set, verify and/or release a region of bits in a bitmap,
 * depending on which combination of REG_OP_* flag bits is set.
 *
 * A region of a bitmap is a sequence of bits in the bitmap, of
 * some size '1 << order' (a power of two), aligned to that same
 * '1 << order' power of two.
 *
 * Returns 1 if REG_OP_ISFREE succeeds (region is all zero bits).
 * Returns 0 in all other cases and reg_ops.
 */
enum {
	REG_OP_ISFREE,		/* true if region is all zero bits */
	REG_OP_ALLOC,		/* set all bits in region */
	REG_OP_RELEASE,		/* clear all bits in region */
};

static int __reg_op(unsigned long *bitmap, int pos, int order, int reg_op)
{
	int nbits_reg;		/* number of bits in region */
	int index;		/* index first long of region in bitmap */
	int offset;		/* bit offset region in bitmap[index] */
	int nlongs_reg;		/* num longs spanned by region in bitmap */
	int nbitsinlong;	/* num bits of region in each spanned long */
	unsigned long mask;	/* bitmask for one long of region */
	int i;			/* scans bitmap by longs */
	int ret = 0;		/* return value */

	/*
	 * Either nlongs_reg == 1 (for small orders that fit in one long)
	 * or (offset == 0 && mask == ~0UL) (for larger multiword orders.)
	 */
	nbits_reg = 1 << order;
	index = pos / BITS_PER_LONG;
	offset = pos - (index * BITS_PER_LONG);
	nlongs_reg = BITS_TO_LONGS(nbits_reg);
	nbitsinlong = min(nbits_reg, BITS_PER_LONG);

	/*
	 * Can't do "mask = (1UL << nbitsinlong) - 1", as that
	 * overflows if nbitsinlong == BITS_PER_LONG.
	 * Build the nbitsinlong-wide mask in two steps instead,
	 * then shift it to the region's bit offset.
	 */
	mask = (1UL << (nbitsinlong - 1));
	mask += mask - 1;
	mask <<= offset;

	switch (reg_op) {
	case REG_OP_ISFREE:
		/* Report free only if no long of the region has a set bit. */
		for (i = 0; i < nlongs_reg; i++) {
			if (bitmap[index + i] & mask)
				goto done;
		}
		ret = 1;	/* all bits in region free (zero) */
		break;

	case REG_OP_ALLOC:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] |= mask;
		break;

	case REG_OP_RELEASE:
		for (i = 0; i < nlongs_reg; i++)
			bitmap[index + i] &= ~mask;
		break;
	}
done:
	return ret;
}

/**
 * bitmap_find_free_region - find a contiguous aligned mem region
 * @bitmap: array of unsigned longs corresponding to the bitmap
 * @bits: number of bits in the bitmap
 * @order: region size (log base 2 of number of bits) to find
 *
 * Find a region of free (zero) bits in a @bitmap of @bits bits and
 * allocate them (set them to one).  Only consider regions of length
 * a power (@order) of two, aligned to that power of two, which
 * makes the search algorithm much faster.
 *
 * Return the bit offset in bitmap of the allocated region,
 * or -errno on failure.
 */
int bitmap_find_free_region(unsigned long *bitmap, int bits, int order)
{
	int pos, end;		/* scans bitmap by regions of size order */

	for (pos = 0 ; (end = pos + (1 << order)) <= bits; pos = end) {
		if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE))
			continue;
		__reg_op(bitmap, pos, order, REG_OP_ALLOC);
		return pos;
	}
	return -ENOMEM;
}
EXPORT_SYMBOL(bitmap_find_free_region);

/**
 * bitmap_release_region - release allocated bitmap region
 * @bitmap: array of unsigned longs corresponding to the bitmap
 * @pos: beginning of bit region to release
 * @order: region size (log base 2 of number of bits) to release
 *
 * This is the complement to bitmap_find_free_region() and releases
 * the found region (by clearing it in the bitmap).
 *
 * No return value.
*/ void bitmap_release_region(unsigned long *bitmap, int pos, int order) { __reg_op(bitmap, pos, order, REG_OP_RELEASE); } EXPORT_SYMBOL(bitmap_release_region); /** * bitmap_allocate_region - allocate bitmap region * @bitmap: array of unsigned longs corresponding to the bitmap * @pos: beginning of bit region to allocate * @order: region size (log base 2 of number of bits) to allocate * * Allocate (set bits in) a specified region of a bitmap. * * Return 0 on success, or %-EBUSY if specified region wasn't * free (not all bits were zero). */ int bitmap_allocate_region(unsigned long *bitmap, int pos, int order) { if (!__reg_op(bitmap, pos, order, REG_OP_ISFREE)) return -EBUSY; __reg_op(bitmap, pos, order, REG_OP_ALLOC); return 0; } EXPORT_SYMBOL(bitmap_allocate_region); /** * bitmap_copy_le - copy a bitmap, putting the bits into little-endian order. * @dst: destination buffer * @src: bitmap to copy * @nbits: number of bits in the bitmap * * Require nbits % BITS_PER_LONG == 0. */ void bitmap_copy_le(void *dst, const unsigned long *src, int nbits) { unsigned long *d = dst; int i; for (i = 0; i < nbits/BITS_PER_LONG; i++) { if (BITS_PER_LONG == 64) d[i] = cpu_to_le64(src[i]); else d[i] = cpu_to_le32(src[i]); } } EXPORT_SYMBOL(bitmap_copy_le);
gpl-2.0
Pesach85/PH85-KERNEL
drivers/net/wireless/iwlwifi/iwl-power.c
2239
14673
/****************************************************************************** * * Copyright(c) 2007 - 2011 Intel Corporation. All rights reserved. * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * Intel Linux Wireless <ilw@linux.intel.com> * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 *****************************************************************************/ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <net/mac80211.h> #include "iwl-eeprom.h" #include "iwl-dev.h" #include "iwl-core.h" #include "iwl-io.h" #include "iwl-commands.h" #include "iwl-debug.h" #include "iwl-power.h" /* * Setting power level allows the card to go to sleep when not busy. * * We calculate a sleep command based on the required latency, which * we get from mac80211. In order to handle thermal throttling, we can * also use pre-defined power levels. */ /* * For now, keep using power level 1 instead of automatically * adjusting ... 
 */
bool no_sleep_autoadjust = true;
module_param(no_sleep_autoadjust, bool, S_IRUGO);
MODULE_PARM_DESC(no_sleep_autoadjust,
		 "don't automatically adjust sleep level "
		 "according to maximum network latency");

/*
 * This defines the old power levels. They are still used by default
 * (level 1) and for thermal throttle (levels 3 through 5)
 */
struct iwl_power_vec_entry {
	struct iwl_powertable_cmd cmd;
	u8 no_dtim;	/* number of skip dtim */
};

#define IWL_DTIM_RANGE_0_MAX	2
#define IWL_DTIM_RANGE_1_MAX	10

#define NOSLP cpu_to_le16(0), 0, 0
#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
#define ASLP (IWL_POWER_POWER_SAVE_ENA_MSK |	\
		IWL_POWER_POWER_MANAGEMENT_ENA_MSK | \
		IWL_POWER_ADVANCE_PM_ENA_MSK)
#define ASLP_TOUT(T) cpu_to_le32(T)
#define TU_TO_USEC 1024
#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
				     cpu_to_le32(X1), \
				     cpu_to_le32(X2), \
				     cpu_to_le32(X3), \
				     cpu_to_le32(X4)}
/* default power management (not Tx power) table values */
/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
/* DTIM 0 - 2 */
static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 2, 4, 6, 0xFF)}, 2}
};

/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
/* DTIM 3 - 10 */
static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
};

/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
/* DTIM 11 - */
static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
	{{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
	{{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
	{{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 7, 9, 9, 0xFF)}, 0},
	{{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
};

/* advance power management */
/* DTIM 0 - 2 */
static const struct iwl_power_vec_entry apm_range_0[IWL_POWER_NUM] = {
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
};

/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
/* DTIM 3 - 10 */
static const struct iwl_power_vec_entry apm_range_1[IWL_POWER_NUM] = {
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 6, 8, 0xFF), 0}, 2}
};

/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
/* DTIM 11 - */
static const struct iwl_power_vec_entry apm_range_2[IWL_POWER_NUM] = {
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
	{{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
		SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
};

/*
 * Build a static sleep command from the table selected by DTIM period
 * and power level @lvl, then clamp its listen/sleep intervals.
 */
static void iwl_static_sleep_cmd(struct iwl_priv *priv,
				 struct iwl_powertable_cmd *cmd,
				 enum iwl_power_level lvl, int period)
{
	const struct iwl_power_vec_entry *table;
	int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
	int i;
	u8 skip;
	u32 slp_itrvl;

	/* Pick the table row set for this DTIM period range. */
	if (priv->cfg->adv_pm) {
		table = apm_range_2;
		if (period <= IWL_DTIM_RANGE_1_MAX)
			table = apm_range_1;
		if (period <= IWL_DTIM_RANGE_0_MAX)
			table = apm_range_0;
	} else {
		table = range_2;
		if (period <= IWL_DTIM_RANGE_1_MAX)
			table = range_1;
		if (period <= IWL_DTIM_RANGE_0_MAX)
			table = range_0;
	}

	if (WARN_ON(lvl < 0 || lvl >= IWL_POWER_NUM))
		memset(cmd, 0, sizeof(*cmd));
	else
		*cmd = table[lvl].cmd;

	if (period == 0) {
		/* no DTIM known yet: most conservative limits */
		skip = 0;
		period = 1;
		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
			max_sleep[i] = 1;

	} else {
		skip = table[lvl].no_dtim;
		for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
			max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
		max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
	}

	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	/* figure out the listen interval based on dtim period and skip */
	if (slp_itrvl == 0xFF)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32(period * (skip + 1));

	/* round the listen interval down to a multiple of the DTIM period */
	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	if (slp_itrvl > period)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32((slp_itrvl / period) * period);

	if (skip)
		cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
	else
		cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;

	if (priv->cfg->base_params->shadow_reg_enable)
		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
	else
		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;

	if (iwl_advanced_bt_coexist(priv)) {
		if (!priv->cfg->bt_params->bt_sco_disable)
			cmd->flags |= IWL_POWER_BT_SCO_ENA;
		else
			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
	}

	/* cap the listen interval at the mac80211 maximum */
	slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
	if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
		cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
			cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);

	/* enforce max sleep interval */
	for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
		if (le32_to_cpu(cmd->sleep_interval[i]) >
		    (max_sleep[i] * period))
			cmd->sleep_interval[i] =
				cpu_to_le32(max_sleep[i] * period);
		/* keep the vector monotonically non-decreasing */
		if (i != (IWL_POWER_VEC_SIZE - 1)) {
			if (le32_to_cpu(cmd->sleep_interval[i]) >
					le32_to_cpu(cmd->sleep_interval[i+1]))
				cmd->sleep_interval[i] =
					cmd->sleep_interval[i+1];
		}
	}

	if (priv->power_data.pci_pm)
		cmd->flags |= IWL_POWER_PCI_PM_MSK;
	else
		cmd->flags &= ~IWL_POWER_PCI_PM_MSK;

	IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
			skip, period);
	IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
}

/* Build a CAM (continuously aware mode, i.e. no-sleep) command. */
static void iwl_power_sleep_cam_cmd(struct iwl_priv *priv,
				    struct iwl_powertable_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));

	if (priv->power_data.pci_pm)
		cmd->flags |= IWL_POWER_PCI_PM_MSK;

	IWL_DEBUG_POWER(priv, "Sleep command for CAM\n");
}

/*
 * Build an automatic sleep command sized by the requested dynamic-PS
 * timeout and maximum wakeup period (latency driven, not table driven).
 */
static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
				     struct iwl_powertable_cmd *cmd,
				     int dynps_ms, int wakeup_period)
{
	/*
	 * These are the original power level 3 sleep successions. The
	 * device may behave better with such succession and was also
	 * only tested with that. Just like the original sleep commands,
	 * also adjust the succession here to the wakeup_period below.
	 * The ranges are the same as for the sleep commands, 0-2, 3-9
	 * and >10, which is selected based on the DTIM interval for
	 * the sleep index but here we use the wakeup period since that
	 * is what we need to do for the latency requirements.
	 */
	static const u8 slp_succ_r0[IWL_POWER_VEC_SIZE] = { 2, 2, 2, 2, 2 };
	static const u8 slp_succ_r1[IWL_POWER_VEC_SIZE] = { 2, 4, 6, 7, 9 };
	static const u8 slp_succ_r2[IWL_POWER_VEC_SIZE] = { 2, 7, 9, 9, 0xFF };
	const u8 *slp_succ = slp_succ_r0;
	int i;

	if (wakeup_period > IWL_DTIM_RANGE_0_MAX)
		slp_succ = slp_succ_r1;
	if (wakeup_period > IWL_DTIM_RANGE_1_MAX)
		slp_succ = slp_succ_r2;

	memset(cmd, 0, sizeof(*cmd));

	cmd->flags = IWL_POWER_DRIVER_ALLOW_SLEEP_MSK |
		IWL_POWER_FAST_PD; /* no use seeing frames for others */

	if (priv->power_data.pci_pm)
		cmd->flags |= IWL_POWER_PCI_PM_MSK;

	if (priv->cfg->base_params->shadow_reg_enable)
		cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
	else
		cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;

	if (iwl_advanced_bt_coexist(priv)) {
		if (!priv->cfg->bt_params->bt_sco_disable)
			cmd->flags |= IWL_POWER_BT_SCO_ENA;
		else
			cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
	}

	cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
	cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);

	/* clamp each succession step to the requested wakeup period */
	for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
		cmd->sleep_interval[i] =
			cpu_to_le32(min_t(int, slp_succ[i], wakeup_period));

	IWL_DEBUG_POWER(priv, "Automatic sleep command\n");
}

/* Send the prepared power table command to the device firmware. */
static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
{
	IWL_DEBUG_POWER(priv, "Sending power/sleep command\n");
	IWL_DEBUG_POWER(priv, "Flags value = 0x%08X\n", cmd->flags);
	IWL_DEBUG_POWER(priv, "Tx timeout = %u\n",
			le32_to_cpu(cmd->tx_data_timeout));
	IWL_DEBUG_POWER(priv, "Rx timeout = %u\n",
			le32_to_cpu(cmd->rx_data_timeout));
	IWL_DEBUG_POWER(priv,
			"Sleep interval vector = { %d , %d , %d , %d , %d }\n",
			le32_to_cpu(cmd->sleep_interval[0]),
			le32_to_cpu(cmd->sleep_interval[1]),
			le32_to_cpu(cmd->sleep_interval[2]),
			le32_to_cpu(cmd->sleep_interval[3]),
			le32_to_cpu(cmd->sleep_interval[4]));

	return iwl_send_cmd_pdu(priv, POWER_TABLE_CMD,
				sizeof(struct iwl_powertable_cmd), cmd);
}

/*
 * Select which kind of sleep command to build, in priority order:
 * idle, thermal throttling, PS disabled, debug override, fixed
 * level 1, or latency-driven automatic sleep.
 */
static void iwl_power_build_cmd(struct iwl_priv *priv,
				struct iwl_powertable_cmd *cmd)
{
	bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
	int dtimper;

	dtimper = priv->hw->conf.ps_dtim_period ?: 1;

	if (!priv->cfg->base_params->no_idle_support &&
	    priv->hw->conf.flags & IEEE80211_CONF_IDLE)
		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
	else if (iwl_tt_is_low_power_state(priv)) {
		/* in thermal throttling low power state */
		iwl_static_sleep_cmd(priv, cmd,
		    iwl_tt_current_power_mode(priv), dtimper);
	} else if (!enabled)
		iwl_power_sleep_cam_cmd(priv, cmd);
	else if (priv->power_data.debug_sleep_level_override >= 0)
		iwl_static_sleep_cmd(priv, cmd,
				     priv->power_data.debug_sleep_level_override,
				     dtimper);
	else if (no_sleep_autoadjust)
		iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_1, dtimper);
	else
		iwl_power_fill_sleep_cmd(priv, cmd,
					 priv->hw->conf.dynamic_ps_timeout,
					 priv->hw->conf.max_sleep_period);
}

/*
 * Apply @cmd to the device unless it matches the current command and
 * @force is not set; defers during a scan, tracks PMI status bits and
 * may trigger an RX chain flags update on success.
 */
int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
		       bool force)
{
	int ret;
	bool update_chains;

	lockdep_assert_held(&priv->mutex);

	/* Don't update the RX chain when chain noise calibration is running */
	update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
			priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;

	if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
		return 0;

	if (!iwl_is_ready_rf(priv))
		return -EIO;

	/* scan complete use sleep_power_next, need to be updated */
	memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
	if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
		IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
		return 0;
	}

	if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
		set_bit(STATUS_POWER_PMI, &priv->status);

	ret = iwl_set_power(priv, cmd);
	if (!ret) {
		if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
			clear_bit(STATUS_POWER_PMI, &priv->status);

		if (priv->cfg->ops->lib->update_chain_flags && update_chains)
			priv->cfg->ops->lib->update_chain_flags(priv);
		else if (priv->cfg->ops->lib->update_chain_flags)
			IWL_DEBUG_POWER(priv,
					"Cannot update the power, chain noise "
					"calibration running: %d\n",
					priv->chain_noise_data.state);

		memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
	} else
		IWL_ERR(priv, "set power fail, ret = %d", ret);

	return ret;
}

/* Rebuild the sleep command from current state and apply it. */
int iwl_power_update_mode(struct iwl_priv *priv, bool force)
{
	struct iwl_powertable_cmd cmd;

	iwl_power_build_cmd(priv, &cmd);
	return iwl_power_set_mode(priv, &cmd, force);
}

/* initialize to default */
void iwl_power_initialize(struct iwl_priv *priv)
{
	u16 lctl = iwl_pcie_link_ctl(priv);

	/* PCIe power management only when L0s is not enabled */
	priv->power_data.pci_pm = !(lctl & PCI_CFG_LINK_CTRL_VAL_L0S_EN);

	priv->power_data.debug_sleep_level_override = -1;

	memset(&priv->power_data.sleep_cmd, 0,
		sizeof(priv->power_data.sleep_cmd));
}
gpl-2.0
erikcas/android_kernel_sony_msm
arch/mips/boot/elf2ecoff.c
3007
17010
/* * Copyright (c) 1995 * Ted Lemon (hereinafter referred to as the author) * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. */ /* elf2ecoff.c This program converts an elf executable to an ECOFF executable. No symbol table is retained. This is useful primarily in building net-bootable kernels for machines (e.g., DECstation and Alpha) which only support the ECOFF object file format. 
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <elf.h>
#include <limits.h>
#include <netinet/in.h>
#include <stdlib.h>
#include "ecoff.h"

/*
 * Some extra ELF definitions
 */
#define PT_MIPS_REGINFO 0x70000000	/* Register usage information */

/* -------------------------------------------------------------------- */

struct sect {
	unsigned long vaddr;
	unsigned long len;
};

int *symTypeTable;
int must_convert_endian;
int format_bigendian;

/*
 * Copy @size bytes from file descriptor @in (starting at @offset) to
 * @out, through a fixed 4 KiB buffer.  Any lseek/read/write failure,
 * including a short read, prints a diagnostic and exits the program.
 */
static void copy(int out, int in, off_t offset, off_t size)
{
	char ibuf[4096];
	int remaining, cur, count;

	/* Go to the start of the ELF symbol table... */
	if (lseek(in, offset, SEEK_SET) < 0) {
		perror("copy: lseek");
		exit(1);
	}

	remaining = size;
	while (remaining) {
		cur = remaining;
		if (cur > sizeof ibuf)
			cur = sizeof ibuf;
		remaining -= cur;
		/* count == 0 means EOF before @size bytes were available */
		if ((count = read(in, ibuf, cur)) != cur) {
			fprintf(stderr, "copy: read: %s\n",
				count ? strerror(errno) :
				"premature end of file");
			exit(1);
		}
		if ((count = write(out, ibuf, cur)) != cur) {
			perror("copy: write");
			exit(1);
		}
	}
}

/*
 * Combine two segments, which must be contiguous.  If pad is true, it's
 * okay for there to be padding between.
*/ static void combine(struct sect *base, struct sect *new, int pad) { if (!base->len) *base = *new; else if (new->len) { if (base->vaddr + base->len != new->vaddr) { if (pad) base->len = new->vaddr - base->vaddr; else { fprintf(stderr, "Non-contiguous data can't be converted.\n"); exit(1); } } base->len += new->len; } } static int phcmp(const void *v1, const void *v2) { const Elf32_Phdr *h1 = v1; const Elf32_Phdr *h2 = v2; if (h1->p_vaddr > h2->p_vaddr) return 1; else if (h1->p_vaddr < h2->p_vaddr) return -1; else return 0; } static char *saveRead(int file, off_t offset, off_t len, char *name) { char *tmp; int count; off_t off; if ((off = lseek(file, offset, SEEK_SET)) < 0) { fprintf(stderr, "%s: fseek: %s\n", name, strerror(errno)); exit(1); } if (!(tmp = (char *) malloc(len))) { fprintf(stderr, "%s: Can't allocate %ld bytes.\n", name, len); exit(1); } count = read(file, tmp, len); if (count != len) { fprintf(stderr, "%s: read: %s.\n", name, count ? strerror(errno) : "End of file reached"); exit(1); } return tmp; } #define swab16(x) \ ((unsigned short)( \ (((unsigned short)(x) & (unsigned short)0x00ffU) << 8) | \ (((unsigned short)(x) & (unsigned short)0xff00U) >> 8) )) #define swab32(x) \ ((unsigned int)( \ (((unsigned int)(x) & (unsigned int)0x000000ffUL) << 24) | \ (((unsigned int)(x) & (unsigned int)0x0000ff00UL) << 8) | \ (((unsigned int)(x) & (unsigned int)0x00ff0000UL) >> 8) | \ (((unsigned int)(x) & (unsigned int)0xff000000UL) >> 24) )) static void convert_elf_hdr(Elf32_Ehdr * e) { e->e_type = swab16(e->e_type); e->e_machine = swab16(e->e_machine); e->e_version = swab32(e->e_version); e->e_entry = swab32(e->e_entry); e->e_phoff = swab32(e->e_phoff); e->e_shoff = swab32(e->e_shoff); e->e_flags = swab32(e->e_flags); e->e_ehsize = swab16(e->e_ehsize); e->e_phentsize = swab16(e->e_phentsize); e->e_phnum = swab16(e->e_phnum); e->e_shentsize = swab16(e->e_shentsize); e->e_shnum = swab16(e->e_shnum); e->e_shstrndx = swab16(e->e_shstrndx); } static void 
convert_elf_phdrs(Elf32_Phdr * p, int num) { int i; for (i = 0; i < num; i++, p++) { p->p_type = swab32(p->p_type); p->p_offset = swab32(p->p_offset); p->p_vaddr = swab32(p->p_vaddr); p->p_paddr = swab32(p->p_paddr); p->p_filesz = swab32(p->p_filesz); p->p_memsz = swab32(p->p_memsz); p->p_flags = swab32(p->p_flags); p->p_align = swab32(p->p_align); } } static void convert_elf_shdrs(Elf32_Shdr * s, int num) { int i; for (i = 0; i < num; i++, s++) { s->sh_name = swab32(s->sh_name); s->sh_type = swab32(s->sh_type); s->sh_flags = swab32(s->sh_flags); s->sh_addr = swab32(s->sh_addr); s->sh_offset = swab32(s->sh_offset); s->sh_size = swab32(s->sh_size); s->sh_link = swab32(s->sh_link); s->sh_info = swab32(s->sh_info); s->sh_addralign = swab32(s->sh_addralign); s->sh_entsize = swab32(s->sh_entsize); } } static void convert_ecoff_filehdr(struct filehdr *f) { f->f_magic = swab16(f->f_magic); f->f_nscns = swab16(f->f_nscns); f->f_timdat = swab32(f->f_timdat); f->f_symptr = swab32(f->f_symptr); f->f_nsyms = swab32(f->f_nsyms); f->f_opthdr = swab16(f->f_opthdr); f->f_flags = swab16(f->f_flags); } static void convert_ecoff_aouthdr(struct aouthdr *a) { a->magic = swab16(a->magic); a->vstamp = swab16(a->vstamp); a->tsize = swab32(a->tsize); a->dsize = swab32(a->dsize); a->bsize = swab32(a->bsize); a->entry = swab32(a->entry); a->text_start = swab32(a->text_start); a->data_start = swab32(a->data_start); a->bss_start = swab32(a->bss_start); a->gprmask = swab32(a->gprmask); a->cprmask[0] = swab32(a->cprmask[0]); a->cprmask[1] = swab32(a->cprmask[1]); a->cprmask[2] = swab32(a->cprmask[2]); a->cprmask[3] = swab32(a->cprmask[3]); a->gp_value = swab32(a->gp_value); } static void convert_ecoff_esecs(struct scnhdr *s, int num) { int i; for (i = 0; i < num; i++, s++) { s->s_paddr = swab32(s->s_paddr); s->s_vaddr = swab32(s->s_vaddr); s->s_size = swab32(s->s_size); s->s_scnptr = swab32(s->s_scnptr); s->s_relptr = swab32(s->s_relptr); s->s_lnnoptr = swab32(s->s_lnnoptr); s->s_nreloc = 
		    swab16(s->s_nreloc);
		s->s_nlnno = swab16(s->s_nlnno);
		s->s_flags = swab32(s->s_flags);
	}
}

int main(int argc, char *argv[])
{
	Elf32_Ehdr ex;		/* ELF file header */
	Elf32_Phdr *ph;		/* ELF program header table */
	Elf32_Shdr *sh;		/* ELF section header table */
	char *shstrtab;		/* section-name string table */
	int i, pad;
	struct sect text, data, bss;	/* merged output segments */
	struct filehdr efh;	/* ECOFF file header being built */
	struct aouthdr eah;	/* ECOFF a.out header being built */
	struct scnhdr esecs[6];	/* ECOFF section headers (3 or 6 used) */
	int infile, outfile;
	unsigned long cur_vma = ULONG_MAX;	/* lowest segment vaddr seen */
	int addflag = 0;	/* "-a": also emit .rdata/.sdata/.sbss */
	int nosecs;		/* number of ECOFF sections emitted */

	text.len = data.len = bss.len = 0;
	text.vaddr = data.vaddr = bss.vaddr = 0;
	/* Check args... */
	if (argc < 3 || argc > 4) {
	      usage:
		fprintf(stderr,
			"usage: elf2ecoff <elf executable> <ecoff executable> [-a]\n");
		exit(1);
	}
	if (argc == 4) {
		if (strcmp(argv[3], "-a"))
			goto usage;
		addflag = 1;
	}

	/* Try the input file... */
	if ((infile = open(argv[1], O_RDONLY)) < 0) {
		fprintf(stderr, "Can't open %s for read: %s\n",
			argv[1], strerror(errno));
		exit(1);
	}

	/* Read the header, which is at the beginning of the file... */
	i = read(infile, &ex, sizeof ex);
	if (i != sizeof ex) {
		fprintf(stderr, "ex: %s: %s.\n",
			argv[1], i ? strerror(errno) : "End of file reached");
		exit(1);
	}

	/* Record the input's byte order, then compare with the host's
	   (probed via ntohs) to decide whether headers need swapping. */
	if (ex.e_ident[EI_DATA] == ELFDATA2MSB)
		format_bigendian = 1;

	if (ntohs(0xaa55) == 0xaa55) {
		/* host is big-endian */
		if (!format_bigendian)
			must_convert_endian = 1;
	} else {
		/* host is little-endian */
		if (format_bigendian)
			must_convert_endian = 1;
	}
	if (must_convert_endian)
		convert_elf_hdr(&ex);

	/* Read the program headers... */
	ph = (Elf32_Phdr *) saveRead(infile, ex.e_phoff,
				     ex.e_phnum * sizeof(Elf32_Phdr), "ph");
	if (must_convert_endian)
		convert_elf_phdrs(ph, ex.e_phnum);
	/* Read the section headers... */
	sh = (Elf32_Shdr *) saveRead(infile, ex.e_shoff,
				     ex.e_shnum * sizeof(Elf32_Shdr), "sh");
	if (must_convert_endian)
		convert_elf_shdrs(sh, ex.e_shnum);
	/* Read in the section string table. */
	shstrtab = saveRead(infile, sh[ex.e_shstrndx].sh_offset,
			    sh[ex.e_shstrndx].sh_size, "shstrtab");

	/* Figure out if we can cram the program header into an ECOFF
	   header...  Basically, we can't handle anything but loadable
	   segments, but we can ignore some kinds of segments.
	   We can't handle holes in the address space.  Segments may be
	   out of order, so we sort them first. */

	qsort(ph, ex.e_phnum, sizeof(Elf32_Phdr), phcmp);

	for (i = 0; i < ex.e_phnum; i++) {
		/* Section types we can ignore... */
		if (ph[i].p_type == PT_NULL || ph[i].p_type == PT_NOTE ||
		    ph[i].p_type == PT_PHDR
		    || ph[i].p_type == PT_MIPS_REGINFO)
			continue;
		/* Section types we can't handle... */
		else if (ph[i].p_type != PT_LOAD) {
			fprintf(stderr,
				"Program header %d type %d can't be converted.\n",
				ex.e_phnum, ph[i].p_type);
			exit(1);
		}
		/* Writable (data) segment? */
		if (ph[i].p_flags & PF_W) {
			struct sect ndata, nbss;

			/* File-backed bytes become .data; the zero-filled
			   remainder (memsz - filesz) becomes .bss. */
			ndata.vaddr = ph[i].p_vaddr;
			ndata.len = ph[i].p_filesz;
			nbss.vaddr = ph[i].p_vaddr + ph[i].p_filesz;
			nbss.len = ph[i].p_memsz - ph[i].p_filesz;

			combine(&data, &ndata, 0);
			combine(&bss, &nbss, 1);
		} else {
			struct sect ntxt;

			ntxt.vaddr = ph[i].p_vaddr;
			ntxt.len = ph[i].p_filesz;

			combine(&text, &ntxt, 0);
		}
		/* Remember the lowest segment start address. */
		if (ph[i].p_vaddr < cur_vma)
			cur_vma = ph[i].p_vaddr;
	}

	/* Sections must be in order to be converted... */
	if (text.vaddr > data.vaddr || data.vaddr > bss.vaddr ||
	    text.vaddr + text.len > data.vaddr
	    || data.vaddr + data.len > bss.vaddr) {
		fprintf(stderr,
			"Sections ordering prevents a.out conversion.\n");
		exit(1);
	}

	/* If there's a data section but no text section, then the
	   loader combined everything into one section.  That needs to
	   be the text section, so just make the data section zero length
	   following text. */

	if (data.len && !text.len) {
		text = data;
		data.vaddr = text.vaddr + text.len;
		data.len = 0;
	}

	/* If there is a gap between text and data, we'll fill it when we copy
	   the data, so update the length of the text segment as represented
	   in a.out to reflect that, since a.out doesn't allow gaps in the
	   program address space. */
	if (text.vaddr + text.len < data.vaddr)
		text.len = data.vaddr - text.vaddr;

	/* We now have enough information to cons up an a.out header...
	 */
	eah.magic = OMAGIC;
	eah.vstamp = 200;
	eah.tsize = text.len;
	eah.dsize = data.len;
	eah.bsize = bss.len;
	eah.entry = ex.e_entry;
	eah.text_start = text.vaddr;
	eah.data_start = data.vaddr;
	eah.bss_start = bss.vaddr;
	eah.gprmask = 0xf3fffffe;
	memset(&eah.cprmask, '\0', sizeof eah.cprmask);
	eah.gp_value = 0;	/* unused. */

	if (format_bigendian)
		efh.f_magic = MIPSEBMAGIC;
	else
		efh.f_magic = MIPSELMAGIC;
	if (addflag)
		nosecs = 6;
	else
		nosecs = 3;
	efh.f_nscns = nosecs;
	efh.f_timdat = 0;	/* bogus */
	efh.f_symptr = 0;
	efh.f_nsyms = 0;
	efh.f_opthdr = sizeof eah;
	efh.f_flags = 0x100f;	/* Stripped, not sharable. */

	/* Build the 3 mandatory section headers (.text/.data/.bss), plus
	   .rdata/.sdata/.sbss placeholders when "-a" was given. */
	memset(esecs, 0, sizeof esecs);
	strcpy(esecs[0].s_name, ".text");
	strcpy(esecs[1].s_name, ".data");
	strcpy(esecs[2].s_name, ".bss");
	if (addflag) {
		strcpy(esecs[3].s_name, ".rdata");
		strcpy(esecs[4].s_name, ".sdata");
		strcpy(esecs[5].s_name, ".sbss");
	}
	esecs[0].s_paddr = esecs[0].s_vaddr = eah.text_start;
	esecs[1].s_paddr = esecs[1].s_vaddr = eah.data_start;
	esecs[2].s_paddr = esecs[2].s_vaddr = eah.bss_start;
	if (addflag) {
		esecs[3].s_paddr = esecs[3].s_vaddr = 0;
		esecs[4].s_paddr = esecs[4].s_vaddr = 0;
		esecs[5].s_paddr = esecs[5].s_vaddr = 0;
	}
	esecs[0].s_size = eah.tsize;
	esecs[1].s_size = eah.dsize;
	esecs[2].s_size = eah.bsize;
	if (addflag) {
		esecs[3].s_size = 0;
		esecs[4].s_size = 0;
		esecs[5].s_size = 0;
	}
	esecs[0].s_scnptr = N_TXTOFF(efh, eah);
	esecs[1].s_scnptr = N_DATOFF(efh, eah);
#define ECOFF_SEGMENT_ALIGNMENT(a) 0x10
#define ECOFF_ROUND(s, a) (((s)+(a)-1)&~((a)-1))
	esecs[2].s_scnptr = esecs[1].s_scnptr +
	    ECOFF_ROUND(esecs[1].s_size, ECOFF_SEGMENT_ALIGNMENT(&eah));
	if (addflag) {
		esecs[3].s_scnptr = 0;
		esecs[4].s_scnptr = 0;
		esecs[5].s_scnptr = 0;
	}
	/* No relocations, line numbers or symbols in the output. */
	esecs[0].s_relptr = esecs[1].s_relptr = esecs[2].s_relptr = 0;
	esecs[0].s_lnnoptr = esecs[1].s_lnnoptr = esecs[2].s_lnnoptr = 0;
	esecs[0].s_nreloc = esecs[1].s_nreloc = esecs[2].s_nreloc = 0;
	esecs[0].s_nlnno = esecs[1].s_nlnno = esecs[2].s_nlnno = 0;
	if (addflag) {
		esecs[3].s_relptr = esecs[4].s_relptr =
esecs[5].s_relptr = 0; esecs[3].s_lnnoptr = esecs[4].s_lnnoptr = esecs[5].s_lnnoptr = 0; esecs[3].s_nreloc = esecs[4].s_nreloc = esecs[5].s_nreloc = 0; esecs[3].s_nlnno = esecs[4].s_nlnno = esecs[5].s_nlnno = 0; } esecs[0].s_flags = 0x20; esecs[1].s_flags = 0x40; esecs[2].s_flags = 0x82; if (addflag) { esecs[3].s_flags = 0x100; esecs[4].s_flags = 0x200; esecs[5].s_flags = 0x400; } /* Make the output file... */ if ((outfile = open(argv[2], O_WRONLY | O_CREAT, 0777)) < 0) { fprintf(stderr, "Unable to create %s: %s\n", argv[2], strerror(errno)); exit(1); } if (must_convert_endian) convert_ecoff_filehdr(&efh); /* Write the headers... */ i = write(outfile, &efh, sizeof efh); if (i != sizeof efh) { perror("efh: write"); exit(1); for (i = 0; i < nosecs; i++) { printf ("Section %d: %s phys %lx size %lx file offset %lx\n", i, esecs[i].s_name, esecs[i].s_paddr, esecs[i].s_size, esecs[i].s_scnptr); } } fprintf(stderr, "wrote %d byte file header.\n", i); if (must_convert_endian) convert_ecoff_aouthdr(&eah); i = write(outfile, &eah, sizeof eah); if (i != sizeof eah) { perror("eah: write"); exit(1); } fprintf(stderr, "wrote %d byte a.out header.\n", i); if (must_convert_endian) convert_ecoff_esecs(&esecs[0], nosecs); i = write(outfile, &esecs, nosecs * sizeof(struct scnhdr)); if (i != nosecs * sizeof(struct scnhdr)) { perror("esecs: write"); exit(1); } fprintf(stderr, "wrote %d bytes of section headers.\n", i); pad = (sizeof(efh) + sizeof(eah) + nosecs * sizeof(struct scnhdr)) & 15; if (pad) { pad = 16 - pad; i = write(outfile, "\0\0\0\0\0\0\0\0\0\0\0\0\0\0", pad); if (i < 0) { perror("ipad: write"); exit(1); } fprintf(stderr, "wrote %d byte pad.\n", i); } /* * Copy the loadable sections. Zero-fill any gaps less than 64k; * complain about any zero-filling, and die if we're asked to zero-fill * more than 64k. */ for (i = 0; i < ex.e_phnum; i++) { /* Unprocessable sections were handled above, so just verify that the section can be loaded before copying. 
		 */
		if (ph[i].p_type == PT_LOAD && ph[i].p_filesz) {
			if (cur_vma != ph[i].p_vaddr) {
				/* Zero-fill the inter-segment gap (at most
				   64 KiB) in 1 KiB chunks. */
				unsigned long gap = ph[i].p_vaddr - cur_vma;
				char obuf[1024];
				if (gap > 65536) {
					fprintf(stderr,
						"Intersegment gap (%ld bytes) too large.\n",
						gap);
					exit(1);
				}
				fprintf(stderr,
					"Warning: %ld byte intersegment gap.\n",
					gap);
				memset(obuf, 0, sizeof obuf);
				while (gap) {
					int count = write(outfile, obuf,
							  (gap > sizeof obuf
							   ? sizeof obuf : gap));
					if (count < 0) {
						fprintf(stderr,
							"Error writing gap: %s\n",
							strerror(errno));
						exit(1);
					}
					gap -= count;
				}
			}
			/* NOTE(review): "%d" with an Elf32_Word p_filesz -
			   relies on int being 32 bits; confirm if ported. */
			fprintf(stderr, "writing %d bytes...\n",
				ph[i].p_filesz);
			copy(outfile, infile, ph[i].p_offset,
			     ph[i].p_filesz);
			cur_vma = ph[i].p_vaddr + ph[i].p_filesz;
		}
	}

	/*
	 * Write a page of padding for boot PROMS that read entire pages.
	 * Without this, they may attempt to read past the end of the
	 * data section, incur an error, and refuse to boot.
	 */
	{
		char obuf[4096];
		memset(obuf, 0, sizeof obuf);
		if (write(outfile, obuf, sizeof(obuf)) != sizeof(obuf)) {
			fprintf(stderr, "Error writing PROM padding: %s\n",
				strerror(errno));
			exit(1);
		}
	}

	/* Looks like we won... */
	exit(0);
}
gpl-2.0
LimKyungWoo/linux-2.6.39
arch/mips/bcm47xx/gpio.c
3775
1627
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2007 Aurelien Jarno <aurelien@aurel32.net>
 */

#include <linux/ssb/ssb.h>
#include <linux/ssb/ssb_driver_chipcommon.h>
#include <linux/ssb/ssb_driver_extif.h>
#include <asm/mach-bcm47xx/bcm47xx.h>
#include <asm/mach-bcm47xx/gpio.h>

/* Allocation bitmap for GPIO lines, sized for the larger of the two cores. */
#if (BCM47XX_CHIPCO_GPIO_LINES > BCM47XX_EXTIF_GPIO_LINES)
static DECLARE_BITMAP(gpio_in_use, BCM47XX_CHIPCO_GPIO_LINES);
#else
static DECLARE_BITMAP(gpio_in_use, BCM47XX_EXTIF_GPIO_LINES);
#endif

/*
 * Reserve a GPIO line.  Returns 0 on success, -EINVAL when the line is
 * out of range for the core providing the GPIOs (ChipCommon or EXTIF),
 * or -EBUSY when it is already taken.  "tag" is accepted for API
 * compatibility but not recorded.
 */
int gpio_request(unsigned gpio, const char *tag)
{
	if (ssb_chipco_available(&ssb_bcm47xx.chipco) &&
	    ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES))
		return -EINVAL;

	if (ssb_extif_available(&ssb_bcm47xx.extif) &&
	    ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES))
		return -EINVAL;

	if (test_and_set_bit(gpio, gpio_in_use))
		return -EBUSY;

	return 0;
}
EXPORT_SYMBOL(gpio_request);

/* Release a previously requested GPIO line; out-of-range lines are a no-op. */
void gpio_free(unsigned gpio)
{
	if (ssb_chipco_available(&ssb_bcm47xx.chipco) &&
	    ((unsigned)gpio >= BCM47XX_CHIPCO_GPIO_LINES))
		return;

	if (ssb_extif_available(&ssb_bcm47xx.extif) &&
	    ((unsigned)gpio >= BCM47XX_EXTIF_GPIO_LINES))
		return;

	clear_bit(gpio, gpio_in_use);
}
EXPORT_SYMBOL(gpio_free);

/*
 * Map a GPIO to its IRQ: all lines share the MIPS IRQ of the providing
 * core, offset by 2.  Returns -EINVAL when neither a ChipCommon nor an
 * EXTIF core is present.
 */
int gpio_to_irq(unsigned gpio)
{
	if (ssb_chipco_available(&ssb_bcm47xx.chipco))
		return ssb_mips_irq(ssb_bcm47xx.chipco.dev) + 2;
	else if (ssb_extif_available(&ssb_bcm47xx.extif))
		return ssb_mips_irq(ssb_bcm47xx.extif.dev) + 2;
	else
		return -EINVAL;
}
EXPORT_SYMBOL_GPL(gpio_to_irq);
gpl-2.0
evilwombat/gopro-linux
drivers/net/mlx4/srq.c
4031
7108
/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mlx4/cmd.h>
#include <linux/gfp.h>

#include "mlx4.h"
#include "icm.h"

/* SRQ context as exchanged with firmware; __be* fields are big-endian. */
struct mlx4_srq_context {
	__be32			state_logsize_srqn;
	u8			logstride;
	u8			reserved1[3];
	u8			pg_offset;
	u8			reserved2[3];
	u32			reserved3;
	u8			log_page_size;
	u8			reserved4[2];
	u8			mtt_base_addr_h;
	__be32			mtt_base_addr_l;
	__be32			pd;
	__be16			limit_watermark;
	__be16			wqe_cnt;
	u16			reserved5;
	__be16			wqe_counter;
	u32			reserved6;
	__be64			db_rec_addr;
};

/*
 * Async-event dispatch: look up the SRQ by number, take a reference so
 * it cannot be freed while its handler runs, invoke the handler, then
 * drop the reference (completing ->free if this was the last one).
 */
void mlx4_srq_event(struct mlx4_dev *dev, u32 srqn, int event_type)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_srq *srq;

	spin_lock(&srq_table->lock);

	srq = radix_tree_lookup(&srq_table->tree,
				srqn & (dev->caps.num_srqs - 1));
	if (srq)
		atomic_inc(&srq->refcount);

	spin_unlock(&srq_table->lock);

	if (!srq) {
		mlx4_warn(dev, "Async event for bogus SRQ %08x\n", srqn);
		return;
	}

	srq->event(srq, event_type);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
}

/* Firmware command: hand a software-owned SRQ context to the HW. */
static int mlx4_SW2HW_SRQ(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox, int srq_num)
{
	return mlx4_cmd(dev, mailbox->dma, srq_num, 0, MLX4_CMD_SW2HW_SRQ,
			MLX4_CMD_TIME_CLASS_A);
}

/* Firmware command: reclaim an SRQ from the HW (NULL mailbox = discard). */
static int mlx4_HW2SW_SRQ(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox, int srq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox ? mailbox->dma : 0, srq_num,
			    mailbox ? 0 : 1, MLX4_CMD_HW2SW_SRQ,
			    MLX4_CMD_TIME_CLASS_A);
}

/* Firmware command: arm the SRQ's limit-watermark event. */
static int mlx4_ARM_SRQ(struct mlx4_dev *dev, int srq_num,
			int limit_watermark)
{
	return mlx4_cmd(dev, limit_watermark, srq_num, 0, MLX4_CMD_ARM_SRQ,
			MLX4_CMD_TIME_CLASS_B);
}

/* Firmware command: read the SRQ context back into "mailbox". */
static int mlx4_QUERY_SRQ(struct mlx4_dev *dev,
			  struct mlx4_cmd_mailbox *mailbox, int srq_num)
{
	return mlx4_cmd_box(dev, 0, mailbox->dma, srq_num, 0,
			    MLX4_CMD_QUERY_SRQ, MLX4_CMD_TIME_CLASS_A);
}

/*
 * Allocate and initialize an SRQ: reserve an SRQ number, back it with
 * ICM memory, publish it in the radix tree, build its context and pass
 * it to firmware.  Unwinds each step in reverse order on failure.
 */
int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,
		   u64 db_rec, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	u64 mtt_addr;
	int err;

	srq->srqn = mlx4_bitmap_alloc(&srq_table->bitmap);
	if (srq->srqn == -1)
		return -ENOMEM;

	err = mlx4_table_get(dev, &srq_table->table, srq->srqn);
	if (err)
		goto err_out;

	err = mlx4_table_get(dev, &srq_table->cmpt_table, srq->srqn);
	if (err)
		goto err_put;

	spin_lock_irq(&srq_table->lock);
	err = radix_tree_insert(&srq_table->tree, srq->srqn, srq);
	spin_unlock_irq(&srq_table->lock);
	if (err)
		goto err_cmpt_put;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto err_radix;
	}

	srq_context = mailbox->buf;
	memset(srq_context, 0, sizeof *srq_context);

	/* Top byte encodes log2 of the SRQ size, low 24 bits the SRQN. */
	srq_context->state_logsize_srqn =
		cpu_to_be32((ilog2(srq->max) << 24) | srq->srqn);
	srq_context->logstride     = srq->wqe_shift - 4;
	srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;

	mtt_addr = mlx4_mtt_addr(dev, mtt);
	srq_context->mtt_base_addr_h = mtt_addr >> 32;
	srq_context->mtt_base_addr_l = cpu_to_be32(mtt_addr & 0xffffffff);
	srq_context->pd              = cpu_to_be32(pdn);
	srq_context->db_rec_addr     = cpu_to_be64(db_rec);

	err = mlx4_SW2HW_SRQ(dev, mailbox, srq->srqn);
	mlx4_free_cmd_mailbox(dev, mailbox);
	if (err)
		goto err_radix;

	atomic_set(&srq->refcount, 1);
	init_completion(&srq->free);

	return 0;

err_radix:
	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

err_cmpt_put:
	mlx4_table_put(dev, &srq_table->cmpt_table, srq->srqn);

err_put:
	mlx4_table_put(dev, &srq_table->table, srq->srqn);

err_out:
	mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);

	return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_alloc);

/*
 * Tear down an SRQ: reclaim it from firmware, unpublish it, then wait
 * for all outstanding references to drop before releasing its ICM
 * backing and SRQ number.
 */
void mlx4_srq_free(struct mlx4_dev *dev, struct mlx4_srq *srq)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	err = mlx4_HW2SW_SRQ(dev, NULL, srq->srqn);
	if (err)
		mlx4_warn(dev, "HW2SW_SRQ failed (%d) for SRQN %06x\n",
			  err, srq->srqn);

	spin_lock_irq(&srq_table->lock);
	radix_tree_delete(&srq_table->tree, srq->srqn);
	spin_unlock_irq(&srq_table->lock);

	if (atomic_dec_and_test(&srq->refcount))
		complete(&srq->free);
	wait_for_completion(&srq->free);

	mlx4_table_put(dev, &srq_table->table, srq->srqn);
	mlx4_bitmap_free(&srq_table->bitmap, srq->srqn);
}
EXPORT_SYMBOL_GPL(mlx4_srq_free);

/* Arm the SRQ's limit watermark via firmware. */
int mlx4_srq_arm(struct mlx4_dev *dev, struct mlx4_srq *srq,
		 int limit_watermark)
{
	return mlx4_ARM_SRQ(dev, srq->srqn, limit_watermark);
}
EXPORT_SYMBOL_GPL(mlx4_srq_arm);

/* Query firmware for the SRQ's current limit watermark. */
int mlx4_srq_query(struct mlx4_dev *dev, struct mlx4_srq *srq,
		   int *limit_watermark)
{
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_srq_context *srq_context;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	srq_context = mailbox->buf;

	err = mlx4_QUERY_SRQ(dev, mailbox, srq->srqn);
	if (err)
		goto err_out;
	*limit_watermark = be16_to_cpu(srq_context->limit_watermark);

err_out:
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;
}
EXPORT_SYMBOL_GPL(mlx4_srq_query);

/* Set up the per-device SRQ table: lock, radix tree and number bitmap. */
int mlx4_init_srq_table(struct mlx4_dev *dev)
{
	struct mlx4_srq_table *srq_table = &mlx4_priv(dev)->srq_table;
	int err;

	spin_lock_init(&srq_table->lock);
	INIT_RADIX_TREE(&srq_table->tree, GFP_ATOMIC);

	err = mlx4_bitmap_init(&srq_table->bitmap, dev->caps.num_srqs,
			       dev->caps.num_srqs - 1,
			       dev->caps.reserved_srqs, 0);
	if (err)
		return err;

	return 0;
}

/* Release the SRQ-number bitmap on device teardown. */
void mlx4_cleanup_srq_table(struct mlx4_dev *dev)
{
	mlx4_bitmap_cleanup(&mlx4_priv(dev)->srq_table.bitmap);
}
gpl-2.0
parheliamm/SCH-i939_Kernel
arch/arm/kernel/crunch.c
4543
2112
/* * arch/arm/kernel/crunch.c * Cirrus MaverickCrunch context switching and handling * * Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/io.h> #include <mach/ep93xx-regs.h> #include <asm/thread_notify.h> struct crunch_state *crunch_owner; void crunch_task_release(struct thread_info *thread) { local_irq_disable(); if (crunch_owner == &thread->crunchstate) crunch_owner = NULL; local_irq_enable(); } static int crunch_enabled(u32 devcfg) { return !!(devcfg & EP93XX_SYSCON_DEVCFG_CPENA); } static int crunch_do(struct notifier_block *self, unsigned long cmd, void *t) { struct thread_info *thread = (struct thread_info *)t; struct crunch_state *crunch_state; u32 devcfg; crunch_state = &thread->crunchstate; switch (cmd) { case THREAD_NOTIFY_FLUSH: memset(crunch_state, 0, sizeof(*crunch_state)); /* * FALLTHROUGH: Ensure we don't try to overwrite our newly * initialised state information on the first fault. */ case THREAD_NOTIFY_EXIT: crunch_task_release(thread); break; case THREAD_NOTIFY_SWITCH: devcfg = __raw_readl(EP93XX_SYSCON_DEVCFG); if (crunch_enabled(devcfg) || crunch_owner == crunch_state) { /* * We don't use ep93xx_syscon_swlocked_write() here * because we are on the context switch path and * preemption is already disabled. 
*/ devcfg ^= EP93XX_SYSCON_DEVCFG_CPENA; __raw_writel(0xaa, EP93XX_SYSCON_SWLOCK); __raw_writel(devcfg, EP93XX_SYSCON_DEVCFG); } break; } return NOTIFY_DONE; } static struct notifier_block crunch_notifier_block = { .notifier_call = crunch_do, }; static int __init crunch_init(void) { thread_register_notifier(&crunch_notifier_block); elf_hwcap |= HWCAP_CRUNCH; return 0; } late_initcall(crunch_init);
gpl-2.0
flashalot/android_kernel_samsung_milletwifi
lib/dma-debug.c
4799
33945
/*
 * Copyright (C) 2008 Advanced Micro Devices, Inc.
 *
 * Author: Joerg Roedel <joerg.roedel@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>
#include <linux/stacktrace.h>
#include <linux/dma-debug.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/ctype.h>
#include <linux/list.h>
#include <linux/slab.h>

#include <asm/sections.h>

/* Hash-table geometry for the live-mapping lookup table. */
#define HASH_SIZE       1024ULL
#define HASH_FN_SHIFT   13
#define HASH_FN_MASK    (HASH_SIZE - 1)

/* Mapping kinds tracked by dma-debug (indexes into type2name[]). */
enum {
	dma_debug_single,
	dma_debug_page,
	dma_debug_sg,
	dma_debug_coherent,
};

#define DMA_DEBUG_STACKTRACE_ENTRIES 5

/*
 * One record per live DMA mapping: which device mapped what physical
 * range to which device address, the direction, scatter-gather entry
 * counts, and (optionally) the stack trace of the mapping call site.
 */
struct dma_debug_entry {
	struct list_head list;
	struct device    *dev;
	int              type;
	phys_addr_t      paddr;
	u64              dev_addr;
	u64              size;
	int              direction;
	int              sg_call_ents;
	int              sg_mapped_ents;
#ifdef CONFIG_STACKTRACE
	struct           stack_trace stacktrace;
	unsigned long    st_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
#endif
};

/* Predicate used when searching a hash bucket for a matching entry. */
typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);

struct hash_bucket {
	struct list_head list;
	spinlock_t lock;
} ____cacheline_aligned_in_smp;

/* Hash list to save the allocated dma addresses */
static struct hash_bucket dma_entry_hash[HASH_SIZE];
/* List of pre-allocated dma_debug_entry's */
static LIST_HEAD(free_entries);
/* Lock for the list above */ static DEFINE_SPINLOCK(free_entries_lock); /* Global disable flag - will be set in case of an error */ static bool global_disable __read_mostly; /* Global error count */ static u32 error_count; /* Global error show enable*/ static u32 show_all_errors __read_mostly; /* Number of errors to show */ static u32 show_num_errors = 1; static u32 num_free_entries; static u32 min_free_entries; static u32 nr_total_entries; /* number of preallocated entries requested by kernel cmdline */ static u32 req_entries; /* debugfs dentry's for the stuff above */ static struct dentry *dma_debug_dent __read_mostly; static struct dentry *global_disable_dent __read_mostly; static struct dentry *error_count_dent __read_mostly; static struct dentry *show_all_errors_dent __read_mostly; static struct dentry *show_num_errors_dent __read_mostly; static struct dentry *num_free_entries_dent __read_mostly; static struct dentry *min_free_entries_dent __read_mostly; static struct dentry *filter_dent __read_mostly; /* per-driver filter related state */ #define NAME_MAX_LEN 64 static char current_driver_name[NAME_MAX_LEN] __read_mostly; static struct device_driver *current_driver __read_mostly; static DEFINE_RWLOCK(driver_name_lock); static const char *type2name[4] = { "single", "page", "scather-gather", "coherent" }; static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE", "DMA_FROM_DEVICE", "DMA_NONE" }; /* little merge helper - remove it after the merge window */ #ifndef BUS_NOTIFY_UNBOUND_DRIVER #define BUS_NOTIFY_UNBOUND_DRIVER 0x0005 #endif /* * The access to some variables in this macro is racy. We can't use atomic_t * here because all these variables are exported to debugfs. Some of them even * writeable. This is also the reason why a lock won't help much. But anyway, * the races are no big deal. 
Here is why: * * error_count: the addition is racy, but the worst thing that can happen is * that we don't count some errors * show_num_errors: the subtraction is racy. Also no big deal because in * worst case this will result in one warning more in the * system log than the user configured. This variable is * writeable via debugfs. */ static inline void dump_entry_trace(struct dma_debug_entry *entry) { #ifdef CONFIG_STACKTRACE if (entry) { pr_warning("Mapped at:\n"); print_stack_trace(&entry->stacktrace, 0); } #endif } static bool driver_filter(struct device *dev) { struct device_driver *drv; unsigned long flags; bool ret; /* driver filter off */ if (likely(!current_driver_name[0])) return true; /* driver filter on and initialized */ if (current_driver && dev && dev->driver == current_driver) return true; /* driver filter on, but we can't filter on a NULL device... */ if (!dev) return false; if (current_driver || !current_driver_name[0]) return false; /* driver filter on but not yet initialized */ drv = dev->driver; if (!drv) return false; /* lock to protect against change of current_driver_name */ read_lock_irqsave(&driver_name_lock, flags); ret = false; if (drv->name && strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) { current_driver = drv; ret = true; } read_unlock_irqrestore(&driver_name_lock, flags); return ret; } #define err_printk(dev, entry, format, arg...) do { \ error_count += 1; \ if (driver_filter(dev) && \ (show_all_errors || show_num_errors > 0)) { \ WARN(1, "%s %s: " format, \ dev ? dev_driver_string(dev) : "NULL", \ dev ? dev_name(dev) : "NULL", ## arg); \ dump_entry_trace(entry); \ } \ if (!show_all_errors && show_num_errors > 0) \ show_num_errors -= 1; \ } while (0); /* * Hash related functions * * Every DMA-API request is saved into a struct dma_debug_entry. To * have quick access to these structs they are stored into a hash. */ static int hash_fn(struct dma_debug_entry *entry) { /* * Hash function is based on the dma address. 
* We use bits 20-27 here as the index into the hash */ return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK; } /* * Request exclusive access to a hash bucket for a given dma_debug_entry. */ static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry, unsigned long *flags) { int idx = hash_fn(entry); unsigned long __flags; spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags); *flags = __flags; return &dma_entry_hash[idx]; } /* * Give up exclusive access to the hash bucket */ static void put_hash_bucket(struct hash_bucket *bucket, unsigned long *flags) { unsigned long __flags = *flags; spin_unlock_irqrestore(&bucket->lock, __flags); } static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b) { return ((a->dev_addr == b->dev_addr) && (a->dev == b->dev)) ? true : false; } static bool containing_match(struct dma_debug_entry *a, struct dma_debug_entry *b) { if (a->dev != b->dev) return false; if ((b->dev_addr <= a->dev_addr) && ((b->dev_addr + b->size) >= (a->dev_addr + a->size))) return true; return false; } /* * Search a given entry in the hash bucket list */ static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket, struct dma_debug_entry *ref, match_fn match) { struct dma_debug_entry *entry, *ret = NULL; int matches = 0, match_lvl, last_lvl = 0; list_for_each_entry(entry, &bucket->list, list) { if (!match(ref, entry)) continue; /* * Some drivers map the same physical address multiple * times. Without a hardware IOMMU this results in the * same device addresses being put into the dma-debug * hash multiple times too. This can result in false * positives being reported. Therefore we implement a * best-fit algorithm here which returns the entry from * the hash which fits best to the reference value * instead of the first-fit. */ matches += 1; match_lvl = 0; entry->size == ref->size ? ++match_lvl : 0; entry->type == ref->type ? ++match_lvl : 0; entry->direction == ref->direction ? 
++match_lvl : 0; entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0; if (match_lvl == 4) { /* perfect-fit - return the result */ return entry; } else if (match_lvl > last_lvl) { /* * We found an entry that fits better then the * previous one */ last_lvl = match_lvl; ret = entry; } } /* * If we have multiple matches but no perfect-fit, just return * NULL. */ ret = (matches == 1) ? ret : NULL; return ret; } static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket, struct dma_debug_entry *ref) { return __hash_bucket_find(bucket, ref, exact_match); } static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket, struct dma_debug_entry *ref, unsigned long *flags) { unsigned int max_range = dma_get_max_seg_size(ref->dev); struct dma_debug_entry *entry, index = *ref; unsigned int range = 0; while (range <= max_range) { entry = __hash_bucket_find(*bucket, &index, containing_match); if (entry) return entry; /* * Nothing found, go back a hash bucket */ put_hash_bucket(*bucket, flags); range += (1 << HASH_FN_SHIFT); index.dev_addr -= (1 << HASH_FN_SHIFT); *bucket = get_hash_bucket(&index, flags); } return NULL; } /* * Add an entry to a hash bucket */ static void hash_bucket_add(struct hash_bucket *bucket, struct dma_debug_entry *entry) { list_add_tail(&entry->list, &bucket->list); } /* * Remove entry from a hash bucket list */ static void hash_bucket_del(struct dma_debug_entry *entry) { list_del(&entry->list); } /* * Dump mapping entries for debugging purposes */ void debug_dma_dump_mappings(struct device *dev) { int idx; for (idx = 0; idx < HASH_SIZE; idx++) { struct hash_bucket *bucket = &dma_entry_hash[idx]; struct dma_debug_entry *entry; unsigned long flags; spin_lock_irqsave(&bucket->lock, flags); list_for_each_entry(entry, &bucket->list, list) { if (!dev || dev == entry->dev) { dev_info(entry->dev, "%s idx %d P=%Lx D=%Lx L=%Lx %s\n", type2name[entry->type], idx, (unsigned long long)entry->paddr, entry->dev_addr, entry->size, 
dir2name[entry->direction]); } } spin_unlock_irqrestore(&bucket->lock, flags); } } EXPORT_SYMBOL(debug_dma_dump_mappings); /* * Wrapper function for adding an entry to the hash. * This function takes care of locking itself. */ static void add_dma_entry(struct dma_debug_entry *entry) { struct hash_bucket *bucket; unsigned long flags; bucket = get_hash_bucket(entry, &flags); hash_bucket_add(bucket, entry); put_hash_bucket(bucket, &flags); } static struct dma_debug_entry *__dma_entry_alloc(void) { struct dma_debug_entry *entry; entry = list_entry(free_entries.next, struct dma_debug_entry, list); list_del(&entry->list); memset(entry, 0, sizeof(*entry)); num_free_entries -= 1; if (num_free_entries < min_free_entries) min_free_entries = num_free_entries; return entry; } /* struct dma_entry allocator * * The next two functions implement the allocator for * struct dma_debug_entries. */ static struct dma_debug_entry *dma_entry_alloc(void) { struct dma_debug_entry *entry = NULL; unsigned long flags; spin_lock_irqsave(&free_entries_lock, flags); if (list_empty(&free_entries)) { pr_err("DMA-API: debugging out of memory - disabling\n"); global_disable = true; goto out; } entry = __dma_entry_alloc(); #ifdef CONFIG_STACKTRACE entry->stacktrace.max_entries = DMA_DEBUG_STACKTRACE_ENTRIES; entry->stacktrace.entries = entry->st_entries; entry->stacktrace.skip = 2; save_stack_trace(&entry->stacktrace); #endif out: spin_unlock_irqrestore(&free_entries_lock, flags); return entry; } static void dma_entry_free(struct dma_debug_entry *entry) { unsigned long flags; /* * add to beginning of the list - this way the entries are * more likely cache hot when they are reallocated. 
*/ spin_lock_irqsave(&free_entries_lock, flags); list_add(&entry->list, &free_entries); num_free_entries += 1; spin_unlock_irqrestore(&free_entries_lock, flags); } int dma_debug_resize_entries(u32 num_entries) { int i, delta, ret = 0; unsigned long flags; struct dma_debug_entry *entry; LIST_HEAD(tmp); spin_lock_irqsave(&free_entries_lock, flags); if (nr_total_entries < num_entries) { delta = num_entries - nr_total_entries; spin_unlock_irqrestore(&free_entries_lock, flags); for (i = 0; i < delta; i++) { entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) break; list_add_tail(&entry->list, &tmp); } spin_lock_irqsave(&free_entries_lock, flags); list_splice(&tmp, &free_entries); nr_total_entries += i; num_free_entries += i; } else { delta = nr_total_entries - num_entries; for (i = 0; i < delta && !list_empty(&free_entries); i++) { entry = __dma_entry_alloc(); kfree(entry); } nr_total_entries -= i; } if (nr_total_entries != num_entries) ret = 1; spin_unlock_irqrestore(&free_entries_lock, flags); return ret; } EXPORT_SYMBOL(dma_debug_resize_entries); /* * DMA-API debugging init code * * The init code does two things: * 1. Initialize core data structures * 2. 
Preallocate a given number of dma_debug_entry structs */ static int prealloc_memory(u32 num_entries) { struct dma_debug_entry *entry, *next_entry; int i; for (i = 0; i < num_entries; ++i) { entry = kzalloc(sizeof(*entry), GFP_KERNEL); if (!entry) goto out_err; list_add_tail(&entry->list, &free_entries); } num_free_entries = num_entries; min_free_entries = num_entries; pr_info("DMA-API: preallocated %d debug entries\n", num_entries); return 0; out_err: list_for_each_entry_safe(entry, next_entry, &free_entries, list) { list_del(&entry->list); kfree(entry); } return -ENOMEM; } static ssize_t filter_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { char buf[NAME_MAX_LEN + 1]; unsigned long flags; int len; if (!current_driver_name[0]) return 0; /* * We can't copy to userspace directly because current_driver_name can * only be read under the driver_name_lock with irqs disabled. So * create a temporary copy first. */ read_lock_irqsave(&driver_name_lock, flags); len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name); read_unlock_irqrestore(&driver_name_lock, flags); return simple_read_from_buffer(user_buf, count, ppos, buf, len); } static ssize_t filter_write(struct file *file, const char __user *userbuf, size_t count, loff_t *ppos) { char buf[NAME_MAX_LEN]; unsigned long flags; size_t len; int i; /* * We can't copy from userspace directly. Access to * current_driver_name is protected with a write_lock with irqs * disabled. Since copy_from_user can fault and may sleep we * need to copy to temporary buffer first */ len = min(count, (size_t)(NAME_MAX_LEN - 1)); if (copy_from_user(buf, userbuf, len)) return -EFAULT; buf[len] = 0; write_lock_irqsave(&driver_name_lock, flags); /* * Now handle the string we got from userspace very carefully. * The rules are: * - only use the first token we got * - token delimiter is everything looking like a space * character (' ', '\n', '\t' ...) 
* */ if (!isalnum(buf[0])) { /* * If the first character userspace gave us is not * alphanumerical then assume the filter should be * switched off. */ if (current_driver_name[0]) pr_info("DMA-API: switching off dma-debug driver filter\n"); current_driver_name[0] = 0; current_driver = NULL; goto out_unlock; } /* * Now parse out the first token and use it as the name for the * driver to filter for. */ for (i = 0; i < NAME_MAX_LEN - 1; ++i) { current_driver_name[i] = buf[i]; if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0) break; } current_driver_name[i] = 0; current_driver = NULL; pr_info("DMA-API: enable driver filter for driver [%s]\n", current_driver_name); out_unlock: write_unlock_irqrestore(&driver_name_lock, flags); return count; } static const struct file_operations filter_fops = { .read = filter_read, .write = filter_write, .llseek = default_llseek, }; static int dma_debug_fs_init(void) { dma_debug_dent = debugfs_create_dir("dma-api", NULL); if (!dma_debug_dent) { pr_err("DMA-API: can not create debugfs directory\n"); return -ENOMEM; } global_disable_dent = debugfs_create_bool("disabled", 0444, dma_debug_dent, (u32 *)&global_disable); if (!global_disable_dent) goto out_err; error_count_dent = debugfs_create_u32("error_count", 0444, dma_debug_dent, &error_count); if (!error_count_dent) goto out_err; show_all_errors_dent = debugfs_create_u32("all_errors", 0644, dma_debug_dent, &show_all_errors); if (!show_all_errors_dent) goto out_err; show_num_errors_dent = debugfs_create_u32("num_errors", 0644, dma_debug_dent, &show_num_errors); if (!show_num_errors_dent) goto out_err; num_free_entries_dent = debugfs_create_u32("num_free_entries", 0444, dma_debug_dent, &num_free_entries); if (!num_free_entries_dent) goto out_err; min_free_entries_dent = debugfs_create_u32("min_free_entries", 0444, dma_debug_dent, &min_free_entries); if (!min_free_entries_dent) goto out_err; filter_dent = debugfs_create_file("driver_filter", 0644, dma_debug_dent, NULL, &filter_fops); if 
(!filter_dent) goto out_err; return 0; out_err: debugfs_remove_recursive(dma_debug_dent); return -ENOMEM; } static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry) { struct dma_debug_entry *entry; unsigned long flags; int count = 0, i; local_irq_save(flags); for (i = 0; i < HASH_SIZE; ++i) { spin_lock(&dma_entry_hash[i].lock); list_for_each_entry(entry, &dma_entry_hash[i].list, list) { if (entry->dev == dev) { count += 1; *out_entry = entry; } } spin_unlock(&dma_entry_hash[i].lock); } local_irq_restore(flags); return count; } static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct dma_debug_entry *uninitialized_var(entry); int count; if (global_disable) return 0; switch (action) { case BUS_NOTIFY_UNBOUND_DRIVER: count = device_dma_allocations(dev, &entry); if (count == 0) break; err_printk(dev, entry, "DMA-API: device driver has pending " "DMA allocations while released from device " "[count=%d]\n" "One of leaked entries details: " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [mapped as %s]\n", count, entry->dev_addr, entry->size, dir2name[entry->direction], type2name[entry->type]); break; default: break; } return 0; } void dma_debug_add_bus(struct bus_type *bus) { struct notifier_block *nb; if (global_disable) return; nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL); if (nb == NULL) { pr_err("dma_debug_add_bus: out of memory\n"); return; } nb->notifier_call = dma_debug_device_change; bus_register_notifier(bus, nb); } /* * Let the architectures decide how many entries should be preallocated. 
*/ void dma_debug_init(u32 num_entries) { int i; if (global_disable) return; for (i = 0; i < HASH_SIZE; ++i) { INIT_LIST_HEAD(&dma_entry_hash[i].list); spin_lock_init(&dma_entry_hash[i].lock); } if (dma_debug_fs_init() != 0) { pr_err("DMA-API: error creating debugfs entries - disabling\n"); global_disable = true; return; } if (req_entries) num_entries = req_entries; if (prealloc_memory(num_entries) != 0) { pr_err("DMA-API: debugging out of memory error - disabled\n"); global_disable = true; return; } nr_total_entries = num_free_entries; pr_info("DMA-API: debugging enabled by kernel config\n"); } static __init int dma_debug_cmdline(char *str) { if (!str) return -EINVAL; if (strncmp(str, "off", 3) == 0) { pr_info("DMA-API: debugging disabled on kernel command line\n"); global_disable = true; } return 0; } static __init int dma_debug_entries_cmdline(char *str) { int res; if (!str) return -EINVAL; res = get_option(&str, &req_entries); if (!res) req_entries = 0; return 0; } __setup("dma_debug=", dma_debug_cmdline); __setup("dma_debug_entries=", dma_debug_entries_cmdline); static void check_unmap(struct dma_debug_entry *ref) { struct dma_debug_entry *entry; struct hash_bucket *bucket; unsigned long flags; if (dma_mapping_error(ref->dev, ref->dev_addr)) { err_printk(ref->dev, NULL, "DMA-API: device driver tries " "to free an invalid DMA memory address\n"); return; } bucket = get_hash_bucket(ref, &flags); entry = bucket_find_exact(bucket, ref); if (!entry) { err_printk(ref->dev, NULL, "DMA-API: device driver tries " "to free DMA memory it has not allocated " "[device address=0x%016llx] [size=%llu bytes]\n", ref->dev_addr, ref->size); goto out; } if (ref->size != entry->size) { err_printk(ref->dev, entry, "DMA-API: device driver frees " "DMA memory with different size " "[device address=0x%016llx] [map size=%llu bytes] " "[unmap size=%llu bytes]\n", ref->dev_addr, entry->size, ref->size); } if (ref->type != entry->type) { err_printk(ref->dev, entry, "DMA-API: device driver 
frees " "DMA memory with wrong function " "[device address=0x%016llx] [size=%llu bytes] " "[mapped as %s] [unmapped as %s]\n", ref->dev_addr, ref->size, type2name[entry->type], type2name[ref->type]); } else if ((entry->type == dma_debug_coherent) && (ref->paddr != entry->paddr)) { err_printk(ref->dev, entry, "DMA-API: device driver frees " "DMA memory with different CPU address " "[device address=0x%016llx] [size=%llu bytes] " "[cpu alloc address=0x%016llx] " "[cpu free address=0x%016llx]", ref->dev_addr, ref->size, (unsigned long long)entry->paddr, (unsigned long long)ref->paddr); } if (ref->sg_call_ents && ref->type == dma_debug_sg && ref->sg_call_ents != entry->sg_call_ents) { err_printk(ref->dev, entry, "DMA-API: device driver frees " "DMA sg list with different entry count " "[map count=%d] [unmap count=%d]\n", entry->sg_call_ents, ref->sg_call_ents); } /* * This may be no bug in reality - but most implementations of the * DMA API don't handle this properly, so check for it here */ if (ref->direction != entry->direction) { err_printk(ref->dev, entry, "DMA-API: device driver frees " "DMA memory with different direction " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [unmapped with %s]\n", ref->dev_addr, ref->size, dir2name[entry->direction], dir2name[ref->direction]); } hash_bucket_del(entry); dma_entry_free(entry); out: put_hash_bucket(bucket, &flags); } static void check_for_stack(struct device *dev, void *addr) { if (object_is_on_stack(addr)) err_printk(dev, NULL, "DMA-API: device driver maps memory from" "stack [addr=%p]\n", addr); } static inline bool overlap(void *addr, unsigned long len, void *start, void *end) { unsigned long a1 = (unsigned long)addr; unsigned long b1 = a1 + len; unsigned long a2 = (unsigned long)start; unsigned long b2 = (unsigned long)end; return !(b1 <= a2 || a1 >= b2); } static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len) { if (overlap(addr, len, _text, _etext) || overlap(addr, 
len, __start_rodata, __end_rodata)) err_printk(dev, NULL, "DMA-API: device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len); } static void check_sync(struct device *dev, struct dma_debug_entry *ref, bool to_cpu) { struct dma_debug_entry *entry; struct hash_bucket *bucket; unsigned long flags; bucket = get_hash_bucket(ref, &flags); entry = bucket_find_contain(&bucket, ref, &flags); if (!entry) { err_printk(dev, NULL, "DMA-API: device driver tries " "to sync DMA memory it has not allocated " "[device address=0x%016llx] [size=%llu bytes]\n", (unsigned long long)ref->dev_addr, ref->size); goto out; } if (ref->size > entry->size) { err_printk(dev, entry, "DMA-API: device driver syncs" " DMA memory outside allocated range " "[device address=0x%016llx] " "[allocation size=%llu bytes] " "[sync offset+size=%llu]\n", entry->dev_addr, entry->size, ref->size); } if (entry->direction == DMA_BIDIRECTIONAL) goto out; if (ref->direction != entry->direction) { err_printk(dev, entry, "DMA-API: device driver syncs " "DMA memory with different direction " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s]\n", (unsigned long long)ref->dev_addr, entry->size, dir2name[entry->direction], dir2name[ref->direction]); } if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) && !(ref->direction == DMA_TO_DEVICE)) err_printk(dev, entry, "DMA-API: device driver syncs " "device read-only DMA memory for cpu " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s]\n", (unsigned long long)ref->dev_addr, entry->size, dir2name[entry->direction], dir2name[ref->direction]); if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) && !(ref->direction == DMA_FROM_DEVICE)) err_printk(dev, entry, "DMA-API: device driver syncs " "device write-only DMA memory to device " "[device address=0x%016llx] [size=%llu bytes] " "[mapped with %s] [synced with %s]\n", (unsigned long long)ref->dev_addr, entry->size, 
dir2name[entry->direction], dir2name[ref->direction]); out: put_hash_bucket(bucket, &flags); } void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, size_t size, int direction, dma_addr_t dma_addr, bool map_single) { struct dma_debug_entry *entry; if (unlikely(global_disable)) return; if (unlikely(dma_mapping_error(dev, dma_addr))) return; entry = dma_entry_alloc(); if (!entry) return; entry->dev = dev; entry->type = dma_debug_page; entry->paddr = page_to_phys(page) + offset; entry->dev_addr = dma_addr; entry->size = size; entry->direction = direction; if (map_single) entry->type = dma_debug_single; if (!PageHighMem(page)) { void *addr = page_address(page) + offset; check_for_stack(dev, addr); check_for_illegal_area(dev, addr, size); } add_dma_entry(entry); } EXPORT_SYMBOL(debug_dma_map_page); void debug_dma_unmap_page(struct device *dev, dma_addr_t addr, size_t size, int direction, bool map_single) { struct dma_debug_entry ref = { .type = dma_debug_page, .dev = dev, .dev_addr = addr, .size = size, .direction = direction, }; if (unlikely(global_disable)) return; if (map_single) ref.type = dma_debug_single; check_unmap(&ref); } EXPORT_SYMBOL(debug_dma_unmap_page); void debug_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, int mapped_ents, int direction) { struct dma_debug_entry *entry; struct scatterlist *s; int i; if (unlikely(global_disable)) return; for_each_sg(sg, s, mapped_ents, i) { entry = dma_entry_alloc(); if (!entry) return; entry->type = dma_debug_sg; entry->dev = dev; entry->paddr = sg_phys(s); entry->size = sg_dma_len(s); entry->dev_addr = sg_dma_address(s); entry->direction = direction; entry->sg_call_ents = nents; entry->sg_mapped_ents = mapped_ents; if (!PageHighMem(sg_page(s))) { check_for_stack(dev, sg_virt(s)); check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s)); } add_dma_entry(entry); } } EXPORT_SYMBOL(debug_dma_map_sg); static int get_nr_mapped_entries(struct device *dev, struct dma_debug_entry *ref) 
{ struct dma_debug_entry *entry; struct hash_bucket *bucket; unsigned long flags; int mapped_ents; bucket = get_hash_bucket(ref, &flags); entry = bucket_find_exact(bucket, ref); mapped_ents = 0; if (entry) mapped_ents = entry->sg_mapped_ents; put_hash_bucket(bucket, &flags); return mapped_ents; } void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist, int nelems, int dir) { struct scatterlist *s; int mapped_ents = 0, i; if (unlikely(global_disable)) return; for_each_sg(sglist, s, nelems, i) { struct dma_debug_entry ref = { .type = dma_debug_sg, .dev = dev, .paddr = sg_phys(s), .dev_addr = sg_dma_address(s), .size = sg_dma_len(s), .direction = dir, .sg_call_ents = nelems, }; if (mapped_ents && i >= mapped_ents) break; if (!i) mapped_ents = get_nr_mapped_entries(dev, &ref); check_unmap(&ref); } } EXPORT_SYMBOL(debug_dma_unmap_sg); void debug_dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t dma_addr, void *virt) { struct dma_debug_entry *entry; if (unlikely(global_disable)) return; if (unlikely(virt == NULL)) return; entry = dma_entry_alloc(); if (!entry) return; entry->type = dma_debug_coherent; entry->dev = dev; entry->paddr = virt_to_phys(virt); entry->size = size; entry->dev_addr = dma_addr; entry->direction = DMA_BIDIRECTIONAL; add_dma_entry(entry); } EXPORT_SYMBOL(debug_dma_alloc_coherent); void debug_dma_free_coherent(struct device *dev, size_t size, void *virt, dma_addr_t addr) { struct dma_debug_entry ref = { .type = dma_debug_coherent, .dev = dev, .paddr = virt_to_phys(virt), .dev_addr = addr, .size = size, .direction = DMA_BIDIRECTIONAL, }; if (unlikely(global_disable)) return; check_unmap(&ref); } EXPORT_SYMBOL(debug_dma_free_coherent); void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size, int direction) { struct dma_debug_entry ref; if (unlikely(global_disable)) return; ref.type = dma_debug_single; ref.dev = dev; ref.dev_addr = dma_handle; ref.size = size; ref.direction = direction; 
ref.sg_call_ents = 0; check_sync(dev, &ref, true); } EXPORT_SYMBOL(debug_dma_sync_single_for_cpu); void debug_dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size, int direction) { struct dma_debug_entry ref; if (unlikely(global_disable)) return; ref.type = dma_debug_single; ref.dev = dev; ref.dev_addr = dma_handle; ref.size = size; ref.direction = direction; ref.sg_call_ents = 0; check_sync(dev, &ref, false); } EXPORT_SYMBOL(debug_dma_sync_single_for_device); void debug_dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { struct dma_debug_entry ref; if (unlikely(global_disable)) return; ref.type = dma_debug_single; ref.dev = dev; ref.dev_addr = dma_handle; ref.size = offset + size; ref.direction = direction; ref.sg_call_ents = 0; check_sync(dev, &ref, true); } EXPORT_SYMBOL(debug_dma_sync_single_range_for_cpu); void debug_dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle, unsigned long offset, size_t size, int direction) { struct dma_debug_entry ref; if (unlikely(global_disable)) return; ref.type = dma_debug_single; ref.dev = dev; ref.dev_addr = dma_handle; ref.size = offset + size; ref.direction = direction; ref.sg_call_ents = 0; check_sync(dev, &ref, false); } EXPORT_SYMBOL(debug_dma_sync_single_range_for_device); void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems, int direction) { struct scatterlist *s; int mapped_ents = 0, i; if (unlikely(global_disable)) return; for_each_sg(sg, s, nelems, i) { struct dma_debug_entry ref = { .type = dma_debug_sg, .dev = dev, .paddr = sg_phys(s), .dev_addr = sg_dma_address(s), .size = sg_dma_len(s), .direction = direction, .sg_call_ents = nelems, }; if (!i) mapped_ents = get_nr_mapped_entries(dev, &ref); if (i >= mapped_ents) break; check_sync(dev, &ref, true); } } EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu); void debug_dma_sync_sg_for_device(struct device *dev, struct 
scatterlist *sg, int nelems, int direction) { struct scatterlist *s; int mapped_ents = 0, i; if (unlikely(global_disable)) return; for_each_sg(sg, s, nelems, i) { struct dma_debug_entry ref = { .type = dma_debug_sg, .dev = dev, .paddr = sg_phys(s), .dev_addr = sg_dma_address(s), .size = sg_dma_len(s), .direction = direction, .sg_call_ents = nelems, }; if (!i) mapped_ents = get_nr_mapped_entries(dev, &ref); if (i >= mapped_ents) break; check_sync(dev, &ref, false); } } EXPORT_SYMBOL(debug_dma_sync_sg_for_device); static int __init dma_debug_driver_setup(char *str) { int i; for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) { current_driver_name[i] = *str; if (*str == 0) break; } if (current_driver_name[0]) pr_info("DMA-API: enable driver filter for driver [%s]\n", current_driver_name); return 1; } __setup("dma_debug_driver=", dma_debug_driver_setup);
gpl-2.0
SlimSaber/kernel_sony_msm8974
drivers/mtd/nand/r852.c
4799
26049
/* * Copyright © 2009 - Maxim Levitsky * driver for Ricoh xD readers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/jiffies.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/pci_ids.h> #include <linux/delay.h> #include <linux/slab.h> #include <asm/byteorder.h> #include <linux/sched.h> #include "sm_common.h" #include "r852.h" static bool r852_enable_dma = 1; module_param(r852_enable_dma, bool, S_IRUGO); MODULE_PARM_DESC(r852_enable_dma, "Enable usage of the DMA (default)"); static int debug; module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug level (0-2)"); /* read register */ static inline uint8_t r852_read_reg(struct r852_device *dev, int address) { uint8_t reg = readb(dev->mmio + address); return reg; } /* write register */ static inline void r852_write_reg(struct r852_device *dev, int address, uint8_t value) { writeb(value, dev->mmio + address); mmiowb(); } /* read dword sized register */ static inline uint32_t r852_read_reg_dword(struct r852_device *dev, int address) { uint32_t reg = le32_to_cpu(readl(dev->mmio + address)); return reg; } /* write dword sized register */ static inline void r852_write_reg_dword(struct r852_device *dev, int address, uint32_t value) { writel(cpu_to_le32(value), dev->mmio + address); mmiowb(); } /* returns pointer to our private structure */ static inline struct r852_device *r852_get_dev(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; return chip->priv; } /* check if controller supports dma */ static void r852_dma_test(struct r852_device *dev) { dev->dma_usable = (r852_read_reg(dev, R852_DMA_CAP) & (R852_DMA1 | R852_DMA2)) == (R852_DMA1 | R852_DMA2); if (!dev->dma_usable) message("Non dma capable device detected, dma disabled"); if 
(!r852_enable_dma) { message("disabling dma on user request"); dev->dma_usable = 0; } } /* * Enable dma. Enables ether first or second stage of the DMA, * Expects dev->dma_dir and dev->dma_state be set */ static void r852_dma_enable(struct r852_device *dev) { uint8_t dma_reg, dma_irq_reg; /* Set up dma settings */ dma_reg = r852_read_reg_dword(dev, R852_DMA_SETTINGS); dma_reg &= ~(R852_DMA_READ | R852_DMA_INTERNAL | R852_DMA_MEMORY); if (dev->dma_dir) dma_reg |= R852_DMA_READ; if (dev->dma_state == DMA_INTERNAL) { dma_reg |= R852_DMA_INTERNAL; /* Precaution to make sure HW doesn't write */ /* to random kernel memory */ r852_write_reg_dword(dev, R852_DMA_ADDR, cpu_to_le32(dev->phys_bounce_buffer)); } else { dma_reg |= R852_DMA_MEMORY; r852_write_reg_dword(dev, R852_DMA_ADDR, cpu_to_le32(dev->phys_dma_addr)); } /* Precaution: make sure write reached the device */ r852_read_reg_dword(dev, R852_DMA_ADDR); r852_write_reg_dword(dev, R852_DMA_SETTINGS, dma_reg); /* Set dma irq */ dma_irq_reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE); r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, dma_irq_reg | R852_DMA_IRQ_INTERNAL | R852_DMA_IRQ_ERROR | R852_DMA_IRQ_MEMORY); } /* * Disable dma, called from the interrupt handler, which specifies * success of the operation via 'error' argument */ static void r852_dma_done(struct r852_device *dev, int error) { WARN_ON(dev->dma_stage == 0); r852_write_reg_dword(dev, R852_DMA_IRQ_STA, r852_read_reg_dword(dev, R852_DMA_IRQ_STA)); r852_write_reg_dword(dev, R852_DMA_SETTINGS, 0); r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, 0); /* Precaution to make sure HW doesn't write to random kernel memory */ r852_write_reg_dword(dev, R852_DMA_ADDR, cpu_to_le32(dev->phys_bounce_buffer)); r852_read_reg_dword(dev, R852_DMA_ADDR); dev->dma_error = error; dev->dma_stage = 0; if (dev->phys_dma_addr && dev->phys_dma_addr != dev->phys_bounce_buffer) pci_unmap_single(dev->pci_dev, dev->phys_dma_addr, R852_DMA_LEN, dev->dma_dir ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); } /* * Wait, till dma is done, which includes both phases of it */ static int r852_dma_wait(struct r852_device *dev) { long timeout = wait_for_completion_timeout(&dev->dma_done, msecs_to_jiffies(1000)); if (!timeout) { dbg("timeout waiting for DMA interrupt"); return -ETIMEDOUT; } return 0; } /* * Read/Write one page using dma. Only pages can be read (512 bytes) */ static void r852_do_dma(struct r852_device *dev, uint8_t *buf, int do_read) { int bounce = 0; unsigned long flags; int error; dev->dma_error = 0; /* Set dma direction */ dev->dma_dir = do_read; dev->dma_stage = 1; INIT_COMPLETION(dev->dma_done); dbg_verbose("doing dma %s ", do_read ? "read" : "write"); /* Set initial dma state: for reading first fill on board buffer, from device, for writes first fill the buffer from memory*/ dev->dma_state = do_read ? DMA_INTERNAL : DMA_MEMORY; /* if incoming buffer is not page aligned, we should do bounce */ if ((unsigned long)buf & (R852_DMA_LEN-1)) bounce = 1; if (!bounce) { dev->phys_dma_addr = pci_map_single(dev->pci_dev, (void *)buf, R852_DMA_LEN, (do_read ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE)); if (pci_dma_mapping_error(dev->pci_dev, dev->phys_dma_addr)) bounce = 1; } if (bounce) { dbg_verbose("dma: using bounce buffer"); dev->phys_dma_addr = dev->phys_bounce_buffer; if (!do_read) memcpy(dev->bounce_buffer, buf, R852_DMA_LEN); } /* Enable DMA */ spin_lock_irqsave(&dev->irqlock, flags); r852_dma_enable(dev); spin_unlock_irqrestore(&dev->irqlock, flags); /* Wait till complete */ error = r852_dma_wait(dev); if (error) { r852_dma_done(dev, error); return; } if (do_read && bounce) memcpy((void *)buf, dev->bounce_buffer, R852_DMA_LEN); } /* * Program data lines of the nand chip to send data to it */ void r852_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct r852_device *dev = r852_get_dev(mtd); uint32_t reg; /* Don't allow any access to hardware if we suspect card removal */ if (dev->card_unstable) return; /* Special case for whole sector read */ if (len == R852_DMA_LEN && dev->dma_usable) { r852_do_dma(dev, (uint8_t *)buf, 0); return; } /* write DWORD chinks - faster */ while (len) { reg = buf[0] | buf[1] << 8 | buf[2] << 16 | buf[3] << 24; r852_write_reg_dword(dev, R852_DATALINE, reg); buf += 4; len -= 4; } /* write rest */ while (len) r852_write_reg(dev, R852_DATALINE, *buf++); } /* * Read data lines of the nand chip to retrieve data */ void r852_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct r852_device *dev = r852_get_dev(mtd); uint32_t reg; if (dev->card_unstable) { /* since we can't signal error here, at least, return predictable buffer */ memset(buf, 0, len); return; } /* special case for whole sector read */ if (len == R852_DMA_LEN && dev->dma_usable) { r852_do_dma(dev, buf, 1); return; } /* read in dword sized chunks */ while (len >= 4) { reg = r852_read_reg_dword(dev, R852_DATALINE); *buf++ = reg & 0xFF; *buf++ = (reg >> 8) & 0xFF; *buf++ = (reg >> 16) & 0xFF; *buf++ = (reg >> 24) & 0xFF; len -= 4; } /* read the reset by bytes */ while (len--) *buf++ = r852_read_reg(dev, 
R852_DATALINE); } /* * Read one byte from nand chip */ static uint8_t r852_read_byte(struct mtd_info *mtd) { struct r852_device *dev = r852_get_dev(mtd); /* Same problem as in r852_read_buf.... */ if (dev->card_unstable) return 0; return r852_read_reg(dev, R852_DATALINE); } /* * Readback the buffer to verify it */ int r852_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct r852_device *dev = r852_get_dev(mtd); /* We can't be sure about anything here... */ if (dev->card_unstable) return -1; /* This will never happen, unless you wired up a nand chip with > 512 bytes page size to the reader */ if (len > SM_SECTOR_SIZE) return 0; r852_read_buf(mtd, dev->tmp_buffer, len); return memcmp(buf, dev->tmp_buffer, len); } /* * Control several chip lines & send commands */ void r852_cmdctl(struct mtd_info *mtd, int dat, unsigned int ctrl) { struct r852_device *dev = r852_get_dev(mtd); if (dev->card_unstable) return; if (ctrl & NAND_CTRL_CHANGE) { dev->ctlreg &= ~(R852_CTL_DATA | R852_CTL_COMMAND | R852_CTL_ON | R852_CTL_CARDENABLE); if (ctrl & NAND_ALE) dev->ctlreg |= R852_CTL_DATA; if (ctrl & NAND_CLE) dev->ctlreg |= R852_CTL_COMMAND; if (ctrl & NAND_NCE) dev->ctlreg |= (R852_CTL_CARDENABLE | R852_CTL_ON); else dev->ctlreg &= ~R852_CTL_WRITE; /* when write is stareted, enable write access */ if (dat == NAND_CMD_ERASE1) dev->ctlreg |= R852_CTL_WRITE; r852_write_reg(dev, R852_CTL, dev->ctlreg); } /* HACK: NAND_CMD_SEQIN is called without NAND_CTRL_CHANGE, but we need to set write mode */ if (dat == NAND_CMD_SEQIN && (dev->ctlreg & R852_CTL_COMMAND)) { dev->ctlreg |= R852_CTL_WRITE; r852_write_reg(dev, R852_CTL, dev->ctlreg); } if (dat != NAND_CMD_NONE) r852_write_reg(dev, R852_DATALINE, dat); } /* * Wait till card is ready. 
* based on nand_wait, but returns errors on DMA error */ int r852_wait(struct mtd_info *mtd, struct nand_chip *chip) { struct r852_device *dev = chip->priv; unsigned long timeout; int status; timeout = jiffies + (chip->state == FL_ERASING ? msecs_to_jiffies(400) : msecs_to_jiffies(20)); while (time_before(jiffies, timeout)) if (chip->dev_ready(mtd)) break; chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1); status = (int)chip->read_byte(mtd); /* Unfortunelly, no way to send detailed error status... */ if (dev->dma_error) { status |= NAND_STATUS_FAIL; dev->dma_error = 0; } return status; } /* * Check if card is ready */ int r852_ready(struct mtd_info *mtd) { struct r852_device *dev = r852_get_dev(mtd); return !(r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_BUSY); } /* * Set ECC engine mode */ void r852_ecc_hwctl(struct mtd_info *mtd, int mode) { struct r852_device *dev = r852_get_dev(mtd); if (dev->card_unstable) return; switch (mode) { case NAND_ECC_READ: case NAND_ECC_WRITE: /* enable ecc generation/check*/ dev->ctlreg |= R852_CTL_ECC_ENABLE; /* flush ecc buffer */ r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); r852_read_reg_dword(dev, R852_DATALINE); r852_write_reg(dev, R852_CTL, dev->ctlreg); return; case NAND_ECC_READSYN: /* disable ecc generation */ dev->ctlreg &= ~R852_CTL_ECC_ENABLE; r852_write_reg(dev, R852_CTL, dev->ctlreg); } } /* * Calculate ECC, only used for writes */ int r852_ecc_calculate(struct mtd_info *mtd, const uint8_t *dat, uint8_t *ecc_code) { struct r852_device *dev = r852_get_dev(mtd); struct sm_oob *oob = (struct sm_oob *)ecc_code; uint32_t ecc1, ecc2; if (dev->card_unstable) return 0; dev->ctlreg &= ~R852_CTL_ECC_ENABLE; r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); ecc1 = r852_read_reg_dword(dev, R852_DATALINE); ecc2 = r852_read_reg_dword(dev, R852_DATALINE); oob->ecc1[0] = (ecc1) & 0xFF; oob->ecc1[1] = (ecc1 >> 8) & 0xFF; oob->ecc1[2] = (ecc1 >> 16) & 0xFF; oob->ecc2[0] = (ecc2) & 0xFF; oob->ecc2[1] = 
(ecc2 >> 8) & 0xFF; oob->ecc2[2] = (ecc2 >> 16) & 0xFF; r852_write_reg(dev, R852_CTL, dev->ctlreg); return 0; } /* * Correct the data using ECC, hw did almost everything for us */ int r852_ecc_correct(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc) { uint16_t ecc_reg; uint8_t ecc_status, err_byte; int i, error = 0; struct r852_device *dev = r852_get_dev(mtd); if (dev->card_unstable) return 0; if (dev->dma_error) { dev->dma_error = 0; return -1; } r852_write_reg(dev, R852_CTL, dev->ctlreg | R852_CTL_ECC_ACCESS); ecc_reg = r852_read_reg_dword(dev, R852_DATALINE); r852_write_reg(dev, R852_CTL, dev->ctlreg); for (i = 0 ; i <= 1 ; i++) { ecc_status = (ecc_reg >> 8) & 0xFF; /* ecc uncorrectable error */ if (ecc_status & R852_ECC_FAIL) { dbg("ecc: unrecoverable error, in half %d", i); error = -1; goto exit; } /* correctable error */ if (ecc_status & R852_ECC_CORRECTABLE) { err_byte = ecc_reg & 0xFF; dbg("ecc: recoverable error, " "in half %d, byte %d, bit %d", i, err_byte, ecc_status & R852_ECC_ERR_BIT_MSK); dat[err_byte] ^= 1 << (ecc_status & R852_ECC_ERR_BIT_MSK); error++; } dat += 256; ecc_reg >>= 16; } exit: return error; } /* * This is copy of nand_read_oob_std * nand_read_oob_syndrome assumes we can send column address - we can't */ static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page, int sndcmd) { if (sndcmd) { chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); sndcmd = 0; } chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); return sndcmd; } /* * Start the nand engine */ void r852_engine_enable(struct r852_device *dev) { if (r852_read_reg_dword(dev, R852_HW) & R852_HW_UNKNOWN) { r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); } else { r852_write_reg_dword(dev, R852_HW, R852_HW_ENABLED); r852_write_reg(dev, R852_CTL, R852_CTL_RESET | R852_CTL_ON); } msleep(300); r852_write_reg(dev, R852_CTL, 0); } /* * Stop the nand engine */ void 
r852_engine_disable(struct r852_device *dev) { r852_write_reg_dword(dev, R852_HW, 0); r852_write_reg(dev, R852_CTL, R852_CTL_RESET); } /* * Test if card is present */ void r852_card_update_present(struct r852_device *dev) { unsigned long flags; uint8_t reg; spin_lock_irqsave(&dev->irqlock, flags); reg = r852_read_reg(dev, R852_CARD_STA); dev->card_detected = !!(reg & R852_CARD_STA_PRESENT); spin_unlock_irqrestore(&dev->irqlock, flags); } /* * Update card detection IRQ state according to current card state * which is read in r852_card_update_present */ void r852_update_card_detect(struct r852_device *dev) { int card_detect_reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE); dev->card_unstable = 0; card_detect_reg &= ~(R852_CARD_IRQ_REMOVE | R852_CARD_IRQ_INSERT); card_detect_reg |= R852_CARD_IRQ_GENABLE; card_detect_reg |= dev->card_detected ? R852_CARD_IRQ_REMOVE : R852_CARD_IRQ_INSERT; r852_write_reg(dev, R852_CARD_IRQ_ENABLE, card_detect_reg); } ssize_t r852_media_type_show(struct device *sys_dev, struct device_attribute *attr, char *buf) { struct mtd_info *mtd = container_of(sys_dev, struct mtd_info, dev); struct r852_device *dev = r852_get_dev(mtd); char *data = dev->sm ? "smartmedia" : "xd"; strcpy(buf, data); return strlen(data); } DEVICE_ATTR(media_type, S_IRUGO, r852_media_type_show, NULL); /* Detect properties of card in slot */ void r852_update_media_status(struct r852_device *dev) { uint8_t reg; unsigned long flags; int readonly; spin_lock_irqsave(&dev->irqlock, flags); if (!dev->card_detected) { message("card removed"); spin_unlock_irqrestore(&dev->irqlock, flags); return ; } readonly = r852_read_reg(dev, R852_CARD_STA) & R852_CARD_STA_RO; reg = r852_read_reg(dev, R852_DMA_CAP); dev->sm = (reg & (R852_DMA1 | R852_DMA2)) && (reg & R852_SMBIT); message("detected %s %s card in slot", dev->sm ? "SmartMedia" : "xD", readonly ? 
"readonly" : "writeable");

	dev->readonly = readonly;
	spin_unlock_irqrestore(&dev->irqlock, flags);
}

/*
 * Register the nand device
 * Called when the card is detected
 *
 * Returns 0 on success, -1 on failure (card redetect is then forced).
 */
int r852_register_nand_device(struct r852_device *dev)
{
	dev->mtd = kzalloc(sizeof(struct mtd_info), GFP_KERNEL);

	if (!dev->mtd)
		goto error1;

	WARN_ON(dev->card_registred);

	dev->mtd->owner = THIS_MODULE;
	dev->mtd->priv = dev->chip;
	dev->mtd->dev.parent = &dev->pci_dev->dev;

	/* a write-protected card is exposed as a read-only NAND */
	if (dev->readonly)
		dev->chip->options |= NAND_ROM;

	r852_engine_enable(dev);

	if (sm_register_device(dev->mtd, dev->sm))
		goto error2;

	/* non-fatal: the device works without the sysfs attribute */
	if (device_create_file(&dev->mtd->dev, &dev_attr_media_type))
		message("can't create media type sysfs attribute");

	dev->card_registred = 1;
	return 0;
error2:
	kfree(dev->mtd);
error1:
	/* Force card redetect */
	dev->card_detected = 0;
	return -1;
}

/*
 * Unregister the card
 */

void r852_unregister_nand_device(struct r852_device *dev)
{
	if (!dev->card_registred)
		return;

	device_remove_file(&dev->mtd->dev, &dev_attr_media_type);
	nand_release(dev->mtd);
	r852_engine_disable(dev);
	dev->card_registred = 0;
	kfree(dev->mtd);
	dev->mtd = NULL;
}

/* Card state updater */
/* Delayed-work handler: re-reads card presence and (un)registers the NAND
   device when the detected state disagrees with the registered state. */
void r852_card_detect_work(struct work_struct *work)
{
	struct r852_device *dev =
		container_of(work, struct r852_device, card_detect_work.work);

	r852_card_update_present(dev);
	r852_update_card_detect(dev);
	dev->card_unstable = 0;

	/* False alarm */
	if (dev->card_detected == dev->card_registred)
		goto exit;

	/* Read media properties */
	r852_update_media_status(dev);

	/* Register the card */
	if (dev->card_detected)
		r852_register_nand_device(dev);
	else
		r852_unregister_nand_device(dev);
exit:
	/* re-arm the detection IRQ for the new state */
	r852_update_card_detect(dev);
}

/* Ack + disable IRQ generation */
static void r852_disable_irqs(struct r852_device *dev)
{
	/* NOTE(review): reg is uint8_t but R852_DMA_IRQ_ENABLE is read as a
	   dword; upper bits are discarded -- confirm only the low byte holds
	   enable bits. */
	uint8_t reg;
	reg = r852_read_reg(dev, R852_CARD_IRQ_ENABLE);
	r852_write_reg(dev, R852_CARD_IRQ_ENABLE, reg & ~R852_CARD_IRQ_MASK);

	reg = r852_read_reg_dword(dev, R852_DMA_IRQ_ENABLE);
	r852_write_reg_dword(dev, R852_DMA_IRQ_ENABLE, reg &
~R852_DMA_IRQ_MASK);

	/* ack any pending card and DMA interrupt status */
	r852_write_reg(dev, R852_CARD_IRQ_STA, R852_CARD_IRQ_MASK);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, R852_DMA_IRQ_MASK);
}

/* Interrupt handler */
/* Handles card insert/remove events first, then the two-stage DMA
   completion dance (device->internal buffer, internal buffer->memory). */
static irqreturn_t r852_irq(int irq, void *data)
{
	struct r852_device *dev = (struct r852_device *)data;

	/* NOTE(review): dma_status is uint8_t but the DMA status register is
	   read as a dword; upper bits are discarded -- confirm the low byte
	   carries all the R852_DMA_IRQ_* flags. */
	uint8_t card_status, dma_status;
	unsigned long flags;
	irqreturn_t ret = IRQ_NONE;

	spin_lock_irqsave(&dev->irqlock, flags);

	/* handle card detection interrupts first */
	card_status = r852_read_reg(dev, R852_CARD_IRQ_STA);
	r852_write_reg(dev, R852_CARD_IRQ_STA, card_status);

	if (card_status & (R852_CARD_IRQ_INSERT|R852_CARD_IRQ_REMOVE)) {

		ret = IRQ_HANDLED;
		dev->card_detected = !!(card_status & R852_CARD_IRQ_INSERT);

		/* we shouldn't receive any interrupts if we wait for card
			to settle */
		WARN_ON(dev->card_unstable);

		/* disable irqs while card is unstable */
		/* this will timeout DMA if active, but better that garbage */
		r852_disable_irqs(dev);

		if (dev->card_unstable)
			goto out;

		/* let, card state to settle a bit, and then do the work */
		dev->card_unstable = 1;
		queue_delayed_work(dev->card_workqueue,
			&dev->card_detect_work, msecs_to_jiffies(100));
		goto out;
	}


	/* Handle dma interrupts */
	dma_status = r852_read_reg_dword(dev, R852_DMA_IRQ_STA);
	r852_write_reg_dword(dev, R852_DMA_IRQ_STA, dma_status);

	if (dma_status & R852_DMA_IRQ_MASK) {

		ret = IRQ_HANDLED;

		if (dma_status & R852_DMA_IRQ_ERROR) {
			dbg("received dma error IRQ");
			r852_dma_done(dev, -EIO);
			complete(&dev->dma_done);
			goto out;
		}

		/* received DMA interrupt out of nowhere? */
		WARN_ON_ONCE(dev->dma_stage == 0);

		if (dev->dma_stage == 0)
			goto out;

		/* done device access */
		if (dev->dma_state == DMA_INTERNAL &&
				(dma_status & R852_DMA_IRQ_INTERNAL)) {

			dev->dma_state = DMA_MEMORY;
			dev->dma_stage++;
		}

		/* done memory DMA */
		if (dev->dma_state == DMA_MEMORY &&
				(dma_status & R852_DMA_IRQ_MEMORY)) {
			dev->dma_state = DMA_INTERNAL;
			dev->dma_stage++;
		}

		/* Enable 2nd half of dma dance */
		if (dev->dma_stage == 2)
			r852_dma_enable(dev);

		/* Operation done */
		if (dev->dma_stage == 3) {
			r852_dma_done(dev, 0);
			complete(&dev->dma_done);
		}
		goto out;
	}

	/* Handle unknown interrupts */
	if (dma_status)
		dbg("bad dma IRQ status = %x", dma_status);

	if (card_status & ~R852_CARD_STA_CD)
		dbg("strange card status = %x", card_status);

out:
	spin_unlock_irqrestore(&dev->irqlock, flags);
	return ret;
}

/* PCI probe: set up DMA, map BARs, allocate the nand_chip and device
   state, then kick an initial card-presence check. The chip itself is
   registered later, on card insert (r852_register_nand_device). */
int  r852_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
{
	int error;
	struct nand_chip *chip;
	struct r852_device *dev;

	/* pci initialization */
	error = pci_enable_device(pci_dev);

	if (error)
		goto error1;

	pci_set_master(pci_dev);

	error = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32));
	if (error)
		goto error2;

	error = pci_request_regions(pci_dev, DRV_NAME);
	if (error)
		goto error3;

	error = -ENOMEM;

	/* init nand chip, but register it only on card insert */
	chip = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);

	if (!chip)
		goto error4;

	/* commands */
	chip->cmd_ctrl = r852_cmdctl;
	chip->waitfunc = r852_wait;
	chip->dev_ready = r852_ready;

	/* I/O */
	chip->read_byte = r852_read_byte;
	chip->read_buf = r852_read_buf;
	chip->write_buf = r852_write_buf;
	chip->verify_buf = r852_verify_buf;

	/* ecc */
	chip->ecc.mode = NAND_ECC_HW_SYNDROME;
	chip->ecc.size = R852_DMA_LEN;
	chip->ecc.bytes = SM_OOB_SIZE;
	chip->ecc.strength = 2;
	chip->ecc.hwctl = r852_ecc_hwctl;
	chip->ecc.calculate = r852_ecc_calculate;
	chip->ecc.correct = r852_ecc_correct;

	/* TODO: hack */
	chip->ecc.read_oob = r852_read_oob;

	/* init our device structure */
	dev = kzalloc(sizeof(struct r852_device), GFP_KERNEL);

	if (!dev)
goto error5;

	chip->priv = dev;
	dev->chip = chip;
	dev->pci_dev = pci_dev;
	pci_set_drvdata(pci_dev, dev);

	/* coherent bounce buffer shared with the DMA engine */
	dev->bounce_buffer = pci_alloc_consistent(pci_dev, R852_DMA_LEN,
		&dev->phys_bounce_buffer);

	if (!dev->bounce_buffer)
		goto error6;


	error = -ENODEV;
	dev->mmio = pci_ioremap_bar(pci_dev, 0);

	if (!dev->mmio)
		goto error7;

	error = -ENOMEM;
	dev->tmp_buffer = kzalloc(SM_SECTOR_SIZE, GFP_KERNEL);

	if (!dev->tmp_buffer)
		goto error8;

	init_completion(&dev->dma_done);

	/* freezable so card-detect work stops across suspend */
	dev->card_workqueue = create_freezable_workqueue(DRV_NAME);

	if (!dev->card_workqueue)
		goto error9;

	INIT_DELAYED_WORK(&dev->card_detect_work, r852_card_detect_work);

	/* shutdown everything - precation */
	r852_engine_disable(dev);
	r852_disable_irqs(dev);

	r852_dma_test(dev);

	dev->irq = pci_dev->irq;
	spin_lock_init(&dev->irqlock);

	dev->card_detected = 0;
	r852_card_update_present(dev);

	/*register irq handler*/
	error = -ENODEV;
	if (request_irq(pci_dev->irq, &r852_irq, IRQF_SHARED,
			  DRV_NAME, dev))
		goto error10;

	/* kick initial present test */
	queue_delayed_work(dev->card_workqueue,
		&dev->card_detect_work, 0);


	printk(KERN_NOTICE DRV_NAME ": driver loaded successfully\n");
	return 0;

	/* unwind in strict reverse order of acquisition */
error10:
	destroy_workqueue(dev->card_workqueue);
error9:
	kfree(dev->tmp_buffer);
error8:
	pci_iounmap(pci_dev, dev->mmio);
error7:
	pci_free_consistent(pci_dev, R852_DMA_LEN,
		dev->bounce_buffer, dev->phys_bounce_buffer);
error6:
	kfree(dev);
error5:
	kfree(chip);
error4:
	pci_release_regions(pci_dev);
error3:
error2:
	pci_disable_device(pci_dev);
error1:
	return error;
}

/* PCI remove: stop detection, unregister the NAND device, quiesce IRQs,
   then free everything probe allocated. */
void r852_remove(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	/* Stop detect workqueue -
		we are going to unregister the device anyway*/
	cancel_delayed_work_sync(&dev->card_detect_work);
	destroy_workqueue(dev->card_workqueue);

	/* Unregister the device, this might make more IO */
	r852_unregister_nand_device(dev);

	/* Stop interrupts */
	r852_disable_irqs(dev);
	synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	/* Cleanup */
	kfree(dev->tmp_buffer);
	pci_iounmap(pci_dev, dev->mmio);
	pci_free_consistent(pci_dev, R852_DMA_LEN,
		dev->bounce_buffer, dev->phys_bounce_buffer);

	kfree(dev->chip);
	kfree(dev);

	/* Shutdown the PCI device */
	pci_release_regions(pci_dev);
	pci_disable_device(pci_dev);
}

/* Quiesce the device for system shutdown (no teardown of allocations). */
void r852_shutdown(struct pci_dev *pci_dev)
{
	struct r852_device *dev = pci_get_drvdata(pci_dev);

	cancel_delayed_work_sync(&dev->card_detect_work);
	r852_disable_irqs(dev);
	synchronize_irq(dev->irq);
	pci_disable_device(pci_dev);
}

#ifdef CONFIG_PM
/* Refuse to suspend while the card is enabled (I/O in flight); otherwise
   stop detection work, IRQs and the engine. */
static int r852_suspend(struct device *device)
{
	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));

	if (dev->ctlreg & R852_CTL_CARDENABLE)
		return -EBUSY;

	/* First make sure the detect work is gone */
	cancel_delayed_work_sync(&dev->card_detect_work);

	/* Turn off the interrupts and stop the device */
	r852_disable_irqs(dev);
	r852_engine_disable(dev);

	/* If card was pulled off just during the suspend, which is very
		unlikely, we will remove it on resume, it too late now
		anyway... */
	dev->card_unstable = 0;
	return 0;
}

/* Re-detect the card after resume; if its presence changed while asleep,
   defer to the detect work, otherwise reset the already-registered chip. */
static int r852_resume(struct device *device)
{
	struct r852_device *dev = pci_get_drvdata(to_pci_dev(device));

	r852_disable_irqs(dev);
	r852_card_update_present(dev);
	r852_engine_disable(dev);


	/* If card status changed, just do the work */
	if (dev->card_detected != dev->card_registred) {
		dbg("card was %s during low power state",
			dev->card_detected ? "added" : "removed");

		queue_delayed_work(dev->card_workqueue,
		&dev->card_detect_work, msecs_to_jiffies(1000));
		return 0;
	}

	/* Otherwise, initialize the card */
	if (dev->card_registred) {
		r852_engine_enable(dev);
		dev->chip->select_chip(dev->mtd, 0);
		dev->chip->cmdfunc(dev->mtd, NAND_CMD_RESET, -1, -1);
		dev->chip->select_chip(dev->mtd, -1);
	}

	/* Program card detection IRQ */
	r852_update_card_detect(dev);
	return 0;
}
#else

#define r852_suspend	NULL
#define r852_resume	NULL

#endif

static const struct pci_device_id r852_pci_id_tbl[] = {

	{ PCI_VDEVICE(RICOH, 0x0852), },
	{ },
};

MODULE_DEVICE_TABLE(pci, r852_pci_id_tbl);

static SIMPLE_DEV_PM_OPS(r852_pm_ops, r852_suspend, r852_resume);

static struct pci_driver r852_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= r852_pci_id_tbl,
	.probe		= r852_probe,
	.remove		= r852_remove,
	.shutdown	= r852_shutdown,
	.driver.pm	= &r852_pm_ops,
};

static __init int r852_module_init(void)
{
	return pci_register_driver(&r852_pci_driver);
}

static void __exit r852_module_exit(void)
{
	pci_unregister_driver(&r852_pci_driver);
}

module_init(r852_module_init);
module_exit(r852_module_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
MODULE_DESCRIPTION("Ricoh 85xx xD/smartmedia card reader driver");
gpl-2.0
Ander-Alvarez/UltraKernel
drivers/media/common/tuners/tuner-types.c
4799
57187
/* * * i2c tv tuner chip device type database. * */ #include <linux/i2c.h> #include <linux/module.h> #include <media/tuner.h> #include <media/tuner-types.h> /* ---------------------------------------------------------------------- */ /* * The floats in the tuner struct are computed at compile time * by gcc and cast back to integers. Thus we don't violate the * "no float in kernel" rule. * * A tuner_range may be referenced by multiple tuner_params structs. * There are many duplicates in here. Reusing tuner_range structs, * rather than defining new ones for each tuner, will cut down on * memory usage, and is preferred when possible. * * Each tuner_params array may contain one or more elements, one * for each video standard. * * FIXME: tuner_params struct contains an element, tda988x. We must * set this for all tuners that contain a tda988x chip, and then we * can remove this setting from the various card structs. * * FIXME: Right now, all tuners are using the first tuner_params[] * array element for analog mode. In the future, we will be merging * similar tuner definitions together, such that each tuner definition * will have a tuner_params struct for each available video standard. * At that point, the tuner_params[] array element will be chosen * based on the video standard in use. 
*/ /* The following was taken from dvb-pll.c: */ /* Set AGC TOP value to 103 dBuV: * 0x80 = Control Byte * 0x40 = 250 uA charge pump (irrelevant) * 0x18 = Aux Byte to follow * 0x06 = 64.5 kHz divider (irrelevant) * 0x01 = Disable Vt (aka sleep) * * 0x00 = AGC Time constant 2s Iagc = 300 nA (vs 0x80 = 9 nA) * 0x50 = AGC Take over point = 103 dBuV */ static u8 tua603x_agc103[] = { 2, 0x80|0x40|0x18|0x06|0x01, 0x00|0x50 }; /* 0x04 = 166.67 kHz divider * * 0x80 = AGC Time constant 50ms Iagc = 9 uA * 0x20 = AGC Take over point = 112 dBuV */ static u8 tua603x_agc112[] = { 2, 0x80|0x40|0x18|0x04|0x01, 0x80|0x20 }; /* 0-9 */ /* ------------ TUNER_TEMIC_PAL - TEMIC PAL ------------ */ static struct tuner_range tuner_temic_pal_ranges[] = { { 16 * 140.25 /*MHz*/, 0x8e, 0x02, }, { 16 * 463.25 /*MHz*/, 0x8e, 0x04, }, { 16 * 999.99 , 0x8e, 0x01, }, }; static struct tuner_params tuner_temic_pal_params[] = { { .type = TUNER_PARAM_TYPE_PAL, .ranges = tuner_temic_pal_ranges, .count = ARRAY_SIZE(tuner_temic_pal_ranges), }, }; /* ------------ TUNER_PHILIPS_PAL_I - Philips PAL_I ------------ */ static struct tuner_range tuner_philips_pal_i_ranges[] = { { 16 * 140.25 /*MHz*/, 0x8e, 0xa0, }, { 16 * 463.25 /*MHz*/, 0x8e, 0x90, }, { 16 * 999.99 , 0x8e, 0x30, }, }; static struct tuner_params tuner_philips_pal_i_params[] = { { .type = TUNER_PARAM_TYPE_PAL, .ranges = tuner_philips_pal_i_ranges, .count = ARRAY_SIZE(tuner_philips_pal_i_ranges), }, }; /* ------------ TUNER_PHILIPS_NTSC - Philips NTSC ------------ */ static struct tuner_range tuner_philips_ntsc_ranges[] = { { 16 * 157.25 /*MHz*/, 0x8e, 0xa0, }, { 16 * 451.25 /*MHz*/, 0x8e, 0x90, }, { 16 * 999.99 , 0x8e, 0x30, }, }; static struct tuner_params tuner_philips_ntsc_params[] = { { .type = TUNER_PARAM_TYPE_NTSC, .ranges = tuner_philips_ntsc_ranges, .count = ARRAY_SIZE(tuner_philips_ntsc_ranges), .cb_first_if_lower_freq = 1, }, }; /* ------------ TUNER_PHILIPS_SECAM - Philips SECAM ------------ */ static struct tuner_range 
tuner_philips_secam_ranges[] = {
	{ 16 * 168.25 /*MHz*/, 0x8e, 0xa7, },
	{ 16 * 447.25 /*MHz*/, 0x8e, 0x97, },
	{ 16 * 999.99 , 0x8e, 0x37, },
};

static struct tuner_params tuner_philips_secam_params[] = {
	{
		.type = TUNER_PARAM_TYPE_SECAM,
		.ranges = tuner_philips_secam_ranges,
		.count = ARRAY_SIZE(tuner_philips_secam_ranges),
		.cb_first_if_lower_freq = 1,
	},
};

/* ------------ TUNER_PHILIPS_PAL - Philips PAL ------------ */

static struct tuner_range tuner_philips_pal_ranges[] = {
	{ 16 * 168.25 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 447.25 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

static struct tuner_params tuner_philips_pal_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_philips_pal_ranges,
		.count = ARRAY_SIZE(tuner_philips_pal_ranges),
		.cb_first_if_lower_freq = 1,
	},
};

/* ------------ TUNER_TEMIC_NTSC - TEMIC NTSC ------------ */

static struct tuner_range tuner_temic_ntsc_ranges[] = {
	{ 16 * 157.25 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 463.25 /*MHz*/, 0x8e, 0x04, },
	{ 16 * 999.99 , 0x8e, 0x01, },
};

static struct tuner_params tuner_temic_ntsc_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_temic_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_temic_ntsc_ranges),
	},
};

/* ------------ TUNER_TEMIC_PAL_I - TEMIC PAL_I ------------ */

static struct tuner_range tuner_temic_pal_i_ranges[] = {
	{ 16 * 170.00 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 450.00 /*MHz*/, 0x8e, 0x04, },
	{ 16 * 999.99 , 0x8e, 0x01, },
};

static struct tuner_params tuner_temic_pal_i_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_temic_pal_i_ranges,
		.count = ARRAY_SIZE(tuner_temic_pal_i_ranges),
	},
};

/* ------------ TUNER_TEMIC_4036FY5_NTSC - TEMIC NTSC ------------ */

static struct tuner_range tuner_temic_4036fy5_ntsc_ranges[] = {
	{ 16 * 157.25 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 463.25 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

static struct tuner_params tuner_temic_4036fy5_ntsc_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_temic_4036fy5_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_temic_4036fy5_ntsc_ranges),
	},
};

/* ------------ TUNER_ALPS_TSBH1_NTSC - TEMIC NTSC ------------ */

static struct tuner_range tuner_alps_tsb_1_ranges[] = {
	{ 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
	{ 16 * 385.25 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 999.99 , 0x8e, 0x08, },
};

static struct tuner_params tuner_alps_tsbh1_ntsc_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_alps_tsb_1_ranges,
		.count = ARRAY_SIZE(tuner_alps_tsb_1_ranges),
	},
};

/* 10-19 */
/* ------------ TUNER_ALPS_TSBE1_PAL - TEMIC PAL ------------ */

/* reuses tuner_alps_tsb_1_ranges above (shared with the TSBH1) */
static struct tuner_params tuner_alps_tsb_1_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_alps_tsb_1_ranges,
		.count = ARRAY_SIZE(tuner_alps_tsb_1_ranges),
	},
};

/* ------------ TUNER_ALPS_TSBB5_PAL_I - Alps PAL_I ------------ */

static struct tuner_range tuner_alps_tsb_5_pal_ranges[] = {
	{ 16 * 133.25 /*MHz*/, 0x8e, 0x01, },
	{ 16 * 351.25 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 999.99 , 0x8e, 0x08, },
};

static struct tuner_params tuner_alps_tsbb5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_alps_tsb_5_pal_ranges,
		.count = ARRAY_SIZE(tuner_alps_tsb_5_pal_ranges),
	},
};

/* ------------ TUNER_ALPS_TSBE5_PAL - Alps PAL ------------ */

static struct tuner_params tuner_alps_tsbe5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_alps_tsb_5_pal_ranges,
		.count = ARRAY_SIZE(tuner_alps_tsb_5_pal_ranges),
	},
};

/* ------------ TUNER_ALPS_TSBC5_PAL - Alps PAL ------------ */

static struct tuner_params tuner_alps_tsbc5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_alps_tsb_5_pal_ranges,
		.count = ARRAY_SIZE(tuner_alps_tsb_5_pal_ranges),
	},
};

/* ------------ TUNER_TEMIC_4006FH5_PAL - TEMIC PAL ------------ */

static struct tuner_range tuner_lg_pal_ranges[] = {
	{ 16 * 170.00 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 450.00 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

static struct tuner_params tuner_temic_4006fh5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_lg_pal_ranges,
		.count = ARRAY_SIZE(tuner_lg_pal_ranges),
	},
};

/* ------------ TUNER_ALPS_TSHC6_NTSC - Alps NTSC ------------ */

static struct tuner_range tuner_alps_tshc6_ntsc_ranges[] = {
	{ 16 * 137.25 /*MHz*/, 0x8e, 0x14, },
	{ 16 * 385.25 /*MHz*/, 0x8e, 0x12, },
	{ 16 * 999.99 , 0x8e, 0x11, },
};

static struct tuner_params tuner_alps_tshc6_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_alps_tshc6_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_alps_tshc6_ntsc_ranges),
	},
};

/* ------------ TUNER_TEMIC_PAL_DK - TEMIC PAL ------------ */

static struct tuner_range tuner_temic_pal_dk_ranges[] = {
	{ 16 * 168.25 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 456.25 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

static struct tuner_params tuner_temic_pal_dk_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_temic_pal_dk_ranges,
		.count = ARRAY_SIZE(tuner_temic_pal_dk_ranges),
	},
};

/* ------------ TUNER_PHILIPS_NTSC_M - Philips NTSC ------------ */

static struct tuner_range tuner_philips_ntsc_m_ranges[] = {
	{ 16 * 160.00 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 454.00 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

static struct tuner_params tuner_philips_ntsc_m_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_philips_ntsc_m_ranges,
		.count = ARRAY_SIZE(tuner_philips_ntsc_m_ranges),
	},
};

/* ------------ TUNER_TEMIC_4066FY5_PAL_I - TEMIC PAL_I ------------ */

static struct tuner_range tuner_temic_40x6f_5_pal_ranges[] = {
	{ 16 * 169.00 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 454.00 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

static struct tuner_params tuner_temic_4066fy5_pal_i_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_temic_40x6f_5_pal_ranges,
		.count = ARRAY_SIZE(tuner_temic_40x6f_5_pal_ranges),
	},
};

/* ------------ TUNER_TEMIC_4006FN5_MULTI_PAL - TEMIC PAL ------------ */

static struct tuner_params tuner_temic_4006fn5_multi_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_temic_40x6f_5_pal_ranges,
		.count = ARRAY_SIZE(tuner_temic_40x6f_5_pal_ranges),
	},
};

/* 20-29 */
/* ------------ TUNER_TEMIC_4009FR5_PAL - TEMIC PAL ------------ */

static struct tuner_range tuner_temic_4009f_5_pal_ranges[] = {
	{ 16 * 141.00 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 464.00 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

static struct tuner_params tuner_temic_4009f_5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_temic_4009f_5_pal_ranges,
		.count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges),
	},
};

/* ------------ TUNER_TEMIC_4039FR5_NTSC - TEMIC NTSC ------------ */

static struct tuner_range tuner_temic_4x3x_f_5_ntsc_ranges[] = {
	{ 16 * 158.00 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 453.00 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

static struct tuner_params tuner_temic_4039fr5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_temic_4x3x_f_5_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_temic_4x3x_f_5_ntsc_ranges),
	},
};

/* ------------ TUNER_TEMIC_4046FM5 - TEMIC PAL ------------ */

static struct tuner_params tuner_temic_4046fm5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_temic_40x6f_5_pal_ranges,
		.count = ARRAY_SIZE(tuner_temic_40x6f_5_pal_ranges),
	},
};

/* ------------ TUNER_PHILIPS_PAL_DK - Philips PAL ------------ */

static struct tuner_params tuner_philips_pal_dk_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_lg_pal_ranges,
		.count = ARRAY_SIZE(tuner_lg_pal_ranges),
	},
};

/* ------------ TUNER_PHILIPS_FQ1216ME - Philips PAL ------------ */

static struct tuner_params tuner_philips_fq1216me_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_lg_pal_ranges,
		.count = ARRAY_SIZE(tuner_lg_pal_ranges),
		.has_tda9887 = 1,
		.port1_active = 1,
		.port2_active = 1,
		.port2_invert_for_secam_lc = 1,
	},
};

/* ------------ TUNER_LG_PAL_I_FM - LGINNOTEK PAL_I ------------ */

static struct tuner_params tuner_lg_pal_i_fm_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_lg_pal_ranges,
		.count = ARRAY_SIZE(tuner_lg_pal_ranges),
	},
};

/* ------------ TUNER_LG_PAL_I - LGINNOTEK PAL_I ------------ */

static struct tuner_params tuner_lg_pal_i_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_lg_pal_ranges,
		.count = ARRAY_SIZE(tuner_lg_pal_ranges),
	},
};

/* ------------ TUNER_LG_NTSC_FM - LGINNOTEK NTSC ------------ */

static struct tuner_range tuner_lg_ntsc_fm_ranges[] = {
	{ 16 * 210.00 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 497.00 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

static struct tuner_params tuner_lg_ntsc_fm_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_lg_ntsc_fm_ranges,
		.count = ARRAY_SIZE(tuner_lg_ntsc_fm_ranges),
	},
};

/* ------------ TUNER_LG_PAL_FM - LGINNOTEK PAL ------------ */

static struct tuner_params tuner_lg_pal_fm_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_lg_pal_ranges,
		.count = ARRAY_SIZE(tuner_lg_pal_ranges),
	},
};

/* ------------ TUNER_LG_PAL - LGINNOTEK PAL ------------ */

static struct tuner_params tuner_lg_pal_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_lg_pal_ranges,
		.count = ARRAY_SIZE(tuner_lg_pal_ranges),
	},
};

/* 30-39 */
/* ------------ TUNER_TEMIC_4009FN5_MULTI_PAL_FM - TEMIC PAL ------------ */

static struct tuner_params tuner_temic_4009_fn5_multi_pal_fm_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_temic_4009f_5_pal_ranges,
		.count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges),
	},
};

/* ------------ TUNER_SHARP_2U5JF5540_NTSC - SHARP NTSC ------------ */

static struct tuner_range tuner_sharp_2u5jf5540_ntsc_ranges[] = {
	{ 16 * 137.25 /*MHz*/, 0x8e, 0x01, },
	{ 16 * 317.25 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 999.99 , 0x8e, 0x08, },
};

static struct tuner_params tuner_sharp_2u5jf5540_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_sharp_2u5jf5540_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_sharp_2u5jf5540_ntsc_ranges),
	},
};

/* ------------ TUNER_Samsung_PAL_TCPM9091PD27 - Samsung PAL ------------ */

static struct tuner_range tuner_samsung_pal_tcpm9091pd27_ranges[] = {
	{ 16 * 169 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 464 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

static struct tuner_params tuner_samsung_pal_tcpm9091pd27_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_samsung_pal_tcpm9091pd27_ranges,
		.count = ARRAY_SIZE(tuner_samsung_pal_tcpm9091pd27_ranges),
	},
};

/* ------------ TUNER_TEMIC_4106FH5 - TEMIC PAL ------------ */

static struct tuner_params tuner_temic_4106fh5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_temic_4009f_5_pal_ranges,
		.count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges),
	},
};

/* ------------ TUNER_TEMIC_4012FY5 - TEMIC PAL ------------ */

static struct tuner_params tuner_temic_4012fy5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_temic_pal_ranges,
		.count = ARRAY_SIZE(tuner_temic_pal_ranges),
	},
};

/* ------------ TUNER_TEMIC_4136FY5 - TEMIC NTSC ------------ */

static struct tuner_params tuner_temic_4136_fy5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_temic_4x3x_f_5_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_temic_4x3x_f_5_ntsc_ranges),
	},
};

/* ------------ TUNER_LG_PAL_NEW_TAPC - LGINNOTEK PAL ------------ */

static struct tuner_range tuner_lg_new_tapc_ranges[] = {
	{ 16 * 170.00 /*MHz*/, 0x8e, 0x01, },
	{ 16 * 450.00 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 999.99 , 0x8e, 0x08, },
};

static struct tuner_params tuner_lg_pal_new_tapc_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_lg_new_tapc_ranges,
		.count = ARRAY_SIZE(tuner_lg_new_tapc_ranges),
	},
};

/* ------------ TUNER_PHILIPS_FM1216ME_MK3 - Philips PAL ------------ */

static struct tuner_range tuner_fm1216me_mk3_pal_ranges[] = {
	{ 16 * 158.00 /*MHz*/, 0x8e, 0x01, },
	{ 16 * 442.00 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 999.99 , 0x8e, 0x04, },
};

static struct tuner_params tuner_fm1216me_mk3_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_fm1216me_mk3_pal_ranges,
		.count = ARRAY_SIZE(tuner_fm1216me_mk3_pal_ranges),
		.cb_first_if_lower_freq = 1,
		.has_tda9887 = 1,
		.port1_active = 1,
		.port2_active = 1,
		.port2_invert_for_secam_lc = 1,
		.port1_fm_high_sensitivity = 1,
		.default_top_mid = -2,
		.default_top_secam_mid = -2,
		.default_top_secam_high = -2,
	},
};

/* ------------ TUNER_PHILIPS_FM1216MK5 - Philips PAL ------------ */

static struct tuner_range tuner_fm1216mk5_pal_ranges[] = {
	{ 16 * 158.00 /*MHz*/, 0xce, 0x01, },
	{ 16 * 441.00 /*MHz*/, 0xce, 0x02, },
	{ 16 * 864.00 , 0xce, 0x04, },
};

static struct tuner_params tuner_fm1216mk5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_fm1216mk5_pal_ranges,
		.count = ARRAY_SIZE(tuner_fm1216mk5_pal_ranges),
		.cb_first_if_lower_freq = 1,
		.has_tda9887 = 1,
		.port1_active = 1,
		.port2_active = 1,
		.port2_invert_for_secam_lc = 1,
		.port1_fm_high_sensitivity = 1,
		.default_top_mid = -2,
		.default_top_secam_mid = -2,
		.default_top_secam_high = -2,
	},
};

/* ------------ TUNER_LG_NTSC_NEW_TAPC - LGINNOTEK NTSC ------------ */

static struct tuner_params tuner_lg_ntsc_new_tapc_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_lg_new_tapc_ranges,
		.count = ARRAY_SIZE(tuner_lg_new_tapc_ranges),
	},
};

/* 40-49 */
/* ------------ TUNER_HITACHI_NTSC - HITACHI NTSC ------------ */

static struct tuner_params tuner_hitachi_ntsc_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_lg_new_tapc_ranges,
		.count = ARRAY_SIZE(tuner_lg_new_tapc_ranges),
	},
};

/* ------------ TUNER_PHILIPS_PAL_MK - Philips PAL ------------ */

static struct tuner_range tuner_philips_pal_mk_pal_ranges[] = {
	{ 16 * 140.25 /*MHz*/, 0x8e, 0x01, },
	{ 16 * 463.25 /*MHz*/, 0x8e, 0xc2, },
	{ 16 * 999.99 , 0x8e, 0xcf, },
};

static struct tuner_params tuner_philips_pal_mk_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_philips_pal_mk_pal_ranges,
		.count = ARRAY_SIZE(tuner_philips_pal_mk_pal_ranges),
	},
};

/* ---- TUNER_PHILIPS_FCV1236D - Philips FCV1236D (ATSC/NTSC) ---- */

static struct tuner_range tuner_philips_fcv1236d_ntsc_ranges[] = {
	{ 16 * 157.25 /*MHz*/, 0x8e, 0xa2, },
	{ 16 * 451.25 /*MHz*/, 0x8e, 0x92, },
	{ 16 * 999.99 , 0x8e, 0x32, },
};

static struct tuner_range tuner_philips_fcv1236d_atsc_ranges[] = {
	{ 16 * 159.00 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 453.00 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

/* hybrid tuner: analog NTSC entry plus a digital entry with its own IF */
static struct tuner_params tuner_philips_fcv1236d_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_philips_fcv1236d_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_philips_fcv1236d_ntsc_ranges),
	},
	{
		.type = TUNER_PARAM_TYPE_DIGITAL,
		.ranges = tuner_philips_fcv1236d_atsc_ranges,
		.count = ARRAY_SIZE(tuner_philips_fcv1236d_atsc_ranges),
		.iffreq = 16 * 44.00,
	},
};

/* ------------ TUNER_PHILIPS_FM1236_MK3 - Philips NTSC ------------ */

static struct tuner_range tuner_fm1236_mk3_ntsc_ranges[] = {
	{ 16 * 160.00 /*MHz*/, 0x8e, 0x01, },
	{ 16 * 442.00 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 999.99 , 0x8e, 0x04, },
};

static struct tuner_params tuner_fm1236_mk3_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_fm1236_mk3_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges),
		.cb_first_if_lower_freq = 1,
		.has_tda9887 = 1,
		.port1_active = 1,
		.port2_active = 1,
		.port1_fm_high_sensitivity = 1,
	},
};

/* ------------ TUNER_PHILIPS_4IN1 - Philips NTSC ------------ */

static struct tuner_params tuner_philips_4in1_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_fm1236_mk3_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges),
	},
};

/* ------------ TUNER_MICROTUNE_4049FM5 - Microtune PAL ------------ */

static struct tuner_params tuner_microtune_4049_fm5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_temic_4009f_5_pal_ranges,
		.count = ARRAY_SIZE(tuner_temic_4009f_5_pal_ranges),
		.has_tda9887 = 1,
		.port1_invert_for_secam_lc = 1,
		.default_pll_gating_18 = 1,
		.fm_gain_normal=1,
		.radio_if = 1, /* 33.3 MHz */
	},
};

/* ------------ TUNER_PANASONIC_VP27 - Panasonic NTSC ------------ */

static struct tuner_range tuner_panasonic_vp27_ntsc_ranges[] = {
	{ 16 * 160.00 /*MHz*/, 0xce, 0x01, },
	{ 16 * 454.00 /*MHz*/, 0xce, 0x02, },
	{ 16 * 999.99 ,
0xce, 0x08, },
};

static struct tuner_params tuner_panasonic_vp27_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_panasonic_vp27_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_panasonic_vp27_ntsc_ranges),
		.has_tda9887 = 1,
		.intercarrier_mode = 1,
		.default_top_low = -3,
		.default_top_mid = -3,
		.default_top_high = -3,
	},
};

/* ------------ TUNER_TNF_8831BGFF - Philips PAL ------------ */

static struct tuner_range tuner_tnf_8831bgff_pal_ranges[] = {
	{ 16 * 161.25 /*MHz*/, 0x8e, 0xa0, },
	{ 16 * 463.25 /*MHz*/, 0x8e, 0x90, },
	{ 16 * 999.99 , 0x8e, 0x30, },
};

static struct tuner_params tuner_tnf_8831bgff_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_tnf_8831bgff_pal_ranges,
		.count = ARRAY_SIZE(tuner_tnf_8831bgff_pal_ranges),
	},
};

/* ------------ TUNER_MICROTUNE_4042FI5 - Microtune NTSC ------------ */

static struct tuner_range tuner_microtune_4042fi5_ntsc_ranges[] = {
	{ 16 * 162.00 /*MHz*/, 0x8e, 0xa2, },
	{ 16 * 457.00 /*MHz*/, 0x8e, 0x94, },
	{ 16 * 999.99 , 0x8e, 0x31, },
};

static struct tuner_range tuner_microtune_4042fi5_atsc_ranges[] = {
	{ 16 * 162.00 /*MHz*/, 0x8e, 0xa1, },
	{ 16 * 457.00 /*MHz*/, 0x8e, 0x91, },
	{ 16 * 999.99 , 0x8e, 0x31, },
};

static struct tuner_params tuner_microtune_4042fi5_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_microtune_4042fi5_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_microtune_4042fi5_ntsc_ranges),
	},
	{
		.type = TUNER_PARAM_TYPE_DIGITAL,
		.ranges = tuner_microtune_4042fi5_atsc_ranges,
		.count = ARRAY_SIZE(tuner_microtune_4042fi5_atsc_ranges),
		.iffreq = 16 * 44.00 /*MHz*/,
	},
};

/* 50-59 */
/* ------------ TUNER_TCL_2002N - TCL NTSC ------------ */

static struct tuner_range tuner_tcl_2002n_ntsc_ranges[] = {
	{ 16 * 172.00 /*MHz*/, 0x8e, 0x01, },
	{ 16 * 448.00 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 999.99 , 0x8e, 0x08, },
};

static struct tuner_params tuner_tcl_2002n_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_tcl_2002n_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_tcl_2002n_ntsc_ranges),
		.cb_first_if_lower_freq = 1,
	},
};

/* ------------ TUNER_PHILIPS_FM1256_IH3 - Philips PAL ------------ */

static struct tuner_params tuner_philips_fm1256_ih3_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_fm1236_mk3_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges),
		.radio_if = 1, /* 33.3 MHz */
	},
};

/* ------------ TUNER_THOMSON_DTT7610 - THOMSON ATSC ------------ */

/* single range used for both ntsc and atsc */
static struct tuner_range tuner_thomson_dtt7610_ntsc_ranges[] = {
	{ 16 * 157.25 /*MHz*/, 0x8e, 0x39, },
	{ 16 * 454.00 /*MHz*/, 0x8e, 0x3a, },
	{ 16 * 999.99 , 0x8e, 0x3c, },
};

static struct tuner_params tuner_thomson_dtt7610_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_thomson_dtt7610_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_thomson_dtt7610_ntsc_ranges),
	},
	{
		.type = TUNER_PARAM_TYPE_DIGITAL,
		.ranges = tuner_thomson_dtt7610_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_thomson_dtt7610_ntsc_ranges),
		.iffreq = 16 * 44.00 /*MHz*/,
	},
};

/* ------------ TUNER_PHILIPS_FQ1286 - Philips NTSC ------------ */

static struct tuner_range tuner_philips_fq1286_ntsc_ranges[] = {
	{ 16 * 160.00 /*MHz*/, 0x8e, 0x41, },
	{ 16 * 454.00 /*MHz*/, 0x8e, 0x42, },
	{ 16 * 999.99 , 0x8e, 0x04, },
};

static struct tuner_params tuner_philips_fq1286_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_philips_fq1286_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_philips_fq1286_ntsc_ranges),
	},
};

/* ------------ TUNER_TCL_2002MB - TCL PAL ------------ */

static struct tuner_range tuner_tcl_2002mb_pal_ranges[] = {
	{ 16 * 170.00 /*MHz*/, 0xce, 0x01, },
	{ 16 * 450.00 /*MHz*/, 0xce, 0x02, },
	{ 16 * 999.99 , 0xce, 0x08, },
};

static struct tuner_params tuner_tcl_2002mb_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_tcl_2002mb_pal_ranges,
		.count = ARRAY_SIZE(tuner_tcl_2002mb_pal_ranges),
	},
};

/* ------------ TUNER_PHILIPS_FQ1216AME_MK4 - Philips PAL ------------ */

static struct tuner_range tuner_philips_fq12_6a___mk4_pal_ranges[] = {
	{ 16 * 160.00 /*MHz*/, 0xce, 0x01, },
	{ 16 * 442.00 /*MHz*/, 0xce, 0x02, },
	{ 16 * 999.99 , 0xce, 0x04, },
};

static struct tuner_params tuner_philips_fq1216ame_mk4_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_philips_fq12_6a___mk4_pal_ranges,
		.count = ARRAY_SIZE(tuner_philips_fq12_6a___mk4_pal_ranges),
		.has_tda9887 = 1,
		.port1_active = 1,
		.port2_invert_for_secam_lc = 1,
		.default_top_mid = -2,
		.default_top_secam_low = -2,
		.default_top_secam_mid = -2,
		.default_top_secam_high = -2,
	},
};

/* ------------ TUNER_PHILIPS_FQ1236A_MK4 - Philips NTSC ------------ */

static struct tuner_params tuner_philips_fq1236a_mk4_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_fm1236_mk3_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges),
	},
};

/* ------------ TUNER_YMEC_TVF_8531MF - Philips NTSC ------------ */

static struct tuner_params tuner_ymec_tvf_8531mf_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_philips_ntsc_m_ranges,
		.count = ARRAY_SIZE(tuner_philips_ntsc_m_ranges),
	},
};

/* ------------ TUNER_YMEC_TVF_5533MF - Philips NTSC ------------ */

static struct tuner_range tuner_ymec_tvf_5533mf_ntsc_ranges[] = {
	{ 16 * 160.00 /*MHz*/, 0x8e, 0x01, },
	{ 16 * 454.00 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 999.99 , 0x8e, 0x04, },
};

static struct tuner_params tuner_ymec_tvf_5533mf_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_ymec_tvf_5533mf_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_ymec_tvf_5533mf_ntsc_ranges),
	},
};

/* 60-69 */
/* ------------ TUNER_THOMSON_DTT761X - THOMSON ATSC ------------ */
/* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */

static struct tuner_range tuner_thomson_dtt761x_ntsc_ranges[] = {
	{ 16 * 145.25 /*MHz*/, 0x8e, 0x39, },
	{ 16 * 415.25 /*MHz*/, 0x8e, 0x3a, },
	{ 16 * 999.99 , 0x8e, 0x3c, },
};

static struct tuner_range tuner_thomson_dtt761x_atsc_ranges[] = {
	{ 16 * 147.00 /*MHz*/, 0x8e, 0x39, },
	{ 16 * 417.00 /*MHz*/, 0x8e, 0x3a, },
	{ 16 * 999.99 , 0x8e, 0x3c, },
};

static struct tuner_params tuner_thomson_dtt761x_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_thomson_dtt761x_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_thomson_dtt761x_ntsc_ranges),
		.has_tda9887 = 1,
		.fm_gain_normal = 1,
		.radio_if = 2, /* 41.3 MHz */
	},
	{
		.type = TUNER_PARAM_TYPE_DIGITAL,
		.ranges = tuner_thomson_dtt761x_atsc_ranges,
		.count = ARRAY_SIZE(tuner_thomson_dtt761x_atsc_ranges),
		.iffreq = 16 * 44.00, /*MHz*/
	},
};

/* ------------ TUNER_TENA_9533_DI - Philips PAL ------------ */

static struct tuner_range tuner_tena_9533_di_pal_ranges[] = {
	{ 16 * 160.25 /*MHz*/, 0x8e, 0x01, },
	{ 16 * 464.25 /*MHz*/, 0x8e, 0x02, },
	{ 16 * 999.99 , 0x8e, 0x04, },
};

static struct tuner_params tuner_tena_9533_di_params[] = {
	{
		.type = TUNER_PARAM_TYPE_PAL,
		.ranges = tuner_tena_9533_di_pal_ranges,
		.count = ARRAY_SIZE(tuner_tena_9533_di_pal_ranges),
	},
};

/* ------------ TUNER_TENA_TNF_5337 - Tena tnf5337MFD STD M/N ------------ */

static struct tuner_range tuner_tena_tnf_5337_ntsc_ranges[] = {
	{ 16 * 166.25 /*MHz*/, 0x86, 0x01, },
	{ 16 * 466.25 /*MHz*/, 0x86, 0x02, },
	{ 16 * 999.99 , 0x86, 0x08, },
};

static struct tuner_params tuner_tena_tnf_5337_params[] = {
	{
		.type = TUNER_PARAM_TYPE_NTSC,
		.ranges = tuner_tena_tnf_5337_ntsc_ranges,
		.count = ARRAY_SIZE(tuner_tena_tnf_5337_ntsc_ranges),
	},
};

/* ------------ TUNER_PHILIPS_FMD1216ME(X)_MK3 - Philips PAL ------------ */

static struct tuner_range tuner_philips_fmd1216me_mk3_pal_ranges[] = {
	{ 16 * 160.00 /*MHz*/, 0x86, 0x51, },
	{ 16 * 442.00 /*MHz*/, 0x86, 0x52, },
	{ 16 * 999.99 , 0x86, 0x54, },
};

static struct tuner_range tuner_philips_fmd1216me_mk3_dvb_ranges[] = {
	{ 16 * 143.87 /*MHz*/, 0xbc, 0x41 },
	{ 16 * 158.87 /*MHz*/, 0xf4, 0x41 },
	{ 16 * 329.87 /*MHz*/, 0xbc, 0x42 },
	{ 16 * 441.87 /*MHz*/, 0xf4, 0x42 },
	{ 16 * 625.87 /*MHz*/, 0xbc, 0x44 },
	{ 16 * 803.87 /*MHz*/, 0xf4, 0x44 },
	{ 16 * 999.99 , 0xfc, 0x44 },
};

/* definition continues past this chunk */
static struct tuner_params tuner_philips_fmd1216me_mk3_params[] = { { .type = TUNER_PARAM_TYPE_PAL, .ranges =
tuner_philips_fmd1216me_mk3_pal_ranges, .count = ARRAY_SIZE(tuner_philips_fmd1216me_mk3_pal_ranges), .has_tda9887 = 1, .port1_active = 1, .port2_active = 1, .port2_fm_high_sensitivity = 1, .port2_invert_for_secam_lc = 1, .port1_set_for_fm_mono = 1, }, { .type = TUNER_PARAM_TYPE_DIGITAL, .ranges = tuner_philips_fmd1216me_mk3_dvb_ranges, .count = ARRAY_SIZE(tuner_philips_fmd1216me_mk3_dvb_ranges), .iffreq = 16 * 36.125, /*MHz*/ }, }; static struct tuner_params tuner_philips_fmd1216mex_mk3_params[] = { { .type = TUNER_PARAM_TYPE_PAL, .ranges = tuner_philips_fmd1216me_mk3_pal_ranges, .count = ARRAY_SIZE(tuner_philips_fmd1216me_mk3_pal_ranges), .has_tda9887 = 1, .port1_active = 1, .port2_active = 1, .port2_fm_high_sensitivity = 1, .port2_invert_for_secam_lc = 1, .port1_set_for_fm_mono = 1, .radio_if = 1, .fm_gain_normal = 1, }, { .type = TUNER_PARAM_TYPE_DIGITAL, .ranges = tuner_philips_fmd1216me_mk3_dvb_ranges, .count = ARRAY_SIZE(tuner_philips_fmd1216me_mk3_dvb_ranges), .iffreq = 16 * 36.125, /*MHz*/ }, }; /* ------ TUNER_LG_TDVS_H06XF - LG INNOTEK / INFINEON ATSC ----- */ static struct tuner_range tuner_tua6034_ntsc_ranges[] = { { 16 * 165.00 /*MHz*/, 0x8e, 0x01 }, { 16 * 450.00 /*MHz*/, 0x8e, 0x02 }, { 16 * 999.99 , 0x8e, 0x04 }, }; static struct tuner_range tuner_tua6034_atsc_ranges[] = { { 16 * 165.00 /*MHz*/, 0xce, 0x01 }, { 16 * 450.00 /*MHz*/, 0xce, 0x02 }, { 16 * 999.99 , 0xce, 0x04 }, }; static struct tuner_params tuner_lg_tdvs_h06xf_params[] = { { .type = TUNER_PARAM_TYPE_NTSC, .ranges = tuner_tua6034_ntsc_ranges, .count = ARRAY_SIZE(tuner_tua6034_ntsc_ranges), }, { .type = TUNER_PARAM_TYPE_DIGITAL, .ranges = tuner_tua6034_atsc_ranges, .count = ARRAY_SIZE(tuner_tua6034_atsc_ranges), .iffreq = 16 * 44.00, }, }; /* ------------ TUNER_YMEC_TVF66T5_B_DFF - Philips PAL ------------ */ static struct tuner_range tuner_ymec_tvf66t5_b_dff_pal_ranges[] = { { 16 * 160.25 /*MHz*/, 0x8e, 0x01, }, { 16 * 464.25 /*MHz*/, 0x8e, 0x02, }, { 16 * 999.99 , 0x8e, 0x08, }, }; 
static struct tuner_params tuner_ymec_tvf66t5_b_dff_params[] = { { .type = TUNER_PARAM_TYPE_PAL, .ranges = tuner_ymec_tvf66t5_b_dff_pal_ranges, .count = ARRAY_SIZE(tuner_ymec_tvf66t5_b_dff_pal_ranges), }, }; /* ------------ TUNER_LG_NTSC_TALN_MINI - LGINNOTEK NTSC ------------ */ static struct tuner_range tuner_lg_taln_ntsc_ranges[] = { { 16 * 137.25 /*MHz*/, 0x8e, 0x01, }, { 16 * 373.25 /*MHz*/, 0x8e, 0x02, }, { 16 * 999.99 , 0x8e, 0x08, }, }; static struct tuner_range tuner_lg_taln_pal_secam_ranges[] = { { 16 * 150.00 /*MHz*/, 0x8e, 0x01, }, { 16 * 425.00 /*MHz*/, 0x8e, 0x02, }, { 16 * 999.99 , 0x8e, 0x08, }, }; static struct tuner_params tuner_lg_taln_params[] = { { .type = TUNER_PARAM_TYPE_NTSC, .ranges = tuner_lg_taln_ntsc_ranges, .count = ARRAY_SIZE(tuner_lg_taln_ntsc_ranges), },{ .type = TUNER_PARAM_TYPE_PAL, .ranges = tuner_lg_taln_pal_secam_ranges, .count = ARRAY_SIZE(tuner_lg_taln_pal_secam_ranges), }, }; /* ------------ TUNER_PHILIPS_TD1316 - Philips PAL ------------ */ static struct tuner_range tuner_philips_td1316_pal_ranges[] = { { 16 * 160.00 /*MHz*/, 0xc8, 0xa1, }, { 16 * 442.00 /*MHz*/, 0xc8, 0xa2, }, { 16 * 999.99 , 0xc8, 0xa4, }, }; static struct tuner_range tuner_philips_td1316_dvb_ranges[] = { { 16 * 93.834 /*MHz*/, 0xca, 0x60, }, { 16 * 123.834 /*MHz*/, 0xca, 0xa0, }, { 16 * 163.834 /*MHz*/, 0xca, 0xc0, }, { 16 * 253.834 /*MHz*/, 0xca, 0x60, }, { 16 * 383.834 /*MHz*/, 0xca, 0xa0, }, { 16 * 443.834 /*MHz*/, 0xca, 0xc0, }, { 16 * 583.834 /*MHz*/, 0xca, 0x60, }, { 16 * 793.834 /*MHz*/, 0xca, 0xa0, }, { 16 * 999.999 , 0xca, 0xe0, }, }; static struct tuner_params tuner_philips_td1316_params[] = { { .type = TUNER_PARAM_TYPE_PAL, .ranges = tuner_philips_td1316_pal_ranges, .count = ARRAY_SIZE(tuner_philips_td1316_pal_ranges), }, { .type = TUNER_PARAM_TYPE_DIGITAL, .ranges = tuner_philips_td1316_dvb_ranges, .count = ARRAY_SIZE(tuner_philips_td1316_dvb_ranges), .iffreq = 16 * 36.166667 /*MHz*/, }, }; /* ------------ TUNER_PHILIPS_TUV1236D - Philips ATSC 
------------ */ static struct tuner_range tuner_tuv1236d_ntsc_ranges[] = { { 16 * 157.25 /*MHz*/, 0xce, 0x01, }, { 16 * 454.00 /*MHz*/, 0xce, 0x02, }, { 16 * 999.99 , 0xce, 0x04, }, }; static struct tuner_range tuner_tuv1236d_atsc_ranges[] = { { 16 * 157.25 /*MHz*/, 0xc6, 0x41, }, { 16 * 454.00 /*MHz*/, 0xc6, 0x42, }, { 16 * 999.99 , 0xc6, 0x44, }, }; static struct tuner_params tuner_tuv1236d_params[] = { { .type = TUNER_PARAM_TYPE_NTSC, .ranges = tuner_tuv1236d_ntsc_ranges, .count = ARRAY_SIZE(tuner_tuv1236d_ntsc_ranges), }, { .type = TUNER_PARAM_TYPE_DIGITAL, .ranges = tuner_tuv1236d_atsc_ranges, .count = ARRAY_SIZE(tuner_tuv1236d_atsc_ranges), .iffreq = 16 * 44.00, }, }; /* ------------ TUNER_TNF_xxx5 - Texas Instruments--------- */ /* This is known to work with Tenna TVF58t5-MFF and TVF5835 MFF * but it is expected to work also with other Tenna/Ymec * models based on TI SN 761677 chip on both PAL and NTSC */ static struct tuner_range tuner_tnf_5335_d_if_pal_ranges[] = { { 16 * 168.25 /*MHz*/, 0x8e, 0x01, }, { 16 * 471.25 /*MHz*/, 0x8e, 0x02, }, { 16 * 999.99 , 0x8e, 0x08, }, }; static struct tuner_range tuner_tnf_5335mf_ntsc_ranges[] = { { 16 * 169.25 /*MHz*/, 0x8e, 0x01, }, { 16 * 469.25 /*MHz*/, 0x8e, 0x02, }, { 16 * 999.99 , 0x8e, 0x08, }, }; static struct tuner_params tuner_tnf_5335mf_params[] = { { .type = TUNER_PARAM_TYPE_NTSC, .ranges = tuner_tnf_5335mf_ntsc_ranges, .count = ARRAY_SIZE(tuner_tnf_5335mf_ntsc_ranges), }, { .type = TUNER_PARAM_TYPE_PAL, .ranges = tuner_tnf_5335_d_if_pal_ranges, .count = ARRAY_SIZE(tuner_tnf_5335_d_if_pal_ranges), }, }; /* 70-79 */ /* ------------ TUNER_SAMSUNG_TCPN_2121P30A - Samsung NTSC ------------ */ /* '+ 4' turns on the Low Noise Amplifier */ static struct tuner_range tuner_samsung_tcpn_2121p30a_ntsc_ranges[] = { { 16 * 130.00 /*MHz*/, 0xce, 0x01 + 4, }, { 16 * 364.50 /*MHz*/, 0xce, 0x02 + 4, }, { 16 * 999.99 , 0xce, 0x08 + 4, }, }; static struct tuner_params tuner_samsung_tcpn_2121p30a_params[] = { { .type = 
TUNER_PARAM_TYPE_NTSC, .ranges = tuner_samsung_tcpn_2121p30a_ntsc_ranges, .count = ARRAY_SIZE(tuner_samsung_tcpn_2121p30a_ntsc_ranges), }, }; /* ------------ TUNER_THOMSON_FE6600 - DViCO Hybrid PAL ------------ */ static struct tuner_range tuner_thomson_fe6600_pal_ranges[] = { { 16 * 160.00 /*MHz*/, 0xfe, 0x11, }, { 16 * 442.00 /*MHz*/, 0xf6, 0x12, }, { 16 * 999.99 , 0xf6, 0x18, }, }; static struct tuner_range tuner_thomson_fe6600_dvb_ranges[] = { { 16 * 250.00 /*MHz*/, 0xb4, 0x12, }, { 16 * 455.00 /*MHz*/, 0xfe, 0x11, }, { 16 * 775.50 /*MHz*/, 0xbc, 0x18, }, { 16 * 999.99 , 0xf4, 0x18, }, }; static struct tuner_params tuner_thomson_fe6600_params[] = { { .type = TUNER_PARAM_TYPE_PAL, .ranges = tuner_thomson_fe6600_pal_ranges, .count = ARRAY_SIZE(tuner_thomson_fe6600_pal_ranges), }, { .type = TUNER_PARAM_TYPE_DIGITAL, .ranges = tuner_thomson_fe6600_dvb_ranges, .count = ARRAY_SIZE(tuner_thomson_fe6600_dvb_ranges), .iffreq = 16 * 36.125 /*MHz*/, }, }; /* ------------ TUNER_SAMSUNG_TCPG_6121P30A - Samsung PAL ------------ */ /* '+ 4' turns on the Low Noise Amplifier */ static struct tuner_range tuner_samsung_tcpg_6121p30a_pal_ranges[] = { { 16 * 146.25 /*MHz*/, 0xce, 0x01 + 4, }, { 16 * 428.50 /*MHz*/, 0xce, 0x02 + 4, }, { 16 * 999.99 , 0xce, 0x08 + 4, }, }; static struct tuner_params tuner_samsung_tcpg_6121p30a_params[] = { { .type = TUNER_PARAM_TYPE_PAL, .ranges = tuner_samsung_tcpg_6121p30a_pal_ranges, .count = ARRAY_SIZE(tuner_samsung_tcpg_6121p30a_pal_ranges), .has_tda9887 = 1, .port1_active = 1, .port2_active = 1, .port2_invert_for_secam_lc = 1, }, }; /* ------------ TUNER_TCL_MF02GIP-5N-E - TCL MF02GIP-5N ------------ */ static struct tuner_range tuner_tcl_mf02gip_5n_ntsc_ranges[] = { { 16 * 172.00 /*MHz*/, 0x8e, 0x01, }, { 16 * 448.00 /*MHz*/, 0x8e, 0x02, }, { 16 * 999.99 , 0x8e, 0x04, }, }; static struct tuner_params tuner_tcl_mf02gip_5n_params[] = { { .type = TUNER_PARAM_TYPE_NTSC, .ranges = tuner_tcl_mf02gip_5n_ntsc_ranges, .count = 
ARRAY_SIZE(tuner_tcl_mf02gip_5n_ntsc_ranges), .cb_first_if_lower_freq = 1, }, }; /* 80-89 */ /* --------- TUNER_PHILIPS_FQ1216LME_MK3 -- active loopthrough, no FM ------- */ static struct tuner_params tuner_fq1216lme_mk3_params[] = { { .type = TUNER_PARAM_TYPE_PAL, .ranges = tuner_fm1216me_mk3_pal_ranges, .count = ARRAY_SIZE(tuner_fm1216me_mk3_pal_ranges), .cb_first_if_lower_freq = 1, /* not specified, but safe to do */ .has_tda9887 = 1, /* TDA9886 */ .port1_active = 1, .port2_active = 1, .port2_invert_for_secam_lc = 1, .default_top_low = 4, .default_top_mid = 4, .default_top_high = 4, .default_top_secam_low = 4, .default_top_secam_mid = 4, .default_top_secam_high = 4, }, }; /* ----- TUNER_PARTSNIC_PTI_5NF05 - Partsnic (Daewoo) PTI-5NF05 NTSC ----- */ static struct tuner_range tuner_partsnic_pti_5nf05_ranges[] = { /* The datasheet specified channel ranges and the bandswitch byte */ /* The control byte value of 0x8e is just a guess */ { 16 * 133.25 /*MHz*/, 0x8e, 0x01, }, /* Channels 2 - B */ { 16 * 367.25 /*MHz*/, 0x8e, 0x02, }, /* Channels C - W+11 */ { 16 * 999.99 , 0x8e, 0x08, }, /* Channels W+12 - 69 */ }; static struct tuner_params tuner_partsnic_pti_5nf05_params[] = { { .type = TUNER_PARAM_TYPE_NTSC, .ranges = tuner_partsnic_pti_5nf05_ranges, .count = ARRAY_SIZE(tuner_partsnic_pti_5nf05_ranges), .cb_first_if_lower_freq = 1, /* not specified but safe to do */ }, }; /* --------- TUNER_PHILIPS_CU1216L - DVB-C NIM ------------------------- */ static struct tuner_range tuner_cu1216l_ranges[] = { { 16 * 160.25 /*MHz*/, 0xce, 0x01 }, { 16 * 444.25 /*MHz*/, 0xce, 0x02 }, { 16 * 999.99 , 0xce, 0x04 }, }; static struct tuner_params tuner_philips_cu1216l_params[] = { { .type = TUNER_PARAM_TYPE_DIGITAL, .ranges = tuner_cu1216l_ranges, .count = ARRAY_SIZE(tuner_cu1216l_ranges), .iffreq = 16 * 36.125, /*MHz*/ }, }; /* ---------------------- TUNER_SONY_BTF_PXN01Z ------------------------ */ static struct tuner_range tuner_sony_btf_pxn01z_ranges[] = { { 16 * 137.25 /*MHz*/, 
0x8e, 0x01, }, { 16 * 367.25 /*MHz*/, 0x8e, 0x02, }, { 16 * 999.99 , 0x8e, 0x04, }, }; static struct tuner_params tuner_sony_btf_pxn01z_params[] = { { .type = TUNER_PARAM_TYPE_NTSC, .ranges = tuner_sony_btf_pxn01z_ranges, .count = ARRAY_SIZE(tuner_sony_btf_pxn01z_ranges), }, }; /* ------------ TUNER_PHILIPS_FQ1236_MK5 - Philips NTSC ------------ */ static struct tuner_params tuner_philips_fq1236_mk5_params[] = { { .type = TUNER_PARAM_TYPE_NTSC, .ranges = tuner_fm1236_mk3_ntsc_ranges, .count = ARRAY_SIZE(tuner_fm1236_mk3_ntsc_ranges), .has_tda9887 = 1, /* TDA9885, no FM radio */ }, }; /* --------------------------------------------------------------------- */ struct tunertype tuners[] = { /* 0-9 */ [TUNER_TEMIC_PAL] = { /* TEMIC PAL */ .name = "Temic PAL (4002 FH5)", .params = tuner_temic_pal_params, .count = ARRAY_SIZE(tuner_temic_pal_params), }, [TUNER_PHILIPS_PAL_I] = { /* Philips PAL_I */ .name = "Philips PAL_I (FI1246 and compatibles)", .params = tuner_philips_pal_i_params, .count = ARRAY_SIZE(tuner_philips_pal_i_params), }, [TUNER_PHILIPS_NTSC] = { /* Philips NTSC */ .name = "Philips NTSC (FI1236,FM1236 and compatibles)", .params = tuner_philips_ntsc_params, .count = ARRAY_SIZE(tuner_philips_ntsc_params), }, [TUNER_PHILIPS_SECAM] = { /* Philips SECAM */ .name = "Philips (SECAM+PAL_BG) (FI1216MF, FM1216MF, FR1216MF)", .params = tuner_philips_secam_params, .count = ARRAY_SIZE(tuner_philips_secam_params), }, [TUNER_ABSENT] = { /* Tuner Absent */ .name = "NoTuner", }, [TUNER_PHILIPS_PAL] = { /* Philips PAL */ .name = "Philips PAL_BG (FI1216 and compatibles)", .params = tuner_philips_pal_params, .count = ARRAY_SIZE(tuner_philips_pal_params), }, [TUNER_TEMIC_NTSC] = { /* TEMIC NTSC */ .name = "Temic NTSC (4032 FY5)", .params = tuner_temic_ntsc_params, .count = ARRAY_SIZE(tuner_temic_ntsc_params), }, [TUNER_TEMIC_PAL_I] = { /* TEMIC PAL_I */ .name = "Temic PAL_I (4062 FY5)", .params = tuner_temic_pal_i_params, .count = ARRAY_SIZE(tuner_temic_pal_i_params), }, 
[TUNER_TEMIC_4036FY5_NTSC] = { /* TEMIC NTSC */ .name = "Temic NTSC (4036 FY5)", .params = tuner_temic_4036fy5_ntsc_params, .count = ARRAY_SIZE(tuner_temic_4036fy5_ntsc_params), }, [TUNER_ALPS_TSBH1_NTSC] = { /* TEMIC NTSC */ .name = "Alps HSBH1", .params = tuner_alps_tsbh1_ntsc_params, .count = ARRAY_SIZE(tuner_alps_tsbh1_ntsc_params), }, /* 10-19 */ [TUNER_ALPS_TSBE1_PAL] = { /* TEMIC PAL */ .name = "Alps TSBE1", .params = tuner_alps_tsb_1_params, .count = ARRAY_SIZE(tuner_alps_tsb_1_params), }, [TUNER_ALPS_TSBB5_PAL_I] = { /* Alps PAL_I */ .name = "Alps TSBB5", .params = tuner_alps_tsbb5_params, .count = ARRAY_SIZE(tuner_alps_tsbb5_params), }, [TUNER_ALPS_TSBE5_PAL] = { /* Alps PAL */ .name = "Alps TSBE5", .params = tuner_alps_tsbe5_params, .count = ARRAY_SIZE(tuner_alps_tsbe5_params), }, [TUNER_ALPS_TSBC5_PAL] = { /* Alps PAL */ .name = "Alps TSBC5", .params = tuner_alps_tsbc5_params, .count = ARRAY_SIZE(tuner_alps_tsbc5_params), }, [TUNER_TEMIC_4006FH5_PAL] = { /* TEMIC PAL */ .name = "Temic PAL_BG (4006FH5)", .params = tuner_temic_4006fh5_params, .count = ARRAY_SIZE(tuner_temic_4006fh5_params), }, [TUNER_ALPS_TSHC6_NTSC] = { /* Alps NTSC */ .name = "Alps TSCH6", .params = tuner_alps_tshc6_params, .count = ARRAY_SIZE(tuner_alps_tshc6_params), }, [TUNER_TEMIC_PAL_DK] = { /* TEMIC PAL */ .name = "Temic PAL_DK (4016 FY5)", .params = tuner_temic_pal_dk_params, .count = ARRAY_SIZE(tuner_temic_pal_dk_params), }, [TUNER_PHILIPS_NTSC_M] = { /* Philips NTSC */ .name = "Philips NTSC_M (MK2)", .params = tuner_philips_ntsc_m_params, .count = ARRAY_SIZE(tuner_philips_ntsc_m_params), }, [TUNER_TEMIC_4066FY5_PAL_I] = { /* TEMIC PAL_I */ .name = "Temic PAL_I (4066 FY5)", .params = tuner_temic_4066fy5_pal_i_params, .count = ARRAY_SIZE(tuner_temic_4066fy5_pal_i_params), }, [TUNER_TEMIC_4006FN5_MULTI_PAL] = { /* TEMIC PAL */ .name = "Temic PAL* auto (4006 FN5)", .params = tuner_temic_4006fn5_multi_params, .count = ARRAY_SIZE(tuner_temic_4006fn5_multi_params), }, /* 20-29 */ 
[TUNER_TEMIC_4009FR5_PAL] = { /* TEMIC PAL */ .name = "Temic PAL_BG (4009 FR5) or PAL_I (4069 FR5)", .params = tuner_temic_4009f_5_params, .count = ARRAY_SIZE(tuner_temic_4009f_5_params), }, [TUNER_TEMIC_4039FR5_NTSC] = { /* TEMIC NTSC */ .name = "Temic NTSC (4039 FR5)", .params = tuner_temic_4039fr5_params, .count = ARRAY_SIZE(tuner_temic_4039fr5_params), }, [TUNER_TEMIC_4046FM5] = { /* TEMIC PAL */ .name = "Temic PAL/SECAM multi (4046 FM5)", .params = tuner_temic_4046fm5_params, .count = ARRAY_SIZE(tuner_temic_4046fm5_params), }, [TUNER_PHILIPS_PAL_DK] = { /* Philips PAL */ .name = "Philips PAL_DK (FI1256 and compatibles)", .params = tuner_philips_pal_dk_params, .count = ARRAY_SIZE(tuner_philips_pal_dk_params), }, [TUNER_PHILIPS_FQ1216ME] = { /* Philips PAL */ .name = "Philips PAL/SECAM multi (FQ1216ME)", .params = tuner_philips_fq1216me_params, .count = ARRAY_SIZE(tuner_philips_fq1216me_params), }, [TUNER_LG_PAL_I_FM] = { /* LGINNOTEK PAL_I */ .name = "LG PAL_I+FM (TAPC-I001D)", .params = tuner_lg_pal_i_fm_params, .count = ARRAY_SIZE(tuner_lg_pal_i_fm_params), }, [TUNER_LG_PAL_I] = { /* LGINNOTEK PAL_I */ .name = "LG PAL_I (TAPC-I701D)", .params = tuner_lg_pal_i_params, .count = ARRAY_SIZE(tuner_lg_pal_i_params), }, [TUNER_LG_NTSC_FM] = { /* LGINNOTEK NTSC */ .name = "LG NTSC+FM (TPI8NSR01F)", .params = tuner_lg_ntsc_fm_params, .count = ARRAY_SIZE(tuner_lg_ntsc_fm_params), }, [TUNER_LG_PAL_FM] = { /* LGINNOTEK PAL */ .name = "LG PAL_BG+FM (TPI8PSB01D)", .params = tuner_lg_pal_fm_params, .count = ARRAY_SIZE(tuner_lg_pal_fm_params), }, [TUNER_LG_PAL] = { /* LGINNOTEK PAL */ .name = "LG PAL_BG (TPI8PSB11D)", .params = tuner_lg_pal_params, .count = ARRAY_SIZE(tuner_lg_pal_params), }, /* 30-39 */ [TUNER_TEMIC_4009FN5_MULTI_PAL_FM] = { /* TEMIC PAL */ .name = "Temic PAL* auto + FM (4009 FN5)", .params = tuner_temic_4009_fn5_multi_pal_fm_params, .count = ARRAY_SIZE(tuner_temic_4009_fn5_multi_pal_fm_params), }, [TUNER_SHARP_2U5JF5540_NTSC] = { /* SHARP NTSC */ .name = 
"SHARP NTSC_JP (2U5JF5540)", .params = tuner_sharp_2u5jf5540_params, .count = ARRAY_SIZE(tuner_sharp_2u5jf5540_params), }, [TUNER_Samsung_PAL_TCPM9091PD27] = { /* Samsung PAL */ .name = "Samsung PAL TCPM9091PD27", .params = tuner_samsung_pal_tcpm9091pd27_params, .count = ARRAY_SIZE(tuner_samsung_pal_tcpm9091pd27_params), }, [TUNER_MT2032] = { /* Microtune PAL|NTSC */ .name = "MT20xx universal", /* see mt20xx.c for details */ }, [TUNER_TEMIC_4106FH5] = { /* TEMIC PAL */ .name = "Temic PAL_BG (4106 FH5)", .params = tuner_temic_4106fh5_params, .count = ARRAY_SIZE(tuner_temic_4106fh5_params), }, [TUNER_TEMIC_4012FY5] = { /* TEMIC PAL */ .name = "Temic PAL_DK/SECAM_L (4012 FY5)", .params = tuner_temic_4012fy5_params, .count = ARRAY_SIZE(tuner_temic_4012fy5_params), }, [TUNER_TEMIC_4136FY5] = { /* TEMIC NTSC */ .name = "Temic NTSC (4136 FY5)", .params = tuner_temic_4136_fy5_params, .count = ARRAY_SIZE(tuner_temic_4136_fy5_params), }, [TUNER_LG_PAL_NEW_TAPC] = { /* LGINNOTEK PAL */ .name = "LG PAL (newer TAPC series)", .params = tuner_lg_pal_new_tapc_params, .count = ARRAY_SIZE(tuner_lg_pal_new_tapc_params), }, [TUNER_PHILIPS_FM1216ME_MK3] = { /* Philips PAL */ .name = "Philips PAL/SECAM multi (FM1216ME MK3)", .params = tuner_fm1216me_mk3_params, .count = ARRAY_SIZE(tuner_fm1216me_mk3_params), }, [TUNER_LG_NTSC_NEW_TAPC] = { /* LGINNOTEK NTSC */ .name = "LG NTSC (newer TAPC series)", .params = tuner_lg_ntsc_new_tapc_params, .count = ARRAY_SIZE(tuner_lg_ntsc_new_tapc_params), }, /* 40-49 */ [TUNER_HITACHI_NTSC] = { /* HITACHI NTSC */ .name = "HITACHI V7-J180AT", .params = tuner_hitachi_ntsc_params, .count = ARRAY_SIZE(tuner_hitachi_ntsc_params), }, [TUNER_PHILIPS_PAL_MK] = { /* Philips PAL */ .name = "Philips PAL_MK (FI1216 MK)", .params = tuner_philips_pal_mk_params, .count = ARRAY_SIZE(tuner_philips_pal_mk_params), }, [TUNER_PHILIPS_FCV1236D] = { /* Philips ATSC */ .name = "Philips FCV1236D ATSC/NTSC dual in", .params = tuner_philips_fcv1236d_params, .count = 
ARRAY_SIZE(tuner_philips_fcv1236d_params), .min = 16 * 53.00, .max = 16 * 803.00, .stepsize = 62500, }, [TUNER_PHILIPS_FM1236_MK3] = { /* Philips NTSC */ .name = "Philips NTSC MK3 (FM1236MK3 or FM1236/F)", .params = tuner_fm1236_mk3_params, .count = ARRAY_SIZE(tuner_fm1236_mk3_params), }, [TUNER_PHILIPS_4IN1] = { /* Philips NTSC */ .name = "Philips 4 in 1 (ATI TV Wonder Pro/Conexant)", .params = tuner_philips_4in1_params, .count = ARRAY_SIZE(tuner_philips_4in1_params), }, [TUNER_MICROTUNE_4049FM5] = { /* Microtune PAL */ .name = "Microtune 4049 FM5", .params = tuner_microtune_4049_fm5_params, .count = ARRAY_SIZE(tuner_microtune_4049_fm5_params), }, [TUNER_PANASONIC_VP27] = { /* Panasonic NTSC */ .name = "Panasonic VP27s/ENGE4324D", .params = tuner_panasonic_vp27_params, .count = ARRAY_SIZE(tuner_panasonic_vp27_params), }, [TUNER_LG_NTSC_TAPE] = { /* LGINNOTEK NTSC */ .name = "LG NTSC (TAPE series)", .params = tuner_fm1236_mk3_params, .count = ARRAY_SIZE(tuner_fm1236_mk3_params), }, [TUNER_TNF_8831BGFF] = { /* Philips PAL */ .name = "Tenna TNF 8831 BGFF)", .params = tuner_tnf_8831bgff_params, .count = ARRAY_SIZE(tuner_tnf_8831bgff_params), }, [TUNER_MICROTUNE_4042FI5] = { /* Microtune NTSC */ .name = "Microtune 4042 FI5 ATSC/NTSC dual in", .params = tuner_microtune_4042fi5_params, .count = ARRAY_SIZE(tuner_microtune_4042fi5_params), .min = 16 * 57.00, .max = 16 * 858.00, .stepsize = 62500, }, /* 50-59 */ [TUNER_TCL_2002N] = { /* TCL NTSC */ .name = "TCL 2002N", .params = tuner_tcl_2002n_params, .count = ARRAY_SIZE(tuner_tcl_2002n_params), }, [TUNER_PHILIPS_FM1256_IH3] = { /* Philips PAL */ .name = "Philips PAL/SECAM_D (FM 1256 I-H3)", .params = tuner_philips_fm1256_ih3_params, .count = ARRAY_SIZE(tuner_philips_fm1256_ih3_params), }, [TUNER_THOMSON_DTT7610] = { /* THOMSON ATSC */ .name = "Thomson DTT 7610 (ATSC/NTSC)", .params = tuner_thomson_dtt7610_params, .count = ARRAY_SIZE(tuner_thomson_dtt7610_params), .min = 16 * 44.00, .max = 16 * 958.00, .stepsize = 62500, 
}, [TUNER_PHILIPS_FQ1286] = { /* Philips NTSC */ .name = "Philips FQ1286", .params = tuner_philips_fq1286_params, .count = ARRAY_SIZE(tuner_philips_fq1286_params), }, [TUNER_PHILIPS_TDA8290] = { /* Philips PAL|NTSC */ .name = "Philips/NXP TDA 8290/8295 + 8275/8275A/18271", /* see tda8290.c for details */ }, [TUNER_TCL_2002MB] = { /* TCL PAL */ .name = "TCL 2002MB", .params = tuner_tcl_2002mb_params, .count = ARRAY_SIZE(tuner_tcl_2002mb_params), }, [TUNER_PHILIPS_FQ1216AME_MK4] = { /* Philips PAL */ .name = "Philips PAL/SECAM multi (FQ1216AME MK4)", .params = tuner_philips_fq1216ame_mk4_params, .count = ARRAY_SIZE(tuner_philips_fq1216ame_mk4_params), }, [TUNER_PHILIPS_FQ1236A_MK4] = { /* Philips NTSC */ .name = "Philips FQ1236A MK4", .params = tuner_philips_fq1236a_mk4_params, .count = ARRAY_SIZE(tuner_philips_fq1236a_mk4_params), }, [TUNER_YMEC_TVF_8531MF] = { /* Philips NTSC */ .name = "Ymec TVision TVF-8531MF/8831MF/8731MF", .params = tuner_ymec_tvf_8531mf_params, .count = ARRAY_SIZE(tuner_ymec_tvf_8531mf_params), }, [TUNER_YMEC_TVF_5533MF] = { /* Philips NTSC */ .name = "Ymec TVision TVF-5533MF", .params = tuner_ymec_tvf_5533mf_params, .count = ARRAY_SIZE(tuner_ymec_tvf_5533mf_params), }, /* 60-69 */ [TUNER_THOMSON_DTT761X] = { /* THOMSON ATSC */ /* DTT 7611 7611A 7612 7613 7613A 7614 7615 7615A */ .name = "Thomson DTT 761X (ATSC/NTSC)", .params = tuner_thomson_dtt761x_params, .count = ARRAY_SIZE(tuner_thomson_dtt761x_params), .min = 16 * 57.00, .max = 16 * 863.00, .stepsize = 62500, .initdata = tua603x_agc103, }, [TUNER_TENA_9533_DI] = { /* Philips PAL */ .name = "Tena TNF9533-D/IF/TNF9533-B/DF", .params = tuner_tena_9533_di_params, .count = ARRAY_SIZE(tuner_tena_9533_di_params), }, [TUNER_TEA5767] = { /* Philips RADIO */ .name = "Philips TEA5767HN FM Radio", /* see tea5767.c for details */ }, [TUNER_PHILIPS_FMD1216ME_MK3] = { /* Philips PAL */ .name = "Philips FMD1216ME MK3 Hybrid Tuner", .params = tuner_philips_fmd1216me_mk3_params, .count = 
ARRAY_SIZE(tuner_philips_fmd1216me_mk3_params), .min = 16 * 50.87, .max = 16 * 858.00, .stepsize = 166667, .initdata = tua603x_agc112, .sleepdata = (u8[]){ 4, 0x9c, 0x60, 0x85, 0x54 }, }, [TUNER_LG_TDVS_H06XF] = { /* LGINNOTEK ATSC */ .name = "LG TDVS-H06xF", /* H061F, H062F & H064F */ .params = tuner_lg_tdvs_h06xf_params, .count = ARRAY_SIZE(tuner_lg_tdvs_h06xf_params), .min = 16 * 54.00, .max = 16 * 863.00, .stepsize = 62500, .initdata = tua603x_agc103, }, [TUNER_YMEC_TVF66T5_B_DFF] = { /* Philips PAL */ .name = "Ymec TVF66T5-B/DFF", .params = tuner_ymec_tvf66t5_b_dff_params, .count = ARRAY_SIZE(tuner_ymec_tvf66t5_b_dff_params), }, [TUNER_LG_TALN] = { /* LGINNOTEK NTSC / PAL / SECAM */ .name = "LG TALN series", .params = tuner_lg_taln_params, .count = ARRAY_SIZE(tuner_lg_taln_params), }, [TUNER_PHILIPS_TD1316] = { /* Philips PAL */ .name = "Philips TD1316 Hybrid Tuner", .params = tuner_philips_td1316_params, .count = ARRAY_SIZE(tuner_philips_td1316_params), .min = 16 * 87.00, .max = 16 * 895.00, .stepsize = 166667, }, [TUNER_PHILIPS_TUV1236D] = { /* Philips ATSC */ .name = "Philips TUV1236D ATSC/NTSC dual in", .params = tuner_tuv1236d_params, .count = ARRAY_SIZE(tuner_tuv1236d_params), .min = 16 * 54.00, .max = 16 * 864.00, .stepsize = 62500, }, [TUNER_TNF_5335MF] = { /* Tenna PAL/NTSC */ .name = "Tena TNF 5335 and similar models", .params = tuner_tnf_5335mf_params, .count = ARRAY_SIZE(tuner_tnf_5335mf_params), }, /* 70-79 */ [TUNER_SAMSUNG_TCPN_2121P30A] = { /* Samsung NTSC */ .name = "Samsung TCPN 2121P30A", .params = tuner_samsung_tcpn_2121p30a_params, .count = ARRAY_SIZE(tuner_samsung_tcpn_2121p30a_params), }, [TUNER_XC2028] = { /* Xceive 2028 */ .name = "Xceive xc2028/xc3028 tuner", /* see tuner-xc2028.c for details */ }, [TUNER_THOMSON_FE6600] = { /* Thomson PAL / DVB-T */ .name = "Thomson FE6600", .params = tuner_thomson_fe6600_params, .count = ARRAY_SIZE(tuner_thomson_fe6600_params), .min = 16 * 44.25, .max = 16 * 858.00, .stepsize = 166667, }, 
[TUNER_SAMSUNG_TCPG_6121P30A] = { /* Samsung PAL */ .name = "Samsung TCPG 6121P30A", .params = tuner_samsung_tcpg_6121p30a_params, .count = ARRAY_SIZE(tuner_samsung_tcpg_6121p30a_params), }, [TUNER_TDA9887] = { /* Philips TDA 9887 IF PLL Demodulator. This chip is part of some modern tuners */ .name = "Philips TDA988[5,6,7] IF PLL Demodulator", /* see tda9887.c for details */ }, [TUNER_TEA5761] = { /* Philips RADIO */ .name = "Philips TEA5761 FM Radio", /* see tea5767.c for details */ }, [TUNER_XC5000] = { /* Xceive 5000 */ .name = "Xceive 5000 tuner", /* see xc5000.c for details */ }, [TUNER_XC4000] = { /* Xceive 4000 */ .name = "Xceive 4000 tuner", /* see xc4000.c for details */ }, [TUNER_TCL_MF02GIP_5N] = { /* TCL tuner MF02GIP-5N-E */ .name = "TCL tuner MF02GIP-5N-E", .params = tuner_tcl_mf02gip_5n_params, .count = ARRAY_SIZE(tuner_tcl_mf02gip_5n_params), }, [TUNER_PHILIPS_FMD1216MEX_MK3] = { /* Philips PAL */ .name = "Philips FMD1216MEX MK3 Hybrid Tuner", .params = tuner_philips_fmd1216mex_mk3_params, .count = ARRAY_SIZE(tuner_philips_fmd1216mex_mk3_params), .min = 16 * 50.87, .max = 16 * 858.00, .stepsize = 166667, .initdata = tua603x_agc112, .sleepdata = (u8[]){ 4, 0x9c, 0x60, 0x85, 0x54 }, }, [TUNER_PHILIPS_FM1216MK5] = { /* Philips PAL */ .name = "Philips PAL/SECAM multi (FM1216 MK5)", .params = tuner_fm1216mk5_params, .count = ARRAY_SIZE(tuner_fm1216mk5_params), }, /* 80-89 */ [TUNER_PHILIPS_FQ1216LME_MK3] = { /* PAL/SECAM, Loop-thru, no FM */ .name = "Philips FQ1216LME MK3 PAL/SECAM w/active loopthrough", .params = tuner_fq1216lme_mk3_params, .count = ARRAY_SIZE(tuner_fq1216lme_mk3_params), }, [TUNER_PARTSNIC_PTI_5NF05] = { .name = "Partsnic (Daewoo) PTI-5NF05", .params = tuner_partsnic_pti_5nf05_params, .count = ARRAY_SIZE(tuner_partsnic_pti_5nf05_params), }, [TUNER_PHILIPS_CU1216L] = { .name = "Philips CU1216L", .params = tuner_philips_cu1216l_params, .count = ARRAY_SIZE(tuner_philips_cu1216l_params), .stepsize = 62500, }, [TUNER_NXP_TDA18271] = { .name 
= "NXP TDA18271", /* see tda18271-fe.c for details */ }, [TUNER_SONY_BTF_PXN01Z] = { .name = "Sony BTF-Pxn01Z", .params = tuner_sony_btf_pxn01z_params, .count = ARRAY_SIZE(tuner_sony_btf_pxn01z_params), }, [TUNER_PHILIPS_FQ1236_MK5] = { /* NTSC, TDA9885, no FM radio */ .name = "Philips FQ1236 MK5", .params = tuner_philips_fq1236_mk5_params, .count = ARRAY_SIZE(tuner_philips_fq1236_mk5_params), }, [TUNER_TENA_TNF_5337] = { /* Tena 5337 MFD */ .name = "Tena TNF5337 MFD", .params = tuner_tena_tnf_5337_params, .count = ARRAY_SIZE(tuner_tena_tnf_5337_params), }, [TUNER_XC5000C] = { /* Xceive 5000C */ .name = "Xceive 5000C tuner", /* see xc5000.c for details */ }, }; EXPORT_SYMBOL(tuners); unsigned const int tuner_count = ARRAY_SIZE(tuners); EXPORT_SYMBOL(tuner_count); MODULE_DESCRIPTION("Simple tuner device type database"); MODULE_AUTHOR("Ralph Metzler, Gerd Knorr, Gunther Mayer"); MODULE_LICENSE("GPL");
gpl-2.0
Arc-Team/android_kernel_samsung_jflte
drivers/staging/wlags49_h2/wl_pci.c
4799
52516
/******************************************************************************* * Agere Systems Inc. * Wireless device driver for Linux (wlags49). * * Copyright (c) 1998-2003 Agere Systems Inc. * All rights reserved. * http://www.agere.com * * Initially developed by TriplePoint, Inc. * http://www.triplepoint.com * *------------------------------------------------------------------------------ * * This file contains processing and initialization specific to PCI/miniPCI * devices. * *------------------------------------------------------------------------------ * * SOFTWARE LICENSE * * This software is provided subject to the following terms and conditions, * which you should read carefully before using the software. Using this * software indicates your acceptance of these terms and conditions. If you do * not agree with these terms and conditions, do not use the software. * * Copyright © 2003 Agere Systems Inc. * All rights reserved. * * Redistribution and use in source or binary forms, with or without * modifications, are permitted provided that the following conditions are met: * * . Redistributions of source code must retain the above copyright notice, this * list of conditions and the following Disclaimer as comments in the code as * well as in the documentation and/or other materials provided with the * distribution. * * . Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following Disclaimer in the documentation * and/or other materials provided with the distribution. * * . Neither the name of Agere Systems Inc. nor the names of the contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Disclaimer * * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
ANY * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * ******************************************************************************/ /******************************************************************************* * include files ******************************************************************************/ #include <wireless/wl_version.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/ptrace.h> #include <linux/ctype.h> #include <linux/string.h> //#include <linux/timer.h> #include <linux/interrupt.h> #include <linux/in.h> #include <linux/delay.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/bitops.h> #include <asm/uaccess.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/if_arp.h> #include <linux/ioport.h> #include <hcf/debug.h> #include <hcf.h> #include <dhf.h> #include <hcfdef.h> #include <wireless/wl_if.h> #include <wireless/wl_internal.h> #include <wireless/wl_util.h> #include <wireless/wl_main.h> #include <wireless/wl_netdev.h> #include <wireless/wl_pci.h> /******************************************************************************* * global variables ******************************************************************************/ #if DBG extern dbg_info_t *DbgInfo; #endif 
// DBG /* define the PCI device Table Cardname and id tables */ static struct pci_device_id wl_pci_tbl[] __devinitdata = { { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_0), }, { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_1), }, { PCI_DEVICE(PCI_VENDOR_ID_WL_LKM, PCI_DEVICE_ID_WL_LKM_2), }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, wl_pci_tbl); /******************************************************************************* * function prototypes ******************************************************************************/ int __devinit wl_pci_probe( struct pci_dev *pdev, const struct pci_device_id *ent ); void __devexit wl_pci_remove(struct pci_dev *pdev); int wl_pci_setup( struct pci_dev *pdev ); void wl_pci_enable_cardbus_interrupts( struct pci_dev *pdev ); #ifdef ENABLE_DMA int wl_pci_dma_alloc( struct pci_dev *pdev, struct wl_private *lp ); int wl_pci_dma_free( struct pci_dev *pdev, struct wl_private *lp ); int wl_pci_dma_alloc_tx_packet( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ); int wl_pci_dma_free_tx_packet( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ); int wl_pci_dma_alloc_rx_packet( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ); int wl_pci_dma_free_rx_packet( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ); int wl_pci_dma_alloc_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc, int size ); int wl_pci_dma_free_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ); int wl_pci_dma_alloc_desc( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ); int wl_pci_dma_free_desc( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ); int wl_pci_dma_alloc_buf( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT *desc, int size ); int wl_pci_dma_free_buf( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT *desc ); void wl_pci_dma_hcf_reclaim_rx( struct wl_private *lp ); 
#endif // ENABLE_DMA /******************************************************************************* * PCI module function registration ******************************************************************************/ static struct pci_driver wl_driver = { name: MODULE_NAME, id_table: wl_pci_tbl, probe: wl_pci_probe, remove: __devexit_p(wl_pci_remove), suspend: NULL, resume: NULL, }; /******************************************************************************* * wl_adapter_init_module() ******************************************************************************* * * DESCRIPTION: * * Called by init_module() to perform PCI-specific driver initialization. * * PARAMETERS: * * N/A * * RETURNS: * * 0 * ******************************************************************************/ int wl_adapter_init_module( void ) { int result; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_adapter_init_module()" ); DBG_ENTER( DbgInfo ); DBG_TRACE( DbgInfo, "wl_adapter_init_module() -- PCI\n" ); result = pci_register_driver( &wl_driver ); //;?replace with pci_module_init, Rubini pg 490 //;? why not do something with the result DBG_LEAVE( DbgInfo ); return 0; } // wl_adapter_init_module /*============================================================================*/ /******************************************************************************* * wl_adapter_cleanup_module() ******************************************************************************* * * DESCRIPTION: * * Called by cleanup_module() to perform PCI-specific driver cleanup. 
* * PARAMETERS: * * N/A * * RETURNS: * * N/A * ******************************************************************************/ void wl_adapter_cleanup_module( void ) { //;?how comes wl_adapter_cleanup_module is located in a seemingly pci specific module DBG_FUNC( "wl_adapter_cleanup_module" ); DBG_ENTER( DbgInfo ); //;?DBG_TRACE below feels like nearly redundant in the light of DBG_ENTER above DBG_TRACE( DbgInfo, "wl_adapter_cleanup_module() -- PCI\n" ); pci_unregister_driver( &wl_driver ); DBG_LEAVE( DbgInfo ); return; } // wl_adapter_cleanup_module /*============================================================================*/ /******************************************************************************* * wl_adapter_insert() ******************************************************************************* * * DESCRIPTION: * * Called by wl_pci_probe() to continue the process of device insertion. * * PARAMETERS: * * dev - a pointer to the device's net_device structure * * RETURNS: * * TRUE or FALSE * ******************************************************************************/ int wl_adapter_insert( struct net_device *dev ) { int result = FALSE; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_adapter_insert" ); DBG_ENTER( DbgInfo ); DBG_TRACE( DbgInfo, "wl_adapter_insert() -- PCI\n" ); if( dev == NULL ) { DBG_ERROR( DbgInfo, "net_device pointer is NULL!!!\n" ); } else if( dev->priv == NULL ) { DBG_ERROR( DbgInfo, "wl_private pointer is NULL!!!\n" ); } else if( wl_insert( dev ) ) { /* Perform remaining device initialization */ result = TRUE; } else { DBG_TRACE( DbgInfo, "wl_insert() FAILED\n" ); } DBG_LEAVE( DbgInfo ); return result; } // wl_adapter_insert /*============================================================================*/ /******************************************************************************* * wl_adapter_open() ******************************************************************************* * * 
DESCRIPTION: * * Open the device. * * PARAMETERS: * * dev - a pointer to the device's net_device structure * * RETURNS: * * an HCF status code * ******************************************************************************/ int wl_adapter_open( struct net_device *dev ) { int result = 0; int hcf_status = HCF_SUCCESS; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_adapter_open" ); DBG_ENTER( DbgInfo ); DBG_TRACE( DbgInfo, "wl_adapter_open() -- PCI\n" ); hcf_status = wl_open( dev ); if( hcf_status != HCF_SUCCESS ) { result = -ENODEV; } DBG_LEAVE( DbgInfo ); return result; } // wl_adapter_open /*============================================================================*/ /******************************************************************************* * wl_adapter_close() ******************************************************************************* * * DESCRIPTION: * * Close the device * * PARAMETERS: * * dev - a pointer to the device's net_device structure * * RETURNS: * * 0 * ******************************************************************************/ int wl_adapter_close( struct net_device *dev ) { DBG_FUNC( "wl_adapter_close" ); DBG_ENTER( DbgInfo ); DBG_TRACE( DbgInfo, "wl_adapter_close() -- PCI\n" ); DBG_TRACE( DbgInfo, "%s: Shutting down adapter.\n", dev->name ); wl_close( dev ); DBG_LEAVE( DbgInfo ); return 0; } // wl_adapter_close /*============================================================================*/ /******************************************************************************* * wl_adapter_is_open() ******************************************************************************* * * DESCRIPTION: * * Check whether this device is open. Returns * * PARAMETERS: * * dev - a pointer to the device's net_device structure * * RETURNS: * * nonzero if device is open. 
* ******************************************************************************/ int wl_adapter_is_open( struct net_device *dev ) { /* This function is used in PCMCIA to check the status of the 'open' field in the dev_link_t structure associated with a network device. There doesn't seem to be an analog to this for PCI, and checking the status contained in the net_device structure doesn't have the same effect. For now, return TRUE, but find out if this is necessary for PCI. */ return TRUE; } // wl_adapter_is_open /*============================================================================*/ /******************************************************************************* * wl_pci_probe() ******************************************************************************* * * DESCRIPTION: * * Registered in the pci_driver structure, this function is called when the * PCI subsystem finds a new PCI device which matches the infomation contained * in the pci_device_id table. * * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * ent - this device's entry in the pci_device_id table * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int __devinit wl_pci_probe( struct pci_dev *pdev, const struct pci_device_id *ent ) { int result; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_pci_probe" ); DBG_ENTER( DbgInfo ); DBG_PRINT( "%s\n", VERSION_INFO ); result = wl_pci_setup( pdev ); DBG_LEAVE( DbgInfo ); return result; } // wl_pci_probe /*============================================================================*/ /******************************************************************************* * wl_pci_remove() ******************************************************************************* * * DESCRIPTION: * * Registered in the pci_driver structure, this function is called when the * PCI subsystem detects that a PCI device which matches 
the infomation * contained in the pci_device_id table has been removed. * * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * * RETURNS: * * N/A * ******************************************************************************/ void __devexit wl_pci_remove(struct pci_dev *pdev) { struct net_device *dev = NULL; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_pci_remove" ); DBG_ENTER( DbgInfo ); /* Make sure the pci_dev pointer passed in is valid */ if( pdev == NULL ) { DBG_ERROR( DbgInfo, "PCI subsys passed in an invalid pci_dev pointer\n" ); return; } dev = pci_get_drvdata( pdev ); if( dev == NULL ) { DBG_ERROR( DbgInfo, "Could not retrieve net_device structure\n" ); return; } /* Perform device cleanup */ wl_remove( dev ); free_irq( dev->irq, dev ); #ifdef ENABLE_DMA wl_pci_dma_free( pdev, dev->priv ); #endif wl_device_dealloc( dev ); DBG_LEAVE( DbgInfo ); return; } // wl_pci_remove /*============================================================================*/ /******************************************************************************* * wl_pci_setup() ******************************************************************************* * * DESCRIPTION: * * Called by wl_pci_probe() to begin a device's initialization process. 
* * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_setup( struct pci_dev *pdev ) { int result = 0; struct net_device *dev = NULL; struct wl_private *lp = NULL; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_pci_setup" ); DBG_ENTER( DbgInfo ); /* Make sure the pci_dev pointer passed in is valid */ if( pdev == NULL ) { DBG_ERROR( DbgInfo, "PCI subsys passed in an invalid pci_dev pointer\n" ); return -ENODEV; } result = pci_enable_device( pdev ); if( result != 0 ) { DBG_ERROR( DbgInfo, "pci_enable_device() failed\n" ); DBG_LEAVE( DbgInfo ); return result; } /* We found our device! Let's register it with the system */ DBG_TRACE( DbgInfo, "Found our device, now registering\n" ); dev = wl_device_alloc( ); if( dev == NULL ) { DBG_ERROR( DbgInfo, "Could not register device!!!\n" ); DBG_LEAVE( DbgInfo ); return -ENOMEM; } /* Make sure that space was allocated for our private adapter struct */ if( dev->priv == NULL ) { DBG_ERROR( DbgInfo, "Private adapter struct was not allocated!!!\n" ); DBG_LEAVE( DbgInfo ); return -ENOMEM; } #ifdef ENABLE_DMA /* Allocate DMA Descriptors */ if( wl_pci_dma_alloc( pdev, dev->priv ) < 0 ) { DBG_ERROR( DbgInfo, "Could not allocate DMA descriptor memory!!!\n" ); DBG_LEAVE( DbgInfo ); return -ENOMEM; } #endif /* Register our private adapter structure with PCI */ pci_set_drvdata( pdev, dev ); /* Fill out bus specific information in the net_device struct */ dev->irq = pdev->irq; SET_MODULE_OWNER( dev ); DBG_TRACE( DbgInfo, "Device Base Address: %#03lx\n", pdev->resource[0].start ); dev->base_addr = pdev->resource[0].start; /* Initialize our device here */ if( !wl_adapter_insert( dev )) { DBG_ERROR( DbgInfo, "wl_adapter_insert() FAILED!!!\n" ); wl_device_dealloc( dev ); DBG_LEAVE( DbgInfo ); return -EINVAL; } /* Register our ISR */ 
DBG_TRACE( DbgInfo, "Registering ISR...\n" ); result = request_irq(dev->irq, wl_isr, SA_SHIRQ, dev->name, dev); if( result ) { DBG_WARNING( DbgInfo, "Could not register ISR!!!\n" ); DBG_LEAVE( DbgInfo ); return result; } /* Make sure interrupts are enabled properly for CardBus */ lp = dev->priv; if( lp->hcfCtx.IFB_BusType == CFG_NIC_BUS_TYPE_CARDBUS || lp->hcfCtx.IFB_BusType == CFG_NIC_BUS_TYPE_PCI ) { DBG_TRACE( DbgInfo, "This is a PCI/CardBus card, enable interrupts\n" ); wl_pci_enable_cardbus_interrupts( pdev ); } /* Enable bus mastering */ pci_set_master( pdev ); DBG_LEAVE( DbgInfo ); return 0; } // wl_pci_setup /*============================================================================*/ /******************************************************************************* * wl_pci_enable_cardbus_interrupts() ******************************************************************************* * * DESCRIPTION: * * Called by wl_pci_setup() to enable interrupts on a CardBus device. This * is done by writing bit 15 to the function event mask register. This * CardBus-specific register is located in BAR2 (counting from BAR0), in memory * space at byte offset 1f4 (7f4 for WARP). 
* * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * * RETURNS: * * N/A * ******************************************************************************/ void wl_pci_enable_cardbus_interrupts( struct pci_dev *pdev ) { u32 bar2_reg; u32 mem_addr_bus; u32 func_evt_mask_reg; void *mem_addr_kern = NULL; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_pci_enable_cardbus_interrupts" ); DBG_ENTER( DbgInfo ); /* Initialize to known bad values */ bar2_reg = 0xdeadbeef; mem_addr_bus = 0xdeadbeef; /* Read the BAR2 register; this register contains the base address of the memory region where the function event mask register lives */ pci_read_config_dword( pdev, PCI_BASE_ADDRESS_2, &bar2_reg ); mem_addr_bus = bar2_reg & PCI_BASE_ADDRESS_MEM_MASK; /* Once the base address is obtained, remap the memory region to kernel space so we can retrieve the register */ mem_addr_kern = ioremap( mem_addr_bus, 0x200 ); #ifdef HERMES25 #define REG_OFFSET 0x07F4 #else #define REG_OFFSET 0x01F4 #endif // HERMES25 #define BIT15 0x8000 /* Retrieve the functional event mask register, enable interrupts by setting Bit 15, and write back the value */ func_evt_mask_reg = *(u32 *)( mem_addr_kern + REG_OFFSET ); func_evt_mask_reg |= BIT15; *(u32 *)( mem_addr_kern + REG_OFFSET ) = func_evt_mask_reg; /* Once complete, unmap the region and exit */ iounmap( mem_addr_kern ); DBG_LEAVE( DbgInfo ); return; } // wl_pci_enable_cardbus_interrupts /*============================================================================*/ #ifdef ENABLE_DMA /******************************************************************************* * wl_pci_dma_alloc() ******************************************************************************* * * DESCRIPTION: * * Allocates all resources needed for PCI/CardBus DMA operation * * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * * RETURNS: * * 0 on success * 
errno value otherwise * ******************************************************************************/ int wl_pci_dma_alloc( struct pci_dev *pdev, struct wl_private *lp ) { int i; int status = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_pci_dma_alloc" ); DBG_ENTER( DbgInfo ); // lp->dma.tx_rsc_ind = lp->dma.rx_rsc_ind = 0; // // /* Alloc for the Tx chain and its reclaim descriptor */ // for( i = 0; i < NUM_TX_DESC; i++ ) { // status = wl_pci_dma_alloc_tx_packet( pdev, lp, &lp->dma.tx_packet[i] ); // if( status == 0 ) { // DBG_PRINT( "lp->dma.tx_packet[%d] : 0x%p\n", i, lp->dma.tx_packet[i] ); // DBG_PRINT( "lp->dma.tx_packet[%d]->next_desc_addr : 0x%p\n", i, lp->dma.tx_packet[i]->next_desc_addr ); // lp->dma.tx_rsc_ind++; // } else { // DBG_ERROR( DbgInfo, "Could not alloc DMA Tx Packet\n" ); // break; // } // } // if( status == 0 ) { // status = wl_pci_dma_alloc_desc( pdev, lp, &lp->dma.tx_reclaim_desc ); // DBG_PRINT( "lp->dma.tx_reclaim_desc: 0x%p\n", lp->dma.tx_reclaim_desc ); // } // /* Alloc for the Rx chain and its reclaim descriptor */ // if( status == 0 ) { // for( i = 0; i < NUM_RX_DESC; i++ ) { // status = wl_pci_dma_alloc_rx_packet( pdev, lp, &lp->dma.rx_packet[i] ); // if( status == 0 ) { // DBG_PRINT( "lp->dma.rx_packet[%d] : 0x%p\n", i, lp->dma.rx_packet[i] ); // DBG_PRINT( "lp->dma.rx_packet[%d]->next_desc_addr : 0x%p\n", i, lp->dma.rx_packet[i]->next_desc_addr ); // lp->dma.rx_rsc_ind++; // } else { // DBG_ERROR( DbgInfo, "Could not alloc DMA Rx Packet\n" ); // break; // } // } // } // if( status == 0 ) { // status = wl_pci_dma_alloc_desc( pdev, lp, &lp->dma.rx_reclaim_desc ); // DBG_PRINT( "lp->dma.rx_reclaim_desc: 0x%p\n", lp->dma.rx_reclaim_desc ); // } // /* Store status, as host should not call HCF functions if this fails */ // lp->dma.status = status; //;?all useages of dma.status have been commented out // DBG_LEAVE( DbgInfo ); return status; } // wl_pci_dma_alloc 
/*============================================================================*/ /******************************************************************************* * wl_pci_dma_free() ******************************************************************************* * * DESCRIPTION: * * Deallocated all resources needed for PCI/CardBus DMA operation * * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_dma_free( struct pci_dev *pdev, struct wl_private *lp ) { int i; int status = 0; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_pci_dma_free" ); DBG_ENTER( DbgInfo ); /* Reclaim all Rx packets that were handed over to the HCF */ /* Do I need to do this? Before this free is called, I've already disabled the port which will call wl_pci_dma_hcf_reclaim */ //if( lp->dma.status == 0 ) //{ // wl_pci_dma_hcf_reclaim( lp ); //} /* Free everything needed for DMA Rx */ for( i = 0; i < NUM_RX_DESC; i++ ) { if( lp->dma.rx_packet[i] ) { status = wl_pci_dma_free_rx_packet( pdev, lp, &lp->dma.rx_packet[i] ); if( status != 0 ) { DBG_WARNING( DbgInfo, "Problem freeing Rx packet\n" ); } } } lp->dma.rx_rsc_ind = 0; if( lp->dma.rx_reclaim_desc ) { status = wl_pci_dma_free_desc( pdev, lp, &lp->dma.rx_reclaim_desc ); if( status != 0 ) { DBG_WARNING( DbgInfo, "Problem freeing Rx reclaim descriptor\n" ); } } /* Free everything needed for DMA Tx */ for( i = 0; i < NUM_TX_DESC; i++ ) { if( lp->dma.tx_packet[i] ) { status = wl_pci_dma_free_tx_packet( pdev, lp, &lp->dma.tx_packet[i] ); if( status != 0 ) { DBG_WARNING( DbgInfo, "Problem freeing Tx packet\n" ); } } } lp->dma.tx_rsc_ind = 0; if( lp->dma.tx_reclaim_desc ) { status = wl_pci_dma_free_desc( pdev, lp, &lp->dma.tx_reclaim_desc ); if( status != 0 ) { DBG_WARNING( DbgInfo, "Problem freeing Tx 
reclaim descriptor\n" ); } } DBG_LEAVE( DbgInfo ); return status; } // wl_pci_dma_free /*============================================================================*/ /******************************************************************************* * wl_pci_dma_alloc_tx_packet() ******************************************************************************* * * DESCRIPTION: * * Allocates a single Tx packet, consisting of several descriptors and * buffers. Data to transmit is first copied into the 'payload' buffer * before being transmitted. * * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * desc - a pointer which will reference the descriptor to be alloc'd. * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_dma_alloc_tx_packet( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ) { // int status = 0; // /*------------------------------------------------------------------------*/ // // if( desc == NULL ) { // status = -EFAULT; // } // if( status == 0 ) { // status = wl_pci_dma_alloc_desc_and_buf( pdev, lp, desc, // HCF_DMA_TX_BUF1_SIZE ); // // if( status == 0 ) { // status = wl_pci_dma_alloc_desc_and_buf( pdev, lp, // &( (*desc)->next_desc_addr ), // HCF_MAX_PACKET_SIZE ); // } // } // if( status == 0 ) { // (*desc)->next_desc_phys_addr = (*desc)->next_desc_addr->desc_phys_addr; // } // return status; } // wl_pci_dma_alloc_tx_packet /*============================================================================*/ /******************************************************************************* * wl_pci_dma_free_tx_packet() ******************************************************************************* * * DESCRIPTION: * * Frees a single Tx packet, described in the corresponding alloc function. 
* * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * desc - a pointer which will reference the descriptor to be alloc'd. * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_dma_free_tx_packet( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ) { int status = 0; /*------------------------------------------------------------------------*/ if( *desc == NULL ) { DBG_PRINT( "Null descriptor\n" ); status = -EFAULT; } //;?the "limited" NDIS strategy, assuming a frame consists ALWAYS out of 2 //descriptors, make this robust if( status == 0 && (*desc)->next_desc_addr ) { status = wl_pci_dma_free_desc_and_buf( pdev, lp, &(*desc)->next_desc_addr ); } if( status == 0 ) { status = wl_pci_dma_free_desc_and_buf( pdev, lp, desc ); } return status; } // wl_pci_dma_free_tx_packet /*============================================================================*/ /******************************************************************************* * wl_pci_dma_alloc_rx_packet() ******************************************************************************* * * DESCRIPTION: * * Allocates a single Rx packet, consisting of two descriptors and one * contiguous buffer. THe buffer starts with the hermes-specific header. * One descriptor points at the start, the other at offset 0x3a of the * buffer. * * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * desc - a pointer which will reference the descriptor to be alloc'd. 
* * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_dma_alloc_rx_packet( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ) { int status = 0; DESC_STRCT *p; /*------------------------------------------------------------------------*/ // if( desc == NULL ) { // status = -EFAULT; // } // //;?the "limited" NDIS strategy, assuming a frame consists ALWAYS out of 2 // //descriptors, make this robust // if( status == 0 ) { // status = wl_pci_dma_alloc_desc( pdev, lp, desc ); // } // if( status == 0 ) { // status = wl_pci_dma_alloc_buf( pdev, lp, *desc, HCF_MAX_PACKET_SIZE ); // } // if( status == 0 ) { // status = wl_pci_dma_alloc_desc( pdev, lp, &p ); // } // if( status == 0 ) { // /* Size of 1st descriptor becomes 0x3a bytes */ // SET_BUF_SIZE( *desc, HCF_DMA_RX_BUF1_SIZE ); // // /* Make 2nd descriptor point at offset 0x3a of the buffer */ // SET_BUF_SIZE( p, ( HCF_MAX_PACKET_SIZE - HCF_DMA_RX_BUF1_SIZE )); // p->buf_addr = (*desc)->buf_addr + HCF_DMA_RX_BUF1_SIZE; // p->buf_phys_addr = (*desc)->buf_phys_addr + HCF_DMA_RX_BUF1_SIZE; // p->next_desc_addr = NULL; // // /* Chain 2nd descriptor to 1st descriptor */ // (*desc)->next_desc_addr = p; // (*desc)->next_desc_phys_addr = p->desc_phys_addr; // } return status; } // wl_pci_dma_alloc_rx_packet /*============================================================================*/ /******************************************************************************* * wl_pci_dma_free_rx_packet() ******************************************************************************* * * DESCRIPTION: * * Frees a single Rx packet, described in the corresponding alloc function. * * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * desc - a pointer which will reference the descriptor to be alloc'd. 
* * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_dma_free_rx_packet( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ) { int status = 0; DESC_STRCT *p; /*------------------------------------------------------------------------*/ if( *desc == NULL ) { status = -EFAULT; } if( status == 0 ) { p = (*desc)->next_desc_addr; /* Free the 2nd descriptor */ if( p != NULL ) { p->buf_addr = NULL; p->buf_phys_addr = 0; status = wl_pci_dma_free_desc( pdev, lp, &p ); } } /* Free the buffer and 1st descriptor */ if( status == 0 ) { SET_BUF_SIZE( *desc, HCF_MAX_PACKET_SIZE ); status = wl_pci_dma_free_desc_and_buf( pdev, lp, desc ); } return status; } // wl_pci_dma_free_rx_packet /*============================================================================*/ /******************************************************************************* * wl_pci_dma_alloc_desc_and_buf() ******************************************************************************* * * DESCRIPTION: * * Allocates a DMA descriptor and buffer, and associates them with one * another. 
* * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * desc - a pointer which will reference the descriptor to be alloc'd * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_dma_alloc_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc, int size ) { int status = 0; /*------------------------------------------------------------------------*/ // if( desc == NULL ) { // status = -EFAULT; // } // if( status == 0 ) { // status = wl_pci_dma_alloc_desc( pdev, lp, desc ); // // if( status == 0 ) { // status = wl_pci_dma_alloc_buf( pdev, lp, *desc, size ); // } // } return status; } // wl_pci_dma_alloc_desc_and_buf /*============================================================================*/ /******************************************************************************* * wl_pci_dma_free_desc_and_buf() ******************************************************************************* * * DESCRIPTION: * * Frees a DMA descriptor and associated buffer. 
* * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * desc - a pointer which will reference the descriptor to be alloc'd * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_dma_free_desc_and_buf( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ) { int status = 0; /*------------------------------------------------------------------------*/ if( desc == NULL ) { status = -EFAULT; } if( status == 0 && *desc == NULL ) { status = -EFAULT; } if( status == 0 ) { status = wl_pci_dma_free_buf( pdev, lp, *desc ); if( status == 0 ) { status = wl_pci_dma_free_desc( pdev, lp, desc ); } } return status; } // wl_pci_dma_free_desc_and_buf /*============================================================================*/ /******************************************************************************* * wl_pci_dma_alloc_desc() ******************************************************************************* * * DESCRIPTION: * * Allocates one DMA descriptor in cache coherent memory. 
* * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_dma_alloc_desc( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ) { // int status = 0; // dma_addr_t pa; // /*------------------------------------------------------------------------*/ // // DBG_FUNC( "wl_pci_dma_alloc_desc" ); // DBG_ENTER( DbgInfo ); // // if( desc == NULL ) { // status = -EFAULT; // } // if( status == 0 ) { // *desc = pci_alloc_consistent( pdev, sizeof( DESC_STRCT ), &pa ); // } // if( *desc == NULL ) { // DBG_ERROR( DbgInfo, "pci_alloc_consistent() failed\n" ); // status = -ENOMEM; // } else { // memset( *desc, 0, sizeof( DESC_STRCT )); // (*desc)->desc_phys_addr = cpu_to_le32( pa ); // } // DBG_LEAVE( DbgInfo ); // return status; } // wl_pci_dma_alloc_desc /*============================================================================*/ /******************************************************************************* * wl_pci_dma_free_desc() ******************************************************************************* * * DESCRIPTION: * * Frees one DMA descriptor in cache coherent memory. 
* * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_dma_free_desc( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT **desc ) { int status = 0; /*------------------------------------------------------------------------*/ if( *desc == NULL ) { status = -EFAULT; } if( status == 0 ) { pci_free_consistent( pdev, sizeof( DESC_STRCT ), *desc, (*desc)->desc_phys_addr ); } *desc = NULL; return status; } // wl_pci_dma_free_desc /*============================================================================*/ /******************************************************************************* * wl_pci_dma_alloc_buf() ******************************************************************************* * * DESCRIPTION: * * Allocates one DMA buffer in cache coherent memory, and associates a DMA * descriptor with this buffer. 
* * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_dma_alloc_buf( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT *desc, int size ) { int status = 0; dma_addr_t pa; /*------------------------------------------------------------------------*/ // DBG_FUNC( "wl_pci_dma_alloc_buf" ); // DBG_ENTER( DbgInfo ); // // if( desc == NULL ) { // status = -EFAULT; // } // if( status == 0 && desc->buf_addr != NULL ) { // status = -EFAULT; // } // if( status == 0 ) { // desc->buf_addr = pci_alloc_consistent( pdev, size, &pa ); // } // if( desc->buf_addr == NULL ) { // DBG_ERROR( DbgInfo, "pci_alloc_consistent() failed\n" ); // status = -ENOMEM; // } else { // desc->buf_phys_addr = cpu_to_le32( pa ); // SET_BUF_SIZE( desc, size ); // } // DBG_LEAVE( DbgInfo ); return status; } // wl_pci_dma_alloc_buf /*============================================================================*/ /******************************************************************************* * wl_pci_dma_free_buf() ******************************************************************************* * * DESCRIPTION: * * Allocates one DMA buffer in cache coherent memory, and associates a DMA * descriptor with this buffer. 
* * PARAMETERS: * * pdev - a pointer to the device's pci_dev structure * lp - the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ int wl_pci_dma_free_buf( struct pci_dev *pdev, struct wl_private *lp, DESC_STRCT *desc ) { int status = 0; /*------------------------------------------------------------------------*/ if( desc == NULL ) { status = -EFAULT; } if( status == 0 && desc->buf_addr == NULL ) { status = -EFAULT; } if( status == 0 ) { pci_free_consistent( pdev, GET_BUF_SIZE( desc ), desc->buf_addr, desc->buf_phys_addr ); desc->buf_addr = 0; desc->buf_phys_addr = 0; SET_BUF_SIZE( desc, 0 ); } return status; } // wl_pci_dma_free_buf /*============================================================================*/ /******************************************************************************* * wl_pci_dma_hcf_supply() ******************************************************************************* * * DESCRIPTION: * * Supply HCF with DMA-related resources. These consist of: * - buffers and descriptors for receive purposes * - one 'reclaim' descriptor for the transmit path, used to fulfill a * certain H25 DMA engine requirement * - one 'reclaim' descriptor for the receive path, used to fulfill a * certain H25 DMA engine requirement * * This function is called at start-of-day or at re-initialization. 
* * PARAMETERS: * * lp - the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ void wl_pci_dma_hcf_supply( struct wl_private *lp ) { int i; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_pci_dma_hcf_supply" ); DBG_ENTER( DbgInfo ); //if( lp->dma.status == 0 ); //{ /* Hand over the Rx/Tx reclaim descriptors to the HCF */ if( lp->dma.tx_reclaim_desc ) { DBG_PRINT( "lp->dma.tx_reclaim_desc: 0x%p\n", lp->dma.tx_reclaim_desc ); hcf_dma_tx_put( &lp->hcfCtx, lp->dma.tx_reclaim_desc, 0 ); lp->dma.tx_reclaim_desc = NULL; DBG_PRINT( "lp->dma.tx_reclaim_desc: 0x%p\n", lp->dma.tx_reclaim_desc ); } if( lp->dma.rx_reclaim_desc ) { DBG_PRINT( "lp->dma.rx_reclaim_desc: 0x%p\n", lp->dma.rx_reclaim_desc ); hcf_dma_rx_put( &lp->hcfCtx, lp->dma.rx_reclaim_desc ); lp->dma.rx_reclaim_desc = NULL; DBG_PRINT( "lp->dma.rx_reclaim_desc: 0x%p\n", lp->dma.rx_reclaim_desc ); } /* Hand over the Rx descriptor chain to the HCF */ for( i = 0; i < NUM_RX_DESC; i++ ) { DBG_PRINT( "lp->dma.rx_packet[%d]: 0x%p\n", i, lp->dma.rx_packet[i] ); hcf_dma_rx_put( &lp->hcfCtx, lp->dma.rx_packet[i] ); lp->dma.rx_packet[i] = NULL; DBG_PRINT( "lp->dma.rx_packet[%d]: 0x%p\n", i, lp->dma.rx_packet[i] ); } //} DBG_LEAVE( DbgInfo ); return; } // wl_pci_dma_hcf_supply /*============================================================================*/ /******************************************************************************* * wl_pci_dma_hcf_reclaim() ******************************************************************************* * * DESCRIPTION: * * Return DMA-related resources from the HCF. 
These consist of: * - buffers and descriptors for receive purposes * - buffers and descriptors for transmit purposes * - one 'reclaim' descriptor for the transmit path, used to fulfill a * certain H25 DMA engine requirement * - one 'reclaim' descriptor for the receive path, used to fulfill a * certain H25 DMA engine requirement * * This function is called at end-of-day or at re-initialization. * * PARAMETERS: * * lp - the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ void wl_pci_dma_hcf_reclaim( struct wl_private *lp ) { int i; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_pci_dma_hcf_reclaim" ); DBG_ENTER( DbgInfo ); wl_pci_dma_hcf_reclaim_rx( lp ); for( i = 0; i < NUM_RX_DESC; i++ ) { DBG_PRINT( "rx_packet[%d] 0x%p\n", i, lp->dma.rx_packet[i] ); // if( lp->dma.rx_packet[i] == NULL ) { // DBG_PRINT( "wl_pci_dma_hcf_reclaim: rx_packet[%d] NULL\n", i ); // } } wl_pci_dma_hcf_reclaim_tx( lp ); for( i = 0; i < NUM_TX_DESC; i++ ) { DBG_PRINT( "tx_packet[%d] 0x%p\n", i, lp->dma.tx_packet[i] ); // if( lp->dma.tx_packet[i] == NULL ) { // DBG_PRINT( "wl_pci_dma_hcf_reclaim: tx_packet[%d] NULL\n", i ); // } } DBG_LEAVE( DbgInfo ); return; } // wl_pci_dma_hcf_reclaim /*============================================================================*/ /******************************************************************************* * wl_pci_dma_hcf_reclaim_rx() ******************************************************************************* * * DESCRIPTION: * * Reclaim Rx packets that have already been processed by the HCF. 
* * PARAMETERS: * * lp - the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ void wl_pci_dma_hcf_reclaim_rx( struct wl_private *lp ) { int i; DESC_STRCT *p; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_pci_dma_hcf_reclaim_rx" ); DBG_ENTER( DbgInfo ); //if( lp->dma.status == 0 ) //{ while ( ( p = hcf_dma_rx_get( &lp->hcfCtx ) ) != NULL ) { if( p && p->buf_addr == NULL ) { /* A reclaim descriptor is being given back by the HCF. Reclaim descriptors have a NULL buf_addr */ lp->dma.rx_reclaim_desc = p; DBG_PRINT( "reclaim_descriptor: 0x%p\n", p ); continue; } for( i = 0; i < NUM_RX_DESC; i++ ) { if( lp->dma.rx_packet[i] == NULL ) { break; } } /* An Rx buffer descriptor is being given back by the HCF */ lp->dma.rx_packet[i] = p; lp->dma.rx_rsc_ind++; DBG_PRINT( "rx_packet[%d] 0x%p\n", i, lp->dma.rx_packet[i] ); } //} DBG_LEAVE( DbgInfo ); } // wl_pci_dma_hcf_reclaim_rx /*============================================================================*/ /******************************************************************************* * wl_pci_dma_get_tx_packet() ******************************************************************************* * * DESCRIPTION: * * Obtains a Tx descriptor from the chain to use for Tx. * * PARAMETERS: * * lp - a pointer to the device's wl_private structure. 
* * RETURNS: * * A pointer to the retrieved descriptor * ******************************************************************************/ DESC_STRCT * wl_pci_dma_get_tx_packet( struct wl_private *lp ) { int i; DESC_STRCT *desc = NULL; /*------------------------------------------------------------------------*/ for( i = 0; i < NUM_TX_DESC; i++ ) { if( lp->dma.tx_packet[i] ) { break; } } if( i != NUM_TX_DESC ) { desc = lp->dma.tx_packet[i]; lp->dma.tx_packet[i] = NULL; lp->dma.tx_rsc_ind--; memset( desc->buf_addr, 0, HCF_DMA_TX_BUF1_SIZE ); } return desc; } // wl_pci_dma_get_tx_packet /*============================================================================*/ /******************************************************************************* * wl_pci_dma_put_tx_packet() ******************************************************************************* * * DESCRIPTION: * * Returns a Tx descriptor to the chain. * * PARAMETERS: * * lp - a pointer to the device's wl_private structure. * desc - a pointer to the descriptor to return. * * RETURNS: * * N/A * ******************************************************************************/ void wl_pci_dma_put_tx_packet( struct wl_private *lp, DESC_STRCT *desc ) { int i; /*------------------------------------------------------------------------*/ for( i = 0; i < NUM_TX_DESC; i++ ) { if( lp->dma.tx_packet[i] == NULL ) { break; } } if( i != NUM_TX_DESC ) { lp->dma.tx_packet[i] = desc; lp->dma.tx_rsc_ind++; } } // wl_pci_dma_put_tx_packet /*============================================================================*/ /******************************************************************************* * wl_pci_dma_hcf_reclaim_tx() ******************************************************************************* * * DESCRIPTION: * * Reclaim Tx packets that have either been processed by the HCF due to a * port disable or a Tx completion. 
* * PARAMETERS: * * lp - the device's private adapter structure * * RETURNS: * * 0 on success * errno value otherwise * ******************************************************************************/ void wl_pci_dma_hcf_reclaim_tx( struct wl_private *lp ) { int i; DESC_STRCT *p; /*------------------------------------------------------------------------*/ DBG_FUNC( "wl_pci_dma_hcf_reclaim_tx" ); DBG_ENTER( DbgInfo ); //if( lp->dma.status == 0 ) //{ while ( ( p = hcf_dma_tx_get( &lp->hcfCtx ) ) != NULL ) { if( p != NULL && p->buf_addr == NULL ) { /* A Reclaim descriptor is being given back by the HCF. Reclaim descriptors have a NULL buf_addr */ lp->dma.tx_reclaim_desc = p; DBG_PRINT( "reclaim_descriptor: 0x%p\n", p ); continue; } for( i = 0; i < NUM_TX_DESC; i++ ) { if( lp->dma.tx_packet[i] == NULL ) { break; } } /* An Rx buffer descriptor is being given back by the HCF */ lp->dma.tx_packet[i] = p; lp->dma.tx_rsc_ind++; DBG_PRINT( "tx_packet[%d] 0x%p\n", i, lp->dma.tx_packet[i] ); } //} if( lp->netif_queue_on == FALSE ) { netif_wake_queue( lp->dev ); WL_WDS_NETIF_WAKE_QUEUE( lp ); lp->netif_queue_on = TRUE; } DBG_LEAVE( DbgInfo ); return; } // wl_pci_dma_hcf_reclaim_tx /*============================================================================*/ #endif // ENABLE_DMA
gpl-2.0
AriesVE-DevCon-TEAM/samsung-kernel-msm7x30
drivers/sh/intc/dynamic.c
5055
1614
/* * Dynamic IRQ management * * Copyright (C) 2010 Paul Mundt * * Modelled after arch/x86/kernel/apic/io_apic.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #define pr_fmt(fmt) "intc: " fmt #include <linux/irq.h> #include <linux/bitmap.h> #include <linux/spinlock.h> #include <linux/module.h> #include "internals.h" /* only for activate_irq() damage.. */ /* * The IRQ bitmap provides a global map of bound IRQ vectors for a * given platform. Allocation of IRQs are either static through the CPU * vector map, or dynamic in the case of board mux vectors or MSI. * * As this is a central point for all IRQ controllers on the system, * each of the available sources are mapped out here. This combined with * sparseirq makes it quite trivial to keep the vector map tightly packed * when dynamically creating IRQs, as well as tying in to otherwise * unused irq_desc positions in the sparse array. */ /* * Dynamic IRQ allocation and deallocation */ unsigned int create_irq_nr(unsigned int irq_want, int node) { int irq = irq_alloc_desc_at(irq_want, node); if (irq < 0) return 0; activate_irq(irq); return irq; } int create_irq(void) { int irq = irq_alloc_desc(numa_node_id()); if (irq >= 0) activate_irq(irq); return irq; } void destroy_irq(unsigned int irq) { irq_free_desc(irq); } void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs) { int i; for (i = 0; i < nr_vecs; i++) irq_reserve_irq(evt2irq(vectors[i].vect)); }
gpl-2.0
Snuzzo/funky_jewel
arch/arm/mach-ux500/board-mop500-stuib.c
7871
4759
/* * Copyright (C) ST-Ericsson SA 2010 * * License terms: GNU General Public License (GPL), version 2 */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/mfd/stmpe.h> #include <linux/input/bu21013.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/input/matrix_keypad.h> #include <asm/mach-types.h> #include "board-mop500.h" /* STMPE/SKE keypad use this key layout */ static const unsigned int mop500_keymap[] = { KEY(2, 5, KEY_END), KEY(4, 1, KEY_POWER), KEY(3, 5, KEY_VOLUMEDOWN), KEY(1, 3, KEY_3), KEY(5, 2, KEY_RIGHT), KEY(5, 0, KEY_9), KEY(0, 5, KEY_MENU), KEY(7, 6, KEY_ENTER), KEY(4, 5, KEY_0), KEY(6, 7, KEY_2), KEY(3, 4, KEY_UP), KEY(3, 3, KEY_DOWN), KEY(6, 4, KEY_SEND), KEY(6, 2, KEY_BACK), KEY(4, 2, KEY_VOLUMEUP), KEY(5, 5, KEY_1), KEY(4, 3, KEY_LEFT), KEY(3, 2, KEY_7), }; static const struct matrix_keymap_data mop500_keymap_data = { .keymap = mop500_keymap, .keymap_size = ARRAY_SIZE(mop500_keymap), }; /* * STMPE1601 */ static struct stmpe_keypad_platform_data stmpe1601_keypad_data = { .debounce_ms = 64, .scan_count = 8, .no_autorepeat = true, .keymap_data = &mop500_keymap_data, }; static struct stmpe_platform_data stmpe1601_data = { .id = 1, .blocks = STMPE_BLOCK_KEYPAD, .irq_trigger = IRQF_TRIGGER_FALLING, .irq_base = MOP500_STMPE1601_IRQ(0), .keypad = &stmpe1601_keypad_data, .autosleep = true, .autosleep_timeout = 1024, }; static struct i2c_board_info __initdata mop500_i2c0_devices_stuib[] = { { I2C_BOARD_INFO("stmpe1601", 0x40), .irq = NOMADIK_GPIO_TO_IRQ(218), .platform_data = &stmpe1601_data, .flags = I2C_CLIENT_WAKE, }, }; /* * BU21013 ROHM touchscreen interface on the STUIBs */ /* tracks number of bu21013 devices being enabled */ static int bu21013_devices; #define TOUCH_GPIO_PIN 84 #define TOUCH_XMAX 384 #define TOUCH_YMAX 704 #define PRCMU_CLOCK_OCR 0x1CC #define TSC_EXT_CLOCK_9_6MHZ 0x840000 /** * bu21013_gpio_board_init : configures the touch panel. 
* @reset_pin: reset pin number * This function can be used to configures * the voltage and reset the touch panel controller. */ static int bu21013_gpio_board_init(int reset_pin) { int retval = 0; bu21013_devices++; if (bu21013_devices == 1) { retval = gpio_request(reset_pin, "touchp_reset"); if (retval) { printk(KERN_ERR "Unable to request gpio reset_pin"); return retval; } retval = gpio_direction_output(reset_pin, 1); if (retval < 0) { printk(KERN_ERR "%s: gpio direction failed\n", __func__); return retval; } } return retval; } /** * bu21013_gpio_board_exit : deconfigures the touch panel controller * @reset_pin: reset pin number * This function can be used to deconfigures the chip selection * for touch panel controller. */ static int bu21013_gpio_board_exit(int reset_pin) { int retval = 0; if (bu21013_devices == 1) { retval = gpio_direction_output(reset_pin, 0); if (retval < 0) { printk(KERN_ERR "%s: gpio direction failed\n", __func__); return retval; } gpio_set_value(reset_pin, 0); } bu21013_devices--; return retval; } /** * bu21013_read_pin_val : get the interrupt pin value * This function can be used to get the interrupt pin value for touch panel * controller. 
*/ static int bu21013_read_pin_val(void) { return gpio_get_value(TOUCH_GPIO_PIN); } static struct bu21013_platform_device tsc_plat_device = { .cs_en = bu21013_gpio_board_init, .cs_dis = bu21013_gpio_board_exit, .irq_read_val = bu21013_read_pin_val, .irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN), .touch_x_max = TOUCH_XMAX, .touch_y_max = TOUCH_YMAX, .ext_clk = false, .x_flip = false, .y_flip = true, }; static struct bu21013_platform_device tsc_plat2_device = { .cs_en = bu21013_gpio_board_init, .cs_dis = bu21013_gpio_board_exit, .irq_read_val = bu21013_read_pin_val, .irq = NOMADIK_GPIO_TO_IRQ(TOUCH_GPIO_PIN), .touch_x_max = TOUCH_XMAX, .touch_y_max = TOUCH_YMAX, .ext_clk = false, .x_flip = false, .y_flip = true, }; static struct i2c_board_info __initdata u8500_i2c3_devices_stuib[] = { { I2C_BOARD_INFO("bu21013_tp", 0x5C), .platform_data = &tsc_plat_device, }, { I2C_BOARD_INFO("bu21013_tp", 0x5D), .platform_data = &tsc_plat2_device, }, }; void __init mop500_stuib_init(void) { if (machine_is_hrefv60()) { tsc_plat_device.cs_pin = HREFV60_TOUCH_RST_GPIO; tsc_plat2_device.cs_pin = HREFV60_TOUCH_RST_GPIO; } else { tsc_plat_device.cs_pin = GPIO_BU21013_CS; tsc_plat2_device.cs_pin = GPIO_BU21013_CS; } mop500_uib_i2c_add(0, mop500_i2c0_devices_stuib, ARRAY_SIZE(mop500_i2c0_devices_stuib)); mop500_uib_i2c_add(3, u8500_i2c3_devices_stuib, ARRAY_SIZE(u8500_i2c3_devices_stuib)); }
gpl-2.0
Ander-Alvarez/android_kernel_motorola_msm8226
arch/h8300/kernel/h8300_ksyms.c
9407
2653
#include <linux/module.h> #include <linux/linkage.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/user.h> #include <linux/elfcore.h> #include <linux/in6.h> #include <linux/interrupt.h> #include <asm/setup.h> #include <asm/pgalloc.h> #include <asm/irq.h> #include <asm/io.h> #include <asm/checksum.h> #include <asm/current.h> #include <asm/gpio.h> //asmlinkage long long __ashrdi3 (long long, int); //asmlinkage long long __lshrdi3 (long long, int); extern char h8300_debug_device[]; /* platform dependent support */ EXPORT_SYMBOL(strnlen); EXPORT_SYMBOL(strrchr); EXPORT_SYMBOL(strstr); EXPORT_SYMBOL(strchr); EXPORT_SYMBOL(strcat); EXPORT_SYMBOL(strlen); EXPORT_SYMBOL(strcmp); EXPORT_SYMBOL(strncmp); EXPORT_SYMBOL(ip_fast_csum); EXPORT_SYMBOL(kernel_thread); EXPORT_SYMBOL(enable_irq); EXPORT_SYMBOL(disable_irq); /* Networking helper routines. */ EXPORT_SYMBOL(csum_partial_copy_nocheck); /* The following are special because they're not called explicitly (the C compiler generates them). Fortunately, their interface isn't gonna change any time soon now, so it's OK to leave it out of version control. */ //EXPORT_SYMBOL(__ashrdi3); //EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcmp); EXPORT_SYMBOL(memscan); EXPORT_SYMBOL(memmove); /* * libgcc functions - functions that are used internally by the * compiler... (prototypes are not correct though, but that * doesn't really matter since they're not versioned). 
*/ extern void __gcc_bcmp(void); extern void __ashldi3(void); extern void __ashrdi3(void); extern void __cmpdi2(void); extern void __divdi3(void); extern void __divsi3(void); extern void __lshrdi3(void); extern void __moddi3(void); extern void __modsi3(void); extern void __muldi3(void); extern void __mulsi3(void); extern void __negdi2(void); extern void __ucmpdi2(void); extern void __udivdi3(void); extern void __udivmoddi4(void); extern void __udivsi3(void); extern void __umoddi3(void); extern void __umodsi3(void); /* gcc lib functions */ EXPORT_SYMBOL(__gcc_bcmp); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__cmpdi2); EXPORT_SYMBOL(__divdi3); EXPORT_SYMBOL(__divsi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__moddi3); EXPORT_SYMBOL(__modsi3); EXPORT_SYMBOL(__muldi3); EXPORT_SYMBOL(__mulsi3); EXPORT_SYMBOL(__negdi2); EXPORT_SYMBOL(__ucmpdi2); EXPORT_SYMBOL(__udivdi3); EXPORT_SYMBOL(__udivmoddi4); EXPORT_SYMBOL(__udivsi3); EXPORT_SYMBOL(__umoddi3); EXPORT_SYMBOL(__umodsi3); EXPORT_SYMBOL(h8300_reserved_gpio); EXPORT_SYMBOL(h8300_free_gpio); EXPORT_SYMBOL(h8300_set_gpio_dir);
gpl-2.0
HighwindONE/android_kernel_motorola_msm8226
arch/cris/kernel/crisksyms.c
9407
1679
#include <linux/module.h> #include <linux/user.h> #include <linux/elfcore.h> #include <linux/sched.h> #include <linux/in6.h> #include <linux/interrupt.h> #include <linux/pm.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/tty.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/checksum.h> #include <asm/io.h> #include <asm/delay.h> #include <asm/irq.h> #include <asm/pgtable.h> #include <asm/fasttimer.h> extern unsigned long get_cmos_time(void); extern void __Udiv(void); extern void __Umod(void); extern void __Div(void); extern void __Mod(void); extern void __ashldi3(void); extern void __ashrdi3(void); extern void __lshrdi3(void); extern void __negdi2(void); extern void iounmap(volatile void * __iomem); /* Platform dependent support */ EXPORT_SYMBOL(kernel_thread); EXPORT_SYMBOL(get_cmos_time); EXPORT_SYMBOL(loops_per_usec); /* Math functions */ EXPORT_SYMBOL(__Udiv); EXPORT_SYMBOL(__Umod); EXPORT_SYMBOL(__Div); EXPORT_SYMBOL(__Mod); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__negdi2); /* Memory functions */ EXPORT_SYMBOL(__ioremap); EXPORT_SYMBOL(iounmap); /* Userspace access functions */ EXPORT_SYMBOL(__copy_user_zeroing); EXPORT_SYMBOL(__copy_user); #undef memcpy #undef memset extern void * memset(void *, int, __kernel_size_t); extern void * memcpy(void *, const void *, __kernel_size_t); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memset); #ifdef CONFIG_ETRAX_FAST_TIMER /* Fast timer functions */ EXPORT_SYMBOL(fast_timer_list); EXPORT_SYMBOL(start_one_shot_timer); EXPORT_SYMBOL(del_fast_timer); EXPORT_SYMBOL(schedule_usleep); #endif EXPORT_SYMBOL(csum_partial);
gpl-2.0
redmi/android_kernel_HM2014811
net/netfilter/xt_quota.c
9407
1945
/* * netfilter module to enforce network quotas * * Sam Johnston <samj@samj.net> */ #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_quota.h> #include <linux/module.h> struct xt_quota_priv { spinlock_t lock; uint64_t quota; }; MODULE_LICENSE("GPL"); MODULE_AUTHOR("Sam Johnston <samj@samj.net>"); MODULE_DESCRIPTION("Xtables: countdown quota match"); MODULE_ALIAS("ipt_quota"); MODULE_ALIAS("ip6t_quota"); static bool quota_mt(const struct sk_buff *skb, struct xt_action_param *par) { struct xt_quota_info *q = (void *)par->matchinfo; struct xt_quota_priv *priv = q->master; bool ret = q->flags & XT_QUOTA_INVERT; spin_lock_bh(&priv->lock); if (priv->quota >= skb->len) { priv->quota -= skb->len; ret = !ret; } else { /* we do not allow even small packets from now on */ priv->quota = 0; } spin_unlock_bh(&priv->lock); return ret; } static int quota_mt_check(const struct xt_mtchk_param *par) { struct xt_quota_info *q = par->matchinfo; if (q->flags & ~XT_QUOTA_MASK) return -EINVAL; q->master = kmalloc(sizeof(*q->master), GFP_KERNEL); if (q->master == NULL) return -ENOMEM; spin_lock_init(&q->master->lock); q->master->quota = q->quota; return 0; } static void quota_mt_destroy(const struct xt_mtdtor_param *par) { const struct xt_quota_info *q = par->matchinfo; kfree(q->master); } static struct xt_match quota_mt_reg __read_mostly = { .name = "quota", .revision = 0, .family = NFPROTO_UNSPEC, .match = quota_mt, .checkentry = quota_mt_check, .destroy = quota_mt_destroy, .matchsize = sizeof(struct xt_quota_info), .me = THIS_MODULE, }; static int __init quota_mt_init(void) { return xt_register_match(&quota_mt_reg); } static void __exit quota_mt_exit(void) { xt_unregister_match(&quota_mt_reg); } module_init(quota_mt_init); module_exit(quota_mt_exit);
gpl-2.0
qqzwc/JBX_Kernel
drivers/infiniband/hw/ehca/ehca_mcast.c
13503
4589
/* * IBM eServer eHCA Infiniband device driver for Linux on POWER * * mcast functions * * Authors: Khadija Souissi <souissik@de.ibm.com> * Waleri Fomin <fomin@de.ibm.com> * Reinhard Ernst <rernst@de.ibm.com> * Hoang-Nam Nguyen <hnguyen@de.ibm.com> * Heiko J Schick <schickhj@de.ibm.com> * * Copyright (c) 2005 IBM Corporation * * All rights reserved. * * This source code is distributed under a dual license of GPL v2.0 and OpenIB * BSD. * * OpenIB BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/module.h> #include <linux/err.h> #include "ehca_classes.h" #include "ehca_tools.h" #include "ehca_qes.h" #include "ehca_iverbs.h" #include "hcp_if.h" #define MAX_MC_LID 0xFFFE #define MIN_MC_LID 0xC000 /* Multicast limits */ #define EHCA_VALID_MULTICAST_GID(gid) ((gid)[0] == 0xFF) #define EHCA_VALID_MULTICAST_LID(lid) \ (((lid) >= MIN_MC_LID) && ((lid) <= MAX_MC_LID)) int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); struct ehca_shca *shca = container_of(ibqp->device, struct ehca_shca, ib_device); union ib_gid my_gid; u64 subnet_prefix, interface_id, h_ret; if (ibqp->qp_type != IB_QPT_UD) { ehca_err(ibqp->device, "invalid qp_type=%x", ibqp->qp_type); return -EINVAL; } if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) { ehca_err(ibqp->device, "invalid mulitcast gid"); return -EINVAL; } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) { ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid); return -EINVAL; } memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); interface_id = be64_to_cpu(my_gid.global.interface_id); h_ret = hipz_h_attach_mcqp(shca->ipz_hca_handle, my_qp->ipz_qp_handle, my_qp->galpas.kernel, lid, subnet_prefix, interface_id); if (h_ret != H_SUCCESS) ehca_err(ibqp->device, "ehca_qp=%p qp_num=%x hipz_h_attach_mcqp() failed " "h_ret=%lli", my_qp, ibqp->qp_num, h_ret); return ehca2ib_return_code(h_ret); } int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid) { struct ehca_qp *my_qp = container_of(ibqp, struct ehca_qp, ib_qp); struct ehca_shca *shca = container_of(ibqp->pd->device, struct ehca_shca, ib_device); union ib_gid my_gid; u64 subnet_prefix, interface_id, h_ret; if (ibqp->qp_type != IB_QPT_UD) { ehca_err(ibqp->device, "invalid qp_type %x", ibqp->qp_type); return -EINVAL; } if (!(EHCA_VALID_MULTICAST_GID(gid->raw))) { ehca_err(ibqp->device, "invalid mulitcast gid"); return 
-EINVAL; } else if ((lid < MIN_MC_LID) || (lid > MAX_MC_LID)) { ehca_err(ibqp->device, "invalid mulitcast lid=%x", lid); return -EINVAL; } memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid)); subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix); interface_id = be64_to_cpu(my_gid.global.interface_id); h_ret = hipz_h_detach_mcqp(shca->ipz_hca_handle, my_qp->ipz_qp_handle, my_qp->galpas.kernel, lid, subnet_prefix, interface_id); if (h_ret != H_SUCCESS) ehca_err(ibqp->device, "ehca_qp=%p qp_num=%x hipz_h_detach_mcqp() failed " "h_ret=%lli", my_qp, ibqp->qp_num, h_ret); return ehca2ib_return_code(h_ret); }
gpl-2.0
Tomoms/android_kernel_sony_msm8x60
arch/mips/sgi-ip32/ip32-memory.c
14015
1131
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2003 Keith M Wesolowski * Copyright (C) 2005 Ilya A. Volynets (Total Knowledge) */ #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/mm.h> #include <asm/ip32/crime.h> #include <asm/bootinfo.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/pgalloc.h> extern void crime_init(void); void __init prom_meminit(void) { u64 base, size; int bank; crime_init(); for (bank=0; bank < CRIME_MAXBANKS; bank++) { u64 bankctl = crime->bank_ctrl[bank]; base = (bankctl & CRIME_MEM_BANK_CONTROL_ADDR) << 25; if (bank != 0 && base == 0) continue; size = (bankctl & CRIME_MEM_BANK_CONTROL_SDRAM_SIZE) ? 128 : 32; size <<= 20; if (base + size > (256 << 20)) base += CRIME_HI_MEM_BASE; printk("CRIME MC: bank %u base 0x%016Lx size %LuMiB\n", bank, base, size >> 20); add_memory_region(base, size, BOOT_MEM_RAM); } } void __init prom_free_prom_memory(void) { }
gpl-2.0
auras76/aur-kernel-XZxx
tools/perf/util/scripting-engines/trace-event-perl.c
1728
15963
/* * trace-event-perl. Feed perf script events to an embedded Perl interpreter. * * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <errno.h> #include "../util.h" #include <EXTERN.h> #include <perl.h> #include "../../perf.h" #include "../thread.h" #include "../event.h" #include "../trace-event.h" #include "../evsel.h" void boot_Perf__Trace__Context(pTHX_ CV *cv); void boot_DynaLoader(pTHX_ CV *cv); typedef PerlInterpreter * INTERP; void xs_init(pTHX); void xs_init(pTHX) { const char *file = __FILE__; dXSUB_SYS; newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context, file); newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file); } INTERP my_perl; #define FTRACE_MAX_EVENT \ ((1 << (sizeof(unsigned short) * 8)) - 1) struct event_format *events[FTRACE_MAX_EVENT]; extern struct scripting_context *scripting_context; static char *cur_field_name; static int zero_flag_atom; static void define_symbolic_value(const char *ev_name, const char *field_name, const char *field_value, const char *field_str) { unsigned long long value; dSP; value = eval_flag(field_value); ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); 
XPUSHs(sv_2mortal(newSVuv(value))); XPUSHs(sv_2mortal(newSVpv(field_str, 0))); PUTBACK; if (get_cv("main::define_symbolic_value", 0)) call_pv("main::define_symbolic_value", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_symbolic_values(struct print_flag_sym *field, const char *ev_name, const char *field_name) { define_symbolic_value(ev_name, field_name, field->value, field->str); if (field->next) define_symbolic_values(field->next, ev_name, field_name); } static void define_symbolic_field(const char *ev_name, const char *field_name) { dSP; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); PUTBACK; if (get_cv("main::define_symbolic_field", 0)) call_pv("main::define_symbolic_field", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_flag_value(const char *ev_name, const char *field_name, const char *field_value, const char *field_str) { unsigned long long value; dSP; value = eval_flag(field_value); ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); XPUSHs(sv_2mortal(newSVuv(value))); XPUSHs(sv_2mortal(newSVpv(field_str, 0))); PUTBACK; if (get_cv("main::define_flag_value", 0)) call_pv("main::define_flag_value", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_flag_values(struct print_flag_sym *field, const char *ev_name, const char *field_name) { define_flag_value(ev_name, field_name, field->value, field->str); if (field->next) define_flag_values(field->next, ev_name, field_name); } static void define_flag_field(const char *ev_name, const char *field_name, const char *delim) { dSP; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); XPUSHs(sv_2mortal(newSVpv(delim, 0))); PUTBACK; if (get_cv("main::define_flag_field", 0)) call_pv("main::define_flag_field", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void 
define_event_symbols(struct event_format *event, const char *ev_name, struct print_arg *args) { switch (args->type) { case PRINT_NULL: break; case PRINT_ATOM: define_flag_value(ev_name, cur_field_name, "0", args->atom.atom); zero_flag_atom = 0; break; case PRINT_FIELD: if (cur_field_name) free(cur_field_name); cur_field_name = strdup(args->field.name); break; case PRINT_FLAGS: define_event_symbols(event, ev_name, args->flags.field); define_flag_field(ev_name, cur_field_name, args->flags.delim); define_flag_values(args->flags.flags, ev_name, cur_field_name); break; case PRINT_SYMBOL: define_event_symbols(event, ev_name, args->symbol.field); define_symbolic_field(ev_name, cur_field_name); define_symbolic_values(args->symbol.symbols, ev_name, cur_field_name); break; case PRINT_HEX: define_event_symbols(event, ev_name, args->hex.field); define_event_symbols(event, ev_name, args->hex.size); break; case PRINT_BSTRING: case PRINT_DYNAMIC_ARRAY: case PRINT_STRING: break; case PRINT_TYPE: define_event_symbols(event, ev_name, args->typecast.item); break; case PRINT_OP: if (strcmp(args->op.op, ":") == 0) zero_flag_atom = 1; define_event_symbols(event, ev_name, args->op.left); define_event_symbols(event, ev_name, args->op.right); break; case PRINT_FUNC: default: pr_err("Unsupported print arg type\n"); /* we should warn... 
*/ return; } if (args->next) define_event_symbols(event, ev_name, args->next); } static inline struct event_format *find_cache_event(struct perf_evsel *evsel) { static char ev_name[256]; struct event_format *event; int type = evsel->attr.config; if (events[type]) return events[type]; events[type] = event = evsel->tp_format; if (!event) return NULL; sprintf(ev_name, "%s::%s", event->system, event->name); define_event_symbols(event, ev_name, event->print_fmt.args); return event; } static void perl_process_tracepoint(union perf_event *perf_event __maybe_unused, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine __maybe_unused, struct addr_location *al) { struct format_field *field; static char handler[256]; unsigned long long val; unsigned long s, ns; struct event_format *event; int pid; int cpu = sample->cpu; void *data = sample->raw_data; unsigned long long nsecs = sample->time; struct thread *thread = al->thread; char *comm = thread->comm; dSP; if (evsel->attr.type != PERF_TYPE_TRACEPOINT) return; event = find_cache_event(evsel); if (!event) die("ug! 
no event found for type %" PRIu64, (u64)evsel->attr.config); pid = raw_field_value(event, "common_pid", data); sprintf(handler, "%s::%s", event->system, event->name); s = nsecs / NSECS_PER_SEC; ns = nsecs - s * NSECS_PER_SEC; scripting_context->event_data = data; scripting_context->pevent = evsel->tp_format->pevent; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(handler, 0))); XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); XPUSHs(sv_2mortal(newSVuv(cpu))); XPUSHs(sv_2mortal(newSVuv(s))); XPUSHs(sv_2mortal(newSVuv(ns))); XPUSHs(sv_2mortal(newSViv(pid))); XPUSHs(sv_2mortal(newSVpv(comm, 0))); /* common fields other than pid can be accessed via xsub fns */ for (field = event->format.fields; field; field = field->next) { if (field->flags & FIELD_IS_STRING) { int offset; if (field->flags & FIELD_IS_DYNAMIC) { offset = *(int *)(data + field->offset); offset &= 0xffff; } else offset = field->offset; XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); } else { /* FIELD_IS_NUMERIC */ val = read_size(event, data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) { XPUSHs(sv_2mortal(newSViv(val))); } else { XPUSHs(sv_2mortal(newSVuv(val))); } } } PUTBACK; if (get_cv(handler, 0)) call_pv(handler, G_SCALAR); else if (get_cv("main::trace_unhandled", 0)) { XPUSHs(sv_2mortal(newSVpv(handler, 0))); XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); XPUSHs(sv_2mortal(newSVuv(cpu))); XPUSHs(sv_2mortal(newSVuv(nsecs))); XPUSHs(sv_2mortal(newSViv(pid))); XPUSHs(sv_2mortal(newSVpv(comm, 0))); call_pv("main::trace_unhandled", G_SCALAR); } SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void perl_process_event_generic(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine __maybe_unused, struct addr_location *al __maybe_unused) { dSP; if (!get_cv("process_event", 0)) return; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpvn((const char *)event, event->header.size))); 
XPUSHs(sv_2mortal(newSVpvn((const char *)&evsel->attr, sizeof(evsel->attr)))); XPUSHs(sv_2mortal(newSVpvn((const char *)sample, sizeof(*sample)))); XPUSHs(sv_2mortal(newSVpvn((const char *)sample->raw_data, sample->raw_size))); PUTBACK; call_pv("process_event", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void perl_process_event(union perf_event *event, struct perf_sample *sample, struct perf_evsel *evsel, struct machine *machine, struct addr_location *al) { perl_process_tracepoint(event, sample, evsel, machine, al); perl_process_event_generic(event, sample, evsel, machine, al); } static void run_start_sub(void) { dSP; /* access to Perl stack */ PUSHMARK(SP); if (get_cv("main::trace_begin", 0)) call_pv("main::trace_begin", G_DISCARD | G_NOARGS); } /* * Start trace script */ static int perl_start_script(const char *script, int argc, const char **argv) { const char **command_line; int i, err = 0; command_line = malloc((argc + 2) * sizeof(const char *)); command_line[0] = ""; command_line[1] = script; for (i = 2; i < argc + 2; i++) command_line[i] = argv[i - 2]; my_perl = perl_alloc(); perl_construct(my_perl); if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line, (char **)NULL)) { err = -1; goto error; } if (perl_run(my_perl)) { err = -1; goto error; } if (SvTRUE(ERRSV)) { err = -1; goto error; } run_start_sub(); free(command_line); return 0; error: perl_free(my_perl); free(command_line); return err; } /* * Stop trace script */ static int perl_stop_script(void) { dSP; /* access to Perl stack */ PUSHMARK(SP); if (get_cv("main::trace_end", 0)) call_pv("main::trace_end", G_DISCARD | G_NOARGS); perl_destruct(my_perl); perl_free(my_perl); return 0; } static int perl_generate_script(struct pevent *pevent, const char *outfile) { struct event_format *event = NULL; struct format_field *f; char fname[PATH_MAX]; int not_first, count; FILE *ofp; sprintf(fname, "%s.pl", outfile); ofp = fopen(fname, "w"); if (ofp == NULL) { fprintf(stderr, "couldn't open 
%s\n", fname); return -1; } fprintf(ofp, "# perf script event handlers, " "generated by perf script -g perl\n"); fprintf(ofp, "# Licensed under the terms of the GNU GPL" " License version 2\n\n"); fprintf(ofp, "# The common_* event handler fields are the most useful " "fields common to\n"); fprintf(ofp, "# all events. They don't necessarily correspond to " "the 'common_*' fields\n"); fprintf(ofp, "# in the format files. Those fields not available as " "handler params can\n"); fprintf(ofp, "# be retrieved using Perl functions of the form " "common_*($context).\n"); fprintf(ofp, "# See Context.pm for the list of available " "functions.\n\n"); fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/" "Perf-Trace-Util/lib\";\n"); fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n"); fprintf(ofp, "use Perf::Trace::Core;\n"); fprintf(ofp, "use Perf::Trace::Context;\n"); fprintf(ofp, "use Perf::Trace::Util;\n\n"); fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n"); fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n"); while ((event = trace_find_next_event(pevent, event))) { fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); fprintf(ofp, "\tmy ("); fprintf(ofp, "$event_name, "); fprintf(ofp, "$context, "); fprintf(ofp, "$common_cpu, "); fprintf(ofp, "$common_secs, "); fprintf(ofp, "$common_nsecs,\n"); fprintf(ofp, "\t $common_pid, "); fprintf(ofp, "$common_comm,\n\t "); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (++count % 5 == 0) fprintf(ofp, "\n\t "); fprintf(ofp, "$%s", f->name); } fprintf(ofp, ") = @_;\n\n"); fprintf(ofp, "\tprint_header($event_name, $common_cpu, " "$common_secs, $common_nsecs,\n\t " "$common_pid, $common_comm);\n\n"); fprintf(ofp, "\tprintf(\""); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (count && count % 4 == 0) { fprintf(ofp, "\".\n\t \""); } count++; fprintf(ofp, "%s=", f->name); if 
(f->flags & FIELD_IS_STRING || f->flags & FIELD_IS_FLAG || f->flags & FIELD_IS_SYMBOLIC) fprintf(ofp, "%%s"); else if (f->flags & FIELD_IS_SIGNED) fprintf(ofp, "%%d"); else fprintf(ofp, "%%u"); } fprintf(ofp, "\\n\",\n\t "); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (++count % 5 == 0) fprintf(ofp, "\n\t "); if (f->flags & FIELD_IS_FLAG) { if ((count - 1) % 5 != 0) { fprintf(ofp, "\n\t "); count = 4; } fprintf(ofp, "flag_str(\""); fprintf(ofp, "%s::%s\", ", event->system, event->name); fprintf(ofp, "\"%s\", $%s)", f->name, f->name); } else if (f->flags & FIELD_IS_SYMBOLIC) { if ((count - 1) % 5 != 0) { fprintf(ofp, "\n\t "); count = 4; } fprintf(ofp, "symbol_str(\""); fprintf(ofp, "%s::%s\", ", event->system, event->name); fprintf(ofp, "\"%s\", $%s)", f->name, f->name); } else fprintf(ofp, "$%s", f->name); } fprintf(ofp, ");\n"); fprintf(ofp, "}\n\n"); } fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, " "$common_cpu, $common_secs, $common_nsecs,\n\t " "$common_pid, $common_comm) = @_;\n\n"); fprintf(ofp, "\tprint_header($event_name, $common_cpu, " "$common_secs, $common_nsecs,\n\t $common_pid, " "$common_comm);\n}\n\n"); fprintf(ofp, "sub print_header\n{\n" "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n" "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t " "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}\n"); fprintf(ofp, "\n# Packed byte string args of process_event():\n" "#\n" "# $event:\tunion perf_event\tutil/event.h\n" "# $attr:\tstruct perf_event_attr\tlinux/perf_event.h\n" "# $sample:\tstruct perf_sample\tutil/event.h\n" "# $raw_data:\tperf_sample->raw_data\tutil/event.h\n" "\n" "sub process_event\n" "{\n" "\tmy ($event, $attr, $sample, $raw_data) = @_;\n" "\n" "\tmy @event\t= unpack(\"LSS\", $event);\n" "\tmy @attr\t= unpack(\"LLQQQQQLLQQ\", $attr);\n" "\tmy @sample\t= unpack(\"QLLQQQQQLL\", $sample);\n" "\tmy @raw_data\t= unpack(\"C*\", 
$raw_data);\n" "\n" "\tuse Data::Dumper;\n" "\tprint Dumper \\@event, \\@attr, \\@sample, \\@raw_data;\n" "}\n"); fclose(ofp); fprintf(stderr, "generated Perl script: %s\n", fname); return 0; } struct scripting_ops perl_scripting_ops = { .name = "Perl", .start_script = perl_start_script, .stop_script = perl_stop_script, .process_event = perl_process_event, .generate_script = perl_generate_script, };
gpl-2.0
yang0508/kernel_samsung_smdk4412
arch/arm/mach-omap2/usb-musb.c
1984
4852
/* * linux/arch/arm/mach-omap2/usb-musb.c * * This file will contain the board specific details for the * MENTOR USB OTG controller on OMAP3430 * * Copyright (C) 2007-2008 Texas Instruments * Copyright (C) 2008 Nokia Corporation * Author: Vikram Pandita * * Generalization by: * Felipe Balbi <felipe.balbi@nokia.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/errno.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/dma-mapping.h> #include <linux/io.h> #include <linux/usb/musb.h> #include <mach/hardware.h> #include <mach/irqs.h> #include <mach/am35xx.h> #include <plat/usb.h> #include <plat/omap_device.h> #include "mux.h" #if defined(CONFIG_USB_MUSB_OMAP2PLUS) || defined (CONFIG_USB_MUSB_AM35X) static struct musb_hdrc_config musb_config = { .multipoint = 1, .dyn_fifo = 1, .num_eps = 16, .ram_bits = 12, }; static struct musb_hdrc_platform_data musb_plat = { #ifdef CONFIG_USB_MUSB_OTG .mode = MUSB_OTG, #elif defined(CONFIG_USB_MUSB_HDRC_HCD) .mode = MUSB_HOST, #elif defined(CONFIG_USB_GADGET_MUSB_HDRC) .mode = MUSB_PERIPHERAL, #endif /* .clock is set dynamically */ .config = &musb_config, /* REVISIT charge pump on TWL4030 can supply up to * 100 mA ... but this value is board-specific, like * "mode", and should be passed to usb_musb_init(). 
*/ .power = 50, /* up to 100 mA */ }; static u64 musb_dmamask = DMA_BIT_MASK(32); static struct omap_device_pm_latency omap_musb_latency[] = { { .deactivate_func = omap_device_idle_hwmods, .activate_func = omap_device_enable_hwmods, .flags = OMAP_DEVICE_LATENCY_AUTO_ADJUST, }, }; static void usb_musb_mux_init(struct omap_musb_board_data *board_data) { switch (board_data->interface_type) { case MUSB_INTERFACE_UTMI: omap_mux_init_signal("usba0_otg_dp", OMAP_PIN_INPUT); omap_mux_init_signal("usba0_otg_dm", OMAP_PIN_INPUT); break; case MUSB_INTERFACE_ULPI: omap_mux_init_signal("usba0_ulpiphy_clk", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("usba0_ulpiphy_stp", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("usba0_ulpiphy_dir", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("usba0_ulpiphy_nxt", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("usba0_ulpiphy_dat0", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("usba0_ulpiphy_dat1", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("usba0_ulpiphy_dat2", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("usba0_ulpiphy_dat3", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("usba0_ulpiphy_dat4", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("usba0_ulpiphy_dat5", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("usba0_ulpiphy_dat6", OMAP_PIN_INPUT_PULLDOWN); omap_mux_init_signal("usba0_ulpiphy_dat7", OMAP_PIN_INPUT_PULLDOWN); break; default: break; } } static struct omap_musb_board_data musb_default_board_data = { .interface_type = MUSB_INTERFACE_ULPI, .mode = MUSB_OTG, .power = 100, }; void __init usb_musb_init(struct omap_musb_board_data *musb_board_data) { struct omap_hwmod *oh; struct omap_device *od; struct platform_device *pdev; struct device *dev; int bus_id = -1; const char *oh_name, *name; struct omap_musb_board_data *board_data; if (musb_board_data) board_data = musb_board_data; else board_data = &musb_default_board_data; /* * REVISIT: This line can be removed once all the platforms using * musb_core.c have been 
converted to use use clkdev. */ musb_plat.clock = "ick"; musb_plat.board_data = board_data; musb_plat.power = board_data->power >> 1; musb_plat.mode = board_data->mode; musb_plat.extvbus = board_data->extvbus; if (cpu_is_omap44xx()) omap4430_phy_init(dev); if (cpu_is_omap3517() || cpu_is_omap3505()) { oh_name = "am35x_otg_hs"; name = "musb-am35x"; } else { oh_name = "usb_otg_hs"; name = "musb-omap2430"; } oh = omap_hwmod_lookup(oh_name); if (!oh) { pr_err("Could not look up %s\n", oh_name); return; } od = omap_device_build(name, bus_id, oh, &musb_plat, sizeof(musb_plat), omap_musb_latency, ARRAY_SIZE(omap_musb_latency), false); if (IS_ERR(od)) { pr_err("Could not build omap_device for %s %s\n", name, oh_name); return; } pdev = &od->pdev; dev = &pdev->dev; get_device(dev); dev->dma_mask = &musb_dmamask; dev->coherent_dma_mask = musb_dmamask; put_device(dev); if (cpu_is_omap44xx()) omap4430_phy_init(dev); } #else void __init usb_musb_init(struct omap_musb_board_data *board_data) { if (cpu_is_omap44xx()) omap4430_phy_init(NULL); } #endif /* CONFIG_USB_MUSB_SOC */
gpl-2.0
XileForce/Linaro-LSK
sound/pci/lola/lola.c
2240
20182
/* * Support for Digigram Lola PCI-e boards * * Copyright (c) 2011 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 * Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/pci.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/initval.h> #include "lola.h" /* Standard options */ static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Digigram Lola driver."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Digigram Lola driver."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Digigram Lola driver."); /* Lola-specific options */ /* for instance use always max granularity which is compatible * with all sample rates */ static int granularity[SNDRV_CARDS] = { [0 ... (SNDRV_CARDS - 1)] = LOLA_GRANULARITY_MAX }; /* below a sample_rate of 16kHz the analogue audio quality is NOT excellent */ static int sample_rate_min[SNDRV_CARDS] = { [0 ... 
(SNDRV_CARDS - 1) ] = 16000 }; module_param_array(granularity, int, NULL, 0444); MODULE_PARM_DESC(granularity, "Granularity value"); module_param_array(sample_rate_min, int, NULL, 0444); MODULE_PARM_DESC(sample_rate_min, "Minimal sample rate"); /* */ MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Digigram, Lola}}"); MODULE_DESCRIPTION("Digigram Lola driver"); MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); #ifdef CONFIG_SND_DEBUG_VERBOSE static int debug; module_param(debug, int, 0644); #define verbose_debug(fmt, args...) \ do { if (debug > 1) printk(KERN_DEBUG SFX fmt, ##args); } while (0) #else #define verbose_debug(fmt, args...) #endif /* * pseudo-codec read/write via CORB/RIRB */ static int corb_send_verb(struct lola *chip, unsigned int nid, unsigned int verb, unsigned int data, unsigned int extdata) { unsigned long flags; int ret = -EIO; chip->last_cmd_nid = nid; chip->last_verb = verb; chip->last_data = data; chip->last_extdata = extdata; data |= (nid << 20) | (verb << 8); spin_lock_irqsave(&chip->reg_lock, flags); if (chip->rirb.cmds < LOLA_CORB_ENTRIES - 1) { unsigned int wp = chip->corb.wp + 1; wp %= LOLA_CORB_ENTRIES; chip->corb.wp = wp; chip->corb.buf[wp * 2] = cpu_to_le32(data); chip->corb.buf[wp * 2 + 1] = cpu_to_le32(extdata); lola_writew(chip, BAR0, CORBWP, wp); chip->rirb.cmds++; smp_wmb(); ret = 0; } spin_unlock_irqrestore(&chip->reg_lock, flags); return ret; } static void lola_queue_unsol_event(struct lola *chip, unsigned int res, unsigned int res_ex) { lola_update_ext_clock_freq(chip, res); } /* retrieve RIRB entry - called from interrupt handler */ static void lola_update_rirb(struct lola *chip) { unsigned int rp, wp; u32 res, res_ex; wp = lola_readw(chip, BAR0, RIRBWP); if (wp == chip->rirb.wp) return; chip->rirb.wp = wp; while (chip->rirb.rp != wp) { chip->rirb.rp++; chip->rirb.rp %= LOLA_CORB_ENTRIES; rp = chip->rirb.rp << 1; /* an RIRB entry is 8-bytes */ res_ex = le32_to_cpu(chip->rirb.buf[rp + 1]); res = le32_to_cpu(chip->rirb.buf[rp]); 
if (res_ex & LOLA_RIRB_EX_UNSOL_EV) lola_queue_unsol_event(chip, res, res_ex); else if (chip->rirb.cmds) { chip->res = res; chip->res_ex = res_ex; smp_wmb(); chip->rirb.cmds--; } } } static int rirb_get_response(struct lola *chip, unsigned int *val, unsigned int *extval) { unsigned long timeout; again: timeout = jiffies + msecs_to_jiffies(1000); for (;;) { if (chip->polling_mode) { spin_lock_irq(&chip->reg_lock); lola_update_rirb(chip); spin_unlock_irq(&chip->reg_lock); } if (!chip->rirb.cmds) { *val = chip->res; if (extval) *extval = chip->res_ex; verbose_debug("get_response: %x, %x\n", chip->res, chip->res_ex); if (chip->res_ex & LOLA_RIRB_EX_ERROR) { printk(KERN_WARNING SFX "RIRB ERROR: " "NID=%x, verb=%x, data=%x, ext=%x\n", chip->last_cmd_nid, chip->last_verb, chip->last_data, chip->last_extdata); return -EIO; } return 0; } if (time_after(jiffies, timeout)) break; udelay(20); cond_resched(); } printk(KERN_WARNING SFX "RIRB response error\n"); if (!chip->polling_mode) { printk(KERN_WARNING SFX "switching to polling mode\n"); chip->polling_mode = 1; goto again; } return -EIO; } /* aynchronous write of a codec verb with data */ int lola_codec_write(struct lola *chip, unsigned int nid, unsigned int verb, unsigned int data, unsigned int extdata) { verbose_debug("codec_write NID=%x, verb=%x, data=%x, ext=%x\n", nid, verb, data, extdata); return corb_send_verb(chip, nid, verb, data, extdata); } /* write a codec verb with data and read the returned status */ int lola_codec_read(struct lola *chip, unsigned int nid, unsigned int verb, unsigned int data, unsigned int extdata, unsigned int *val, unsigned int *extval) { int err; verbose_debug("codec_read NID=%x, verb=%x, data=%x, ext=%x\n", nid, verb, data, extdata); err = corb_send_verb(chip, nid, verb, data, extdata); if (err < 0) return err; err = rirb_get_response(chip, val, extval); return err; } /* flush all pending codec writes */ int lola_codec_flush(struct lola *chip) { unsigned int tmp; return 
rirb_get_response(chip, &tmp, NULL); } /* * interrupt handler */ static irqreturn_t lola_interrupt(int irq, void *dev_id) { struct lola *chip = dev_id; unsigned int notify_ins, notify_outs, error_ins, error_outs; int handled = 0; int i; notify_ins = notify_outs = error_ins = error_outs = 0; spin_lock(&chip->reg_lock); for (;;) { unsigned int status, in_sts, out_sts; unsigned int reg; status = lola_readl(chip, BAR1, DINTSTS); if (!status || status == -1) break; in_sts = lola_readl(chip, BAR1, DIINTSTS); out_sts = lola_readl(chip, BAR1, DOINTSTS); /* clear Input Interrupts */ for (i = 0; in_sts && i < chip->pcm[CAPT].num_streams; i++) { if (!(in_sts & (1 << i))) continue; in_sts &= ~(1 << i); reg = lola_dsd_read(chip, i, STS); if (reg & LOLA_DSD_STS_DESE) /* error */ error_ins |= (1 << i); if (reg & LOLA_DSD_STS_BCIS) /* notify */ notify_ins |= (1 << i); /* clear */ lola_dsd_write(chip, i, STS, reg); } /* clear Output Interrupts */ for (i = 0; out_sts && i < chip->pcm[PLAY].num_streams; i++) { if (!(out_sts & (1 << i))) continue; out_sts &= ~(1 << i); reg = lola_dsd_read(chip, i + MAX_STREAM_IN_COUNT, STS); if (reg & LOLA_DSD_STS_DESE) /* error */ error_outs |= (1 << i); if (reg & LOLA_DSD_STS_BCIS) /* notify */ notify_outs |= (1 << i); lola_dsd_write(chip, i + MAX_STREAM_IN_COUNT, STS, reg); } if (status & LOLA_DINT_CTRL) { unsigned char rbsts; /* ring status is byte access */ rbsts = lola_readb(chip, BAR0, RIRBSTS); rbsts &= LOLA_RIRB_INT_MASK; if (rbsts) lola_writeb(chip, BAR0, RIRBSTS, rbsts); rbsts = lola_readb(chip, BAR0, CORBSTS); rbsts &= LOLA_CORB_INT_MASK; if (rbsts) lola_writeb(chip, BAR0, CORBSTS, rbsts); lola_update_rirb(chip); } if (status & (LOLA_DINT_FIFOERR | LOLA_DINT_MUERR)) { /* clear global fifo error interrupt */ lola_writel(chip, BAR1, DINTSTS, (status & (LOLA_DINT_FIFOERR | LOLA_DINT_MUERR))); } handled = 1; } spin_unlock(&chip->reg_lock); lola_pcm_update(chip, &chip->pcm[CAPT], notify_ins); lola_pcm_update(chip, &chip->pcm[PLAY], 
notify_outs); return IRQ_RETVAL(handled); } /* * controller */ static int reset_controller(struct lola *chip) { unsigned int gctl = lola_readl(chip, BAR0, GCTL); unsigned long end_time; if (gctl) { /* to be sure */ lola_writel(chip, BAR1, BOARD_MODE, 0); return 0; } chip->cold_reset = 1; lola_writel(chip, BAR0, GCTL, LOLA_GCTL_RESET); end_time = jiffies + msecs_to_jiffies(200); do { msleep(1); gctl = lola_readl(chip, BAR0, GCTL); if (gctl) break; } while (time_before(jiffies, end_time)); if (!gctl) { printk(KERN_ERR SFX "cannot reset controller\n"); return -EIO; } return 0; } static void lola_irq_enable(struct lola *chip) { unsigned int val; /* enalbe all I/O streams */ val = (1 << chip->pcm[PLAY].num_streams) - 1; lola_writel(chip, BAR1, DOINTCTL, val); val = (1 << chip->pcm[CAPT].num_streams) - 1; lola_writel(chip, BAR1, DIINTCTL, val); /* enable global irqs */ val = LOLA_DINT_GLOBAL | LOLA_DINT_CTRL | LOLA_DINT_FIFOERR | LOLA_DINT_MUERR; lola_writel(chip, BAR1, DINTCTL, val); } static void lola_irq_disable(struct lola *chip) { lola_writel(chip, BAR1, DINTCTL, 0); lola_writel(chip, BAR1, DIINTCTL, 0); lola_writel(chip, BAR1, DOINTCTL, 0); } static int setup_corb_rirb(struct lola *chip) { int err; unsigned char tmp; unsigned long end_time; err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), PAGE_SIZE, &chip->rb); if (err < 0) return err; chip->corb.addr = chip->rb.addr; chip->corb.buf = (u32 *)chip->rb.area; chip->rirb.addr = chip->rb.addr + 2048; chip->rirb.buf = (u32 *)(chip->rb.area + 2048); /* disable ringbuffer DMAs */ lola_writeb(chip, BAR0, RIRBCTL, 0); lola_writeb(chip, BAR0, CORBCTL, 0); end_time = jiffies + msecs_to_jiffies(200); do { if (!lola_readb(chip, BAR0, RIRBCTL) && !lola_readb(chip, BAR0, CORBCTL)) break; msleep(1); } while (time_before(jiffies, end_time)); /* CORB set up */ lola_writel(chip, BAR0, CORBLBASE, (u32)chip->corb.addr); lola_writel(chip, BAR0, CORBUBASE, upper_32_bits(chip->corb.addr)); /* set the corb size to 
256 entries */ lola_writeb(chip, BAR0, CORBSIZE, 0x02); /* set the corb write pointer to 0 */ lola_writew(chip, BAR0, CORBWP, 0); /* reset the corb hw read pointer */ lola_writew(chip, BAR0, CORBRP, LOLA_RBRWP_CLR); /* enable corb dma */ lola_writeb(chip, BAR0, CORBCTL, LOLA_RBCTL_DMA_EN); /* clear flags if set */ tmp = lola_readb(chip, BAR0, CORBSTS) & LOLA_CORB_INT_MASK; if (tmp) lola_writeb(chip, BAR0, CORBSTS, tmp); chip->corb.wp = 0; /* RIRB set up */ lola_writel(chip, BAR0, RIRBLBASE, (u32)chip->rirb.addr); lola_writel(chip, BAR0, RIRBUBASE, upper_32_bits(chip->rirb.addr)); /* set the rirb size to 256 entries */ lola_writeb(chip, BAR0, RIRBSIZE, 0x02); /* reset the rirb hw write pointer */ lola_writew(chip, BAR0, RIRBWP, LOLA_RBRWP_CLR); /* set N=1, get RIRB response interrupt for new entry */ lola_writew(chip, BAR0, RINTCNT, 1); /* enable rirb dma and response irq */ lola_writeb(chip, BAR0, RIRBCTL, LOLA_RBCTL_DMA_EN | LOLA_RBCTL_IRQ_EN); /* clear flags if set */ tmp = lola_readb(chip, BAR0, RIRBSTS) & LOLA_RIRB_INT_MASK; if (tmp) lola_writeb(chip, BAR0, RIRBSTS, tmp); chip->rirb.rp = chip->rirb.cmds = 0; return 0; } static void stop_corb_rirb(struct lola *chip) { /* disable ringbuffer DMAs */ lola_writeb(chip, BAR0, RIRBCTL, 0); lola_writeb(chip, BAR0, CORBCTL, 0); } static void lola_reset_setups(struct lola *chip) { /* update the granularity */ lola_set_granularity(chip, chip->granularity, true); /* update the sample clock */ lola_set_clock_index(chip, chip->clock.cur_index); /* enable unsolicited events of the clock widget */ lola_enable_clock_events(chip); /* update the analog gains */ lola_setup_all_analog_gains(chip, CAPT, false); /* input, update */ /* update SRC configuration if applicable */ lola_set_src_config(chip, chip->input_src_mask, false); /* update the analog outputs */ lola_setup_all_analog_gains(chip, PLAY, false); /* output, update */ } static int lola_parse_tree(struct lola *chip) { unsigned int val; int nid, err; err = 
lola_read_param(chip, 0, LOLA_PAR_VENDOR_ID, &val); if (err < 0) { printk(KERN_ERR SFX "Can't read VENDOR_ID\n"); return err; } val >>= 16; if (val != 0x1369) { printk(KERN_ERR SFX "Unknown codec vendor 0x%x\n", val); return -EINVAL; } err = lola_read_param(chip, 1, LOLA_PAR_FUNCTION_TYPE, &val); if (err < 0) { printk(KERN_ERR SFX "Can't read FUNCTION_TYPE for 0x%x\n", nid); return err; } if (val != 1) { printk(KERN_ERR SFX "Unknown function type %d\n", val); return -EINVAL; } err = lola_read_param(chip, 1, LOLA_PAR_SPECIFIC_CAPS, &val); if (err < 0) { printk(KERN_ERR SFX "Can't read SPECCAPS\n"); return err; } chip->lola_caps = val; chip->pin[CAPT].num_pins = LOLA_AFG_INPUT_PIN_COUNT(chip->lola_caps); chip->pin[PLAY].num_pins = LOLA_AFG_OUTPUT_PIN_COUNT(chip->lola_caps); snd_printdd(SFX "speccaps=0x%x, pins in=%d, out=%d\n", chip->lola_caps, chip->pin[CAPT].num_pins, chip->pin[PLAY].num_pins); if (chip->pin[CAPT].num_pins > MAX_AUDIO_INOUT_COUNT || chip->pin[PLAY].num_pins > MAX_AUDIO_INOUT_COUNT) { printk(KERN_ERR SFX "Invalid Lola-spec caps 0x%x\n", val); return -EINVAL; } nid = 0x02; err = lola_init_pcm(chip, CAPT, &nid); if (err < 0) return err; err = lola_init_pcm(chip, PLAY, &nid); if (err < 0) return err; err = lola_init_pins(chip, CAPT, &nid); if (err < 0) return err; err = lola_init_pins(chip, PLAY, &nid); if (err < 0) return err; if (LOLA_AFG_CLOCK_WIDGET_PRESENT(chip->lola_caps)) { err = lola_init_clock_widget(chip, nid); if (err < 0) return err; nid++; } if (LOLA_AFG_MIXER_WIDGET_PRESENT(chip->lola_caps)) { err = lola_init_mixer_widget(chip, nid); if (err < 0) return err; nid++; } /* enable unsolicited events of the clock widget */ err = lola_enable_clock_events(chip); if (err < 0) return err; /* if last ResetController was not a ColdReset, we don't know * the state of the card; initialize here again */ if (!chip->cold_reset) { lola_reset_setups(chip); chip->cold_reset = 1; } else { /* set the granularity if it is not the default */ if 
(chip->granularity != LOLA_GRANULARITY_MIN) lola_set_granularity(chip, chip->granularity, true); } return 0; } static void lola_stop_hw(struct lola *chip) { stop_corb_rirb(chip); lola_irq_disable(chip); } static void lola_free(struct lola *chip) { if (chip->initialized) lola_stop_hw(chip); lola_free_pcm(chip); lola_free_mixer(chip); if (chip->irq >= 0) free_irq(chip->irq, (void *)chip); if (chip->bar[0].remap_addr) iounmap(chip->bar[0].remap_addr); if (chip->bar[1].remap_addr) iounmap(chip->bar[1].remap_addr); if (chip->rb.area) snd_dma_free_pages(&chip->rb); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); } static int lola_dev_free(struct snd_device *device) { lola_free(device->device_data); return 0; } static int lola_create(struct snd_card *card, struct pci_dev *pci, int dev, struct lola **rchip) { struct lola *chip; int err; unsigned int dever; static struct snd_device_ops ops = { .dev_free = lola_dev_free, }; *rchip = NULL; err = pci_enable_device(pci); if (err < 0) return err; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) { snd_printk(KERN_ERR SFX "cannot allocate chip\n"); pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); mutex_init(&chip->open_mutex); chip->card = card; chip->pci = pci; chip->irq = -1; chip->granularity = granularity[dev]; switch (chip->granularity) { case 8: chip->sample_rate_max = 48000; break; case 16: chip->sample_rate_max = 96000; break; case 32: chip->sample_rate_max = 192000; break; default: snd_printk(KERN_WARNING SFX "Invalid granularity %d, reset to %d\n", chip->granularity, LOLA_GRANULARITY_MAX); chip->granularity = LOLA_GRANULARITY_MAX; chip->sample_rate_max = 192000; break; } chip->sample_rate_min = sample_rate_min[dev]; if (chip->sample_rate_min > chip->sample_rate_max) { snd_printk(KERN_WARNING SFX "Invalid sample_rate_min %d, reset to 16000\n", chip->sample_rate_min); chip->sample_rate_min = 16000; } err = pci_request_regions(pci, DRVNAME); if (err < 0) { 
kfree(chip); pci_disable_device(pci); return err; } chip->bar[0].addr = pci_resource_start(pci, 0); chip->bar[0].remap_addr = pci_ioremap_bar(pci, 0); chip->bar[1].addr = pci_resource_start(pci, 2); chip->bar[1].remap_addr = pci_ioremap_bar(pci, 2); if (!chip->bar[0].remap_addr || !chip->bar[1].remap_addr) { snd_printk(KERN_ERR SFX "ioremap error\n"); err = -ENXIO; goto errout; } pci_set_master(pci); err = reset_controller(chip); if (err < 0) goto errout; if (request_irq(pci->irq, lola_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { printk(KERN_ERR SFX "unable to grab IRQ %d\n", pci->irq); err = -EBUSY; goto errout; } chip->irq = pci->irq; synchronize_irq(chip->irq); dever = lola_readl(chip, BAR1, DEVER); chip->pcm[CAPT].num_streams = (dever >> 0) & 0x3ff; chip->pcm[PLAY].num_streams = (dever >> 10) & 0x3ff; chip->version = (dever >> 24) & 0xff; snd_printdd(SFX "streams in=%d, out=%d, version=0x%x\n", chip->pcm[CAPT].num_streams, chip->pcm[PLAY].num_streams, chip->version); /* Test LOLA_BAR1_DEVER */ if (chip->pcm[CAPT].num_streams > MAX_STREAM_IN_COUNT || chip->pcm[PLAY].num_streams > MAX_STREAM_OUT_COUNT || (!chip->pcm[CAPT].num_streams && !chip->pcm[PLAY].num_streams)) { printk(KERN_ERR SFX "invalid DEVER = %x\n", dever); err = -EINVAL; goto errout; } err = setup_corb_rirb(chip); if (err < 0) goto errout; err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops); if (err < 0) { snd_printk(KERN_ERR SFX "Error creating device [card]!\n"); goto errout; } strcpy(card->driver, "Lola"); strlcpy(card->shortname, "Digigram Lola", sizeof(card->shortname)); snprintf(card->longname, sizeof(card->longname), "%s at 0x%lx irq %i", card->shortname, chip->bar[0].addr, chip->irq); strcpy(card->mixername, card->shortname); lola_irq_enable(chip); chip->initialized = 1; *rchip = chip; return 0; errout: lola_free(chip); return err; } static int lola_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct lola *chip; int err; 
if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) { snd_printk(KERN_ERR SFX "Error creating card!\n"); return err; } snd_card_set_dev(card, &pci->dev); err = lola_create(card, pci, dev, &chip); if (err < 0) goto out_free; card->private_data = chip; err = lola_parse_tree(chip); if (err < 0) goto out_free; err = lola_create_pcm(chip); if (err < 0) goto out_free; err = lola_create_mixer(chip); if (err < 0) goto out_free; lola_proc_debug_new(chip); err = snd_card_register(card); if (err < 0) goto out_free; pci_set_drvdata(pci, card); dev++; return err; out_free: snd_card_free(card); return err; } static void lola_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } /* PCI IDs */ static DEFINE_PCI_DEVICE_TABLE(lola_ids) = { { PCI_VDEVICE(DIGIGRAM, 0x0001) }, { 0, } }; MODULE_DEVICE_TABLE(pci, lola_ids); /* pci_driver definition */ static struct pci_driver lola_driver = { .name = KBUILD_MODNAME, .id_table = lola_ids, .probe = lola_probe, .remove = lola_remove, }; module_pci_driver(lola_driver);
gpl-2.0
shouhu1993/NX511J_kernel
sound/pci/es1938.c
2240
56684
/* * Driver for ESS Solo-1 (ES1938, ES1946, ES1969) soundcard * Copyright (c) by Jaromir Koutek <miri@punknet.cz>, * Jaroslav Kysela <perex@perex.cz>, * Thomas Sailer <sailer@ife.ee.ethz.ch>, * Abramo Bagnara <abramo@alsa-project.org>, * Markus Gruber <gruber@eikon.tum.de> * * Rewritten from sonicvibes.c source. * * TODO: * Rewrite better spinlocks * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* NOTES: - Capture data is written unaligned starting from dma_base + 1 so I need to disable mmap and to add a copy callback. - After several cycle of the following: while : ; do arecord -d1 -f cd -t raw | aplay -f cd ; done a "playback write error (DMA or IRQ trouble?)" may happen. This is due to playback interrupts not generated. I suspect a timing issue. - Sometimes the interrupt handler is invoked wrongly during playback. This generates some harmless "Unexpected hw_pointer: wrong interrupt acknowledge". I've seen that using small period sizes. 
Reproducible with: mpg123 test.mp3 & hdparm -t -T /dev/hda */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/gameport.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <sound/core.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/opl3.h> #include <sound/mpu401.h> #include <sound/initval.h> #include <sound/tlv.h> #include <asm/io.h> MODULE_AUTHOR("Jaromir Koutek <miri@punknet.cz>"); MODULE_DESCRIPTION("ESS Solo-1"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ESS,ES1938}," "{ESS,ES1946}," "{ESS,ES1969}," "{TerraTec,128i PCI}}"); #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) #define SUPPORT_JOYSTICK 1 #endif static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for ESS Solo-1 soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for ESS Solo-1 soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable ESS Solo-1 soundcard."); #define SLIO_REG(chip, x) ((chip)->io_port + ESSIO_REG_##x) #define SLDM_REG(chip, x) ((chip)->ddma_port + ESSDM_REG_##x) #define SLSB_REG(chip, x) ((chip)->sb_port + ESSSB_REG_##x) #define SL_PCI_LEGACYCONTROL 0x40 #define SL_PCI_CONFIG 0x50 #define SL_PCI_DDMACONTROL 0x60 #define ESSIO_REG_AUDIO2DMAADDR 0 #define ESSIO_REG_AUDIO2DMACOUNT 4 #define ESSIO_REG_AUDIO2MODE 6 #define ESSIO_REG_IRQCONTROL 7 #define ESSDM_REG_DMAADDR 0x00 #define ESSDM_REG_DMACOUNT 0x04 #define ESSDM_REG_DMACOMMAND 0x08 #define ESSDM_REG_DMASTATUS 0x08 #define ESSDM_REG_DMAMODE 0x0b #define ESSDM_REG_DMACLEAR 0x0d #define ESSDM_REG_DMAMASK 0x0f #define ESSSB_REG_FMLOWADDR 0x00 
#define ESSSB_REG_FMHIGHADDR 0x02 #define ESSSB_REG_MIXERADDR 0x04 #define ESSSB_REG_MIXERDATA 0x05 #define ESSSB_IREG_AUDIO1 0x14 #define ESSSB_IREG_MICMIX 0x1a #define ESSSB_IREG_RECSRC 0x1c #define ESSSB_IREG_MASTER 0x32 #define ESSSB_IREG_FM 0x36 #define ESSSB_IREG_AUXACD 0x38 #define ESSSB_IREG_AUXB 0x3a #define ESSSB_IREG_PCSPEAKER 0x3c #define ESSSB_IREG_LINE 0x3e #define ESSSB_IREG_SPATCONTROL 0x50 #define ESSSB_IREG_SPATLEVEL 0x52 #define ESSSB_IREG_MASTER_LEFT 0x60 #define ESSSB_IREG_MASTER_RIGHT 0x62 #define ESSSB_IREG_MPU401CONTROL 0x64 #define ESSSB_IREG_MICMIXRECORD 0x68 #define ESSSB_IREG_AUDIO2RECORD 0x69 #define ESSSB_IREG_AUXACDRECORD 0x6a #define ESSSB_IREG_FMRECORD 0x6b #define ESSSB_IREG_AUXBRECORD 0x6c #define ESSSB_IREG_MONO 0x6d #define ESSSB_IREG_LINERECORD 0x6e #define ESSSB_IREG_MONORECORD 0x6f #define ESSSB_IREG_AUDIO2SAMPLE 0x70 #define ESSSB_IREG_AUDIO2MODE 0x71 #define ESSSB_IREG_AUDIO2FILTER 0x72 #define ESSSB_IREG_AUDIO2TCOUNTL 0x74 #define ESSSB_IREG_AUDIO2TCOUNTH 0x76 #define ESSSB_IREG_AUDIO2CONTROL1 0x78 #define ESSSB_IREG_AUDIO2CONTROL2 0x7a #define ESSSB_IREG_AUDIO2 0x7c #define ESSSB_REG_RESET 0x06 #define ESSSB_REG_READDATA 0x0a #define ESSSB_REG_WRITEDATA 0x0c #define ESSSB_REG_READSTATUS 0x0c #define ESSSB_REG_STATUS 0x0e #define ESS_CMD_EXTSAMPLERATE 0xa1 #define ESS_CMD_FILTERDIV 0xa2 #define ESS_CMD_DMACNTRELOADL 0xa4 #define ESS_CMD_DMACNTRELOADH 0xa5 #define ESS_CMD_ANALOGCONTROL 0xa8 #define ESS_CMD_IRQCONTROL 0xb1 #define ESS_CMD_DRQCONTROL 0xb2 #define ESS_CMD_RECLEVEL 0xb4 #define ESS_CMD_SETFORMAT 0xb6 #define ESS_CMD_SETFORMAT2 0xb7 #define ESS_CMD_DMACONTROL 0xb8 #define ESS_CMD_DMATYPE 0xb9 #define ESS_CMD_OFFSETLEFT 0xba #define ESS_CMD_OFFSETRIGHT 0xbb #define ESS_CMD_READREG 0xc0 #define ESS_CMD_ENABLEEXT 0xc6 #define ESS_CMD_PAUSEDMA 0xd0 #define ESS_CMD_ENABLEAUDIO1 0xd1 #define ESS_CMD_STOPAUDIO1 0xd3 #define ESS_CMD_AUDIO1STATUS 0xd8 #define ESS_CMD_CONTDMA 0xd4 #define ESS_CMD_TESTIRQ 0xf2 #define 
ESS_RECSRC_MIC 0 #define ESS_RECSRC_AUXACD 2 #define ESS_RECSRC_AUXB 5 #define ESS_RECSRC_LINE 6 #define ESS_RECSRC_NONE 7 #define DAC1 0x01 #define ADC1 0x02 #define DAC2 0x04 /* */ #define SAVED_REG_SIZE 32 /* max. number of registers to save */ struct es1938 { int irq; unsigned long io_port; unsigned long sb_port; unsigned long vc_port; unsigned long mpu_port; unsigned long game_port; unsigned long ddma_port; unsigned char irqmask; unsigned char revision; struct snd_kcontrol *hw_volume; struct snd_kcontrol *hw_switch; struct snd_kcontrol *master_volume; struct snd_kcontrol *master_switch; struct pci_dev *pci; struct snd_card *card; struct snd_pcm *pcm; struct snd_pcm_substream *capture_substream; struct snd_pcm_substream *playback1_substream; struct snd_pcm_substream *playback2_substream; struct snd_rawmidi *rmidi; unsigned int dma1_size; unsigned int dma2_size; unsigned int dma1_start; unsigned int dma2_start; unsigned int dma1_shift; unsigned int dma2_shift; unsigned int last_capture_dmaaddr; unsigned int active; spinlock_t reg_lock; spinlock_t mixer_lock; struct snd_info_entry *proc_entry; #ifdef SUPPORT_JOYSTICK struct gameport *gameport; #endif #ifdef CONFIG_PM_SLEEP unsigned char saved_regs[SAVED_REG_SIZE]; #endif }; static irqreturn_t snd_es1938_interrupt(int irq, void *dev_id); static DEFINE_PCI_DEVICE_TABLE(snd_es1938_ids) = { { PCI_VDEVICE(ESS, 0x1969), 0, }, /* Solo-1 */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_es1938_ids); #define RESET_LOOP_TIMEOUT 0x10000 #define WRITE_LOOP_TIMEOUT 0x10000 #define GET_LOOP_TIMEOUT 0x01000 #undef REG_DEBUG /* ----------------------------------------------------------------- * Write to a mixer register * -----------------------------------------------------------------*/ static void snd_es1938_mixer_write(struct es1938 *chip, unsigned char reg, unsigned char val) { unsigned long flags; spin_lock_irqsave(&chip->mixer_lock, flags); outb(reg, SLSB_REG(chip, MIXERADDR)); outb(val, SLSB_REG(chip, MIXERDATA)); 
spin_unlock_irqrestore(&chip->mixer_lock, flags); #ifdef REG_DEBUG snd_printk(KERN_DEBUG "Mixer reg %02x set to %02x\n", reg, val); #endif } /* ----------------------------------------------------------------- * Read from a mixer register * -----------------------------------------------------------------*/ static int snd_es1938_mixer_read(struct es1938 *chip, unsigned char reg) { int data; unsigned long flags; spin_lock_irqsave(&chip->mixer_lock, flags); outb(reg, SLSB_REG(chip, MIXERADDR)); data = inb(SLSB_REG(chip, MIXERDATA)); spin_unlock_irqrestore(&chip->mixer_lock, flags); #ifdef REG_DEBUG snd_printk(KERN_DEBUG "Mixer reg %02x now is %02x\n", reg, data); #endif return data; } /* ----------------------------------------------------------------- * Write to some bits of a mixer register (return old value) * -----------------------------------------------------------------*/ static int snd_es1938_mixer_bits(struct es1938 *chip, unsigned char reg, unsigned char mask, unsigned char val) { unsigned long flags; unsigned char old, new, oval; spin_lock_irqsave(&chip->mixer_lock, flags); outb(reg, SLSB_REG(chip, MIXERADDR)); old = inb(SLSB_REG(chip, MIXERDATA)); oval = old & mask; if (val != oval) { new = (old & ~mask) | (val & mask); outb(new, SLSB_REG(chip, MIXERDATA)); #ifdef REG_DEBUG snd_printk(KERN_DEBUG "Mixer reg %02x was %02x, set to %02x\n", reg, old, new); #endif } spin_unlock_irqrestore(&chip->mixer_lock, flags); return oval; } /* ----------------------------------------------------------------- * Write command to Controller Registers * -----------------------------------------------------------------*/ static void snd_es1938_write_cmd(struct es1938 *chip, unsigned char cmd) { int i; unsigned char v; for (i = 0; i < WRITE_LOOP_TIMEOUT; i++) { if (!(v = inb(SLSB_REG(chip, READSTATUS)) & 0x80)) { outb(cmd, SLSB_REG(chip, WRITEDATA)); return; } } printk(KERN_ERR "snd_es1938_write_cmd timeout (0x02%x/0x02%x)\n", cmd, v); } /* 
----------------------------------------------------------------- * Read the Read Data Buffer * -----------------------------------------------------------------*/ static int snd_es1938_get_byte(struct es1938 *chip) { int i; unsigned char v; for (i = GET_LOOP_TIMEOUT; i; i--) if ((v = inb(SLSB_REG(chip, STATUS))) & 0x80) return inb(SLSB_REG(chip, READDATA)); snd_printk(KERN_ERR "get_byte timeout: status 0x02%x\n", v); return -ENODEV; } /* ----------------------------------------------------------------- * Write value cmd register * -----------------------------------------------------------------*/ static void snd_es1938_write(struct es1938 *chip, unsigned char reg, unsigned char val) { unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); snd_es1938_write_cmd(chip, reg); snd_es1938_write_cmd(chip, val); spin_unlock_irqrestore(&chip->reg_lock, flags); #ifdef REG_DEBUG snd_printk(KERN_DEBUG "Reg %02x set to %02x\n", reg, val); #endif } /* ----------------------------------------------------------------- * Read data from cmd register and return it * -----------------------------------------------------------------*/ static unsigned char snd_es1938_read(struct es1938 *chip, unsigned char reg) { unsigned char val; unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); snd_es1938_write_cmd(chip, ESS_CMD_READREG); snd_es1938_write_cmd(chip, reg); val = snd_es1938_get_byte(chip); spin_unlock_irqrestore(&chip->reg_lock, flags); #ifdef REG_DEBUG snd_printk(KERN_DEBUG "Reg %02x now is %02x\n", reg, val); #endif return val; } /* ----------------------------------------------------------------- * Write data to cmd register and return old value * -----------------------------------------------------------------*/ static int snd_es1938_bits(struct es1938 *chip, unsigned char reg, unsigned char mask, unsigned char val) { unsigned long flags; unsigned char old, new, oval; spin_lock_irqsave(&chip->reg_lock, flags); snd_es1938_write_cmd(chip, ESS_CMD_READREG); 
snd_es1938_write_cmd(chip, reg); old = snd_es1938_get_byte(chip); oval = old & mask; if (val != oval) { snd_es1938_write_cmd(chip, reg); new = (old & ~mask) | (val & mask); snd_es1938_write_cmd(chip, new); #ifdef REG_DEBUG snd_printk(KERN_DEBUG "Reg %02x was %02x, set to %02x\n", reg, old, new); #endif } spin_unlock_irqrestore(&chip->reg_lock, flags); return oval; } /* -------------------------------------------------------------------- * Reset the chip * --------------------------------------------------------------------*/ static void snd_es1938_reset(struct es1938 *chip) { int i; outb(3, SLSB_REG(chip, RESET)); inb(SLSB_REG(chip, RESET)); outb(0, SLSB_REG(chip, RESET)); for (i = 0; i < RESET_LOOP_TIMEOUT; i++) { if (inb(SLSB_REG(chip, STATUS)) & 0x80) { if (inb(SLSB_REG(chip, READDATA)) == 0xaa) goto __next; } } snd_printk(KERN_ERR "ESS Solo-1 reset failed\n"); __next: snd_es1938_write_cmd(chip, ESS_CMD_ENABLEEXT); /* Demand transfer DMA: 4 bytes per DMA request */ snd_es1938_write(chip, ESS_CMD_DMATYPE, 2); /* Change behaviour of register A1 4x oversampling 2nd channel DAC asynchronous */ snd_es1938_mixer_write(chip, ESSSB_IREG_AUDIO2MODE, 0x32); /* enable/select DMA channel and IRQ channel */ snd_es1938_bits(chip, ESS_CMD_IRQCONTROL, 0xf0, 0x50); snd_es1938_bits(chip, ESS_CMD_DRQCONTROL, 0xf0, 0x50); snd_es1938_write_cmd(chip, ESS_CMD_ENABLEAUDIO1); /* Set spatializer parameters to recommended values */ snd_es1938_mixer_write(chip, 0x54, 0x8f); snd_es1938_mixer_write(chip, 0x56, 0x95); snd_es1938_mixer_write(chip, 0x58, 0x94); snd_es1938_mixer_write(chip, 0x5a, 0x80); } /* -------------------------------------------------------------------- * Reset the FIFOs * --------------------------------------------------------------------*/ static void snd_es1938_reset_fifo(struct es1938 *chip) { outb(2, SLSB_REG(chip, RESET)); outb(0, SLSB_REG(chip, RESET)); } static struct snd_ratnum clocks[2] = { { .num = 793800, .den_min = 1, .den_max = 128, .den_step = 1, }, { .num 
= 768000, .den_min = 1, .den_max = 128, .den_step = 1, } }; static struct snd_pcm_hw_constraint_ratnums hw_constraints_clocks = { .nrats = 2, .rats = clocks, }; static void snd_es1938_rate_set(struct es1938 *chip, struct snd_pcm_substream *substream, int mode) { unsigned int bits, div0; struct snd_pcm_runtime *runtime = substream->runtime; if (runtime->rate_num == clocks[0].num) bits = 128 - runtime->rate_den; else bits = 256 - runtime->rate_den; /* set filter register */ div0 = 256 - 7160000*20/(8*82*runtime->rate); if (mode == DAC2) { snd_es1938_mixer_write(chip, 0x70, bits); snd_es1938_mixer_write(chip, 0x72, div0); } else { snd_es1938_write(chip, 0xA1, bits); snd_es1938_write(chip, 0xA2, div0); } } /* -------------------------------------------------------------------- * Configure Solo1 builtin DMA Controller * --------------------------------------------------------------------*/ static void snd_es1938_playback1_setdma(struct es1938 *chip) { outb(0x00, SLIO_REG(chip, AUDIO2MODE)); outl(chip->dma2_start, SLIO_REG(chip, AUDIO2DMAADDR)); outw(0, SLIO_REG(chip, AUDIO2DMACOUNT)); outw(chip->dma2_size, SLIO_REG(chip, AUDIO2DMACOUNT)); } static void snd_es1938_playback2_setdma(struct es1938 *chip) { /* Enable DMA controller */ outb(0xc4, SLDM_REG(chip, DMACOMMAND)); /* 1. Master reset */ outb(0, SLDM_REG(chip, DMACLEAR)); /* 2. Mask DMA */ outb(1, SLDM_REG(chip, DMAMASK)); outb(0x18, SLDM_REG(chip, DMAMODE)); outl(chip->dma1_start, SLDM_REG(chip, DMAADDR)); outw(chip->dma1_size - 1, SLDM_REG(chip, DMACOUNT)); /* 3. Unmask DMA */ outb(0, SLDM_REG(chip, DMAMASK)); } static void snd_es1938_capture_setdma(struct es1938 *chip) { /* Enable DMA controller */ outb(0xc4, SLDM_REG(chip, DMACOMMAND)); /* 1. Master reset */ outb(0, SLDM_REG(chip, DMACLEAR)); /* 2. 
Mask DMA */ outb(1, SLDM_REG(chip, DMAMASK)); outb(0x14, SLDM_REG(chip, DMAMODE)); outl(chip->dma1_start, SLDM_REG(chip, DMAADDR)); chip->last_capture_dmaaddr = chip->dma1_start; outw(chip->dma1_size - 1, SLDM_REG(chip, DMACOUNT)); /* 3. Unmask DMA */ outb(0, SLDM_REG(chip, DMAMASK)); } /* ---------------------------------------------------------------------- * * *** PCM part *** */ static int snd_es1938_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct es1938 *chip = snd_pcm_substream_chip(substream); int val; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: val = 0x0f; chip->active |= ADC1; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: val = 0x00; chip->active &= ~ADC1; break; default: return -EINVAL; } snd_es1938_write(chip, ESS_CMD_DMACONTROL, val); return 0; } static int snd_es1938_playback1_trigger(struct snd_pcm_substream *substream, int cmd) { struct es1938 *chip = snd_pcm_substream_chip(substream); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: /* According to the documentation this should be: 0x13 but that value may randomly swap stereo channels */ snd_es1938_mixer_write(chip, ESSSB_IREG_AUDIO2CONTROL1, 0x92); udelay(10); snd_es1938_mixer_write(chip, ESSSB_IREG_AUDIO2CONTROL1, 0x93); /* This two stage init gives the FIFO -> DAC connection time to * settle before first data from DMA flows in. This should ensure * no swapping of stereo channels. 
Report a bug if otherwise :-) */ outb(0x0a, SLIO_REG(chip, AUDIO2MODE)); chip->active |= DAC2; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: outb(0, SLIO_REG(chip, AUDIO2MODE)); snd_es1938_mixer_write(chip, ESSSB_IREG_AUDIO2CONTROL1, 0); chip->active &= ~DAC2; break; default: return -EINVAL; } return 0; } static int snd_es1938_playback2_trigger(struct snd_pcm_substream *substream, int cmd) { struct es1938 *chip = snd_pcm_substream_chip(substream); int val; switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: val = 5; chip->active |= DAC1; break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: val = 0; chip->active &= ~DAC1; break; default: return -EINVAL; } snd_es1938_write(chip, ESS_CMD_DMACONTROL, val); return 0; } static int snd_es1938_playback_trigger(struct snd_pcm_substream *substream, int cmd) { switch (substream->number) { case 0: return snd_es1938_playback1_trigger(substream, cmd); case 1: return snd_es1938_playback2_trigger(substream, cmd); } snd_BUG(); return -EINVAL; } /* -------------------------------------------------------------------- * First channel for Extended Mode Audio 1 ADC Operation * --------------------------------------------------------------------*/ static int snd_es1938_capture_prepare(struct snd_pcm_substream *substream) { struct es1938 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int u, is8, mono; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); chip->dma1_size = size; chip->dma1_start = runtime->dma_addr; mono = (runtime->channels > 1) ? 0 : 1; is8 = snd_pcm_format_width(runtime->format) == 16 ? 0 : 1; u = snd_pcm_format_unsigned(runtime->format); chip->dma1_shift = 2 - mono - is8; snd_es1938_reset_fifo(chip); /* program type */ snd_es1938_bits(chip, ESS_CMD_ANALOGCONTROL, 0x03, (mono ? 
2 : 1)); /* set clock and counters */ snd_es1938_rate_set(chip, substream, ADC1); count = 0x10000 - count; snd_es1938_write(chip, ESS_CMD_DMACNTRELOADL, count & 0xff); snd_es1938_write(chip, ESS_CMD_DMACNTRELOADH, count >> 8); /* initialize and configure ADC */ snd_es1938_write(chip, ESS_CMD_SETFORMAT2, u ? 0x51 : 0x71); snd_es1938_write(chip, ESS_CMD_SETFORMAT2, 0x90 | (u ? 0x00 : 0x20) | (is8 ? 0x00 : 0x04) | (mono ? 0x40 : 0x08)); // snd_es1938_reset_fifo(chip); /* 11. configure system interrupt controller and DMA controller */ snd_es1938_capture_setdma(chip); return 0; } /* ------------------------------------------------------------------------------ * Second Audio channel DAC Operation * ------------------------------------------------------------------------------*/ static int snd_es1938_playback1_prepare(struct snd_pcm_substream *substream) { struct es1938 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int u, is8, mono; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); chip->dma2_size = size; chip->dma2_start = runtime->dma_addr; mono = (runtime->channels > 1) ? 0 : 1; is8 = snd_pcm_format_width(runtime->format) == 16 ? 0 : 1; u = snd_pcm_format_unsigned(runtime->format); chip->dma2_shift = 2 - mono - is8; snd_es1938_reset_fifo(chip); /* set clock and counters */ snd_es1938_rate_set(chip, substream, DAC2); count >>= 1; count = 0x10000 - count; snd_es1938_mixer_write(chip, ESSSB_IREG_AUDIO2TCOUNTL, count & 0xff); snd_es1938_mixer_write(chip, ESSSB_IREG_AUDIO2TCOUNTH, count >> 8); /* initialize and configure Audio 2 DAC */ snd_es1938_mixer_write(chip, ESSSB_IREG_AUDIO2CONTROL2, 0x40 | (u ? 0 : 4) | (mono ? 0 : 2) | (is8 ? 
0 : 1)); /* program DMA */ snd_es1938_playback1_setdma(chip); return 0; } static int snd_es1938_playback2_prepare(struct snd_pcm_substream *substream) { struct es1938 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int u, is8, mono; unsigned int size = snd_pcm_lib_buffer_bytes(substream); unsigned int count = snd_pcm_lib_period_bytes(substream); chip->dma1_size = size; chip->dma1_start = runtime->dma_addr; mono = (runtime->channels > 1) ? 0 : 1; is8 = snd_pcm_format_width(runtime->format) == 16 ? 0 : 1; u = snd_pcm_format_unsigned(runtime->format); chip->dma1_shift = 2 - mono - is8; count = 0x10000 - count; /* reset */ snd_es1938_reset_fifo(chip); snd_es1938_bits(chip, ESS_CMD_ANALOGCONTROL, 0x03, (mono ? 2 : 1)); /* set clock and counters */ snd_es1938_rate_set(chip, substream, DAC1); snd_es1938_write(chip, ESS_CMD_DMACNTRELOADL, count & 0xff); snd_es1938_write(chip, ESS_CMD_DMACNTRELOADH, count >> 8); /* initialized and configure DAC */ snd_es1938_write(chip, ESS_CMD_SETFORMAT, u ? 0x80 : 0x00); snd_es1938_write(chip, ESS_CMD_SETFORMAT, u ? 0x51 : 0x71); snd_es1938_write(chip, ESS_CMD_SETFORMAT2, 0x90 | (mono ? 0x40 : 0x08) | (is8 ? 0x00 : 0x04) | (u ? 0x00 : 0x20)); /* program DMA */ snd_es1938_playback2_setdma(chip); return 0; } static int snd_es1938_playback_prepare(struct snd_pcm_substream *substream) { switch (substream->number) { case 0: return snd_es1938_playback1_prepare(substream); case 1: return snd_es1938_playback2_prepare(substream); } snd_BUG(); return -EINVAL; } /* during the incrementing of dma counters the DMA register reads sometimes returns garbage. To ensure a valid hw pointer, the following checks which should be very unlikely to fail are used: - is the current DMA address in the valid DMA range ? - is the sum of DMA address and DMA counter pointing to the last DMA byte ? 
One can argue this could differ by one byte depending on which register is updated first, so the implementation below allows for that. */ static snd_pcm_uframes_t snd_es1938_capture_pointer(struct snd_pcm_substream *substream) { struct es1938 *chip = snd_pcm_substream_chip(substream); size_t ptr; #if 0 size_t old, new; /* This stuff is *needed*, don't ask why - AB */ old = inw(SLDM_REG(chip, DMACOUNT)); while ((new = inw(SLDM_REG(chip, DMACOUNT))) != old) old = new; ptr = chip->dma1_size - 1 - new; #else size_t count; unsigned int diff; ptr = inl(SLDM_REG(chip, DMAADDR)); count = inw(SLDM_REG(chip, DMACOUNT)); diff = chip->dma1_start + chip->dma1_size - ptr - count; if (diff > 3 || ptr < chip->dma1_start || ptr >= chip->dma1_start+chip->dma1_size) ptr = chip->last_capture_dmaaddr; /* bad, use last saved */ else chip->last_capture_dmaaddr = ptr; /* good, remember it */ ptr -= chip->dma1_start; #endif return ptr >> chip->dma1_shift; } static snd_pcm_uframes_t snd_es1938_playback1_pointer(struct snd_pcm_substream *substream) { struct es1938 *chip = snd_pcm_substream_chip(substream); size_t ptr; #if 1 ptr = chip->dma2_size - inw(SLIO_REG(chip, AUDIO2DMACOUNT)); #else ptr = inl(SLIO_REG(chip, AUDIO2DMAADDR)) - chip->dma2_start; #endif return ptr >> chip->dma2_shift; } static snd_pcm_uframes_t snd_es1938_playback2_pointer(struct snd_pcm_substream *substream) { struct es1938 *chip = snd_pcm_substream_chip(substream); size_t ptr; size_t old, new; #if 1 /* This stuff is *needed*, don't ask why - AB */ old = inw(SLDM_REG(chip, DMACOUNT)); while ((new = inw(SLDM_REG(chip, DMACOUNT))) != old) old = new; ptr = chip->dma1_size - 1 - new; #else ptr = inl(SLDM_REG(chip, DMAADDR)) - chip->dma1_start; #endif return ptr >> chip->dma1_shift; } static snd_pcm_uframes_t snd_es1938_playback_pointer(struct snd_pcm_substream *substream) { switch (substream->number) { case 0: return snd_es1938_playback1_pointer(substream); case 1: return snd_es1938_playback2_pointer(substream); } snd_BUG(); 
return -EINVAL; } static int snd_es1938_capture_copy(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t pos, void __user *dst, snd_pcm_uframes_t count) { struct snd_pcm_runtime *runtime = substream->runtime; struct es1938 *chip = snd_pcm_substream_chip(substream); pos <<= chip->dma1_shift; count <<= chip->dma1_shift; if (snd_BUG_ON(pos + count > chip->dma1_size)) return -EINVAL; if (pos + count < chip->dma1_size) { if (copy_to_user(dst, runtime->dma_area + pos + 1, count)) return -EFAULT; } else { if (copy_to_user(dst, runtime->dma_area + pos + 1, count - 1)) return -EFAULT; if (put_user(runtime->dma_area[0], ((unsigned char __user *)dst) + count - 1)) return -EFAULT; } return 0; } /* * buffer management */ static int snd_es1938_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { int err; if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0) return err; return 0; } static int snd_es1938_pcm_hw_free(struct snd_pcm_substream *substream) { return snd_pcm_lib_free_pages(substream); } /* ---------------------------------------------------------------------- * Audio1 Capture (ADC) * ----------------------------------------------------------------------*/ static struct snd_pcm_hardware snd_es1938_capture = { .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER), .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE), .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 6000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 0x8000, /* DMA controller screws on higher values */ .period_bytes_min = 64, .period_bytes_max = 0x8000, .periods_min = 1, .periods_max = 1024, .fifo_size = 256, }; /* ----------------------------------------------------------------------- * Audio2 Playback (DAC) * -----------------------------------------------------------------------*/ static struct 
snd_pcm_hardware snd_es1938_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID), .formats = (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U16_LE), .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000, .rate_min = 6000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = 0x8000, /* DMA controller screws on higher values */ .period_bytes_min = 64, .period_bytes_max = 0x8000, .periods_min = 1, .periods_max = 1024, .fifo_size = 256, }; static int snd_es1938_capture_open(struct snd_pcm_substream *substream) { struct es1938 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; if (chip->playback2_substream) return -EAGAIN; chip->capture_substream = substream; runtime->hw = snd_es1938_capture; snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_clocks); snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, 0xff00); return 0; } static int snd_es1938_playback_open(struct snd_pcm_substream *substream) { struct es1938 *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; switch (substream->number) { case 0: chip->playback1_substream = substream; break; case 1: if (chip->capture_substream) return -EAGAIN; chip->playback2_substream = substream; break; default: snd_BUG(); return -EINVAL; } runtime->hw = snd_es1938_playback; snd_pcm_hw_constraint_ratnums(runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_clocks); snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 0, 0xff00); return 0; } static int snd_es1938_capture_close(struct snd_pcm_substream *substream) { struct es1938 *chip = snd_pcm_substream_chip(substream); chip->capture_substream = NULL; return 0; } static int snd_es1938_playback_close(struct snd_pcm_substream *substream) { struct es1938 *chip = 
snd_pcm_substream_chip(substream); switch (substream->number) { case 0: chip->playback1_substream = NULL; break; case 1: chip->playback2_substream = NULL; break; default: snd_BUG(); return -EINVAL; } return 0; } static struct snd_pcm_ops snd_es1938_playback_ops = { .open = snd_es1938_playback_open, .close = snd_es1938_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_es1938_pcm_hw_params, .hw_free = snd_es1938_pcm_hw_free, .prepare = snd_es1938_playback_prepare, .trigger = snd_es1938_playback_trigger, .pointer = snd_es1938_playback_pointer, }; static struct snd_pcm_ops snd_es1938_capture_ops = { .open = snd_es1938_capture_open, .close = snd_es1938_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_es1938_pcm_hw_params, .hw_free = snd_es1938_pcm_hw_free, .prepare = snd_es1938_capture_prepare, .trigger = snd_es1938_capture_trigger, .pointer = snd_es1938_capture_pointer, .copy = snd_es1938_capture_copy, }; static int snd_es1938_new_pcm(struct es1938 *chip, int device) { struct snd_pcm *pcm; int err; if ((err = snd_pcm_new(chip->card, "es-1938-1946", device, 2, 1, &pcm)) < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_es1938_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_es1938_capture_ops); pcm->private_data = chip; pcm->info_flags = 0; strcpy(pcm->name, "ESS Solo-1"); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), 64*1024, 64*1024); chip->pcm = pcm; return 0; } /* ------------------------------------------------------------------- * * *** Mixer part *** */ static int snd_es1938_info_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[8] = { "Mic", "Mic Master", "CD", "AOUT", "Mic1", "Mix", "Line", "Master" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 8; if (uinfo->value.enumerated.item > 7) uinfo->value.enumerated.item = 7; strcpy(uinfo->value.enumerated.name, 
texts[uinfo->value.enumerated.item]); return 0; } static int snd_es1938_get_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct es1938 *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.enumerated.item[0] = snd_es1938_mixer_read(chip, 0x1c) & 0x07; return 0; } static int snd_es1938_put_mux(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct es1938 *chip = snd_kcontrol_chip(kcontrol); unsigned char val = ucontrol->value.enumerated.item[0]; if (val > 7) return -EINVAL; return snd_es1938_mixer_bits(chip, 0x1c, 0x07, val) != val; } #define snd_es1938_info_spatializer_enable snd_ctl_boolean_mono_info static int snd_es1938_get_spatializer_enable(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct es1938 *chip = snd_kcontrol_chip(kcontrol); unsigned char val = snd_es1938_mixer_read(chip, 0x50); ucontrol->value.integer.value[0] = !!(val & 8); return 0; } static int snd_es1938_put_spatializer_enable(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct es1938 *chip = snd_kcontrol_chip(kcontrol); unsigned char oval, nval; int change; nval = ucontrol->value.integer.value[0] ? 
0x0c : 0x04; oval = snd_es1938_mixer_read(chip, 0x50) & 0x0c; change = nval != oval; if (change) { snd_es1938_mixer_write(chip, 0x50, nval & ~0x04); snd_es1938_mixer_write(chip, 0x50, nval); } return change; } static int snd_es1938_info_hw_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = 63; return 0; } static int snd_es1938_get_hw_volume(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct es1938 *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = snd_es1938_mixer_read(chip, 0x61) & 0x3f; ucontrol->value.integer.value[1] = snd_es1938_mixer_read(chip, 0x63) & 0x3f; return 0; } #define snd_es1938_info_hw_switch snd_ctl_boolean_stereo_info static int snd_es1938_get_hw_switch(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct es1938 *chip = snd_kcontrol_chip(kcontrol); ucontrol->value.integer.value[0] = !(snd_es1938_mixer_read(chip, 0x61) & 0x40); ucontrol->value.integer.value[1] = !(snd_es1938_mixer_read(chip, 0x63) & 0x40); return 0; } static void snd_es1938_hwv_free(struct snd_kcontrol *kcontrol) { struct es1938 *chip = snd_kcontrol_chip(kcontrol); chip->master_volume = NULL; chip->master_switch = NULL; chip->hw_volume = NULL; chip->hw_switch = NULL; } static int snd_es1938_reg_bits(struct es1938 *chip, unsigned char reg, unsigned char mask, unsigned char val) { if (reg < 0xa0) return snd_es1938_mixer_bits(chip, reg, mask, val); else return snd_es1938_bits(chip, reg, mask, val); } static int snd_es1938_reg_read(struct es1938 *chip, unsigned char reg) { if (reg < 0xa0) return snd_es1938_mixer_read(chip, reg); else return snd_es1938_read(chip, reg); } #define ES1938_SINGLE_TLV(xname, xindex, reg, shift, mask, invert, xtlv) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ,\ .name = xname, 
.index = xindex, \ .info = snd_es1938_info_single, \ .get = snd_es1938_get_single, .put = snd_es1938_put_single, \ .private_value = reg | (shift << 8) | (mask << 16) | (invert << 24), \ .tlv = { .p = xtlv } } #define ES1938_SINGLE(xname, xindex, reg, shift, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_es1938_info_single, \ .get = snd_es1938_get_single, .put = snd_es1938_put_single, \ .private_value = reg | (shift << 8) | (mask << 16) | (invert << 24) } static int snd_es1938_info_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 16) & 0xff; uinfo->type = mask == 1 ? SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_es1938_get_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct es1938 *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; int val; val = snd_es1938_reg_read(chip, reg); ucontrol->value.integer.value[0] = (val >> shift) & mask; if (invert) ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; return 0; } static int snd_es1938_put_single(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct es1938 *chip = snd_kcontrol_chip(kcontrol); int reg = kcontrol->private_value & 0xff; int shift = (kcontrol->private_value >> 8) & 0xff; int mask = (kcontrol->private_value >> 16) & 0xff; int invert = (kcontrol->private_value >> 24) & 0xff; unsigned char val; val = (ucontrol->value.integer.value[0] & mask); if (invert) val = mask - val; mask <<= shift; val <<= shift; return snd_es1938_reg_bits(chip, reg, mask, val) != val; } #define ES1938_DOUBLE_TLV(xname, xindex, left_reg, right_reg, 
shift_left, shift_right, mask, invert, xtlv) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ,\ .name = xname, .index = xindex, \ .info = snd_es1938_info_double, \ .get = snd_es1938_get_double, .put = snd_es1938_put_double, \ .private_value = left_reg | (right_reg << 8) | (shift_left << 16) | (shift_right << 19) | (mask << 24) | (invert << 22), \ .tlv = { .p = xtlv } } #define ES1938_DOUBLE(xname, xindex, left_reg, right_reg, shift_left, shift_right, mask, invert) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_es1938_info_double, \ .get = snd_es1938_get_double, .put = snd_es1938_put_double, \ .private_value = left_reg | (right_reg << 8) | (shift_left << 16) | (shift_right << 19) | (mask << 24) | (invert << 22) } static int snd_es1938_info_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { int mask = (kcontrol->private_value >> 24) & 0xff; uinfo->type = mask == 1 ? 
SNDRV_CTL_ELEM_TYPE_BOOLEAN : SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = 0; uinfo->value.integer.max = mask; return 0; } static int snd_es1938_get_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct es1938 *chip = snd_kcontrol_chip(kcontrol); int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; unsigned char left, right; left = snd_es1938_reg_read(chip, left_reg); if (left_reg != right_reg) right = snd_es1938_reg_read(chip, right_reg); else right = left; ucontrol->value.integer.value[0] = (left >> shift_left) & mask; ucontrol->value.integer.value[1] = (right >> shift_right) & mask; if (invert) { ucontrol->value.integer.value[0] = mask - ucontrol->value.integer.value[0]; ucontrol->value.integer.value[1] = mask - ucontrol->value.integer.value[1]; } return 0; } static int snd_es1938_put_double(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct es1938 *chip = snd_kcontrol_chip(kcontrol); int left_reg = kcontrol->private_value & 0xff; int right_reg = (kcontrol->private_value >> 8) & 0xff; int shift_left = (kcontrol->private_value >> 16) & 0x07; int shift_right = (kcontrol->private_value >> 19) & 0x07; int mask = (kcontrol->private_value >> 24) & 0xff; int invert = (kcontrol->private_value >> 22) & 1; int change; unsigned char val1, val2, mask1, mask2; val1 = ucontrol->value.integer.value[0] & mask; val2 = ucontrol->value.integer.value[1] & mask; if (invert) { val1 = mask - val1; val2 = mask - val2; } val1 <<= shift_left; val2 <<= shift_right; mask1 = mask << shift_left; mask2 = mask << shift_right; if (left_reg != right_reg) { change = 0; if (snd_es1938_reg_bits(chip, left_reg, mask1, val1) != val1) change = 1; if 
(snd_es1938_reg_bits(chip, right_reg, mask2, val2) != val2) change = 1; } else { change = (snd_es1938_reg_bits(chip, left_reg, mask1 | mask2, val1 | val2) != (val1 | val2)); } return change; } static const DECLARE_TLV_DB_RANGE(db_scale_master, 0, 54, TLV_DB_SCALE_ITEM(-3600, 50, 1), 54, 63, TLV_DB_SCALE_ITEM(-900, 100, 0), ); static const DECLARE_TLV_DB_RANGE(db_scale_audio1, 0, 8, TLV_DB_SCALE_ITEM(-3300, 300, 1), 8, 15, TLV_DB_SCALE_ITEM(-900, 150, 0), ); static const DECLARE_TLV_DB_RANGE(db_scale_audio2, 0, 8, TLV_DB_SCALE_ITEM(-3450, 300, 1), 8, 15, TLV_DB_SCALE_ITEM(-1050, 150, 0), ); static const DECLARE_TLV_DB_RANGE(db_scale_mic, 0, 8, TLV_DB_SCALE_ITEM(-2400, 300, 1), 8, 15, TLV_DB_SCALE_ITEM(0, 150, 0), ); static const DECLARE_TLV_DB_RANGE(db_scale_line, 0, 8, TLV_DB_SCALE_ITEM(-3150, 300, 1), 8, 15, TLV_DB_SCALE_ITEM(-750, 150, 0), ); static const DECLARE_TLV_DB_SCALE(db_scale_capture, 0, 150, 0); static struct snd_kcontrol_new snd_es1938_controls[] = { ES1938_DOUBLE_TLV("Master Playback Volume", 0, 0x60, 0x62, 0, 0, 63, 0, db_scale_master), ES1938_DOUBLE("Master Playback Switch", 0, 0x60, 0x62, 6, 6, 1, 1), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Hardware Master Playback Volume", .access = SNDRV_CTL_ELEM_ACCESS_READ, .info = snd_es1938_info_hw_volume, .get = snd_es1938_get_hw_volume, }, { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .access = (SNDRV_CTL_ELEM_ACCESS_READ | SNDRV_CTL_ELEM_ACCESS_TLV_READ), .name = "Hardware Master Playback Switch", .info = snd_es1938_info_hw_switch, .get = snd_es1938_get_hw_switch, .tlv = { .p = db_scale_master }, }, ES1938_SINGLE("Hardware Volume Split", 0, 0x64, 7, 1, 0), ES1938_DOUBLE_TLV("Line Playback Volume", 0, 0x3e, 0x3e, 4, 0, 15, 0, db_scale_line), ES1938_DOUBLE("CD Playback Volume", 0, 0x38, 0x38, 4, 0, 15, 0), ES1938_DOUBLE_TLV("FM Playback Volume", 0, 0x36, 0x36, 4, 0, 15, 0, db_scale_mic), ES1938_DOUBLE_TLV("Mono Playback Volume", 0, 0x6d, 0x6d, 4, 0, 15, 0, db_scale_line), ES1938_DOUBLE_TLV("Mic Playback 
Volume", 0, 0x1a, 0x1a, 4, 0, 15, 0, db_scale_mic), ES1938_DOUBLE_TLV("Aux Playback Volume", 0, 0x3a, 0x3a, 4, 0, 15, 0, db_scale_line), ES1938_DOUBLE_TLV("Capture Volume", 0, 0xb4, 0xb4, 4, 0, 15, 0, db_scale_capture), ES1938_SINGLE("Beep Volume", 0, 0x3c, 0, 7, 0), ES1938_SINGLE("Record Monitor", 0, 0xa8, 3, 1, 0), ES1938_SINGLE("Capture Switch", 0, 0x1c, 4, 1, 1), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "Capture Source", .info = snd_es1938_info_mux, .get = snd_es1938_get_mux, .put = snd_es1938_put_mux, }, ES1938_DOUBLE_TLV("Mono Input Playback Volume", 0, 0x6d, 0x6d, 4, 0, 15, 0, db_scale_line), ES1938_DOUBLE_TLV("PCM Capture Volume", 0, 0x69, 0x69, 4, 0, 15, 0, db_scale_audio2), ES1938_DOUBLE_TLV("Mic Capture Volume", 0, 0x68, 0x68, 4, 0, 15, 0, db_scale_mic), ES1938_DOUBLE_TLV("Line Capture Volume", 0, 0x6e, 0x6e, 4, 0, 15, 0, db_scale_line), ES1938_DOUBLE_TLV("FM Capture Volume", 0, 0x6b, 0x6b, 4, 0, 15, 0, db_scale_mic), ES1938_DOUBLE_TLV("Mono Capture Volume", 0, 0x6f, 0x6f, 4, 0, 15, 0, db_scale_line), ES1938_DOUBLE_TLV("CD Capture Volume", 0, 0x6a, 0x6a, 4, 0, 15, 0, db_scale_line), ES1938_DOUBLE_TLV("Aux Capture Volume", 0, 0x6c, 0x6c, 4, 0, 15, 0, db_scale_line), ES1938_DOUBLE_TLV("PCM Playback Volume", 0, 0x7c, 0x7c, 4, 0, 15, 0, db_scale_audio2), ES1938_DOUBLE_TLV("PCM Playback Volume", 1, 0x14, 0x14, 4, 0, 15, 0, db_scale_audio1), ES1938_SINGLE("3D Control - Level", 0, 0x52, 0, 63, 0), { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = "3D Control - Switch", .info = snd_es1938_info_spatializer_enable, .get = snd_es1938_get_spatializer_enable, .put = snd_es1938_put_spatializer_enable, }, ES1938_SINGLE("Mic Boost (+26dB)", 0, 0x7d, 3, 1, 0) }; /* ---------------------------------------------------------------------------- */ /* ---------------------------------------------------------------------------- */ /* * initialize the chip - used by resume callback, too */ static void snd_es1938_chip_init(struct es1938 *chip) { /* reset chip */ 
snd_es1938_reset(chip); /* configure native mode */ /* enable bus master */ pci_set_master(chip->pci); /* disable legacy audio */ pci_write_config_word(chip->pci, SL_PCI_LEGACYCONTROL, 0x805f); /* set DDMA base */ pci_write_config_word(chip->pci, SL_PCI_DDMACONTROL, chip->ddma_port | 1); /* set DMA/IRQ policy */ pci_write_config_dword(chip->pci, SL_PCI_CONFIG, 0); /* enable Audio 1, Audio 2, MPU401 IRQ and HW volume IRQ*/ outb(0xf0, SLIO_REG(chip, IRQCONTROL)); /* reset DMA */ outb(0, SLDM_REG(chip, DMACLEAR)); } #ifdef CONFIG_PM_SLEEP /* * PM support */ static unsigned char saved_regs[SAVED_REG_SIZE+1] = { 0x14, 0x1a, 0x1c, 0x3a, 0x3c, 0x3e, 0x36, 0x38, 0x50, 0x52, 0x60, 0x61, 0x62, 0x63, 0x64, 0x68, 0x69, 0x6a, 0x6b, 0x6d, 0x6e, 0x6f, 0x7c, 0x7d, 0xa8, 0xb4, }; static int es1938_suspend(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct snd_card *card = dev_get_drvdata(dev); struct es1938 *chip = card->private_data; unsigned char *s, *d; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); /* save mixer-related registers */ for (s = saved_regs, d = chip->saved_regs; *s; s++, d++) *d = snd_es1938_reg_read(chip, *s); outb(0x00, SLIO_REG(chip, IRQCONTROL)); /* disable irqs */ if (chip->irq >= 0) { free_irq(chip->irq, chip); chip->irq = -1; } pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, PCI_D3hot); return 0; } static int es1938_resume(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct snd_card *card = dev_get_drvdata(dev); struct es1938 *chip = card->private_data; unsigned char *s, *d; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "es1938: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } if (request_irq(pci->irq, snd_es1938_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { printk(KERN_ERR "es1938: unable to grab IRQ %d, " "disabling device\n", pci->irq); 
snd_card_disconnect(card); return -EIO; } chip->irq = pci->irq; snd_es1938_chip_init(chip); /* restore mixer-related registers */ for (s = saved_regs, d = chip->saved_regs; *s; s++, d++) { if (*s < 0xa0) snd_es1938_mixer_write(chip, *s, *d); else snd_es1938_write(chip, *s, *d); } snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } static SIMPLE_DEV_PM_OPS(es1938_pm, es1938_suspend, es1938_resume); #define ES1938_PM_OPS &es1938_pm #else #define ES1938_PM_OPS NULL #endif /* CONFIG_PM_SLEEP */ #ifdef SUPPORT_JOYSTICK static int snd_es1938_create_gameport(struct es1938 *chip) { struct gameport *gp; chip->gameport = gp = gameport_allocate_port(); if (!gp) { printk(KERN_ERR "es1938: cannot allocate memory for gameport\n"); return -ENOMEM; } gameport_set_name(gp, "ES1938"); gameport_set_phys(gp, "pci%s/gameport0", pci_name(chip->pci)); gameport_set_dev_parent(gp, &chip->pci->dev); gp->io = chip->game_port; gameport_register_port(gp); return 0; } static void snd_es1938_free_gameport(struct es1938 *chip) { if (chip->gameport) { gameport_unregister_port(chip->gameport); chip->gameport = NULL; } } #else static inline int snd_es1938_create_gameport(struct es1938 *chip) { return -ENOSYS; } static inline void snd_es1938_free_gameport(struct es1938 *chip) { } #endif /* SUPPORT_JOYSTICK */ static int snd_es1938_free(struct es1938 *chip) { /* disable irqs */ outb(0x00, SLIO_REG(chip, IRQCONTROL)); if (chip->rmidi) snd_es1938_mixer_bits(chip, ESSSB_IREG_MPU401CONTROL, 0x40, 0); snd_es1938_free_gameport(chip); if (chip->irq >= 0) free_irq(chip->irq, chip); pci_release_regions(chip->pci); pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_es1938_dev_free(struct snd_device *device) { struct es1938 *chip = device->device_data; return snd_es1938_free(chip); } static int snd_es1938_create(struct snd_card *card, struct pci_dev *pci, struct es1938 **rchip) { struct es1938 *chip; int err; static struct snd_device_ops ops = { .dev_free = snd_es1938_dev_free, }; 
*rchip = NULL; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; /* check, if we can restrict PCI DMA transfers to 24 bits */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(24)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(24)) < 0) { snd_printk(KERN_ERR "architecture does not support 24bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; } chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); spin_lock_init(&chip->mixer_lock); chip->card = card; chip->pci = pci; chip->irq = -1; if ((err = pci_request_regions(pci, "ESS Solo-1")) < 0) { kfree(chip); pci_disable_device(pci); return err; } chip->io_port = pci_resource_start(pci, 0); chip->sb_port = pci_resource_start(pci, 1); chip->vc_port = pci_resource_start(pci, 2); chip->mpu_port = pci_resource_start(pci, 3); chip->game_port = pci_resource_start(pci, 4); if (request_irq(pci->irq, snd_es1938_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_es1938_free(chip); return -EBUSY; } chip->irq = pci->irq; #ifdef ES1938_DDEBUG snd_printk(KERN_DEBUG "create: io: 0x%lx, sb: 0x%lx, vc: 0x%lx, mpu: 0x%lx, game: 0x%lx\n", chip->io_port, chip->sb_port, chip->vc_port, chip->mpu_port, chip->game_port); #endif chip->ddma_port = chip->vc_port + 0x00; /* fix from Thomas Sailer */ snd_es1938_chip_init(chip); if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_es1938_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *rchip = chip; return 0; } /* -------------------------------------------------------------------- * Interrupt handler * -------------------------------------------------------------------- */ static irqreturn_t snd_es1938_interrupt(int irq, void *dev_id) { struct es1938 *chip = dev_id; unsigned char status, audiostatus; int handled = 0; status = inb(SLIO_REG(chip, IRQCONTROL)); #if 0 printk(KERN_DEBUG 
"Es1938debug - interrupt status: =0x%x\n", status); #endif /* AUDIO 1 */ if (status & 0x10) { #if 0 printk(KERN_DEBUG "Es1938debug - AUDIO channel 1 interrupt\n"); printk(KERN_DEBUG "Es1938debug - AUDIO channel 1 DMAC DMA count: %u\n", inw(SLDM_REG(chip, DMACOUNT))); printk(KERN_DEBUG "Es1938debug - AUDIO channel 1 DMAC DMA base: %u\n", inl(SLDM_REG(chip, DMAADDR))); printk(KERN_DEBUG "Es1938debug - AUDIO channel 1 DMAC DMA status: 0x%x\n", inl(SLDM_REG(chip, DMASTATUS))); #endif /* clear irq */ handled = 1; audiostatus = inb(SLSB_REG(chip, STATUS)); if (chip->active & ADC1) snd_pcm_period_elapsed(chip->capture_substream); else if (chip->active & DAC1) snd_pcm_period_elapsed(chip->playback2_substream); } /* AUDIO 2 */ if (status & 0x20) { #if 0 printk(KERN_DEBUG "Es1938debug - AUDIO channel 2 interrupt\n"); printk(KERN_DEBUG "Es1938debug - AUDIO channel 2 DMAC DMA count: %u\n", inw(SLIO_REG(chip, AUDIO2DMACOUNT))); printk(KERN_DEBUG "Es1938debug - AUDIO channel 2 DMAC DMA base: %u\n", inl(SLIO_REG(chip, AUDIO2DMAADDR))); #endif /* clear irq */ handled = 1; snd_es1938_mixer_bits(chip, ESSSB_IREG_AUDIO2CONTROL2, 0x80, 0); if (chip->active & DAC2) snd_pcm_period_elapsed(chip->playback1_substream); } /* Hardware volume */ if (status & 0x40) { int split = snd_es1938_mixer_read(chip, 0x64) & 0x80; handled = 1; snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->hw_switch->id); snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->hw_volume->id); if (!split) { snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->master_switch->id); snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE, &chip->master_volume->id); } /* ack interrupt */ snd_es1938_mixer_write(chip, 0x66, 0x00); } /* MPU401 */ if (status & 0x80) { // the following line is evil! It switches off MIDI interrupt handling after the first interrupt received. // replacing the last 0 by 0x40 works for ESS-Solo1, but just doing nothing works as well! 
// andreas@flying-snail.de // snd_es1938_mixer_bits(chip, ESSSB_IREG_MPU401CONTROL, 0x40, 0); /* ack? */ if (chip->rmidi) { handled = 1; snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data); } } return IRQ_RETVAL(handled); } #define ES1938_DMA_SIZE 64 static int snd_es1938_mixer(struct es1938 *chip) { struct snd_card *card; unsigned int idx; int err; card = chip->card; strcpy(card->mixername, "ESS Solo-1"); for (idx = 0; idx < ARRAY_SIZE(snd_es1938_controls); idx++) { struct snd_kcontrol *kctl; kctl = snd_ctl_new1(&snd_es1938_controls[idx], chip); switch (idx) { case 0: chip->master_volume = kctl; kctl->private_free = snd_es1938_hwv_free; break; case 1: chip->master_switch = kctl; kctl->private_free = snd_es1938_hwv_free; break; case 2: chip->hw_volume = kctl; kctl->private_free = snd_es1938_hwv_free; break; case 3: chip->hw_switch = kctl; kctl->private_free = snd_es1938_hwv_free; break; } if ((err = snd_ctl_add(card, kctl)) < 0) return err; } return 0; } static int snd_es1938_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct es1938 *chip; struct snd_opl3 *opl3; int idx, err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; for (idx = 0; idx < 5; idx++) { if (pci_resource_start(pci, idx) == 0 || !(pci_resource_flags(pci, idx) & IORESOURCE_IO)) { snd_card_free(card); return -ENODEV; } } if ((err = snd_es1938_create(card, pci, &chip)) < 0) { snd_card_free(card); return err; } card->private_data = chip; strcpy(card->driver, "ES1938"); strcpy(card->shortname, "ESS ES1938 (Solo-1)"); sprintf(card->longname, "%s rev %i, irq %i", card->shortname, chip->revision, chip->irq); if ((err = snd_es1938_new_pcm(chip, 0)) < 0) { snd_card_free(card); return err; } if ((err = snd_es1938_mixer(chip)) < 0) { snd_card_free(card); return err; } if (snd_opl3_create(card, SLSB_REG(chip, FMLOWADDR), 
SLSB_REG(chip, FMHIGHADDR), OPL3_HW_OPL3, 1, &opl3) < 0) { printk(KERN_ERR "es1938: OPL3 not detected at 0x%lx\n", SLSB_REG(chip, FMLOWADDR)); } else { if ((err = snd_opl3_timer_new(opl3, 0, 1)) < 0) { snd_card_free(card); return err; } if ((err = snd_opl3_hwdep_new(opl3, 0, 1, NULL)) < 0) { snd_card_free(card); return err; } } if (snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, chip->mpu_port, MPU401_INFO_INTEGRATED | MPU401_INFO_IRQ_HOOK, -1, &chip->rmidi) < 0) { printk(KERN_ERR "es1938: unable to initialize MPU-401\n"); } else { // this line is vital for MIDI interrupt handling on ess-solo1 // andreas@flying-snail.de snd_es1938_mixer_bits(chip, ESSSB_IREG_MPU401CONTROL, 0x40, 0x40); } snd_es1938_create_gameport(chip); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static void snd_es1938_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver es1938_driver = { .name = KBUILD_MODNAME, .id_table = snd_es1938_ids, .probe = snd_es1938_probe, .remove = snd_es1938_remove, .driver = { .pm = ES1938_PM_OPS, }, }; module_pci_driver(es1938_driver);
gpl-2.0
ocoot/Xiaomi_Kernel_OpenSource
sound/pci/vx222/vx222.c
2240
7575
/* * Driver for Digigram VX222 V2/Mic PCI soundcards * * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/tlv.h> #include "vx222.h" #define CARD_NAME "VX222" MODULE_AUTHOR("Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("Digigram VX222 V2/Mic"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{Digigram," CARD_NAME "}}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ static bool mic[SNDRV_CARDS]; /* microphone */ static int ibl[SNDRV_CARDS]; /* microphone */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Digigram " CARD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Digigram " CARD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Digigram " CARD_NAME " soundcard."); module_param_array(mic, bool, NULL, 0444); MODULE_PARM_DESC(mic, "Enable Microphone."); 
module_param_array(ibl, int, NULL, 0444); MODULE_PARM_DESC(ibl, "Capture IBL size."); /* */ enum { VX_PCI_VX222_OLD, VX_PCI_VX222_NEW }; static DEFINE_PCI_DEVICE_TABLE(snd_vx222_ids) = { { 0x10b5, 0x9050, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_OLD, }, /* PLX */ { 0x10b5, 0x9030, 0x1369, PCI_ANY_ID, 0, 0, VX_PCI_VX222_NEW, }, /* PLX */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_vx222_ids); /* */ static const DECLARE_TLV_DB_SCALE(db_scale_old_vol, -11350, 50, 0); static const DECLARE_TLV_DB_SCALE(db_scale_akm, -7350, 50, 0); static struct snd_vx_hardware vx222_old_hw = { .name = "VX222/Old", .type = VX_TYPE_BOARD, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX_ANALOG_OUT_LEVEL_MAX, .output_level_db_scale = db_scale_old_vol, }; static struct snd_vx_hardware vx222_v2_hw = { .name = "VX222/v2", .type = VX_TYPE_V2, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX2_AKM_LEVEL_MAX, .output_level_db_scale = db_scale_akm, }; static struct snd_vx_hardware vx222_mic_hw = { .name = "VX222/Mic", .type = VX_TYPE_MIC, /* hw specs */ .num_codecs = 1, .num_ins = 1, .num_outs = 1, .output_level_max = VX2_AKM_LEVEL_MAX, .output_level_db_scale = db_scale_akm, }; /* */ static int snd_vx222_free(struct vx_core *chip) { struct snd_vx222 *vx = (struct snd_vx222 *)chip; if (chip->irq >= 0) free_irq(chip->irq, (void*)chip); if (vx->port[0]) pci_release_regions(vx->pci); pci_disable_device(vx->pci); kfree(chip); return 0; } static int snd_vx222_dev_free(struct snd_device *device) { struct vx_core *chip = device->device_data; return snd_vx222_free(chip); } static int snd_vx222_create(struct snd_card *card, struct pci_dev *pci, struct snd_vx_hardware *hw, struct snd_vx222 **rchip) { struct vx_core *chip; struct snd_vx222 *vx; int i, err; static struct snd_device_ops ops = { .dev_free = snd_vx222_dev_free, }; struct snd_vx_ops *vx_ops; /* enable PCI device */ if ((err = pci_enable_device(pci)) < 0) return err; 
pci_set_master(pci); vx_ops = hw->type == VX_TYPE_BOARD ? &vx222_old_ops : &vx222_ops; chip = snd_vx_create(card, hw, vx_ops, sizeof(struct snd_vx222) - sizeof(struct vx_core)); if (! chip) { pci_disable_device(pci); return -ENOMEM; } vx = (struct snd_vx222 *)chip; vx->pci = pci; if ((err = pci_request_regions(pci, CARD_NAME)) < 0) { snd_vx222_free(chip); return err; } for (i = 0; i < 2; i++) vx->port[i] = pci_resource_start(pci, i + 1); if (request_irq(pci->irq, snd_vx_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_vx222_free(chip); return -EBUSY; } chip->irq = pci->irq; if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_vx222_free(chip); return err; } snd_card_set_dev(card, &pci->dev); *rchip = vx; return 0; } static int snd_vx222_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_vx_hardware *hw; struct snd_vx222 *vx; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch ((int)pci_id->driver_data) { case VX_PCI_VX222_OLD: hw = &vx222_old_hw; break; case VX_PCI_VX222_NEW: default: if (mic[dev]) hw = &vx222_mic_hw; else hw = &vx222_v2_hw; break; } if ((err = snd_vx222_create(card, pci, hw, &vx)) < 0) { snd_card_free(card); return err; } card->private_data = vx; vx->core.ibl.size = ibl[dev]; sprintf(card->longname, "%s at 0x%lx & 0x%lx, irq %i", card->shortname, vx->port[0], vx->port[1], vx->core.irq); snd_printdd("%s at 0x%lx & 0x%lx, irq %i\n", card->shortname, vx->port[0], vx->port[1], vx->core.irq); #ifdef SND_VX_FW_LOADER vx->core.dev = &pci->dev; #endif if ((err = snd_vx_setup_firmware(&vx->core)) < 0) { snd_card_free(card); return err; } if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); dev++; return 0; } static 
void snd_vx222_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } #ifdef CONFIG_PM_SLEEP static int snd_vx222_suspend(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct snd_card *card = dev_get_drvdata(dev); struct snd_vx222 *vx = card->private_data; int err; err = snd_vx_suspend(&vx->core); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, PCI_D3hot); return err; } static int snd_vx222_resume(struct device *dev) { struct pci_dev *pci = to_pci_dev(dev); struct snd_card *card = dev_get_drvdata(dev); struct snd_vx222 *vx = card->private_data; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "vx222: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); return snd_vx_resume(&vx->core); } static SIMPLE_DEV_PM_OPS(snd_vx222_pm, snd_vx222_suspend, snd_vx222_resume); #define SND_VX222_PM_OPS &snd_vx222_pm #else #define SND_VX222_PM_OPS NULL #endif static struct pci_driver vx222_driver = { .name = KBUILD_MODNAME, .id_table = snd_vx222_ids, .probe = snd_vx222_probe, .remove = snd_vx222_remove, .driver = { .pm = SND_VX222_PM_OPS, }, }; module_pci_driver(vx222_driver);
gpl-2.0
superatmos/android_kernel_samsung_t1
arch/mips/pci/pci-xlr.c
2752
5867
/* * Copyright 2003-2011 NetLogic Microsystems, Inc. (NetLogic). All rights * reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the NetLogic * license below: * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * THIS SOFTWARE IS PROVIDED BY NETLOGIC ``AS IS'' AND ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include <linux/types.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/console.h> #include <asm/io.h> #include <asm/netlogic/interrupt.h> #include <asm/netlogic/xlr/iomap.h> #include <asm/netlogic/xlr/pic.h> #include <asm/netlogic/xlr/xlr.h> static void *pci_config_base; #define pci_cfg_addr(bus, devfn, off) (((bus) << 16) | ((devfn) << 8) | (off)) /* PCI ops */ static inline u32 pci_cfg_read_32bit(struct pci_bus *bus, unsigned int devfn, int where) { u32 data; u32 *cfgaddr; cfgaddr = (u32 *)(pci_config_base + pci_cfg_addr(bus->number, devfn, where & ~3)); data = *cfgaddr; return cpu_to_le32(data); } static inline void pci_cfg_write_32bit(struct pci_bus *bus, unsigned int devfn, int where, u32 data) { u32 *cfgaddr; cfgaddr = (u32 *)(pci_config_base + pci_cfg_addr(bus->number, devfn, where & ~3)); *cfgaddr = cpu_to_le32(data); } static int nlm_pcibios_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { u32 data; if ((size == 2) && (where & 1)) return PCIBIOS_BAD_REGISTER_NUMBER; else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; data = pci_cfg_read_32bit(bus, devfn, where); if (size == 1) *val = (data >> ((where & 3) << 3)) & 0xff; else if (size == 2) *val = (data >> ((where & 3) << 3)) & 0xffff; else *val = data; return PCIBIOS_SUCCESSFUL; } static int nlm_pcibios_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { u32 data; if ((size == 2) && (where & 1)) return PCIBIOS_BAD_REGISTER_NUMBER; else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; data = pci_cfg_read_32bit(bus, devfn, where); if (size == 1) data = (data & ~(0xff << ((where & 3) << 3))) | (val << ((where & 3) << 3)); else if (size == 2) data = (data & ~(0xffff << ((where & 3) << 3))) | (val << ((where & 3) << 3)); else data = val; pci_cfg_write_32bit(bus, devfn, where, data); return PCIBIOS_SUCCESSFUL; } struct pci_ops nlm_pci_ops = { .read = 
nlm_pcibios_read, .write = nlm_pcibios_write }; static struct resource nlm_pci_mem_resource = { .name = "XLR PCI MEM", .start = 0xd0000000UL, /* 256MB PCI mem @ 0xd000_0000 */ .end = 0xdfffffffUL, .flags = IORESOURCE_MEM, }; static struct resource nlm_pci_io_resource = { .name = "XLR IO MEM", .start = 0x10000000UL, /* 16MB PCI IO @ 0x1000_0000 */ .end = 0x100fffffUL, .flags = IORESOURCE_IO, }; struct pci_controller nlm_pci_controller = { .index = 0, .pci_ops = &nlm_pci_ops, .mem_resource = &nlm_pci_mem_resource, .mem_offset = 0x00000000UL, .io_resource = &nlm_pci_io_resource, .io_offset = 0x00000000UL, }; int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) { if (!nlm_chip_is_xls()) return PIC_PCIX_IRQ; /* for XLR just one IRQ*/ /* * For XLS PCIe, there is an IRQ per Link, find out which * link the device is on to assign interrupts */ if (dev->bus->self == NULL) return 0; switch (dev->bus->self->devfn) { case 0x0: return PIC_PCIE_LINK0_IRQ; case 0x8: return PIC_PCIE_LINK1_IRQ; case 0x10: if (nlm_chip_is_xls_b()) return PIC_PCIE_XLSB0_LINK2_IRQ; else return PIC_PCIE_LINK2_IRQ; case 0x18: if (nlm_chip_is_xls_b()) return PIC_PCIE_XLSB0_LINK3_IRQ; else return PIC_PCIE_LINK3_IRQ; } WARN(1, "Unexpected devfn %d\n", dev->bus->self->devfn); return 0; } /* Do platform specific device initialization at pci_enable_device() time */ int pcibios_plat_dev_init(struct pci_dev *dev) { return 0; } static int __init pcibios_init(void) { /* PSB assigns PCI resources */ pci_probe_only = 1; pci_config_base = ioremap(DEFAULT_PCI_CONFIG_BASE, 16 << 20); /* Extend IO port for memory mapped io */ ioport_resource.start = 0; ioport_resource.end = ~0; set_io_port_base(CKSEG1); nlm_pci_controller.io_map_base = CKSEG1; pr_info("Registering XLR/XLS PCIX/PCIE Controller.\n"); register_pci_controller(&nlm_pci_controller); return 0; } arch_initcall(pcibios_init); struct pci_fixup pcibios_fixups[] = { {0} };
gpl-2.0
sebirdman/m7_kernel_dev
arch/powerpc/sysdev/xics/icp-hv.c
3008
3991
/* * Copyright 2011 IBM Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/irq.h> #include <linux/smp.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/cpu.h> #include <linux/of.h> #include <asm/smp.h> #include <asm/irq.h> #include <asm/errno.h> #include <asm/xics.h> #include <asm/io.h> #include <asm/hvcall.h> static inline unsigned int icp_hv_get_xirr(unsigned char cppr) { unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; long rc; unsigned int ret = XICS_IRQ_SPURIOUS; rc = plpar_hcall(H_XIRR, retbuf, cppr); if (rc == H_SUCCESS) { ret = (unsigned int)retbuf[0]; } else { pr_err("%s: bad return code xirr cppr=0x%x returned %ld\n", __func__, cppr, rc); WARN_ON_ONCE(1); } return ret; } static inline void icp_hv_set_cppr(u8 value) { long rc = plpar_hcall_norets(H_CPPR, value); if (rc != H_SUCCESS) { pr_err("%s: bad return code cppr cppr=0x%x returned %ld\n", __func__, value, rc); WARN_ON_ONCE(1); } } static inline void icp_hv_set_xirr(unsigned int value) { long rc = plpar_hcall_norets(H_EOI, value); if (rc != H_SUCCESS) { pr_err("%s: bad return code eoi xirr=0x%x returned %ld\n", __func__, value, rc); WARN_ON_ONCE(1); icp_hv_set_cppr(value >> 24); } } static inline void icp_hv_set_qirr(int n_cpu , u8 value) { int hw_cpu = get_hard_smp_processor_id(n_cpu); long rc = plpar_hcall_norets(H_IPI, hw_cpu, value); if (rc != H_SUCCESS) { pr_err("%s: bad return code qirr cpu=%d hw_cpu=%d mfrr=0x%x " "returned %ld\n", __func__, n_cpu, hw_cpu, value, rc); WARN_ON_ONCE(1); } } static void icp_hv_eoi(struct irq_data *d) { unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); iosync(); icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq); } static void icp_hv_teardown_cpu(void) { int cpu = 
smp_processor_id(); /* Clear any pending IPI */ icp_hv_set_qirr(cpu, 0xff); } static void icp_hv_flush_ipi(void) { /* We take the ipi irq but and never return so we * need to EOI the IPI, but want to leave our priority 0 * * should we check all the other interrupts too? * should we be flagging idle loop instead? * or creating some task to be scheduled? */ icp_hv_set_xirr((0x00 << 24) | XICS_IPI); } static unsigned int icp_hv_get_irq(void) { unsigned int xirr = icp_hv_get_xirr(xics_cppr_top()); unsigned int vec = xirr & 0x00ffffff; unsigned int irq; if (vec == XICS_IRQ_SPURIOUS) return NO_IRQ; irq = irq_radix_revmap_lookup(xics_host, vec); if (likely(irq != NO_IRQ)) { xics_push_cppr(vec); return irq; } /* We don't have a linux mapping, so have rtas mask it. */ xics_mask_unknown_vec(vec); /* We might learn about it later, so EOI it */ icp_hv_set_xirr(xirr); return NO_IRQ; } static void icp_hv_set_cpu_priority(unsigned char cppr) { xics_set_base_cppr(cppr); icp_hv_set_cppr(cppr); iosync(); } #ifdef CONFIG_SMP static void icp_hv_cause_ipi(int cpu, unsigned long data) { icp_hv_set_qirr(cpu, IPI_PRIORITY); } static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id) { int cpu = smp_processor_id(); icp_hv_set_qirr(cpu, 0xff); return smp_ipi_demux(); } #endif /* CONFIG_SMP */ static const struct icp_ops icp_hv_ops = { .get_irq = icp_hv_get_irq, .eoi = icp_hv_eoi, .set_priority = icp_hv_set_cpu_priority, .teardown_cpu = icp_hv_teardown_cpu, .flush_ipi = icp_hv_flush_ipi, #ifdef CONFIG_SMP .ipi_action = icp_hv_ipi_action, .cause_ipi = icp_hv_cause_ipi, #endif }; int icp_hv_init(void) { struct device_node *np; np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp"); if (!np) np = of_find_node_by_type(NULL, "PowerPC-External-Interrupt-Presentation"); if (!np) return -ENODEV; icp_ops = &icp_hv_ops; return 0; }
gpl-2.0
Coolexe/shooter-ics-crc-3.0.16-294f767
sound/drivers/dummy.c
4032
31005
/* * Dummy soundcard * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/jiffies.h> #include <linux/slab.h> #include <linux/time.h> #include <linux/wait.h> #include <linux/hrtimer.h> #include <linux/math64.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/control.h> #include <sound/tlv.h> #include <sound/pcm.h> #include <sound/rawmidi.h> #include <sound/info.h> #include <sound/initval.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Dummy soundcard (/dev/null)"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ALSA,Dummy soundcard}}"); #define MAX_PCM_DEVICES 4 #define MAX_PCM_SUBSTREAMS 128 #define MAX_MIDI_DEVICES 2 /* defaults */ #define MAX_BUFFER_SIZE (64*1024) #define MIN_PERIOD_SIZE 64 #define MAX_PERIOD_SIZE MAX_BUFFER_SIZE #define USE_FORMATS (SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE) #define USE_RATE SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000 #define USE_RATE_MIN 5500 #define USE_RATE_MAX 48000 #define USE_CHANNELS_MIN 1 #define USE_CHANNELS_MAX 2 #define USE_PERIODS_MIN 1 #define USE_PERIODS_MAX 1024 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = 
SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = {1, [1 ... (SNDRV_CARDS - 1)] = 0}; static char *model[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = NULL}; static int pcm_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1}; static int pcm_substreams[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 8}; //static int midi_devs[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 2}; #ifdef CONFIG_HIGH_RES_TIMERS static int hrtimer = 1; #endif static int fake_buffer = 1; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for dummy soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for dummy soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable this dummy soundcard."); module_param_array(model, charp, NULL, 0444); MODULE_PARM_DESC(model, "Soundcard model."); module_param_array(pcm_devs, int, NULL, 0444); MODULE_PARM_DESC(pcm_devs, "PCM devices # (0-4) for dummy driver."); module_param_array(pcm_substreams, int, NULL, 0444); MODULE_PARM_DESC(pcm_substreams, "PCM substreams # (1-128) for dummy driver."); //module_param_array(midi_devs, int, NULL, 0444); //MODULE_PARM_DESC(midi_devs, "MIDI devices # (0-2) for dummy driver."); module_param(fake_buffer, bool, 0444); MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations."); #ifdef CONFIG_HIGH_RES_TIMERS module_param(hrtimer, bool, 0644); MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source."); #endif static struct platform_device *devices[SNDRV_CARDS]; #define MIXER_ADDR_MASTER 0 #define MIXER_ADDR_LINE 1 #define MIXER_ADDR_MIC 2 #define MIXER_ADDR_SYNTH 3 #define MIXER_ADDR_CD 4 #define MIXER_ADDR_LAST 4 struct dummy_timer_ops { int (*create)(struct snd_pcm_substream *); void (*free)(struct snd_pcm_substream *); int (*prepare)(struct snd_pcm_substream *); int (*start)(struct snd_pcm_substream *); int (*stop)(struct snd_pcm_substream *); snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *); }; 
struct dummy_model { const char *name; int (*playback_constraints)(struct snd_pcm_runtime *runtime); int (*capture_constraints)(struct snd_pcm_runtime *runtime); u64 formats; size_t buffer_bytes_max; size_t period_bytes_min; size_t period_bytes_max; unsigned int periods_min; unsigned int periods_max; unsigned int rates; unsigned int rate_min; unsigned int rate_max; unsigned int channels_min; unsigned int channels_max; }; struct snd_dummy { struct snd_card *card; struct dummy_model *model; struct snd_pcm *pcm; struct snd_pcm_hardware pcm_hw; spinlock_t mixer_lock; int mixer_volume[MIXER_ADDR_LAST+1][2]; int capture_source[MIXER_ADDR_LAST+1][2]; const struct dummy_timer_ops *timer_ops; }; /* * card models */ static int emu10k1_playback_constraints(struct snd_pcm_runtime *runtime) { int err; err = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) return err; err = snd_pcm_hw_constraint_minmax(runtime, SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 256, UINT_MAX); if (err < 0) return err; return 0; } struct dummy_model model_emu10k1 = { .name = "emu10k1", .playback_constraints = emu10k1_playback_constraints, .buffer_bytes_max = 128 * 1024, }; struct dummy_model model_rme9652 = { .name = "rme9652", .buffer_bytes_max = 26 * 64 * 1024, .formats = SNDRV_PCM_FMTBIT_S32_LE, .channels_min = 26, .channels_max = 26, .periods_min = 2, .periods_max = 2, }; struct dummy_model model_ice1712 = { .name = "ice1712", .buffer_bytes_max = 256 * 1024, .formats = SNDRV_PCM_FMTBIT_S32_LE, .channels_min = 10, .channels_max = 10, .periods_min = 1, .periods_max = 1024, }; struct dummy_model model_uda1341 = { .name = "uda1341", .buffer_bytes_max = 16380, .formats = SNDRV_PCM_FMTBIT_S16_LE, .channels_min = 2, .channels_max = 2, .periods_min = 2, .periods_max = 255, }; struct dummy_model model_ac97 = { .name = "ac97", .formats = SNDRV_PCM_FMTBIT_S16_LE, .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, }; struct dummy_model 
model_ca0106 = { .name = "ca0106", .formats = SNDRV_PCM_FMTBIT_S16_LE, .buffer_bytes_max = ((65536-64)*8), .period_bytes_max = (65536-64), .periods_min = 2, .periods_max = 8, .channels_min = 2, .channels_max = 2, .rates = SNDRV_PCM_RATE_48000|SNDRV_PCM_RATE_96000|SNDRV_PCM_RATE_192000, .rate_min = 48000, .rate_max = 192000, }; struct dummy_model *dummy_models[] = { &model_emu10k1, &model_rme9652, &model_ice1712, &model_uda1341, &model_ac97, &model_ca0106, NULL }; /* * system timer interface */ struct dummy_systimer_pcm { spinlock_t lock; struct timer_list timer; unsigned long base_time; unsigned int frac_pos; /* fractional sample position (based HZ) */ unsigned int frac_period_rest; unsigned int frac_buffer_size; /* buffer_size * HZ */ unsigned int frac_period_size; /* period_size * HZ */ unsigned int rate; int elapsed; struct snd_pcm_substream *substream; }; static void dummy_systimer_rearm(struct dummy_systimer_pcm *dpcm) { dpcm->timer.expires = jiffies + (dpcm->frac_period_rest + dpcm->rate - 1) / dpcm->rate; add_timer(&dpcm->timer); } static void dummy_systimer_update(struct dummy_systimer_pcm *dpcm) { unsigned long delta; delta = jiffies - dpcm->base_time; if (!delta) return; dpcm->base_time += delta; delta *= dpcm->rate; dpcm->frac_pos += delta; while (dpcm->frac_pos >= dpcm->frac_buffer_size) dpcm->frac_pos -= dpcm->frac_buffer_size; while (dpcm->frac_period_rest <= delta) { dpcm->elapsed++; dpcm->frac_period_rest += dpcm->frac_period_size; } dpcm->frac_period_rest -= delta; } static int dummy_systimer_start(struct snd_pcm_substream *substream) { struct dummy_systimer_pcm *dpcm = substream->runtime->private_data; spin_lock(&dpcm->lock); dpcm->base_time = jiffies; dummy_systimer_rearm(dpcm); spin_unlock(&dpcm->lock); return 0; } static int dummy_systimer_stop(struct snd_pcm_substream *substream) { struct dummy_systimer_pcm *dpcm = substream->runtime->private_data; spin_lock(&dpcm->lock); del_timer(&dpcm->timer); spin_unlock(&dpcm->lock); return 0; } static 
int dummy_systimer_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct dummy_systimer_pcm *dpcm = runtime->private_data; dpcm->frac_pos = 0; dpcm->rate = runtime->rate; dpcm->frac_buffer_size = runtime->buffer_size * HZ; dpcm->frac_period_size = runtime->period_size * HZ; dpcm->frac_period_rest = dpcm->frac_period_size; dpcm->elapsed = 0; return 0; } static void dummy_systimer_callback(unsigned long data) { struct dummy_systimer_pcm *dpcm = (struct dummy_systimer_pcm *)data; unsigned long flags; int elapsed = 0; spin_lock_irqsave(&dpcm->lock, flags); dummy_systimer_update(dpcm); dummy_systimer_rearm(dpcm); elapsed = dpcm->elapsed; dpcm->elapsed = 0; spin_unlock_irqrestore(&dpcm->lock, flags); if (elapsed) snd_pcm_period_elapsed(dpcm->substream); } static snd_pcm_uframes_t dummy_systimer_pointer(struct snd_pcm_substream *substream) { struct dummy_systimer_pcm *dpcm = substream->runtime->private_data; snd_pcm_uframes_t pos; spin_lock(&dpcm->lock); dummy_systimer_update(dpcm); pos = dpcm->frac_pos / HZ; spin_unlock(&dpcm->lock); return pos; } static int dummy_systimer_create(struct snd_pcm_substream *substream) { struct dummy_systimer_pcm *dpcm; dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL); if (!dpcm) return -ENOMEM; substream->runtime->private_data = dpcm; init_timer(&dpcm->timer); dpcm->timer.data = (unsigned long) dpcm; dpcm->timer.function = dummy_systimer_callback; spin_lock_init(&dpcm->lock); dpcm->substream = substream; return 0; } static void dummy_systimer_free(struct snd_pcm_substream *substream) { kfree(substream->runtime->private_data); } static struct dummy_timer_ops dummy_systimer_ops = { .create = dummy_systimer_create, .free = dummy_systimer_free, .prepare = dummy_systimer_prepare, .start = dummy_systimer_start, .stop = dummy_systimer_stop, .pointer = dummy_systimer_pointer, }; #ifdef CONFIG_HIGH_RES_TIMERS /* * hrtimer interface */ struct dummy_hrtimer_pcm { ktime_t base_time; ktime_t period_time; 
atomic_t running; struct hrtimer timer; struct tasklet_struct tasklet; struct snd_pcm_substream *substream; }; static void dummy_hrtimer_pcm_elapsed(unsigned long priv) { struct dummy_hrtimer_pcm *dpcm = (struct dummy_hrtimer_pcm *)priv; if (atomic_read(&dpcm->running)) snd_pcm_period_elapsed(dpcm->substream); } static enum hrtimer_restart dummy_hrtimer_callback(struct hrtimer *timer) { struct dummy_hrtimer_pcm *dpcm; dpcm = container_of(timer, struct dummy_hrtimer_pcm, timer); if (!atomic_read(&dpcm->running)) return HRTIMER_NORESTART; tasklet_schedule(&dpcm->tasklet); hrtimer_forward_now(timer, dpcm->period_time); return HRTIMER_RESTART; } static int dummy_hrtimer_start(struct snd_pcm_substream *substream) { struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data; dpcm->base_time = hrtimer_cb_get_time(&dpcm->timer); hrtimer_start(&dpcm->timer, dpcm->period_time, HRTIMER_MODE_REL); atomic_set(&dpcm->running, 1); return 0; } static int dummy_hrtimer_stop(struct snd_pcm_substream *substream) { struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data; atomic_set(&dpcm->running, 0); hrtimer_cancel(&dpcm->timer); return 0; } static inline void dummy_hrtimer_sync(struct dummy_hrtimer_pcm *dpcm) { tasklet_kill(&dpcm->tasklet); } static snd_pcm_uframes_t dummy_hrtimer_pointer(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct dummy_hrtimer_pcm *dpcm = runtime->private_data; u64 delta; u32 pos; delta = ktime_us_delta(hrtimer_cb_get_time(&dpcm->timer), dpcm->base_time); delta = div_u64(delta * runtime->rate + 999999, 1000000); div_u64_rem(delta, runtime->buffer_size, &pos); return pos; } static int dummy_hrtimer_prepare(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct dummy_hrtimer_pcm *dpcm = runtime->private_data; unsigned int period, rate; long sec; unsigned long nsecs; dummy_hrtimer_sync(dpcm); period = runtime->period_size; rate = runtime->rate; sec = 
period / rate; period %= rate; nsecs = div_u64((u64)period * 1000000000UL + rate - 1, rate); dpcm->period_time = ktime_set(sec, nsecs); return 0; } static int dummy_hrtimer_create(struct snd_pcm_substream *substream) { struct dummy_hrtimer_pcm *dpcm; dpcm = kzalloc(sizeof(*dpcm), GFP_KERNEL); if (!dpcm) return -ENOMEM; substream->runtime->private_data = dpcm; hrtimer_init(&dpcm->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); dpcm->timer.function = dummy_hrtimer_callback; dpcm->substream = substream; atomic_set(&dpcm->running, 0); tasklet_init(&dpcm->tasklet, dummy_hrtimer_pcm_elapsed, (unsigned long)dpcm); return 0; } static void dummy_hrtimer_free(struct snd_pcm_substream *substream) { struct dummy_hrtimer_pcm *dpcm = substream->runtime->private_data; dummy_hrtimer_sync(dpcm); kfree(dpcm); } static struct dummy_timer_ops dummy_hrtimer_ops = { .create = dummy_hrtimer_create, .free = dummy_hrtimer_free, .prepare = dummy_hrtimer_prepare, .start = dummy_hrtimer_start, .stop = dummy_hrtimer_stop, .pointer = dummy_hrtimer_pointer, }; #endif /* CONFIG_HIGH_RES_TIMERS */ /* * PCM interface */ static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_dummy *dummy = snd_pcm_substream_chip(substream); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: return dummy->timer_ops->start(substream); case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: return dummy->timer_ops->stop(substream); } return -EINVAL; } static int dummy_pcm_prepare(struct snd_pcm_substream *substream) { struct snd_dummy *dummy = snd_pcm_substream_chip(substream); return dummy->timer_ops->prepare(substream); } static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream) { struct snd_dummy *dummy = snd_pcm_substream_chip(substream); return dummy->timer_ops->pointer(substream); } static struct snd_pcm_hardware dummy_pcm_hardware = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_RESUME | 
SNDRV_PCM_INFO_MMAP_VALID), .formats = USE_FORMATS, .rates = USE_RATE, .rate_min = USE_RATE_MIN, .rate_max = USE_RATE_MAX, .channels_min = USE_CHANNELS_MIN, .channels_max = USE_CHANNELS_MAX, .buffer_bytes_max = MAX_BUFFER_SIZE, .period_bytes_min = MIN_PERIOD_SIZE, .period_bytes_max = MAX_PERIOD_SIZE, .periods_min = USE_PERIODS_MIN, .periods_max = USE_PERIODS_MAX, .fifo_size = 0, }; static int dummy_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { if (fake_buffer) { /* runtime->dma_bytes has to be set manually to allow mmap */ substream->runtime->dma_bytes = params_buffer_bytes(hw_params); return 0; } return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int dummy_pcm_hw_free(struct snd_pcm_substream *substream) { if (fake_buffer) return 0; return snd_pcm_lib_free_pages(substream); } static int dummy_pcm_open(struct snd_pcm_substream *substream) { struct snd_dummy *dummy = snd_pcm_substream_chip(substream); struct dummy_model *model = dummy->model; struct snd_pcm_runtime *runtime = substream->runtime; int err; dummy->timer_ops = &dummy_systimer_ops; #ifdef CONFIG_HIGH_RES_TIMERS if (hrtimer) dummy->timer_ops = &dummy_hrtimer_ops; #endif err = dummy->timer_ops->create(substream); if (err < 0) return err; runtime->hw = dummy->pcm_hw; if (substream->pcm->device & 1) { runtime->hw.info &= ~SNDRV_PCM_INFO_INTERLEAVED; runtime->hw.info |= SNDRV_PCM_INFO_NONINTERLEAVED; } if (substream->pcm->device & 2) runtime->hw.info &= ~(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID); if (model == NULL) return 0; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) { if (model->playback_constraints) err = model->playback_constraints(substream->runtime); } else { if (model->capture_constraints) err = model->capture_constraints(substream->runtime); } if (err < 0) { dummy->timer_ops->free(substream); return err; } return 0; } static int dummy_pcm_close(struct snd_pcm_substream *substream) { struct snd_dummy *dummy = 
snd_pcm_substream_chip(substream); dummy->timer_ops->free(substream); return 0; } /* * dummy buffer handling */ static void *dummy_page[2]; static void free_fake_buffer(void) { if (fake_buffer) { int i; for (i = 0; i < 2; i++) if (dummy_page[i]) { free_page((unsigned long)dummy_page[i]); dummy_page[i] = NULL; } } } static int alloc_fake_buffer(void) { int i; if (!fake_buffer) return 0; for (i = 0; i < 2; i++) { dummy_page[i] = (void *)get_zeroed_page(GFP_KERNEL); if (!dummy_page[i]) { free_fake_buffer(); return -ENOMEM; } } return 0; } static int dummy_pcm_copy(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t pos, void __user *dst, snd_pcm_uframes_t count) { return 0; /* do nothing */ } static int dummy_pcm_silence(struct snd_pcm_substream *substream, int channel, snd_pcm_uframes_t pos, snd_pcm_uframes_t count) { return 0; /* do nothing */ } static struct page *dummy_pcm_page(struct snd_pcm_substream *substream, unsigned long offset) { return virt_to_page(dummy_page[substream->stream]); /* the same page */ } static struct snd_pcm_ops dummy_pcm_ops = { .open = dummy_pcm_open, .close = dummy_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = dummy_pcm_hw_params, .hw_free = dummy_pcm_hw_free, .prepare = dummy_pcm_prepare, .trigger = dummy_pcm_trigger, .pointer = dummy_pcm_pointer, }; static struct snd_pcm_ops dummy_pcm_ops_no_buf = { .open = dummy_pcm_open, .close = dummy_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = dummy_pcm_hw_params, .hw_free = dummy_pcm_hw_free, .prepare = dummy_pcm_prepare, .trigger = dummy_pcm_trigger, .pointer = dummy_pcm_pointer, .copy = dummy_pcm_copy, .silence = dummy_pcm_silence, .page = dummy_pcm_page, }; static int __devinit snd_card_dummy_pcm(struct snd_dummy *dummy, int device, int substreams) { struct snd_pcm *pcm; struct snd_pcm_ops *ops; int err; err = snd_pcm_new(dummy->card, "Dummy PCM", device, substreams, substreams, &pcm); if (err < 0) return err; dummy->pcm = pcm; if (fake_buffer) ops = 
&dummy_pcm_ops_no_buf; else ops = &dummy_pcm_ops; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, ops); pcm->private_data = dummy; pcm->info_flags = 0; strcpy(pcm->name, "Dummy PCM"); if (!fake_buffer) { snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_CONTINUOUS, snd_dma_continuous_data(GFP_KERNEL), 0, 64*1024); } return 0; } /* * mixer interface */ #define DUMMY_VOLUME(xname, xindex, addr) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE | SNDRV_CTL_ELEM_ACCESS_TLV_READ, \ .name = xname, .index = xindex, \ .info = snd_dummy_volume_info, \ .get = snd_dummy_volume_get, .put = snd_dummy_volume_put, \ .private_value = addr, \ .tlv = { .p = db_scale_dummy } } static int snd_dummy_volume_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 2; uinfo->value.integer.min = -50; uinfo->value.integer.max = 100; return 0; } static int snd_dummy_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol); int addr = kcontrol->private_value; spin_lock_irq(&dummy->mixer_lock); ucontrol->value.integer.value[0] = dummy->mixer_volume[addr][0]; ucontrol->value.integer.value[1] = dummy->mixer_volume[addr][1]; spin_unlock_irq(&dummy->mixer_lock); return 0; } static int snd_dummy_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol); int change, addr = kcontrol->private_value; int left, right; left = ucontrol->value.integer.value[0]; if (left < -50) left = -50; if (left > 100) left = 100; right = ucontrol->value.integer.value[1]; if (right < -50) right = -50; if (right > 100) right = 100; spin_lock_irq(&dummy->mixer_lock); change = dummy->mixer_volume[addr][0] != left || dummy->mixer_volume[addr][1] != right; dummy->mixer_volume[addr][0] = left; 
dummy->mixer_volume[addr][1] = right; spin_unlock_irq(&dummy->mixer_lock); return change; } static const DECLARE_TLV_DB_SCALE(db_scale_dummy, -4500, 30, 0); #define DUMMY_CAPSRC(xname, xindex, addr) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, .index = xindex, \ .info = snd_dummy_capsrc_info, \ .get = snd_dummy_capsrc_get, .put = snd_dummy_capsrc_put, \ .private_value = addr } #define snd_dummy_capsrc_info snd_ctl_boolean_stereo_info static int snd_dummy_capsrc_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol); int addr = kcontrol->private_value; spin_lock_irq(&dummy->mixer_lock); ucontrol->value.integer.value[0] = dummy->capture_source[addr][0]; ucontrol->value.integer.value[1] = dummy->capture_source[addr][1]; spin_unlock_irq(&dummy->mixer_lock); return 0; } static int snd_dummy_capsrc_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_dummy *dummy = snd_kcontrol_chip(kcontrol); int change, addr = kcontrol->private_value; int left, right; left = ucontrol->value.integer.value[0] & 1; right = ucontrol->value.integer.value[1] & 1; spin_lock_irq(&dummy->mixer_lock); change = dummy->capture_source[addr][0] != left && dummy->capture_source[addr][1] != right; dummy->capture_source[addr][0] = left; dummy->capture_source[addr][1] = right; spin_unlock_irq(&dummy->mixer_lock); return change; } static struct snd_kcontrol_new snd_dummy_controls[] = { DUMMY_VOLUME("Master Volume", 0, MIXER_ADDR_MASTER), DUMMY_CAPSRC("Master Capture Switch", 0, MIXER_ADDR_MASTER), DUMMY_VOLUME("Synth Volume", 0, MIXER_ADDR_SYNTH), DUMMY_CAPSRC("Synth Capture Switch", 0, MIXER_ADDR_SYNTH), DUMMY_VOLUME("Line Volume", 0, MIXER_ADDR_LINE), DUMMY_CAPSRC("Line Capture Switch", 0, MIXER_ADDR_LINE), DUMMY_VOLUME("Mic Volume", 0, MIXER_ADDR_MIC), DUMMY_CAPSRC("Mic Capture Switch", 0, MIXER_ADDR_MIC), DUMMY_VOLUME("CD Volume", 0, MIXER_ADDR_CD), DUMMY_CAPSRC("CD Capture Switch", 0, 
MIXER_ADDR_CD) }; static int __devinit snd_card_dummy_new_mixer(struct snd_dummy *dummy) { struct snd_card *card = dummy->card; unsigned int idx; int err; spin_lock_init(&dummy->mixer_lock); strcpy(card->mixername, "Dummy Mixer"); for (idx = 0; idx < ARRAY_SIZE(snd_dummy_controls); idx++) { err = snd_ctl_add(card, snd_ctl_new1(&snd_dummy_controls[idx], dummy)); if (err < 0) return err; } return 0; } #if defined(CONFIG_SND_DEBUG) && defined(CONFIG_PROC_FS) /* * proc interface */ static void print_formats(struct snd_dummy *dummy, struct snd_info_buffer *buffer) { int i; for (i = 0; i < SNDRV_PCM_FORMAT_LAST; i++) { if (dummy->pcm_hw.formats & (1ULL << i)) snd_iprintf(buffer, " %s", snd_pcm_format_name(i)); } } static void print_rates(struct snd_dummy *dummy, struct snd_info_buffer *buffer) { static int rates[] = { 5512, 8000, 11025, 16000, 22050, 32000, 44100, 48000, 64000, 88200, 96000, 176400, 192000, }; int i; if (dummy->pcm_hw.rates & SNDRV_PCM_RATE_CONTINUOUS) snd_iprintf(buffer, " continuous"); if (dummy->pcm_hw.rates & SNDRV_PCM_RATE_KNOT) snd_iprintf(buffer, " knot"); for (i = 0; i < ARRAY_SIZE(rates); i++) if (dummy->pcm_hw.rates & (1 << i)) snd_iprintf(buffer, " %d", rates[i]); } #define get_dummy_int_ptr(dummy, ofs) \ (unsigned int *)((char *)&((dummy)->pcm_hw) + (ofs)) #define get_dummy_ll_ptr(dummy, ofs) \ (unsigned long long *)((char *)&((dummy)->pcm_hw) + (ofs)) struct dummy_hw_field { const char *name; const char *format; unsigned int offset; unsigned int size; }; #define FIELD_ENTRY(item, fmt) { \ .name = #item, \ .format = fmt, \ .offset = offsetof(struct snd_pcm_hardware, item), \ .size = sizeof(dummy_pcm_hardware.item) } static struct dummy_hw_field fields[] = { FIELD_ENTRY(formats, "%#llx"), FIELD_ENTRY(rates, "%#x"), FIELD_ENTRY(rate_min, "%d"), FIELD_ENTRY(rate_max, "%d"), FIELD_ENTRY(channels_min, "%d"), FIELD_ENTRY(channels_max, "%d"), FIELD_ENTRY(buffer_bytes_max, "%ld"), FIELD_ENTRY(period_bytes_min, "%ld"), FIELD_ENTRY(period_bytes_max, 
"%ld"), FIELD_ENTRY(periods_min, "%d"), FIELD_ENTRY(periods_max, "%d"), }; static void dummy_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_dummy *dummy = entry->private_data; int i; for (i = 0; i < ARRAY_SIZE(fields); i++) { snd_iprintf(buffer, "%s ", fields[i].name); if (fields[i].size == sizeof(int)) snd_iprintf(buffer, fields[i].format, *get_dummy_int_ptr(dummy, fields[i].offset)); else snd_iprintf(buffer, fields[i].format, *get_dummy_ll_ptr(dummy, fields[i].offset)); if (!strcmp(fields[i].name, "formats")) print_formats(dummy, buffer); else if (!strcmp(fields[i].name, "rates")) print_rates(dummy, buffer); snd_iprintf(buffer, "\n"); } } static void dummy_proc_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_dummy *dummy = entry->private_data; char line[64]; while (!snd_info_get_line(buffer, line, sizeof(line))) { char item[20]; const char *ptr; unsigned long long val; int i; ptr = snd_info_get_str(item, line, sizeof(item)); for (i = 0; i < ARRAY_SIZE(fields); i++) { if (!strcmp(item, fields[i].name)) break; } if (i >= ARRAY_SIZE(fields)) continue; snd_info_get_str(item, ptr, sizeof(item)); if (strict_strtoull(item, 0, &val)) continue; if (fields[i].size == sizeof(int)) *get_dummy_int_ptr(dummy, fields[i].offset) = val; else *get_dummy_ll_ptr(dummy, fields[i].offset) = val; } } static void __devinit dummy_proc_init(struct snd_dummy *chip) { struct snd_info_entry *entry; if (!snd_card_proc_new(chip->card, "dummy_pcm", &entry)) { snd_info_set_text_ops(entry, chip, dummy_proc_read); entry->c.text.write = dummy_proc_write; entry->mode |= S_IWUSR; entry->private_data = chip; } } #else #define dummy_proc_init(x) #endif /* CONFIG_SND_DEBUG && CONFIG_PROC_FS */ static int __devinit snd_dummy_probe(struct platform_device *devptr) { struct snd_card *card; struct snd_dummy *dummy; struct dummy_model *m = NULL, **mdl; int idx, err; int dev = devptr->id; err = snd_card_create(index[dev], id[dev], 
THIS_MODULE, sizeof(struct snd_dummy), &card); if (err < 0) return err; dummy = card->private_data; dummy->card = card; for (mdl = dummy_models; *mdl && model[dev]; mdl++) { if (strcmp(model[dev], (*mdl)->name) == 0) { printk(KERN_INFO "snd-dummy: Using model '%s' for card %i\n", (*mdl)->name, card->number); m = dummy->model = *mdl; break; } } for (idx = 0; idx < MAX_PCM_DEVICES && idx < pcm_devs[dev]; idx++) { if (pcm_substreams[dev] < 1) pcm_substreams[dev] = 1; if (pcm_substreams[dev] > MAX_PCM_SUBSTREAMS) pcm_substreams[dev] = MAX_PCM_SUBSTREAMS; err = snd_card_dummy_pcm(dummy, idx, pcm_substreams[dev]); if (err < 0) goto __nodev; } dummy->pcm_hw = dummy_pcm_hardware; if (m) { if (m->formats) dummy->pcm_hw.formats = m->formats; if (m->buffer_bytes_max) dummy->pcm_hw.buffer_bytes_max = m->buffer_bytes_max; if (m->period_bytes_min) dummy->pcm_hw.period_bytes_min = m->period_bytes_min; if (m->period_bytes_max) dummy->pcm_hw.period_bytes_max = m->period_bytes_max; if (m->periods_min) dummy->pcm_hw.periods_min = m->periods_min; if (m->periods_max) dummy->pcm_hw.periods_max = m->periods_max; if (m->rates) dummy->pcm_hw.rates = m->rates; if (m->rate_min) dummy->pcm_hw.rate_min = m->rate_min; if (m->rate_max) dummy->pcm_hw.rate_max = m->rate_max; if (m->channels_min) dummy->pcm_hw.channels_min = m->channels_min; if (m->channels_max) dummy->pcm_hw.channels_max = m->channels_max; } err = snd_card_dummy_new_mixer(dummy); if (err < 0) goto __nodev; strcpy(card->driver, "Dummy"); strcpy(card->shortname, "Dummy"); sprintf(card->longname, "Dummy %i", dev + 1); dummy_proc_init(dummy); snd_card_set_dev(card, &devptr->dev); err = snd_card_register(card); if (err == 0) { platform_set_drvdata(devptr, card); return 0; } __nodev: snd_card_free(card); return err; } static int __devexit snd_dummy_remove(struct platform_device *devptr) { snd_card_free(platform_get_drvdata(devptr)); platform_set_drvdata(devptr, NULL); return 0; } #ifdef CONFIG_PM static int snd_dummy_suspend(struct 
platform_device *pdev, pm_message_t state) { struct snd_card *card = platform_get_drvdata(pdev); struct snd_dummy *dummy = card->private_data; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(dummy->pcm); return 0; } static int snd_dummy_resume(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif #define SND_DUMMY_DRIVER "snd_dummy" static struct platform_driver snd_dummy_driver = { .probe = snd_dummy_probe, .remove = __devexit_p(snd_dummy_remove), #ifdef CONFIG_PM .suspend = snd_dummy_suspend, .resume = snd_dummy_resume, #endif .driver = { .name = SND_DUMMY_DRIVER }, }; static void snd_dummy_unregister_all(void) { int i; for (i = 0; i < ARRAY_SIZE(devices); ++i) platform_device_unregister(devices[i]); platform_driver_unregister(&snd_dummy_driver); free_fake_buffer(); } static int __init alsa_card_dummy_init(void) { int i, cards, err; err = platform_driver_register(&snd_dummy_driver); if (err < 0) return err; err = alloc_fake_buffer(); if (err < 0) { platform_driver_unregister(&snd_dummy_driver); return err; } cards = 0; for (i = 0; i < SNDRV_CARDS; i++) { struct platform_device *device; if (! enable[i]) continue; device = platform_device_register_simple(SND_DUMMY_DRIVER, i, NULL, 0); if (IS_ERR(device)) continue; if (!platform_get_drvdata(device)) { platform_device_unregister(device); continue; } devices[i] = device; cards++; } if (!cards) { #ifdef MODULE printk(KERN_ERR "Dummy soundcard not found or device busy\n"); #endif snd_dummy_unregister_all(); return -ENODEV; } return 0; } static void __exit alsa_card_dummy_exit(void) { snd_dummy_unregister_all(); } module_init(alsa_card_dummy_init) module_exit(alsa_card_dummy_exit)
gpl-2.0
davidmueller13/david_kernel_aosp_flo_6.0
arch/powerpc/platforms/85xx/sbc8548.c
4544
3648
/* * Wind River SBC8548 setup and early boot code. * * Copyright 2007 Wind River Systems Inc. * * By Paul Gortmaker (see MAINTAINERS for contact information) * * Based largely on the MPC8548CDS support - Copyright 2005 Freescale Inc. * * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/reboot.h> #include <linux/pci.h> #include <linux/kdev_t.h> #include <linux/major.h> #include <linux/console.h> #include <linux/delay.h> #include <linux/seq_file.h> #include <linux/initrd.h> #include <linux/interrupt.h> #include <linux/fsl_devices.h> #include <linux/of_platform.h> #include <asm/pgtable.h> #include <asm/page.h> #include <linux/atomic.h> #include <asm/time.h> #include <asm/io.h> #include <asm/machdep.h> #include <asm/ipic.h> #include <asm/pci-bridge.h> #include <asm/irq.h> #include <mm/mmu_decl.h> #include <asm/prom.h> #include <asm/udbg.h> #include <asm/mpic.h> #include <sysdev/fsl_soc.h> #include <sysdev/fsl_pci.h> #include "mpc85xx.h" static int sbc_rev; static void __init sbc8548_pic_init(void) { struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN, 0, 256, " OpenPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); } /* Extract the HW Rev from the EPLD on the board */ static int __init sbc8548_hw_rev(void) { struct device_node *np; struct resource res; unsigned int *rev; int board_rev = 0; np = of_find_compatible_node(NULL, NULL, "hw-rev"); if (np == NULL) { printk("No HW-REV found in DTB.\n"); return -ENODEV; } of_address_to_resource(np, 0, &res); of_node_put(np); rev = ioremap(res.start,sizeof(unsigned int)); board_rev = (*rev) >> 28; iounmap(rev); return board_rev; } /* * Setup the architecture */ static void __init sbc8548_setup_arch(void) { #ifdef 
CONFIG_PCI struct device_node *np; #endif if (ppc_md.progress) ppc_md.progress("sbc8548_setup_arch()", 0); #ifdef CONFIG_PCI for_each_node_by_type(np, "pci") { if (of_device_is_compatible(np, "fsl,mpc8540-pci") || of_device_is_compatible(np, "fsl,mpc8548-pcie")) { struct resource rsrc; of_address_to_resource(np, 0, &rsrc); if ((rsrc.start & 0xfffff) == 0x8000) fsl_add_bridge(np, 1); else fsl_add_bridge(np, 0); } } #endif sbc_rev = sbc8548_hw_rev(); } static void sbc8548_show_cpuinfo(struct seq_file *m) { uint pvid, svid, phid1; pvid = mfspr(SPRN_PVR); svid = mfspr(SPRN_SVR); seq_printf(m, "Vendor\t\t: Wind River\n"); seq_printf(m, "Machine\t\t: SBC8548 v%d\n", sbc_rev); seq_printf(m, "PVR\t\t: 0x%x\n", pvid); seq_printf(m, "SVR\t\t: 0x%x\n", svid); /* Display cpu Pll setting */ phid1 = mfspr(SPRN_HID1); seq_printf(m, "PLL setting\t: 0x%x\n", ((phid1 >> 24) & 0x3f)); } machine_device_initcall(sbc8548, mpc85xx_common_publish_devices); /* * Called very early, device-tree isn't unflattened */ static int __init sbc8548_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "SBC8548"); } define_machine(sbc8548) { .name = "SBC8548", .probe = sbc8548_probe, .setup_arch = sbc8548_setup_arch, .init_IRQ = sbc8548_pic_init, .show_cpuinfo = sbc8548_show_cpuinfo, .get_irq = mpic_get_irq, .restart = fsl_rstcr_restart, #ifdef CONFIG_PCI .pcibios_fixup_bus = fsl_pcibios_fixup_bus, #endif .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
pawo99/stock_mm_dpgw
arch/blackfin/kernel/reboot.c
4544
2706
/*
 * arch/blackfin/kernel/reboot.c - handle shutdown/reboot
 *
 * Copyright 2004-2007 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/interrupt.h>
#include <asm/bfin-global.h>
#include <asm/reboot.h>
#include <asm/bfrom.h>

/* A system soft reset makes external memory unusable so force
 * this function into L1.  We use the compiler ssync here rather
 * than SSYNC() because it's safe (no interrupts and such) and
 * we save some L1.  We do not need to force sanity in the SYSCR
 * register as the BMODE selection bit is cleared by the soft
 * reset while the Core B bit (on dual core parts) is cleared by
 * the core reset.
 */
__attribute__ ((__l1_text__, __noreturn__))
static void bfin_reset(void)
{
#ifndef CONFIG_BF60x
	/* Prefer the boot-ROM reset helper when the part is not affected
	 * by the anomalies that make it unusable; pass it scratchpad space
	 * at the top of L1 for its stack.
	 */
	if (!ANOMALY_05000353 && !ANOMALY_05000386)
		bfrom_SoftReset((void *)(L1_SCRATCH_START + L1_SCRATCH_LENGTH - 20));

	/* Wait for completion of "system" events such as cache line
	 * line fills so that we avoid infinite stalls later on as
	 * much as possible.  This code is in L1, so it won't trigger
	 * any such event after this point in time.
	 */
	__builtin_bfin_ssync();

	/* Initiate System software reset. */
	bfin_write_SWRST(0x7);

	/* Due to the way reset is handled in the hardware, we need
	 * to delay for 10 SCLKS.  The only reliable way to do this is
	 * to calculate the CCLK/SCLK ratio and multiply 10.  For now,
	 * we'll assume worse case which is a 1:15 ratio.
	 */
	asm(
		"LSETUP (1f, 1f) LC0 = %0\n"
		"1: nop;"
		:
		: "a" (15 * 10)
		: "LC0", "LB0", "LT0"
	);

	/* Clear System software reset */
	bfin_write_SWRST(0);

	/* The BF526 ROM will crash during reset */
#if defined(__ADSPBF522__) || defined(__ADSPBF524__) || defined(__ADSPBF526__)
	/* Seems to be fixed with newer parts though ... */
	if (__SILICON_REVISION__ < 1 && bfin_revid() < 1)
		bfin_read_SWRST();
#endif

	/* Wait for the SWRST write to complete.  Cannot rely on SSYNC
	 * though as the System state is all reset now.
	 */
	asm(
		"LSETUP (1f, 1f) LC1 = %0\n"
		"1: nop;"
		:
		: "a" (15 * 1)
		: "LC1", "LB1", "LT1"
	);

	while (1)
		/* Issue core reset */
		asm("raise 1");
#else
	/* BF60x parts reset through the Reset Control Unit instead. */
	while (1)
		bfin_write_RCU0_CTL(0x1);
#endif
}

/* Board code may override this to do board-specific restart work. */
__attribute__((weak))
void native_machine_restart(char *cmd)
{
}

/* Entry point used by the generic reboot path.  On SMP, if we are not
 * running on core 0, ask another core to perform the reset via IPI;
 * otherwise reset directly.  bfin_reset() never returns.
 */
void machine_restart(char *cmd)
{
	native_machine_restart(cmd);
	if (smp_processor_id())
		smp_call_function((void *)bfin_reset, 0, 1);
	else
		bfin_reset();
}

/* Default halt: park the CPU in its idle loop with interrupts off. */
__attribute__((weak))
void native_machine_halt(void)
{
	idle_with_irq_disabled();
}

void machine_halt(void)
{
	native_machine_halt();
}

/* Default power-off: no PMIC support here, so behave like halt. */
__attribute__((weak))
void native_machine_power_off(void)
{
	idle_with_irq_disabled();
}

void machine_power_off(void)
{
	native_machine_power_off();
}
gpl-2.0
myhro/debian-linux-kernel-gzip
arch/mips/loongson/common/cs5536/cs5536_ide.c
4544
4961
/*
 * the IDE Virtual Support Module of AMD CS5536
 *
 * The CS5536 southbridge has no real PCI configuration space for its
 * IDE function; this module emulates one by translating PCI config
 * register accesses into reads/writes of the chip's MSRs.
 *
 * Copyright (C) 2007 Lemote, Inc.
 * Author : jlliu, liujl@lemote.com
 *
 * Copyright (C) 2009 Lemote, Inc.
 * Author: Wu Zhangjin, wuzhangjin@gmail.com
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <cs5536/cs5536.h>
#include <cs5536/cs5536_pci.h>

/*
 * Emulate a write to a PCI config register of the virtual IDE function.
 * Each supported register is forwarded to the corresponding MSR via a
 * read-modify-write; unsupported registers are silently ignored.
 */
void pci_ide_write_reg(int reg, u32 value)
{
	u32 hi = 0, lo = value;

	switch (reg) {
	case PCI_COMMAND:
		/* Bus-master enable maps to two bits in the GLIU PAE MSR. */
		_rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
		if (value & PCI_COMMAND_MASTER)
			lo |= (0x03 << 4);
		else
			lo &= ~(0x03 << 4);
		_wrmsr(GLIU_MSR_REG(GLIU_PAE), hi, lo);
		break;
	case PCI_STATUS:
		/* Writing the parity bit clears a pending parity error. */
		if (value & PCI_STATUS_PARITY) {
			_rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
			if (lo & SB_PARE_ERR_FLAG) {
				lo = (lo & 0x0000ffff) | SB_PARE_ERR_FLAG;
				_wrmsr(SB_MSR_REG(SB_ERROR), hi, lo);
			}
		}
		break;
	case PCI_CACHE_LINE_SIZE:
		/* Only the latency-timer byte is kept, in SB_CTRL's high word. */
		value &= 0x0000ff00;
		_rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
		hi &= 0xffffff00;
		hi |= (value >> 8);
		_wrmsr(SB_MSR_REG(SB_CTRL), hi, lo);
		break;
	case PCI_BAR4_REG:
		if (value == PCI_BAR_RANGE_MASK) {
			/* BAR sizing probe: latch a flag so the next read
			 * returns the range mask instead of the address.
			 */
			_rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
			lo |= SOFT_BAR_IDE_FLAG;
			_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
		} else if (value & 0x01) {
			/* Real I/O BAR assignment: program the IDE I/O BAR
			 * and the GLIU descriptor that routes the range.
			 */
			_rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
			lo = (value & 0xfffffff0) | 0x1;
			_wrmsr(IDE_MSR_REG(IDE_IO_BAR), hi, lo);
			value &= 0xfffffffc;
			hi = 0x60000000 | ((value & 0x000ff000) >> 12);
			lo = 0x000ffff0 | ((value & 0x00000fff) << 20);
			_wrmsr(GLIU_MSR_REG(GLIU_IOD_BM2), hi, lo);
		}
		break;
	case PCI_IDE_CFG_REG:
		if (value == CS5536_IDE_FLASH_SIGNATURE) {
			/* Magic signature switches the shared pins to flash. */
			_rdmsr(DIVIL_MSR_REG(DIVIL_BALL_OPTS), &hi, &lo);
			lo |= 0x01;
			_wrmsr(DIVIL_MSR_REG(DIVIL_BALL_OPTS), hi, lo);
		} else {
			_rdmsr(IDE_MSR_REG(IDE_CFG), &hi, &lo);
			lo = value;
			_wrmsr(IDE_MSR_REG(IDE_CFG), hi, lo);
		}
		break;
	/* Timing/power registers below map 1:1 onto their MSRs. */
	case PCI_IDE_DTC_REG:
		_rdmsr(IDE_MSR_REG(IDE_DTC), &hi, &lo);
		lo = value;
		_wrmsr(IDE_MSR_REG(IDE_DTC), hi, lo);
		break;
	case PCI_IDE_CAST_REG:
		_rdmsr(IDE_MSR_REG(IDE_CAST), &hi, &lo);
		lo = value;
		_wrmsr(IDE_MSR_REG(IDE_CAST), hi, lo);
		break;
	case PCI_IDE_ETC_REG:
		_rdmsr(IDE_MSR_REG(IDE_ETC), &hi, &lo);
		lo = value;
		_wrmsr(IDE_MSR_REG(IDE_ETC), hi, lo);
		break;
	case PCI_IDE_PM_REG:
		_rdmsr(IDE_MSR_REG(IDE_INTERNAL_PM), &hi, &lo);
		lo = value;
		_wrmsr(IDE_MSR_REG(IDE_INTERNAL_PM), hi, lo);
		break;
	default:
		break;
	}
}

/*
 * Emulate a read from a PCI config register of the virtual IDE function.
 * Synthesizes the register value from fixed IDs and the relevant MSRs;
 * unsupported registers read as 0.  Note the PCI_BAR4_REG case has a
 * side effect: it consumes the BAR-sizing flag set by the write path.
 */
u32 pci_ide_read_reg(int reg)
{
	u32 conf_data = 0;
	u32 hi, lo;

	switch (reg) {
	case PCI_VENDOR_ID:
		conf_data =
		    CFG_PCI_VENDOR_ID(CS5536_IDE_DEVICE_ID, CS5536_VENDOR_ID);
		break;
	case PCI_COMMAND:
		/* I/O decode is "enabled" iff the BAR holds a nonzero base. */
		_rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
		if (lo & 0xfffffff0)
			conf_data |= PCI_COMMAND_IO;
		_rdmsr(GLIU_MSR_REG(GLIU_PAE), &hi, &lo);
		if ((lo & 0x30) == 0x30)
			conf_data |= PCI_COMMAND_MASTER;
		break;
	case PCI_STATUS:
		conf_data |= PCI_STATUS_66MHZ;
		conf_data |= PCI_STATUS_FAST_BACK;
		_rdmsr(SB_MSR_REG(SB_ERROR), &hi, &lo);
		if (lo & SB_PARE_ERR_FLAG)
			conf_data |= PCI_STATUS_PARITY;
		conf_data |= PCI_STATUS_DEVSEL_MEDIUM;
		break;
	case PCI_CLASS_REVISION:
		/* Revision ID comes from the IDE capability MSR's low byte. */
		_rdmsr(IDE_MSR_REG(IDE_CAP), &hi, &lo);
		conf_data = lo & 0x000000ff;
		conf_data |= (CS5536_IDE_CLASS_CODE << 8);
		break;
	case PCI_CACHE_LINE_SIZE:
		_rdmsr(SB_MSR_REG(SB_CTRL), &hi, &lo);
		hi &= 0x000000f8;
		conf_data = CFG_PCI_CACHE_LINE_SIZE(PCI_NORMAL_HEADER_TYPE, hi);
		break;
	case PCI_BAR4_REG:
		_rdmsr(GLCP_MSR_REG(GLCP_SOFT_COM), &hi, &lo);
		if (lo & SOFT_BAR_IDE_FLAG) {
			/* Sizing read: return the range mask once, then
			 * clear the flag so normal reads resume.
			 */
			conf_data = CS5536_IDE_RANGE |
			    PCI_BASE_ADDRESS_SPACE_IO;
			lo &= ~SOFT_BAR_IDE_FLAG;
			_wrmsr(GLCP_MSR_REG(GLCP_SOFT_COM), hi, lo);
		} else {
			_rdmsr(IDE_MSR_REG(IDE_IO_BAR), &hi, &lo);
			conf_data = lo & 0xfffffff0;
			conf_data |= 0x01;
			conf_data &= ~0x02;
		}
		break;
	case PCI_CARDBUS_CIS:
		conf_data = PCI_CARDBUS_CIS_POINTER;
		break;
	case PCI_SUBSYSTEM_VENDOR_ID:
		conf_data =
		    CFG_PCI_VENDOR_ID(CS5536_IDE_SUB_ID, CS5536_SUB_VENDOR_ID);
		break;
	case PCI_ROM_ADDRESS:
		conf_data = PCI_EXPANSION_ROM_BAR;
		break;
	case PCI_CAPABILITY_LIST:
		conf_data = PCI_CAPLIST_POINTER;
		break;
	case PCI_INTERRUPT_LINE:
		conf_data =
		    CFG_PCI_INTERRUPT_LINE(PCI_DEFAULT_PIN, CS5536_IDE_INTR);
		break;
	/* Timing/power registers below read straight from their MSRs. */
	case PCI_IDE_CFG_REG:
		_rdmsr(IDE_MSR_REG(IDE_CFG), &hi, &lo);
		conf_data = lo;
		break;
	case PCI_IDE_DTC_REG:
		_rdmsr(IDE_MSR_REG(IDE_DTC), &hi, &lo);
		conf_data = lo;
		break;
	case PCI_IDE_CAST_REG:
		_rdmsr(IDE_MSR_REG(IDE_CAST), &hi, &lo);
		conf_data = lo;
		break;
	case PCI_IDE_ETC_REG:
		_rdmsr(IDE_MSR_REG(IDE_ETC), &hi, &lo);
		conf_data = lo;
		break;
	case PCI_IDE_PM_REG:
		_rdmsr(IDE_MSR_REG(IDE_INTERNAL_PM), &hi, &lo);
		conf_data = lo;
		break;
	default:
		break;
	}

	return conf_data;
}
gpl-2.0
blackdeviant/hybrid-nicki
drivers/net/ethernet/sun/sunhme.c
4800
93519
/* sunhme.c: Sparc HME/BigMac 10/100baseT half/full duplex auto switching, * auto carrier detecting ethernet driver. Also known as the * "Happy Meal Ethernet" found on SunSwift SBUS cards. * * Copyright (C) 1996, 1998, 1999, 2002, 2003, * 2006, 2008 David S. Miller (davem@davemloft.net) * * Changes : * 2000/11/11 Willy Tarreau <willy AT meta-x.org> * - port to non-sparc architectures. Tested only on x86 and * only currently works with QFE PCI cards. * - ability to specify the MAC address at module load time by passing this * argument : macaddr=0x00,0x10,0x20,0x30,0x40,0x50 */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/in.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/crc32.h> #include <linux/random.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/mm.h> #include <linux/bitops.h> #include <linux/dma-mapping.h> #include <asm/io.h> #include <asm/dma.h> #include <asm/byteorder.h> #ifdef CONFIG_SPARC #include <linux/of.h> #include <linux/of_device.h> #include <asm/idprom.h> #include <asm/openprom.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/auxio.h> #endif #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/irq.h> #ifdef CONFIG_PCI #include <linux/pci.h> #endif #include "sunhme.h" #define DRV_NAME "sunhme" #define DRV_VERSION "3.10" #define DRV_RELDATE "August 26, 2008" #define DRV_AUTHOR "David S. 
Miller (davem@davemloft.net)" static char version[] = DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " " DRV_AUTHOR "\n"; MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_AUTHOR); MODULE_DESCRIPTION("Sun HappyMealEthernet(HME) 10/100baseT ethernet driver"); MODULE_LICENSE("GPL"); static int macaddr[6]; /* accept MAC address of the form macaddr=0x08,0x00,0x20,0x30,0x40,0x50 */ module_param_array(macaddr, int, NULL, 0); MODULE_PARM_DESC(macaddr, "Happy Meal MAC address to set"); #ifdef CONFIG_SBUS static struct quattro *qfe_sbus_list; #endif #ifdef CONFIG_PCI static struct quattro *qfe_pci_list; #endif #undef HMEDEBUG #undef SXDEBUG #undef RXDEBUG #undef TXDEBUG #undef TXLOGGING #ifdef TXLOGGING struct hme_tx_logent { unsigned int tstamp; int tx_new, tx_old; unsigned int action; #define TXLOG_ACTION_IRQ 0x01 #define TXLOG_ACTION_TXMIT 0x02 #define TXLOG_ACTION_TBUSY 0x04 #define TXLOG_ACTION_NBUFS 0x08 unsigned int status; }; #define TX_LOG_LEN 128 static struct hme_tx_logent tx_log[TX_LOG_LEN]; static int txlog_cur_entry; static __inline__ void tx_add_log(struct happy_meal *hp, unsigned int a, unsigned int s) { struct hme_tx_logent *tlp; unsigned long flags; local_irq_save(flags); tlp = &tx_log[txlog_cur_entry]; tlp->tstamp = (unsigned int)jiffies; tlp->tx_new = hp->tx_new; tlp->tx_old = hp->tx_old; tlp->action = a; tlp->status = s; txlog_cur_entry = (txlog_cur_entry + 1) & (TX_LOG_LEN - 1); local_irq_restore(flags); } static __inline__ void tx_dump_log(void) { int i, this; this = txlog_cur_entry; for (i = 0; i < TX_LOG_LEN; i++) { printk("TXLOG[%d]: j[%08x] tx[N(%d)O(%d)] action[%08x] stat[%08x]\n", i, tx_log[this].tstamp, tx_log[this].tx_new, tx_log[this].tx_old, tx_log[this].action, tx_log[this].status); this = (this + 1) & (TX_LOG_LEN - 1); } } static __inline__ void tx_dump_ring(struct happy_meal *hp) { struct hmeal_init_block *hb = hp->happy_block; struct happy_meal_txd *tp = &hb->happy_meal_txd[0]; int i; for (i = 0; i < TX_RING_SIZE; i+=4) { printk("TXD[%d..%d]: 
[%08x:%08x] [%08x:%08x] [%08x:%08x] [%08x:%08x]\n", i, i + 4, le32_to_cpu(tp[i].tx_flags), le32_to_cpu(tp[i].tx_addr), le32_to_cpu(tp[i + 1].tx_flags), le32_to_cpu(tp[i + 1].tx_addr), le32_to_cpu(tp[i + 2].tx_flags), le32_to_cpu(tp[i + 2].tx_addr), le32_to_cpu(tp[i + 3].tx_flags), le32_to_cpu(tp[i + 3].tx_addr)); } } #else #define tx_add_log(hp, a, s) do { } while(0) #define tx_dump_log() do { } while(0) #define tx_dump_ring(hp) do { } while(0) #endif #ifdef HMEDEBUG #define HMD(x) printk x #else #define HMD(x) #endif /* #define AUTO_SWITCH_DEBUG */ #ifdef AUTO_SWITCH_DEBUG #define ASD(x) printk x #else #define ASD(x) #endif #define DEFAULT_IPG0 16 /* For lance-mode only */ #define DEFAULT_IPG1 8 /* For all modes */ #define DEFAULT_IPG2 4 /* For all modes */ #define DEFAULT_JAMSIZE 4 /* Toe jam */ /* NOTE: In the descriptor writes one _must_ write the address * member _first_. The card must not be allowed to see * the updated descriptor flags until the address is * correct. I've added a write memory barrier between * the two stores so that I can sleep well at night... 
-DaveM */ #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) static void sbus_hme_write32(void __iomem *reg, u32 val) { sbus_writel(val, reg); } static u32 sbus_hme_read32(void __iomem *reg) { return sbus_readl(reg); } static void sbus_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) { rxd->rx_addr = (__force hme32)addr; wmb(); rxd->rx_flags = (__force hme32)flags; } static void sbus_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) { txd->tx_addr = (__force hme32)addr; wmb(); txd->tx_flags = (__force hme32)flags; } static u32 sbus_hme_read_desc32(hme32 *p) { return (__force u32)*p; } static void pci_hme_write32(void __iomem *reg, u32 val) { writel(val, reg); } static u32 pci_hme_read32(void __iomem *reg) { return readl(reg); } static void pci_hme_write_rxd(struct happy_meal_rxd *rxd, u32 flags, u32 addr) { rxd->rx_addr = (__force hme32)cpu_to_le32(addr); wmb(); rxd->rx_flags = (__force hme32)cpu_to_le32(flags); } static void pci_hme_write_txd(struct happy_meal_txd *txd, u32 flags, u32 addr) { txd->tx_addr = (__force hme32)cpu_to_le32(addr); wmb(); txd->tx_flags = (__force hme32)cpu_to_le32(flags); } static u32 pci_hme_read_desc32(hme32 *p) { return le32_to_cpup((__le32 *)p); } #define hme_write32(__hp, __reg, __val) \ ((__hp)->write32((__reg), (__val))) #define hme_read32(__hp, __reg) \ ((__hp)->read32(__reg)) #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ ((__hp)->write_rxd((__rxd), (__flags), (__addr))) #define hme_write_txd(__hp, __txd, __flags, __addr) \ ((__hp)->write_txd((__txd), (__flags), (__addr))) #define hme_read_desc32(__hp, __p) \ ((__hp)->read_desc32(__p)) #define hme_dma_map(__hp, __ptr, __size, __dir) \ ((__hp)->dma_map((__hp)->dma_dev, (__ptr), (__size), (__dir))) #define hme_dma_unmap(__hp, __addr, __size, __dir) \ ((__hp)->dma_unmap((__hp)->dma_dev, (__addr), (__size), (__dir))) #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ ((__hp)->dma_sync_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir))) 
#define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ ((__hp)->dma_sync_for_device((__hp)->dma_dev, (__addr), (__size), (__dir))) #else #ifdef CONFIG_SBUS /* SBUS only compilation */ #define hme_write32(__hp, __reg, __val) \ sbus_writel((__val), (__reg)) #define hme_read32(__hp, __reg) \ sbus_readl(__reg) #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ do { (__rxd)->rx_addr = (__force hme32)(u32)(__addr); \ wmb(); \ (__rxd)->rx_flags = (__force hme32)(u32)(__flags); \ } while(0) #define hme_write_txd(__hp, __txd, __flags, __addr) \ do { (__txd)->tx_addr = (__force hme32)(u32)(__addr); \ wmb(); \ (__txd)->tx_flags = (__force hme32)(u32)(__flags); \ } while(0) #define hme_read_desc32(__hp, __p) ((__force u32)(hme32)*(__p)) #define hme_dma_map(__hp, __ptr, __size, __dir) \ dma_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) #define hme_dma_unmap(__hp, __addr, __size, __dir) \ dma_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ dma_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ dma_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) #else /* PCI only compilation */ #define hme_write32(__hp, __reg, __val) \ writel((__val), (__reg)) #define hme_read32(__hp, __reg) \ readl(__reg) #define hme_write_rxd(__hp, __rxd, __flags, __addr) \ do { (__rxd)->rx_addr = (__force hme32)cpu_to_le32(__addr); \ wmb(); \ (__rxd)->rx_flags = (__force hme32)cpu_to_le32(__flags); \ } while(0) #define hme_write_txd(__hp, __txd, __flags, __addr) \ do { (__txd)->tx_addr = (__force hme32)cpu_to_le32(__addr); \ wmb(); \ (__txd)->tx_flags = (__force hme32)cpu_to_le32(__flags); \ } while(0) static inline u32 hme_read_desc32(struct happy_meal *hp, hme32 *p) { return le32_to_cpup((__le32 *)p); } #define hme_dma_map(__hp, __ptr, __size, __dir) \ pci_map_single((__hp)->dma_dev, (__ptr), (__size), (__dir)) 
#define hme_dma_unmap(__hp, __addr, __size, __dir) \ pci_unmap_single((__hp)->dma_dev, (__addr), (__size), (__dir)) #define hme_dma_sync_for_cpu(__hp, __addr, __size, __dir) \ pci_dma_sync_single_for_cpu((__hp)->dma_dev, (__addr), (__size), (__dir)) #define hme_dma_sync_for_device(__hp, __addr, __size, __dir) \ pci_dma_sync_single_for_device((__hp)->dma_dev, (__addr), (__size), (__dir)) #endif #endif /* Oh yes, the MIF BitBang is mighty fun to program. BitBucket is more like it. */ static void BB_PUT_BIT(struct happy_meal *hp, void __iomem *tregs, int bit) { hme_write32(hp, tregs + TCVR_BBDATA, bit); hme_write32(hp, tregs + TCVR_BBCLOCK, 0); hme_write32(hp, tregs + TCVR_BBCLOCK, 1); } #if 0 static u32 BB_GET_BIT(struct happy_meal *hp, void __iomem *tregs, int internal) { u32 ret; hme_write32(hp, tregs + TCVR_BBCLOCK, 0); hme_write32(hp, tregs + TCVR_BBCLOCK, 1); ret = hme_read32(hp, tregs + TCVR_CFG); if (internal) ret &= TCV_CFG_MDIO0; else ret &= TCV_CFG_MDIO1; return ret; } #endif static u32 BB_GET_BIT2(struct happy_meal *hp, void __iomem *tregs, int internal) { u32 retval; hme_write32(hp, tregs + TCVR_BBCLOCK, 0); udelay(1); retval = hme_read32(hp, tregs + TCVR_CFG); if (internal) retval &= TCV_CFG_MDIO0; else retval &= TCV_CFG_MDIO1; hme_write32(hp, tregs + TCVR_BBCLOCK, 1); return retval; } #define TCVR_FAILURE 0x80000000 /* Impossible MIF read value */ static int happy_meal_bb_read(struct happy_meal *hp, void __iomem *tregs, int reg) { u32 tmp; int retval = 0; int i; ASD(("happy_meal_bb_read: reg=%d ", reg)); /* Enable the MIF BitBang outputs. */ hme_write32(hp, tregs + TCVR_BBOENAB, 1); /* Force BitBang into the idle state. */ for (i = 0; i < 32; i++) BB_PUT_BIT(hp, tregs, 1); /* Give it the read sequence. */ BB_PUT_BIT(hp, tregs, 0); BB_PUT_BIT(hp, tregs, 1); BB_PUT_BIT(hp, tregs, 1); BB_PUT_BIT(hp, tregs, 0); /* Give it the PHY address. 
*/ tmp = hp->paddr & 0xff; for (i = 4; i >= 0; i--) BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); /* Tell it what register we want to read. */ tmp = (reg & 0xff); for (i = 4; i >= 0; i--) BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); /* Close down the MIF BitBang outputs. */ hme_write32(hp, tregs + TCVR_BBOENAB, 0); /* Now read in the value. */ (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); for (i = 15; i >= 0; i--) retval |= BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); (void) BB_GET_BIT2(hp, tregs, (hp->tcvr_type == internal)); ASD(("value=%x\n", retval)); return retval; } static void happy_meal_bb_write(struct happy_meal *hp, void __iomem *tregs, int reg, unsigned short value) { u32 tmp; int i; ASD(("happy_meal_bb_write: reg=%d value=%x\n", reg, value)); /* Enable the MIF BitBang outputs. */ hme_write32(hp, tregs + TCVR_BBOENAB, 1); /* Force BitBang into the idle state. */ for (i = 0; i < 32; i++) BB_PUT_BIT(hp, tregs, 1); /* Give it write sequence. */ BB_PUT_BIT(hp, tregs, 0); BB_PUT_BIT(hp, tregs, 1); BB_PUT_BIT(hp, tregs, 0); BB_PUT_BIT(hp, tregs, 1); /* Give it the PHY address. */ tmp = (hp->paddr & 0xff); for (i = 4; i >= 0; i--) BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); /* Tell it what register we will be writing. */ tmp = (reg & 0xff); for (i = 4; i >= 0; i--) BB_PUT_BIT(hp, tregs, ((tmp >> i) & 1)); /* Tell it to become ready for the bits. */ BB_PUT_BIT(hp, tregs, 1); BB_PUT_BIT(hp, tregs, 0); for (i = 15; i >= 0; i--) BB_PUT_BIT(hp, tregs, ((value >> i) & 1)); /* Close down the MIF BitBang outputs. 
*/ hme_write32(hp, tregs + TCVR_BBOENAB, 0); } #define TCVR_READ_TRIES 16 static int happy_meal_tcvr_read(struct happy_meal *hp, void __iomem *tregs, int reg) { int tries = TCVR_READ_TRIES; int retval; ASD(("happy_meal_tcvr_read: reg=0x%02x ", reg)); if (hp->tcvr_type == none) { ASD(("no transceiver, value=TCVR_FAILURE\n")); return TCVR_FAILURE; } if (!(hp->happy_flags & HFLAG_FENABLE)) { ASD(("doing bit bang\n")); return happy_meal_bb_read(hp, tregs, reg); } hme_write32(hp, tregs + TCVR_FRAME, (FRAME_READ | (hp->paddr << 23) | ((reg & 0xff) << 18))); while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries) udelay(20); if (!tries) { printk(KERN_ERR "happy meal: Aieee, transceiver MIF read bolixed\n"); return TCVR_FAILURE; } retval = hme_read32(hp, tregs + TCVR_FRAME) & 0xffff; ASD(("value=%04x\n", retval)); return retval; } #define TCVR_WRITE_TRIES 16 static void happy_meal_tcvr_write(struct happy_meal *hp, void __iomem *tregs, int reg, unsigned short value) { int tries = TCVR_WRITE_TRIES; ASD(("happy_meal_tcvr_write: reg=0x%02x value=%04x\n", reg, value)); /* Welcome to Sun Microsystems, can I take your order please? */ if (!(hp->happy_flags & HFLAG_FENABLE)) { happy_meal_bb_write(hp, tregs, reg, value); return; } /* Would you like fries with that? */ hme_write32(hp, tregs + TCVR_FRAME, (FRAME_WRITE | (hp->paddr << 23) | ((reg & 0xff) << 18) | (value & 0xffff))); while (!(hme_read32(hp, tregs + TCVR_FRAME) & 0x10000) && --tries) udelay(20); /* Anything else? */ if (!tries) printk(KERN_ERR "happy meal: Aieee, transceiver MIF write bolixed\n"); /* Fifty-two cents is your change, have a nice day. */ } /* Auto negotiation. The scheme is very simple. We have a timer routine * that keeps watching the auto negotiation process as it progresses. * The DP83840 is first told to start doing it's thing, we set up the time * and place the timer state machine in it's initial state. 
* * Here the timer peeks at the DP83840 status registers at each click to see * if the auto negotiation has completed, we assume here that the DP83840 PHY * will time out at some point and just tell us what (didn't) happen. For * complete coverage we only allow so many of the ticks at this level to run, * when this has expired we print a warning message and try another strategy. * This "other" strategy is to force the interface into various speed/duplex * configurations and we stop when we see a link-up condition before the * maximum number of "peek" ticks have occurred. * * Once a valid link status has been detected we configure the BigMAC and * the rest of the Happy Meal to speak the most efficient protocol we could * get a clean link for. The priority for link configurations, highest first * is: * 100 Base-T Full Duplex * 100 Base-T Half Duplex * 10 Base-T Full Duplex * 10 Base-T Half Duplex * * We start a new timer now, after a successful auto negotiation status has * been detected. This timer just waits for the link-up bit to get set in * the BMCR of the DP83840. When this occurs we print a kernel log message * describing the link type in use and the fact that it is up. * * If a fatal error of some sort is signalled and detected in the interrupt * service routine, and the chip is reset, or the link is ifconfig'd down * and then back up, this entire process repeats itself all over again. */ static int try_next_permutation(struct happy_meal *hp, void __iomem *tregs) { hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); /* Downgrade from full to half duplex. Only possible * via ethtool. */ if (hp->sw_bmcr & BMCR_FULLDPLX) { hp->sw_bmcr &= ~(BMCR_FULLDPLX); happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); return 0; } /* Downgrade from 100 to 10. */ if (hp->sw_bmcr & BMCR_SPEED100) { hp->sw_bmcr &= ~(BMCR_SPEED100); happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr); return 0; } /* We've tried everything. 
*/ return -1; } static void display_link_mode(struct happy_meal *hp, void __iomem *tregs) { printk(KERN_INFO "%s: Link is up using ", hp->dev->name); if (hp->tcvr_type == external) printk("external "); else printk("internal "); printk("transceiver at "); hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); if (hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) { if (hp->sw_lpa & LPA_100FULL) printk("100Mb/s, Full Duplex.\n"); else printk("100Mb/s, Half Duplex.\n"); } else { if (hp->sw_lpa & LPA_10FULL) printk("10Mb/s, Full Duplex.\n"); else printk("10Mb/s, Half Duplex.\n"); } } static void display_forced_link_mode(struct happy_meal *hp, void __iomem *tregs) { printk(KERN_INFO "%s: Link has been forced up using ", hp->dev->name); if (hp->tcvr_type == external) printk("external "); else printk("internal "); printk("transceiver at "); hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); if (hp->sw_bmcr & BMCR_SPEED100) printk("100Mb/s, "); else printk("10Mb/s, "); if (hp->sw_bmcr & BMCR_FULLDPLX) printk("Full Duplex.\n"); else printk("Half Duplex.\n"); } static int set_happy_link_modes(struct happy_meal *hp, void __iomem *tregs) { int full; /* All we care about is making sure the bigmac tx_cfg has a * proper duplex setting. */ if (hp->timer_state == arbwait) { hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA); if (!(hp->sw_lpa & (LPA_10HALF | LPA_10FULL | LPA_100HALF | LPA_100FULL))) goto no_response; if (hp->sw_lpa & LPA_100FULL) full = 1; else if (hp->sw_lpa & LPA_100HALF) full = 0; else if (hp->sw_lpa & LPA_10FULL) full = 1; else full = 0; } else { /* Forcing a link mode. 
*/ hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR); if (hp->sw_bmcr & BMCR_FULLDPLX) full = 1; else full = 0; } /* Before changing other bits in the tx_cfg register, and in * general any of other the TX config registers too, you * must: * 1) Clear Enable * 2) Poll with reads until that bit reads back as zero * 3) Make TX configuration changes * 4) Set Enable once more */ hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_ENABLE)); while (hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & BIGMAC_TXCFG_ENABLE) barrier(); if (full) { hp->happy_flags |= HFLAG_FULL; hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) | BIGMAC_TXCFG_FULLDPLX); } else { hp->happy_flags &= ~(HFLAG_FULL); hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) & ~(BIGMAC_TXCFG_FULLDPLX)); } hme_write32(hp, hp->bigmacregs + BMAC_TXCFG, hme_read32(hp, hp->bigmacregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE); return 0; no_response: return 1; } static int happy_meal_init(struct happy_meal *hp); static int is_lucent_phy(struct happy_meal *hp) { void __iomem *tregs = hp->tcvregs; unsigned short mr2, mr3; int ret = 0; mr2 = happy_meal_tcvr_read(hp, tregs, 2); mr3 = happy_meal_tcvr_read(hp, tregs, 3); if ((mr2 & 0xffff) == 0x0180 && ((mr3 & 0xffff) >> 10) == 0x1d) ret = 1; return ret; } static void happy_meal_timer(unsigned long data) { struct happy_meal *hp = (struct happy_meal *) data; void __iomem *tregs = hp->tcvregs; int restart_timer = 0; spin_lock_irq(&hp->happy_lock); hp->timer_ticks++; switch(hp->timer_state) { case arbwait: /* Only allow for 5 ticks, thats 10 seconds and much too * long to wait for arbitration to complete. */ if (hp->timer_ticks >= 10) { /* Enter force mode. 
			 */
do_force_mode:
			hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
			printk(KERN_NOTICE "%s: Auto-Negotiation unsuccessful, trying force link mode\n",
			       hp->dev->name);
			hp->sw_bmcr = BMCR_SPEED100;
			happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

			if (!is_lucent_phy(hp)) {
				/* Make sure the transceiver-disable bit is
				 * clear so we get an accurate link state
				 * reading on the following ticks.  (The
				 * ltrywait state below toggles this bit on
				 * ticks 1 and 2.)
				 */
				hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
				hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
			}
			hp->timer_state = ltrywait;
			hp->timer_ticks = 0;
			restart_timer = 1;
		} else {
			/* Anything interesting happen? */
			hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
			if (hp->sw_bmsr & BMSR_ANEGCOMPLETE) {
				int ret;

				/* Just what we've been waiting for... */
				ret = set_happy_link_modes(hp, tregs);
				if (ret) {
					/* Ooops, something bad happened, go to force
					 * mode.
					 *
					 * XXX Broken hubs which don't support 802.3u
					 * XXX auto-negotiation make this happen as well.
					 */
					goto do_force_mode;
				}

				/* Success, at least so far, advance our state engine. */
				hp->timer_state = lupwait;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case lupwait:
		/* Auto negotiation was successful and we are awaiting a
		 * link up status.  I have decided to let this timer run
		 * forever until some sort of error is signalled, reporting
		 * a message to the user at 10 second intervals.
		 */
		hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
		if (hp->sw_bmsr & BMSR_LSTATUS) {
			/* Wheee, it's up, display the link mode in use and put
			 * the timer to sleep.
			 */
			display_link_mode(hp, tregs);
			hp->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (hp->timer_ticks >= 10) {
				printk(KERN_NOTICE "%s: Auto negotiation successful, link still "
				       "not completely up.\n", hp->dev->name);
				hp->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case ltrywait:
		/* Making the timeout here too long can make it take
		 * annoyingly long to attempt all of the link mode
		 * permutations, but then again this is essentially
		 * error recovery code for the most part.
		 */
		hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
		hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs, DP83840_CSCONFIG);
		if (hp->timer_ticks == 1) {
			if (!is_lucent_phy(hp)) {
				/* Set the transceiver-disable bit for this
				 * tick; we re-enable it on the next tick and
				 * then check link state on the tick after
				 * that.
				 */
				hp->sw_csconfig |= CSCONFIG_TCVDISAB;
				happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (hp->timer_ticks == 2) {
			if (!is_lucent_phy(hp)) {
				/* Second tick: clear the disable bit again. */
				hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
				happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG, hp->sw_csconfig);
			}
			restart_timer = 1;
			break;
		}
		if (hp->sw_bmsr & BMSR_LSTATUS) {
			/* Force mode selection success. */
			display_forced_link_mode(hp, tregs);
			set_happy_link_modes(hp, tregs); /* XXX error? then what? */
			hp->timer_state = asleep;
			restart_timer = 0;
		} else {
			if (hp->timer_ticks >= 4) { /* 6 seconds or so... */
				int ret;

				ret = try_next_permutation(hp, tregs);
				if (ret == -1) {
					/* Aieee, tried them all, reset the
					 * chip and try all over again. */

					/* Let the user know... */
					printk(KERN_NOTICE "%s: Link down, cable problem?\n",
					       hp->dev->name);

					ret = happy_meal_init(hp);
					if (ret) {
						/* ho hum...
						 */
						printk(KERN_ERR "%s: Error, cannot re-init the "
						       "Happy Meal.\n", hp->dev->name);
					}
					goto out;
				}
				if (!is_lucent_phy(hp)) {
					hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
									       DP83840_CSCONFIG);
					hp->sw_csconfig |= CSCONFIG_TCVDISAB;
					happy_meal_tcvr_write(hp, tregs,
							      DP83840_CSCONFIG, hp->sw_csconfig);
				}
				hp->timer_ticks = 0;
				restart_timer = 1;
			} else {
				restart_timer = 1;
			}
		}
		break;

	case asleep:
	default:
		/* Can't happen.... the timer should never fire while asleep. */
		printk(KERN_ERR "%s: Aieee, link timer is asleep but we got one anyways!\n",
		       hp->dev->name);
		restart_timer = 0;
		hp->timer_ticks = 0;
		hp->timer_state = asleep; /* foo on you */
		break;
	}

	if (restart_timer) {
		hp->happy_timer.expires = jiffies + ((12 * HZ)/10); /* 1.2 sec. */
		add_timer(&hp->happy_timer);
	}

out:
	spin_unlock_irq(&hp->happy_lock);
}

#define TX_RESET_TRIES		32
#define RX_RESET_TRIES		32

/* Software-reset the BigMAC transmitter, polling up to TX_RESET_TRIES
 * times for the reset bit to self-clear.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_tx_reset(struct happy_meal *hp, void __iomem *bregs)
{
	int tries = TX_RESET_TRIES;

	HMD(("happy_meal_tx_reset: reset, "));

	/* Would you like to try our SMCC Delux? */
	hme_write32(hp, bregs + BMAC_TXSWRESET, 0);
	while ((hme_read32(hp, bregs + BMAC_TXSWRESET) & 1) && --tries)
		udelay(20);

	/* Lettuce, tomato, buggy hardware (no extra charge)? */
	if (!tries)
		printk(KERN_ERR "happy meal: Transceiver BigMac ATTACK!");

	/* Take care. */
	HMD(("done\n"));
}

/* Software-reset the BigMAC receiver, polling up to RX_RESET_TRIES
 * times for the reset bit to self-clear.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_rx_reset(struct happy_meal *hp, void __iomem *bregs)
{
	int tries = RX_RESET_TRIES;

	HMD(("happy_meal_rx_reset: reset, "));

	/* We have a special on GNU/Viking hardware bugs today. */
	hme_write32(hp, bregs + BMAC_RXSWRESET, 0);
	while ((hme_read32(hp, bregs + BMAC_RXSWRESET) & 1) && --tries)
		udelay(20);

	/* Will that be all? */
	if (!tries)
		printk(KERN_ERR "happy meal: Receiver BigMac ATTACK!");

	/* Don't forget your vik_1137125_wa.  Have a nice day.
	 */
	HMD(("done\n"));
}

#define STOP_TRIES		16

/* Global software reset of the whole chip; polls up to STOP_TRIES times
 * for the reset register to read back zero.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_stop(struct happy_meal *hp, void __iomem *gregs)
{
	int tries = STOP_TRIES;

	HMD(("happy_meal_stop: reset, "));

	/* We're consolidating our STB products, it's your lucky day. */
	hme_write32(hp, gregs + GREG_SWRESET, GREG_RESET_ALL);
	while (hme_read32(hp, gregs + GREG_SWRESET) && --tries)
		udelay(20);

	/* Come back next week when we are "Sun Microelectronics". */
	if (!tries)
		printk(KERN_ERR "happy meal: Fry guys.");

	/* Remember: "Different name, same old buggy as shit hardware." */
	HMD(("done\n"));
}

/* Fold the BigMAC hardware error counters into hp->net_stats and clear
 * them in the chip (they are clear-on-write registers).
 *
 * hp->happy_lock must be held
 */
static void happy_meal_get_counters(struct happy_meal *hp, void __iomem *bregs)
{
	struct net_device_stats *stats = &hp->net_stats;

	stats->rx_crc_errors += hme_read32(hp, bregs + BMAC_RCRCECTR);
	hme_write32(hp, bregs + BMAC_RCRCECTR, 0);

	stats->rx_frame_errors += hme_read32(hp, bregs + BMAC_UNALECTR);
	hme_write32(hp, bregs + BMAC_UNALECTR, 0);

	stats->rx_length_errors += hme_read32(hp, bregs + BMAC_GLECTR);
	hme_write32(hp, bregs + BMAC_GLECTR, 0);

	stats->tx_aborted_errors += hme_read32(hp, bregs + BMAC_EXCTR);

	/* Excessive + late collisions both count toward collisions. */
	stats->collisions +=
		(hme_read32(hp, bregs + BMAC_EXCTR) +
		 hme_read32(hp, bregs + BMAC_LTCTR));
	hme_write32(hp, bregs + BMAC_EXCTR, 0);
	hme_write32(hp, bregs + BMAC_LTCTR, 0);
}

/* Turn off MIF autopolling of the PHY (and mask MIF interrupts) if it
 * is currently active; no-op otherwise.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_poll_stop(struct happy_meal *hp, void __iomem *tregs)
{
	ASD(("happy_meal_poll_stop: "));

	/* If polling disabled or not polling already, nothing to do. */
	if ((hp->happy_flags & (HFLAG_POLLENABLE | HFLAG_POLL)) !=
	    (HFLAG_POLLENABLE | HFLAG_POLL)) {
		HMD(("not polling, return\n"));
		return;
	}

	/* Shut up the MIF. */
	ASD(("were polling, mif ints off, "));
	hme_write32(hp, tregs + TCVR_IMASK, 0xffff);

	/* Turn off polling. */
	ASD(("polling off, "));
	hme_write32(hp, tregs + TCVR_CFG,
		    hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_PENABLE));

	/* We are no longer polling.
	 */
	hp->happy_flags &= ~(HFLAG_POLL);

	/* Let the bits set. */
	udelay(200);
	ASD(("done\n"));
}

/* Only Sun can take such nice parts and fuck up the programming interface
 * like this.  Good job guys...
 */
#define TCVR_RESET_TRIES       16 /* It should reset quickly        */
#define TCVR_UNISOLATE_TRIES   32 /* Dis-isolation can take longer. */

/* Reset the currently-selected transceiver: isolate the *other* one,
 * issue BMCR_RESET, wait for it to self-clear, un-isolate, then refresh
 * the cached PHY register copies.  Returns 0 on success, -1 on any PHY
 * read failure or timeout.
 *
 * hp->happy_lock must be held
 */
static int happy_meal_tcvr_reset(struct happy_meal *hp, void __iomem *tregs)
{
	u32 tconfig;
	int result, tries = TCVR_RESET_TRIES;

	tconfig = hme_read32(hp, tregs + TCVR_CFG);
	ASD(("happy_meal_tcvr_reset: tcfg<%08lx> ", tconfig));
	if (hp->tcvr_type == external) {
		/* Temporarily select the internal transceiver and isolate
		 * it, so it cannot interfere, then re-select external.
		 */
		ASD(("external<"));
		hme_write32(hp, tregs + TCVR_CFG, tconfig & ~(TCV_CFG_PSELECT));
		hp->tcvr_type = internal;
		hp->paddr = TCV_PADDR_ITX;
		ASD(("ISOLATE,"));
		happy_meal_tcvr_write(hp, tregs, MII_BMCR,
				      (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (result == TCVR_FAILURE) {
			ASD(("phyread_fail>\n"));
			return -1;
		}
		ASD(("phyread_ok,PSELECT>"));
		hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
		hp->tcvr_type = external;
		hp->paddr = TCV_PADDR_ETX;
	} else {
		if (tconfig & TCV_CFG_MDIO1) {
			/* An external transceiver is present: select and
			 * isolate it, then switch back to internal.
			 */
			ASD(("internal<PSELECT,"));
			hme_write32(hp, tregs + TCVR_CFG, (tconfig | TCV_CFG_PSELECT));
			ASD(("ISOLATE,"));
			happy_meal_tcvr_write(hp, tregs, MII_BMCR,
					      (BMCR_LOOPBACK|BMCR_PDOWN|BMCR_ISOLATE));
			result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
			if (result == TCVR_FAILURE) {
				ASD(("phyread_fail>\n"));
				return -1;
			}
			ASD(("phyread_ok,~PSELECT>"));
			hme_write32(hp, tregs + TCVR_CFG, (tconfig & ~(TCV_CFG_PSELECT)));
			hp->tcvr_type = internal;
			hp->paddr = TCV_PADDR_ITX;
		}
	}

	ASD(("BMCR_RESET "));
	happy_meal_tcvr_write(hp, tregs, MII_BMCR, BMCR_RESET);

	/* BMCR_RESET self-clears when the PHY finishes resetting. */
	while (--tries) {
		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (result == TCVR_FAILURE)
			return -1;
		hp->sw_bmcr = result;
		if (!(result & BMCR_RESET))
			break;
		udelay(20);
	}
	if (!tries) {
		ASD(("BMCR RESET FAILED!\n"));
		return -1;
	}
	ASD(("RESET_OK\n"));

	/* Get fresh copies of the PHY registers. */
	hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
	hp->sw_physid1   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
	hp->sw_physid2   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);
	hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);

	ASD(("UNISOLATE"));
	hp->sw_bmcr &= ~(BMCR_ISOLATE);
	happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

	tries = TCVR_UNISOLATE_TRIES;
	while (--tries) {
		result = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
		if (result == TCVR_FAILURE)
			return -1;
		if (!(result & BMCR_ISOLATE))
			break;
		udelay(20);
	}
	if (!tries) {
		ASD((" FAILED!\n"));
		return -1;
	}
	ASD((" SUCCESS and CSCONFIG_DFBYPASS\n"));
	if (!is_lucent_phy(hp)) {
		result = happy_meal_tcvr_read(hp, tregs,
					      DP83840_CSCONFIG);
		happy_meal_tcvr_write(hp, tregs,
				      DP83840_CSCONFIG, (result | CSCONFIG_DFBYPASS));
	}
	return 0;
}

/* Figure out whether we have an internal or external transceiver.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_transceiver_check(struct happy_meal *hp, void __iomem *tregs)
{
	unsigned long tconfig = hme_read32(hp, tregs + TCVR_CFG);

	ASD(("happy_meal_transceiver_check: tcfg=%08lx ", tconfig));
	if (hp->happy_flags & HFLAG_POLL) {
		/* If we are polling, we must stop to get the transceiver type.
		 */
		ASD(("<polling> "));
		if (hp->tcvr_type == internal) {
			/* MDIO1 set while we thought we were internal means
			 * an external transceiver appeared: switch to it.
			 */
			if (tconfig & TCV_CFG_MDIO1) {
				ASD(("<internal> <poll stop> "));
				happy_meal_poll_stop(hp, tregs);
				hp->paddr = TCV_PADDR_ETX;
				hp->tcvr_type = external;
				ASD(("<external>\n"));
				tconfig &= ~(TCV_CFG_PENABLE);
				tconfig |= TCV_CFG_PSELECT;
				hme_write32(hp, tregs + TCVR_CFG, tconfig);
			}
		} else {
			if (hp->tcvr_type == external) {
				/* A zero upper TCVR_STATUS half means the
				 * external transceiver went away: fall back
				 * to the internal one.
				 */
				ASD(("<external> "));
				if (!(hme_read32(hp, tregs + TCVR_STATUS) >> 16)) {
					ASD(("<poll stop> "));
					happy_meal_poll_stop(hp, tregs);
					hp->paddr = TCV_PADDR_ITX;
					hp->tcvr_type = internal;
					ASD(("<internal>\n"));
					hme_write32(hp, tregs + TCVR_CFG,
						    hme_read32(hp, tregs + TCVR_CFG) &
						    ~(TCV_CFG_PSELECT));
				}
				ASD(("\n"));
			} else {
				ASD(("<none>\n"));
			}
		}
	} else {
		u32 reread = hme_read32(hp, tregs + TCVR_CFG);

		/* Else we can just work off of the MDIO bits. */
		ASD(("<not polling> "));
		if (reread & TCV_CFG_MDIO1) {
			hme_write32(hp, tregs + TCVR_CFG, tconfig | TCV_CFG_PSELECT);
			hp->paddr = TCV_PADDR_ETX;
			hp->tcvr_type = external;
			ASD(("<external>\n"));
		} else {
			if (reread & TCV_CFG_MDIO0) {
				hme_write32(hp, tregs + TCVR_CFG,
					    tconfig & ~(TCV_CFG_PSELECT));
				hp->paddr = TCV_PADDR_ITX;
				hp->tcvr_type = internal;
				ASD(("<internal>\n"));
			} else {
				printk(KERN_ERR "happy meal: Transceiver and a coke please.");
				hp->tcvr_type = none; /* Grrr... */
				ASD(("<none>\n"));
			}
		}
	}
}

/* The receive ring buffers are a bit tricky to get right.  Here goes...
 *
 * The buffers we dma into must be 64 byte aligned.  So we use a special
 * alloc_skb() routine for the happy meal to allocate 64 bytes more than
 * we really need.
 *
 * We use skb_reserve() to align the data block we get in the skb.  We
 * also program the etxregs->cfg register to use an offset of 2.  This
 * empirical constant plus the ethernet header size will always leave
 * us with a nicely aligned ip header once we pass things up to the
 * protocol layers.
 *
 * The numbers work out to:
 *
 *         Max ethernet frame size         1518
 *         Ethernet header size            14
 *         Happy Meal base offset          2
 *
 * Say a skb data area is at 0xf001b010, and its size alloced is
 * (ETH_FRAME_LEN + 64 + 2) = (1514 + 64 + 2) = 1580 bytes.
 *
 * First our alloc_skb() routine aligns the data base to a 64 byte
 * boundary.  We now have 0xf001b040 as our skb data address.  We
 * plug this into the receive descriptor address.
 *
 * Next, we skb_reserve() 2 bytes to account for the Happy Meal offset.
 * So now the data we will end up looking at starts at 0xf001b042.  When
 * the packet arrives, we will check out the size received and subtract
 * this from the skb->length.  Then we just pass the packet up to the
 * protocols as is, and allocate a new skb to replace this slot we have
 * just received from.
 *
 * The ethernet layer will strip the ether header from the front of the
 * skb we just sent to it, this leaves us with the ip header sitting
 * nicely aligned at 0xf001b050.  Also, for tcp and udp packets the
 * Happy Meal has even checksummed the tcp/udp data for us.  The 16
 * bit checksum is obtained from the low bits of the receive descriptor
 * flags, thus:
 *
 * 	skb->csum = rxd->rx_flags & 0xffff;
 * 	skb->ip_summed = CHECKSUM_COMPLETE;
 *
 * before sending off the skb to the protocols, and we are good as gold.
 */
/* Unmap the DMA mappings and free every skb still attached to the RX
 * and TX rings.  For TX, one descriptor per fragment is unmapped (the
 * head via dma_unmap_single, fragments via dma_unmap_page).
 */
static void happy_meal_clean_rings(struct happy_meal *hp)
{
	int i;

	for (i = 0; i < RX_RING_SIZE; i++) {
		if (hp->rx_skbs[i] != NULL) {
			struct sk_buff *skb = hp->rx_skbs[i];
			struct happy_meal_rxd *rxd;
			u32 dma_addr;

			rxd = &hp->happy_block->happy_meal_rxd[i];
			dma_addr = hme_read_desc32(hp, &rxd->rx_addr);
			dma_unmap_single(hp->dma_dev, dma_addr,
					 RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
			dev_kfree_skb_any(skb);
			hp->rx_skbs[i] = NULL;
		}
	}

	for (i = 0; i < TX_RING_SIZE; i++) {
		if (hp->tx_skbs[i] != NULL) {
			struct sk_buff *skb = hp->tx_skbs[i];
			struct happy_meal_txd *txd;
			u32 dma_addr;
			int frag;

			hp->tx_skbs[i] = NULL;

			/* Walk the head descriptor plus one per fragment;
			 * i advances with the frags so the outer loop skips
			 * descriptors already handled here.
			 */
			for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
				txd = &hp->happy_block->happy_meal_txd[i];
				dma_addr = hme_read_desc32(hp, &txd->tx_addr);
				if (!frag)
					dma_unmap_single(hp->dma_dev, dma_addr,
							 (hme_read_desc32(hp, &txd->tx_flags)
							  & TXFLAG_SIZE),
							 DMA_TO_DEVICE);
				else
					dma_unmap_page(hp->dma_dev, dma_addr,
						       (hme_read_desc32(hp, &txd->tx_flags)
							& TXFLAG_SIZE),
						       DMA_TO_DEVICE);

				if (frag != skb_shinfo(skb)->nr_frags)
					i++;
			}

			dev_kfree_skb_any(skb);
		}
	}
}

/* Reset ring indices, free any old buffers, allocate fresh RX skbs and
 * hand their descriptors to the chip (RXFLAG_OWN), and zero the TX ring.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_init_rings(struct happy_meal *hp)
{
	struct hmeal_init_block *hb = hp->happy_block;
	struct net_device *dev = hp->dev;
	int i;

	HMD(("happy_meal_init_rings: counters to zero, "));
	hp->rx_new = hp->rx_old = hp->tx_new = hp->tx_old = 0;

	/* Free any skippy bufs left around in the rings. */
	HMD(("clean, "));
	happy_meal_clean_rings(hp);

	/* Now get new skippy bufs for the receive ring. */
	HMD(("init rxring, "));
	for (i = 0; i < RX_RING_SIZE; i++) {
		struct sk_buff *skb;

		skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
		if (!skb) {
			/* Allocation failed: leave this slot empty (not
			 * owned by the chip) rather than fail outright.
			 */
			hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
			continue;
		}
		hp->rx_skbs[i] = skb;
		skb->dev = dev;

		/* Because we reserve afterwards.
		 */
		skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
		hme_write_rxd(hp, &hb->happy_meal_rxd[i],
			      (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
			      dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
					     DMA_FROM_DEVICE));
		skb_reserve(skb, RX_OFFSET);
	}

	HMD(("init txring, "));
	for (i = 0; i < TX_RING_SIZE; i++)
		hme_write_txd(hp, &hb->happy_meal_txd[i], 0, 0);

	HMD(("done\n"));
}

/* Start link bring-up: either kick off IEEE autonegotiation (default,
 * or when ep requests it) or force a specific speed/duplex from the
 * ethtool settings in ep, then arm the 1.2s link timer state machine.
 * ep may be NULL (treated as "autonegotiate").
 *
 * hp->happy_lock must be held
 */
static void happy_meal_begin_auto_negotiation(struct happy_meal *hp,
					      void __iomem *tregs,
					      struct ethtool_cmd *ep)
{
	int timeout;

	/* Read all of the registers we are interested in now. */
	hp->sw_bmsr      = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
	hp->sw_bmcr      = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
	hp->sw_physid1   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID1);
	hp->sw_physid2   = happy_meal_tcvr_read(hp, tregs, MII_PHYSID2);

	/* XXX Check BMSR_ANEGCAPABLE, should not be necessary though. */

	hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);
	if (ep == NULL || ep->autoneg == AUTONEG_ENABLE) {
		/* Advertise everything we can support. */
		if (hp->sw_bmsr & BMSR_10HALF)
			hp->sw_advertise |= (ADVERTISE_10HALF);
		else
			hp->sw_advertise &= ~(ADVERTISE_10HALF);

		if (hp->sw_bmsr & BMSR_10FULL)
			hp->sw_advertise |= (ADVERTISE_10FULL);
		else
			hp->sw_advertise &= ~(ADVERTISE_10FULL);
		if (hp->sw_bmsr & BMSR_100HALF)
			hp->sw_advertise |= (ADVERTISE_100HALF);
		else
			hp->sw_advertise &= ~(ADVERTISE_100HALF);
		if (hp->sw_bmsr & BMSR_100FULL)
			hp->sw_advertise |= (ADVERTISE_100FULL);
		else
			hp->sw_advertise &= ~(ADVERTISE_100FULL);
		happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);

		/* XXX Currently no Happy Meal cards I know of support 100BaseT4,
		 * XXX and this is because the DP83840 does not support it, changes
		 * XXX would need to be made to the tx/rx logic in the driver as well
		 * XXX so I completely skip checking for it in the BMSR for now.
			 */
			hp->sw_csconfig = happy_meal_tcvr_read(hp, tregs,
							       DP83840_CSCONFIG);
			hp->sw_csconfig &= ~(CSCONFIG_TCVDISAB);
			happy_meal_tcvr_write(hp, tregs, DP83840_CSCONFIG,
					      hp->sw_csconfig);
		}
		hp->timer_state = ltrywait;
	}
	hp->timer_ticks = 0;
	hp->happy_timer.expires = jiffies + (12 * HZ)/10;  /* 1.2 sec. */
	hp->happy_timer.data = (unsigned long) hp;
	hp->happy_timer.function = happy_meal_timer;
	add_timer(&hp->happy_timer);
}

/* Full (re)initialization of the chip: stop everything, rebuild the
 * descriptor rings, reset the transceiver and BigMAC, program MAC
 * address / multicast filter / DMA burst sizes, enable TX/RX, and kick
 * off autonegotiation.  Returns 0 on success, -EAGAIN if the transceiver
 * is missing or will not reset.
 *
 * hp->happy_lock must be held
 */
static int happy_meal_init(struct happy_meal *hp)
{
	void __iomem *gregs        = hp->gregs;
	void __iomem *etxregs      = hp->etxregs;
	void __iomem *erxregs      = hp->erxregs;
	void __iomem *bregs        = hp->bigmacregs;
	void __iomem *tregs        = hp->tcvregs;
	u32 regtmp, rxcfg;
	unsigned char *e = &hp->dev->dev_addr[0];

	/* If auto-negotiation timer is running, kill it. */
	del_timer(&hp->happy_timer);

	HMD(("happy_meal_init: happy_flags[%08x] ",
	     hp->happy_flags));
	if (!(hp->happy_flags & HFLAG_INIT)) {
		/* First init: snapshot the hardware counters so the
		 * stats start from a known state.
		 */
		HMD(("set HFLAG_INIT, "));
		hp->happy_flags |= HFLAG_INIT;
		happy_meal_get_counters(hp, bregs);
	}

	/* Stop polling. */
	HMD(("to happy_meal_poll_stop\n"));
	happy_meal_poll_stop(hp, tregs);

	/* Stop transmitter and receiver. */
	HMD(("happy_meal_init: to happy_meal_stop\n"));
	happy_meal_stop(hp, gregs);

	/* Alloc and reset the tx/rx descriptor chains. */
	HMD(("happy_meal_init: to happy_meal_init_rings\n"));
	happy_meal_init_rings(hp);

	/* Shut up the MIF. */
	HMD(("happy_meal_init: Disable all MIF irqs (old[%08x]), ",
	     hme_read32(hp, tregs + TCVR_IMASK)));
	hme_write32(hp, tregs + TCVR_IMASK, 0xffff);

	/* See if we can enable the MIF frame on this card to speak to the DP83840.
	 */
	if (hp->happy_flags & HFLAG_FENABLE) {
		HMD(("use frame old[%08x], ",
		     hme_read32(hp, tregs + TCVR_CFG)));
		hme_write32(hp, tregs + TCVR_CFG,
			    hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
	} else {
		HMD(("use bitbang old[%08x], ",
		     hme_read32(hp, tregs + TCVR_CFG)));
		hme_write32(hp, tregs + TCVR_CFG,
			    hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
	}

	/* Check the state of the transceiver. */
	HMD(("to happy_meal_transceiver_check\n"));
	happy_meal_transceiver_check(hp, tregs);

	/* Put the Big Mac into a sane state. */
	HMD(("happy_meal_init: "));
	switch(hp->tcvr_type) {
	case none:
		/* Cannot operate if we don't know the transceiver type! */
		HMD(("AAIEEE no transceiver type, EAGAIN"));
		return -EAGAIN;

	case internal:
		/* Using the MII buffers. */
		HMD(("internal, using MII, "));
		hme_write32(hp, bregs + BMAC_XIFCFG, 0);
		break;

	case external:
		/* Not using the MII, disable it. */
		HMD(("external, disable MII, "));
		hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
		break;
	}

	if (happy_meal_tcvr_reset(hp, tregs))
		return -EAGAIN;

	/* Reset the Happy Meal Big Mac transceiver and the receiver. */
	HMD(("tx/rx reset, "));
	happy_meal_tx_reset(hp, bregs);
	happy_meal_rx_reset(hp, bregs);

	/* Set jam size and inter-packet gaps to reasonable defaults. */
	HMD(("jsize/ipg1/ipg2, "));
	hme_write32(hp, bregs + BMAC_JSIZE, DEFAULT_JAMSIZE);
	hme_write32(hp, bregs + BMAC_IGAP1, DEFAULT_IPG1);
	hme_write32(hp, bregs + BMAC_IGAP2, DEFAULT_IPG2);

	/* Load up the MAC address and random seed. */
	HMD(("rseed/macaddr, "));

	/* The docs recommend to use the 10LSB of our MAC here.
	 */
	hme_write32(hp, bregs + BMAC_RSEED, ((e[5] | e[4]<<8)&0x3ff));
	hme_write32(hp, bregs + BMAC_MACADDR2, ((e[4] << 8) | e[5]));
	hme_write32(hp, bregs + BMAC_MACADDR1, ((e[2] << 8) | e[3]));
	hme_write32(hp, bregs + BMAC_MACADDR0, ((e[0] << 8) | e[1]));

	/* Multicast hash filter: accept-all for ALLMULTI or >64 groups,
	 * a CRC-based 64-bit hash otherwise, all-zero when promiscuous
	 * (the PMISC bit below handles that case).
	 */
	HMD(("htable, "));
	if ((hp->dev->flags & IFF_ALLMULTI) ||
	    (netdev_mc_count(hp->dev) > 64)) {
		hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff);
		hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff);
	} else if ((hp->dev->flags & IFF_PROMISC) == 0) {
		u16 hash_table[4];
		struct netdev_hw_addr *ha;
		u32 crc;

		memset(hash_table, 0, sizeof(hash_table));
		netdev_for_each_mc_addr(ha, hp->dev) {
			crc = ether_crc_le(6, ha->addr);
			crc >>= 26;
			hash_table[crc >> 4] |= 1 << (crc & 0xf);
		}
		hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]);
		hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]);
		hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]);
		hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]);
	} else {
		hme_write32(hp, bregs + BMAC_HTABLE3, 0);
		hme_write32(hp, bregs + BMAC_HTABLE2, 0);
		hme_write32(hp, bregs + BMAC_HTABLE1, 0);
		hme_write32(hp, bregs + BMAC_HTABLE0, 0);
	}

	/* Set the RX and TX ring ptrs. */
	HMD(("ring ptrs rxr[%08x] txr[%08x]\n",
	     ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)),
	     ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0))));
	hme_write32(hp, erxregs + ERX_RING,
		    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)));
	hme_write32(hp, etxregs + ETX_RING,
		    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_txd, 0)));

	/* Parity issues in the ERX unit of some HME revisions can cause some
	 * registers to not be written unless their parity is even.  Detect such
	 * lost writes and simply rewrite with a low bit set (which will be ignored
	 * since the rxring needs to be 2K aligned).
	 */
	if (hme_read32(hp, erxregs + ERX_RING) !=
	    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0)))
		hme_write32(hp, erxregs + ERX_RING,
			    ((__u32)hp->hblock_dvma + hblock_offset(happy_meal_rxd, 0))
			    | 0x4);

	/* Set the supported burst sizes. */
	HMD(("happy_meal_init: old[%08x] bursts<",
	     hme_read32(hp, gregs + GREG_CFG)));

#ifndef CONFIG_SPARC
	/* It is always PCI and can handle 64byte bursts. */
	hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST64);
#else
	if ((hp->happy_bursts & DMA_BURST64) &&
	    ((hp->happy_flags & HFLAG_PCI) != 0
#ifdef CONFIG_SBUS
	     || sbus_can_burst64()
#endif
	     || 0)) {
		u32 gcfg = GREG_CFG_BURST64;

		/* I have no idea if I should set the extended
		 * transfer mode bit for Cheerio, so for now I
		 * do not.  -DaveM
		 */
#ifdef CONFIG_SBUS
		if ((hp->happy_flags & HFLAG_PCI) == 0) {
			struct platform_device *op = hp->happy_dev;
			if (sbus_can_dma_64bit()) {
				sbus_set_sbus64(&op->dev,
						hp->happy_bursts);
				gcfg |= GREG_CFG_64BIT;
			}
		}
#endif

		HMD(("64>"));
		hme_write32(hp, gregs + GREG_CFG, gcfg);
	} else if (hp->happy_bursts & DMA_BURST32) {
		HMD(("32>"));
		hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST32);
	} else if (hp->happy_bursts & DMA_BURST16) {
		HMD(("16>"));
		hme_write32(hp, gregs + GREG_CFG, GREG_CFG_BURST16);
	} else {
		HMD(("XXX>"));
		hme_write32(hp, gregs + GREG_CFG, 0);
	}
#endif /* CONFIG_SPARC */

	/* Turn off interrupts we do not want to hear. */
	HMD((", enable global interrupts, "));
	hme_write32(hp, gregs + GREG_IMASK,
		    (GREG_IMASK_GOTFRAME | GREG_IMASK_RCNTEXP |
		     GREG_IMASK_SENTFRAME | GREG_IMASK_TXPERR));

	/* Set the transmit ring buffer size. */
	HMD(("tx rsize=%d oreg[%08x], ", (int)TX_RING_SIZE,
	     hme_read32(hp, etxregs + ETX_RSIZE)));
	hme_write32(hp, etxregs + ETX_RSIZE, (TX_RING_SIZE >> ETX_RSIZE_SHIFT) - 1);

	/* Enable transmitter DVMA.
	 */
	HMD(("tx dma enable old[%08x], ",
	     hme_read32(hp, etxregs + ETX_CFG)));
	hme_write32(hp, etxregs + ETX_CFG,
		    hme_read32(hp, etxregs + ETX_CFG) | ETX_CFG_DMAENABLE);

	/* This chip really rots, for the receiver sometimes when you
	 * write to its control registers not all the bits get there
	 * properly.  I cannot think of a sane way to provide complete
	 * coverage for this hardware bug yet.
	 */
	HMD(("erx regs bug old[%08x]\n",
	     hme_read32(hp, erxregs + ERX_CFG)));
	/* Write twice, then verify: workaround for the lost-bits bug above. */
	hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
	regtmp = hme_read32(hp, erxregs + ERX_CFG);
	hme_write32(hp, erxregs + ERX_CFG, ERX_CFG_DEFAULT(RX_OFFSET));
	if (hme_read32(hp, erxregs + ERX_CFG) != ERX_CFG_DEFAULT(RX_OFFSET)) {
		printk(KERN_ERR "happy meal: Eieee, rx config register gets greasy fries.\n");
		printk(KERN_ERR "happy meal: Trying to set %08x, reread gives %08x\n",
		       ERX_CFG_DEFAULT(RX_OFFSET), regtmp);
		/* XXX Should return failure here... */
	}

	/* Enable Big Mac hash table filter. */
	HMD(("happy_meal_init: enable hash rx_cfg_old[%08x], ",
	     hme_read32(hp, bregs + BMAC_RXCFG)));
	rxcfg = BIGMAC_RXCFG_HENABLE | BIGMAC_RXCFG_REJME;
	if (hp->dev->flags & IFF_PROMISC)
		rxcfg |= BIGMAC_RXCFG_PMISC;
	hme_write32(hp, bregs + BMAC_RXCFG, rxcfg);

	/* Let the bits settle in the chip. */
	udelay(10);

	/* Ok, configure the Big Mac transmitter. */
	HMD(("BIGMAC init, "));
	regtmp = 0;
	if (hp->happy_flags & HFLAG_FULL)
		regtmp |= BIGMAC_TXCFG_FULLDPLX;

	/* Don't turn on the "don't give up" bit for now.  It could cause hme
	 * to deadlock with the PHY if a Jabber occurs.
	 */
	hme_write32(hp, bregs + BMAC_TXCFG, regtmp /*| BIGMAC_TXCFG_DGIVEUP*/);

	/* Give up after 16 TX attempts. */
	hme_write32(hp, bregs + BMAC_ALIMIT, 16);

	/* Enable the output drivers no matter what. */
	regtmp = BIGMAC_XCFG_ODENABLE;

	/* If card can do lance mode, enable it. */
	if (hp->happy_flags & HFLAG_LANCE)
		regtmp |= (DEFAULT_IPG0 << 5) | BIGMAC_XCFG_LANCE;

	/* Disable the MII buffers if using external transceiver.
	 */
	if (hp->tcvr_type == external)
		regtmp |= BIGMAC_XCFG_MIIDISAB;

	HMD(("XIF config old[%08x], ",
	     hme_read32(hp, bregs + BMAC_XIFCFG)));
	hme_write32(hp, bregs + BMAC_XIFCFG, regtmp);

	/* Start things up. */
	HMD(("tx old[%08x] and rx [%08x] ON!\n",
	     hme_read32(hp, bregs + BMAC_TXCFG),
	     hme_read32(hp, bregs + BMAC_RXCFG)));

	/* Set larger TX/RX size to allow for 802.1q */
	hme_write32(hp, bregs + BMAC_TXMAX, ETH_FRAME_LEN + 8);
	hme_write32(hp, bregs + BMAC_RXMAX, ETH_FRAME_LEN + 8);

	hme_write32(hp, bregs + BMAC_TXCFG,
		    hme_read32(hp, bregs + BMAC_TXCFG) | BIGMAC_TXCFG_ENABLE);
	hme_write32(hp, bregs + BMAC_RXCFG,
		    hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_ENABLE);

	/* Get the autonegotiation started, and the watch timer ticking. */
	happy_meal_begin_auto_negotiation(hp, tregs, NULL);

	/* Success. */
	return 0;
}

/* Reset the chip enough to talk to the PHY, then program the MII
 * advertisement register with everything the PHY reports it can do.
 * Used at probe time; does not bring the interface up.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_set_initial_advertisement(struct happy_meal *hp)
{
	void __iomem *tregs = hp->tcvregs;
	void __iomem *bregs = hp->bigmacregs;
	void __iomem *gregs = hp->gregs;

	happy_meal_stop(hp, gregs);
	/* Mask MIF interrupts and pick frame vs bit-bang MII access. */
	hme_write32(hp, tregs + TCVR_IMASK, 0xffff);
	if (hp->happy_flags & HFLAG_FENABLE)
		hme_write32(hp, tregs + TCVR_CFG,
			    hme_read32(hp, tregs + TCVR_CFG) & ~(TCV_CFG_BENABLE));
	else
		hme_write32(hp, tregs + TCVR_CFG,
			    hme_read32(hp, tregs + TCVR_CFG) | TCV_CFG_BENABLE);
	happy_meal_transceiver_check(hp, tregs);
	switch(hp->tcvr_type) {
	case none:
		return;
	case internal:
		hme_write32(hp, bregs + BMAC_XIFCFG, 0);
		break;
	case external:
		hme_write32(hp, bregs + BMAC_XIFCFG, BIGMAC_XCFG_MIIDISAB);
		break;
	}
	if (happy_meal_tcvr_reset(hp, tregs))
		return;

	/* Latch PHY registers as of now. */
	hp->sw_bmsr = happy_meal_tcvr_read(hp, tregs, MII_BMSR);
	hp->sw_advertise = happy_meal_tcvr_read(hp, tregs, MII_ADVERTISE);

	/* Advertise everything we can support.
	 */
	if (hp->sw_bmsr & BMSR_10HALF)
		hp->sw_advertise |= (ADVERTISE_10HALF);
	else
		hp->sw_advertise &= ~(ADVERTISE_10HALF);

	if (hp->sw_bmsr & BMSR_10FULL)
		hp->sw_advertise |= (ADVERTISE_10FULL);
	else
		hp->sw_advertise &= ~(ADVERTISE_10FULL);
	if (hp->sw_bmsr & BMSR_100HALF)
		hp->sw_advertise |= (ADVERTISE_100HALF);
	else
		hp->sw_advertise &= ~(ADVERTISE_100HALF);
	if (hp->sw_bmsr & BMSR_100FULL)
		hp->sw_advertise |= (ADVERTISE_100FULL);
	else
		hp->sw_advertise &= ~(ADVERTISE_100FULL);

	/* Update the PHY advertisement register. */
	happy_meal_tcvr_write(hp, tregs, MII_ADVERTISE, hp->sw_advertise);
}

/* Decode an error interrupt status word: log what went wrong and, for
 * the fatal conditions, re-initialize the whole chip.  Returns 1 when a
 * reset was performed, 0 otherwise.
 *
 * Once status is latched (by happy_meal_interrupt) it is cleared by
 * the hardware, so we cannot re-read it and get a correct value.
 *
 * hp->happy_lock must be held
 */
static int happy_meal_is_not_so_happy(struct happy_meal *hp, u32 status)
{
	int reset = 0;

	/* Only print messages for non-counter related interrupts. */
	if (status & (GREG_STAT_STSTERR | GREG_STAT_TFIFO_UND |
		      GREG_STAT_MAXPKTERR | GREG_STAT_RXERR |
		      GREG_STAT_RXPERR | GREG_STAT_RXTERR |
		      GREG_STAT_EOPERR | GREG_STAT_MIFIRQ |
		      GREG_STAT_TXEACK | GREG_STAT_TXLERR |
		      GREG_STAT_TXPERR | GREG_STAT_TXTERR |
		      GREG_STAT_SLVERR | GREG_STAT_SLVPERR))
		printk(KERN_ERR "%s: Error interrupt for happy meal, status = %08x\n",
		       hp->dev->name, status);

	if (status & GREG_STAT_RFIFOVF) {
		/* Receive FIFO overflow is harmless and the hardware will take
		   care of it, just some packets are lost. Who cares. */
		printk(KERN_DEBUG "%s: Happy Meal receive FIFO overflow.\n",
		       hp->dev->name);
	}

	if (status & GREG_STAT_STSTERR) {
		/* BigMAC SQE link test failed. */
		printk(KERN_ERR "%s: Happy Meal BigMAC SQE test failed.\n",
		       hp->dev->name);
		reset = 1;
	}

	if (status & GREG_STAT_TFIFO_UND) {
		/* Transmit FIFO underrun, again DMA error likely. */
		printk(KERN_ERR "%s: Happy Meal transmitter FIFO underrun, DMA error.\n",
		       hp->dev->name);
		reset = 1;
	}

	if (status & GREG_STAT_MAXPKTERR) {
		/* Driver error, tried to transmit something larger
		 * than ethernet max mtu.
		 */
		printk(KERN_ERR "%s: Happy Meal MAX Packet size error.\n",
		       hp->dev->name);
		reset = 1;
	}

	if (status & GREG_STAT_NORXD) {
		/* This is harmless, it just means the system is
		 * quite loaded and the incoming packet rate was
		 * faster than the interrupt handler could keep up
		 * with.
		 */
		printk(KERN_INFO "%s: Happy Meal out of receive "
		       "descriptors, packet dropped.\n",
		       hp->dev->name);
	}

	if (status & (GREG_STAT_RXERR|GREG_STAT_RXPERR|GREG_STAT_RXTERR)) {
		/* All sorts of DMA receive errors. */
		printk(KERN_ERR "%s: Happy Meal rx DMA errors [ ",
		       hp->dev->name);
		if (status & GREG_STAT_RXERR)
			printk("GenericError ");
		if (status & GREG_STAT_RXPERR)
			printk("ParityError ");
		if (status & GREG_STAT_RXTERR)
			printk("RxTagBotch ");
		printk("]\n");
		reset = 1;
	}

	if (status & GREG_STAT_EOPERR) {
		/* Driver bug, didn't set EOP bit in tx descriptor given
		 * to the happy meal.
		 */
		printk(KERN_ERR "%s: EOP not set in happy meal transmit descriptor!\n",
		       hp->dev->name);
		reset = 1;
	}

	if (status & GREG_STAT_MIFIRQ) {
		/* MIF signalled an interrupt, were we polling it? */
		printk(KERN_ERR "%s: Happy Meal MIF interrupt.\n",
		       hp->dev->name);
	}

	if (status &
	    (GREG_STAT_TXEACK|GREG_STAT_TXLERR|GREG_STAT_TXPERR|GREG_STAT_TXTERR)) {
		/* All sorts of transmit DMA errors.
		 * NOTE(review): the "ParityErro " below is a typo inside a
		 * runtime log string; preserved byte-for-byte here.
		 */
		printk(KERN_ERR "%s: Happy Meal tx DMA errors [ ",
		       hp->dev->name);
		if (status & GREG_STAT_TXEACK)
			printk("GenericError ");
		if (status & GREG_STAT_TXLERR)
			printk("LateError ");
		if (status & GREG_STAT_TXPERR)
			printk("ParityErro ");
		if (status & GREG_STAT_TXTERR)
			printk("TagBotch ");
		printk("]\n");
		reset = 1;
	}

	if (status & (GREG_STAT_SLVERR|GREG_STAT_SLVPERR)) {
		/* Bus or parity error when cpu accessed happy meal registers
		 * or it's internal FIFO's.  Should never see this.
		 */
		printk(KERN_ERR "%s: Happy Meal register access SBUS slave (%s) error.\n",
		       hp->dev->name,
		       (status & GREG_STAT_SLVPERR) ?
		       "parity" : "generic");
		reset = 1;
	}

	if (reset) {
		printk(KERN_NOTICE "%s: Resetting...\n",
		       hp->dev->name);
		happy_meal_init(hp);
		return 1;
	}
	return 0;
}

/* Handle a MIF (link status change) interrupt: re-read the partner
 * abilities, reprogram BMCR for the fastest mode both sides support,
 * then stop MIF polling.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_mif_interrupt(struct happy_meal *hp)
{
	void __iomem *tregs = hp->tcvregs;

	printk(KERN_INFO "%s: Link status change.\n", hp->dev->name);
	hp->sw_bmcr = happy_meal_tcvr_read(hp, tregs, MII_BMCR);
	hp->sw_lpa = happy_meal_tcvr_read(hp, tregs, MII_LPA);

	/* Use the fastest transmission protocol possible. */
	if (hp->sw_lpa & LPA_100FULL) {
		printk(KERN_INFO "%s: Switching to 100Mbps at full duplex.", hp->dev->name);
		hp->sw_bmcr |= (BMCR_FULLDPLX | BMCR_SPEED100);
	} else if (hp->sw_lpa & LPA_100HALF) {
		printk(KERN_INFO "%s: Switching to 100MBps at half duplex.", hp->dev->name);
		hp->sw_bmcr |= BMCR_SPEED100;
	} else if (hp->sw_lpa & LPA_10FULL) {
		printk(KERN_INFO "%s: Switching to 10MBps at full duplex.", hp->dev->name);
		hp->sw_bmcr |= BMCR_FULLDPLX;
	} else {
		printk(KERN_INFO "%s: Using 10Mbps at half duplex.", hp->dev->name);
	}
	happy_meal_tcvr_write(hp, tregs, MII_BMCR, hp->sw_bmcr);

	/* Finally stop polling and shut up the MIF.
	 */
	happy_meal_poll_stop(hp, tregs);
}

#ifdef TXDEBUG
#define TXD(x)	printk x
#else
#define TXD(x)
#endif

/* Reclaim completed TX descriptors: walk from tx_old toward tx_new,
 * stop at the first descriptor the chip still owns (TXFLAG_OWN), unmap
 * each skb's DMA buffers, free the skb, update stats, and wake the
 * queue if enough ring space has opened up.
 *
 * hp->happy_lock must be held
 */
static void happy_meal_tx(struct happy_meal *hp)
{
	struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
	struct happy_meal_txd *this;
	struct net_device *dev = hp->dev;
	int elem;

	elem = hp->tx_old;
	TXD(("TX<"));
	while (elem != hp->tx_new) {
		struct sk_buff *skb;
		u32 flags, dma_addr, dma_len;
		int frag;

		TXD(("[%d]", elem));
		this = &txbase[elem];
		flags = hme_read_desc32(hp, &this->tx_flags);
		if (flags & TXFLAG_OWN)
			break;
		skb = hp->tx_skbs[elem];
		if (skb_shinfo(skb)->nr_frags) {
			int last;

			/* Only reclaim a fragmented skb once its LAST
			 * descriptor has been released by the chip.
			 */
			last = elem + skb_shinfo(skb)->nr_frags;
			last &= (TX_RING_SIZE - 1);
			flags = hme_read_desc32(hp, &txbase[last].tx_flags);
			if (flags & TXFLAG_OWN)
				break;
		}
		hp->tx_skbs[elem] = NULL;
		hp->net_stats.tx_bytes += skb->len;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			dma_addr = hme_read_desc32(hp, &this->tx_addr);
			dma_len = hme_read_desc32(hp, &this->tx_flags);

			dma_len &= TXFLAG_SIZE;
			if (!frag)
				dma_unmap_single(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);
			else
				dma_unmap_page(hp->dma_dev, dma_addr, dma_len, DMA_TO_DEVICE);

			elem = NEXT_TX(elem);
			this = &txbase[elem];
		}

		dev_kfree_skb_irq(skb);
		hp->net_stats.tx_packets++;
	}
	hp->tx_old = elem;
	TXD((">"));

	if (netif_queue_stopped(dev) &&
	    TX_BUFFS_AVAIL(hp) > (MAX_SKB_FRAGS + 1))
		netif_wake_queue(dev);
}

#ifdef RXDEBUG
#define RXD(x)	printk x
#else
#define RXD(x)
#endif

/* Originally I used to handle the allocation failure by just giving back just
 * that one ring buffer to the happy meal.  Problem is that usually when that
 * condition is triggered, the happy meal expects you to do something reasonable
 * with all of the packets it has DMA'd in.  So now I just drop the entire
 * ring when we cannot get a new skb and give them all back to the happy meal,
 * maybe things will be "happier" now.
* * hp->happy_lock must be held */ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev) { struct happy_meal_rxd *rxbase = &hp->happy_block->happy_meal_rxd[0]; struct happy_meal_rxd *this; int elem = hp->rx_new, drops = 0; u32 flags; RXD(("RX<")); this = &rxbase[elem]; while (!((flags = hme_read_desc32(hp, &this->rx_flags)) & RXFLAG_OWN)) { struct sk_buff *skb; int len = flags >> 16; u16 csum = flags & RXFLAG_CSUM; u32 dma_addr = hme_read_desc32(hp, &this->rx_addr); RXD(("[%d ", elem)); /* Check for errors. */ if ((len < ETH_ZLEN) || (flags & RXFLAG_OVERFLOW)) { RXD(("ERR(%08x)]", flags)); hp->net_stats.rx_errors++; if (len < ETH_ZLEN) hp->net_stats.rx_length_errors++; if (len & (RXFLAG_OVERFLOW >> 16)) { hp->net_stats.rx_over_errors++; hp->net_stats.rx_fifo_errors++; } /* Return it to the Happy meal. */ drop_it: hp->net_stats.rx_dropped++; hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), dma_addr); goto next; } skb = hp->rx_skbs[elem]; if (len > RX_COPY_THRESHOLD) { struct sk_buff *new_skb; /* Now refill the entry, if we can. */ new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); if (new_skb == NULL) { drops++; goto drop_it; } dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); hp->rx_skbs[elem] = new_skb; new_skb->dev = dev; skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE)); skb_reserve(new_skb, RX_OFFSET); /* Trim the original skb for the netif. 
*/ skb_trim(skb, len); } else { struct sk_buff *copy_skb = netdev_alloc_skb(dev, len + 2); if (copy_skb == NULL) { drops++; goto drop_it; } skb_reserve(copy_skb, 2); skb_put(copy_skb, len); dma_sync_single_for_cpu(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); skb_copy_from_linear_data(skb, copy_skb->data, len); dma_sync_single_for_device(hp->dma_dev, dma_addr, len, DMA_FROM_DEVICE); /* Reuse original ring buffer. */ hme_write_rxd(hp, this, (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), dma_addr); skb = copy_skb; } /* This card is _fucking_ hot... */ skb->csum = csum_unfold(~(__force __sum16)htons(csum)); skb->ip_summed = CHECKSUM_COMPLETE; RXD(("len=%d csum=%4x]", len, csum)); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); hp->net_stats.rx_packets++; hp->net_stats.rx_bytes += len; next: elem = NEXT_RX(elem); this = &rxbase[elem]; } hp->rx_new = elem; if (drops) printk(KERN_INFO "%s: Memory squeeze, deferring packet.\n", hp->dev->name); RXD((">")); } static irqreturn_t happy_meal_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct happy_meal *hp = netdev_priv(dev); u32 happy_status = hme_read32(hp, hp->gregs + GREG_STAT); HMD(("happy_meal_interrupt: status=%08x ", happy_status)); spin_lock(&hp->happy_lock); if (happy_status & GREG_STAT_ERRORS) { HMD(("ERRORS ")); if (happy_meal_is_not_so_happy(hp, /* un- */ happy_status)) goto out; } if (happy_status & GREG_STAT_MIFIRQ) { HMD(("MIFIRQ ")); happy_meal_mif_interrupt(hp); } if (happy_status & GREG_STAT_TXALL) { HMD(("TXALL ")); happy_meal_tx(hp); } if (happy_status & GREG_STAT_RXTOHOST) { HMD(("RXTOHOST ")); happy_meal_rx(hp, dev); } HMD(("done\n")); out: spin_unlock(&hp->happy_lock); return IRQ_HANDLED; } #ifdef CONFIG_SBUS static irqreturn_t quattro_sbus_interrupt(int irq, void *cookie) { struct quattro *qp = (struct quattro *) cookie; int i; for (i = 0; i < 4; i++) { struct net_device *dev = qp->happy_meals[i]; struct happy_meal *hp = netdev_priv(dev); u32 happy_status = 
hme_read32(hp, hp->gregs + GREG_STAT); HMD(("quattro_interrupt: status=%08x ", happy_status)); if (!(happy_status & (GREG_STAT_ERRORS | GREG_STAT_MIFIRQ | GREG_STAT_TXALL | GREG_STAT_RXTOHOST))) continue; spin_lock(&hp->happy_lock); if (happy_status & GREG_STAT_ERRORS) { HMD(("ERRORS ")); if (happy_meal_is_not_so_happy(hp, happy_status)) goto next; } if (happy_status & GREG_STAT_MIFIRQ) { HMD(("MIFIRQ ")); happy_meal_mif_interrupt(hp); } if (happy_status & GREG_STAT_TXALL) { HMD(("TXALL ")); happy_meal_tx(hp); } if (happy_status & GREG_STAT_RXTOHOST) { HMD(("RXTOHOST ")); happy_meal_rx(hp, dev); } next: spin_unlock(&hp->happy_lock); } HMD(("done\n")); return IRQ_HANDLED; } #endif static int happy_meal_open(struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); int res; HMD(("happy_meal_open: ")); /* On SBUS Quattro QFE cards, all hme interrupts are concentrated * into a single source which we register handling at probe time. */ if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) { if (request_irq(dev->irq, happy_meal_interrupt, IRQF_SHARED, dev->name, (void *)dev)) { HMD(("EAGAIN\n")); printk(KERN_ERR "happy_meal(SBUS): Can't order irq %d to go.\n", dev->irq); return -EAGAIN; } } HMD(("to happy_meal_init\n")); spin_lock_irq(&hp->happy_lock); res = happy_meal_init(hp); spin_unlock_irq(&hp->happy_lock); if (res && ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO)) free_irq(dev->irq, dev); return res; } static int happy_meal_close(struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); spin_lock_irq(&hp->happy_lock); happy_meal_stop(hp, hp->gregs); happy_meal_clean_rings(hp); /* If auto-negotiation timer is running, kill it. */ del_timer(&hp->happy_timer); spin_unlock_irq(&hp->happy_lock); /* On Quattro QFE cards, all hme interrupts are concentrated * into a single source which we register handling at probe * time and never unregister. 
*/ if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) free_irq(dev->irq, dev); return 0; } #ifdef SXDEBUG #define SXD(x) printk x #else #define SXD(x) #endif static void happy_meal_tx_timeout(struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); printk (KERN_ERR "%s: transmit timed out, resetting\n", dev->name); tx_dump_log(); printk (KERN_ERR "%s: Happy Status %08x TX[%08x:%08x]\n", dev->name, hme_read32(hp, hp->gregs + GREG_STAT), hme_read32(hp, hp->etxregs + ETX_CFG), hme_read32(hp, hp->bigmacregs + BMAC_TXCFG)); spin_lock_irq(&hp->happy_lock); happy_meal_init(hp); spin_unlock_irq(&hp->happy_lock); netif_wake_queue(dev); } static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); int entry; u32 tx_flags; tx_flags = TXFLAG_OWN; if (skb->ip_summed == CHECKSUM_PARTIAL) { const u32 csum_start_off = skb_checksum_start_offset(skb); const u32 csum_stuff_off = csum_start_off + skb->csum_offset; tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | ((csum_start_off << 14) & TXFLAG_CSBUFBEGIN) | ((csum_stuff_off << 20) & TXFLAG_CSLOCATION)); } spin_lock_irq(&hp->happy_lock); if (TX_BUFFS_AVAIL(hp) <= (skb_shinfo(skb)->nr_frags + 1)) { netif_stop_queue(dev); spin_unlock_irq(&hp->happy_lock); printk(KERN_ERR "%s: BUG! Tx Ring full when queue awake!\n", dev->name); return NETDEV_TX_BUSY; } entry = hp->tx_new; SXD(("SX<l[%d]e[%d]>", len, entry)); hp->tx_skbs[entry] = skb; if (skb_shinfo(skb)->nr_frags == 0) { u32 mapping, len; len = skb->len; mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], (tx_flags | (len & TXFLAG_SIZE)), mapping); entry = NEXT_TX(entry); } else { u32 first_len, first_mapping; int frag, first_entry = entry; /* We must give this initial chunk to the device last. * Otherwise we could race with the device. 
*/ first_len = skb_headlen(skb); first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, DMA_TO_DEVICE); entry = NEXT_TX(entry); for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { const skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag]; u32 len, mapping, this_txflags; len = skb_frag_size(this_frag); mapping = skb_frag_dma_map(hp->dma_dev, this_frag, 0, len, DMA_TO_DEVICE); this_txflags = tx_flags; if (frag == skb_shinfo(skb)->nr_frags - 1) this_txflags |= TXFLAG_EOP; hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], (this_txflags | (len & TXFLAG_SIZE)), mapping); entry = NEXT_TX(entry); } hme_write_txd(hp, &hp->happy_block->happy_meal_txd[first_entry], (tx_flags | TXFLAG_SOP | (first_len & TXFLAG_SIZE)), first_mapping); } hp->tx_new = entry; if (TX_BUFFS_AVAIL(hp) <= (MAX_SKB_FRAGS + 1)) netif_stop_queue(dev); /* Get it going. */ hme_write32(hp, hp->etxregs + ETX_PENDING, ETX_TP_DMAWAKEUP); spin_unlock_irq(&hp->happy_lock); tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); return NETDEV_TX_OK; } static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); spin_lock_irq(&hp->happy_lock); happy_meal_get_counters(hp, hp->bigmacregs); spin_unlock_irq(&hp->happy_lock); return &hp->net_stats; } static void happy_meal_set_multicast(struct net_device *dev) { struct happy_meal *hp = netdev_priv(dev); void __iomem *bregs = hp->bigmacregs; struct netdev_hw_addr *ha; u32 crc; spin_lock_irq(&hp->happy_lock); if ((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { hme_write32(hp, bregs + BMAC_HTABLE0, 0xffff); hme_write32(hp, bregs + BMAC_HTABLE1, 0xffff); hme_write32(hp, bregs + BMAC_HTABLE2, 0xffff); hme_write32(hp, bregs + BMAC_HTABLE3, 0xffff); } else if (dev->flags & IFF_PROMISC) { hme_write32(hp, bregs + BMAC_RXCFG, hme_read32(hp, bregs + BMAC_RXCFG) | BIGMAC_RXCFG_PMISC); } else { u16 hash_table[4]; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, dev) { crc = 
ether_crc_le(6, ha->addr); crc >>= 26; hash_table[crc >> 4] |= 1 << (crc & 0xf); } hme_write32(hp, bregs + BMAC_HTABLE0, hash_table[0]); hme_write32(hp, bregs + BMAC_HTABLE1, hash_table[1]); hme_write32(hp, bregs + BMAC_HTABLE2, hash_table[2]); hme_write32(hp, bregs + BMAC_HTABLE3, hash_table[3]); } spin_unlock_irq(&hp->happy_lock); } /* Ethtool support... */ static int hme_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct happy_meal *hp = netdev_priv(dev); u32 speed; cmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); /* XXX hardcoded stuff for now */ cmd->port = PORT_TP; /* XXX no MII support */ cmd->transceiver = XCVR_INTERNAL; /* XXX no external xcvr support */ cmd->phy_address = 0; /* XXX fixed PHYAD */ /* Record PHY settings. */ spin_lock_irq(&hp->happy_lock); hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR); hp->sw_lpa = happy_meal_tcvr_read(hp, hp->tcvregs, MII_LPA); spin_unlock_irq(&hp->happy_lock); if (hp->sw_bmcr & BMCR_ANENABLE) { cmd->autoneg = AUTONEG_ENABLE; speed = ((hp->sw_lpa & (LPA_100HALF | LPA_100FULL)) ? SPEED_100 : SPEED_10); if (speed == SPEED_100) cmd->duplex = (hp->sw_lpa & (LPA_100FULL)) ? DUPLEX_FULL : DUPLEX_HALF; else cmd->duplex = (hp->sw_lpa & (LPA_10FULL)) ? DUPLEX_FULL : DUPLEX_HALF; } else { cmd->autoneg = AUTONEG_DISABLE; speed = (hp->sw_bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; cmd->duplex = (hp->sw_bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } ethtool_cmd_speed_set(cmd, speed); return 0; } static int hme_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct happy_meal *hp = netdev_priv(dev); /* Verify the settings we care about. 
 */
	if (cmd->autoneg != AUTONEG_ENABLE &&
	    cmd->autoneg != AUTONEG_DISABLE)
		return -EINVAL;
	if (cmd->autoneg == AUTONEG_DISABLE &&
	    ((ethtool_cmd_speed(cmd) != SPEED_100 &&
	      ethtool_cmd_speed(cmd) != SPEED_10) ||
	     (cmd->duplex != DUPLEX_HALF &&
	      cmd->duplex != DUPLEX_FULL)))
		return -EINVAL;

	/* Ok, do it to it.  Restarting auto-negotiation cancels any
	 * link timer currently in flight.
	 */
	spin_lock_irq(&hp->happy_lock);
	del_timer(&hp->happy_timer);
	happy_meal_begin_auto_negotiation(hp, hp->tcvregs, cmd);
	spin_unlock_irq(&hp->happy_lock);

	return 0;
}

/* ethtool ->get_drvinfo: report driver name/version and the bus address
 * (PCI slot name, or "SBUS:<which_io>" taken from the "regs" OF property).
 */
static void hme_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct happy_meal *hp = netdev_priv(dev);

	strlcpy(info->driver, "sunhme", sizeof(info->driver));
	strlcpy(info->version, "2.02", sizeof(info->version));
	if (hp->happy_flags & HFLAG_PCI) {
		struct pci_dev *pdev = hp->happy_dev;
		strlcpy(info->bus_info, pci_name(pdev), sizeof(info->bus_info));
	}
#ifdef CONFIG_SBUS
	else {
		const struct linux_prom_registers *regs;
		struct platform_device *op = hp->happy_dev;
		regs = of_get_property(op->dev.of_node, "regs", NULL);
		if (regs)
			snprintf(info->bus_info, sizeof(info->bus_info),
				"SBUS:%d",
				regs->which_io);
	}
#endif
}

/* ethtool ->get_link.
 * NOTE(review): this refreshes hp->sw_bmcr from MII_BMCR but then tests
 * hp->sw_bmsr (the last *cached* BMSR) for BMSR_LSTATUS, so the reported
 * link state can be stale -- confirm whether a MII_BMSR read was intended.
 */
static u32 hme_get_link(struct net_device *dev)
{
	struct happy_meal *hp = netdev_priv(dev);

	spin_lock_irq(&hp->happy_lock);
	hp->sw_bmcr = happy_meal_tcvr_read(hp, hp->tcvregs, MII_BMCR);
	spin_unlock_irq(&hp->happy_lock);

	return hp->sw_bmsr & BMSR_LSTATUS;
}

static const struct ethtool_ops hme_ethtool_ops = {
	.get_settings		= hme_get_settings,
	.set_settings		= hme_set_settings,
	.get_drvinfo		= hme_get_drvinfo,
	.get_link		= hme_get_link,
};

/* Counts how many times the version banner has printed (print only once). */
static int hme_version_printed;

#ifdef CONFIG_SBUS
/* Given a happy meal sbus device, find its quattro parent.
 * If none exist, allocate and return a new one.
 *
 * Return NULL on failure.
 */
static struct quattro * __devinit quattro_sbus_find(struct platform_device *child)
{
	struct device *parent = child->dev.parent;
	struct platform_device *op;
	struct quattro *qp;

	op = to_platform_device(parent);
	/* All four QFE slots share one SBUS parent; reuse its quattro if
	 * one was already allocated by an earlier sibling probe.
	 */
	qp = dev_get_drvdata(&op->dev);
	if (qp)
		return qp;

	qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
	if (qp != NULL) {
		int i;

		for (i = 0; i < 4; i++)
			qp->happy_meals[i] = NULL;

		qp->quattro_dev = child;
		/* Link onto the global SBUS quattro list and remember it on
		 * the parent so siblings find it above.
		 */
		qp->next = qfe_sbus_list;
		qfe_sbus_list = qp;
		dev_set_drvdata(&op->dev, qp);
	}
	return qp;
}

/* After all quattro cards have been probed, we call these functions
 * to register the IRQ handlers for the cards that have been
 * successfully probed and skip the cards that failed to initialize
 */
static int __init quattro_sbus_register_irqs(void)
{
	struct quattro *qp;

	for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
		struct platform_device *op = qp->quattro_dev;
		int err, qfe_slot, skip = 0;

		/* Skip any quattro with an unpopulated (failed-probe) slot. */
		for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
			if (!qp->happy_meals[qfe_slot])
				skip = 1;
		}
		if (skip)
			continue;

		err = request_irq(op->archdata.irqs[0],
				  quattro_sbus_interrupt,
				  IRQF_SHARED, "Quattro",
				  qp);
		if (err != 0) {
			printk(KERN_ERR "Quattro HME: IRQ registration " "error %d.\n", err);
			return err;
		}
	}
	return 0;
}

/* Mirror of quattro_sbus_register_irqs(): free the shared IRQ of every
 * fully-populated quattro, skipping the same cards that were skipped at
 * registration time.
 */
static void quattro_sbus_free_irqs(void)
{
	struct quattro *qp;

	for (qp = qfe_sbus_list; qp != NULL; qp = qp->next) {
		struct platform_device *op = qp->quattro_dev;
		int qfe_slot, skip = 0;

		for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) {
			if (!qp->happy_meals[qfe_slot])
				skip = 1;
		}
		if (skip)
			continue;

		free_irq(op->archdata.irqs[0], qp);
	}
}
#endif /* CONFIG_SBUS */

#ifdef CONFIG_PCI
/* PCI flavor of quattro lookup: quattro siblings share the DEC bridge
 * (pdev->bus->self) as their common parent.
 */
static struct quattro * __devinit quattro_pci_find(struct pci_dev *pdev)
{
	struct pci_dev *bdev = pdev->bus->self;
	struct quattro *qp;

	if (!bdev)
		return NULL;
	for (qp = qfe_pci_list; qp != NULL; qp = qp->next) {
		struct pci_dev *qpdev = qp->quattro_dev;

		if (qpdev == bdev)
			return qp;
	}
	qp = kmalloc(sizeof(struct quattro), GFP_KERNEL);
	if (qp != NULL) {
		int i;

		for (i = 0; i < 4; i++)
qp->happy_meals[i] = NULL; qp->quattro_dev = bdev; qp->next = qfe_pci_list; qfe_pci_list = qp; /* No range tricks necessary on PCI. */ qp->nranges = 0; } return qp; } #endif /* CONFIG_PCI */ static const struct net_device_ops hme_netdev_ops = { .ndo_open = happy_meal_open, .ndo_stop = happy_meal_close, .ndo_start_xmit = happy_meal_start_xmit, .ndo_tx_timeout = happy_meal_tx_timeout, .ndo_get_stats = happy_meal_get_stats, .ndo_set_rx_mode = happy_meal_set_multicast, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; #ifdef CONFIG_SBUS static int __devinit happy_meal_sbus_probe_one(struct platform_device *op, int is_qfe) { struct device_node *dp = op->dev.of_node, *sbus_dp; struct quattro *qp = NULL; struct happy_meal *hp; struct net_device *dev; int i, qfe_slot = -1; int err = -ENODEV; sbus_dp = op->dev.parent->of_node; /* We can match PCI devices too, do not accept those here. */ if (strcmp(sbus_dp->name, "sbus") && strcmp(sbus_dp->name, "sbi")) return err; if (is_qfe) { qp = quattro_sbus_find(op); if (qp == NULL) goto err_out; for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) if (qp->happy_meals[qfe_slot] == NULL) break; if (qfe_slot == 4) goto err_out; } err = -ENOMEM; dev = alloc_etherdev(sizeof(struct happy_meal)); if (!dev) goto err_out; SET_NETDEV_DEV(dev, &op->dev); if (hme_version_printed++ == 0) printk(KERN_INFO "%s", version); /* If user did not specify a MAC address specifically, use * the Quattro local-mac-address property... 
*/ for (i = 0; i < 6; i++) { if (macaddr[i] != 0) break; } if (i < 6) { /* a mac address was given */ for (i = 0; i < 6; i++) dev->dev_addr[i] = macaddr[i]; macaddr[5]++; } else { const unsigned char *addr; int len; addr = of_get_property(dp, "local-mac-address", &len); if (qfe_slot != -1 && addr && len == 6) memcpy(dev->dev_addr, addr, 6); else memcpy(dev->dev_addr, idprom->id_ethaddr, 6); } hp = netdev_priv(dev); hp->happy_dev = op; hp->dma_dev = &op->dev; spin_lock_init(&hp->happy_lock); err = -ENODEV; if (qp != NULL) { hp->qfe_parent = qp; hp->qfe_ent = qfe_slot; qp->happy_meals[qfe_slot] = dev; } hp->gregs = of_ioremap(&op->resource[0], 0, GREG_REG_SIZE, "HME Global Regs"); if (!hp->gregs) { printk(KERN_ERR "happymeal: Cannot map global registers.\n"); goto err_out_free_netdev; } hp->etxregs = of_ioremap(&op->resource[1], 0, ETX_REG_SIZE, "HME TX Regs"); if (!hp->etxregs) { printk(KERN_ERR "happymeal: Cannot map MAC TX registers.\n"); goto err_out_iounmap; } hp->erxregs = of_ioremap(&op->resource[2], 0, ERX_REG_SIZE, "HME RX Regs"); if (!hp->erxregs) { printk(KERN_ERR "happymeal: Cannot map MAC RX registers.\n"); goto err_out_iounmap; } hp->bigmacregs = of_ioremap(&op->resource[3], 0, BMAC_REG_SIZE, "HME BIGMAC Regs"); if (!hp->bigmacregs) { printk(KERN_ERR "happymeal: Cannot map BIGMAC registers.\n"); goto err_out_iounmap; } hp->tcvregs = of_ioremap(&op->resource[4], 0, TCVR_REG_SIZE, "HME Tranceiver Regs"); if (!hp->tcvregs) { printk(KERN_ERR "happymeal: Cannot map TCVR registers.\n"); goto err_out_iounmap; } hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff); if (hp->hm_revision == 0xff) hp->hm_revision = 0xa0; /* Now enable the feature flags we can. */ if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21) hp->happy_flags = HFLAG_20_21; else if (hp->hm_revision != 0xa0) hp->happy_flags = HFLAG_NOT_A0; if (qp != NULL) hp->happy_flags |= HFLAG_QUATTRO; /* Get the supported DVMA burst sizes from our Happy SBUS. 
*/ hp->happy_bursts = of_getintprop_default(sbus_dp, "burst-sizes", 0x00); hp->happy_block = dma_alloc_coherent(hp->dma_dev, PAGE_SIZE, &hp->hblock_dvma, GFP_ATOMIC); err = -ENOMEM; if (!hp->happy_block) { printk(KERN_ERR "happymeal: Cannot allocate descriptors.\n"); goto err_out_iounmap; } /* Force check of the link first time we are brought up. */ hp->linkcheck = 0; /* Force timer state to 'asleep' with count of zero. */ hp->timer_state = asleep; hp->timer_ticks = 0; init_timer(&hp->happy_timer); hp->dev = dev; dev->netdev_ops = &hme_netdev_ops; dev->watchdog_timeo = 5*HZ; dev->ethtool_ops = &hme_ethtool_ops; /* Happy Meal can do it all... */ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= dev->hw_features | NETIF_F_RXCSUM; dev->irq = op->archdata.irqs[0]; #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) /* Hook up SBUS register/descriptor accessors. */ hp->read_desc32 = sbus_hme_read_desc32; hp->write_txd = sbus_hme_write_txd; hp->write_rxd = sbus_hme_write_rxd; hp->read32 = sbus_hme_read32; hp->write32 = sbus_hme_write32; #endif /* Grrr, Happy Meal comes up by default not advertising * full duplex 100baseT capabilities, fix this. 
*/ spin_lock_irq(&hp->happy_lock); happy_meal_set_initial_advertisement(hp); spin_unlock_irq(&hp->happy_lock); err = register_netdev(hp->dev); if (err) { printk(KERN_ERR "happymeal: Cannot register net device, " "aborting.\n"); goto err_out_free_coherent; } dev_set_drvdata(&op->dev, hp); if (qfe_slot != -1) printk(KERN_INFO "%s: Quattro HME slot %d (SBUS) 10/100baseT Ethernet ", dev->name, qfe_slot); else printk(KERN_INFO "%s: HAPPY MEAL (SBUS) 10/100baseT Ethernet ", dev->name); printk("%pM\n", dev->dev_addr); return 0; err_out_free_coherent: dma_free_coherent(hp->dma_dev, PAGE_SIZE, hp->happy_block, hp->hblock_dvma); err_out_iounmap: if (hp->gregs) of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE); if (hp->etxregs) of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE); if (hp->erxregs) of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE); if (hp->bigmacregs) of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE); if (hp->tcvregs) of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE); if (qp) qp->happy_meals[qfe_slot] = NULL; err_out_free_netdev: free_netdev(dev); err_out: return err; } #endif #ifdef CONFIG_PCI #ifndef CONFIG_SPARC static int is_quattro_p(struct pci_dev *pdev) { struct pci_dev *busdev = pdev->bus->self; struct pci_dev *this_pdev; int n_hmes; if (busdev == NULL || busdev->vendor != PCI_VENDOR_ID_DEC || busdev->device != PCI_DEVICE_ID_DEC_21153) return 0; n_hmes = 0; list_for_each_entry(this_pdev, &pdev->bus->devices, bus_list) { if (this_pdev->vendor == PCI_VENDOR_ID_SUN && this_pdev->device == PCI_DEVICE_ID_SUN_HAPPYMEAL) n_hmes++; } if (n_hmes != 4) return 0; return 1; } /* Fetch MAC address from vital product data of PCI ROM. 
*/ static int find_eth_addr_in_vpd(void __iomem *rom_base, int len, int index, unsigned char *dev_addr) { int this_offset; for (this_offset = 0x20; this_offset < len; this_offset++) { void __iomem *p = rom_base + this_offset; if (readb(p + 0) != 0x90 || readb(p + 1) != 0x00 || readb(p + 2) != 0x09 || readb(p + 3) != 0x4e || readb(p + 4) != 0x41 || readb(p + 5) != 0x06) continue; this_offset += 6; p += 6; if (index == 0) { int i; for (i = 0; i < 6; i++) dev_addr[i] = readb(p + i); return 1; } index--; } return 0; } static void get_hme_mac_nonsparc(struct pci_dev *pdev, unsigned char *dev_addr) { size_t size; void __iomem *p = pci_map_rom(pdev, &size); if (p) { int index = 0; int found; if (is_quattro_p(pdev)) index = PCI_SLOT(pdev->devfn); found = readb(p) == 0x55 && readb(p + 1) == 0xaa && find_eth_addr_in_vpd(p, (64 * 1024), index, dev_addr); pci_unmap_rom(pdev, p); if (found) return; } /* Sun MAC prefix then 3 random bytes. */ dev_addr[0] = 0x08; dev_addr[1] = 0x00; dev_addr[2] = 0x20; get_random_bytes(&dev_addr[3], 3); } #endif /* !(CONFIG_SPARC) */ static int __devinit happy_meal_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct quattro *qp = NULL; #ifdef CONFIG_SPARC struct device_node *dp; #endif struct happy_meal *hp; struct net_device *dev; void __iomem *hpreg_base; unsigned long hpreg_res; int i, qfe_slot = -1; char prom_name[64]; int err; /* Now make sure pci_dev cookie is there. 
*/ #ifdef CONFIG_SPARC dp = pci_device_to_OF_node(pdev); strcpy(prom_name, dp->name); #else if (is_quattro_p(pdev)) strcpy(prom_name, "SUNW,qfe"); else strcpy(prom_name, "SUNW,hme"); #endif err = -ENODEV; if (pci_enable_device(pdev)) goto err_out; pci_set_master(pdev); if (!strcmp(prom_name, "SUNW,qfe") || !strcmp(prom_name, "qfe")) { qp = quattro_pci_find(pdev); if (qp == NULL) goto err_out; for (qfe_slot = 0; qfe_slot < 4; qfe_slot++) if (qp->happy_meals[qfe_slot] == NULL) break; if (qfe_slot == 4) goto err_out; } dev = alloc_etherdev(sizeof(struct happy_meal)); err = -ENOMEM; if (!dev) goto err_out; SET_NETDEV_DEV(dev, &pdev->dev); if (hme_version_printed++ == 0) printk(KERN_INFO "%s", version); dev->base_addr = (long) pdev; hp = netdev_priv(dev); hp->happy_dev = pdev; hp->dma_dev = &pdev->dev; spin_lock_init(&hp->happy_lock); if (qp != NULL) { hp->qfe_parent = qp; hp->qfe_ent = qfe_slot; qp->happy_meals[qfe_slot] = dev; } hpreg_res = pci_resource_start(pdev, 0); err = -ENODEV; if ((pci_resource_flags(pdev, 0) & IORESOURCE_IO) != 0) { printk(KERN_ERR "happymeal(PCI): Cannot find proper PCI device base address.\n"); goto err_out_clear_quattro; } if (pci_request_regions(pdev, DRV_NAME)) { printk(KERN_ERR "happymeal(PCI): Cannot obtain PCI resources, " "aborting.\n"); goto err_out_clear_quattro; } if ((hpreg_base = ioremap(hpreg_res, 0x8000)) == NULL) { printk(KERN_ERR "happymeal(PCI): Unable to remap card memory.\n"); goto err_out_free_res; } for (i = 0; i < 6; i++) { if (macaddr[i] != 0) break; } if (i < 6) { /* a mac address was given */ for (i = 0; i < 6; i++) dev->dev_addr[i] = macaddr[i]; macaddr[5]++; } else { #ifdef CONFIG_SPARC const unsigned char *addr; int len; if (qfe_slot != -1 && (addr = of_get_property(dp, "local-mac-address", &len)) != NULL && len == 6) { memcpy(dev->dev_addr, addr, 6); } else { memcpy(dev->dev_addr, idprom->id_ethaddr, 6); } #else get_hme_mac_nonsparc(pdev, &dev->dev_addr[0]); #endif } /* Layout registers. 
*/ hp->gregs = (hpreg_base + 0x0000UL); hp->etxregs = (hpreg_base + 0x2000UL); hp->erxregs = (hpreg_base + 0x4000UL); hp->bigmacregs = (hpreg_base + 0x6000UL); hp->tcvregs = (hpreg_base + 0x7000UL); #ifdef CONFIG_SPARC hp->hm_revision = of_getintprop_default(dp, "hm-rev", 0xff); if (hp->hm_revision == 0xff) hp->hm_revision = 0xc0 | (pdev->revision & 0x0f); #else /* works with this on non-sparc hosts */ hp->hm_revision = 0x20; #endif /* Now enable the feature flags we can. */ if (hp->hm_revision == 0x20 || hp->hm_revision == 0x21) hp->happy_flags = HFLAG_20_21; else if (hp->hm_revision != 0xa0 && hp->hm_revision != 0xc0) hp->happy_flags = HFLAG_NOT_A0; if (qp != NULL) hp->happy_flags |= HFLAG_QUATTRO; /* And of course, indicate this is PCI. */ hp->happy_flags |= HFLAG_PCI; #ifdef CONFIG_SPARC /* Assume PCI happy meals can handle all burst sizes. */ hp->happy_bursts = DMA_BURSTBITS; #endif hp->happy_block = (struct hmeal_init_block *) dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &hp->hblock_dvma, GFP_KERNEL); err = -ENODEV; if (!hp->happy_block) { printk(KERN_ERR "happymeal(PCI): Cannot get hme init block.\n"); goto err_out_iounmap; } hp->linkcheck = 0; hp->timer_state = asleep; hp->timer_ticks = 0; init_timer(&hp->happy_timer); hp->dev = dev; dev->netdev_ops = &hme_netdev_ops; dev->watchdog_timeo = 5*HZ; dev->ethtool_ops = &hme_ethtool_ops; dev->irq = pdev->irq; dev->dma = 0; /* Happy Meal can do it all... */ dev->hw_features = NETIF_F_SG | NETIF_F_HW_CSUM; dev->features |= dev->hw_features | NETIF_F_RXCSUM; #if defined(CONFIG_SBUS) && defined(CONFIG_PCI) /* Hook up PCI register/descriptor accessors. */ hp->read_desc32 = pci_hme_read_desc32; hp->write_txd = pci_hme_write_txd; hp->write_rxd = pci_hme_write_rxd; hp->read32 = pci_hme_read32; hp->write32 = pci_hme_write32; #endif /* Grrr, Happy Meal comes up by default not advertising * full duplex 100baseT capabilities, fix this. 
*/ spin_lock_irq(&hp->happy_lock); happy_meal_set_initial_advertisement(hp); spin_unlock_irq(&hp->happy_lock); err = register_netdev(hp->dev); if (err) { printk(KERN_ERR "happymeal(PCI): Cannot register net device, " "aborting.\n"); goto err_out_iounmap; } dev_set_drvdata(&pdev->dev, hp); if (!qfe_slot) { struct pci_dev *qpdev = qp->quattro_dev; prom_name[0] = 0; if (!strncmp(dev->name, "eth", 3)) { int i = simple_strtoul(dev->name + 3, NULL, 10); sprintf(prom_name, "-%d", i + 3); } printk(KERN_INFO "%s%s: Quattro HME (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, prom_name); if (qpdev->vendor == PCI_VENDOR_ID_DEC && qpdev->device == PCI_DEVICE_ID_DEC_21153) printk("DEC 21153 PCI Bridge\n"); else printk("unknown bridge %04x.%04x\n", qpdev->vendor, qpdev->device); } if (qfe_slot != -1) printk(KERN_INFO "%s: Quattro HME slot %d (PCI/CheerIO) 10/100baseT Ethernet ", dev->name, qfe_slot); else printk(KERN_INFO "%s: HAPPY MEAL (PCI/CheerIO) 10/100BaseT Ethernet ", dev->name); printk("%pM\n", dev->dev_addr); return 0; err_out_iounmap: iounmap(hp->gregs); err_out_free_res: pci_release_regions(pdev); err_out_clear_quattro: if (qp != NULL) qp->happy_meals[qfe_slot] = NULL; free_netdev(dev); err_out: return err; } static void __devexit happy_meal_pci_remove(struct pci_dev *pdev) { struct happy_meal *hp = dev_get_drvdata(&pdev->dev); struct net_device *net_dev = hp->dev; unregister_netdev(net_dev); dma_free_coherent(hp->dma_dev, PAGE_SIZE, hp->happy_block, hp->hblock_dvma); iounmap(hp->gregs); pci_release_regions(hp->happy_dev); free_netdev(net_dev); dev_set_drvdata(&pdev->dev, NULL); } static DEFINE_PCI_DEVICE_TABLE(happymeal_pci_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_HAPPYMEAL) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, happymeal_pci_ids); static struct pci_driver hme_pci_driver = { .name = "hme", .id_table = happymeal_pci_ids, .probe = happy_meal_pci_probe, .remove = __devexit_p(happy_meal_pci_remove), }; static int __init 
happy_meal_pci_init(void) { return pci_register_driver(&hme_pci_driver); } static void happy_meal_pci_exit(void) { pci_unregister_driver(&hme_pci_driver); while (qfe_pci_list) { struct quattro *qfe = qfe_pci_list; struct quattro *next = qfe->next; kfree(qfe); qfe_pci_list = next; } } #endif #ifdef CONFIG_SBUS static const struct of_device_id hme_sbus_match[]; static int __devinit hme_sbus_probe(struct platform_device *op) { const struct of_device_id *match; struct device_node *dp = op->dev.of_node; const char *model = of_get_property(dp, "model", NULL); int is_qfe; match = of_match_device(hme_sbus_match, &op->dev); if (!match) return -EINVAL; is_qfe = (match->data != NULL); if (!is_qfe && model && !strcmp(model, "SUNW,sbus-qfe")) is_qfe = 1; return happy_meal_sbus_probe_one(op, is_qfe); } static int __devexit hme_sbus_remove(struct platform_device *op) { struct happy_meal *hp = dev_get_drvdata(&op->dev); struct net_device *net_dev = hp->dev; unregister_netdev(net_dev); /* XXX qfe parent interrupt... 
*/ of_iounmap(&op->resource[0], hp->gregs, GREG_REG_SIZE); of_iounmap(&op->resource[1], hp->etxregs, ETX_REG_SIZE); of_iounmap(&op->resource[2], hp->erxregs, ERX_REG_SIZE); of_iounmap(&op->resource[3], hp->bigmacregs, BMAC_REG_SIZE); of_iounmap(&op->resource[4], hp->tcvregs, TCVR_REG_SIZE); dma_free_coherent(hp->dma_dev, PAGE_SIZE, hp->happy_block, hp->hblock_dvma); free_netdev(net_dev); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id hme_sbus_match[] = { { .name = "SUNW,hme", }, { .name = "SUNW,qfe", .data = (void *) 1, }, { .name = "qfe", .data = (void *) 1, }, {}, }; MODULE_DEVICE_TABLE(of, hme_sbus_match); static struct platform_driver hme_sbus_driver = { .driver = { .name = "hme", .owner = THIS_MODULE, .of_match_table = hme_sbus_match, }, .probe = hme_sbus_probe, .remove = __devexit_p(hme_sbus_remove), }; static int __init happy_meal_sbus_init(void) { int err; err = platform_driver_register(&hme_sbus_driver); if (!err) err = quattro_sbus_register_irqs(); return err; } static void happy_meal_sbus_exit(void) { platform_driver_unregister(&hme_sbus_driver); quattro_sbus_free_irqs(); while (qfe_sbus_list) { struct quattro *qfe = qfe_sbus_list; struct quattro *next = qfe->next; kfree(qfe); qfe_sbus_list = next; } } #endif static int __init happy_meal_probe(void) { int err = 0; #ifdef CONFIG_SBUS err = happy_meal_sbus_init(); #endif #ifdef CONFIG_PCI if (!err) { err = happy_meal_pci_init(); #ifdef CONFIG_SBUS if (err) happy_meal_sbus_exit(); #endif } #endif return err; } static void __exit happy_meal_exit(void) { #ifdef CONFIG_SBUS happy_meal_sbus_exit(); #endif #ifdef CONFIG_PCI happy_meal_pci_exit(); #endif } module_init(happy_meal_probe); module_exit(happy_meal_exit);
gpl-2.0
M1cha/lge-kernel-lproj
drivers/of/of_spi.c
5056
2422
/* * SPI OF support routines * Copyright (C) 2008 Secret Lab Technologies Ltd. * * Support routines for deriving SPI device attachments from the device * tree. */ #include <linux/module.h> #include <linux/of.h> #include <linux/device.h> #include <linux/spi/spi.h> #include <linux/of_irq.h> #include <linux/of_spi.h> /** * of_register_spi_devices - Register child devices onto the SPI bus * @master: Pointer to spi_master device * * Registers an spi_device for each child node of master node which has a 'reg' * property. */ void of_register_spi_devices(struct spi_master *master) { struct spi_device *spi; struct device_node *nc; const __be32 *prop; int rc; int len; if (!master->dev.of_node) return; for_each_child_of_node(master->dev.of_node, nc) { /* Alloc an spi_device */ spi = spi_alloc_device(master); if (!spi) { dev_err(&master->dev, "spi_device alloc error for %s\n", nc->full_name); spi_dev_put(spi); continue; } /* Select device driver */ if (of_modalias_node(nc, spi->modalias, sizeof(spi->modalias)) < 0) { dev_err(&master->dev, "cannot find modalias for %s\n", nc->full_name); spi_dev_put(spi); continue; } /* Device address */ prop = of_get_property(nc, "reg", &len); if (!prop || len < sizeof(*prop)) { dev_err(&master->dev, "%s has no 'reg' property\n", nc->full_name); spi_dev_put(spi); continue; } spi->chip_select = be32_to_cpup(prop); /* Mode (clock phase/polarity/etc.) 
*/ if (of_find_property(nc, "spi-cpha", NULL)) spi->mode |= SPI_CPHA; if (of_find_property(nc, "spi-cpol", NULL)) spi->mode |= SPI_CPOL; if (of_find_property(nc, "spi-cs-high", NULL)) spi->mode |= SPI_CS_HIGH; /* Device speed */ prop = of_get_property(nc, "spi-max-frequency", &len); if (!prop || len < sizeof(*prop)) { dev_err(&master->dev, "%s has no 'spi-max-frequency' property\n", nc->full_name); spi_dev_put(spi); continue; } spi->max_speed_hz = be32_to_cpup(prop); /* IRQ */ spi->irq = irq_of_parse_and_map(nc, 0); /* Store a pointer to the node in the device structure */ of_node_get(nc); spi->dev.of_node = nc; /* Register the new device */ request_module(spi->modalias); rc = spi_add_device(spi); if (rc) { dev_err(&master->dev, "spi_device register error %s\n", nc->full_name); spi_dev_put(spi); } } } EXPORT_SYMBOL(of_register_spi_devices);
gpl-2.0
multipath-tcp/mptcp_3.12.x
arch/x86/xen/grant-table.c
7360
4010
/****************************************************************************** * grant_table.c * x86 specific part * * Granting foreign access to our memory reservation. * * Copyright (c) 2005-2006, Christopher Clark * Copyright (c) 2004-2005, K A Fraser * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> * VA Linux Systems Japan. Split out x86 specific part. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation; or, when distributed * separately from the Linux kernel or incorporated into other * software packages, subject to the following license: * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this source file (the "Software"), to deal in the Software without * restriction, including without limitation the rights to use, copy, modify, * merge, publish, distribute, sublicense, and/or sell copies of the Software, * and to permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
*/ #include <linux/sched.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <xen/interface/xen.h> #include <xen/page.h> #include <xen/grant_table.h> #include <asm/pgtable.h> static int map_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { unsigned long **frames = (unsigned long **)data; set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } /* * This function is used to map shared frames to store grant status. It is * different from map_pte_fn above, the frames type here is uint64_t. */ static int map_pte_fn_status(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { uint64_t **frames = (uint64_t **)data; set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL)); (*frames)++; return 0; } static int unmap_pte_fn(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data) { set_pte_at(&init_mm, addr, pte, __pte(0)); return 0; } int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, void **__shared) { int rc; void *shared = *__shared; if (shared == NULL) { struct vm_struct *area = alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL); BUG_ON(area == NULL); shared = area->addr; *__shared = shared; } rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn, &frames); return rc; } int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, unsigned long max_nr_gframes, grant_status_t **__shared) { int rc; grant_status_t *shared = *__shared; if (shared == NULL) { /* No need to pass in PTE as we are going to do it * in apply_to_page_range anyhow. 
*/ struct vm_struct *area = alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL); BUG_ON(area == NULL); shared = area->addr; *__shared = shared; } rc = apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, map_pte_fn_status, &frames); return rc; } void arch_gnttab_unmap(void *shared, unsigned long nr_gframes) { apply_to_page_range(&init_mm, (unsigned long)shared, PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL); }
gpl-2.0
nikhil18/lightning-kernel-CAF
arch/mips/pci/ops-bridge.c
9408
8740
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1999, 2000, 04, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
 */
#include <linux/pci.h>
#include <asm/paccess.h>
#include <asm/pci/bridge.h>
#include <asm/sn/arch.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn0/hub.h>

/*
 * Most of the IOC3 PCI config register aren't present
 * we emulate what is needed for a normal PCI enumeration
 */
static u32 emulate_ioc3_cfg(int where, int size)
{
	if (size == 1 && where == 0x3d)
		return 0x01;
	else if (size == 2 && where == 0x3c)
		return 0x0100;
	else if (size == 4 && where == 0x3c)
		return 0x00000100;

	return 0;
}

/*
 * The Bridge ASIC supports both type 0 and type 1 access.  Type 1 is
 * not really documented, so right now I can't write code which uses it.
 * Therefore we use type 0 accesses for now even though they won't work
 * correcly for PCI-to-PCI bridges.
 *
 * The function is complicated by the ultimate brokeness of the IOC3 chip
 * which is used in SGI systems.  The IOC3 can only handle 32-bit PCI
 * accesses and does only decode parts of it's address space.
 */
static int pci_conf0_read_config(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 * value)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
	bridge_t *bridge = bc->base;
	int slot = PCI_SLOT(devfn);
	int fn = PCI_FUNC(devfn);
	volatile void *addr;
	u32 cf, shift, mask;
	int res;

	/* Probe the vendor ID first; get_dbe() catches the bus error a
	 * read of an empty slot raises. */
	addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
	if (get_dbe(cf, (u32 *) addr))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/*
	 * The IOC3 is hopelessly broken; don't even let the generic PCI
	 * code look at it directly ...
	 */
	if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
		goto oh_my_gawd;

	/* 'where ^ (4 - size)' swizzles the byte lane for sub-word
	 * accesses on this big-endian bridge. */
	addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];

	if (size == 1)
		res = get_dbe(*value, (u8 *) addr);
	else if (size == 2)
		res = get_dbe(*value, (u16 *) addr);
	else
		res = get_dbe(*value, (u32 *) addr);

	return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;

oh_my_gawd:

	/*
	 * IOC3: registers the chip doesn't decode are emulated so the
	 * generic PCI code never touches the wrong register.
	 */
	if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
		*value = emulate_ioc3_cfg(where, size);
		return PCIBIOS_SUCCESSFUL;
	}

	/*
	 * IOC3 can only handle full 32-bit word accesses, so read the
	 * whole word and extract the requested bytes.
	 */
	addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
	if (get_dbe(cf, (u32 *) addr))
		return PCIBIOS_DEVICE_NOT_FOUND;
	shift = ((where & 3) << 3);
	mask = (0xffffffffU >> ((4 - size) << 3));
	*value = (cf >> shift) & mask;

	return PCIBIOS_SUCCESSFUL;
}

static int pci_conf1_read_config(struct pci_bus *bus, unsigned int devfn,
				 int where, int size, u32 * value)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
	bridge_t *bridge = bc->base;
	int busno = bus->number;
	int slot = PCI_SLOT(devfn);
	int fn = PCI_FUNC(devfn);
	volatile void *addr;
	u32 cf, shift, mask;
	int res;

	/* Type 1: select the target bus/device via b_pci_cfg before
	 * touching the type 1 config window. */
	bridge->b_pci_cfg = (busno << 16) | (slot << 11);
	addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
	if (get_dbe(cf, (u32 *) addr))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/*
	 * The IOC3 is hopelessly broken; don't even let the generic PCI
	 * code look at it directly ...
	 */
	if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
		goto oh_my_gawd;

	bridge->b_pci_cfg = (busno << 16) | (slot << 11);
	addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];

	if (size == 1)
		res = get_dbe(*value, (u8 *) addr);
	else if (size == 2)
		res = get_dbe(*value, (u16 *) addr);
	else
		res = get_dbe(*value, (u32 *) addr);

	return res ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL;

oh_my_gawd:

	/*
	 * IOC3: registers the chip doesn't decode are emulated so the
	 * generic PCI code never touches the wrong register.
	 */
	if ((where >= 0x14 && where < 0x40) || (where >= 0x48)) {
		*value = emulate_ioc3_cfg(where, size);
		return PCIBIOS_SUCCESSFUL;
	}

	/*
	 * IOC3 can only handle full 32-bit word accesses, so read the
	 * whole word and extract the requested bytes.
	 */
	bridge->b_pci_cfg = (busno << 16) | (slot << 11);
	addr = &bridge->b_type1_cfg.c[(fn << 8) | where];
	if (get_dbe(cf, (u32 *) addr))
		return PCIBIOS_DEVICE_NOT_FOUND;
	shift = ((where & 3) << 3);
	mask = (0xffffffffU >> ((4 - size) << 3));
	*value = (cf >> shift) & mask;

	return PCIBIOS_SUCCESSFUL;
}

/* Dispatch: bus 0 sits directly behind the Bridge (type 0 cycles);
 * everything else needs type 1 configuration cycles. */
static int pci_read_config(struct pci_bus *bus, unsigned int devfn,
			   int where, int size, u32 * value)
{
	if (bus->number > 0)
		return pci_conf1_read_config(bus, devfn, where, size, value);

	return pci_conf0_read_config(bus, devfn, where, size, value);
}

static int pci_conf0_write_config(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 value)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
	bridge_t *bridge = bc->base;
	int slot = PCI_SLOT(devfn);
	int fn = PCI_FUNC(devfn);
	volatile void *addr;
	u32 cf, shift, mask, smask;
	int res;

	addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[PCI_VENDOR_ID];
	if (get_dbe(cf, (u32 *) addr))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/*
	 * The IOC3 is hopelessly broken; don't even let the generic PCI
	 * code look at it directly ...
	 */
	if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
		goto oh_my_gawd;

	addr = &bridge->b_type0_cfg_dev[slot].f[fn].c[where ^ (4 - size)];

	if (size == 1) {
		res = put_dbe(value, (u8 *) addr);
	} else if (size == 2) {
		res = put_dbe(value, (u16 *) addr);
	} else {
		res = put_dbe(value, (u32 *) addr);
	}

	if (res)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;

oh_my_gawd:

	/*
	 * IOC3: silently drop writes to registers the chip doesn't decode
	 * so the generic PCI code never touches the wrong register.
	 */
	if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
		return PCIBIOS_SUCCESSFUL;

	/*
	 * IOC3 can only handle full 32-bit word accesses, so do a
	 * read-modify-write of the containing word.
	 */
	addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
	if (get_dbe(cf, (u32 *) addr))
		return PCIBIOS_DEVICE_NOT_FOUND;
	shift = ((where & 3) << 3);
	mask = (0xffffffffU >> ((4 - size) << 3));
	smask = mask << shift;

	cf = (cf & ~smask) | ((value & mask) << shift);
	if (put_dbe(cf, (u32 *) addr))
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;
}

static int pci_conf1_write_config(struct pci_bus *bus, unsigned int devfn,
				  int where, int size, u32 value)
{
	struct bridge_controller *bc = BRIDGE_CONTROLLER(bus);
	bridge_t *bridge = bc->base;
	int slot = PCI_SLOT(devfn);
	int fn = PCI_FUNC(devfn);
	int busno = bus->number;
	volatile void *addr;
	u32 cf, shift, mask, smask;
	int res;

	bridge->b_pci_cfg = (busno << 16) | (slot << 11);
	addr = &bridge->b_type1_cfg.c[(fn << 8) | PCI_VENDOR_ID];
	if (get_dbe(cf, (u32 *) addr))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/*
	 * The IOC3 is hopelessly broken; don't even let the generic PCI
	 * code look at it directly ...
	 */
	if (cf == (PCI_VENDOR_ID_SGI | (PCI_DEVICE_ID_SGI_IOC3 << 16)))
		goto oh_my_gawd;

	addr = &bridge->b_type1_cfg.c[(fn << 8) | (where ^ (4 - size))];

	if (size == 1) {
		res = put_dbe(value, (u8 *) addr);
	} else if (size == 2) {
		res = put_dbe(value, (u16 *) addr);
	} else {
		res = put_dbe(value, (u32 *) addr);
	}

	if (res)
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;

oh_my_gawd:

	/*
	 * IOC3: silently drop writes to registers the chip doesn't decode
	 * so the generic PCI code never touches the wrong register.
	 */
	if ((where >= 0x14 && where < 0x40) || (where >= 0x48))
		return PCIBIOS_SUCCESSFUL;

	/*
	 * IOC3 can only handle full 32-bit word accesses, so do a
	 * read-modify-write of the containing word.
	 *
	 * NOTE(review): this fallback uses b_type0_cfg_dev (type 0 space)
	 * and does not re-program b_pci_cfg, whereas the equivalent path
	 * in pci_conf1_read_config uses b_type1_cfg with b_pci_cfg set.
	 * Looks like a copy-paste inconsistency from the conf0 variant —
	 * verify against Bridge ASIC documentation/hardware before
	 * changing, since an IOC3 should never appear behind a PCI-PCI
	 * bridge in practice.
	 */
	addr = &bridge->b_type0_cfg_dev[slot].f[fn].l[where >> 2];
	if (get_dbe(cf, (u32 *) addr))
		return PCIBIOS_DEVICE_NOT_FOUND;
	shift = ((where & 3) << 3);
	mask = (0xffffffffU >> ((4 - size) << 3));
	smask = mask << shift;

	cf = (cf & ~smask) | ((value & mask) << shift);
	if (put_dbe(cf, (u32 *) addr))
		return PCIBIOS_DEVICE_NOT_FOUND;

	return PCIBIOS_SUCCESSFUL;
}

/* Dispatch: bus 0 uses type 0 cycles, subordinate buses use type 1. */
static int pci_write_config(struct pci_bus *bus, unsigned int devfn,
			    int where, int size, u32 value)
{
	if (bus->number > 0)
		return pci_conf1_write_config(bus, devfn, where, size, value);

	return pci_conf0_write_config(bus, devfn, where, size, value);
}

struct pci_ops bridge_pci_ops = {
	.read	= pci_read_config,
	.write	= pci_write_config,
};
gpl-2.0
davros-/DEMENTED_kernel_jf
arch/powerpc/boot/cuboot-taishan.c
14016
1361
/* * Old U-boot compatibility for Taishan * * Author: Hugh Blemings <hugh@au.ibm.com> * * Copyright 2007 Hugh Blemings, IBM Corporation. * Based on cuboot-ebony.c which is: * Copyright 2007 David Gibson, IBM Corporation. * Based on cuboot-83xx.c, which is: * Copyright (c) 2007 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include "ops.h" #include "stdio.h" #include "cuboot.h" #include "reg.h" #include "dcr.h" #include "4xx.h" #define TARGET_4xx #define TARGET_44x #define TARGET_440GX #include "ppcboot.h" static bd_t bd; BSS_STACK(4096); static void taishan_fixups(void) { /* FIXME: sysclk should be derived by reading the FPGA registers */ unsigned long sysclk = 33000000; ibm440gx_fixup_clocks(sysclk, 6 * 1843200, 25000000); ibm4xx_sdram_fixup_memsize(); dt_fixup_mac_address_by_alias("ethernet0", bd.bi_enetaddr); dt_fixup_mac_address_by_alias("ethernet1", bd.bi_enet1addr); ibm4xx_fixup_ebc_ranges("/plb/opb/ebc"); } void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, unsigned long r6, unsigned long r7) { CUBOOT_INIT(); platform_ops.fixups = taishan_fixups; fdt_init(_dtb_start); serial_console_init(); }
gpl-2.0
eiselekd/gcc
gcc/testsuite/gcc.target/s390/20040305-1.c
193
1026
/* The testcase failed due to corrupted alias information. During the crossjump analyzing step the mem alias info of the st instructions are merged and get copied during basic block reordering which leads to an insn with wrong alias info. The scheduler afterwards exchanges the mvc and st instructions not recognizing the anti dependence. */ /* { dg-do run } */ /* { dg-options "-O3 -mtune=z990 -fno-inline" } */ extern void exit (int); extern void abort (void); int f; int g; int h; int* x = &f; int* p1 = &g; int* p2 = &h; int foo(void) { if (*x == 0) { x = p1; /* mvc - memory to memory */ p1 = (int*)0; /* st - register to memory */ return 1; } if (*x == 5) { f = 1; g = 2; p2 = (int*)0; /* st */ return 1; } } int main (int argc, char** argv) { foo (); /* If the scheduler has exchanged the mvc and st instructions, x is 0. The expected result is &g. */ if (x == &g) exit (0); else abort (); }
gpl-2.0
rancher/linux
samples/bpf/libbpf.c
193
3458
/* eBPF mini library: thin wrappers around the bpf(2) syscall for the
 * samples.  Each helper fills a union bpf_attr and issues one syscall;
 * return values/errno semantics are exactly those of bpf(2). */
#include <stdlib.h>
#include <stdio.h>
#include <linux/unistd.h>
#include <unistd.h>
#include <string.h>
#include <linux/netlink.h>
#include <linux/bpf.h>
#include <errno.h>
#include <net/ethernet.h>
#include <net/if.h>
#include <linux/if_packet.h>
#include <arpa/inet.h>
#include "libbpf.h"

/* Pointers are passed to the kernel as u64 regardless of word size. */
static __u64 ptr_to_u64(void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

/* Create a BPF map; returns the map fd or -1 with errno set. */
int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
		   int max_entries)
{
	union bpf_attr attr = {
		.map_type = map_type,
		.key_size = key_size,
		.value_size = value_size,
		.max_entries = max_entries
	};

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}

/* Insert or update @key -> @value in map @fd (flags: BPF_ANY etc.). */
int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags)
{
	union bpf_attr attr = {
		.map_fd = fd,
		.key = ptr_to_u64(key),
		.value = ptr_to_u64(value),
		.flags = flags,
	};

	return syscall(__NR_bpf, BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

/* Copy the value stored for @key into @value; -1/ENOENT if absent. */
int bpf_lookup_elem(int fd, void *key, void *value)
{
	union bpf_attr attr = {
		.map_fd = fd,
		.key = ptr_to_u64(key),
		.value = ptr_to_u64(value),
	};

	return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
}

/* Remove @key from map @fd. */
int bpf_delete_elem(int fd, void *key)
{
	union bpf_attr attr = {
		.map_fd = fd,
		.key = ptr_to_u64(key),
	};

	return syscall(__NR_bpf, BPF_MAP_DELETE_ELEM, &attr, sizeof(attr));
}

/* Iterate: write the key following @key into @next_key. */
int bpf_get_next_key(int fd, void *key, void *next_key)
{
	union bpf_attr attr = {
		.map_fd = fd,
		.key = ptr_to_u64(key),
		.next_key = ptr_to_u64(next_key),
	};

	return syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr, sizeof(attr));
}

#define ROUND_UP(x, n) (((x) + (n) - 1u) & ~((n) - 1u))

/* Verifier log shared by all loads; callers read it after a failure. */
char bpf_log_buf[LOG_BUF_SIZE];

/* Load a program of @prog_len bytes of insns; returns prog fd or -1.
 * On failure the verifier log is left in bpf_log_buf. */
int bpf_prog_load(enum bpf_prog_type prog_type,
		  const struct bpf_insn *insns, int prog_len,
		  const char *license, int kern_version)
{
	union bpf_attr attr = {
		.prog_type = prog_type,
		.insns = ptr_to_u64((void *) insns),
		.insn_cnt = prog_len / sizeof(struct bpf_insn),
		.license = ptr_to_u64((void *) license),
		.log_buf = ptr_to_u64(bpf_log_buf),
		.log_size = LOG_BUF_SIZE,
		.log_level = 1,
	};

	/* assign one field outside of struct init to make sure any
	 * padding is zero initialized
	 */
	attr.kern_version = kern_version;

	bpf_log_buf[0] = 0;

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}

/* Pin object @fd at @pathname on a bpf filesystem. */
int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr = {
		.pathname	= ptr_to_u64((void *)pathname),
		.bpf_fd		= fd,
	};

	return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
}

/* Retrieve a new fd for the object pinned at @pathname. */
int bpf_obj_get(const char *pathname)
{
	union bpf_attr attr = {
		.pathname	= ptr_to_u64((void *)pathname),
	};

	return syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
}

/* Open a non-blocking AF_PACKET raw socket bound to interface @name,
 * capturing all protocols; returns the socket fd or -1 (error printed). */
int open_raw_sock(const char *name)
{
	struct sockaddr_ll sll;
	int sock;

	sock = socket(PF_PACKET, SOCK_RAW | SOCK_NONBLOCK | SOCK_CLOEXEC,
		      htons(ETH_P_ALL));
	if (sock < 0) {
		printf("cannot create raw socket\n");
		return -1;
	}

	memset(&sll, 0, sizeof(sll));
	sll.sll_family = AF_PACKET;
	sll.sll_ifindex = if_nametoindex(name);
	sll.sll_protocol = htons(ETH_P_ALL);
	if (bind(sock, (struct sockaddr *)&sll, sizeof(sll)) < 0) {
		printf("bind to %s: %s\n", name, strerror(errno));
		close(sock);
		return -1;
	}

	return sock;
}

/* Thin wrapper for perf_event_open(2), which has no glibc stub. */
int perf_event_open(struct perf_event_attr *attr, int pid, int cpu,
		    int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu,
		       group_fd, flags);
}
gpl-2.0
Andiry/linux-test
arch/powerpc/platforms/pseries/lpar.c
193
20730
/* * pSeries_lpar.c * Copyright (C) 2001 Todd Inglett, IBM Corporation * * pSeries LPAR support. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* Enables debugging of low-level hash table routines - careful! */ #undef DEBUG #include <linux/kernel.h> #include <linux/dma-mapping.h> #include <linux/console.h> #include <linux/export.h> #include <linux/static_key.h> #include <asm/processor.h> #include <asm/mmu.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/machdep.h> #include <asm/mmu_context.h> #include <asm/iommu.h> #include <asm/tlbflush.h> #include <asm/tlb.h> #include <asm/prom.h> #include <asm/cputable.h> #include <asm/udbg.h> #include <asm/smp.h> #include <asm/trace.h> #include <asm/firmware.h> #include <asm/plpar_wrappers.h> #include <asm/kexec.h> #include <asm/fadump.h> #include "pseries.h" /* Flag bits for H_BULK_REMOVE */ #define HBR_REQUEST 0x4000000000000000UL #define HBR_RESPONSE 0x8000000000000000UL #define HBR_END 0xc000000000000000UL #define HBR_AVPN 0x0200000000000000UL #define HBR_ANDCOND 0x0100000000000000UL /* in hvCall.S */ EXPORT_SYMBOL(plpar_hcall); EXPORT_SYMBOL(plpar_hcall9); EXPORT_SYMBOL(plpar_hcall_norets); void vpa_init(int cpu) { int hwcpu = get_hard_smp_processor_id(cpu); unsigned long addr; long ret; struct paca_struct *pp; struct dtl_entry *dtl; /* * The spec says it "may be 
problematic" if CPU x registers the VPA of * CPU y. We should never do that, but wail if we ever do. */ WARN_ON(cpu != smp_processor_id()); if (cpu_has_feature(CPU_FTR_ALTIVEC)) lppaca_of(cpu).vmxregs_in_use = 1; if (cpu_has_feature(CPU_FTR_ARCH_207S)) lppaca_of(cpu).ebb_regs_in_use = 1; addr = __pa(&lppaca_of(cpu)); ret = register_vpa(hwcpu, addr); if (ret) { pr_err("WARNING: VPA registration for cpu %d (hw %d) of area " "%lx failed with %ld\n", cpu, hwcpu, addr, ret); return; } /* * PAPR says this feature is SLB-Buffer but firmware never * reports that. All SPLPAR support SLB shadow buffer. */ addr = __pa(paca[cpu].slb_shadow_ptr); if (firmware_has_feature(FW_FEATURE_SPLPAR)) { ret = register_slb_shadow(hwcpu, addr); if (ret) pr_err("WARNING: SLB shadow buffer registration for " "cpu %d (hw %d) of area %lx failed with %ld\n", cpu, hwcpu, addr, ret); } /* * Register dispatch trace log, if one has been allocated. */ pp = &paca[cpu]; dtl = pp->dispatch_log; if (dtl) { pp->dtl_ridx = 0; pp->dtl_curr = dtl; lppaca_of(cpu).dtl_idx = 0; /* hypervisor reads buffer length from this field */ dtl->enqueue_to_dispatch_time = cpu_to_be32(DISPATCH_LOG_BYTES); ret = register_dtl(hwcpu, __pa(dtl)); if (ret) pr_err("WARNING: DTL registration of cpu %d (hw %d) " "failed with %ld\n", smp_processor_id(), hwcpu, ret); lppaca_of(cpu).dtl_enable_mask = 2; } } static long pSeries_lpar_hpte_insert(unsigned long hpte_group, unsigned long vpn, unsigned long pa, unsigned long rflags, unsigned long vflags, int psize, int apsize, int ssize) { unsigned long lpar_rc; unsigned long flags; unsigned long slot; unsigned long hpte_v, hpte_r; if (!(vflags & HPTE_V_BOLTED)) pr_devel("hpte_insert(group=%lx, vpn=%016lx, " "pa=%016lx, rflags=%lx, vflags=%lx, psize=%d)\n", hpte_group, vpn, pa, rflags, vflags, psize); hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID; hpte_r = hpte_encode_r(pa, psize, apsize) | rflags; if (!(vflags & HPTE_V_BOLTED)) pr_devel(" hpte_v=%016lx, 
hpte_r=%016lx\n", hpte_v, hpte_r); /* Now fill in the actual HPTE */ /* Set CEC cookie to 0 */ /* Zero page = 0 */ /* I-cache Invalidate = 0 */ /* I-cache synchronize = 0 */ /* Exact = 0 */ flags = 0; /* Make pHyp happy */ if ((rflags & _PAGE_NO_CACHE) && !(rflags & _PAGE_WRITETHRU)) hpte_r &= ~HPTE_R_M; if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) flags |= H_COALESCE_CAND; lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); if (unlikely(lpar_rc == H_PTEG_FULL)) { if (!(vflags & HPTE_V_BOLTED)) pr_devel(" full\n"); return -1; } /* * Since we try and ioremap PHBs we don't own, the pte insert * will fail. However we must catch the failure in hash_page * or we will loop forever, so return -2 in this case. */ if (unlikely(lpar_rc != H_SUCCESS)) { if (!(vflags & HPTE_V_BOLTED)) pr_devel(" lpar err %ld\n", lpar_rc); return -2; } if (!(vflags & HPTE_V_BOLTED)) pr_devel(" -> slot: %lu\n", slot & 7); /* Because of iSeries, we have to pass down the secondary * bucket bit here as well */ return (slot & 7) | (!!(vflags & HPTE_V_SECONDARY) << 3); } static DEFINE_SPINLOCK(pSeries_lpar_tlbie_lock); static long pSeries_lpar_hpte_remove(unsigned long hpte_group) { unsigned long slot_offset; unsigned long lpar_rc; int i; unsigned long dummy1, dummy2; /* pick a random slot to start at */ slot_offset = mftb() & 0x7; for (i = 0; i < HPTES_PER_GROUP; i++) { /* don't remove a bolted entry */ lpar_rc = plpar_pte_remove(H_ANDCOND, hpte_group + slot_offset, (0x1UL << 4), &dummy1, &dummy2); if (lpar_rc == H_SUCCESS) return i; /* * The test for adjunct partition is performed before the * ANDCOND test. H_RESOURCE may be returned, so we need to * check for that as well. 
*/ BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); slot_offset++; slot_offset &= 0x7; } return -1; } static void pSeries_lpar_hptab_clear(void) { unsigned long size_bytes = 1UL << ppc64_pft_size; unsigned long hpte_count = size_bytes >> 4; struct { unsigned long pteh; unsigned long ptel; } ptes[4]; long lpar_rc; unsigned long i, j; /* Read in batches of 4, * invalidate only valid entries not in the VRMA * hpte_count will be a multiple of 4 */ for (i = 0; i < hpte_count; i += 4) { lpar_rc = plpar_pte_read_4_raw(0, i, (void *)ptes); if (lpar_rc != H_SUCCESS) continue; for (j = 0; j < 4; j++){ if ((ptes[j].pteh & HPTE_V_VRMA_MASK) == HPTE_V_VRMA_MASK) continue; if (ptes[j].pteh & HPTE_V_VALID) plpar_pte_remove_raw(0, i + j, 0, &(ptes[j].pteh), &(ptes[j].ptel)); } } #ifdef __LITTLE_ENDIAN__ /* * Reset exceptions to big endian. * * FIXME this is a hack for kexec, we need to reset the exception * endian before starting the new kernel and this is a convenient place * to do it. * * This is also called on boot when a fadump happens. In that case we * must not change the exception endian mode. */ if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) { long rc; rc = pseries_big_endian_exceptions(); /* * At this point it is unlikely panic() will get anything * out to the user, but at least this will stop us from * continuing on further and creating an even more * difficult to debug situation. * * There is a known problem when kdump'ing, if cpus are offline * the above call will fail. Rather than panicking again, keep * going and hope the kdump kernel is also little endian, which * it usually is. */ if (rc && !kdump_in_progress()) panic("Could not enable big endian exceptions"); } #endif } /* * NOTE: for updatepp ops we are fortunate that the linux "newpp" bits and * the low 3 bits of flags happen to line up. So no transform is needed. * We can probably optimize here and assume the high bits of newpp are * already zero. For now I am paranoid. 
*/ static long pSeries_lpar_hpte_updatepp(unsigned long slot, unsigned long newpp, unsigned long vpn, int psize, int apsize, int ssize, unsigned long inv_flags) { unsigned long lpar_rc; unsigned long flags = (newpp & 7) | H_AVPN; unsigned long want_v; want_v = hpte_encode_avpn(vpn, psize, ssize); pr_devel(" update: avpnv=%016lx, hash=%016lx, f=%lx, psize: %d ...", want_v, slot, flags, psize); lpar_rc = plpar_pte_protect(flags, slot, want_v); if (lpar_rc == H_NOT_FOUND) { pr_devel("not found !\n"); return -1; } pr_devel("ok\n"); BUG_ON(lpar_rc != H_SUCCESS); return 0; } static unsigned long pSeries_lpar_hpte_getword0(unsigned long slot) { unsigned long dword0; unsigned long lpar_rc; unsigned long dummy_word1; unsigned long flags; /* Read 1 pte at a time */ /* Do not need RPN to logical page translation */ /* No cross CEC PFT access */ flags = 0; lpar_rc = plpar_pte_read(flags, slot, &dword0, &dummy_word1); BUG_ON(lpar_rc != H_SUCCESS); return dword0; } static long pSeries_lpar_hpte_find(unsigned long vpn, int psize, int ssize) { unsigned long hash; unsigned long i; long slot; unsigned long want_v, hpte_v; hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize); want_v = hpte_encode_avpn(vpn, psize, ssize); /* Bolted entries are always in the primary group */ slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; for (i = 0; i < HPTES_PER_GROUP; i++) { hpte_v = pSeries_lpar_hpte_getword0(slot); if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID)) /* HPTE matches */ return slot; ++slot; } return -1; } static void pSeries_lpar_hpte_updateboltedpp(unsigned long newpp, unsigned long ea, int psize, int ssize) { unsigned long vpn; unsigned long lpar_rc, slot, vsid, flags; vsid = get_kernel_vsid(ea, ssize); vpn = hpt_vpn(ea, vsid, ssize); slot = pSeries_lpar_hpte_find(vpn, psize, ssize); BUG_ON(slot == -1); flags = newpp & 7; lpar_rc = plpar_pte_protect(flags, slot, 0); BUG_ON(lpar_rc != H_SUCCESS); } static void pSeries_lpar_hpte_invalidate(unsigned long slot, 
unsigned long vpn, int psize, int apsize, int ssize, int local) { unsigned long want_v; unsigned long lpar_rc; unsigned long dummy1, dummy2; pr_devel(" inval : slot=%lx, vpn=%016lx, psize: %d, local: %d\n", slot, vpn, psize, local); want_v = hpte_encode_avpn(vpn, psize, ssize); lpar_rc = plpar_pte_remove(H_AVPN, slot, want_v, &dummy1, &dummy2); if (lpar_rc == H_NOT_FOUND) return; BUG_ON(lpar_rc != H_SUCCESS); } /* * Limit iterations holding pSeries_lpar_tlbie_lock to 3. We also need * to make sure that we avoid bouncing the hypervisor tlbie lock. */ #define PPC64_HUGE_HPTE_BATCH 12 static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot, unsigned long *vpn, int count, int psize, int ssize) { unsigned long param[8]; int i = 0, pix = 0, rc; unsigned long flags = 0; int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); if (lock_tlbie) spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); for (i = 0; i < count; i++) { if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { pSeries_lpar_hpte_invalidate(slot[i], vpn[i], psize, 0, ssize, 0); } else { param[pix] = HBR_REQUEST | HBR_AVPN | slot[i]; param[pix+1] = hpte_encode_avpn(vpn[i], psize, ssize); pix += 2; if (pix == 8) { rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], param[2], param[3], param[4], param[5], param[6], param[7]); BUG_ON(rc != H_SUCCESS); pix = 0; } } } if (pix) { param[pix] = HBR_END; rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], param[2], param[3], param[4], param[5], param[6], param[7]); BUG_ON(rc != H_SUCCESS); } if (lock_tlbie) spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); } static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, unsigned long addr, unsigned char *hpte_slot_array, int psize, int ssize, int local) { int i, index = 0; unsigned long s_addr = addr; unsigned int max_hpte_count, valid; unsigned long vpn_array[PPC64_HUGE_HPTE_BATCH]; unsigned long slot_array[PPC64_HUGE_HPTE_BATCH]; unsigned long shift, hidx, vpn = 0, hash, slot; 
shift = mmu_psize_defs[psize].shift; max_hpte_count = 1U << (PMD_SHIFT - shift); for (i = 0; i < max_hpte_count; i++) { valid = hpte_valid(hpte_slot_array, i); if (!valid) continue; hidx = hpte_hash_index(hpte_slot_array, i); /* get the vpn */ addr = s_addr + (i * (1ul << shift)); vpn = hpt_vpn(addr, vsid, ssize); hash = hpt_hash(vpn, shift, ssize); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; slot_array[index] = slot; vpn_array[index] = vpn; if (index == PPC64_HUGE_HPTE_BATCH - 1) { /* * Now do a bluk invalidate */ __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array, PPC64_HUGE_HPTE_BATCH, psize, ssize); index = 0; } else index++; } if (index) __pSeries_lpar_hugepage_invalidate(slot_array, vpn_array, index, psize, ssize); } static void pSeries_lpar_hpte_removebolted(unsigned long ea, int psize, int ssize) { unsigned long vpn; unsigned long slot, vsid; vsid = get_kernel_vsid(ea, ssize); vpn = hpt_vpn(ea, vsid, ssize); slot = pSeries_lpar_hpte_find(vpn, psize, ssize); BUG_ON(slot == -1); /* * lpar doesn't use the passed actual page size */ pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, 0); } /* * Take a spinlock around flushes to avoid bouncing the hypervisor tlbie * lock. 
*/ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) { unsigned long vpn; unsigned long i, pix, rc; unsigned long flags = 0; struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch); int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); unsigned long param[9]; unsigned long hash, index, shift, hidx, slot; real_pte_t pte; int psize, ssize; if (lock_tlbie) spin_lock_irqsave(&pSeries_lpar_tlbie_lock, flags); psize = batch->psize; ssize = batch->ssize; pix = 0; for (i = 0; i < number; i++) { vpn = batch->vpn[i]; pte = batch->pte[i]; pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { hash = hpt_hash(vpn, shift, ssize); hidx = __rpte_to_hidx(pte, index); if (hidx & _PTEIDX_SECONDARY) hash = ~hash; slot = (hash & htab_hash_mask) * HPTES_PER_GROUP; slot += hidx & _PTEIDX_GROUP_IX; if (!firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { /* * lpar doesn't use the passed actual page size */ pSeries_lpar_hpte_invalidate(slot, vpn, psize, 0, ssize, local); } else { param[pix] = HBR_REQUEST | HBR_AVPN | slot; param[pix+1] = hpte_encode_avpn(vpn, psize, ssize); pix += 2; if (pix == 8) { rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], param[2], param[3], param[4], param[5], param[6], param[7]); BUG_ON(rc != H_SUCCESS); pix = 0; } } } pte_iterate_hashed_end(); } if (pix) { param[pix] = HBR_END; rc = plpar_hcall9(H_BULK_REMOVE, param, param[0], param[1], param[2], param[3], param[4], param[5], param[6], param[7]); BUG_ON(rc != H_SUCCESS); } if (lock_tlbie) spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); } static int __init disable_bulk_remove(char *str) { if (strcmp(str, "off") == 0 && firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { printk(KERN_INFO "Disabling BULK_REMOVE firmware feature"); powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE; } return 1; } __setup("bulk_remove=", disable_bulk_remove); void __init hpte_init_lpar(void) { ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; ppc_md.hpte_updatepp = 
pSeries_lpar_hpte_updatepp; ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp; ppc_md.hpte_insert = pSeries_lpar_hpte_insert; ppc_md.hpte_remove = pSeries_lpar_hpte_remove; ppc_md.hpte_removebolted = pSeries_lpar_hpte_removebolted; ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range; ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear; ppc_md.hugepage_invalidate = pSeries_lpar_hugepage_invalidate; } #ifdef CONFIG_PPC_SMLPAR #define CMO_FREE_HINT_DEFAULT 1 static int cmo_free_hint_flag = CMO_FREE_HINT_DEFAULT; static int __init cmo_free_hint(char *str) { char *parm; parm = strstrip(str); if (strcasecmp(parm, "no") == 0 || strcasecmp(parm, "off") == 0) { printk(KERN_INFO "cmo_free_hint: CMO free page hinting is not active.\n"); cmo_free_hint_flag = 0; return 1; } cmo_free_hint_flag = 1; printk(KERN_INFO "cmo_free_hint: CMO free page hinting is active.\n"); if (strcasecmp(parm, "yes") == 0 || strcasecmp(parm, "on") == 0) return 1; return 0; } __setup("cmo_free_hint=", cmo_free_hint); static void pSeries_set_page_state(struct page *page, int order, unsigned long state) { int i, j; unsigned long cmo_page_sz, addr; cmo_page_sz = cmo_get_page_size(); addr = __pa((unsigned long)page_address(page)); for (i = 0; i < (1 << order); i++, addr += PAGE_SIZE) { for (j = 0; j < PAGE_SIZE; j += cmo_page_sz) plpar_hcall_norets(H_PAGE_INIT, state, addr + j, 0); } } void arch_free_page(struct page *page, int order) { if (!cmo_free_hint_flag || !firmware_has_feature(FW_FEATURE_CMO)) return; pSeries_set_page_state(page, order, H_PAGE_SET_UNUSED); } EXPORT_SYMBOL(arch_free_page); #endif #ifdef CONFIG_TRACEPOINTS #ifdef HAVE_JUMP_LABEL struct static_key hcall_tracepoint_key = STATIC_KEY_INIT; void hcall_tracepoint_regfunc(void) { static_key_slow_inc(&hcall_tracepoint_key); } void hcall_tracepoint_unregfunc(void) { static_key_slow_dec(&hcall_tracepoint_key); } #else /* * We optimise our hcall path by placing hcall_tracepoint_refcount * directly in the TOC so we can check if the 
hcall tracepoints are * enabled via a single load. */ /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ extern long hcall_tracepoint_refcount; void hcall_tracepoint_regfunc(void) { hcall_tracepoint_refcount++; } void hcall_tracepoint_unregfunc(void) { hcall_tracepoint_refcount--; } #endif /* * Since the tracing code might execute hcalls we need to guard against * recursion. One example of this are spinlocks calling H_YIELD on * shared processor partitions. */ static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); void __trace_hcall_entry(unsigned long opcode, unsigned long *args) { unsigned long flags; unsigned int *depth; /* * We cannot call tracepoints inside RCU idle regions which * means we must not trace H_CEDE. */ if (opcode == H_CEDE) return; local_irq_save(flags); depth = this_cpu_ptr(&hcall_trace_depth); if (*depth) goto out; (*depth)++; preempt_disable(); trace_hcall_entry(opcode, args); (*depth)--; out: local_irq_restore(flags); } void __trace_hcall_exit(long opcode, unsigned long retval, unsigned long *retbuf) { unsigned long flags; unsigned int *depth; if (opcode == H_CEDE) return; local_irq_save(flags); depth = this_cpu_ptr(&hcall_trace_depth); if (*depth) goto out; (*depth)++; trace_hcall_exit(opcode, retval, retbuf); preempt_enable(); (*depth)--; out: local_irq_restore(flags); } #endif /** * h_get_mpp * H_GET_MPP hcall returns info in 7 parms */ int h_get_mpp(struct hvcall_mpp_data *mpp_data) { int rc; unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; rc = plpar_hcall9(H_GET_MPP, retbuf); mpp_data->entitled_mem = retbuf[0]; mpp_data->mapped_mem = retbuf[1]; mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff; mpp_data->pool_num = retbuf[2] & 0xffff; mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffffUL; mpp_data->pool_size = retbuf[4]; mpp_data->loan_request = retbuf[5]; mpp_data->backing_mem = retbuf[6]; 
return rc; } EXPORT_SYMBOL(h_get_mpp); int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data) { int rc; unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 }; rc = plpar_hcall9(H_GET_MPP_X, retbuf); mpp_x_data->coalesced_bytes = retbuf[0]; mpp_x_data->pool_coalesced_bytes = retbuf[1]; mpp_x_data->pool_purr_cycles = retbuf[2]; mpp_x_data->pool_spurr_cycles = retbuf[3]; return rc; }
gpl-2.0
sub77/kernel_samsung_matisse
sound/soc/codecs/audience/escore-uart-common.c
449
5364
/* * escore-uart-common.c -- Audience eS705 UART interface * * Copyright 2013 Audience, Inc. * * Author: Matt Lupfer <mlupfer@cardinalpeak.com> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/kthread.h> #include <linux/esxxx.h> #include <linux/serial_core.h> #include <linux/tty.h> #include "escore.h" #include "escore-uart-common.h" int escore_uart_read(struct escore_priv *escore, void *buf, int len) { int rc; mm_segment_t oldfs; dev_dbg(escore->dev, "%s() size %d\n", __func__, len); /* * we may call from user context via char dev, so allow * read buffer in kernel address space */ oldfs = get_fs(); set_fs(KERNEL_DS); rc = escore->uart_dev.ld->ops->read(escore->uart_dev.tty, escore->uart_dev.file, (char __user *)buf, len); /* restore old fs context */ set_fs(oldfs); dev_dbg(escore->dev, "%s() returning %d\n", __func__, rc); return rc; } int escore_uart_write(struct escore_priv *escore, const void *buf, int len) { int rc = 0; int count_remain = len; int bytes_wr = 0; mm_segment_t oldfs; dev_dbg(escore->dev, "%s() size %d\n", __func__, len); /* * we may call from user context via char dev, so allow * read buffer in kernel address space */ oldfs = get_fs(); set_fs(KERNEL_DS); while (count_remain > 0) { /* block until tx buffer space is available */ while (tty_write_room(escore->uart_dev.tty) < UART_TTY_WRITE_SZ) usleep_range(5000, 5000); rc = escore->uart_dev.ld->ops->write(escore->uart_dev.tty, escore->uart_dev.file, buf + bytes_wr, min(UART_TTY_WRITE_SZ, count_remain)); if (rc < 0) { bytes_wr = rc; goto err_out; } bytes_wr += rc; count_remain -= rc; } err_out: /* restore old fs context */ set_fs(oldfs); dev_dbg(escore->dev, "%s() returning %d\n", __func__, rc); return bytes_wr; } 
int escore_uart_cmd(struct escore_priv *escore, u32 cmd, int sr, u32 *resp) { int err; u32 rv; pr_debug("escore: cmd=0x%08x sr=%i\n", cmd, sr); cmd = cpu_to_be32(cmd); err = escore_uart_write(escore, &cmd, sizeof(cmd)); if (err < 0 || sr) return min(err, 0); else if (err > 0) err = 0; /* Maximum response time is 10ms */ usleep_range(10000, 10500); err = escore_uart_read(escore, &rv, sizeof(rv)); if (err > 0) *resp = be32_to_cpu(rv); err = err > 0 ? 0 : err; return err; } int escore_configure_tty(struct tty_struct *tty, u32 bps, int stop) { int rc = 0; struct ktermios termios; termios = *tty->termios; dev_dbg(escore_priv.dev, "%s(): Requesting baud %u\n", __func__, bps); termios.c_cflag &= ~(CBAUD | CSIZE | PARENB); /* clear csize, baud */ termios.c_cflag |= BOTHER; /* allow arbitrary baud */ termios.c_cflag |= CS8; if (stop == 2) termios.c_cflag |= CSTOPB; /* set uart port to raw mode (see termios man page for flags) */ termios.c_iflag &= ~(IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON); termios.c_oflag &= ~(OPOST); termios.c_lflag &= ~(ECHO | ECHONL | ICANON | ISIG | IEXTEN); /* set baud rate */ termios.c_ospeed = bps; termios.c_ispeed = bps; rc = tty_set_termios(tty, &termios); dev_dbg(escore_priv.dev, "%s(): New baud %u\n", __func__, tty->termios->c_ospeed); return rc; } int escore_uart_open(struct escore_priv *escore) { long err = 0; struct file *fp = NULL; unsigned long timeout = jiffies + msecs_to_jiffies(60000); int attempt = 0; /* try to probe tty node every 1 second for 60 seconds */ do { ssleep(1); dev_dbg(escore->dev, "%s(): probing for tty on %s (attempt %d)\n", __func__, UART_TTY_DEVICE_NODE, ++attempt); fp = filp_open(UART_TTY_DEVICE_NODE, O_RDWR | O_NONBLOCK | O_NOCTTY, 0); err = PTR_ERR(fp); } while (time_before(jiffies, timeout) && err == -ENOENT); if (IS_ERR_OR_NULL(fp)) { dev_err(escore->dev, "%s(): UART device node open failed\n", __func__); return -ENODEV; } /* device node found */ dev_dbg(escore->dev, "%s(): successfully 
opened tty\n", __func__); /* set uart_dev members */ escore_priv.uart_dev.file = fp; escore_priv.uart_dev.tty = ((struct tty_file_private *)fp->private_data)->tty; escore_priv.uart_dev.ld = tty_ldisc_ref( escore_priv.uart_dev.tty); /* set baudrate to FW baud (common case) */ escore_configure_tty(escore_priv.uart_dev.tty, UART_TTY_BAUD_RATE_FIRMWARE, UART_TTY_STOP_BITS); return 0; } int escore_uart_close(struct escore_priv *escore) { tty_ldisc_deref(escore->uart_dev.ld); filp_close(escore->uart_dev.file, 0); return 0; } int escore_uart_wait(struct escore_priv *escore) { /* wait on tty read queue until awoken or for 50ms */ wait_event_interruptible_timeout( escore->uart_dev.tty->read_wait, escore->uart_dev.tty->read_cnt, msecs_to_jiffies(50)); return 0; } struct es_stream_device es_uart_streamdev = { .open = escore_uart_open, .read = escore_uart_read, .close = escore_uart_close, .wait = escore_uart_wait, .intf = ES_UART_INTF, }; MODULE_DESCRIPTION("ASoC ESCORE driver"); MODULE_AUTHOR("Greg Clemson <gclemson@audience.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:escore-codec");
gpl-2.0
HRTKernel/Hacker_Kernel_SM-G92X_MM
drivers/acpi/acpica/evxfevnt.c
2241
9968
/****************************************************************************** * * Module Name: evxfevnt - External Interfaces, ACPI event disable/enable * *****************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <linux/export.h> #include <acpi/acpi.h> #include "accommon.h" #include "actables.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evxfevnt") #if (!ACPI_REDUCED_HARDWARE) /* Entire module */ /******************************************************************************* * * FUNCTION: acpi_enable * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Transfers the system into ACPI mode. * ******************************************************************************/ acpi_status acpi_enable(void) { acpi_status status; int retry; ACPI_FUNCTION_TRACE(acpi_enable); /* ACPI tables must be present */ if (!acpi_tb_tables_loaded()) { return_ACPI_STATUS(AE_NO_ACPI_TABLES); } /* If the Hardware Reduced flag is set, machine is always in acpi mode */ if (acpi_gbl_reduced_hardware) { return_ACPI_STATUS(AE_OK); } /* Check current mode */ if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { ACPI_DEBUG_PRINT((ACPI_DB_INIT, "System is already in ACPI mode\n")); return_ACPI_STATUS(AE_OK); } /* Transition to ACPI mode */ status = acpi_hw_set_mode(ACPI_SYS_MODE_ACPI); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not transition to ACPI mode")); return_ACPI_STATUS(status); } /* Sanity check that transition succeeded */ for (retry = 0; retry < 30000; ++retry) { if (acpi_hw_get_mode() == ACPI_SYS_MODE_ACPI) { if (retry != 0) ACPI_WARNING((AE_INFO, "Platform took > %d00 usec to enter ACPI mode", retry)); return_ACPI_STATUS(AE_OK); } acpi_os_stall(100); /* 100 usec */ } 
ACPI_ERROR((AE_INFO, "Hardware did not enter ACPI mode")); return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); } ACPI_EXPORT_SYMBOL(acpi_enable) /******************************************************************************* * * FUNCTION: acpi_disable * * PARAMETERS: None * * RETURN: Status * * DESCRIPTION: Transfers the system into LEGACY (non-ACPI) mode. * ******************************************************************************/ acpi_status acpi_disable(void) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(acpi_disable); /* If the Hardware Reduced flag is set, machine is always in acpi mode */ if (acpi_gbl_reduced_hardware) { return_ACPI_STATUS(AE_OK); } if (acpi_hw_get_mode() == ACPI_SYS_MODE_LEGACY) { ACPI_DEBUG_PRINT((ACPI_DB_INIT, "System is already in legacy (non-ACPI) mode\n")); } else { /* Transition to LEGACY mode */ status = acpi_hw_set_mode(ACPI_SYS_MODE_LEGACY); if (ACPI_FAILURE(status)) { ACPI_ERROR((AE_INFO, "Could not exit ACPI mode to legacy mode")); return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_INIT, "ACPI mode disabled\n")); } return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_disable) /******************************************************************************* * * FUNCTION: acpi_enable_event * * PARAMETERS: event - The fixed eventto be enabled * flags - Reserved * * RETURN: Status * * DESCRIPTION: Enable an ACPI event (fixed) * ******************************************************************************/ acpi_status acpi_enable_event(u32 event, u32 flags) { acpi_status status = AE_OK; u32 value; ACPI_FUNCTION_TRACE(acpi_enable_event); /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* * Enable the requested fixed event (by writing a one to the enable * register bit) */ status = acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. 
enable_register_id, ACPI_ENABLE_EVENT); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Make sure that the hardware responded */ status = acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. enable_register_id, &value); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (value != 1) { ACPI_ERROR((AE_INFO, "Could not enable %s event", acpi_ut_get_event_name(event))); return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); } return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_enable_event) /******************************************************************************* * * FUNCTION: acpi_disable_event * * PARAMETERS: event - The fixed event to be disabled * flags - Reserved * * RETURN: Status * * DESCRIPTION: Disable an ACPI event (fixed) * ******************************************************************************/ acpi_status acpi_disable_event(u32 event, u32 flags) { acpi_status status = AE_OK; u32 value; ACPI_FUNCTION_TRACE(acpi_disable_event); /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* * Disable the requested fixed event (by writing a zero to the enable * register bit) */ status = acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. enable_register_id, ACPI_DISABLE_EVENT); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } status = acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. 
enable_register_id, &value); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } if (value != 0) { ACPI_ERROR((AE_INFO, "Could not disable %s events", acpi_ut_get_event_name(event))); return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); } return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_disable_event) /******************************************************************************* * * FUNCTION: acpi_clear_event * * PARAMETERS: event - The fixed event to be cleared * * RETURN: Status * * DESCRIPTION: Clear an ACPI event (fixed) * ******************************************************************************/ acpi_status acpi_clear_event(u32 event) { acpi_status status = AE_OK; ACPI_FUNCTION_TRACE(acpi_clear_event); /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* * Clear the requested fixed event (By writing a one to the status * register bit) */ status = acpi_write_bit_register(acpi_gbl_fixed_event_info[event]. status_register_id, ACPI_CLEAR_STATUS); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_clear_event) /******************************************************************************* * * FUNCTION: acpi_get_event_status * * PARAMETERS: event - The fixed event * event_status - Where the current status of the event will * be returned * * RETURN: Status * * DESCRIPTION: Obtains and returns the current status of the event * ******************************************************************************/ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) { acpi_status status = AE_OK; u32 value; ACPI_FUNCTION_TRACE(acpi_get_event_status); if (!event_status) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Decode the Fixed Event */ if (event > ACPI_EVENT_MAX) { return_ACPI_STATUS(AE_BAD_PARAMETER); } /* Get the status of the requested fixed event */ status = acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. 
enable_register_id, &value); if (ACPI_FAILURE(status)) return_ACPI_STATUS(status); *event_status = value; status = acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. status_register_id, &value); if (ACPI_FAILURE(status)) return_ACPI_STATUS(status); if (value) *event_status |= ACPI_EVENT_FLAG_SET; if (acpi_gbl_fixed_event_handlers[event].handler) *event_status |= ACPI_EVENT_FLAG_HANDLE; return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_get_event_status) #endif /* !ACPI_REDUCED_HARDWARE */
gpl-2.0
dtsd/zte_blade_s6_lollipop_kernel
drivers/video/backlight/kb3886_bl.c
2241
4916
/* * Backlight Driver for the KB3886 Backlight * * Copyright (c) 2007-2008 Claudio Nieder * * Based on corgi_bl.c by Richard Purdie and kb3886 driver by Robert Woerle * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/delay.h> #include <linux/dmi.h> #define KB3886_PARENT 0x64 #define KB3886_IO 0x60 #define KB3886_ADC_DAC_PWM 0xC4 #define KB3886_PWM0_WRITE 0x81 #define KB3886_PWM0_READ 0x41 static DEFINE_MUTEX(bl_mutex); static void kb3886_bl_set_intensity(int intensity) { mutex_lock(&bl_mutex); intensity = intensity&0xff; outb(KB3886_ADC_DAC_PWM, KB3886_PARENT); usleep_range(10000, 11000); outb(KB3886_PWM0_WRITE, KB3886_IO); usleep_range(10000, 11000); outb(intensity, KB3886_IO); mutex_unlock(&bl_mutex); } struct kb3886bl_machinfo { int max_intensity; int default_intensity; int limit_mask; void (*set_bl_intensity)(int intensity); }; static struct kb3886bl_machinfo kb3886_bl_machinfo = { .max_intensity = 0xff, .default_intensity = 0xa0, .limit_mask = 0x7f, .set_bl_intensity = kb3886_bl_set_intensity, }; static struct platform_device kb3886bl_device = { .name = "kb3886-bl", .dev = { .platform_data = &kb3886_bl_machinfo, }, .id = -1, }; static struct platform_device *devices[] __initdata = { &kb3886bl_device, }; /* * Back to driver */ static int kb3886bl_intensity; static struct backlight_device *kb3886_backlight_device; static struct kb3886bl_machinfo *bl_machinfo; static unsigned long kb3886bl_flags; #define KB3886BL_SUSPENDED 0x01 static struct dmi_system_id __initdata kb3886bl_device_table[] = { { .ident = "Sahara Touch-iT", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "SDV"), DMI_MATCH(DMI_PRODUCT_NAME, "iTouch T201"), }, }, { } }; 
static int kb3886bl_send_intensity(struct backlight_device *bd) { int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; if (kb3886bl_flags & KB3886BL_SUSPENDED) intensity = 0; bl_machinfo->set_bl_intensity(intensity); kb3886bl_intensity = intensity; return 0; } #ifdef CONFIG_PM_SLEEP static int kb3886bl_suspend(struct device *dev) { struct backlight_device *bd = dev_get_drvdata(dev); kb3886bl_flags |= KB3886BL_SUSPENDED; backlight_update_status(bd); return 0; } static int kb3886bl_resume(struct device *dev) { struct backlight_device *bd = dev_get_drvdata(dev); kb3886bl_flags &= ~KB3886BL_SUSPENDED; backlight_update_status(bd); return 0; } #endif static SIMPLE_DEV_PM_OPS(kb3886bl_pm_ops, kb3886bl_suspend, kb3886bl_resume); static int kb3886bl_get_intensity(struct backlight_device *bd) { return kb3886bl_intensity; } static const struct backlight_ops kb3886bl_ops = { .get_brightness = kb3886bl_get_intensity, .update_status = kb3886bl_send_intensity, }; static int kb3886bl_probe(struct platform_device *pdev) { struct backlight_properties props; struct kb3886bl_machinfo *machinfo = pdev->dev.platform_data; bl_machinfo = machinfo; if (!machinfo->limit_mask) machinfo->limit_mask = -1; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = machinfo->max_intensity; kb3886_backlight_device = backlight_device_register("kb3886-bl", &pdev->dev, NULL, &kb3886bl_ops, &props); if (IS_ERR(kb3886_backlight_device)) return PTR_ERR(kb3886_backlight_device); platform_set_drvdata(pdev, kb3886_backlight_device); kb3886_backlight_device->props.power = FB_BLANK_UNBLANK; kb3886_backlight_device->props.brightness = machinfo->default_intensity; backlight_update_status(kb3886_backlight_device); return 0; } static int kb3886bl_remove(struct platform_device *pdev) { struct backlight_device *bd = platform_get_drvdata(pdev); 
backlight_device_unregister(bd); return 0; } static struct platform_driver kb3886bl_driver = { .probe = kb3886bl_probe, .remove = kb3886bl_remove, .driver = { .name = "kb3886-bl", .pm = &kb3886bl_pm_ops, }, }; static int __init kb3886_init(void) { if (!dmi_check_system(kb3886bl_device_table)) return -ENODEV; platform_add_devices(devices, ARRAY_SIZE(devices)); return platform_driver_register(&kb3886bl_driver); } static void __exit kb3886_exit(void) { platform_driver_unregister(&kb3886bl_driver); } module_init(kb3886_init); module_exit(kb3886_exit); MODULE_AUTHOR("Claudio Nieder <private@claudio.ch>"); MODULE_DESCRIPTION("Tabletkiosk Sahara Touch-iT Backlight Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("dmi:*:svnSDV:pniTouchT201:*");
gpl-2.0
namagi/android_kernel_motorola_msm8960-common
drivers/gpu/drm/savage/savage_drv.c
2497
2622
/* savage_drv.c -- Savage driver for Linux * * Copyright 2004 Felix Kuehling * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sub license, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ #include "drmP.h" #include "savage_drm.h" #include "savage_drv.h" #include "drm_pciids.h" static struct pci_device_id pciidlist[] = { savage_PCI_IDS }; static struct drm_driver driver = { .driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_DMA | DRIVER_PCI_DMA, .dev_priv_size = sizeof(drm_savage_buf_priv_t), .load = savage_driver_load, .firstopen = savage_driver_firstopen, .lastclose = savage_driver_lastclose, .unload = savage_driver_unload, .reclaim_buffers = savage_reclaim_buffers, .ioctls = savage_ioctls, .dma_ioctl = savage_bci_buffers, .fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, .mmap = drm_mmap, .poll = drm_poll, .fasync = drm_fasync, .llseek = noop_llseek, }, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; static struct pci_driver savage_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, }; static int __init savage_init(void) { driver.num_ioctls = savage_max_ioctl; return drm_pci_init(&driver, &savage_pci_driver); } static void __exit savage_exit(void) { drm_pci_exit(&driver, &savage_pci_driver); } module_init(savage_init); module_exit(savage_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights");
gpl-2.0
finnq/android_kernel_asus_tf101
arch/microblaze/kernel/unwind.c
3009
8962
/* * Backtrace support for Microblaze * * Copyright (C) 2010 Digital Design Corporation * * Based on arch/sh/kernel/cpu/sh5/unwind.c code which is: * Copyright (C) 2004 Paul Mundt * Copyright (C) 2004 Richard Curnow * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ /* #define DEBUG 1 */ #include <linux/kallsyms.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/stacktrace.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/io.h> #include <asm/sections.h> #include <asm/exceptions.h> #include <asm/unwind.h> struct stack_trace; /* * On Microblaze, finding the previous stack frame is a little tricky. * At this writing (3/2010), Microblaze does not support CONFIG_FRAME_POINTERS, * and even if it did, gcc (4.1.2) does not store the frame pointer at * a consistent offset within each frame. To determine frame size, it is * necessary to search for the assembly instruction that creates or reclaims * the frame and extract the size from it. * * Microblaze stores the stack pointer in r1, and creates a frame via * * addik r1, r1, -FRAME_SIZE * * The frame is reclaimed via * * addik r1, r1, FRAME_SIZE * * Frame creation occurs at or near the top of a function. * Depending on the compiler, reclaim may occur at the end, or before * a mid-function return. * * A stack frame is usually not created in a leaf function. * */ /** * get_frame_size - Extract the stack adjustment from an * "addik r1, r1, adjust" instruction * @instr : Microblaze instruction * * Return - Number of stack bytes the instruction reserves or reclaims */ inline long get_frame_size(unsigned long instr) { return abs((s16)(instr & 0xFFFF)); } /** * find_frame_creation - Search backward to find the instruction that creates * the stack frame (hopefully, for the same function the * initial PC is in). 
* @pc : Program counter at which to begin the search * * Return - PC at which stack frame creation occurs * NULL if this cannot be found, i.e. a leaf function */ static unsigned long *find_frame_creation(unsigned long *pc) { int i; /* NOTE: Distance to search is arbitrary * 250 works well for most things, * 750 picks up things like tcp_recvmsg(), * 1000 needed for fat_fill_super() */ for (i = 0; i < 1000; i++, pc--) { unsigned long instr; s16 frame_size; if (!kernel_text_address((unsigned long) pc)) return NULL; instr = *pc; /* addik r1, r1, foo ? */ if ((instr & 0xFFFF0000) != 0x30210000) continue; /* No */ frame_size = get_frame_size(instr); if ((frame_size < 8) || (frame_size & 3)) { pr_debug(" Invalid frame size %d at 0x%p\n", frame_size, pc); return NULL; } pr_debug(" Found frame creation at 0x%p, size %d\n", pc, frame_size); return pc; } return NULL; } /** * lookup_prev_stack_frame - Find the stack frame of the previous function. * @fp : Frame (stack) pointer for current function * @pc : Program counter within current function * @leaf_return : r15 value within current function. If the current function * is a leaf, this is the caller's return address. 
* @pprev_fp : On exit, set to frame (stack) pointer for previous function * @pprev_pc : On exit, set to current function caller's return address * * Return - 0 on success, -EINVAL if the previous frame cannot be found */ static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc, unsigned long leaf_return, unsigned long *pprev_fp, unsigned long *pprev_pc) { unsigned long *prologue = NULL; /* _switch_to is a special leaf function */ if (pc != (unsigned long) &_switch_to) prologue = find_frame_creation((unsigned long *)pc); if (prologue) { long frame_size = get_frame_size(*prologue); *pprev_fp = fp + frame_size; *pprev_pc = *(unsigned long *)fp; } else { if (!leaf_return) return -EINVAL; *pprev_pc = leaf_return; *pprev_fp = fp; } /* NOTE: don't check kernel_text_address here, to allow display * of userland return address */ return (!*pprev_pc || (*pprev_pc & 3)) ? -EINVAL : 0; } static void microblaze_unwind_inner(struct task_struct *task, unsigned long pc, unsigned long fp, unsigned long leaf_return, struct stack_trace *trace); /** * unwind_trap - Unwind through a system trap, that stored previous state * on the stack. */ #ifdef CONFIG_MMU static inline void unwind_trap(struct task_struct *task, unsigned long pc, unsigned long fp, struct stack_trace *trace) { /* To be implemented */ } #else static inline void unwind_trap(struct task_struct *task, unsigned long pc, unsigned long fp, struct stack_trace *trace) { const struct pt_regs *regs = (const struct pt_regs *) fp; microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace); } #endif /** * microblaze_unwind_inner - Unwind the stack from the specified point * @task : Task whose stack we are to unwind (may be NULL) * @pc : Program counter from which we start unwinding * @fp : Frame (stack) pointer from which we start unwinding * @leaf_return : Value of r15 at pc. If the function is a leaf, this is * the caller's return address. * @trace : Where to store stack backtrace (PC values). 
* NULL == print backtrace to kernel log */ static void microblaze_unwind_inner(struct task_struct *task, unsigned long pc, unsigned long fp, unsigned long leaf_return, struct stack_trace *trace) { int ofs = 0; pr_debug(" Unwinding with PC=%p, FP=%p\n", (void *)pc, (void *)fp); if (!pc || !fp || (pc & 3) || (fp & 3)) { pr_debug(" Invalid state for unwind, aborting\n"); return; } for (; pc != 0;) { unsigned long next_fp, next_pc = 0; unsigned long return_to = pc + 2 * sizeof(unsigned long); const struct trap_handler_info *handler = &microblaze_trap_handlers; /* Is previous function the HW exception handler? */ if ((return_to >= (unsigned long)&_hw_exception_handler) &&(return_to < (unsigned long)&ex_handler_unhandled)) { /* * HW exception handler doesn't save all registers, * so we open-code a special case of unwind_trap() */ #ifndef CONFIG_MMU const struct pt_regs *regs = (const struct pt_regs *) fp; #endif pr_info("HW EXCEPTION\n"); #ifndef CONFIG_MMU microblaze_unwind_inner(task, regs->r17 - 4, fp + EX_HANDLER_STACK_SIZ, regs->r15, trace); #endif return; } /* Is previous function a trap handler? */ for (; handler->start_addr; ++handler) { if ((return_to >= handler->start_addr) && (return_to <= handler->end_addr)) { if (!trace) pr_info("%s\n", handler->trap_name); unwind_trap(task, pc, fp, trace); return; } } pc -= ofs; if (trace) { #ifdef CONFIG_STACKTRACE if (trace->skip > 0) trace->skip--; else trace->entries[trace->nr_entries++] = pc; if (trace->nr_entries >= trace->max_entries) break; #endif } else { /* Have we reached userland? 
*/ if (unlikely(pc == task_pt_regs(task)->pc)) { pr_info("[<%p>] PID %lu [%s]\n", (void *) pc, (unsigned long) task->pid, task->comm); break; } else print_ip_sym(pc); } /* Stop when we reach anything not part of the kernel */ if (!kernel_text_address(pc)) break; if (lookup_prev_stack_frame(fp, pc, leaf_return, &next_fp, &next_pc) == 0) { ofs = sizeof(unsigned long); pc = next_pc & ~3; fp = next_fp; leaf_return = 0; } else { pr_debug(" Failed to find previous stack frame\n"); break; } pr_debug(" Next PC=%p, next FP=%p\n", (void *)next_pc, (void *)next_fp); } } /** * microblaze_unwind - Stack unwinder for Microblaze (external entry point) * @task : Task whose stack we are to unwind (NULL == current) * @trace : Where to store stack backtrace (PC values). * NULL == print backtrace to kernel log */ void microblaze_unwind(struct task_struct *task, struct stack_trace *trace) { if (task) { if (task == current) { const struct pt_regs *regs = task_pt_regs(task); microblaze_unwind_inner(task, regs->pc, regs->r1, regs->r15, trace); } else { struct thread_info *thread_info = (struct thread_info *)(task->stack); const struct cpu_context *cpu_context = &thread_info->cpu_context; microblaze_unwind_inner(task, (unsigned long) &_switch_to, cpu_context->r1, cpu_context->r15, trace); } } else { unsigned long pc, fp; __asm__ __volatile__ ("or %0, r1, r0" : "=r" (fp)); __asm__ __volatile__ ( "brlid %0, 0f;" "nop;" "0:" : "=r" (pc) ); /* Since we are not a leaf function, use leaf_return = 0 */ microblaze_unwind_inner(current, pc, fp, 0, trace); } }
gpl-2.0
tectas/android_kernel_lge_msm8974ac
net/dccp/ccids/ccid3.c
3265
25695
/* * Copyright (c) 2007 The University of Aberdeen, Scotland, UK * Copyright (c) 2005-7 The University of Waikato, Hamilton, New Zealand. * Copyright (c) 2005-7 Ian McDonald <ian.mcdonald@jandi.co.nz> * * An implementation of the DCCP protocol * * This code has been developed by the University of Waikato WAND * research group. For further information please see http://www.wand.net.nz/ * * This code also uses code from Lulea University, rereleased as GPL by its * authors: * Copyright (c) 2003 Nils-Erik Mattsson, Joacim Haggmark, Magnus Erixzon * * Changes to meet Linux coding standards, to make it meet latest ccid3 draft * and to make it work as a loadable module in the DCCP stack written by * Arnaldo Carvalho de Melo <acme@conectiva.com.br>. * * Copyright (c) 2005 Arnaldo Carvalho de Melo <acme@conectiva.com.br> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "../dccp.h" #include "ccid3.h" #include <asm/unaligned.h> #ifdef CONFIG_IP_DCCP_CCID3_DEBUG static bool ccid3_debug; #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) #else #define ccid3_pr_debug(format, a...) 
#endif /* * Transmitter Half-Connection Routines */ #ifdef CONFIG_IP_DCCP_CCID3_DEBUG static const char *ccid3_tx_state_name(enum ccid3_hc_tx_states state) { static const char *const ccid3_state_names[] = { [TFRC_SSTATE_NO_SENT] = "NO_SENT", [TFRC_SSTATE_NO_FBACK] = "NO_FBACK", [TFRC_SSTATE_FBACK] = "FBACK", }; return ccid3_state_names[state]; } #endif static void ccid3_hc_tx_set_state(struct sock *sk, enum ccid3_hc_tx_states state) { struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); enum ccid3_hc_tx_states oldstate = hc->tx_state; ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", dccp_role(sk), sk, ccid3_tx_state_name(oldstate), ccid3_tx_state_name(state)); WARN_ON(state == oldstate); hc->tx_state = state; } /* * Compute the initial sending rate X_init in the manner of RFC 3390: * * X_init = min(4 * s, max(2 * s, 4380 bytes)) / RTT * * Note that RFC 3390 uses MSS, RFC 4342 refers to RFC 3390, and rfc3448bis * (rev-02) clarifies the use of RFC 3390 with regard to the above formula. * For consistency with other parts of the code, X_init is scaled by 2^6. */ static inline u64 rfc3390_initial_rate(struct sock *sk) { const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); const __u32 w_init = clamp_t(__u32, 4380U, 2 * hc->tx_s, 4 * hc->tx_s); return scaled_div(w_init << 6, hc->tx_rtt); } /** * ccid3_update_send_interval - Calculate new t_ipi = s / X_inst * This respects the granularity of X_inst (64 * bytes/second). */ static void ccid3_update_send_interval(struct ccid3_hc_tx_sock *hc) { hc->tx_t_ipi = scaled_div32(((u64)hc->tx_s) << 6, hc->tx_x); DCCP_BUG_ON(hc->tx_t_ipi == 0); ccid3_pr_debug("t_ipi=%u, s=%u, X=%u\n", hc->tx_t_ipi, hc->tx_s, (unsigned)(hc->tx_x >> 6)); } static u32 ccid3_hc_tx_idle_rtt(struct ccid3_hc_tx_sock *hc, ktime_t now) { u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count); return delta / hc->tx_rtt; } /** * ccid3_hc_tx_update_x - Update allowed sending rate X * @stamp: most recent time if available - can be left NULL. 
* This function tracks draft rfc3448bis, check there for latest details. * * Note: X and X_recv are both stored in units of 64 * bytes/second, to support * fine-grained resolution of sending rates. This requires scaling by 2^6 * throughout the code. Only X_calc is unscaled (in bytes/second). * */ static void ccid3_hc_tx_update_x(struct sock *sk, ktime_t *stamp) { struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); __u64 min_rate = 2 * hc->tx_x_recv; const __u64 old_x = hc->tx_x; ktime_t now = stamp ? *stamp : ktime_get_real(); /* * Handle IDLE periods: do not reduce below RFC3390 initial sending rate * when idling [RFC 4342, 5.1]. Definition of idling is from rfc3448bis: * a sender is idle if it has not sent anything over a 2-RTT-period. * For consistency with X and X_recv, min_rate is also scaled by 2^6. */ if (ccid3_hc_tx_idle_rtt(hc, now) >= 2) { min_rate = rfc3390_initial_rate(sk); min_rate = max(min_rate, 2 * hc->tx_x_recv); } if (hc->tx_p > 0) { hc->tx_x = min(((__u64)hc->tx_x_calc) << 6, min_rate); hc->tx_x = max(hc->tx_x, (((__u64)hc->tx_s) << 6) / TFRC_T_MBI); } else if (ktime_us_delta(now, hc->tx_t_ld) - (s64)hc->tx_rtt >= 0) { hc->tx_x = min(2 * hc->tx_x, min_rate); hc->tx_x = max(hc->tx_x, scaled_div(((__u64)hc->tx_s) << 6, hc->tx_rtt)); hc->tx_t_ld = now; } if (hc->tx_x != old_x) { ccid3_pr_debug("X_prev=%u, X_now=%u, X_calc=%u, " "X_recv=%u\n", (unsigned)(old_x >> 6), (unsigned)(hc->tx_x >> 6), hc->tx_x_calc, (unsigned)(hc->tx_x_recv >> 6)); ccid3_update_send_interval(hc); } } /* * Track the mean packet size `s' (cf. RFC 4342, 5.3 and RFC 3448, 4.1) * @len: DCCP packet payload size in bytes */ static inline void ccid3_hc_tx_update_s(struct ccid3_hc_tx_sock *hc, int len) { const u16 old_s = hc->tx_s; hc->tx_s = tfrc_ewma(hc->tx_s, len, 9); if (hc->tx_s != old_s) ccid3_update_send_interval(hc); } /* * Update Window Counter using the algorithm from [RFC 4342, 8.1]. * As elsewhere, RTT > 0 is assumed by using dccp_sample_rtt(). 
*/ static inline void ccid3_hc_tx_update_win_count(struct ccid3_hc_tx_sock *hc, ktime_t now) { u32 delta = ktime_us_delta(now, hc->tx_t_last_win_count), quarter_rtts = (4 * delta) / hc->tx_rtt; if (quarter_rtts > 0) { hc->tx_t_last_win_count = now; hc->tx_last_win_count += min(quarter_rtts, 5U); hc->tx_last_win_count &= 0xF; /* mod 16 */ } } static void ccid3_hc_tx_no_feedback_timer(unsigned long data) { struct sock *sk = (struct sock *)data; struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); unsigned long t_nfb = USEC_PER_SEC / 5; bh_lock_sock(sk); if (sock_owned_by_user(sk)) { /* Try again later. */ /* XXX: set some sensible MIB */ goto restart_timer; } ccid3_pr_debug("%s(%p, state=%s) - entry\n", dccp_role(sk), sk, ccid3_tx_state_name(hc->tx_state)); /* Ignore and do not restart after leaving the established state */ if ((1 << sk->sk_state) & ~(DCCPF_OPEN | DCCPF_PARTOPEN)) goto out; /* Reset feedback state to "no feedback received" */ if (hc->tx_state == TFRC_SSTATE_FBACK) ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); /* * Determine new allowed sending rate X as per draft rfc3448bis-00, 4.4 * RTO is 0 if and only if no feedback has been received yet. */ if (hc->tx_t_rto == 0 || hc->tx_p == 0) { /* halve send rate directly */ hc->tx_x = max(hc->tx_x / 2, (((__u64)hc->tx_s) << 6) / TFRC_T_MBI); ccid3_update_send_interval(hc); } else { /* * Modify the cached value of X_recv * * If (X_calc > 2 * X_recv) * X_recv = max(X_recv / 2, s / (2 * t_mbi)); * Else * X_recv = X_calc / 4; * * Note that X_recv is scaled by 2^6 while X_calc is not */ if (hc->tx_x_calc > (hc->tx_x_recv >> 5)) hc->tx_x_recv = max(hc->tx_x_recv / 2, (((__u64)hc->tx_s) << 6) / (2*TFRC_T_MBI)); else { hc->tx_x_recv = hc->tx_x_calc; hc->tx_x_recv <<= 4; } ccid3_hc_tx_update_x(sk, NULL); } ccid3_pr_debug("Reduced X to %llu/64 bytes/sec\n", (unsigned long long)hc->tx_x); /* * Set new timeout for the nofeedback timer. * See comments in packet_recv() regarding the value of t_RTO. 
*/ if (unlikely(hc->tx_t_rto == 0)) /* no feedback received yet */ t_nfb = TFRC_INITIAL_TIMEOUT; else t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi); restart_timer: sk_reset_timer(sk, &hc->tx_no_feedback_timer, jiffies + usecs_to_jiffies(t_nfb)); out: bh_unlock_sock(sk); sock_put(sk); } /** * ccid3_hc_tx_send_packet - Delay-based dequeueing of TX packets * @skb: next packet candidate to send on @sk * This function uses the convention of ccid_packet_dequeue_eval() and * returns a millisecond-delay value between 0 and t_mbi = 64000 msec. */ static int ccid3_hc_tx_send_packet(struct sock *sk, struct sk_buff *skb) { struct dccp_sock *dp = dccp_sk(sk); struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); ktime_t now = ktime_get_real(); s64 delay; /* * This function is called only for Data and DataAck packets. Sending * zero-sized Data(Ack)s is theoretically possible, but for congestion * control this case is pathological - ignore it. */ if (unlikely(skb->len == 0)) return -EBADMSG; if (hc->tx_state == TFRC_SSTATE_NO_SENT) { sk_reset_timer(sk, &hc->tx_no_feedback_timer, (jiffies + usecs_to_jiffies(TFRC_INITIAL_TIMEOUT))); hc->tx_last_win_count = 0; hc->tx_t_last_win_count = now; /* Set t_0 for initial packet */ hc->tx_t_nom = now; hc->tx_s = skb->len; /* * Use initial RTT sample when available: recommended by erratum * to RFC 4342. This implements the initialisation procedure of * draft rfc3448bis, section 4.2. Remember, X is scaled by 2^6. */ if (dp->dccps_syn_rtt) { ccid3_pr_debug("SYN RTT = %uus\n", dp->dccps_syn_rtt); hc->tx_rtt = dp->dccps_syn_rtt; hc->tx_x = rfc3390_initial_rate(sk); hc->tx_t_ld = now; } else { /* * Sender does not have RTT sample: * - set fallback RTT (RFC 4340, 3.4) since a RTT value * is needed in several parts (e.g. window counter); * - set sending rate X_pps = 1pps as per RFC 3448, 4.2. 
*/ hc->tx_rtt = DCCP_FALLBACK_RTT; hc->tx_x = hc->tx_s; hc->tx_x <<= 6; } ccid3_update_send_interval(hc); ccid3_hc_tx_set_state(sk, TFRC_SSTATE_NO_FBACK); } else { delay = ktime_us_delta(hc->tx_t_nom, now); ccid3_pr_debug("delay=%ld\n", (long)delay); /* * Scheduling of packet transmissions (RFC 5348, 8.3) * * if (t_now > t_nom - delta) * // send the packet now * else * // send the packet in (t_nom - t_now) milliseconds. */ if (delay >= TFRC_T_DELTA) return (u32)delay / USEC_PER_MSEC; ccid3_hc_tx_update_win_count(hc, now); } /* prepare to send now (add options etc.) */ dp->dccps_hc_tx_insert_options = 1; DCCP_SKB_CB(skb)->dccpd_ccval = hc->tx_last_win_count; /* set the nominal send time for the next following packet */ hc->tx_t_nom = ktime_add_us(hc->tx_t_nom, hc->tx_t_ipi); return CCID_PACKET_SEND_AT_ONCE; } static void ccid3_hc_tx_packet_sent(struct sock *sk, unsigned int len) { struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); ccid3_hc_tx_update_s(hc, len); if (tfrc_tx_hist_add(&hc->tx_hist, dccp_sk(sk)->dccps_gss)) DCCP_CRIT("packet history - out of memory!"); } static void ccid3_hc_tx_packet_recv(struct sock *sk, struct sk_buff *skb) { struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); struct tfrc_tx_hist_entry *acked; ktime_t now; unsigned long t_nfb; u32 r_sample; /* we are only interested in ACKs */ if (!(DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_ACK || DCCP_SKB_CB(skb)->dccpd_type == DCCP_PKT_DATAACK)) return; /* * Locate the acknowledged packet in the TX history. * * Returning "entry not found" here can for instance happen when * - the host has not sent out anything (e.g. a passive server), * - the Ack is outdated (packet with higher Ack number was received), * - it is a bogus Ack (for a packet not sent on this connection). 
*/ acked = tfrc_tx_hist_find_entry(hc->tx_hist, dccp_hdr_ack_seq(skb)); if (acked == NULL) return; /* For the sake of RTT sampling, ignore/remove all older entries */ tfrc_tx_hist_purge(&acked->next); /* Update the moving average for the RTT estimate (RFC 3448, 4.3) */ now = ktime_get_real(); r_sample = dccp_sample_rtt(sk, ktime_us_delta(now, acked->stamp)); hc->tx_rtt = tfrc_ewma(hc->tx_rtt, r_sample, 9); /* * Update allowed sending rate X as per draft rfc3448bis-00, 4.2/3 */ if (hc->tx_state == TFRC_SSTATE_NO_FBACK) { ccid3_hc_tx_set_state(sk, TFRC_SSTATE_FBACK); if (hc->tx_t_rto == 0) { /* * Initial feedback packet: Larger Initial Windows (4.2) */ hc->tx_x = rfc3390_initial_rate(sk); hc->tx_t_ld = now; ccid3_update_send_interval(hc); goto done_computing_x; } else if (hc->tx_p == 0) { /* * First feedback after nofeedback timer expiry (4.3) */ goto done_computing_x; } } /* Update sending rate (step 4 of [RFC 3448, 4.3]) */ if (hc->tx_p > 0) hc->tx_x_calc = tfrc_calc_x(hc->tx_s, hc->tx_rtt, hc->tx_p); ccid3_hc_tx_update_x(sk, &now); done_computing_x: ccid3_pr_debug("%s(%p), RTT=%uus (sample=%uus), s=%u, " "p=%u, X_calc=%u, X_recv=%u, X=%u\n", dccp_role(sk), sk, hc->tx_rtt, r_sample, hc->tx_s, hc->tx_p, hc->tx_x_calc, (unsigned)(hc->tx_x_recv >> 6), (unsigned)(hc->tx_x >> 6)); /* unschedule no feedback timer */ sk_stop_timer(sk, &hc->tx_no_feedback_timer); /* * As we have calculated new ipi, delta, t_nom it is possible * that we now can send a packet, so wake up dccp_wait_for_ccid */ sk->sk_write_space(sk); /* * Update timeout interval for the nofeedback timer. In order to control * rate halving on networks with very low RTTs (<= 1 ms), use per-route * tunable RTAX_RTO_MIN value as the lower bound. 
*/ hc->tx_t_rto = max_t(u32, 4 * hc->tx_rtt, USEC_PER_SEC/HZ * tcp_rto_min(sk)); /* * Schedule no feedback timer to expire in * max(t_RTO, 2 * s/X) = max(t_RTO, 2 * t_ipi) */ t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi); ccid3_pr_debug("%s(%p), Scheduled no feedback timer to " "expire in %lu jiffies (%luus)\n", dccp_role(sk), sk, usecs_to_jiffies(t_nfb), t_nfb); sk_reset_timer(sk, &hc->tx_no_feedback_timer, jiffies + usecs_to_jiffies(t_nfb)); } static int ccid3_hc_tx_parse_options(struct sock *sk, u8 packet_type, u8 option, u8 *optval, u8 optlen) { struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); __be32 opt_val; switch (option) { case TFRC_OPT_RECEIVE_RATE: case TFRC_OPT_LOSS_EVENT_RATE: /* Must be ignored on Data packets, cf. RFC 4342 8.3 and 8.5 */ if (packet_type == DCCP_PKT_DATA) break; if (unlikely(optlen != 4)) { DCCP_WARN("%s(%p), invalid len %d for %u\n", dccp_role(sk), sk, optlen, option); return -EINVAL; } opt_val = ntohl(get_unaligned((__be32 *)optval)); if (option == TFRC_OPT_RECEIVE_RATE) { /* Receive Rate is kept in units of 64 bytes/second */ hc->tx_x_recv = opt_val; hc->tx_x_recv <<= 6; ccid3_pr_debug("%s(%p), RECEIVE_RATE=%u\n", dccp_role(sk), sk, opt_val); } else { /* Update the fixpoint Loss Event Rate fraction */ hc->tx_p = tfrc_invert_loss_event_rate(opt_val); ccid3_pr_debug("%s(%p), LOSS_EVENT_RATE=%u\n", dccp_role(sk), sk, opt_val); } } return 0; } static int ccid3_hc_tx_init(struct ccid *ccid, struct sock *sk) { struct ccid3_hc_tx_sock *hc = ccid_priv(ccid); hc->tx_state = TFRC_SSTATE_NO_SENT; hc->tx_hist = NULL; setup_timer(&hc->tx_no_feedback_timer, ccid3_hc_tx_no_feedback_timer, (unsigned long)sk); return 0; } static void ccid3_hc_tx_exit(struct sock *sk) { struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); sk_stop_timer(sk, &hc->tx_no_feedback_timer); tfrc_tx_hist_purge(&hc->tx_hist); } static void ccid3_hc_tx_get_info(struct sock *sk, struct tcp_info *info) { info->tcpi_rto = ccid3_hc_tx_sk(sk)->tx_t_rto; info->tcpi_rtt = 
ccid3_hc_tx_sk(sk)->tx_rtt; } static int ccid3_hc_tx_getsockopt(struct sock *sk, const int optname, int len, u32 __user *optval, int __user *optlen) { const struct ccid3_hc_tx_sock *hc = ccid3_hc_tx_sk(sk); struct tfrc_tx_info tfrc; const void *val; switch (optname) { case DCCP_SOCKOPT_CCID_TX_INFO: if (len < sizeof(tfrc)) return -EINVAL; tfrc.tfrctx_x = hc->tx_x; tfrc.tfrctx_x_recv = hc->tx_x_recv; tfrc.tfrctx_x_calc = hc->tx_x_calc; tfrc.tfrctx_rtt = hc->tx_rtt; tfrc.tfrctx_p = hc->tx_p; tfrc.tfrctx_rto = hc->tx_t_rto; tfrc.tfrctx_ipi = hc->tx_t_ipi; len = sizeof(tfrc); val = &tfrc; break; default: return -ENOPROTOOPT; } if (put_user(len, optlen) || copy_to_user(optval, val, len)) return -EFAULT; return 0; } /* * Receiver Half-Connection Routines */ /* CCID3 feedback types */ enum ccid3_fback_type { CCID3_FBACK_NONE = 0, CCID3_FBACK_INITIAL, CCID3_FBACK_PERIODIC, CCID3_FBACK_PARAM_CHANGE }; #ifdef CONFIG_IP_DCCP_CCID3_DEBUG static const char *ccid3_rx_state_name(enum ccid3_hc_rx_states state) { static const char *const ccid3_rx_state_names[] = { [TFRC_RSTATE_NO_DATA] = "NO_DATA", [TFRC_RSTATE_DATA] = "DATA", }; return ccid3_rx_state_names[state]; } #endif static void ccid3_hc_rx_set_state(struct sock *sk, enum ccid3_hc_rx_states state) { struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); enum ccid3_hc_rx_states oldstate = hc->rx_state; ccid3_pr_debug("%s(%p) %-8.8s -> %s\n", dccp_role(sk), sk, ccid3_rx_state_name(oldstate), ccid3_rx_state_name(state)); WARN_ON(state == oldstate); hc->rx_state = state; } static void ccid3_hc_rx_send_feedback(struct sock *sk, const struct sk_buff *skb, enum ccid3_fback_type fbtype) { struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); struct dccp_sock *dp = dccp_sk(sk); ktime_t now = ktime_get_real(); s64 delta = 0; switch (fbtype) { case CCID3_FBACK_INITIAL: hc->rx_x_recv = 0; hc->rx_pinv = ~0U; /* see RFC 4342, 8.5 */ break; case CCID3_FBACK_PARAM_CHANGE: /* * When parameters change (new loss or p > p_prev), we do not * have a 
reliable estimate for R_m of [RFC 3448, 6.2] and so * need to reuse the previous value of X_recv. However, when * X_recv was 0 (due to early loss), this would kill X down to * s/t_mbi (i.e. one packet in 64 seconds). * To avoid such drastic reduction, we approximate X_recv as * the number of bytes since last feedback. * This is a safe fallback, since X is bounded above by X_calc. */ if (hc->rx_x_recv > 0) break; /* fall through */ case CCID3_FBACK_PERIODIC: delta = ktime_us_delta(now, hc->rx_tstamp_last_feedback); if (delta <= 0) DCCP_BUG("delta (%ld) <= 0", (long)delta); else hc->rx_x_recv = scaled_div32(hc->rx_bytes_recv, delta); break; default: return; } ccid3_pr_debug("Interval %ldusec, X_recv=%u, 1/p=%u\n", (long)delta, hc->rx_x_recv, hc->rx_pinv); hc->rx_tstamp_last_feedback = now; hc->rx_last_counter = dccp_hdr(skb)->dccph_ccval; hc->rx_bytes_recv = 0; dp->dccps_hc_rx_insert_options = 1; dccp_send_ack(sk); } static int ccid3_hc_rx_insert_options(struct sock *sk, struct sk_buff *skb) { const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); __be32 x_recv, pinv; if (!(sk->sk_state == DCCP_OPEN || sk->sk_state == DCCP_PARTOPEN)) return 0; if (dccp_packet_without_ack(skb)) return 0; x_recv = htonl(hc->rx_x_recv); pinv = htonl(hc->rx_pinv); if (dccp_insert_option(skb, TFRC_OPT_LOSS_EVENT_RATE, &pinv, sizeof(pinv)) || dccp_insert_option(skb, TFRC_OPT_RECEIVE_RATE, &x_recv, sizeof(x_recv))) return -1; return 0; } /** * ccid3_first_li - Implements [RFC 5348, 6.3.1] * * Determine the length of the first loss interval via inverse lookup. * Assume that X_recv can be computed by the throughput equation * s * X_recv = -------- * R * fval * Find some p such that f(p) = fval; return 1/p (scaled). 
*/ static u32 ccid3_first_li(struct sock *sk) { struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); u32 x_recv, p, delta; u64 fval; if (hc->rx_rtt == 0) { DCCP_WARN("No RTT estimate available, using fallback RTT\n"); hc->rx_rtt = DCCP_FALLBACK_RTT; } delta = ktime_to_us(net_timedelta(hc->rx_tstamp_last_feedback)); x_recv = scaled_div32(hc->rx_bytes_recv, delta); if (x_recv == 0) { /* would also trigger divide-by-zero */ DCCP_WARN("X_recv==0\n"); if (hc->rx_x_recv == 0) { DCCP_BUG("stored value of X_recv is zero"); return ~0U; } x_recv = hc->rx_x_recv; } fval = scaled_div(hc->rx_s, hc->rx_rtt); fval = scaled_div32(fval, x_recv); p = tfrc_calc_x_reverse_lookup(fval); ccid3_pr_debug("%s(%p), receive rate=%u bytes/s, implied " "loss rate=%u\n", dccp_role(sk), sk, x_recv, p); return p == 0 ? ~0U : scaled_div(1, p); } static void ccid3_hc_rx_packet_recv(struct sock *sk, struct sk_buff *skb) { struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); enum ccid3_fback_type do_feedback = CCID3_FBACK_NONE; const u64 ndp = dccp_sk(sk)->dccps_options_received.dccpor_ndp; const bool is_data_packet = dccp_data_packet(skb); if (unlikely(hc->rx_state == TFRC_RSTATE_NO_DATA)) { if (is_data_packet) { const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4; do_feedback = CCID3_FBACK_INITIAL; ccid3_hc_rx_set_state(sk, TFRC_RSTATE_DATA); hc->rx_s = payload; /* * Not necessary to update rx_bytes_recv here, * since X_recv = 0 for the first feedback packet (cf. 
* RFC 3448, 6.3) -- gerrit */ } goto update_records; } if (tfrc_rx_hist_duplicate(&hc->rx_hist, skb)) return; /* done receiving */ if (is_data_packet) { const u32 payload = skb->len - dccp_hdr(skb)->dccph_doff * 4; /* * Update moving-average of s and the sum of received payload bytes */ hc->rx_s = tfrc_ewma(hc->rx_s, payload, 9); hc->rx_bytes_recv += payload; } /* * Perform loss detection and handle pending losses */ if (tfrc_rx_handle_loss(&hc->rx_hist, &hc->rx_li_hist, skb, ndp, ccid3_first_li, sk)) { do_feedback = CCID3_FBACK_PARAM_CHANGE; goto done_receiving; } if (tfrc_rx_hist_loss_pending(&hc->rx_hist)) return; /* done receiving */ /* * Handle data packets: RTT sampling and monitoring p */ if (unlikely(!is_data_packet)) goto update_records; if (!tfrc_lh_is_initialised(&hc->rx_li_hist)) { const u32 sample = tfrc_rx_hist_sample_rtt(&hc->rx_hist, skb); /* * Empty loss history: no loss so far, hence p stays 0. * Sample RTT values, since an RTT estimate is required for the * computation of p when the first loss occurs; RFC 3448, 6.3.1. */ if (sample != 0) hc->rx_rtt = tfrc_ewma(hc->rx_rtt, sample, 9); } else if (tfrc_lh_update_i_mean(&hc->rx_li_hist, skb)) { /* * Step (3) of [RFC 3448, 6.1]: Recompute I_mean and, if I_mean * has decreased (resp. p has increased), send feedback now. 
*/ do_feedback = CCID3_FBACK_PARAM_CHANGE; } /* * Check if the periodic once-per-RTT feedback is due; RFC 4342, 10.3 */ if (SUB16(dccp_hdr(skb)->dccph_ccval, hc->rx_last_counter) > 3) do_feedback = CCID3_FBACK_PERIODIC; update_records: tfrc_rx_hist_add_packet(&hc->rx_hist, skb, ndp); done_receiving: if (do_feedback) ccid3_hc_rx_send_feedback(sk, skb, do_feedback); } static int ccid3_hc_rx_init(struct ccid *ccid, struct sock *sk) { struct ccid3_hc_rx_sock *hc = ccid_priv(ccid); hc->rx_state = TFRC_RSTATE_NO_DATA; tfrc_lh_init(&hc->rx_li_hist); return tfrc_rx_hist_alloc(&hc->rx_hist); } static void ccid3_hc_rx_exit(struct sock *sk) { struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); tfrc_rx_hist_purge(&hc->rx_hist); tfrc_lh_cleanup(&hc->rx_li_hist); } static void ccid3_hc_rx_get_info(struct sock *sk, struct tcp_info *info) { info->tcpi_ca_state = ccid3_hc_rx_sk(sk)->rx_state; info->tcpi_options |= TCPI_OPT_TIMESTAMPS; info->tcpi_rcv_rtt = ccid3_hc_rx_sk(sk)->rx_rtt; } static int ccid3_hc_rx_getsockopt(struct sock *sk, const int optname, int len, u32 __user *optval, int __user *optlen) { const struct ccid3_hc_rx_sock *hc = ccid3_hc_rx_sk(sk); struct tfrc_rx_info rx_info; const void *val; switch (optname) { case DCCP_SOCKOPT_CCID_RX_INFO: if (len < sizeof(rx_info)) return -EINVAL; rx_info.tfrcrx_x_recv = hc->rx_x_recv; rx_info.tfrcrx_rtt = hc->rx_rtt; rx_info.tfrcrx_p = tfrc_invert_loss_event_rate(hc->rx_pinv); len = sizeof(rx_info); val = &rx_info; break; default: return -ENOPROTOOPT; } if (put_user(len, optlen) || copy_to_user(optval, val, len)) return -EFAULT; return 0; } struct ccid_operations ccid3_ops = { .ccid_id = DCCPC_CCID3, .ccid_name = "TCP-Friendly Rate Control", .ccid_hc_tx_obj_size = sizeof(struct ccid3_hc_tx_sock), .ccid_hc_tx_init = ccid3_hc_tx_init, .ccid_hc_tx_exit = ccid3_hc_tx_exit, .ccid_hc_tx_send_packet = ccid3_hc_tx_send_packet, .ccid_hc_tx_packet_sent = ccid3_hc_tx_packet_sent, .ccid_hc_tx_packet_recv = ccid3_hc_tx_packet_recv, 
.ccid_hc_tx_parse_options = ccid3_hc_tx_parse_options, .ccid_hc_rx_obj_size = sizeof(struct ccid3_hc_rx_sock), .ccid_hc_rx_init = ccid3_hc_rx_init, .ccid_hc_rx_exit = ccid3_hc_rx_exit, .ccid_hc_rx_insert_options = ccid3_hc_rx_insert_options, .ccid_hc_rx_packet_recv = ccid3_hc_rx_packet_recv, .ccid_hc_rx_get_info = ccid3_hc_rx_get_info, .ccid_hc_tx_get_info = ccid3_hc_tx_get_info, .ccid_hc_rx_getsockopt = ccid3_hc_rx_getsockopt, .ccid_hc_tx_getsockopt = ccid3_hc_tx_getsockopt, }; #ifdef CONFIG_IP_DCCP_CCID3_DEBUG module_param(ccid3_debug, bool, 0644); MODULE_PARM_DESC(ccid3_debug, "Enable CCID-3 debug messages"); #endif
gpl-2.0
macressler/parallella-linux
arch/parisc/kernel/pci.c
3777
7916
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1997, 1998 Ralf Baechle
 * Copyright (C) 1999 SuSE GmbH
 * Copyright (C) 1999-2001 Hewlett-Packard Company
 * Copyright (C) 1999-2001 Grant Grundler
 */
#include <linux/eisa.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/types.h>

#include <asm/io.h>
#include <asm/superio.h>

#define DEBUG_RESOURCES 0
#define DEBUG_CONFIG 0

#if DEBUG_CONFIG
# define DBGC(x...)	printk(KERN_DEBUG x)
#else
# define DBGC(x...)
#endif

#if DEBUG_RESOURCES
#define DBG_RES(x...)	printk(KERN_DEBUG x)
#else
#define DBG_RES(x...)
#endif

/* To be used as: mdelay(pci_post_reset_delay);
 *
 * post_reset is the time the kernel should stall to prevent anyone from
 * accessing the PCI bus once #RESET is de-asserted.
 * PCI spec somewhere says 1 second but with multi-PCI bus systems,
 * this makes the boot time much longer than necessary.
 * 20ms seems to work for all the HP PCI implementations to date.
 *
 * #define pci_post_reset_delay 50
 */

/* Hooks installed by the host-bus-adapter driver (port I/O and BIOS-style
 * callbacks).  NULL until an HBA driver registers itself. */
struct pci_port_ops *pci_port __read_mostly;
struct pci_bios_ops *pci_bios __read_mostly;

/* Number of HBAs registered so far == first free slot in parisc_pci_hba[]. */
static int pci_hba_count __read_mostly;

/* parisc_pci_hba used by pci_port->in/out() ops to lookup bus data.   */
#define PCI_HBA_MAX 32
static struct pci_hba_data *parisc_pci_hba[PCI_HBA_MAX] __read_mostly;


/********************************************************************
**
** I/O port space support
**
*********************************************************************/

/* EISA port numbers and PCI port numbers share the same interface. Some
 * machines have both EISA and PCI adapters installed. Rather than turn
 * pci_port into an array, we reserve bus 0 for EISA and call the EISA
 * routines if the access is to a port on bus 0. We don't want to fix
 * EISA and ISA drivers which assume port space is <= 0xffff.
 */

#ifdef CONFIG_EISA
#define EISA_IN(size) if (EISA_bus && (b == 0)) return eisa_in##size(addr)
#define EISA_OUT(size) if (EISA_bus && (b == 0)) return eisa_out##size(d, addr)
#else
#define EISA_IN(size)
#define EISA_OUT(size)
#endif

/* Generate inb/inw/inl: decode the HBA index from the port number and
 * forward the access to that HBA's port ops.  Reads from an unregistered
 * HBA slot return all-ones (the PCI "master abort" convention). */
#define PCI_PORT_IN(type, size) \
u##size in##type (int addr) \
{ \
	int b = PCI_PORT_HBA(addr); \
	EISA_IN(size); \
	if (!parisc_pci_hba[b]) return (u##size) -1; \
	return pci_port->in##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr)); \
} \
EXPORT_SYMBOL(in##type);

PCI_PORT_IN(b, 8)
PCI_PORT_IN(w, 16)
PCI_PORT_IN(l, 32)

/* Generate outb/outw/outl; writes to an unregistered HBA slot are
 * silently dropped. */
#define PCI_PORT_OUT(type, size) \
void out##type (u##size d, int addr) \
{ \
	int b = PCI_PORT_HBA(addr); \
	EISA_OUT(size); \
	if (!parisc_pci_hba[b]) return; \
	pci_port->out##type(parisc_pci_hba[b], PCI_PORT_ADDR(addr), d); \
} \
EXPORT_SYMBOL(out##type);

PCI_PORT_OUT(b, 8)
PCI_PORT_OUT(w, 16)
PCI_PORT_OUT(l, 32)



/*
 * BIOS32 replacement.
 */
static int __init pcibios_init(void)
{
	if (!pci_bios)
		return -1;

	if (pci_bios->init) {
		pci_bios->init();
	} else {
		printk(KERN_WARNING "pci_bios != NULL but init() is!\n");
	}

	/* Set the CLS for PCI as early as possible. */
	pci_cache_line_size = pci_dfl_cache_line_size;

	return 0;
}

/* Called from pci_do_scan_bus() *after* walking a bus but before walking PPBs. */
void pcibios_fixup_bus(struct pci_bus *bus)
{
	if (pci_bios->fixup_bus) {
		pci_bios->fixup_bus(bus);
	} else {
		printk(KERN_WARNING "pci_bios != NULL but fixup_bus() is!\n");
	}
}

/*
 * Called by pci_set_master() - a driver interface.
 *
 * Legacy PDC guarantees to set:
 *	Map Memory BAR's into PA IO space.
 *	Map Expansion ROM BAR into one common PA IO space per bus.
 *	Map IO BAR's into PCI IO space.
 *	Command (see below)
 *	Cache Line Size
 *	Latency Timer
 *	Interrupt Line
 *	PPB: secondary latency timer, io/mmio base/limit,
 *		bus numbers, bridge control
 */
void pcibios_set_master(struct pci_dev *dev)
{
	u8 lat;

	/* If someone already mucked with this, don't touch it. */
	pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
	if (lat >= 16) return;

	/*
	** HP generally has fewer devices on the bus than other architectures.
	** upper byte is PCI_LATENCY_TIMER.
	*/
	pci_write_config_word(dev, PCI_CACHE_LINE_SIZE,
			      (0x80 << 8) | pci_cache_line_size);
}


/* Per-bus init: enable parity/SERR reporting on PCI-PCI bridges and give
 * the secondary bus a sane default latency timer. */
void __init pcibios_init_bus(struct pci_bus *bus)
{
	struct pci_dev *dev = bus->self;
	unsigned short bridge_ctl;

	/* We deal only with pci controllers and pci-pci bridges. */
	if (!dev || (dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
		return;

	/* PCI-PCI bridge - set the cache line and default latency
	   (32) for primary and secondary buses. */
	pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER, 32);

	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bridge_ctl);
	bridge_ctl |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bridge_ctl);
}

/*
 * pcibios align resources() is called every time generic PCI code
 * wants to generate a new address. The process of looking for
 * an available address, each candidate is first "aligned" and
 * then checked if the resource is available until a match is found.
 *
 * Since we are just checking candidates, don't use any fields other
 * than res->start.
 */
resource_size_t pcibios_align_resource(void *data, const struct resource *res,
				resource_size_t size, resource_size_t alignment)
{
	resource_size_t mask, align, start = res->start;

	DBG_RES("pcibios_align_resource(%s, (%p) [%lx,%lx]/%x, 0x%lx, 0x%lx)\n",
		pci_name(((struct pci_dev *) data)),
		res->parent, res->start, res->end,
		(int) res->flags, size, alignment);

	/* If it's not IO, then it's gotta be MEM */
	align = (res->flags & IORESOURCE_IO) ? PCIBIOS_MIN_IO : PCIBIOS_MIN_MEM;

	/* Align to largest of MIN or input size */
	mask = max(alignment, align) - 1;
	start += mask;
	start &= ~mask;

	return start;
}


/* mmap a device BAR into user space.  Only memory space is supported;
 * I/O-space and write-combined requests are rejected, and the mapping
 * is always uncached. */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state, int write_combine)
{
	unsigned long prot;

	/*
	 * I/O space can be accessed via normal processor loads and stores on
	 * this platform but for now we elect not to do this and portable
	 * drivers should not do this anyway.
	 */
	if (mmap_state == pci_mmap_io)
		return -EINVAL;

	if (write_combine)
		return -EINVAL;

	/*
	 * Ignore write-combine; for now only return uncached mappings.
	 */
	prot = pgprot_val(vma->vm_page_prot);
	prot |= _PAGE_NO_CACHE;
	vma->vm_page_prot = __pgprot(prot);

	return remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			       vma->vm_end - vma->vm_start,
			       vma->vm_page_prot);
}

/*
 * A driver is enabling the device. We make sure that all the appropriate
 * bits are set to allow the device to operate as the driver is expecting.
 * We enable the port IO and memory IO bits if the device has any BARs of
 * that type, and we enable the PERR and SERR bits unconditionally.
 * Drivers that do not need parity (eg graphics and possibly networking)
 * can clear these bits if they want.
 */
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int err;
	u16 cmd, old_cmd;

	err = pci_enable_resources(dev, mask);
	if (err < 0)
		return err;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	old_cmd = cmd;

	cmd |= (PCI_COMMAND_SERR | PCI_COMMAND_PARITY);

#if 0
	/* If bridge/bus controller has FBB enabled, child must too. */
	if (dev->bus->bridge_ctl & PCI_BRIDGE_CTL_FAST_BACK)
		cmd |= PCI_COMMAND_FAST_BACK;
#endif

	/* Only touch the COMMAND register if something actually changed. */
	if (cmd != old_cmd) {
		dev_info(&dev->dev, "enabling SERR and PARITY (%04x -> %04x)\n",
			old_cmd, cmd);
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}


/* PA-RISC specific */
/* Record an HBA so the in*/out* port accessors above can route to it.
 * NOTE(review): registration beyond PCI_HBA_MAX is only logged, not
 * reported to the caller -- presumably callers cannot fail gracefully here. */
void pcibios_register_hba(struct pci_hba_data *hba)
{
	if (pci_hba_count >= PCI_HBA_MAX) {
		printk(KERN_ERR "PCI: Too many Host Bus Adapters\n");
		return;
	}

	parisc_pci_hba[pci_hba_count] = hba;
	hba->hba_num = pci_hba_count++;
}

subsys_initcall(pcibios_init);
gpl-2.0
SaberMod/samsung_kernel_manta
arch/powerpc/kernel/pci_of_scan.c
4033
10842
/*
 * Helper routines to scan the device tree for PCI devices and busses
 *
 * Migrated out of PowerPC architecture pci_64.c file by Grant Likely
 * <grant.likely@secretlab.ca> so that these routines are available for
 * 32 bit also.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *    Rework, based on alpha PCI code.
 * Copyright (c) 2009 Secret Lab Technologies Ltd.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/export.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>

/**
 * get_int_prop - Decode a u32 from a device tree property
 *
 * Returns the first cell of property @name on @np, or @def when the
 * property is absent or shorter than 4 bytes.
 */
static u32 get_int_prop(struct device_node *np, const char *name, u32 def)
{
	const u32 *prop;
	int len;

	prop = of_get_property(np, name, &len);
	if (prop && len >= 4)
		return *prop;
	return def;
}

/**
 * pci_parse_of_flags - Parse the flags cell of a device tree PCI address
 * @addr0: value of 1st cell of a device tree PCI address.
 * @bridge: Set this flag if the address is from a bridge 'ranges' property
 *
 * Translates the OF address-space bits into IORESOURCE_* / PCI BAR flags.
 * Returns 0 for address spaces this code does not map (e.g. config space).
 */
unsigned int pci_parse_of_flags(u32 addr0, int bridge)
{
	unsigned int flags = 0;

	if (addr0 & 0x02000000) {
		flags = IORESOURCE_MEM | PCI_BASE_ADDRESS_SPACE_MEMORY;
		flags |= (addr0 >> 22) & PCI_BASE_ADDRESS_MEM_TYPE_64;
		flags |= (addr0 >> 28) & PCI_BASE_ADDRESS_MEM_TYPE_1M;
		if (addr0 & 0x40000000)
			flags |= IORESOURCE_PREFETCH
				 | PCI_BASE_ADDRESS_MEM_PREFETCH;
		/* Note: We don't know whether the ROM has been left enabled
		 * by the firmware or not. We mark it as disabled (ie, we do
		 * not set the IORESOURCE_ROM_ENABLE flag) for now rather than
		 * do a config space read, it will be force-enabled if needed
		 */
		if (!bridge && (addr0 & 0xff) == 0x30)
			flags |= IORESOURCE_READONLY;
	} else if (addr0 & 0x01000000)
		flags = IORESOURCE_IO | PCI_BASE_ADDRESS_SPACE_IO;
	if (flags)
		flags |= IORESOURCE_SIZEALIGN;
	return flags;
}

/**
 * of_pci_parse_addrs - Parse PCI addresses assigned in the device tree node
 * @node: device tree node for the PCI device
 * @dev: pci_dev structure for the device
 *
 * This function parses the 'assigned-addresses' property of a PCI devices'
 * device tree node and writes them into the associated pci_dev structure.
 */
static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
{
	u64 base, size;
	unsigned int flags;
	struct pci_bus_region region;
	struct resource *res;
	const u32 *addrs;
	u32 i;
	int proplen;

	addrs = of_get_property(node, "assigned-addresses", &proplen);
	if (!addrs)
		return;
	pr_debug(" parse addresses (%d bytes) @ %p\n", proplen, addrs);
	/* Each entry is 5 cells: phys.hi (flags/reg), phys.mid+lo (64-bit
	 * address), and a 64-bit size. */
	for (; proplen >= 20; proplen -= 20, addrs += 5) {
		flags = pci_parse_of_flags(addrs[0], 0);
		if (!flags)
			continue;
		base = of_read_number(&addrs[1], 2);
		size = of_read_number(&addrs[3], 2);
		if (!size)
			continue;
		/* Low byte of phys.hi is the config-space register (BAR). */
		i = addrs[0] & 0xff;
		pr_debug(" base: %llx, size: %llx, i: %x\n",
			 (unsigned long long)base,
			 (unsigned long long)size, i);
		if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) {
			res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2];
		} else if (i == dev->rom_base_reg) {
			res = &dev->resource[PCI_ROM_RESOURCE];
			flags |= IORESOURCE_READONLY | IORESOURCE_CACHEABLE;
		} else {
			printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
			continue;
		}
		res->flags = flags;
		res->name = pci_name(dev);
		region.start = base;
		region.end = base + size - 1;
		pcibios_bus_to_resource(dev, res, &region);
	}
}

/**
 * of_create_pci_dev - Given a device tree node on a pci bus, create a pci_dev
 * @node: device tree node pointer
 * @bus: bus the device is sitting on
 * @devfn: PCI function number, extracted from device tree by caller.
 */
struct pci_dev *of_create_pci_dev(struct device_node *node,
				 struct pci_bus *bus, int devfn)
{
	struct pci_dev *dev;
	const char *type;
	struct pci_slot *slot;

	dev = alloc_pci_dev();
	if (!dev)
		return NULL;
	type = of_get_property(node, "device_type", NULL);
	if (type == NULL)
		type = "";

	pr_debug(" create device, devfn: %x, type: %s\n", devfn, type);

	dev->bus = bus;
	dev->dev.of_node = of_node_get(node);
	dev->dev.parent = bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->devfn = devfn;
	dev->multifunction = 0;		/* maybe a lie? */
	dev->needs_freset = 0;		/* pcie fundamental reset required */
	set_pcie_port_type(dev);

	/* Attach the matching pci_slot, if one was created for this slot. */
	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* IDs come from the device tree instead of config-space reads. */
	dev->vendor = get_int_prop(node, "vendor-id", 0xffff);
	dev->device = get_int_prop(node, "device-id", 0xffff);
	dev->subsystem_vendor = get_int_prop(node, "subsystem-vendor-id", 0);
	dev->subsystem_device = get_int_prop(node, "subsystem-id", 0);
	dev->cfg_size = pci_cfg_space_size(dev);

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(bus),
		dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn));
	dev->class = get_int_prop(node, "class-code", 0);
	dev->revision = get_int_prop(node, "revision-id", 0);

	pr_debug(" class: 0x%x\n", dev->class);
	pr_debug(" revision: 0x%x\n", dev->revision);

	dev->current_state = 4;		/* unknown power state */
	dev->error_state = pci_channel_io_normal;
	dev->dma_mask = 0xffffffff;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);

	if (!strcmp(type, "pci") || !strcmp(type, "pciex")) {
		/* a PCI-PCI bridge */
		dev->hdr_type = PCI_HEADER_TYPE_BRIDGE;
		dev->rom_base_reg = PCI_ROM_ADDRESS1;
		set_pcie_hotplug_bridge(dev);
	} else if (!strcmp(type, "cardbus")) {
		dev->hdr_type = PCI_HEADER_TYPE_CARDBUS;
	} else {
		dev->hdr_type = PCI_HEADER_TYPE_NORMAL;
		dev->rom_base_reg = PCI_ROM_ADDRESS;
		/* Maybe do a default OF mapping here */
		dev->irq = NO_IRQ;
	}

	of_pci_parse_addrs(node, dev);

	pr_debug(" adding to system ...\n");

	pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(of_create_pci_dev);

/**
 * of_scan_pci_bridge - Set up a PCI bridge and scan for child nodes
 * @dev: pci_dev structure for the bridge
 *
 * of_scan_bus() calls this routine for each PCI bridge that it finds, and
 * this routine in turn call of_scan_bus() recusively to scan for more child
 * devices.
 */
void __devinit of_scan_pci_bridge(struct pci_dev *dev)
{
	struct device_node *node = dev->dev.of_node;
	struct pci_bus *bus;
	const u32 *busrange, *ranges;
	int len, i, mode;
	struct pci_bus_region region;
	struct resource *res;
	unsigned int flags;
	u64 size;

	pr_debug("of_scan_pci_bridge(%s)\n", node->full_name);

	/* parse bus-range property */
	busrange = of_get_property(node, "bus-range", &len);
	if (busrange == NULL || len != 8) {
		printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}
	ranges = of_get_property(node, "ranges", &len);
	if (ranges == NULL) {
		printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n",
		       node->full_name);
		return;
	}
	bus = pci_add_new_bus(dev->bus, dev, busrange[0]);
	if (!bus) {
		printk(KERN_ERR "Failed to create pci bus for %s\n",
		       node->full_name);
		return;
	}

	bus->primary = dev->bus->number;
	bus->subordinate = busrange[1];
	bus->bridge_ctl = 0;

	/* parse ranges property */
	/* PCI #address-cells == 3 and #size-cells == 2 always */
	res = &dev->resource[PCI_BRIDGE_RESOURCES];
	for (i = 0; i < PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES; ++i) {
		res->flags = 0;
		bus->resource[i] = res;
		++res;
	}
	/* Index 0 is reserved for the (single) I/O window; memory windows
	 * start at index 1. */
	i = 1;
	for (; len >= 32; len -= 32, ranges += 8) {
		flags = pci_parse_of_flags(ranges[0], 1);
		size = of_read_number(&ranges[6], 2);
		if (flags == 0 || size == 0)
			continue;
		if (flags & IORESOURCE_IO) {
			res = bus->resource[0];
			if (res->flags) {
				printk(KERN_ERR "PCI: ignoring extra I/O range"
				       " for bridge %s\n", node->full_name);
				continue;
			}
		} else {
			if (i >= PCI_NUM_RESOURCES - PCI_BRIDGE_RESOURCES) {
				printk(KERN_ERR "PCI: too many memory ranges"
				       " for bridge %s\n", node->full_name);
				continue;
			}
			res = bus->resource[i];
			++i;
		}
		res->flags = flags;
		region.start = of_read_number(&ranges[1], 2);
		region.end = region.start + size - 1;
		pcibios_bus_to_resource(dev, res, &region);
	}
	sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
		bus->number);
	pr_debug(" bus name: %s\n", bus->name);

	mode = PCI_PROBE_NORMAL;
	if (ppc_md.pci_probe_mode)
		mode = ppc_md.pci_probe_mode(bus);
	pr_debug(" probe mode: %d\n", mode);

	if (mode == PCI_PROBE_DEVTREE)
		of_scan_bus(node, bus);
	else if (mode == PCI_PROBE_NORMAL)
		pci_scan_child_bus(bus);
}
EXPORT_SYMBOL(of_scan_pci_bridge);

/**
 * __of_scan_bus - given a PCI bus node, setup bus and scan for child devices
 * @node: device tree node for the PCI bus
 * @bus: pci_bus structure for the PCI bus
 * @rescan_existing: Flag indicating bus has already been set up
 */
static void __devinit __of_scan_bus(struct device_node *node,
				    struct pci_bus *bus, int rescan_existing)
{
	struct device_node *child;
	const u32 *reg;
	int reglen, devfn;
	struct pci_dev *dev;

	pr_debug("of_scan_bus(%s) bus no %d...\n",
		 node->full_name, bus->number);

	/* Scan direct children */
	for_each_child_of_node(node, child) {
		pr_debug(" * %s\n", child->full_name);
		if (!of_device_is_available(child))
			continue;
		reg = of_get_property(child, "reg", &reglen);
		if (reg == NULL || reglen < 20)
			continue;
		/* devfn lives in bits 8-15 of the first "reg" cell. */
		devfn = (reg[0] >> 8) & 0xff;

		/* create a new pci_dev for this device */
		dev = of_create_pci_dev(child, bus, devfn);
		if (!dev)
			continue;
		pr_debug(" dev header type: %x\n", dev->hdr_type);
	}

	/* Apply all fixups necessary. We don't fixup the bus "self"
	 * for an existing bridge that is being rescanned */
	if (!rescan_existing)
		pcibios_setup_bus_self(bus);
	pcibios_setup_bus_devices(bus);

	/* Now scan child busses */
	list_for_each_entry(dev, &bus->devices, bus_list) {
		if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
		    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) {
			of_scan_pci_bridge(dev);
		}
	}
}

/**
 * of_scan_bus - given a PCI bus node, setup bus and scan for child devices
 * @node: device tree node for the PCI bus
 * @bus: pci_bus structure for the PCI bus
 */
void __devinit of_scan_bus(struct device_node *node, struct pci_bus *bus)
{
	__of_scan_bus(node, bus, 0);
}
EXPORT_SYMBOL_GPL(of_scan_bus);

/**
 * of_rescan_bus - given a PCI bus node, scan for child devices
 * @node: device tree node for the PCI bus
 * @bus: pci_bus structure for the PCI bus
 *
 * Same as of_scan_bus, but for a pci_bus structure that has already been
 * setup.
 */
void __devinit of_rescan_bus(struct device_node *node, struct pci_bus *bus)
{
	__of_scan_bus(node, bus, 1);
}
EXPORT_SYMBOL_GPL(of_rescan_bus);
gpl-2.0
blackwing182/htc-kernel-msm7x30-3.0
drivers/i2c/algos/i2c-algo-pcf.c
4289
11650
/*
 * i2c-algo-pcf.c i2c driver algorithms for PCF8584 adapters
 *
 * Copyright (C) 1995-1997 Simon G. Vogl
 *		 1998-2000 Hans Berglund
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
 * Frodo Looijaard <frodol@dds.nl>, and also from Martin Bailey
 * <mbailey@littlefeet-inc.com>
 *
 * Partially rewriten by Oleg I. Vdovikin <vdovikin@jscc.ru> to handle multiple
 * messages, proper stop/repstart signaling during receive, added detect code
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/i2c.h>
#include <linux/i2c-algo-pcf.h>
#include "i2c-algo-pcf.h"

#define DEB2(x) if (i2c_debug >= 2) x
#define DEB3(x) if (i2c_debug >= 3) x /* print several statistical values */
#define DEBPROTO(x) if (i2c_debug >= 9) x;
	/* debug the protocol by showing transferred bits */
#define DEF_TIMEOUT 16

/*
 * module parameters:
 */
static int i2c_debug;

/* setting states on the bus with the right timing: */
/* Register 1 is the control/status register (S1); register 0 is data (S0). */

#define set_pcf(adap, ctl, val) adap->setpcf(adap->data, ctl, val)
#define get_pcf(adap, ctl) adap->getpcf(adap->data, ctl)
#define get_own(adap) adap->getown(adap->data)
#define get_clock(adap) adap->getclock(adap->data)
#define i2c_outb(adap, val) adap->setpcf(adap->data, 0, val)
#define i2c_inb(adap) adap->getpcf(adap->data, 0)

/* other auxiliary functions */

/* Generate a START condition on the bus. */
static void i2c_start(struct i2c_algo_pcf_data *adap)
{
	DEBPROTO(printk(KERN_DEBUG "S "));
	set_pcf(adap, 1, I2C_PCF_START);
}

/* Generate a repeated START (no intervening STOP). */
static void i2c_repstart(struct i2c_algo_pcf_data *adap)
{
	DEBPROTO(printk(" Sr "));
	set_pcf(adap, 1, I2C_PCF_REPSTART);
}

/* Generate a STOP condition, releasing the bus. */
static void i2c_stop(struct i2c_algo_pcf_data *adap)
{
	DEBPROTO(printk("P\n"));
	set_pcf(adap, 1, I2C_PCF_STOP);
}

/* Recover from a lost-arbitration (LAB) condition by resetting the chip. */
static void handle_lab(struct i2c_algo_pcf_data *adap, const int *status)
{
	DEB2(printk(KERN_INFO
		"i2c-algo-pcf.o: lost arbitration (CSR 0x%02x)\n",
		*status));
	/*
	 * Cleanup from LAB -- reset and enable ESO.
	 * This resets the PCF8584; since we've lost the bus, no
	 * further attempts should be made by callers to clean up
	 * (no i2c_stop() etc.)
	 */
	set_pcf(adap, 1, I2C_PCF_PIN);
	set_pcf(adap, 1, I2C_PCF_ESO);
	/*
	 * We pause for a time period sufficient for any running
	 * I2C transaction to complete -- the arbitration logic won't
	 * work properly until the next START is seen.
	 * It is assumed the bus driver or client has set a proper value.
	 *
	 * REVISIT: should probably use msleep instead of mdelay if we
	 * know we can sleep.
	 */
	if (adap->lab_mdelay)
		mdelay(adap->lab_mdelay);

	DEB2(printk(KERN_INFO
		"i2c-algo-pcf.o: reset LAB condition (CSR 0x%02x)\n",
		get_pcf(adap, 1)));
}

/* Poll (with 100 us backoff) until the Bus Busy flag indicates the bus is
 * free.  Returns 0 on success or -ETIMEDOUT after DEF_TIMEOUT tries.
 * NOTE(review): on this chip BB reads as 1 when the bus is *free* --
 * verify against the PCF8584 datasheet. */
static int wait_for_bb(struct i2c_algo_pcf_data *adap)
{
	int timeout = DEF_TIMEOUT;
	int status;

	status = get_pcf(adap, 1);

	while (!(status & I2C_PCF_BB) && --timeout) {
		udelay(100); /* wait for 100 us */
		status = get_pcf(adap, 1);
	}

	if (timeout == 0) {
		printk(KERN_ERR "Timeout waiting for Bus Busy\n");
		return -ETIMEDOUT;
	}

	return 0;
}

/* Wait for the PIN (pending interrupt NOT) flag to clear, i.e. for the
 * current byte transfer to complete.  The final CSR value is stored in
 * *status.  Returns 0 on success, -EINTR on lost arbitration (after
 * resetting the chip via handle_lab()), or -ETIMEDOUT. */
static int wait_for_pin(struct i2c_algo_pcf_data *adap, int *status)
{
	int timeout = DEF_TIMEOUT;

	*status = get_pcf(adap, 1);

	while ((*status & I2C_PCF_PIN) && --timeout) {
		adap->waitforpin(adap->data);
		*status = get_pcf(adap, 1);
	}
	if (*status & I2C_PCF_LAB) {
		handle_lab(adap, status);
		return -EINTR;
	}

	if (timeout == 0)
		return -ETIMEDOUT;

	return 0;
}

/*
 * This should perform the 'PCF8584 initialization sequence' as described
 * in the Philips IC12 data book (1995, Aug 29).
 * There should be a 30 clock cycle wait after reset, I assume this
 * has been fulfilled.
 * There should be a delay at the end equal to the longest I2C message
 * to synchronize the BB-bit (in multimaster systems). How long is
 * this? I assume 1 second is always long enough.
 *
 * vdovikin: added detect code for PCF8584
 */
static int pcf_init_8584 (struct i2c_algo_pcf_data *adap)
{
	unsigned char temp;

	DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: PCF state 0x%02x\n",
		get_pcf(adap, 1)));

	/* S1=0x80: S0 selected, serial interface off */
	set_pcf(adap, 1, I2C_PCF_PIN);
	/*
	 * check to see S1 now used as R/W ctrl -
	 * PCF8584 does that when ESO is zero
	 */
	if (((temp = get_pcf(adap, 1)) & 0x7f) != (0)) {
		DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S0 (0x%02x).\n", temp));
		return -ENXIO; /* definitely not PCF8584 */
	}

	/* load own address in S0, effective address is (own << 1) */
	i2c_outb(adap, get_own(adap));
	/* check it's really written */
	if ((temp = i2c_inb(adap)) != get_own(adap)) {
		DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S0 (0x%02x).\n", temp));
		return -ENXIO;
	}

	/* S1=0xA0, next byte in S2 */
	set_pcf(adap, 1, I2C_PCF_PIN | I2C_PCF_ES1);
	/* check to see S2 now selected */
	if (((temp = get_pcf(adap, 1)) & 0x7f) != I2C_PCF_ES1) {
		DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S2 (0x%02x).\n", temp));
		return -ENXIO;
	}

	/* load clock register S2 */
	i2c_outb(adap, get_clock(adap));
	/* check it's really written, the only 5 lowest bits does matter */
	if (((temp = i2c_inb(adap)) & 0x1f) != get_clock(adap)) {
		DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't set S2 (0x%02x).\n", temp));
		return -ENXIO;
	}

	/* Enable serial interface, idle, S0 selected */
	set_pcf(adap, 1, I2C_PCF_IDLE);

	/* check to see PCF is really idled and we can access status register */
	if ((temp = get_pcf(adap, 1)) != (I2C_PCF_PIN | I2C_PCF_BB)) {
		DEB2(printk(KERN_ERR "i2c-algo-pcf.o: PCF detection failed -- can't select S1` (0x%02x).\n", temp));
		return -ENXIO;
	}

	printk(KERN_DEBUG "i2c-algo-pcf.o: detected and initialized PCF8584.\n");

	return 0;
}

/* Transmit @count bytes from @buf, waiting for and checking the slave ACK
 * after each byte.  Ends the transfer with STOP when @last is set,
 * otherwise with a repeated START.  Returns the number of bytes written,
 * -EINTR on lost arbitration, or -EREMOTEIO on timeout / NAK. */
static int pcf_sendbytes(struct i2c_adapter *i2c_adap, const char *buf,
			 int count, int last)
{
	struct i2c_algo_pcf_data *adap = i2c_adap->algo_data;
	int wrcount, status, timeout;

	for (wrcount=0; wrcount<count; ++wrcount) {
		DEB2(dev_dbg(&i2c_adap->dev, "i2c_write: writing %2.2X\n",
			     buf[wrcount] & 0xff));
		i2c_outb(adap, buf[wrcount]);
		timeout = wait_for_pin(adap, &status);
		if (timeout) {
			if (timeout == -EINTR)
				return -EINTR; /* arbitration lost */

			i2c_stop(adap);
			dev_err(&i2c_adap->dev, "i2c_write: error - timeout.\n");
			return -EREMOTEIO; /* got a better one ?? */
		}
		if (status & I2C_PCF_LRB) {
			i2c_stop(adap);
			dev_err(&i2c_adap->dev, "i2c_write: error - no ack.\n");
			return -EREMOTEIO; /* got a better one ?? */
		}
	}
	if (last)
		i2c_stop(adap);
	else
		i2c_repstart(adap);

	return wrcount;
}

/* Receive @count bytes into @buf.  The PCF8584 data register is pipelined:
 * each read returns the *previous* byte, so the loop runs count+1 times,
 * discarding the first (dummy) read, and negative-ACK / STOP are set up one
 * iteration early.  Returns bytes read, -EINTR on lost arbitration, or -1
 * on timeout / unexpected NAK. */
static int pcf_readbytes(struct i2c_adapter *i2c_adap, char *buf,
			 int count, int last)
{
	int i, status;
	struct i2c_algo_pcf_data *adap = i2c_adap->algo_data;
	int wfp;

	/* increment number of bytes to read by one -- read dummy byte */
	for (i = 0; i <= count; i++) {

		if ((wfp = wait_for_pin(adap, &status))) {
			if (wfp == -EINTR)
				return -EINTR; /* arbitration lost */

			i2c_stop(adap);
			dev_err(&i2c_adap->dev, "pcf_readbytes timed out.\n");
			return -1;
		}

		if ((status & I2C_PCF_LRB) && (i != count)) {
			i2c_stop(adap);
			dev_err(&i2c_adap->dev, "i2c_read: i2c_inb, No ack.\n");
			return -1;
		}

		if (i == count - 1) {
			/* Disable ACK so the slave sees NAK on the last byte. */
			set_pcf(adap, 1, I2C_PCF_ESO);
		} else if (i == count) {
			if (last)
				i2c_stop(adap);
			else
				i2c_repstart(adap);
		}

		if (i)
			buf[i - 1] = i2c_inb(adap);
		else
			i2c_inb(adap); /* dummy read */
	}

	return i - 1;
}

/* Write the 7-bit slave address plus R/W bit into the data register.
 * I2C_M_REV_DIR_ADDR inverts the R/W bit (protocol-mangling quirk). */
static int pcf_doAddress(struct i2c_algo_pcf_data *adap,
			 struct i2c_msg *msg)
{
	unsigned short flags = msg->flags;
	unsigned char addr;

	addr = msg->addr << 1;
	if (flags & I2C_M_RD)
		addr |= 1;
	if (flags & I2C_M_REV_DIR_ADDR)
		addr ^= 1;
	i2c_outb(adap, addr);

	return 0;
}

/* master_xfer implementation: run @num messages as one bus transaction
 * (START ... repeated STARTs ... STOP).  Returns the number of messages
 * processed or a negative errno. */
static int pcf_xfer(struct i2c_adapter *i2c_adap,
		    struct i2c_msg *msgs,
		    int num)
{
	struct i2c_algo_pcf_data *adap = i2c_adap->algo_data;
	struct i2c_msg *pmsg;
	int i;
	int ret=0, timeout, status;

	if (adap->xfer_begin)
		adap->xfer_begin(adap->data);

	/* Check for bus busy */
	timeout = wait_for_bb(adap);
	if (timeout) {
		DEB2(printk(KERN_ERR "i2c-algo-pcf.o: "
			    "Timeout waiting for BB in pcf_xfer\n");)
		i = -EIO;
		goto out;
	}

	for (i = 0;ret >= 0 && i < num; i++) {
		pmsg = &msgs[i];

		DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: Doing %s %d bytes to 0x%02x - %d of %d messages\n",
		     pmsg->flags & I2C_M_RD ? "read" : "write",
		     pmsg->len, pmsg->addr, i + 1, num);)

		ret = pcf_doAddress(adap, pmsg);

		/* Send START */
		if (i == 0)
			i2c_start(adap);

		/* Wait for PIN (pending interrupt NOT) */
		timeout = wait_for_pin(adap, &status);
		if (timeout) {
			if (timeout == -EINTR) {
				/* arbitration lost while doing address;
				 * chip was already reset, no STOP needed */
				i = -EINTR;
				goto out;
			}
			i2c_stop(adap);
			DEB2(printk(KERN_ERR "i2c-algo-pcf.o: Timeout waiting "
				    "for PIN(1) in pcf_xfer\n");)
			i = -EREMOTEIO;
			goto out;
		}

		/* Check LRB (last rcvd bit - slave ack) */
		if (status & I2C_PCF_LRB) {
			i2c_stop(adap);
			DEB2(printk(KERN_ERR "i2c-algo-pcf.o: No LRB(1) in pcf_xfer\n");)
			i = -EREMOTEIO;
			goto out;
		}

		DEB3(printk(KERN_DEBUG "i2c-algo-pcf.o: Msg %d, addr=0x%x, flags=0x%x, len=%d\n",
			    i, msgs[i].addr, msgs[i].flags, msgs[i].len);)

		if (pmsg->flags & I2C_M_RD) {
			ret = pcf_readbytes(i2c_adap, pmsg->buf, pmsg->len,
					    (i + 1 == num));

			if (ret != pmsg->len) {
				DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: "
					    "only read %d bytes.\n",ret));
			} else {
				DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: read %d bytes.\n",ret));
			}
		} else {
			ret = pcf_sendbytes(i2c_adap, pmsg->buf, pmsg->len,
					    (i + 1 == num));

			if (ret != pmsg->len) {
				DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: fail: "
					    "only wrote %d bytes.\n",ret));
			} else {
				DEB2(printk(KERN_DEBUG "i2c-algo-pcf.o: wrote %d bytes.\n",ret));
			}
		}
	}

out:
	if (adap->xfer_end)
		adap->xfer_end(adap->data);
	return i;
}

/* Advertise plain I2C + SMBus emulation + protocol mangling. */
static u32 pcf_func(struct i2c_adapter *adap)
{
	return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
	       I2C_FUNC_PROTOCOL_MANGLING;
}

/* exported algorithm data: */
static const struct i2c_algorithm pcf_algo = {
	.master_xfer	= pcf_xfer,
	.functionality	= pcf_func,
};

/*
 * registering functions to load algorithms at runtime
 */
int i2c_pcf_add_bus(struct i2c_adapter *adap)
{
	struct i2c_algo_pcf_data *pcf_adap = adap->algo_data;
	int rval;

	DEB2(dev_dbg(&adap->dev, "hw routines registered.\n"));

	/* register new adapter to i2c module... */
	adap->algo = &pcf_algo;

	/* Probe/initialize the chip before exposing the adapter. */
	if ((rval = pcf_init_8584(pcf_adap)))
		return rval;

	rval = i2c_add_adapter(adap);

	return rval;
}
EXPORT_SYMBOL(i2c_pcf_add_bus);

MODULE_AUTHOR("Hans Berglund <hb@spacetec.no>");
MODULE_DESCRIPTION("I2C-Bus PCF8584 algorithm");
MODULE_LICENSE("GPL");

module_param(i2c_debug, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(i2c_debug,
		 "debug level - 0 off; 1 normal; 2,3 more verbose; 9 pcf-protocol");
gpl-2.0
bitdomo2/android_kernel_lge_gammaw
drivers/mtd/nand/jz4740_nand.c
4801
10839
/* * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de> * JZ4740 SoC NAND controller driver * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/ioport.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/partitions.h> #include <linux/gpio.h> #include <asm/mach-jz4740/jz4740_nand.h> #define JZ_REG_NAND_CTRL 0x50 #define JZ_REG_NAND_ECC_CTRL 0x100 #define JZ_REG_NAND_DATA 0x104 #define JZ_REG_NAND_PAR0 0x108 #define JZ_REG_NAND_PAR1 0x10C #define JZ_REG_NAND_PAR2 0x110 #define JZ_REG_NAND_IRQ_STAT 0x114 #define JZ_REG_NAND_IRQ_CTRL 0x118 #define JZ_REG_NAND_ERR(x) (0x11C + ((x) << 2)) #define JZ_NAND_ECC_CTRL_PAR_READY BIT(4) #define JZ_NAND_ECC_CTRL_ENCODING BIT(3) #define JZ_NAND_ECC_CTRL_RS BIT(2) #define JZ_NAND_ECC_CTRL_RESET BIT(1) #define JZ_NAND_ECC_CTRL_ENABLE BIT(0) #define JZ_NAND_STATUS_ERR_COUNT (BIT(31) | BIT(30) | BIT(29)) #define JZ_NAND_STATUS_PAD_FINISH BIT(4) #define JZ_NAND_STATUS_DEC_FINISH BIT(3) #define JZ_NAND_STATUS_ENC_FINISH BIT(2) #define JZ_NAND_STATUS_UNCOR_ERROR BIT(1) #define JZ_NAND_STATUS_ERROR BIT(0) #define JZ_NAND_CTRL_ENABLE_CHIP(x) BIT((x) << 1) #define JZ_NAND_CTRL_ASSERT_CHIP(x) BIT(((x) << 1) + 1) #define JZ_NAND_MEM_ADDR_OFFSET 0x10000 #define JZ_NAND_MEM_CMD_OFFSET 0x08000 struct jz_nand { struct mtd_info mtd; struct nand_chip chip; void __iomem *base; struct resource *mem; void __iomem *bank_base; struct resource *bank_mem; struct jz_nand_platform_data *pdata; bool 
is_reading; }; static inline struct jz_nand *mtd_to_jz_nand(struct mtd_info *mtd) { return container_of(mtd, struct jz_nand, mtd); } static void jz_nand_cmd_ctrl(struct mtd_info *mtd, int dat, unsigned int ctrl) { struct jz_nand *nand = mtd_to_jz_nand(mtd); struct nand_chip *chip = mtd->priv; uint32_t reg; if (ctrl & NAND_CTRL_CHANGE) { BUG_ON((ctrl & NAND_ALE) && (ctrl & NAND_CLE)); if (ctrl & NAND_ALE) chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_ADDR_OFFSET; else if (ctrl & NAND_CLE) chip->IO_ADDR_W = nand->bank_base + JZ_NAND_MEM_CMD_OFFSET; else chip->IO_ADDR_W = nand->bank_base; reg = readl(nand->base + JZ_REG_NAND_CTRL); if (ctrl & NAND_NCE) reg |= JZ_NAND_CTRL_ASSERT_CHIP(0); else reg &= ~JZ_NAND_CTRL_ASSERT_CHIP(0); writel(reg, nand->base + JZ_REG_NAND_CTRL); } if (dat != NAND_CMD_NONE) writeb(dat, chip->IO_ADDR_W); } static int jz_nand_dev_ready(struct mtd_info *mtd) { struct jz_nand *nand = mtd_to_jz_nand(mtd); return gpio_get_value_cansleep(nand->pdata->busy_gpio); } static void jz_nand_hwctl(struct mtd_info *mtd, int mode) { struct jz_nand *nand = mtd_to_jz_nand(mtd); uint32_t reg; writel(0, nand->base + JZ_REG_NAND_IRQ_STAT); reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); reg |= JZ_NAND_ECC_CTRL_RESET; reg |= JZ_NAND_ECC_CTRL_ENABLE; reg |= JZ_NAND_ECC_CTRL_RS; switch (mode) { case NAND_ECC_READ: reg &= ~JZ_NAND_ECC_CTRL_ENCODING; nand->is_reading = true; break; case NAND_ECC_WRITE: reg |= JZ_NAND_ECC_CTRL_ENCODING; nand->is_reading = false; break; default: break; } writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); } static int jz_nand_calculate_ecc_rs(struct mtd_info *mtd, const uint8_t *dat, uint8_t *ecc_code) { struct jz_nand *nand = mtd_to_jz_nand(mtd); uint32_t reg, status; int i; unsigned int timeout = 1000; static uint8_t empty_block_ecc[] = {0xcd, 0x9d, 0x90, 0x58, 0xf4, 0x8b, 0xff, 0xb7, 0x6f}; if (nand->is_reading) return 0; do { status = readl(nand->base + JZ_REG_NAND_IRQ_STAT); } while (!(status & JZ_NAND_STATUS_ENC_FINISH) && 
--timeout); if (timeout == 0) return -1; reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); reg &= ~JZ_NAND_ECC_CTRL_ENABLE; writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); for (i = 0; i < 9; ++i) ecc_code[i] = readb(nand->base + JZ_REG_NAND_PAR0 + i); /* If the written data is completly 0xff, we also want to write 0xff as * ecc, otherwise we will get in trouble when doing subpage writes. */ if (memcmp(ecc_code, empty_block_ecc, 9) == 0) memset(ecc_code, 0xff, 9); return 0; } static void jz_nand_correct_data(uint8_t *dat, int index, int mask) { int offset = index & 0x7; uint16_t data; index += (index >> 3); data = dat[index]; data |= dat[index+1] << 8; mask ^= (data >> offset) & 0x1ff; data &= ~(0x1ff << offset); data |= (mask << offset); dat[index] = data & 0xff; dat[index+1] = (data >> 8) & 0xff; } static int jz_nand_correct_ecc_rs(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, uint8_t *calc_ecc) { struct jz_nand *nand = mtd_to_jz_nand(mtd); int i, error_count, index; uint32_t reg, status, error; uint32_t t; unsigned int timeout = 1000; t = read_ecc[0]; if (t == 0xff) { for (i = 1; i < 9; ++i) t &= read_ecc[i]; t &= dat[0]; t &= dat[nand->chip.ecc.size / 2]; t &= dat[nand->chip.ecc.size - 1]; if (t == 0xff) { for (i = 1; i < nand->chip.ecc.size - 1; ++i) t &= dat[i]; if (t == 0xff) return 0; } } for (i = 0; i < 9; ++i) writeb(read_ecc[i], nand->base + JZ_REG_NAND_PAR0 + i); reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); reg |= JZ_NAND_ECC_CTRL_PAR_READY; writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); do { status = readl(nand->base + JZ_REG_NAND_IRQ_STAT); } while (!(status & JZ_NAND_STATUS_DEC_FINISH) && --timeout); if (timeout == 0) return -1; reg = readl(nand->base + JZ_REG_NAND_ECC_CTRL); reg &= ~JZ_NAND_ECC_CTRL_ENABLE; writel(reg, nand->base + JZ_REG_NAND_ECC_CTRL); if (status & JZ_NAND_STATUS_ERROR) { if (status & JZ_NAND_STATUS_UNCOR_ERROR) return -1; error_count = (status & JZ_NAND_STATUS_ERR_COUNT) >> 29; for (i = 0; i < error_count; ++i) { error 
= readl(nand->base + JZ_REG_NAND_ERR(i)); index = ((error >> 16) & 0x1ff) - 1; if (index >= 0 && index < 512) jz_nand_correct_data(dat, index, error & 0x1ff); } return error_count; } return 0; } static int jz_nand_ioremap_resource(struct platform_device *pdev, const char *name, struct resource **res, void __iomem **base) { int ret; *res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); if (!*res) { dev_err(&pdev->dev, "Failed to get platform %s memory\n", name); ret = -ENXIO; goto err; } *res = request_mem_region((*res)->start, resource_size(*res), pdev->name); if (!*res) { dev_err(&pdev->dev, "Failed to request %s memory region\n", name); ret = -EBUSY; goto err; } *base = ioremap((*res)->start, resource_size(*res)); if (!*base) { dev_err(&pdev->dev, "Failed to ioremap %s memory region\n", name); ret = -EBUSY; goto err_release_mem; } return 0; err_release_mem: release_mem_region((*res)->start, resource_size(*res)); err: *res = NULL; *base = NULL; return ret; } static int __devinit jz_nand_probe(struct platform_device *pdev) { int ret; struct jz_nand *nand; struct nand_chip *chip; struct mtd_info *mtd; struct jz_nand_platform_data *pdata = pdev->dev.platform_data; nand = kzalloc(sizeof(*nand), GFP_KERNEL); if (!nand) { dev_err(&pdev->dev, "Failed to allocate device structure.\n"); return -ENOMEM; } ret = jz_nand_ioremap_resource(pdev, "mmio", &nand->mem, &nand->base); if (ret) goto err_free; ret = jz_nand_ioremap_resource(pdev, "bank", &nand->bank_mem, &nand->bank_base); if (ret) goto err_iounmap_mmio; if (pdata && gpio_is_valid(pdata->busy_gpio)) { ret = gpio_request(pdata->busy_gpio, "NAND busy pin"); if (ret) { dev_err(&pdev->dev, "Failed to request busy gpio %d: %d\n", pdata->busy_gpio, ret); goto err_iounmap_mem; } } mtd = &nand->mtd; chip = &nand->chip; mtd->priv = chip; mtd->owner = THIS_MODULE; mtd->name = "jz4740-nand"; chip->ecc.hwctl = jz_nand_hwctl; chip->ecc.calculate = jz_nand_calculate_ecc_rs; chip->ecc.correct = jz_nand_correct_ecc_rs; 
chip->ecc.mode = NAND_ECC_HW_OOB_FIRST; chip->ecc.size = 512; chip->ecc.bytes = 9; chip->ecc.strength = 2; /* * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a * conservative guess, given 9 ecc bytes and reed-solomon alg. */ if (pdata) chip->ecc.layout = pdata->ecc_layout; chip->chip_delay = 50; chip->cmd_ctrl = jz_nand_cmd_ctrl; if (pdata && gpio_is_valid(pdata->busy_gpio)) chip->dev_ready = jz_nand_dev_ready; chip->IO_ADDR_R = nand->bank_base; chip->IO_ADDR_W = nand->bank_base; nand->pdata = pdata; platform_set_drvdata(pdev, nand); writel(JZ_NAND_CTRL_ENABLE_CHIP(0), nand->base + JZ_REG_NAND_CTRL); ret = nand_scan_ident(mtd, 1, NULL); if (ret) { dev_err(&pdev->dev, "Failed to scan nand\n"); goto err_gpio_free; } if (pdata && pdata->ident_callback) { pdata->ident_callback(pdev, chip, &pdata->partitions, &pdata->num_partitions); } ret = nand_scan_tail(mtd); if (ret) { dev_err(&pdev->dev, "Failed to scan nand\n"); goto err_gpio_free; } ret = mtd_device_parse_register(mtd, NULL, NULL, pdata ? pdata->partitions : NULL, pdata ? 
pdata->num_partitions : 0); if (ret) { dev_err(&pdev->dev, "Failed to add mtd device\n"); goto err_nand_release; } dev_info(&pdev->dev, "Successfully registered JZ4740 NAND driver\n"); return 0; err_nand_release: nand_release(&nand->mtd); err_gpio_free: platform_set_drvdata(pdev, NULL); gpio_free(pdata->busy_gpio); err_iounmap_mem: iounmap(nand->bank_base); err_iounmap_mmio: iounmap(nand->base); err_free: kfree(nand); return ret; } static int __devexit jz_nand_remove(struct platform_device *pdev) { struct jz_nand *nand = platform_get_drvdata(pdev); nand_release(&nand->mtd); /* Deassert and disable all chips */ writel(0, nand->base + JZ_REG_NAND_CTRL); iounmap(nand->bank_base); release_mem_region(nand->bank_mem->start, resource_size(nand->bank_mem)); iounmap(nand->base); release_mem_region(nand->mem->start, resource_size(nand->mem)); platform_set_drvdata(pdev, NULL); kfree(nand); return 0; } static struct platform_driver jz_nand_driver = { .probe = jz_nand_probe, .remove = __devexit_p(jz_nand_remove), .driver = { .name = "jz4740-nand", .owner = THIS_MODULE, }, }; module_platform_driver(jz_nand_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>"); MODULE_DESCRIPTION("NAND controller driver for JZ4740 SoC"); MODULE_ALIAS("platform:jz4740-nand");
gpl-2.0
sakuraba001/android_kernel_samsung_galaxynote3
drivers/net/ethernet/dlink/sundance.c
4801
57271
/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */ /* Written 1999-2000 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 Support and updates available at http://www.scyld.com/network/sundance.html [link no longer provides useful info -jgarzik] Archives of the mailing list are still available at http://www.beowulf.org/pipermail/netdrivers/ */ #define DRV_NAME "sundance" #define DRV_VERSION "1.2" #define DRV_RELDATE "11-Sep-2006" /* The user-configurable values. These may be modified when a driver module is loaded.*/ static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ /* Maximum number of multicast addresses to filter (vs. rx-all-multicast). Typical is a 64 element hash table based on the Ethernet CRC. */ static const int multicast_filter_limit = 32; /* Set the copy breakpoint for the copy-only-tiny-frames scheme. Setting to > 1518 effectively disables this feature. This chip can receive into offset buffers, so the Alpha does not need a copy-align. */ static int rx_copybreak; static int flowctrl=1; /* media[] specifies the media type the NIC operates at. autosense Autosensing active media. 10mbps_hd 10Mbps half duplex. 10mbps_fd 10Mbps full duplex. 100mbps_hd 100Mbps half duplex. 100mbps_fd 100Mbps full duplex. 0 Autosensing active media. 1 10Mbps half duplex. 2 10Mbps full duplex. 3 100Mbps half duplex. 4 100Mbps full duplex. */ #define MAX_UNITS 8 static char *media[MAX_UNITS]; /* Operational parameters that are set at compile time. 
*/ /* Keep the ring sizes a power of two for compile efficiency. The compiler will convert <unsigned>'%'<2^N> into a bit mask. Making the Tx ring too large decreases the effectiveness of channel bonding and packet priority, and more than 128 requires modifying the Tx error recovery. Large receive rings merely waste memory. */ #define TX_RING_SIZE 32 #define TX_QUEUE_LEN (TX_RING_SIZE - 1) /* Limit ring entries actually used. */ #define RX_RING_SIZE 64 #define RX_BUDGET 32 #define TX_TOTAL_SIZE TX_RING_SIZE*sizeof(struct netdev_desc) #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct netdev_desc) /* Operational parameters that usually are not changed. */ /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (4*HZ) #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ /* Include files, designed to support most kernel versions 2.0.0 and later. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/init.h> #include <linux/bitops.h> #include <asm/uaccess.h> #include <asm/processor.h> /* Processor type for cache alignment. */ #include <asm/io.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/crc32.h> #include <linux/ethtool.h> #include <linux/mii.h> /* These identify the driver base version and may not be removed. 
*/ static const char version[] __devinitconst = KERN_INFO DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("Sundance Alta Ethernet driver"); MODULE_LICENSE("GPL"); module_param(debug, int, 0); module_param(rx_copybreak, int, 0); module_param_array(media, charp, NULL, 0); module_param(flowctrl, int, 0); MODULE_PARM_DESC(debug, "Sundance Alta debug level (0-5)"); MODULE_PARM_DESC(rx_copybreak, "Sundance Alta copy breakpoint for copy-only-tiny-frames"); MODULE_PARM_DESC(flowctrl, "Sundance Alta flow control [0|1]"); /* Theory of Operation I. Board Compatibility This driver is designed for the Sundance Technologies "Alta" ST201 chip. II. Board-specific settings III. Driver operation IIIa. Ring buffers This driver uses two statically allocated fixed-size descriptor lists formed into rings by a branch from the final descriptor to the beginning of the list. The ring sizes are set at compile time by RX/TX_RING_SIZE. Some chips explicitly use only 2^N sized rings, while others use a 'next descriptor' pointer that the driver forms into rings. IIIb/c. Transmit/Receive Structure This driver uses a zero-copy receive and transmit scheme. The driver allocates full frame size skbuffs for the Rx ring buffers at open() time and passes the skb->data field to the chip as receive data buffers. When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is copied to the new skbuff. When the incoming frame is larger, the skbuff is passed directly up the protocol stack. Buffers consumed this way are replaced by newly allocated skbuffs in a later phase of receives. The RX_COPYBREAK value is chosen to trade-off the memory wasted by using a full-sized skbuff for small frames vs. the copying costs of larger frames. 
New boards are typically used in generously configured machines and the underfilled buffers have negligible impact compared to the benefit of a single allocation size, so the default value of zero results in never copying packets. When copying is done, the cost is usually mitigated by using a combined copy/checksum routine. Copying also preloads the cache, which is most useful with small frames. A subtle aspect of the operation is that the IP header at offset 14 in an ethernet frame isn't longword aligned for further processing. Unaligned buffers are permitted by the Sundance hardware, so frames are received into the skbuff at an offset of "+2", 16-byte aligning the IP header. IIId. Synchronization The driver runs as two independent, single-threaded flows of control. One is the send-packet routine, which enforces single-threaded use by the dev->tbusy flag. The other thread is the interrupt handler, which is single threaded by the hardware and interrupt handling software. The send packet thread has partial control over the Tx ring and 'dev->tbusy' flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next queue slot is empty, it clears the tbusy flag when finished otherwise it sets the 'lp->tx_full' flag. The interrupt handler has exclusive control over the Rx ring and records stats from the Tx ring. After reaping the stats, it marks the Tx queue entry as empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it clears both the tx_full and tbusy flags. IV. Notes IVb. References The Sundance ST201 datasheet, preliminary version. The Kendin KS8723 datasheet, preliminary version. The ICplus IP100 datasheet, preliminary version. http://www.scyld.com/expert/100mbps.html http://www.scyld.com/expert/NWay.html IVc. Errata */ /* Work-around for Kendin chip bugs. 
*/ #ifndef CONFIG_SUNDANCE_MMIO #define USE_IO_OPS 1 #endif static DEFINE_PCI_DEVICE_TABLE(sundance_pci_tbl) = { { 0x1186, 0x1002, 0x1186, 0x1002, 0, 0, 0 }, { 0x1186, 0x1002, 0x1186, 0x1003, 0, 0, 1 }, { 0x1186, 0x1002, 0x1186, 0x1012, 0, 0, 2 }, { 0x1186, 0x1002, 0x1186, 0x1040, 0, 0, 3 }, { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, { 0x13F0, 0x0200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 6 }, { } }; MODULE_DEVICE_TABLE(pci, sundance_pci_tbl); enum { netdev_io_size = 128 }; struct pci_id_info { const char *name; }; static const struct pci_id_info pci_id_tbl[] __devinitdata = { {"D-Link DFE-550TX FAST Ethernet Adapter"}, {"D-Link DFE-550FX 100Mbps Fiber-optics Adapter"}, {"D-Link DFE-580TX 4 port Server Adapter"}, {"D-Link DFE-530TXS FAST Ethernet Adapter"}, {"D-Link DL10050-based FAST Ethernet Adapter"}, {"Sundance Technology Alta"}, {"IC Plus Corporation IP100A FAST Ethernet Adapter"}, { } /* terminate list. */ }; /* This driver was written to use PCI memory space, however x86-oriented hardware often uses I/O space accesses. */ /* Offsets to the device registers. Unlike software-only systems, device drivers interact with complex hardware. It's not useful to define symbolic names for every register bit in the device. The name can only partially document the semantics and make the driver longer and more difficult to read. In general, only the important configuration values or bits changed multiple times should be defined symbolically. 
*/ enum alta_offsets { DMACtrl = 0x00, TxListPtr = 0x04, TxDMABurstThresh = 0x08, TxDMAUrgentThresh = 0x09, TxDMAPollPeriod = 0x0a, RxDMAStatus = 0x0c, RxListPtr = 0x10, DebugCtrl0 = 0x1a, DebugCtrl1 = 0x1c, RxDMABurstThresh = 0x14, RxDMAUrgentThresh = 0x15, RxDMAPollPeriod = 0x16, LEDCtrl = 0x1a, ASICCtrl = 0x30, EEData = 0x34, EECtrl = 0x36, FlashAddr = 0x40, FlashData = 0x44, TxStatus = 0x46, TxFrameId = 0x47, DownCounter = 0x18, IntrClear = 0x4a, IntrEnable = 0x4c, IntrStatus = 0x4e, MACCtrl0 = 0x50, MACCtrl1 = 0x52, StationAddr = 0x54, MaxFrameSize = 0x5A, RxMode = 0x5c, MIICtrl = 0x5e, MulticastFilter0 = 0x60, MulticastFilter1 = 0x64, RxOctetsLow = 0x68, RxOctetsHigh = 0x6a, TxOctetsLow = 0x6c, TxOctetsHigh = 0x6e, TxFramesOK = 0x70, RxFramesOK = 0x72, StatsCarrierError = 0x74, StatsLateColl = 0x75, StatsMultiColl = 0x76, StatsOneColl = 0x77, StatsTxDefer = 0x78, RxMissed = 0x79, StatsTxXSDefer = 0x7a, StatsTxAbort = 0x7b, StatsBcastTx = 0x7c, StatsBcastRx = 0x7d, StatsMcastTx = 0x7e, StatsMcastRx = 0x7f, /* Aliased and bogus values! */ RxStatus = 0x0c, }; #define ASIC_HI_WORD(x) ((x) + 2) enum ASICCtrl_HiWord_bit { GlobalReset = 0x0001, RxReset = 0x0002, TxReset = 0x0004, DMAReset = 0x0008, FIFOReset = 0x0010, NetworkReset = 0x0020, HostReset = 0x0040, ResetBusy = 0x0400, }; /* Bits in the interrupt status/mask registers. */ enum intr_status_bits { IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008, IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020, IntrDrvRqst=0x0040, StatsMax=0x0080, LinkChange=0x0100, IntrTxDMADone=0x0200, IntrRxDMADone=0x0400, }; /* Bits in the RxMode register. */ enum rx_mode_bits { AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08, AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01, }; /* Bits in MACCtrl. 
*/ enum mac_ctrl0_bits { EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40, EnbFlowCtrl=0x100, EnbPassRxCRC=0x200, }; enum mac_ctrl1_bits { StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080, TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400, RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000, }; /* The Rx and Tx buffer descriptors. */ /* Note that using only 32 bit fields simplifies conversion to big-endian architectures. */ struct netdev_desc { __le32 next_desc; __le32 status; struct desc_frag { __le32 addr, length; } frag[1]; }; /* Bits in netdev_desc.status */ enum desc_status_bits { DescOwn=0x8000, DescEndPacket=0x4000, DescEndRing=0x2000, LastFrag=0x80000000, DescIntrOnTx=0x8000, DescIntrOnDMADone=0x80000000, DisableAlign = 0x00000001, }; #define PRIV_ALIGN 15 /* Required alignment mask */ /* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment within the structure. */ #define MII_CNT 4 struct netdev_private { /* Descriptor rings first for alignment. */ struct netdev_desc *rx_ring; struct netdev_desc *tx_ring; struct sk_buff* rx_skbuff[RX_RING_SIZE]; struct sk_buff* tx_skbuff[TX_RING_SIZE]; dma_addr_t tx_ring_dma; dma_addr_t rx_ring_dma; struct timer_list timer; /* Media monitoring timer. */ /* ethtool extra stats */ struct { u64 tx_multiple_collisions; u64 tx_single_collisions; u64 tx_late_collisions; u64 tx_deferred; u64 tx_deferred_excessive; u64 tx_aborted; u64 tx_bcasts; u64 rx_bcasts; u64 tx_mcasts; u64 rx_mcasts; } xstats; /* Frequently used values: keep some adjacent for cache effect. */ spinlock_t lock; int msg_enable; int chip_id; unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ unsigned int rx_buf_sz; /* Based on MTU+slack. */ struct netdev_desc *last_tx; /* Last Tx descriptor used. */ unsigned int cur_tx, dirty_tx; /* These values are keep track of the transceiver/media in use. */ unsigned int flowctrl:1; unsigned int default_port:4; /* Last dev->if_port value. 
*/ unsigned int an_enable:1; unsigned int speed; struct tasklet_struct rx_tasklet; struct tasklet_struct tx_tasklet; int budget; int cur_task; /* Multicast and receive mode. */ spinlock_t mcastlock; /* SMP lock multicast updates. */ u16 mcast_filter[4]; /* MII transceiver section. */ struct mii_if_info mii_if; int mii_preamble_required; unsigned char phys[MII_CNT]; /* MII device addresses, only first one used. */ struct pci_dev *pci_dev; void __iomem *base; spinlock_t statlock; }; /* The station address location in the EEPROM. */ #define EEPROM_SA_OFFSET 0x10 #define DEFAULT_INTR (IntrRxDMADone | IntrPCIErr | \ IntrDrvRqst | IntrTxDone | StatsMax | \ LinkChange) static int change_mtu(struct net_device *dev, int new_mtu); static int eeprom_read(void __iomem *ioaddr, int location); static int mdio_read(struct net_device *dev, int phy_id, int location); static void mdio_write(struct net_device *dev, int phy_id, int location, int value); static int mdio_wait_link(struct net_device *dev, int wait); static int netdev_open(struct net_device *dev); static void check_duplex(struct net_device *dev); static void netdev_timer(unsigned long data); static void tx_timeout(struct net_device *dev); static void init_ring(struct net_device *dev); static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev); static int reset_tx (struct net_device *dev); static irqreturn_t intr_handler(int irq, void *dev_instance); static void rx_poll(unsigned long data); static void tx_poll(unsigned long data); static void refill_rx (struct net_device *dev); static void netdev_error(struct net_device *dev, int intr_status); static void netdev_error(struct net_device *dev, int intr_status); static void set_rx_mode(struct net_device *dev); static int __set_mac_addr(struct net_device *dev); static int sundance_set_mac_addr(struct net_device *dev, void *data); static struct net_device_stats *get_stats(struct net_device *dev); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, 
int cmd); static int netdev_close(struct net_device *dev); static const struct ethtool_ops ethtool_ops; static void sundance_reset(struct net_device *dev, unsigned long reset_cmd) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base + ASICCtrl; int countdown; /* ST201 documentation states ASICCtrl is a 32bit register */ iowrite32 (reset_cmd | ioread32 (ioaddr), ioaddr); /* ST201 documentation states reset can take up to 1 ms */ countdown = 10 + 1; while (ioread32 (ioaddr) & (ResetBusy << 16)) { if (--countdown == 0) { printk(KERN_WARNING "%s : reset not completed !!\n", dev->name); break; } udelay(100); } } static const struct net_device_ops netdev_ops = { .ndo_open = netdev_open, .ndo_stop = netdev_close, .ndo_start_xmit = start_tx, .ndo_get_stats = get_stats, .ndo_set_rx_mode = set_rx_mode, .ndo_do_ioctl = netdev_ioctl, .ndo_tx_timeout = tx_timeout, .ndo_change_mtu = change_mtu, .ndo_set_mac_address = sundance_set_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static int __devinit sundance_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct netdev_private *np; static int card_idx; int chip_idx = ent->driver_data; int irq; int i; void __iomem *ioaddr; u16 mii_ctl; void *ring_space; dma_addr_t ring_dma; #ifdef USE_IO_OPS int bar = 0; #else int bar = 1; #endif int phy, phy_end, phy_idx = 0; /* when built into the kernel, we only print version if device is found */ #ifndef MODULE static int printed_version; if (!printed_version++) printk(version); #endif if (pci_enable_device(pdev)) return -EIO; pci_set_master(pdev); irq = pdev->irq; dev = alloc_etherdev(sizeof(*np)); if (!dev) return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); if (pci_request_regions(pdev, DRV_NAME)) goto err_out_netdev; ioaddr = pci_iomap(pdev, bar, netdev_io_size); if (!ioaddr) goto err_out_res; for (i = 0; i < 3; i++) ((__le16 *)dev->dev_addr)[i] = cpu_to_le16(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET)); 
memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); dev->base_addr = (unsigned long)ioaddr; dev->irq = irq; np = netdev_priv(dev); np->base = ioaddr; np->pci_dev = pdev; np->chip_id = chip_idx; np->msg_enable = (1 << debug) - 1; spin_lock_init(&np->lock); spin_lock_init(&np->statlock); tasklet_init(&np->rx_tasklet, rx_poll, (unsigned long)dev); tasklet_init(&np->tx_tasklet, tx_poll, (unsigned long)dev); ring_space = dma_alloc_coherent(&pdev->dev, TX_TOTAL_SIZE, &ring_dma, GFP_KERNEL); if (!ring_space) goto err_out_cleardev; np->tx_ring = (struct netdev_desc *)ring_space; np->tx_ring_dma = ring_dma; ring_space = dma_alloc_coherent(&pdev->dev, RX_TOTAL_SIZE, &ring_dma, GFP_KERNEL); if (!ring_space) goto err_out_unmap_tx; np->rx_ring = (struct netdev_desc *)ring_space; np->rx_ring_dma = ring_dma; np->mii_if.dev = dev; np->mii_if.mdio_read = mdio_read; np->mii_if.mdio_write = mdio_write; np->mii_if.phy_id_mask = 0x1f; np->mii_if.reg_num_mask = 0x1f; /* The chip-specific entries in the device structure. 
*/ dev->netdev_ops = &netdev_ops; SET_ETHTOOL_OPS(dev, &ethtool_ops); dev->watchdog_timeo = TX_TIMEOUT; pci_set_drvdata(pdev, dev); i = register_netdev(dev); if (i) goto err_out_unmap_rx; printk(KERN_INFO "%s: %s at %p, %pM, IRQ %d.\n", dev->name, pci_id_tbl[chip_idx].name, ioaddr, dev->dev_addr, irq); np->phys[0] = 1; /* Default setting */ np->mii_preamble_required++; /* * It seems some phys doesn't deal well with address 0 being accessed * first */ if (sundance_pci_tbl[np->chip_id].device == 0x0200) { phy = 0; phy_end = 31; } else { phy = 1; phy_end = 32; /* wraps to zero, due to 'phy & 0x1f' */ } for (; phy <= phy_end && phy_idx < MII_CNT; phy++) { int phyx = phy & 0x1f; int mii_status = mdio_read(dev, phyx, MII_BMSR); if (mii_status != 0xffff && mii_status != 0x0000) { np->phys[phy_idx++] = phyx; np->mii_if.advertising = mdio_read(dev, phyx, MII_ADVERTISE); if ((mii_status & 0x0040) == 0) np->mii_preamble_required++; printk(KERN_INFO "%s: MII PHY found at address %d, status " "0x%4.4x advertising %4.4x.\n", dev->name, phyx, mii_status, np->mii_if.advertising); } } np->mii_preamble_required--; if (phy_idx == 0) { printk(KERN_INFO "%s: No MII transceiver found, aborting. 
ASIC status %x\n", dev->name, ioread32(ioaddr + ASICCtrl)); goto err_out_unregister; } np->mii_if.phy_id = np->phys[0]; /* Parse override configuration */ np->an_enable = 1; if (card_idx < MAX_UNITS) { if (media[card_idx] != NULL) { np->an_enable = 0; if (strcmp (media[card_idx], "100mbps_fd") == 0 || strcmp (media[card_idx], "4") == 0) { np->speed = 100; np->mii_if.full_duplex = 1; } else if (strcmp (media[card_idx], "100mbps_hd") == 0 || strcmp (media[card_idx], "3") == 0) { np->speed = 100; np->mii_if.full_duplex = 0; } else if (strcmp (media[card_idx], "10mbps_fd") == 0 || strcmp (media[card_idx], "2") == 0) { np->speed = 10; np->mii_if.full_duplex = 1; } else if (strcmp (media[card_idx], "10mbps_hd") == 0 || strcmp (media[card_idx], "1") == 0) { np->speed = 10; np->mii_if.full_duplex = 0; } else { np->an_enable = 1; } } if (flowctrl == 1) np->flowctrl = 1; } /* Fibre PHY? */ if (ioread32 (ioaddr + ASICCtrl) & 0x80) { /* Default 100Mbps Full */ if (np->an_enable) { np->speed = 100; np->mii_if.full_duplex = 1; np->an_enable = 0; } } /* Reset PHY */ mdio_write (dev, np->phys[0], MII_BMCR, BMCR_RESET); mdelay (300); /* If flow control enabled, we need to advertise it.*/ if (np->flowctrl) mdio_write (dev, np->phys[0], MII_ADVERTISE, np->mii_if.advertising | 0x0400); mdio_write (dev, np->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART); /* Force media type */ if (!np->an_enable) { mii_ctl = 0; mii_ctl |= (np->speed == 100) ? BMCR_SPEED100 : 0; mii_ctl |= (np->mii_if.full_duplex) ? BMCR_FULLDPLX : 0; mdio_write (dev, np->phys[0], MII_BMCR, mii_ctl); printk (KERN_INFO "Override speed=%d, %s duplex\n", np->speed, np->mii_if.full_duplex ? "Full" : "Half"); } /* Perhaps move the reset here? */ /* Reset the chip to erase previous misconfiguration. 
*/ if (netif_msg_hw(np)) printk("ASIC Control is %x.\n", ioread32(ioaddr + ASICCtrl)); sundance_reset(dev, 0x00ff << 16); if (netif_msg_hw(np)) printk("ASIC Control is now %x.\n", ioread32(ioaddr + ASICCtrl)); card_idx++; return 0; err_out_unregister: unregister_netdev(dev); err_out_unmap_rx: dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); err_out_unmap_tx: dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); err_out_cleardev: pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, ioaddr); err_out_res: pci_release_regions(pdev); err_out_netdev: free_netdev (dev); return -ENODEV; } static int change_mtu(struct net_device *dev, int new_mtu) { if ((new_mtu < 68) || (new_mtu > 8191)) /* Set by RxDMAFrameLen */ return -EINVAL; if (netif_running(dev)) return -EBUSY; dev->mtu = new_mtu; return 0; } #define eeprom_delay(ee_addr) ioread32(ee_addr) /* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */ static int __devinit eeprom_read(void __iomem *ioaddr, int location) { int boguscnt = 10000; /* Typical 1900 ticks. */ iowrite16(0x0200 | (location & 0xff), ioaddr + EECtrl); do { eeprom_delay(ioaddr + EECtrl); if (! (ioread16(ioaddr + EECtrl) & 0x8000)) { return ioread16(ioaddr + EEData); } } while (--boguscnt > 0); return 0; } /* MII transceiver control section. Read and write the MII registers using software-generated serial MDIO protocol. See the MII specifications or DP83840A data sheet for details. The maximum data clock rate is 2.5 Mhz. The minimum timing is usually met by back-to-back 33Mhz PCI cycles. */ #define mdio_delay() ioread8(mdio_addr) enum mii_reg_bits { MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004, }; #define MDIO_EnbIn (0) #define MDIO_WRITE0 (MDIO_EnbOutput) #define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput) /* Generate the preamble required for initial synchronization and a few older transceivers. 
*/ static void mdio_sync(void __iomem *mdio_addr) { int bits = 32; /* Establish sync by sending at least 32 logic ones. */ while (--bits >= 0) { iowrite8(MDIO_WRITE1, mdio_addr); mdio_delay(); iowrite8(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr); mdio_delay(); } } static int mdio_read(struct net_device *dev, int phy_id, int location) { struct netdev_private *np = netdev_priv(dev); void __iomem *mdio_addr = np->base + MIICtrl; int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location; int i, retval = 0; if (np->mii_preamble_required) mdio_sync(mdio_addr); /* Shift the read command bits out. */ for (i = 15; i >= 0; i--) { int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; iowrite8(dataval, mdio_addr); mdio_delay(); iowrite8(dataval | MDIO_ShiftClk, mdio_addr); mdio_delay(); } /* Read the two transition, 16 data, and wire-idle bits. */ for (i = 19; i > 0; i--) { iowrite8(MDIO_EnbIn, mdio_addr); mdio_delay(); retval = (retval << 1) | ((ioread8(mdio_addr) & MDIO_Data) ? 1 : 0); iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); mdio_delay(); } return (retval>>1) & 0xffff; } static void mdio_write(struct net_device *dev, int phy_id, int location, int value) { struct netdev_private *np = netdev_priv(dev); void __iomem *mdio_addr = np->base + MIICtrl; int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location<<18) | value; int i; if (np->mii_preamble_required) mdio_sync(mdio_addr); /* Shift the command bits out. */ for (i = 31; i >= 0; i--) { int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0; iowrite8(dataval, mdio_addr); mdio_delay(); iowrite8(dataval | MDIO_ShiftClk, mdio_addr); mdio_delay(); } /* Clear out extra bits. 
*/ for (i = 2; i > 0; i--) { iowrite8(MDIO_EnbIn, mdio_addr); mdio_delay(); iowrite8(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr); mdio_delay(); } } static int mdio_wait_link(struct net_device *dev, int wait) { int bmsr; int phy_id; struct netdev_private *np; np = netdev_priv(dev); phy_id = np->phys[0]; do { bmsr = mdio_read(dev, phy_id, MII_BMSR); if (bmsr & 0x0004) return 0; mdelay(1); } while (--wait > 0); return -1; } static int netdev_open(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; unsigned long flags; int i; /* Do we need to reset the chip??? */ i = request_irq(dev->irq, intr_handler, IRQF_SHARED, dev->name, dev); if (i) return i; if (netif_msg_ifup(np)) printk(KERN_DEBUG "%s: netdev_open() irq %d.\n", dev->name, dev->irq); init_ring(dev); iowrite32(np->rx_ring_dma, ioaddr + RxListPtr); /* The Tx list pointer is written as packets are queued. */ /* Initialize other registers. */ __set_mac_addr(dev); #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE) iowrite16(dev->mtu + 18, ioaddr + MaxFrameSize); #else iowrite16(dev->mtu + 14, ioaddr + MaxFrameSize); #endif if (dev->mtu > 2047) iowrite32(ioread32(ioaddr + ASICCtrl) | 0x0C, ioaddr + ASICCtrl); /* Configure the PCI bus bursts and FIFO thresholds. */ if (dev->if_port == 0) dev->if_port = np->default_port; spin_lock_init(&np->mcastlock); set_rx_mode(dev); iowrite16(0, ioaddr + IntrEnable); iowrite16(0, ioaddr + DownCounter); /* Set the chip to poll every N*320nsec. 
*/ iowrite8(100, ioaddr + RxDMAPollPeriod); iowrite8(127, ioaddr + TxDMAPollPeriod); /* Fix DFE-580TX packet drop issue */ if (np->pci_dev->revision >= 0x14) iowrite8(0x01, ioaddr + DebugCtrl1); netif_start_queue(dev); spin_lock_irqsave(&np->lock, flags); reset_tx(dev); spin_unlock_irqrestore(&np->lock, flags); iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); if (netif_msg_ifup(np)) printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x " "MAC Control %x, %4.4x %4.4x.\n", dev->name, ioread32(ioaddr + RxStatus), ioread8(ioaddr + TxStatus), ioread32(ioaddr + MACCtrl0), ioread16(ioaddr + MACCtrl1), ioread16(ioaddr + MACCtrl0)); /* Set the timer to check for link beat. */ init_timer(&np->timer); np->timer.expires = jiffies + 3*HZ; np->timer.data = (unsigned long)dev; np->timer.function = netdev_timer; /* timer handler */ add_timer(&np->timer); /* Enable interrupts by setting the interrupt mask. */ iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); return 0; } static void check_duplex(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); int negotiated = mii_lpa & np->mii_if.advertising; int duplex; /* Force media */ if (!np->an_enable || mii_lpa == 0xffff) { if (np->mii_if.full_duplex) iowrite16 (ioread16 (ioaddr + MACCtrl0) | EnbFullDuplex, ioaddr + MACCtrl0); return; } /* Autonegotiation */ duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040; if (np->mii_if.full_duplex != duplex) { np->mii_if.full_duplex = duplex; if (netif_msg_link(np)) printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d " "negotiated capability %4.4x.\n", dev->name, duplex ? "full" : "half", np->phys[0], negotiated); iowrite16(ioread16(ioaddr + MACCtrl0) | (duplex ? 
0x20 : 0), ioaddr + MACCtrl0); } } static void netdev_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; int next_tick = 10*HZ; if (netif_msg_timer(np)) { printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, " "Tx %x Rx %x.\n", dev->name, ioread16(ioaddr + IntrEnable), ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus)); } check_duplex(dev); np->timer.expires = jiffies + next_tick; add_timer(&np->timer); } static void tx_timeout(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; unsigned long flag; netif_stop_queue(dev); tasklet_disable(&np->tx_tasklet); iowrite16(0, ioaddr + IntrEnable); printk(KERN_WARNING "%s: Transmit timed out, TxStatus %2.2x " "TxFrameId %2.2x," " resetting...\n", dev->name, ioread8(ioaddr + TxStatus), ioread8(ioaddr + TxFrameId)); { int i; for (i=0; i<TX_RING_SIZE; i++) { printk(KERN_DEBUG "%02x %08llx %08x %08x(%02x) %08x %08x\n", i, (unsigned long long)(np->tx_ring_dma + i*sizeof(*np->tx_ring)), le32_to_cpu(np->tx_ring[i].next_desc), le32_to_cpu(np->tx_ring[i].status), (le32_to_cpu(np->tx_ring[i].status) >> 2) & 0xff, le32_to_cpu(np->tx_ring[i].frag[0].addr), le32_to_cpu(np->tx_ring[i].frag[0].length)); } printk(KERN_DEBUG "TxListPtr=%08x netif_queue_stopped=%d\n", ioread32(np->base + TxListPtr), netif_queue_stopped(dev)); printk(KERN_DEBUG "cur_tx=%d(%02x) dirty_tx=%d(%02x)\n", np->cur_tx, np->cur_tx % TX_RING_SIZE, np->dirty_tx, np->dirty_tx % TX_RING_SIZE); printk(KERN_DEBUG "cur_rx=%d dirty_rx=%d\n", np->cur_rx, np->dirty_rx); printk(KERN_DEBUG "cur_task=%d\n", np->cur_task); } spin_lock_irqsave(&np->lock, flag); /* Stop and restart the chip's Tx processes . 
*/ reset_tx(dev); spin_unlock_irqrestore(&np->lock, flag); dev->if_port = 0; dev->trans_start = jiffies; /* prevent tx timeout */ dev->stats.tx_errors++; if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { netif_wake_queue(dev); } iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); tasklet_enable(&np->tx_tasklet); } /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ static void init_ring(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int i; np->cur_rx = np->cur_tx = 0; np->dirty_rx = np->dirty_tx = 0; np->cur_task = 0; np->rx_buf_sz = (dev->mtu <= 1520 ? PKT_BUF_SZ : dev->mtu + 16); /* Initialize all Rx descriptors. */ for (i = 0; i < RX_RING_SIZE; i++) { np->rx_ring[i].next_desc = cpu_to_le32(np->rx_ring_dma + ((i+1)%RX_RING_SIZE)*sizeof(*np->rx_ring)); np->rx_ring[i].status = 0; np->rx_ring[i].frag[0].length = 0; np->rx_skbuff[i] = NULL; } /* Fill in the Rx buffers. Handle allocation failure gracefully. */ for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2); np->rx_skbuff[i] = skb; if (skb == NULL) break; skb_reserve(skb, 2); /* 16 byte align the IP header. 
*/ np->rx_ring[i].frag[0].addr = cpu_to_le32( dma_map_single(&np->pci_dev->dev, skb->data, np->rx_buf_sz, DMA_FROM_DEVICE)); if (dma_mapping_error(&np->pci_dev->dev, np->rx_ring[i].frag[0].addr)) { dev_kfree_skb(skb); np->rx_skbuff[i] = NULL; break; } np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag); } np->dirty_rx = (unsigned int)(i - RX_RING_SIZE); for (i = 0; i < TX_RING_SIZE; i++) { np->tx_skbuff[i] = NULL; np->tx_ring[i].status = 0; } } static void tx_poll (unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netdev_private *np = netdev_priv(dev); unsigned head = np->cur_task % TX_RING_SIZE; struct netdev_desc *txdesc = &np->tx_ring[(np->cur_tx - 1) % TX_RING_SIZE]; /* Chain the next pointer */ for (; np->cur_tx - np->cur_task > 0; np->cur_task++) { int entry = np->cur_task % TX_RING_SIZE; txdesc = &np->tx_ring[entry]; if (np->last_tx) { np->last_tx->next_desc = cpu_to_le32(np->tx_ring_dma + entry*sizeof(struct netdev_desc)); } np->last_tx = txdesc; } /* Indicate the latest descriptor of tx ring */ txdesc->status |= cpu_to_le32(DescIntrOnTx); if (ioread32 (np->base + TxListPtr) == 0) iowrite32 (np->tx_ring_dma + head * sizeof(struct netdev_desc), np->base + TxListPtr); } static netdev_tx_t start_tx (struct sk_buff *skb, struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); struct netdev_desc *txdesc; unsigned entry; /* Calculate the next Tx descriptor entry. 
*/ entry = np->cur_tx % TX_RING_SIZE; np->tx_skbuff[entry] = skb; txdesc = &np->tx_ring[entry]; txdesc->next_desc = 0; txdesc->status = cpu_to_le32 ((entry << 2) | DisableAlign); txdesc->frag[0].addr = cpu_to_le32(dma_map_single(&np->pci_dev->dev, skb->data, skb->len, DMA_TO_DEVICE)); if (dma_mapping_error(&np->pci_dev->dev, txdesc->frag[0].addr)) goto drop_frame; txdesc->frag[0].length = cpu_to_le32 (skb->len | LastFrag); /* Increment cur_tx before tasklet_schedule() */ np->cur_tx++; mb(); /* Schedule a tx_poll() task */ tasklet_schedule(&np->tx_tasklet); /* On some architectures: explicitly flush cache lines here. */ if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1 && !netif_queue_stopped(dev)) { /* do nothing */ } else { netif_stop_queue (dev); } if (netif_msg_tx_queued(np)) { printk (KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", dev->name, np->cur_tx, entry); } return NETDEV_TX_OK; drop_frame: dev_kfree_skb(skb); np->tx_skbuff[entry] = NULL; dev->stats.tx_dropped++; return NETDEV_TX_OK; } /* Reset hardware tx and free all of tx buffers */ static int reset_tx (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; struct sk_buff *skb; int i; /* Reset tx logic, TxListPtr will be cleaned */ iowrite16 (TxDisable, ioaddr + MACCtrl1); sundance_reset(dev, (NetworkReset|FIFOReset|DMAReset|TxReset) << 16); /* free all tx skbuff */ for (i = 0; i < TX_RING_SIZE; i++) { np->tx_ring[i].next_desc = 0; skb = np->tx_skbuff[i]; if (skb) { dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[i].frag[0].addr), skb->len, DMA_TO_DEVICE); dev_kfree_skb_any(skb); np->tx_skbuff[i] = NULL; dev->stats.tx_dropped++; } } np->cur_tx = np->dirty_tx = 0; np->cur_task = 0; np->last_tx = NULL; iowrite8(127, ioaddr + TxDMAPollPeriod); iowrite16 (StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1); return 0; } /* The interrupt handler cleans up after the Tx thread, and schedule a Rx thread work */ static irqreturn_t 
intr_handler(int irq, void *dev_instance) { struct net_device *dev = (struct net_device *)dev_instance; struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; int hw_frame_id; int tx_cnt; int tx_status; int handled = 0; int i; do { int intr_status = ioread16(ioaddr + IntrStatus); iowrite16(intr_status, ioaddr + IntrStatus); if (netif_msg_intr(np)) printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n", dev->name, intr_status); if (!(intr_status & DEFAULT_INTR)) break; handled = 1; if (intr_status & (IntrRxDMADone)) { iowrite16(DEFAULT_INTR & ~(IntrRxDone|IntrRxDMADone), ioaddr + IntrEnable); if (np->budget < 0) np->budget = RX_BUDGET; tasklet_schedule(&np->rx_tasklet); } if (intr_status & (IntrTxDone | IntrDrvRqst)) { tx_status = ioread16 (ioaddr + TxStatus); for (tx_cnt=32; tx_status & 0x80; --tx_cnt) { if (netif_msg_tx_done(np)) printk ("%s: Transmit status is %2.2x.\n", dev->name, tx_status); if (tx_status & 0x1e) { if (netif_msg_tx_err(np)) printk("%s: Transmit error status %4.4x.\n", dev->name, tx_status); dev->stats.tx_errors++; if (tx_status & 0x10) dev->stats.tx_fifo_errors++; if (tx_status & 0x08) dev->stats.collisions++; if (tx_status & 0x04) dev->stats.tx_fifo_errors++; if (tx_status & 0x02) dev->stats.tx_window_errors++; /* ** This reset has been verified on ** DFE-580TX boards ! phdm@macqel.be. */ if (tx_status & 0x10) { /* TxUnderrun */ /* Restart Tx FIFO and transmitter */ sundance_reset(dev, (NetworkReset|FIFOReset|TxReset) << 16); /* No need to reset the Tx pointer here */ } /* Restart the Tx. Need to make sure tx enabled */ i = 10; do { iowrite16(ioread16(ioaddr + MACCtrl1) | TxEnable, ioaddr + MACCtrl1); if (ioread16(ioaddr + MACCtrl1) & TxEnabled) break; mdelay(1); } while (--i); } /* Yup, this is a documentation bug. It cost me *hours*. 
*/ iowrite16 (0, ioaddr + TxStatus); if (tx_cnt < 0) { iowrite32(5000, ioaddr + DownCounter); break; } tx_status = ioread16 (ioaddr + TxStatus); } hw_frame_id = (tx_status >> 8) & 0xff; } else { hw_frame_id = ioread8(ioaddr + TxFrameId); } if (np->pci_dev->revision >= 0x14) { spin_lock(&np->lock); for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { int entry = np->dirty_tx % TX_RING_SIZE; struct sk_buff *skb; int sw_frame_id; sw_frame_id = (le32_to_cpu( np->tx_ring[entry].status) >> 2) & 0xff; if (sw_frame_id == hw_frame_id && !(le32_to_cpu(np->tx_ring[entry].status) & 0x00010000)) break; if (sw_frame_id == (hw_frame_id + 1) % TX_RING_SIZE) break; skb = np->tx_skbuff[entry]; /* Free the original skb. */ dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[entry].frag[0].addr), skb->len, DMA_TO_DEVICE); dev_kfree_skb_irq (np->tx_skbuff[entry]); np->tx_skbuff[entry] = NULL; np->tx_ring[entry].frag[0].addr = 0; np->tx_ring[entry].frag[0].length = 0; } spin_unlock(&np->lock); } else { spin_lock(&np->lock); for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) { int entry = np->dirty_tx % TX_RING_SIZE; struct sk_buff *skb; if (!(le32_to_cpu(np->tx_ring[entry].status) & 0x00010000)) break; skb = np->tx_skbuff[entry]; /* Free the original skb. */ dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[entry].frag[0].addr), skb->len, DMA_TO_DEVICE); dev_kfree_skb_irq (np->tx_skbuff[entry]); np->tx_skbuff[entry] = NULL; np->tx_ring[entry].frag[0].addr = 0; np->tx_ring[entry].frag[0].length = 0; } spin_unlock(&np->lock); } if (netif_queue_stopped(dev) && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) { /* The ring is no longer full, clear busy flag. */ netif_wake_queue (dev); } /* Abnormal error summary/uncommon events handlers. 
*/ if (intr_status & (IntrPCIErr | LinkChange | StatsMax)) netdev_error(dev, intr_status); } while (0); if (netif_msg_intr(np)) printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n", dev->name, ioread16(ioaddr + IntrStatus)); return IRQ_RETVAL(handled); } static void rx_poll(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct netdev_private *np = netdev_priv(dev); int entry = np->cur_rx % RX_RING_SIZE; int boguscnt = np->budget; void __iomem *ioaddr = np->base; int received = 0; /* If EOP is set on the next entry, it's a new packet. Send it up. */ while (1) { struct netdev_desc *desc = &(np->rx_ring[entry]); u32 frame_status = le32_to_cpu(desc->status); int pkt_len; if (--boguscnt < 0) { goto not_done; } if (!(frame_status & DescOwn)) break; pkt_len = frame_status & 0x1fff; /* Chip omits the CRC. */ if (netif_msg_rx_status(np)) printk(KERN_DEBUG " netdev_rx() status was %8.8x.\n", frame_status); if (frame_status & 0x001f4000) { /* There was a error. */ if (netif_msg_rx_err(np)) printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n", frame_status); dev->stats.rx_errors++; if (frame_status & 0x00100000) dev->stats.rx_length_errors++; if (frame_status & 0x00010000) dev->stats.rx_fifo_errors++; if (frame_status & 0x00060000) dev->stats.rx_frame_errors++; if (frame_status & 0x00080000) dev->stats.rx_crc_errors++; if (frame_status & 0x00100000) { printk(KERN_WARNING "%s: Oversized Ethernet frame," " status %8.8x.\n", dev->name, frame_status); } } else { struct sk_buff *skb; #ifndef final_version if (netif_msg_rx_status(np)) printk(KERN_DEBUG " netdev_rx() normal Rx pkt length %d" ", bogus_cnt %d.\n", pkt_len, boguscnt); #endif /* Check if the packet is long enough to accept without copying to a minimally-sized skbuff. 
*/ if (pkt_len < rx_copybreak && (skb = netdev_alloc_skb(dev, pkt_len + 2)) != NULL) { skb_reserve(skb, 2); /* 16 byte align the IP header */ dma_sync_single_for_cpu(&np->pci_dev->dev, le32_to_cpu(desc->frag[0].addr), np->rx_buf_sz, DMA_FROM_DEVICE); skb_copy_to_linear_data(skb, np->rx_skbuff[entry]->data, pkt_len); dma_sync_single_for_device(&np->pci_dev->dev, le32_to_cpu(desc->frag[0].addr), np->rx_buf_sz, DMA_FROM_DEVICE); skb_put(skb, pkt_len); } else { dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(desc->frag[0].addr), np->rx_buf_sz, DMA_FROM_DEVICE); skb_put(skb = np->rx_skbuff[entry], pkt_len); np->rx_skbuff[entry] = NULL; } skb->protocol = eth_type_trans(skb, dev); /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */ netif_rx(skb); } entry = (entry + 1) % RX_RING_SIZE; received++; } np->cur_rx = entry; refill_rx (dev); np->budget -= received; iowrite16(DEFAULT_INTR, ioaddr + IntrEnable); return; not_done: np->cur_rx = entry; refill_rx (dev); if (!received) received = 1; np->budget -= received; if (np->budget <= 0) np->budget = RX_BUDGET; tasklet_schedule(&np->rx_tasklet); } static void refill_rx (struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); int entry; int cnt = 0; /* Refill the Rx ring buffers. */ for (;(np->cur_rx - np->dirty_rx + RX_RING_SIZE) % RX_RING_SIZE > 0; np->dirty_rx = (np->dirty_rx + 1) % RX_RING_SIZE) { struct sk_buff *skb; entry = np->dirty_rx % RX_RING_SIZE; if (np->rx_skbuff[entry] == NULL) { skb = netdev_alloc_skb(dev, np->rx_buf_sz + 2); np->rx_skbuff[entry] = skb; if (skb == NULL) break; /* Better luck next round. */ skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ np->rx_ring[entry].frag[0].addr = cpu_to_le32( dma_map_single(&np->pci_dev->dev, skb->data, np->rx_buf_sz, DMA_FROM_DEVICE)); if (dma_mapping_error(&np->pci_dev->dev, np->rx_ring[entry].frag[0].addr)) { dev_kfree_skb_irq(skb); np->rx_skbuff[entry] = NULL; break; } } /* Perhaps we need not reset this field. 
*/ np->rx_ring[entry].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag); np->rx_ring[entry].status = 0; cnt++; } } static void netdev_error(struct net_device *dev, int intr_status) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; u16 mii_ctl, mii_advertise, mii_lpa; int speed; if (intr_status & LinkChange) { if (mdio_wait_link(dev, 10) == 0) { printk(KERN_INFO "%s: Link up\n", dev->name); if (np->an_enable) { mii_advertise = mdio_read(dev, np->phys[0], MII_ADVERTISE); mii_lpa = mdio_read(dev, np->phys[0], MII_LPA); mii_advertise &= mii_lpa; printk(KERN_INFO "%s: Link changed: ", dev->name); if (mii_advertise & ADVERTISE_100FULL) { np->speed = 100; printk("100Mbps, full duplex\n"); } else if (mii_advertise & ADVERTISE_100HALF) { np->speed = 100; printk("100Mbps, half duplex\n"); } else if (mii_advertise & ADVERTISE_10FULL) { np->speed = 10; printk("10Mbps, full duplex\n"); } else if (mii_advertise & ADVERTISE_10HALF) { np->speed = 10; printk("10Mbps, half duplex\n"); } else printk("\n"); } else { mii_ctl = mdio_read(dev, np->phys[0], MII_BMCR); speed = (mii_ctl & BMCR_SPEED100) ? 100 : 10; np->speed = speed; printk(KERN_INFO "%s: Link changed: %dMbps ,", dev->name, speed); printk("%s duplex.\n", (mii_ctl & BMCR_FULLDPLX) ? "full" : "half"); } check_duplex(dev); if (np->flowctrl && np->mii_if.full_duplex) { iowrite16(ioread16(ioaddr + MulticastFilter1+2) | 0x0200, ioaddr + MulticastFilter1+2); iowrite16(ioread16(ioaddr + MACCtrl0) | EnbFlowCtrl, ioaddr + MACCtrl0); } netif_carrier_on(dev); } else { printk(KERN_INFO "%s: Link down\n", dev->name); netif_carrier_off(dev); } } if (intr_status & StatsMax) { get_stats(dev); } if (intr_status & IntrPCIErr) { printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n", dev->name, intr_status); /* We must do a global reset of DMA to continue. 
*/ } } static struct net_device_stats *get_stats(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; unsigned long flags; u8 late_coll, single_coll, mult_coll; spin_lock_irqsave(&np->statlock, flags); /* The chip only need report frame silently dropped. */ dev->stats.rx_missed_errors += ioread8(ioaddr + RxMissed); dev->stats.tx_packets += ioread16(ioaddr + TxFramesOK); dev->stats.rx_packets += ioread16(ioaddr + RxFramesOK); dev->stats.tx_carrier_errors += ioread8(ioaddr + StatsCarrierError); mult_coll = ioread8(ioaddr + StatsMultiColl); np->xstats.tx_multiple_collisions += mult_coll; single_coll = ioread8(ioaddr + StatsOneColl); np->xstats.tx_single_collisions += single_coll; late_coll = ioread8(ioaddr + StatsLateColl); np->xstats.tx_late_collisions += late_coll; dev->stats.collisions += mult_coll + single_coll + late_coll; np->xstats.tx_deferred += ioread8(ioaddr + StatsTxDefer); np->xstats.tx_deferred_excessive += ioread8(ioaddr + StatsTxXSDefer); np->xstats.tx_aborted += ioread8(ioaddr + StatsTxAbort); np->xstats.tx_bcasts += ioread8(ioaddr + StatsBcastTx); np->xstats.rx_bcasts += ioread8(ioaddr + StatsBcastRx); np->xstats.tx_mcasts += ioread8(ioaddr + StatsMcastTx); np->xstats.rx_mcasts += ioread8(ioaddr + StatsMcastRx); dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsLow); dev->stats.tx_bytes += ioread16(ioaddr + TxOctetsHigh) << 16; dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsLow); dev->stats.rx_bytes += ioread16(ioaddr + RxOctetsHigh) << 16; spin_unlock_irqrestore(&np->statlock, flags); return &dev->stats; } static void set_rx_mode(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; u16 mc_filter[4]; /* Multicast hash filter */ u32 rx_mode; int i; if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys; } else if ((netdev_mc_count(dev) > multicast_filter_limit) || (dev->flags & IFF_ALLMULTI)) { /* Too many to match, or accept all multicasts. */ memset(mc_filter, 0xff, sizeof(mc_filter)); rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys; } else if (!netdev_mc_empty(dev)) { struct netdev_hw_addr *ha; int bit; int index; int crc; memset (mc_filter, 0, sizeof (mc_filter)); netdev_for_each_mc_addr(ha, dev) { crc = ether_crc_le(ETH_ALEN, ha->addr); for (index=0, bit=0; bit < 6; bit++, crc <<= 1) if (crc & 0x80000000) index |= 1 << bit; mc_filter[index/16] |= (1 << (index % 16)); } rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys; } else { iowrite8(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode); return; } if (np->mii_if.full_duplex && np->flowctrl) mc_filter[3] |= 0x0200; for (i = 0; i < 4; i++) iowrite16(mc_filter[i], ioaddr + MulticastFilter0 + i*2); iowrite8(rx_mode, ioaddr + RxMode); } static int __set_mac_addr(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); u16 addr16; addr16 = (dev->dev_addr[0] | (dev->dev_addr[1] << 8)); iowrite16(addr16, np->base + StationAddr); addr16 = (dev->dev_addr[2] | (dev->dev_addr[3] << 8)); iowrite16(addr16, np->base + StationAddr+2); addr16 = (dev->dev_addr[4] | (dev->dev_addr[5] << 8)); iowrite16(addr16, np->base + StationAddr+4); return 0; } /* Invoked with rtnl_lock held */ static int sundance_set_mac_addr(struct net_device *dev, void *data) { const struct sockaddr *addr = data; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); __set_mac_addr(dev); return 0; } static const struct { const char name[ETH_GSTRING_LEN]; } sundance_stats[] = { { "tx_multiple_collisions" }, { "tx_single_collisions" }, { "tx_late_collisions" }, { "tx_deferred" }, { "tx_deferred_excessive" }, { "tx_aborted" }, { "tx_bcasts" }, { 
"rx_bcasts" }, { "tx_mcasts" }, { "rx_mcasts" }, }; static int check_if_running(struct net_device *dev) { if (!netif_running(dev)) return -EINVAL; return 0; } static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct netdev_private *np = netdev_priv(dev); strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(np->pci_dev), sizeof(info->bus_info)); } static int get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct netdev_private *np = netdev_priv(dev); spin_lock_irq(&np->lock); mii_ethtool_gset(&np->mii_if, ecmd); spin_unlock_irq(&np->lock); return 0; } static int set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { struct netdev_private *np = netdev_priv(dev); int res; spin_lock_irq(&np->lock); res = mii_ethtool_sset(&np->mii_if, ecmd); spin_unlock_irq(&np->lock); return res; } static int nway_reset(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return mii_nway_restart(&np->mii_if); } static u32 get_link(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return mii_link_ok(&np->mii_if); } static u32 get_msglevel(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); return np->msg_enable; } static void set_msglevel(struct net_device *dev, u32 val) { struct netdev_private *np = netdev_priv(dev); np->msg_enable = val; } static void get_strings(struct net_device *dev, u32 stringset, u8 *data) { if (stringset == ETH_SS_STATS) memcpy(data, sundance_stats, sizeof(sundance_stats)); } static int get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return ARRAY_SIZE(sundance_stats); default: return -EOPNOTSUPP; } } static void get_ethtool_stats(struct net_device *dev, struct ethtool_stats *stats, u64 *data) { struct netdev_private *np = netdev_priv(dev); int i = 0; get_stats(dev); data[i++] = np->xstats.tx_multiple_collisions; 
data[i++] = np->xstats.tx_single_collisions; data[i++] = np->xstats.tx_late_collisions; data[i++] = np->xstats.tx_deferred; data[i++] = np->xstats.tx_deferred_excessive; data[i++] = np->xstats.tx_aborted; data[i++] = np->xstats.tx_bcasts; data[i++] = np->xstats.rx_bcasts; data[i++] = np->xstats.tx_mcasts; data[i++] = np->xstats.rx_mcasts; } static const struct ethtool_ops ethtool_ops = { .begin = check_if_running, .get_drvinfo = get_drvinfo, .get_settings = get_settings, .set_settings = set_settings, .nway_reset = nway_reset, .get_link = get_link, .get_msglevel = get_msglevel, .set_msglevel = set_msglevel, .get_strings = get_strings, .get_sset_count = get_sset_count, .get_ethtool_stats = get_ethtool_stats, }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct netdev_private *np = netdev_priv(dev); int rc; if (!netif_running(dev)) return -EINVAL; spin_lock_irq(&np->lock); rc = generic_mii_ioctl(&np->mii_if, if_mii(rq), cmd, NULL); spin_unlock_irq(&np->lock); return rc; } static int netdev_close(struct net_device *dev) { struct netdev_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; struct sk_buff *skb; int i; /* Wait and kill tasklet */ tasklet_kill(&np->rx_tasklet); tasklet_kill(&np->tx_tasklet); np->cur_tx = 0; np->dirty_tx = 0; np->cur_task = 0; np->last_tx = NULL; netif_stop_queue(dev); if (netif_msg_ifdown(np)) { printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x " "Rx %4.4x Int %2.2x.\n", dev->name, ioread8(ioaddr + TxStatus), ioread32(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus)); printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n", dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx); } /* Disable interrupts by clearing the interrupt mask. */ iowrite16(0x0000, ioaddr + IntrEnable); /* Disable Rx and Tx DMA for safely release resource */ iowrite32(0x500, ioaddr + DMACtrl); /* Stop the chip's Tx and Rx processes. 
*/ iowrite16(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1); for (i = 2000; i > 0; i--) { if ((ioread32(ioaddr + DMACtrl) & 0xc000) == 0) break; mdelay(1); } iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset, ioaddr + ASIC_HI_WORD(ASICCtrl)); for (i = 2000; i > 0; i--) { if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0) break; mdelay(1); } #ifdef __i386__ if (netif_msg_hw(np)) { printk(KERN_DEBUG " Tx ring at %8.8x:\n", (int)(np->tx_ring_dma)); for (i = 0; i < TX_RING_SIZE; i++) printk(KERN_DEBUG " #%d desc. %4.4x %8.8x %8.8x.\n", i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr, np->tx_ring[i].frag[0].length); printk(KERN_DEBUG " Rx ring %8.8x:\n", (int)(np->rx_ring_dma)); for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) { printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n", i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr, np->rx_ring[i].frag[0].length); } } #endif /* __i386__ debugging only */ free_irq(dev->irq, dev); del_timer_sync(&np->timer); /* Free all the skbuffs in the Rx queue. 
*/ for (i = 0; i < RX_RING_SIZE; i++) { np->rx_ring[i].status = 0; skb = np->rx_skbuff[i]; if (skb) { dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->rx_ring[i].frag[0].addr), np->rx_buf_sz, DMA_FROM_DEVICE); dev_kfree_skb(skb); np->rx_skbuff[i] = NULL; } np->rx_ring[i].frag[0].addr = cpu_to_le32(0xBADF00D0); /* poison */ } for (i = 0; i < TX_RING_SIZE; i++) { np->tx_ring[i].next_desc = 0; skb = np->tx_skbuff[i]; if (skb) { dma_unmap_single(&np->pci_dev->dev, le32_to_cpu(np->tx_ring[i].frag[0].addr), skb->len, DMA_TO_DEVICE); dev_kfree_skb(skb); np->tx_skbuff[i] = NULL; } } return 0; } static void __devexit sundance_remove1 (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); if (dev) { struct netdev_private *np = netdev_priv(dev); unregister_netdev(dev); dma_free_coherent(&pdev->dev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); dma_free_coherent(&pdev->dev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); pci_iounmap(pdev, np->base); pci_release_regions(pdev); free_netdev(dev); pci_set_drvdata(pdev, NULL); } } #ifdef CONFIG_PM static int sundance_suspend(struct pci_dev *pci_dev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pci_dev); if (!netif_running(dev)) return 0; netdev_close(dev); netif_device_detach(dev); pci_save_state(pci_dev); pci_set_power_state(pci_dev, pci_choose_state(pci_dev, state)); return 0; } static int sundance_resume(struct pci_dev *pci_dev) { struct net_device *dev = pci_get_drvdata(pci_dev); int err = 0; if (!netif_running(dev)) return 0; pci_set_power_state(pci_dev, PCI_D0); pci_restore_state(pci_dev); err = netdev_open(dev); if (err) { printk(KERN_ERR "%s: Can't resume interface!\n", dev->name); goto out; } netif_device_attach(dev); out: return err; } #endif /* CONFIG_PM */ static struct pci_driver sundance_driver = { .name = DRV_NAME, .id_table = sundance_pci_tbl, .probe = sundance_probe1, .remove = __devexit_p(sundance_remove1), #ifdef CONFIG_PM .suspend = sundance_suspend, .resume = 
sundance_resume, #endif /* CONFIG_PM */ }; static int __init sundance_init(void) { /* when a module, this is printed whether or not devices are found in probe */ #ifdef MODULE printk(version); #endif return pci_register_driver(&sundance_driver); } static void __exit sundance_exit(void) { pci_unregister_driver(&sundance_driver); } module_init(sundance_init); module_exit(sundance_exit);
gpl-2.0
davidevinavil/kernel_s500_jb
drivers/gpu/drm/radeon/radeon_asic.c
4801
49248
/* * Copyright 2008 Advanced Micro Devices, Inc. * Copyright 2008 Red Hat Inc. * Copyright 2009 Jerome Glisse. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Dave Airlie * Alex Deucher * Jerome Glisse */ #include <linux/console.h> #include <drm/drmP.h> #include <drm/drm_crtc_helper.h> #include <drm/radeon_drm.h> #include <linux/vgaarb.h> #include <linux/vga_switcheroo.h> #include "radeon_reg.h" #include "radeon.h" #include "radeon_asic.h" #include "atom.h" /* * Registers accessors functions. 
*/ static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg) { DRM_ERROR("Invalid callback to read register 0x%04X\n", reg); BUG_ON(1); return 0; } static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) { DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n", reg, v); BUG_ON(1); } static void radeon_register_accessor_init(struct radeon_device *rdev) { rdev->mc_rreg = &radeon_invalid_rreg; rdev->mc_wreg = &radeon_invalid_wreg; rdev->pll_rreg = &radeon_invalid_rreg; rdev->pll_wreg = &radeon_invalid_wreg; rdev->pciep_rreg = &radeon_invalid_rreg; rdev->pciep_wreg = &radeon_invalid_wreg; /* Don't change order as we are overridding accessor. */ if (rdev->family < CHIP_RV515) { rdev->pcie_reg_mask = 0xff; } else { rdev->pcie_reg_mask = 0x7ff; } /* FIXME: not sure here */ if (rdev->family <= CHIP_R580) { rdev->pll_rreg = &r100_pll_rreg; rdev->pll_wreg = &r100_pll_wreg; } if (rdev->family >= CHIP_R420) { rdev->mc_rreg = &r420_mc_rreg; rdev->mc_wreg = &r420_mc_wreg; } if (rdev->family >= CHIP_RV515) { rdev->mc_rreg = &rv515_mc_rreg; rdev->mc_wreg = &rv515_mc_wreg; } if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) { rdev->mc_rreg = &rs400_mc_rreg; rdev->mc_wreg = &rs400_mc_wreg; } if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) { rdev->mc_rreg = &rs690_mc_rreg; rdev->mc_wreg = &rs690_mc_wreg; } if (rdev->family == CHIP_RS600) { rdev->mc_rreg = &rs600_mc_rreg; rdev->mc_wreg = &rs600_mc_wreg; } if (rdev->family >= CHIP_R600) { rdev->pciep_rreg = &r600_pciep_rreg; rdev->pciep_wreg = &r600_pciep_wreg; } } /* helper to disable agp */ void radeon_agp_disable(struct radeon_device *rdev) { rdev->flags &= ~RADEON_IS_AGP; if (rdev->family >= CHIP_R600) { DRM_INFO("Forcing AGP to PCIE mode\n"); rdev->flags |= RADEON_IS_PCIE; } else if (rdev->family >= CHIP_RV515 || rdev->family == CHIP_RV380 || rdev->family == CHIP_RV410 || rdev->family == CHIP_R423) { DRM_INFO("Forcing AGP to PCIE mode\n"); 
/*
 * NOTE(review): tail of a function that begins before this chunk — it selects
 * the PCIe vs. PCI GART callbacks (falling back with "Forcing AGP to PCI
 * mode") and sizes the GTT from the radeon_gart_size module parameter
 * (MiB -> bytes).  Code left byte-for-byte unchanged.
 */
rdev->flags |= RADEON_IS_PCIE; rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; } else { DRM_INFO("Forcing AGP to PCI mode\n"); rdev->flags |= RADEON_IS_PCI; rdev->asic->gart.tlb_flush = &r100_pci_gart_tlb_flush; rdev->asic->gart.set_page = &r100_pci_gart_set_page; } rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024; }
/*
 * ASIC function-pointer tables.  One static table per chip family; each
 * entry routes a driver operation to the family-specific implementation.
 */
/* r100_asic: R100-class chips — PCI GART, blit-only copy (.copy.dma is NULL),
 * legacy (non-ATOM) clock calls, no PCIe lane control. */
static struct radeon_asic r100_asic = {
	.init = &r100_init, .fini = &r100_fini, .suspend = &r100_suspend, .resume = &r100_resume,
	.vga_set_state = &r100_vga_set_state, .gpu_is_lockup = &r100_gpu_is_lockup, .asic_reset = &r100_asic_reset,
	.ioctl_wait_idle = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r100_mc_wait_for_idle,
	.gart = { .tlb_flush = &r100_pci_gart_tlb_flush, .set_page = &r100_pci_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r100_ring_ib_execute, .emit_fence = &r100_fence_ring_emit, .emit_semaphore = &r100_semaphore_ring_emit, .cs_parse = &r100_cs_parse, .ring_start = &r100_ring_start, .ring_test = &r100_ring_test, .ib_test = &r100_ib_test, } },
	.irq = { .set = &r100_irq_set, .process = &r100_irq_process, },
	.display = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, },
	.copy = { .blit = &r100_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = NULL, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r100_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r100_set_surface_reg, .clear_reg = r100_clear_surface_reg, },
	.hpd = { .init = &r100_hpd_init, .fini = &r100_hpd_fini, .sense = &r100_hpd_sense, .set_polarity = &r100_hpd_set_polarity, },
	.pm = { .misc = &r100_pm_misc, .prepare = &r100_pm_prepare, .finish = &r100_pm_finish, .init_profile = &r100_pm_init_profile, .get_dynpm_state = &r100_pm_get_dynpm_state, .get_engine_clock = &radeon_legacy_get_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock, .get_memory_clock = &radeon_legacy_get_memory_clock, .set_memory_clock = NULL, .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, },
	.pflip = { .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, .post_page_flip = &r100_post_page_flip, },
};
/* r200_asic: identical to r100_asic except the DMA copy hook is wired to
 * r200_copy_dma (default .copy remains the blit path). */
static struct radeon_asic r200_asic = {
	.init = &r100_init, .fini = &r100_fini, .suspend = &r100_suspend, .resume = &r100_resume,
	.vga_set_state = &r100_vga_set_state, .gpu_is_lockup = &r100_gpu_is_lockup, .asic_reset = &r100_asic_reset,
	.ioctl_wait_idle = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r100_mc_wait_for_idle,
	.gart = { .tlb_flush = &r100_pci_gart_tlb_flush, .set_page = &r100_pci_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r100_ring_ib_execute, .emit_fence = &r100_fence_ring_emit, .emit_semaphore = &r100_semaphore_ring_emit, .cs_parse = &r100_cs_parse, .ring_start = &r100_ring_start, .ring_test = &r100_ring_test, .ib_test = &r100_ib_test, } },
	.irq = { .set = &r100_irq_set, .process = &r100_irq_process, },
	.display = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, },
	.copy = { .blit = &r100_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r200_copy_dma, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r100_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r100_set_surface_reg, .clear_reg = r100_clear_surface_reg, },
	.hpd = { .init = &r100_hpd_init, .fini = &r100_hpd_fini, .sense = &r100_hpd_sense, .set_polarity = &r100_hpd_set_polarity, },
	.pm = { .misc = &r100_pm_misc, .prepare = &r100_pm_prepare, .finish = &r100_pm_finish, .init_profile = &r100_pm_init_profile, .get_dynpm_state = &r100_pm_get_dynpm_state, .get_engine_clock = &radeon_legacy_get_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock, .get_memory_clock = &radeon_legacy_get_memory_clock, .set_memory_clock = NULL, .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, },
	.pflip = { .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, .post_page_flip = &r100_post_page_flip, },
};
/* r300_asic: R300 core hooks (r300_init/fini, r300_cs_parse, r300 fence) on
 * the PCI GART path; initializer continues on the following source line. */
static struct radeon_asic r300_asic = {
	.init = &r300_init, .fini = &r300_fini, .suspend = &r300_suspend, .resume = &r300_resume,
	.vga_set_state = &r100_vga_set_state, .gpu_is_lockup = &r300_gpu_is_lockup, .asic_reset = &r300_asic_reset,
	.ioctl_wait_idle = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle,
	.gart = { .tlb_flush = &r100_pci_gart_tlb_flush, .set_page = &r100_pci_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r100_ring_ib_execute, .emit_fence = &r300_fence_ring_emit, .emit_semaphore = &r100_semaphore_ring_emit, .cs_parse = &r300_cs_parse, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ib_test = &r100_ib_test, } },
	.irq = { .set = &r100_irq_set, .process = &r100_irq_process, },
	.display = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, },
	.copy = { .blit = &r100_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r200_copy_dma, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r100_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r100_set_surface_reg, .clear_reg = r100_clear_surface_reg, },
	.hpd = { .init = &r100_hpd_init, .fini = &r100_hpd_fini, .sense = &r100_hpd_sense, .set_polarity = &r100_hpd_set_polarity, },
	.pm = { .misc = &r100_pm_misc, .prepare = &r100_pm_prepare, .finish = &r100_pm_finish, .init_profile = &r100_pm_init_profile, .get_dynpm_state = &r100_pm_get_dynpm_state, .get_engine_clock = &radeon_legacy_get_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock, .get_memory_clock = &radeon_legacy_get_memory_clock, .set_memory_clock =
/* (continuation of r300_asic.pm from the previous source line) */
NULL, .get_pcie_lanes = &rv370_get_pcie_lanes, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_legacy_set_clock_gating, },
	.pflip = { .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, .post_page_flip = &r100_post_page_flip, },
};
/* r300_asic_pcie: same R300 core hooks as r300_asic, but with the RV370 PCIe
 * GART callbacks instead of the PCI GART. */
static struct radeon_asic r300_asic_pcie = {
	.init = &r300_init, .fini = &r300_fini, .suspend = &r300_suspend, .resume = &r300_resume,
	.vga_set_state = &r100_vga_set_state, .gpu_is_lockup = &r300_gpu_is_lockup, .asic_reset = &r300_asic_reset,
	.ioctl_wait_idle = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle,
	.gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, .set_page = &rv370_pcie_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r100_ring_ib_execute, .emit_fence = &r300_fence_ring_emit, .emit_semaphore = &r100_semaphore_ring_emit, .cs_parse = &r300_cs_parse, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ib_test = &r100_ib_test, } },
	.irq = { .set = &r100_irq_set, .process = &r100_irq_process, },
	.display = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, },
	.copy = { .blit = &r100_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r200_copy_dma, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r100_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r100_set_surface_reg, .clear_reg = r100_clear_surface_reg, },
	.hpd = { .init = &r100_hpd_init, .fini = &r100_hpd_fini, .sense = &r100_hpd_sense, .set_polarity = &r100_hpd_set_polarity, },
	.pm = { .misc = &r100_pm_misc, .prepare = &r100_pm_prepare, .finish = &r100_pm_finish, .init_profile = &r100_pm_init_profile, .get_dynpm_state = &r100_pm_get_dynpm_state, .get_engine_clock = &radeon_legacy_get_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock, .get_memory_clock = &radeon_legacy_get_memory_clock, .set_memory_clock = NULL, .get_pcie_lanes = &rv370_get_pcie_lanes, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_legacy_set_clock_gating, },
	.pflip = { .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, .post_page_flip = &r100_post_page_flip, },
};
/* r420_asic: R420/R423/RV410 — PCIe GART, ATOM BIOS clock calls and the r420
 * power profile (radeon_asic_init patches these back to legacy on BIOS-less
 * Macs). */
static struct radeon_asic r420_asic = {
	.init = &r420_init, .fini = &r420_fini, .suspend = &r420_suspend, .resume = &r420_resume,
	.vga_set_state = &r100_vga_set_state, .gpu_is_lockup = &r300_gpu_is_lockup, .asic_reset = &r300_asic_reset,
	.ioctl_wait_idle = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r300_mc_wait_for_idle,
	.gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, .set_page = &rv370_pcie_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r100_ring_ib_execute, .emit_fence = &r300_fence_ring_emit, .emit_semaphore = &r100_semaphore_ring_emit, .cs_parse = &r300_cs_parse, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ib_test = &r100_ib_test, } },
	.irq = { .set = &r100_irq_set, .process = &r100_irq_process, },
	.display = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, },
	.copy = { .blit = &r100_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r200_copy_dma, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r100_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r100_set_surface_reg, .clear_reg = r100_clear_surface_reg, },
	.hpd = { .init = &r100_hpd_init, .fini = &r100_hpd_fini, .sense = &r100_hpd_sense, .set_polarity = &r100_hpd_set_polarity, },
	.pm = { .misc = &r100_pm_misc, .prepare = &r100_pm_prepare, .finish = &r100_pm_finish, .init_profile = &r420_pm_init_profile, .get_dynpm_state = &r100_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .get_pcie_lanes = &rv370_get_pcie_lanes, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, },
	.pflip = { .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, .post_page_flip = &r100_post_page_flip, },
};
/* rs400_asic: RS400/RS480 IGP — on-chip RS400 GART, legacy clock calls;
 * initializer continues on the following source line. */
static struct radeon_asic rs400_asic = {
	.init = &rs400_init, .fini = &rs400_fini, .suspend = &rs400_suspend, .resume = &rs400_resume,
	.vga_set_state = &r100_vga_set_state, .gpu_is_lockup = &r300_gpu_is_lockup, .asic_reset = &r300_asic_reset,
	.ioctl_wait_idle = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rs400_mc_wait_for_idle,
	.gart = { .tlb_flush = &rs400_gart_tlb_flush, .set_page = &rs400_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r100_ring_ib_execute, .emit_fence = &r300_fence_ring_emit, .emit_semaphore = &r100_semaphore_ring_emit, .cs_parse = &r300_cs_parse, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ib_test = &r100_ib_test, } },
	.irq = { .set = &r100_irq_set, .process = &r100_irq_process, },
	.display = { .bandwidth_update = &r100_bandwidth_update, .get_vblank_counter = &r100_get_vblank_counter, .wait_for_vblank = &r100_wait_for_vblank, },
	.copy = { .blit = &r100_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r200_copy_dma, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r100_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r100_set_surface_reg, .clear_reg = r100_clear_surface_reg, },
	.hpd = { .init = &r100_hpd_init, .fini = &r100_hpd_fini, .sense = &r100_hpd_sense, .set_polarity = &r100_hpd_set_polarity, },
	.pm = { .misc = &r100_pm_misc, .prepare = &r100_pm_prepare, .finish = &r100_pm_finish, .init_profile = &r100_pm_init_profile, .get_dynpm_state = &r100_pm_get_dynpm_state, .get_engine_clock = &radeon_legacy_get_engine_clock, .set_engine_clock = &radeon_legacy_set_engine_clock, .get_memory_clock = &radeon_legacy_get_memory_clock, .set_memory_clock
/* (continuation of rs400_asic.pm from the previous source line) */
= NULL, .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_legacy_set_clock_gating, },
	.pflip = { .pre_page_flip = &r100_pre_page_flip, .page_flip = &r100_page_flip, .post_page_flip = &r100_post_page_flip, },
};
/* rs600_asic: RS600 IGP — RS600 GART/IRQ/HPD/page-flip, AVIVO vblank wait,
 * ATOM clock calls. */
static struct radeon_asic rs600_asic = {
	.init = &rs600_init, .fini = &rs600_fini, .suspend = &rs600_suspend, .resume = &rs600_resume,
	.vga_set_state = &r100_vga_set_state, .gpu_is_lockup = &r300_gpu_is_lockup, .asic_reset = &rs600_asic_reset,
	.ioctl_wait_idle = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rs600_mc_wait_for_idle,
	.gart = { .tlb_flush = &rs600_gart_tlb_flush, .set_page = &rs600_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r100_ring_ib_execute, .emit_fence = &r300_fence_ring_emit, .emit_semaphore = &r100_semaphore_ring_emit, .cs_parse = &r300_cs_parse, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ib_test = &r100_ib_test, } },
	.irq = { .set = &rs600_irq_set, .process = &rs600_irq_process, },
	.display = { .bandwidth_update = &rs600_bandwidth_update, .get_vblank_counter = &rs600_get_vblank_counter, .wait_for_vblank = &avivo_wait_for_vblank, },
	.copy = { .blit = &r100_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r200_copy_dma, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r100_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r100_set_surface_reg, .clear_reg = r100_clear_surface_reg, },
	.hpd = { .init = &rs600_hpd_init, .fini = &rs600_hpd_fini, .sense = &rs600_hpd_sense, .set_polarity = &rs600_hpd_set_polarity, },
	.pm = { .misc = &rs600_pm_misc, .prepare = &rs600_pm_prepare, .finish = &rs600_pm_finish, .init_profile = &r420_pm_init_profile, .get_dynpm_state = &r100_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_atom_set_clock_gating, },
	.pflip = { .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, .post_page_flip = &rs600_post_page_flip, },
};
/* rs690_asic: RS690/RS740 IGP — RS400 GART with RS600 IRQ/HPD; note the
 * default .copy.copy hook prefers the DMA engine (r200_copy_dma), unlike most
 * tables here which default to the blit path. */
static struct radeon_asic rs690_asic = {
	.init = &rs690_init, .fini = &rs690_fini, .suspend = &rs690_suspend, .resume = &rs690_resume,
	.vga_set_state = &r100_vga_set_state, .gpu_is_lockup = &r300_gpu_is_lockup, .asic_reset = &rs600_asic_reset,
	.ioctl_wait_idle = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rs690_mc_wait_for_idle,
	.gart = { .tlb_flush = &rs400_gart_tlb_flush, .set_page = &rs400_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r100_ring_ib_execute, .emit_fence = &r300_fence_ring_emit, .emit_semaphore = &r100_semaphore_ring_emit, .cs_parse = &r300_cs_parse, .ring_start = &r300_ring_start, .ring_test = &r100_ring_test, .ib_test = &r100_ib_test, } },
	.irq = { .set = &rs600_irq_set, .process = &rs600_irq_process, },
	.display = { .get_vblank_counter = &rs600_get_vblank_counter, .bandwidth_update = &rs690_bandwidth_update, .wait_for_vblank = &avivo_wait_for_vblank, },
	.copy = { .blit = &r100_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r200_copy_dma, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r200_copy_dma, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r100_set_surface_reg, .clear_reg = r100_clear_surface_reg, },
	.hpd = { .init = &rs600_hpd_init, .fini = &rs600_hpd_fini, .sense = &rs600_hpd_sense, .set_polarity = &rs600_hpd_set_polarity, },
	.pm = { .misc = &rs600_pm_misc, .prepare = &rs600_pm_prepare, .finish = &rs600_pm_finish, .init_profile = &r420_pm_init_profile, .get_dynpm_state = &r100_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = &radeon_atom_set_clock_gating, },
	.pflip = { .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, .post_page_flip = &rs600_post_page_flip, },
};
/* rv515_asic: RV515 — PCIe GART, RV515 ring start/bandwidth, RS600 IRQ/HPD;
 * initializer continues on the following source line. */
static struct radeon_asic rv515_asic = {
	.init = &rv515_init, .fini = &rv515_fini, .suspend = &rv515_suspend, .resume = &rv515_resume,
	.vga_set_state = &r100_vga_set_state, .gpu_is_lockup = &r300_gpu_is_lockup, .asic_reset = &rs600_asic_reset,
	.ioctl_wait_idle = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &rv515_mc_wait_for_idle,
	.gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, .set_page = &rv370_pcie_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r100_ring_ib_execute, .emit_fence = &r300_fence_ring_emit, .emit_semaphore = &r100_semaphore_ring_emit, .cs_parse = &r300_cs_parse, .ring_start = &rv515_ring_start, .ring_test = &r100_ring_test, .ib_test = &r100_ib_test, } },
	.irq = { .set = &rs600_irq_set, .process = &rs600_irq_process, },
	.display = { .get_vblank_counter = &rs600_get_vblank_counter, .bandwidth_update = &rv515_bandwidth_update, .wait_for_vblank = &avivo_wait_for_vblank, },
	.copy = { .blit = &r100_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r200_copy_dma, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r100_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r100_set_surface_reg, .clear_reg = r100_clear_surface_reg, },
	.hpd = { .init = &rs600_hpd_init, .fini = &rs600_hpd_fini, .sense = &rs600_hpd_sense, .set_polarity = &rs600_hpd_set_polarity, },
	.pm = { .misc = &rs600_pm_misc, .prepare = &rs600_pm_prepare, .finish = &rs600_pm_finish, .init_profile = &r420_pm_init_profile, .get_dynpm_state = &r100_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .get_pcie_lanes = &rv370_get_pcie_lanes,
/* (continuation of rv515_asic.pm from the previous source line) */
.set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, },
	.pflip = { .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, .post_page_flip = &rs600_post_page_flip, },
};
/* r520_asic: R520/RV530/RV560/RV570/R580 — mostly RV515 hooks with R520
 * init/resume and R520 MC idle wait. */
static struct radeon_asic r520_asic = {
	.init = &r520_init, .fini = &rv515_fini, .suspend = &rv515_suspend, .resume = &r520_resume,
	.vga_set_state = &r100_vga_set_state, .gpu_is_lockup = &r300_gpu_is_lockup, .asic_reset = &rs600_asic_reset,
	.ioctl_wait_idle = NULL, .gui_idle = &r100_gui_idle, .mc_wait_for_idle = &r520_mc_wait_for_idle,
	.gart = { .tlb_flush = &rv370_pcie_gart_tlb_flush, .set_page = &rv370_pcie_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r100_ring_ib_execute, .emit_fence = &r300_fence_ring_emit, .emit_semaphore = &r100_semaphore_ring_emit, .cs_parse = &r300_cs_parse, .ring_start = &rv515_ring_start, .ring_test = &r100_ring_test, .ib_test = &r100_ib_test, } },
	.irq = { .set = &rs600_irq_set, .process = &rs600_irq_process, },
	.display = { .bandwidth_update = &rv515_bandwidth_update, .get_vblank_counter = &rs600_get_vblank_counter, .wait_for_vblank = &avivo_wait_for_vblank, },
	.copy = { .blit = &r100_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = &r200_copy_dma, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r100_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r100_set_surface_reg, .clear_reg = r100_clear_surface_reg, },
	.hpd = { .init = &rs600_hpd_init, .fini = &rs600_hpd_fini, .sense = &rs600_hpd_sense, .set_polarity = &rs600_hpd_set_polarity, },
	.pm = { .misc = &rs600_pm_misc, .prepare = &rs600_pm_prepare, .finish = &rs600_pm_finish, .init_profile = &r420_pm_init_profile, .get_dynpm_state = &r100_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .get_pcie_lanes = &rv370_get_pcie_lanes, .set_pcie_lanes = &rv370_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, },
	.pflip = { .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, .post_page_flip = &rs600_post_page_flip, },
};
/* r600_asic: R600/RV6xx — R600 ring/CS/surface/HPD, r600_ioctl_wait_idle
 * wired, no .ring_start entry in the GFX ring, blit-only copy. */
static struct radeon_asic r600_asic = {
	.init = &r600_init, .fini = &r600_fini, .suspend = &r600_suspend, .resume = &r600_resume,
	.vga_set_state = &r600_vga_set_state, .gpu_is_lockup = &r600_gpu_is_lockup, .asic_reset = &r600_asic_reset,
	.ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &r600_mc_wait_for_idle,
	.gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r600_ring_ib_execute, .emit_fence = &r600_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &r600_cs_parse, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, } },
	.irq = { .set = &r600_irq_set, .process = &r600_irq_process, },
	.display = { .bandwidth_update = &rv515_bandwidth_update, .get_vblank_counter = &rs600_get_vblank_counter, .wait_for_vblank = &avivo_wait_for_vblank, },
	.copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = NULL, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r600_set_surface_reg, .clear_reg = r600_clear_surface_reg, },
	.hpd = { .init = &r600_hpd_init, .fini = &r600_hpd_fini, .sense = &r600_hpd_sense, .set_polarity = &r600_hpd_set_polarity, },
	.pm = { .misc = &r600_pm_misc, .prepare = &rs600_pm_prepare, .finish = &rs600_pm_finish, .init_profile = &r600_pm_init_profile, .get_dynpm_state = &r600_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .get_pcie_lanes = &r600_get_pcie_lanes, .set_pcie_lanes = &r600_set_pcie_lanes, .set_clock_gating = NULL, },
	.pflip = { .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rs600_page_flip, .post_page_flip = &rs600_post_page_flip, },
};
/* rs780_asic: RS780/RS880 IGP — R600 hooks with RS690 bandwidth update and
 * the rs780 power profile; no memory-clock or PCIe-lane control.
 * Initializer continues on the following source line. */
static struct radeon_asic rs780_asic = {
	.init = &r600_init, .fini = &r600_fini, .suspend = &r600_suspend, .resume = &r600_resume,
	.gpu_is_lockup = &r600_gpu_is_lockup, .vga_set_state = &r600_vga_set_state, .asic_reset = &r600_asic_reset,
	.ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &r600_mc_wait_for_idle,
	.gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r600_ring_ib_execute, .emit_fence = &r600_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &r600_cs_parse, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, } },
	.irq = { .set = &r600_irq_set, .process = &r600_irq_process, },
	.display = { .bandwidth_update = &rs690_bandwidth_update, .get_vblank_counter = &rs600_get_vblank_counter, .wait_for_vblank = &avivo_wait_for_vblank, },
	.copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = NULL, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r600_set_surface_reg, .clear_reg = r600_clear_surface_reg, },
	.hpd = { .init = &r600_hpd_init, .fini = &r600_hpd_fini, .sense = &r600_hpd_sense, .set_polarity = &r600_hpd_set_polarity, },
	.pm = { .misc = &r600_pm_misc, .prepare = &rs600_pm_prepare, .finish = &rs600_pm_finish, .init_profile = &rs780_pm_init_profile, .get_dynpm_state = &r600_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = NULL, .set_memory_clock = NULL, .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = NULL, },
	.pflip = { .pre_page_flip = &rs600_pre_page_flip, .page_flip =
/* (continuation of rs780_asic.pflip from the previous source line) */
&rs600_page_flip, .post_page_flip = &rs600_post_page_flip, },
};
/* rv770_asic: RV7xx — R600 hooks with RV770-specific pm misc and page flip,
 * ATOM clock and PCIe-lane control fully wired. */
static struct radeon_asic rv770_asic = {
	.init = &rv770_init, .fini = &rv770_fini, .suspend = &rv770_suspend, .resume = &rv770_resume,
	.asic_reset = &r600_asic_reset, .gpu_is_lockup = &r600_gpu_is_lockup, .vga_set_state = &r600_vga_set_state,
	.ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &r600_mc_wait_for_idle,
	.gart = { .tlb_flush = &r600_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &r600_ring_ib_execute, .emit_fence = &r600_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &r600_cs_parse, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, } },
	.irq = { .set = &r600_irq_set, .process = &r600_irq_process, },
	.display = { .bandwidth_update = &rv515_bandwidth_update, .get_vblank_counter = &rs600_get_vblank_counter, .wait_for_vblank = &avivo_wait_for_vblank, },
	.copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = NULL, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r600_set_surface_reg, .clear_reg = r600_clear_surface_reg, },
	.hpd = { .init = &r600_hpd_init, .fini = &r600_hpd_fini, .sense = &r600_hpd_sense, .set_polarity = &r600_hpd_set_polarity, },
	.pm = { .misc = &rv770_pm_misc, .prepare = &rs600_pm_prepare, .finish = &rs600_pm_finish, .init_profile = &r600_pm_init_profile, .get_dynpm_state = &r600_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .get_pcie_lanes = &r600_get_pcie_lanes, .set_pcie_lanes = &r600_set_pcie_lanes, .set_clock_gating = &radeon_atom_set_clock_gating, },
	.pflip = { .pre_page_flip = &rs600_pre_page_flip, .page_flip = &rv770_page_flip, .post_page_flip = &rs600_post_page_flip, },
};
/* evergreen_asic: Evergreen (Cedar..Cypress) — evergreen core/IRQ/HPD and
 * DCE4 vblank wait; still R600 ring fence/semaphore and copy hooks. */
static struct radeon_asic evergreen_asic = {
	.init = &evergreen_init, .fini = &evergreen_fini, .suspend = &evergreen_suspend, .resume = &evergreen_resume,
	.gpu_is_lockup = &evergreen_gpu_is_lockup, .asic_reset = &evergreen_asic_reset, .vga_set_state = &r600_vga_set_state,
	.ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
	.gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &evergreen_ring_ib_execute, .emit_fence = &r600_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &evergreen_cs_parse, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, } },
	.irq = { .set = &evergreen_irq_set, .process = &evergreen_irq_process, },
	.display = { .bandwidth_update = &evergreen_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, },
	.copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = NULL, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r600_set_surface_reg, .clear_reg = r600_clear_surface_reg, },
	.hpd = { .init = &evergreen_hpd_init, .fini = &evergreen_hpd_fini, .sense = &evergreen_hpd_sense, .set_polarity = &evergreen_hpd_set_polarity, },
	.pm = { .misc = &evergreen_pm_misc, .prepare = &evergreen_pm_prepare, .finish = &evergreen_pm_finish, .init_profile = &r600_pm_init_profile, .get_dynpm_state = &r600_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .get_pcie_lanes = &r600_get_pcie_lanes, .set_pcie_lanes = &r600_set_pcie_lanes, .set_clock_gating = NULL, },
	.pflip = { .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, .post_page_flip = &evergreen_post_page_flip, },
};
/* sumo_asic: Sumo/Palm APU — evergreen hooks with the sumo power profile and
 * no memory-clock / PCIe-lane control (integrated part).  Initializer
 * continues on the following source line. */
static struct radeon_asic sumo_asic = {
	.init = &evergreen_init, .fini = &evergreen_fini, .suspend = &evergreen_suspend, .resume = &evergreen_resume,
	.gpu_is_lockup = &evergreen_gpu_is_lockup, .asic_reset = &evergreen_asic_reset, .vga_set_state = &r600_vga_set_state,
	.ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
	.gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &evergreen_ring_ib_execute, .emit_fence = &r600_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &evergreen_cs_parse, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, }, },
	.irq = { .set = &evergreen_irq_set, .process = &evergreen_irq_process, },
	.display = { .bandwidth_update = &evergreen_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, },
	.copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = NULL, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r600_set_surface_reg, .clear_reg = r600_clear_surface_reg, },
	.hpd = { .init = &evergreen_hpd_init, .fini = &evergreen_hpd_fini, .sense = &evergreen_hpd_sense, .set_polarity = &evergreen_hpd_set_polarity, },
	.pm = { .misc = &evergreen_pm_misc, .prepare = &evergreen_pm_prepare, .finish = &evergreen_pm_finish, .init_profile = &sumo_pm_init_profile, .get_dynpm_state = &r600_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = NULL, .set_memory_clock = NULL, .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = NULL, },
	.pflip = { .pre_page_flip =
/* (continuation of sumo_asic.pflip from the previous source line) */
&evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, .post_page_flip = &evergreen_post_page_flip, },
};
/* btc_asic: Barts/Turks/Caicos — evergreen hooks with ATOM memory-clock
 * control but no PCIe-lane control or clock gating. */
static struct radeon_asic btc_asic = {
	.init = &evergreen_init, .fini = &evergreen_fini, .suspend = &evergreen_suspend, .resume = &evergreen_resume,
	.gpu_is_lockup = &evergreen_gpu_is_lockup, .asic_reset = &evergreen_asic_reset, .vga_set_state = &r600_vga_set_state,
	.ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
	.gart = { .tlb_flush = &evergreen_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, },
	.ring = { [RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &evergreen_ring_ib_execute, .emit_fence = &r600_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &evergreen_cs_parse, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, } },
	.irq = { .set = &evergreen_irq_set, .process = &evergreen_irq_process, },
	.display = { .bandwidth_update = &evergreen_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, },
	.copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = NULL, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r600_set_surface_reg, .clear_reg = r600_clear_surface_reg, },
	.hpd = { .init = &evergreen_hpd_init, .fini = &evergreen_hpd_fini, .sense = &evergreen_hpd_sense, .set_polarity = &evergreen_hpd_set_polarity, },
	.pm = { .misc = &evergreen_pm_misc, .prepare = &evergreen_pm_prepare, .finish = &evergreen_pm_finish, .init_profile = &r600_pm_init_profile, .get_dynpm_state = &r600_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = NULL, },
	.pflip = { .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, .post_page_flip = &evergreen_post_page_flip, },
};
/* cayman_vm_funcs: GPU virtual-memory paging hooks for Cayman. */
static const struct radeon_vm_funcs cayman_vm_funcs = {
	.init = &cayman_vm_init, .fini = &cayman_vm_fini, .bind = &cayman_vm_bind, .unbind = &cayman_vm_unbind,
	.tlb_flush = &cayman_vm_tlb_flush, .page_flags = &cayman_vm_page_flags, .set_page = &cayman_vm_set_page,
};
/* cayman_asic: Cayman — three compute-capable CP rings (GFX, CP1, CP2), each
 * with identical cayman/evergreen hooks.  Initializer continues on the
 * following source line. */
static struct radeon_asic cayman_asic = {
	.init = &cayman_init, .fini = &cayman_fini, .suspend = &cayman_suspend, .resume = &cayman_resume,
	.gpu_is_lockup = &cayman_gpu_is_lockup, .asic_reset = &cayman_asic_reset, .vga_set_state = &r600_vga_set_state,
	.ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
	.gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, },
	.ring = {
		[RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &cayman_ring_ib_execute, .ib_parse = &evergreen_ib_parse, .emit_fence = &cayman_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &evergreen_cs_parse, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, },
		[CAYMAN_RING_TYPE_CP1_INDEX] = { .ib_execute = &cayman_ring_ib_execute, .ib_parse = &evergreen_ib_parse, .emit_fence = &cayman_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &evergreen_cs_parse, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, },
		[CAYMAN_RING_TYPE_CP2_INDEX] = { .ib_execute = &cayman_ring_ib_execute, .ib_parse = &evergreen_ib_parse, .emit_fence = &cayman_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &evergreen_cs_parse, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, } },
	.irq = { .set = &evergreen_irq_set, .process = &evergreen_irq_process, },
	.display = { .bandwidth_update = &evergreen_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, },
	.copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = NULL, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r600_set_surface_reg, .clear_reg = r600_clear_surface_reg, },
	.hpd = { .init = &evergreen_hpd_init, .fini = &evergreen_hpd_fini, .sense = &evergreen_hpd_sense, .set_polarity = &evergreen_hpd_set_polarity, },
	.pm = { .misc = &evergreen_pm_misc, .prepare = &evergreen_pm_prepare, .finish = &evergreen_pm_finish, .init_profile = &r600_pm_init_profile, .get_dynpm_state = &r600_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = NULL, },
	.pflip = { .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, .post_page_flip = &evergreen_post_page_flip, },
};
/* trinity_asic: Trinity APU — cayman hooks with DCE6 bandwidth update and the
 * sumo power profile; initializer continues on the following source line. */
static struct radeon_asic trinity_asic = {
	.init = &cayman_init, .fini = &cayman_fini, .suspend = &cayman_suspend, .resume = &cayman_resume,
	.gpu_is_lockup = &cayman_gpu_is_lockup, .asic_reset = &cayman_asic_reset, .vga_set_state = &r600_vga_set_state,
	.ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
	.gart = { .tlb_flush = &cayman_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, },
	.ring = {
		[RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &cayman_ring_ib_execute, .ib_parse = &evergreen_ib_parse, .emit_fence = &cayman_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &evergreen_cs_parse, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, },
		[CAYMAN_RING_TYPE_CP1_INDEX] = { .ib_execute = &cayman_ring_ib_execute, .ib_parse = &evergreen_ib_parse, .emit_fence = &cayman_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &evergreen_cs_parse,
/* (continuation of trinity_asic.ring[CP1] from the previous source line) */
.ring_test = &r600_ring_test, .ib_test = &r600_ib_test, },
		[CAYMAN_RING_TYPE_CP2_INDEX] = { .ib_execute = &cayman_ring_ib_execute, .ib_parse = &evergreen_ib_parse, .emit_fence = &cayman_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = &evergreen_cs_parse, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, } },
	.irq = { .set = &evergreen_irq_set, .process = &evergreen_irq_process, },
	.display = { .bandwidth_update = &dce6_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, },
	.copy = { .blit = &r600_copy_blit, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = NULL, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = &r600_copy_blit, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r600_set_surface_reg, .clear_reg = r600_clear_surface_reg, },
	.hpd = { .init = &evergreen_hpd_init, .fini = &evergreen_hpd_fini, .sense = &evergreen_hpd_sense, .set_polarity = &evergreen_hpd_set_polarity, },
	.pm = { .misc = &evergreen_pm_misc, .prepare = &evergreen_pm_prepare, .finish = &evergreen_pm_finish, .init_profile = &sumo_pm_init_profile, .get_dynpm_state = &r600_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = NULL, .set_memory_clock = NULL, .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = NULL, },
	.pflip = { .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, .post_page_flip = &evergreen_post_page_flip, },
};
/* si_vm_funcs: VM paging hooks for Southern Islands, reusing the cayman
 * page-flag/set-page helpers. */
static const struct radeon_vm_funcs si_vm_funcs = {
	.init = &si_vm_init, .fini = &si_vm_fini, .bind = &si_vm_bind, .unbind = &si_vm_unbind,
	.tlb_flush = &si_vm_tlb_flush, .page_flags = &cayman_vm_page_flags, .set_page = &cayman_vm_set_page,
};
/* si_asic: Southern Islands — three SI rings with no CS parser (UMS command
 * checking not wired: .cs_parse = NULL) and all copy hooks NULL. */
static struct radeon_asic si_asic = {
	.init = &si_init, .fini = &si_fini, .suspend = &si_suspend, .resume = &si_resume,
	.gpu_is_lockup = &si_gpu_is_lockup, .asic_reset = &si_asic_reset, .vga_set_state = &r600_vga_set_state,
	.ioctl_wait_idle = r600_ioctl_wait_idle, .gui_idle = &r600_gui_idle, .mc_wait_for_idle = &evergreen_mc_wait_for_idle,
	.gart = { .tlb_flush = &si_pcie_gart_tlb_flush, .set_page = &rs600_gart_set_page, },
	.ring = {
		[RADEON_RING_TYPE_GFX_INDEX] = { .ib_execute = &si_ring_ib_execute, .ib_parse = &si_ib_parse, .emit_fence = &si_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = NULL, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, },
		[CAYMAN_RING_TYPE_CP1_INDEX] = { .ib_execute = &si_ring_ib_execute, .ib_parse = &si_ib_parse, .emit_fence = &si_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = NULL, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, },
		[CAYMAN_RING_TYPE_CP2_INDEX] = { .ib_execute = &si_ring_ib_execute, .ib_parse = &si_ib_parse, .emit_fence = &si_fence_ring_emit, .emit_semaphore = &r600_semaphore_ring_emit, .cs_parse = NULL, .ring_test = &r600_ring_test, .ib_test = &r600_ib_test, } },
	.irq = { .set = &si_irq_set, .process = &si_irq_process, },
	.display = { .bandwidth_update = &dce6_bandwidth_update, .get_vblank_counter = &evergreen_get_vblank_counter, .wait_for_vblank = &dce4_wait_for_vblank, },
	.copy = { .blit = NULL, .blit_ring_index = RADEON_RING_TYPE_GFX_INDEX, .dma = NULL, .dma_ring_index = RADEON_RING_TYPE_GFX_INDEX, .copy = NULL, .copy_ring_index = RADEON_RING_TYPE_GFX_INDEX, },
	.surface = { .set_reg = r600_set_surface_reg, .clear_reg = r600_clear_surface_reg, },
	.hpd = { .init = &evergreen_hpd_init, .fini = &evergreen_hpd_fini, .sense = &evergreen_hpd_sense, .set_polarity = &evergreen_hpd_set_polarity, },
	.pm = { .misc = &evergreen_pm_misc, .prepare = &evergreen_pm_prepare, .finish = &evergreen_pm_finish, .init_profile = &sumo_pm_init_profile, .get_dynpm_state = &r600_pm_get_dynpm_state, .get_engine_clock = &radeon_atom_get_engine_clock, .set_engine_clock = &radeon_atom_set_engine_clock, .get_memory_clock = &radeon_atom_get_memory_clock, .set_memory_clock = &radeon_atom_set_memory_clock, .get_pcie_lanes = NULL, .set_pcie_lanes = NULL, .set_clock_gating = NULL, },
	.pflip = { .pre_page_flip = &evergreen_pre_page_flip, .page_flip = &evergreen_page_flip, .post_page_flip = &evergreen_post_page_flip, },
};
/*
 * NOTE(review): head of radeon_asic_init() — the function continues past the
 * end of this chunk (its switch is not closed here), so it is reproduced
 * byte-for-byte.  It selects the per-family table above from rdev->family and
 * patches the R420 clock hooks back to legacy on BIOS-less (Mac) boards.
 */
int radeon_asic_init(struct radeon_device *rdev) { radeon_register_accessor_init(rdev); /* set the number of crtcs */ if (rdev->flags & RADEON_SINGLE_CRTC) rdev->num_crtc = 1; else rdev->num_crtc = 2; switch (rdev->family) { case CHIP_R100: case CHIP_RV100: case CHIP_RS100: case CHIP_RV200: case CHIP_RS200: rdev->asic = &r100_asic; break; case CHIP_R200: case CHIP_RV250: case CHIP_RS300: case CHIP_RV280: rdev->asic = &r200_asic; break; case CHIP_R300: case CHIP_R350: case CHIP_RV350: case CHIP_RV380: if (rdev->flags & RADEON_IS_PCIE) rdev->asic = &r300_asic_pcie; else rdev->asic = &r300_asic; break; case CHIP_R420: case CHIP_R423: case CHIP_RV410: rdev->asic = &r420_asic; /* handle macs */ if (rdev->bios == NULL) { rdev->asic->pm.get_engine_clock = &radeon_legacy_get_engine_clock; rdev->asic->pm.set_engine_clock = &radeon_legacy_set_engine_clock; rdev->asic->pm.get_memory_clock = &radeon_legacy_get_memory_clock; rdev->asic->pm.set_memory_clock = NULL; } break; case CHIP_RS400: case CHIP_RS480: rdev->asic = &rs400_asic; break; case CHIP_RS600: rdev->asic = &rs600_asic; break; case CHIP_RS690: case CHIP_RS740: rdev->asic = &rs690_asic; break; case CHIP_RV515: rdev->asic = &rv515_asic; break; case CHIP_R520: case CHIP_RV530: case CHIP_RV560: case CHIP_RV570: case CHIP_R580: rdev->asic = &r520_asic; break; case CHIP_R600: case CHIP_RV610: case CHIP_RV630: case CHIP_RV620: case CHIP_RV635: case CHIP_RV670: rdev->asic = &r600_asic; break; case CHIP_RS780: case CHIP_RS880: rdev->asic = &rs780_asic; break; case CHIP_RV770: case CHIP_RV730: case CHIP_RV710: case CHIP_RV740: rdev->asic = &rv770_asic; break; case CHIP_CEDAR: case CHIP_REDWOOD: case CHIP_JUNIPER: case CHIP_CYPRESS:
case CHIP_HEMLOCK: /* set num crtcs */ if (rdev->family == CHIP_CEDAR) rdev->num_crtc = 4; else rdev->num_crtc = 6; rdev->asic = &evergreen_asic; break; case CHIP_PALM: case CHIP_SUMO: case CHIP_SUMO2: rdev->asic = &sumo_asic; break; case CHIP_BARTS: case CHIP_TURKS: case CHIP_CAICOS: /* set num crtcs */ if (rdev->family == CHIP_CAICOS) rdev->num_crtc = 4; else rdev->num_crtc = 6; rdev->asic = &btc_asic; break; case CHIP_CAYMAN: rdev->asic = &cayman_asic; /* set num crtcs */ rdev->num_crtc = 6; rdev->vm_manager.funcs = &cayman_vm_funcs; break; case CHIP_ARUBA: rdev->asic = &trinity_asic; /* set num crtcs */ rdev->num_crtc = 4; rdev->vm_manager.funcs = &cayman_vm_funcs; break; case CHIP_TAHITI: case CHIP_PITCAIRN: case CHIP_VERDE: rdev->asic = &si_asic; /* set num crtcs */ rdev->num_crtc = 6; rdev->vm_manager.funcs = &si_vm_funcs; break; default: /* FIXME: not supported yet */ return -EINVAL; } if (rdev->flags & RADEON_IS_IGP) { rdev->asic->pm.get_memory_clock = NULL; rdev->asic->pm.set_memory_clock = NULL; } return 0; }
gpl-2.0
SimpleAOSP-Kernel/kernel_hammerhead
arch/powerpc/platforms/cell/celleb_setup.c
7617
6020
/* * Celleb setup code * * (C) Copyright 2006-2007 TOSHIBA CORPORATION * * This code is based on arch/powerpc/platforms/cell/setup.c: * Copyright (C) 1995 Linus Torvalds * Adapted from 'alpha' version by Gary Thomas * Modified by Cort Dougan (cort@cs.nmt.edu) * Modified by PPC64 Team, IBM Corp * Modified by Cell Team, IBM Deutschland Entwicklung GmbH * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
*/ #undef DEBUG #include <linux/cpu.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/export.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/console.h> #include <linux/of_platform.h> #include <asm/mmu.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/cputable.h> #include <asm/irq.h> #include <asm/time.h> #include <asm/spu_priv1.h> #include <asm/firmware.h> #include <asm/rtas.h> #include <asm/cell-regs.h> #include "beat_interrupt.h" #include "beat_wrapper.h" #include "beat.h" #include "celleb_pci.h" #include "interrupt.h" #include "pervasive.h" #include "ras.h" static char celleb_machine_type[128] = "Celleb"; static void celleb_show_cpuinfo(struct seq_file *m) { struct device_node *root; const char *model = ""; root = of_find_node_by_path("/"); if (root) model = of_get_property(root, "model", NULL); /* using "CHRP" is to trick anaconda into installing FCx into Celleb */ seq_printf(m, "machine\t\t: %s %s\n", celleb_machine_type, model); of_node_put(root); } static int __init celleb_machine_type_hack(char *ptr) { strlcpy(celleb_machine_type, ptr, sizeof(celleb_machine_type)); return 0; } __setup("celleb_machine_type_hack=", celleb_machine_type_hack); static void celleb_progress(char *s, unsigned short hex) { printk("*** %04x : %s\n", hex, s ? 
s : ""); } static void __init celleb_setup_arch_common(void) { /* init to some ~sane value until calibrate_delay() runs */ loops_per_jiffy = 50000000; #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif } static struct of_device_id celleb_bus_ids[] __initdata = { { .type = "scc", }, { .type = "ioif", }, /* old style */ {}, }; static int __init celleb_publish_devices(void) { /* Publish OF platform devices for southbridge IOs */ of_platform_bus_probe(NULL, celleb_bus_ids, NULL); return 0; } machine_device_initcall(celleb_beat, celleb_publish_devices); machine_device_initcall(celleb_native, celleb_publish_devices); /* * functions for Celleb-Beat */ static void __init celleb_setup_arch_beat(void) { #ifdef CONFIG_SPU_BASE spu_priv1_ops = &spu_priv1_beat_ops; spu_management_ops = &spu_management_of_ops; #endif celleb_setup_arch_common(); } static int __init celleb_probe_beat(void) { unsigned long root = of_get_flat_dt_root(); if (!of_flat_dt_is_compatible(root, "Beat")) return 0; powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS | FW_FEATURE_BEAT | FW_FEATURE_LPAR; hpte_init_beat_v3(); return 1; } /* * functions for Celleb-native */ static void __init celleb_init_IRQ_native(void) { iic_init_IRQ(); spider_init_IRQ(); } static void __init celleb_setup_arch_native(void) { #ifdef CONFIG_SPU_BASE spu_priv1_ops = &spu_priv1_mmio_ops; spu_management_ops = &spu_management_of_ops; #endif cbe_regs_init(); #ifdef CONFIG_CBE_RAS cbe_ras_init(); #endif #ifdef CONFIG_SMP smp_init_cell(); #endif cbe_pervasive_init(); /* XXX: nvram initialization should be added */ celleb_setup_arch_common(); } static int __init celleb_probe_native(void) { unsigned long root = of_get_flat_dt_root(); if (of_flat_dt_is_compatible(root, "Beat") || !of_flat_dt_is_compatible(root, "TOSHIBA,Celleb")) return 0; powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS; hpte_init_native(); return 1; } /* * machine definitions */ define_machine(celleb_beat) { .name = "Cell Reference Set (Beat)", .probe = 
celleb_probe_beat, .setup_arch = celleb_setup_arch_beat, .show_cpuinfo = celleb_show_cpuinfo, .restart = beat_restart, .power_off = beat_power_off, .halt = beat_halt, .get_rtc_time = beat_get_rtc_time, .set_rtc_time = beat_set_rtc_time, .calibrate_decr = generic_calibrate_decr, .progress = celleb_progress, .power_save = beat_power_save, .nvram_size = beat_nvram_get_size, .nvram_read = beat_nvram_read, .nvram_write = beat_nvram_write, .set_dabr = beat_set_xdabr, .init_IRQ = beatic_init_IRQ, .get_irq = beatic_get_irq, .pci_probe_mode = celleb_pci_probe_mode, .pci_setup_phb = celleb_setup_phb, #ifdef CONFIG_KEXEC .kexec_cpu_down = beat_kexec_cpu_down, #endif }; define_machine(celleb_native) { .name = "Cell Reference Set (native)", .probe = celleb_probe_native, .setup_arch = celleb_setup_arch_native, .show_cpuinfo = celleb_show_cpuinfo, .restart = rtas_restart, .power_off = rtas_power_off, .halt = rtas_halt, .get_boot_time = rtas_get_boot_time, .get_rtc_time = rtas_get_rtc_time, .set_rtc_time = rtas_set_rtc_time, .calibrate_decr = generic_calibrate_decr, .progress = celleb_progress, .pci_probe_mode = celleb_pci_probe_mode, .pci_setup_phb = celleb_setup_phb, .init_IRQ = celleb_init_IRQ_native, };
gpl-2.0
MassStash/htc_m8_kernel_GPE_6.0
drivers/media/video/zoran/zr36060.c
9665
30675
/* * Zoran ZR36060 basic configuration functions * * Copyright (C) 2002 Laurent Pinchart <laurent.pinchart@skynet.be> * * $Id: zr36060.c,v 1.1.2.22 2003/05/06 09:35:36 rbultje Exp $ * * ------------------------------------------------------------------------ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ------------------------------------------------------------------------ */ #define ZR060_VERSION "v0.7" #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/wait.h> /* I/O commands, error codes */ #include <asm/io.h> /* headerfile of this module */ #include "zr36060.h" /* codec io API */ #include "videocodec.h" /* it doesn't make sense to have more than 20 or so, just to prevent some unwanted loops */ #define MAX_CODECS 20 /* amount of chips attached via this driver */ static int zr36060_codecs; static bool low_bitrate; module_param(low_bitrate, bool, 0); MODULE_PARM_DESC(low_bitrate, "Buz compatibility option, halves bitrate"); /* debugging is available via module parameter */ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-4)"); #define dprintk(num, format, args...) 
\ do { \ if (debug >= num) \ printk(format, ##args); \ } while (0) /* ========================================================================= Local hardware I/O functions: read/write via codec layer (registers are located in the master device) ========================================================================= */ /* read and write functions */ static u8 zr36060_read (struct zr36060 *ptr, u16 reg) { u8 value = 0; // just in case something is wrong... if (ptr->codec->master_data->readreg) value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xff; else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing read!\n", ptr->name); //dprintk(4, "%s: reading from 0x%04x: %02x\n",ptr->name,reg,value); return value; } static void zr36060_write(struct zr36060 *ptr, u16 reg, u8 value) { //dprintk(4, "%s: writing 0x%02x to 0x%04x\n",ptr->name,value,reg); dprintk(4, "0x%02x @0x%04x\n", value, reg); // just in case something is wrong... if (ptr->codec->master_data->writereg) ptr->codec->master_data->writereg(ptr->codec, reg, value); else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing written!\n", ptr->name); } /* ========================================================================= Local helper function: status read ========================================================================= */ /* status is kept in datastructure */ static u8 zr36060_read_status (struct zr36060 *ptr) { ptr->status = zr36060_read(ptr, ZR060_CFSR); zr36060_read(ptr, 0); return ptr->status; } /* ========================================================================= Local helper function: scale factor read ========================================================================= */ /* scale factor is kept in datastructure */ static u16 zr36060_read_scalefactor (struct zr36060 *ptr) { ptr->scalefact = (zr36060_read(ptr, ZR060_SF_HI) << 8) | (zr36060_read(ptr, ZR060_SF_LO) & 0xFF); /* leave 0 selected for an eventually GO from master */ zr36060_read(ptr, 0); return ptr->scalefact; 
} /* ========================================================================= Local helper function: wait if codec is ready to proceed (end of processing) or time is over ========================================================================= */ static void zr36060_wait_end (struct zr36060 *ptr) { int i = 0; while (zr36060_read_status(ptr) & ZR060_CFSR_Busy) { udelay(1); if (i++ > 200000) { // 200ms, there is for sure something wrong!!! dprintk(1, "%s: timeout at wait_end (last status: 0x%02x)\n", ptr->name, ptr->status); break; } } } /* ========================================================================= Local helper function: basic test of "connectivity", writes/reads to/from memory the SOF marker ========================================================================= */ static int zr36060_basic_test (struct zr36060 *ptr) { if ((zr36060_read(ptr, ZR060_IDR_DEV) != 0x33) && (zr36060_read(ptr, ZR060_IDR_REV) != 0x01)) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to jpeg processor!\n", ptr->name); return -ENXIO; } zr36060_wait_end(ptr); if (ptr->status & ZR060_CFSR_Busy) { dprintk(1, KERN_ERR "%s: attach failed, jpeg processor failed (end flag)!\n", ptr->name); return -EBUSY; } return 0; /* looks good! */ } /* ========================================================================= Local helper function: simple loop for pushing the init datasets ========================================================================= */ static int zr36060_pushit (struct zr36060 *ptr, u16 startreg, u16 len, const char *data) { int i = 0; dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, startreg, len); while (i < len) { zr36060_write(ptr, startreg++, data[i++]); } return i; } /* ========================================================================= Basic datasets: jpeg baseline setup data (you find it on lots places in internet, or just extract it from any regular .jpg image...) 
Could be variable, but until it's not needed it they are just fixed to save memory. Otherwise expand zr36060 structure with arrays, push the values to it and initialize from there, as e.g. the linux zr36057/60 driver does it. ========================================================================= */ static const char zr36060_dqt[0x86] = { 0xff, 0xdb, //Marker: DQT 0x00, 0x84, //Length: 2*65+2 0x00, //Pq,Tq first table 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25, 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33, 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44, 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63, 0x01, //Pq,Tq second table 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63 }; static const char zr36060_dht[0x1a4] = { 0xff, 0xc4, //Marker: DHT 0x01, 0xa2, //Length: 2*AC, 2*DC 0x00, //DC first table 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x01, //DC second table 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x10, //AC first table 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 
0x52, 0xD1, 0xF0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0x11, //AC second table 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA }; /* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */ #define NO_OF_COMPONENTS 0x3 
//Y,U,V #define BASELINE_PRECISION 0x8 //MCU size (?) static const char zr36060_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT static const char zr36060_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC static const char zr36060_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC /* horizontal 422 decimation setup (maybe we support 411 or so later, too) */ static const char zr36060_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 }; static const char zr36060_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 }; /* ========================================================================= Local helper functions: calculation and setup of parameter-dependent JPEG baseline segments (needed for compression only) ========================================================================= */ /* ------------------------------------------------------------------------- */ /* SOF (start of frame) segment depends on width, height and sampling ratio of each color component */ static int zr36060_set_sof (struct zr36060 *ptr) { char sof_data[34]; // max. 
size of register set int i; dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name, ptr->width, ptr->height, NO_OF_COMPONENTS); sof_data[0] = 0xff; sof_data[1] = 0xc0; sof_data[2] = 0x00; sof_data[3] = (3 * NO_OF_COMPONENTS) + 8; sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36060 sof_data[5] = (ptr->height) >> 8; sof_data[6] = (ptr->height) & 0xff; sof_data[7] = (ptr->width) >> 8; sof_data[8] = (ptr->width) & 0xff; sof_data[9] = NO_OF_COMPONENTS; for (i = 0; i < NO_OF_COMPONENTS; i++) { sof_data[10 + (i * 3)] = i; // index identifier sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | (ptr->v_samp_ratio[i]); // sampling ratios sof_data[12 + (i * 3)] = zr36060_tq[i]; // Q table selection } return zr36060_pushit(ptr, ZR060_SOF_IDX, (3 * NO_OF_COMPONENTS) + 10, sof_data); } /* ------------------------------------------------------------------------- */ /* SOS (start of scan) segment depends on the used scan components of each color component */ static int zr36060_set_sos (struct zr36060 *ptr) { char sos_data[16]; // max. size of register set int i; dprintk(3, "%s: write SOS\n", ptr->name); sos_data[0] = 0xff; sos_data[1] = 0xda; sos_data[2] = 0x00; sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3; sos_data[4] = NO_OF_COMPONENTS; for (i = 0; i < NO_OF_COMPONENTS; i++) { sos_data[5 + (i * 2)] = i; // index sos_data[6 + (i * 2)] = (zr36060_td[i] << 4) | zr36060_ta[i]; // AC/DC tbl.sel. } sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3f; sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00; return zr36060_pushit(ptr, ZR060_SOS_IDX, 4 + 1 + (2 * NO_OF_COMPONENTS) + 3, sos_data); } /* ------------------------------------------------------------------------- */ /* DRI (define restart interval) */ static int zr36060_set_dri (struct zr36060 *ptr) { char dri_data[6]; // max. 
size of register set dprintk(3, "%s: write DRI\n", ptr->name); dri_data[0] = 0xff; dri_data[1] = 0xdd; dri_data[2] = 0x00; dri_data[3] = 0x04; dri_data[4] = (ptr->dri) >> 8; dri_data[5] = (ptr->dri) & 0xff; return zr36060_pushit(ptr, ZR060_DRI_IDX, 6, dri_data); } /* ========================================================================= Setup function: Setup compression/decompression of Zoran's JPEG processor ( see also zoran 36060 manual ) ... sorry for the spaghetti code ... ========================================================================= */ static void zr36060_init (struct zr36060 *ptr) { int sum = 0; long bitcnt, tmp; if (ptr->mode == CODEC_DO_COMPRESSION) { dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name); zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); /* 060 communicates with 067 in master mode */ zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr); /* Compression with or without variable scale factor */ /*FIXME: What about ptr->bitrate_ctrl? */ zr36060_write(ptr, ZR060_CMR, ZR060_CMR_Comp | ZR060_CMR_Pass2 | ZR060_CMR_BRB); /* Must be zero */ zr36060_write(ptr, ZR060_MBZ, 0x00); zr36060_write(ptr, ZR060_TCR_HI, 0x00); zr36060_write(ptr, ZR060_TCR_LO, 0x00); /* Disable all IRQs - no DataErr means autoreset */ zr36060_write(ptr, ZR060_IMR, 0); /* volume control settings */ zr36060_write(ptr, ZR060_SF_HI, ptr->scalefact >> 8); zr36060_write(ptr, ZR060_SF_LO, ptr->scalefact & 0xff); zr36060_write(ptr, ZR060_AF_HI, 0xff); zr36060_write(ptr, ZR060_AF_M, 0xff); zr36060_write(ptr, ZR060_AF_LO, 0xff); /* setup the variable jpeg tables */ sum += zr36060_set_sof(ptr); sum += zr36060_set_sos(ptr); sum += zr36060_set_dri(ptr); /* setup the fixed jpeg tables - maybe variable, though - * (see table init section above) */ sum += zr36060_pushit(ptr, ZR060_DQT_IDX, sizeof(zr36060_dqt), zr36060_dqt); sum += zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), zr36060_dht); zr36060_write(ptr, ZR060_APP_IDX, 0xff); zr36060_write(ptr, ZR060_APP_IDX + 1, 0xe0 + 
ptr->app.appn); zr36060_write(ptr, ZR060_APP_IDX + 2, 0x00); zr36060_write(ptr, ZR060_APP_IDX + 3, ptr->app.len + 2); sum += zr36060_pushit(ptr, ZR060_APP_IDX + 4, 60, ptr->app.data) + 4; zr36060_write(ptr, ZR060_COM_IDX, 0xff); zr36060_write(ptr, ZR060_COM_IDX + 1, 0xfe); zr36060_write(ptr, ZR060_COM_IDX + 2, 0x00); zr36060_write(ptr, ZR060_COM_IDX + 3, ptr->com.len + 2); sum += zr36060_pushit(ptr, ZR060_COM_IDX + 4, 60, ptr->com.data) + 4; /* setup misc. data for compression (target code sizes) */ /* size of compressed code to reach without header data */ sum = ptr->real_code_vol - sum; bitcnt = sum << 3; /* need the size in bits */ tmp = bitcnt >> 16; dprintk(3, "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); zr36060_write(ptr, ZR060_TCV_NET_HI, tmp >> 8); zr36060_write(ptr, ZR060_TCV_NET_MH, tmp & 0xff); tmp = bitcnt & 0xffff; zr36060_write(ptr, ZR060_TCV_NET_ML, tmp >> 8); zr36060_write(ptr, ZR060_TCV_NET_LO, tmp & 0xff); bitcnt -= bitcnt >> 7; // bits without stuffing bitcnt -= ((bitcnt * 5) >> 6); // bits without eob tmp = bitcnt >> 16; dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n", ptr->name, bitcnt, tmp); zr36060_write(ptr, ZR060_TCV_DATA_HI, tmp >> 8); zr36060_write(ptr, ZR060_TCV_DATA_MH, tmp & 0xff); tmp = bitcnt & 0xffff; zr36060_write(ptr, ZR060_TCV_DATA_ML, tmp >> 8); zr36060_write(ptr, ZR060_TCV_DATA_LO, tmp & 0xff); /* JPEG markers to be included in the compressed stream */ zr36060_write(ptr, ZR060_MER, ZR060_MER_DQT | ZR060_MER_DHT | ((ptr->com.len > 0) ? ZR060_MER_Com : 0) | ((ptr->app.len > 0) ? 
ZR060_MER_App : 0)); /* Setup the Video Frontend */ /* Limit pixel range to 16..235 as per CCIR-601 */ zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range); } else { dprintk(2, "%s: EXPANSION SETUP\n", ptr->name); zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); /* 060 communicates with 067 in master mode */ zr36060_write(ptr, ZR060_CIR, ZR060_CIR_CodeMstr); /* Decompression */ zr36060_write(ptr, ZR060_CMR, 0); /* Must be zero */ zr36060_write(ptr, ZR060_MBZ, 0x00); zr36060_write(ptr, ZR060_TCR_HI, 0x00); zr36060_write(ptr, ZR060_TCR_LO, 0x00); /* Disable all IRQs - no DataErr means autoreset */ zr36060_write(ptr, ZR060_IMR, 0); /* setup misc. data for expansion */ zr36060_write(ptr, ZR060_MER, 0); /* setup the fixed jpeg tables - maybe variable, though - * (see table init section above) */ zr36060_pushit(ptr, ZR060_DHT_IDX, sizeof(zr36060_dht), zr36060_dht); /* Setup the Video Frontend */ //zr36060_write(ptr, ZR060_VCR, ZR060_VCR_FIExt); //this doesn't seem right and doesn't work... zr36060_write(ptr, ZR060_VCR, ZR060_VCR_Range); } /* Load the tables */ zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst | ZR060_LOAD_Load); zr36060_wait_end(ptr); dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, ptr->status); if (ptr->status & ZR060_CFSR_Busy) { dprintk(1, KERN_ERR "%s: init aborted!\n", ptr->name); return; // something is wrong, its timed out!!!! 
} } /* ========================================================================= CODEC API FUNCTIONS this functions are accessed by the master via the API structure ========================================================================= */ /* set compression/expansion mode and launches codec - this should be the last call from the master before starting processing */ static int zr36060_set_mode (struct videocodec *codec, int mode) { struct zr36060 *ptr = (struct zr36060 *) codec->data; dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) return -EINVAL; ptr->mode = mode; zr36060_init(ptr); return 0; } /* set picture size (norm is ignored as the codec doesn't know about it) */ static int zr36060_set_video (struct videocodec *codec, struct tvnorm *norm, struct vfe_settings *cap, struct vfe_polarity *pol) { struct zr36060 *ptr = (struct zr36060 *) codec->data; u32 reg; int size; dprintk(2, "%s: set_video %d/%d-%dx%d (%%%d) call\n", ptr->name, cap->x, cap->y, cap->width, cap->height, cap->decimation); /* if () return -EINVAL; * trust the master driver that it knows what it does - so * we allow invalid startx/y and norm for now ... */ ptr->width = cap->width / (cap->decimation & 0xff); ptr->height = cap->height / (cap->decimation >> 8); zr36060_write(ptr, ZR060_LOAD, ZR060_LOAD_SyncRst); /* Note that VSPol/HSPol bits in zr36060 have the opposite * meaning of their zr360x7 counterparts with the same names * N.b. for VSPol this is only true if FIVEdge = 0 (default, * left unchanged here - in accordance with datasheet). */ reg = (!pol->vsync_pol ? ZR060_VPR_VSPol : 0) | (!pol->hsync_pol ? ZR060_VPR_HSPol : 0) | (pol->field_pol ? ZR060_VPR_FIPol : 0) | (pol->blank_pol ? ZR060_VPR_BLPol : 0) | (pol->subimg_pol ? ZR060_VPR_SImgPol : 0) | (pol->poe_pol ? ZR060_VPR_PoePol : 0) | (pol->pvalid_pol ? ZR060_VPR_PValPol : 0) | (pol->vclk_pol ? 
ZR060_VPR_VCLKPol : 0); zr36060_write(ptr, ZR060_VPR, reg); reg = 0; switch (cap->decimation & 0xff) { default: case 1: break; case 2: reg |= ZR060_SR_HScale2; break; case 4: reg |= ZR060_SR_HScale4; break; } switch (cap->decimation >> 8) { default: case 1: break; case 2: reg |= ZR060_SR_VScale; break; } zr36060_write(ptr, ZR060_SR, reg); zr36060_write(ptr, ZR060_BCR_Y, 0x00); zr36060_write(ptr, ZR060_BCR_U, 0x80); zr36060_write(ptr, ZR060_BCR_V, 0x80); /* sync generator */ reg = norm->Ht - 1; /* Vtotal */ zr36060_write(ptr, ZR060_SGR_VTOTAL_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_VTOTAL_LO, (reg >> 0) & 0xff); reg = norm->Wt - 1; /* Htotal */ zr36060_write(ptr, ZR060_SGR_HTOTAL_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_HTOTAL_LO, (reg >> 0) & 0xff); reg = 6 - 1; /* VsyncSize */ zr36060_write(ptr, ZR060_SGR_VSYNC, reg); //reg = 30 - 1; /* HsyncSize */ ///*CP*/ reg = (zr->params.norm == 1 ? 57 : 68); reg = 68; zr36060_write(ptr, ZR060_SGR_HSYNC, reg); reg = norm->VStart - 1; /* BVstart */ zr36060_write(ptr, ZR060_SGR_BVSTART, reg); reg += norm->Ha / 2; /* BVend */ zr36060_write(ptr, ZR060_SGR_BVEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_BVEND_LO, (reg >> 0) & 0xff); reg = norm->HStart - 1; /* BHstart */ zr36060_write(ptr, ZR060_SGR_BHSTART, reg); reg += norm->Wa; /* BHend */ zr36060_write(ptr, ZR060_SGR_BHEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SGR_BHEND_LO, (reg >> 0) & 0xff); /* active area */ reg = cap->y + norm->VStart; /* Vstart */ zr36060_write(ptr, ZR060_AAR_VSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_VSTART_LO, (reg >> 0) & 0xff); reg += cap->height; /* Vend */ zr36060_write(ptr, ZR060_AAR_VEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_VEND_LO, (reg >> 0) & 0xff); reg = cap->x + norm->HStart; /* Hstart */ zr36060_write(ptr, ZR060_AAR_HSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_HSTART_LO, (reg >> 0) & 0xff); reg += cap->width; /* Hend */ zr36060_write(ptr, 
ZR060_AAR_HEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_AAR_HEND_LO, (reg >> 0) & 0xff); /* subimage area */ reg = norm->VStart - 4; /* SVstart */ zr36060_write(ptr, ZR060_SWR_VSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_VSTART_LO, (reg >> 0) & 0xff); reg += norm->Ha / 2 + 8; /* SVend */ zr36060_write(ptr, ZR060_SWR_VEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_VEND_LO, (reg >> 0) & 0xff); reg = norm->HStart /*+ 64 */ - 4; /* SHstart */ zr36060_write(ptr, ZR060_SWR_HSTART_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_HSTART_LO, (reg >> 0) & 0xff); reg += norm->Wa + 8; /* SHend */ zr36060_write(ptr, ZR060_SWR_HEND_HI, (reg >> 8) & 0xff); zr36060_write(ptr, ZR060_SWR_HEND_LO, (reg >> 0) & 0xff); size = ptr->width * ptr->height; /* Target compressed field size in bits: */ size = size * 16; /* uncompressed size in bits */ /* (Ronald) by default, quality = 100 is a compression * ratio 1:2. Setting low_bitrate (insmod option) sets * it to 1:4 (instead of 1:2, zr36060 max) as limit because the * buz can't handle more at decimation=1... Use low_bitrate if * you have a Buz, unless you know what you're doing */ size = size * cap->quality / (low_bitrate ? 400 : 200); /* Lower limit (arbitrary, 1 KB) */ if (size < 8192) size = 8192; /* Upper limit: 7/8 of the code buffers */ if (size > ptr->total_code_vol * 7) size = ptr->total_code_vol * 7; ptr->real_code_vol = size >> 3; /* in bytes */ /* the MBCVR is the *maximum* block volume, according to the * JPEG ISO specs, this shouldn't be used, since that allows * for the best encoding quality. 
So set it to it's max value */ reg = ptr->max_block_vol; zr36060_write(ptr, ZR060_MBCVR, reg); return 0; } /* additional control functions */ static int zr36060_control (struct videocodec *codec, int type, int size, void *data) { struct zr36060 *ptr = (struct zr36060 *) codec->data; int *ival = (int *) data; dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, size); switch (type) { case CODEC_G_STATUS: /* get last status */ if (size != sizeof(int)) return -EFAULT; zr36060_read_status(ptr); *ival = ptr->status; break; case CODEC_G_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; *ival = CODEC_MODE_BJPG; break; case CODEC_S_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; if (*ival != CODEC_MODE_BJPG) return -EINVAL; /* not needed, do nothing */ return 0; case CODEC_G_VFE: case CODEC_S_VFE: /* not needed, do nothing */ return 0; case CODEC_S_MMAP: /* not available, give an error */ return -ENXIO; case CODEC_G_JPEG_TDS_BYTE: /* get target volume in byte */ if (size != sizeof(int)) return -EFAULT; *ival = ptr->total_code_vol; break; case CODEC_S_JPEG_TDS_BYTE: /* get target volume in byte */ if (size != sizeof(int)) return -EFAULT; ptr->total_code_vol = *ival; ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; break; case CODEC_G_JPEG_SCALE: /* get scaling factor */ if (size != sizeof(int)) return -EFAULT; *ival = zr36060_read_scalefactor(ptr); break; case CODEC_S_JPEG_SCALE: /* set scaling factor */ if (size != sizeof(int)) return -EFAULT; ptr->scalefact = *ival; break; case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */ struct jpeg_app_marker *app = data; if (size != sizeof(struct jpeg_app_marker)) return -EFAULT; *app = ptr->app; break; } case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */ struct jpeg_app_marker *app = data; if (size != sizeof(struct jpeg_app_marker)) return -EFAULT; ptr->app = *app; break; } case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */ struct jpeg_com_marker *com = data; if (size != sizeof(struct 
jpeg_com_marker)) return -EFAULT; *com = ptr->com; break; } case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */ struct jpeg_com_marker *com = data; if (size != sizeof(struct jpeg_com_marker)) return -EFAULT; ptr->com = *com; break; } default: return -EINVAL; } return size; } /* ========================================================================= Exit and unregister function: Deinitializes Zoran's JPEG processor ========================================================================= */ static int zr36060_unset (struct videocodec *codec) { struct zr36060 *ptr = codec->data; if (ptr) { /* do wee need some codec deinit here, too ???? */ dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num); kfree(ptr); codec->data = NULL; zr36060_codecs--; return 0; } return -EFAULT; } /* ========================================================================= Setup and registry function: Initializes Zoran's JPEG processor Also sets pixel size, average code size, mode (compr./decompr.) (the given size is determined by the processor with the video interface) ========================================================================= */ static int zr36060_setup (struct videocodec *codec) { struct zr36060 *ptr; int res; dprintk(2, "zr36060: initializing MJPEG subsystem #%d.\n", zr36060_codecs); if (zr36060_codecs == MAX_CODECS) { dprintk(1, KERN_ERR "zr36060: Can't attach more codecs!\n"); return -ENOSPC; } //mem structure init codec->data = ptr = kzalloc(sizeof(struct zr36060), GFP_KERNEL); if (NULL == ptr) { dprintk(1, KERN_ERR "zr36060: Can't get enough memory!\n"); return -ENOMEM; } snprintf(ptr->name, sizeof(ptr->name), "zr36060[%d]", zr36060_codecs); ptr->num = zr36060_codecs++; ptr->codec = codec; //testing res = zr36060_basic_test(ptr); if (res < 0) { zr36060_unset(codec); return res; } //final setup memcpy(ptr->h_samp_ratio, zr36060_decimation_h, 8); memcpy(ptr->v_samp_ratio, zr36060_decimation_v, 8); ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag * 
(what is the difference?) */ ptr->mode = CODEC_DO_COMPRESSION; ptr->width = 384; ptr->height = 288; ptr->total_code_vol = 16000; /* CHECKME */ ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; ptr->max_block_vol = 240; /* CHECKME, was 120 is 240 */ ptr->scalefact = 0x100; ptr->dri = 1; /* CHECKME, was 8 is 1 */ /* by default, no COM or APP markers - app should set those */ ptr->com.len = 0; ptr->app.appn = 0; ptr->app.len = 0; zr36060_init(ptr); dprintk(1, KERN_INFO "%s: codec attached and running\n", ptr->name); return 0; } static const struct videocodec zr36060_codec = { .owner = THIS_MODULE, .name = "zr36060", .magic = 0L, // magic not used .flags = CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER | CODEC_FLAG_VFE, .type = CODEC_TYPE_ZR36060, .setup = zr36060_setup, // functionality .unset = zr36060_unset, .set_mode = zr36060_set_mode, .set_video = zr36060_set_video, .control = zr36060_control, // others are not used }; /* ========================================================================= HOOK IN DRIVER AS KERNEL MODULE ========================================================================= */ static int __init zr36060_init_module (void) { //dprintk(1, "zr36060 driver %s\n",ZR060_VERSION); zr36060_codecs = 0; return videocodec_register(&zr36060_codec); } static void __exit zr36060_cleanup_module (void) { if (zr36060_codecs) { dprintk(1, "zr36060: something's wrong - %d codecs left somehow.\n", zr36060_codecs); } /* however, we can't just stay alive */ videocodec_unregister(&zr36060_codec); } module_init(zr36060_init_module); module_exit(zr36060_cleanup_module); MODULE_AUTHOR("Laurent Pinchart <laurent.pinchart@skynet.be>"); MODULE_DESCRIPTION("Driver module for ZR36060 jpeg processors " ZR060_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
cuckata23/android_kernel_motorola_msm8226
drivers/input/touchscreen/fujitsu_ts.c
9921
4297
/* * Fujitsu serial touchscreen driver * * Copyright (c) Dmitry Torokhov <dtor@mail.ru> */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/serio.h> #include <linux/init.h> #define DRIVER_DESC "Fujitsu serial touchscreen driver" MODULE_AUTHOR("Dmitry Torokhov <dtor@mail.ru>"); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); #define FUJITSU_LENGTH 5 /* * Per-touchscreen data. */ struct fujitsu { struct input_dev *dev; struct serio *serio; int idx; unsigned char data[FUJITSU_LENGTH]; char phys[32]; }; /* * Decode serial data (5 bytes per packet) * First byte * 1 C 0 0 R S S S * Where C is 1 while in calibration mode (which we don't use) * R is 1 when no coordinate corection was done. * S are button state */ static irqreturn_t fujitsu_interrupt(struct serio *serio, unsigned char data, unsigned int flags) { struct fujitsu *fujitsu = serio_get_drvdata(serio); struct input_dev *dev = fujitsu->dev; if (fujitsu->idx == 0) { /* resync skip until start of frame */ if ((data & 0xf0) != 0x80) return IRQ_HANDLED; } else { /* resync skip garbage */ if (data & 0x80) { fujitsu->idx = 0; return IRQ_HANDLED; } } fujitsu->data[fujitsu->idx++] = data; if (fujitsu->idx == FUJITSU_LENGTH) { input_report_abs(dev, ABS_X, (fujitsu->data[2] << 7) | fujitsu->data[1]); input_report_abs(dev, ABS_Y, (fujitsu->data[4] << 7) | fujitsu->data[3]); input_report_key(dev, BTN_TOUCH, (fujitsu->data[0] & 0x03) != 2); input_sync(dev); fujitsu->idx = 0; } return IRQ_HANDLED; } /* * fujitsu_disconnect() is the opposite of fujitsu_connect() */ static void fujitsu_disconnect(struct serio *serio) { struct fujitsu *fujitsu = serio_get_drvdata(serio); input_get_device(fujitsu->dev); input_unregister_device(fujitsu->dev); 
serio_close(serio); serio_set_drvdata(serio, NULL); input_put_device(fujitsu->dev); kfree(fujitsu); } /* * fujitsu_connect() is the routine that is called when someone adds a * new serio device that supports the Fujitsu protocol and registers it * as input device. */ static int fujitsu_connect(struct serio *serio, struct serio_driver *drv) { struct fujitsu *fujitsu; struct input_dev *input_dev; int err; fujitsu = kzalloc(sizeof(struct fujitsu), GFP_KERNEL); input_dev = input_allocate_device(); if (!fujitsu || !input_dev) { err = -ENOMEM; goto fail1; } fujitsu->serio = serio; fujitsu->dev = input_dev; snprintf(fujitsu->phys, sizeof(fujitsu->phys), "%s/input0", serio->phys); input_dev->name = "Fujitsu Serial Touchscreen"; input_dev->phys = fujitsu->phys; input_dev->id.bustype = BUS_RS232; input_dev->id.vendor = SERIO_FUJITSU; input_dev->id.product = 0; input_dev->id.version = 0x0100; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_abs_params(input_dev, ABS_X, 0, 4096, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, 4096, 0, 0); serio_set_drvdata(serio, fujitsu); err = serio_open(serio, drv); if (err) goto fail2; err = input_register_device(fujitsu->dev); if (err) goto fail3; return 0; fail3: serio_close(serio); fail2: serio_set_drvdata(serio, NULL); fail1: input_free_device(input_dev); kfree(fujitsu); return err; } /* * The serio driver structure. 
*/ static struct serio_device_id fujitsu_serio_ids[] = { { .type = SERIO_RS232, .proto = SERIO_FUJITSU, .id = SERIO_ANY, .extra = SERIO_ANY, }, { 0 } }; MODULE_DEVICE_TABLE(serio, fujitsu_serio_ids); static struct serio_driver fujitsu_drv = { .driver = { .name = "fujitsu_ts", }, .description = DRIVER_DESC, .id_table = fujitsu_serio_ids, .interrupt = fujitsu_interrupt, .connect = fujitsu_connect, .disconnect = fujitsu_disconnect, }; static int __init fujitsu_init(void) { return serio_register_driver(&fujitsu_drv); } static void __exit fujitsu_exit(void) { serio_unregister_driver(&fujitsu_drv); } module_init(fujitsu_init); module_exit(fujitsu_exit);
gpl-2.0
tomdean1/linux
drivers/block/paride/friq.c
15553
6365
/* friq.c (c) 1998 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License friq.c is a low-level protocol driver for the Freecom "IQ" parallel port IDE adapter. Early versions of this adapter use the 'frpw' protocol. Freecom uses this adapter in a battery powered external CD-ROM drive. It is also used in LS-120 drives by Maxell and Panasonic, and other devices. The battery powered drive requires software support to control the power to the drive. This module enables the drive power when the high level driver (pcd) is loaded and disables it when the module is unloaded. Note, if the friq module is built in to the kernel, the power will never be switched off, so other means should be used to conserve battery power. */ /* Changes: 1.01 GRG 1998.12.20 Added support for soft power switch */ #define FRIQ_VERSION "1.01" #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/wait.h> #include <asm/io.h> #include "paride.h" #define CMD(x) w2(4);w0(0xff);w0(0xff);w0(0x73);w0(0x73);\ w0(0xc9);w0(0xc9);w0(0x26);w0(0x26);w0(x);w0(x); #define j44(l,h) (((l>>4)&0x0f)|(h&0xf0)) /* cont = 0 - access the IDE register file cont = 1 - access the IDE command set */ static int cont_map[2] = { 0x08, 0x10 }; static int friq_read_regr( PIA *pi, int cont, int regr ) { int h,l,r; r = regr + cont_map[cont]; CMD(r); w2(6); l = r1(); w2(4); h = r1(); w2(4); return j44(l,h); } static void friq_write_regr( PIA *pi, int cont, int regr, int val) { int r; r = regr + cont_map[cont]; CMD(r); w0(val); w2(5);w2(7);w2(5);w2(4); } static void friq_read_block_int( PIA *pi, char * buf, int count, int regr ) { int h, l, k, ph; switch(pi->mode) { case 0: CMD(regr); for (k=0;k<count;k++) { w2(6); l = r1(); w2(4); h = r1(); buf[k] = j44(l,h); } w2(4); break; case 1: ph = 2; CMD(regr+0xc0); w0(0xff); for (k=0;k<count;k++) { w2(0xa4 + ph); buf[k] = r0(); ph = 2 - ph; } w2(0xac); w2(0xa4); w2(4); 
break; case 2: CMD(regr+0x80); for (k=0;k<count-2;k++) buf[k] = r4(); w2(0xac); w2(0xa4); buf[count-2] = r4(); buf[count-1] = r4(); w2(4); break; case 3: CMD(regr+0x80); for (k=0;k<(count/2)-1;k++) ((u16 *)buf)[k] = r4w(); w2(0xac); w2(0xa4); buf[count-2] = r4(); buf[count-1] = r4(); w2(4); break; case 4: CMD(regr+0x80); for (k=0;k<(count/4)-1;k++) ((u32 *)buf)[k] = r4l(); buf[count-4] = r4(); buf[count-3] = r4(); w2(0xac); w2(0xa4); buf[count-2] = r4(); buf[count-1] = r4(); w2(4); break; } } static void friq_read_block( PIA *pi, char * buf, int count) { friq_read_block_int(pi,buf,count,0x08); } static void friq_write_block( PIA *pi, char * buf, int count ) { int k; switch(pi->mode) { case 0: case 1: CMD(8); w2(5); for (k=0;k<count;k++) { w0(buf[k]); w2(7);w2(5); } w2(4); break; case 2: CMD(0xc8); w2(5); for (k=0;k<count;k++) w4(buf[k]); w2(4); break; case 3: CMD(0xc8); w2(5); for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]); w2(4); break; case 4: CMD(0xc8); w2(5); for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]); w2(4); break; } } static void friq_connect ( PIA *pi ) { pi->saved_r0 = r0(); pi->saved_r2 = r2(); w2(4); } static void friq_disconnect ( PIA *pi ) { CMD(0x20); w0(pi->saved_r0); w2(pi->saved_r2); } static int friq_test_proto( PIA *pi, char * scratch, int verbose ) { int j, k, r; int e[2] = {0,0}; pi->saved_r0 = r0(); w0(0xff); udelay(20); CMD(0x3d); /* turn the power on */ udelay(500); w0(pi->saved_r0); friq_connect(pi); for (j=0;j<2;j++) { friq_write_regr(pi,0,6,0xa0+j*0x10); for (k=0;k<256;k++) { friq_write_regr(pi,0,2,k^0xaa); friq_write_regr(pi,0,3,k^0x55); if (friq_read_regr(pi,0,2) != (k^0xaa)) e[j]++; } } friq_disconnect(pi); friq_connect(pi); friq_read_block_int(pi,scratch,512,0x10); r = 0; for (k=0;k<128;k++) if (scratch[k] != k) r++; friq_disconnect(pi); if (verbose) { printk("%s: friq: port 0x%x, mode %d, test=(%d,%d,%d)\n", pi->device,pi->port,pi->mode,e[0],e[1],r); } return (r || (e[0] && e[1])); } static void friq_log_adapter( PIA *pi, char * 
scratch, int verbose ) { char *mode_string[6] = {"4-bit","8-bit", "EPP-8","EPP-16","EPP-32"}; printk("%s: friq %s, Freecom IQ ASIC-2 adapter at 0x%x, ", pi->device, FRIQ_VERSION,pi->port); printk("mode %d (%s), delay %d\n",pi->mode, mode_string[pi->mode],pi->delay); pi->private = 1; friq_connect(pi); CMD(0x9e); /* disable sleep timer */ friq_disconnect(pi); } static void friq_release_proto( PIA *pi) { if (pi->private) { /* turn off the power */ friq_connect(pi); CMD(0x1d); CMD(0x1e); friq_disconnect(pi); pi->private = 0; } } static struct pi_protocol friq = { .owner = THIS_MODULE, .name = "friq", .max_mode = 5, .epp_first = 2, .default_delay = 1, .max_units = 1, .write_regr = friq_write_regr, .read_regr = friq_read_regr, .write_block = friq_write_block, .read_block = friq_read_block, .connect = friq_connect, .disconnect = friq_disconnect, .test_proto = friq_test_proto, .log_adapter = friq_log_adapter, .release_proto = friq_release_proto, }; static int __init friq_init(void) { return paride_register(&friq); } static void __exit friq_exit(void) { paride_unregister(&friq); } MODULE_LICENSE("GPL"); module_init(friq_init) module_exit(friq_exit)
gpl-2.0
boa19861105/Butterfly-S-Sense-4.4.3
drivers/block/paride/fit2.c
15553
3254
/* fit2.c (c) 1998 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License. fit2.c is a low-level protocol driver for the older version of the Fidelity International Technology parallel port adapter. This adapter is used in their TransDisk 2000 and older TransDisk 3000 portable hard-drives. As far as I can tell, this device supports 4-bit mode _only_. Newer models of the FIT products use an enhanced protocol. The "fit3" protocol module should support current drives. */ #define FIT2_VERSION "1.0" #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/wait.h> #include <asm/io.h> #include "paride.h" #define j44(a,b) (((a>>4)&0x0f)|(b&0xf0)) /* cont = 0 - access the IDE register file cont = 1 - access the IDE command set NB: The FIT adapter does not appear to use the control registers. So, we map ALT_STATUS to STATUS and NO-OP writes to the device control register - this means that IDE reset will not work on these devices. 
*/ static void fit2_write_regr( PIA *pi, int cont, int regr, int val) { if (cont == 1) return; w2(0xc); w0(regr); w2(4); w0(val); w2(5); w0(0); w2(4); } static int fit2_read_regr( PIA *pi, int cont, int regr ) { int a, b, r; if (cont) { if (regr != 6) return 0xff; r = 7; } else r = regr + 0x10; w2(0xc); w0(r); w2(4); w2(5); w0(0); a = r1(); w0(1); b = r1(); w2(4); return j44(a,b); } static void fit2_read_block( PIA *pi, char * buf, int count ) { int k, a, b, c, d; w2(0xc); w0(0x10); for (k=0;k<count/4;k++) { w2(4); w2(5); w0(0); a = r1(); w0(1); b = r1(); w0(3); c = r1(); w0(2); d = r1(); buf[4*k+0] = j44(a,b); buf[4*k+1] = j44(d,c); w2(4); w2(5); a = r1(); w0(3); b = r1(); w0(1); c = r1(); w0(0); d = r1(); buf[4*k+2] = j44(d,c); buf[4*k+3] = j44(a,b); } w2(4); } static void fit2_write_block( PIA *pi, char * buf, int count ) { int k; w2(0xc); w0(0); for (k=0;k<count/2;k++) { w2(4); w0(buf[2*k]); w2(5); w0(buf[2*k+1]); } w2(4); } static void fit2_connect ( PIA *pi ) { pi->saved_r0 = r0(); pi->saved_r2 = r2(); w2(0xcc); } static void fit2_disconnect ( PIA *pi ) { w0(pi->saved_r0); w2(pi->saved_r2); } static void fit2_log_adapter( PIA *pi, char * scratch, int verbose ) { printk("%s: fit2 %s, FIT 2000 adapter at 0x%x, delay %d\n", pi->device,FIT2_VERSION,pi->port,pi->delay); } static struct pi_protocol fit2 = { .owner = THIS_MODULE, .name = "fit2", .max_mode = 1, .epp_first = 2, .default_delay = 1, .max_units = 1, .write_regr = fit2_write_regr, .read_regr = fit2_read_regr, .write_block = fit2_write_block, .read_block = fit2_read_block, .connect = fit2_connect, .disconnect = fit2_disconnect, .log_adapter = fit2_log_adapter, }; static int __init fit2_init(void) { return paride_register(&fit2); } static void __exit fit2_exit(void) { paride_unregister(&fit2); } MODULE_LICENSE("GPL"); module_init(fit2_init) module_exit(fit2_exit)
gpl-2.0
sakuramilk/linux-2.6.35.y
drivers/gpu/drm/radeon/r600_blit_kms.c
194
22234
#include "drmP.h" #include "drm.h" #include "radeon_drm.h" #include "radeon.h" #include "r600d.h" #include "r600_blit_shaders.h" #define DI_PT_RECTLIST 0x11 #define DI_INDEX_SIZE_16_BIT 0x0 #define DI_SRC_SEL_AUTO_INDEX 0x2 #define FMT_8 0x1 #define FMT_5_6_5 0x8 #define FMT_8_8_8_8 0x1a #define COLOR_8 0x1 #define COLOR_5_6_5 0x8 #define COLOR_8_8_8_8 0x1a /* emits 21 on rv770+, 23 on r600 */ static void set_render_target(struct radeon_device *rdev, int format, int w, int h, u64 gpu_addr) { u32 cb_color_info; int pitch, slice; h = ALIGN(h, 8); if (h < 8) h = 8; cb_color_info = ((format << 2) | (1 << 27)); pitch = (w / 8) - 1; slice = ((w * h) / 64) - 1; radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (CB_COLOR0_BASE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, gpu_addr >> 8); if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770) { radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_BASE_UPDATE, 0)); radeon_ring_write(rdev, 2 << 0); } radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (CB_COLOR0_SIZE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, (pitch << 0) | (slice << 10)); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (CB_COLOR0_VIEW - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (CB_COLOR0_INFO - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, cb_color_info); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (CB_COLOR0_TILE - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (CB_COLOR0_FRAG - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, 
(CB_COLOR0_MASK - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, 0); } /* emits 5dw */ static void cp_set_surface_sync(struct radeon_device *rdev, u32 sync_type, u32 size, u64 mc_addr) { u32 cp_coher_size; if (size == 0xffffffff) cp_coher_size = 0xffffffff; else cp_coher_size = ((size + 255) >> 8); radeon_ring_write(rdev, PACKET3(PACKET3_SURFACE_SYNC, 3)); radeon_ring_write(rdev, sync_type); radeon_ring_write(rdev, cp_coher_size); radeon_ring_write(rdev, mc_addr >> 8); radeon_ring_write(rdev, 10); /* poll interval */ } /* emits 21dw + 1 surface sync = 26dw */ static void set_shaders(struct radeon_device *rdev) { u64 gpu_addr; u32 sq_pgm_resources; /* setup shader regs */ sq_pgm_resources = (1 << 0); /* VS */ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (SQ_PGM_START_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, gpu_addr >> 8); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (SQ_PGM_RESOURCES_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, sq_pgm_resources); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_VS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, 0); /* PS */ gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.ps_offset; radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (SQ_PGM_START_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, gpu_addr >> 8); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (SQ_PGM_RESOURCES_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, sq_pgm_resources | (1 << 28)); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (SQ_PGM_EXPORTS_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, 2); 
radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 1)); radeon_ring_write(rdev, (SQ_PGM_CF_OFFSET_PS - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, 0); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.vs_offset; cp_set_surface_sync(rdev, PACKET3_SH_ACTION_ENA, 512, gpu_addr); } /* emits 9 + 1 sync (5) = 14*/ static void set_vtx_resource(struct radeon_device *rdev, u64 gpu_addr) { u32 sq_vtx_constant_word2; sq_vtx_constant_word2 = ((upper_32_bits(gpu_addr) & 0xff) | (16 << 8)); radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); radeon_ring_write(rdev, 0x460); radeon_ring_write(rdev, gpu_addr & 0xffffffff); radeon_ring_write(rdev, 48 - 1); radeon_ring_write(rdev, sq_vtx_constant_word2); radeon_ring_write(rdev, 1 << 0); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, SQ_TEX_VTX_VALID_BUFFER << 30); if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV620) || (rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880) || (rdev->family == CHIP_RV710)) cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, 48, gpu_addr); else cp_set_surface_sync(rdev, PACKET3_VC_ACTION_ENA, 48, gpu_addr); } /* emits 9 */ static void set_tex_resource(struct radeon_device *rdev, int format, int w, int h, int pitch, u64 gpu_addr) { uint32_t sq_tex_resource_word0, sq_tex_resource_word1, sq_tex_resource_word4; if (h < 1) h = 1; sq_tex_resource_word0 = (1 << 0); sq_tex_resource_word0 |= ((((pitch >> 3) - 1) << 8) | ((w - 1) << 19)); sq_tex_resource_word1 = (format << 26); sq_tex_resource_word1 |= ((h - 1) << 0); sq_tex_resource_word4 = ((1 << 14) | (0 << 16) | (1 << 19) | (2 << 22) | (3 << 25)); radeon_ring_write(rdev, PACKET3(PACKET3_SET_RESOURCE, 7)); radeon_ring_write(rdev, 0); radeon_ring_write(rdev, sq_tex_resource_word0); radeon_ring_write(rdev, sq_tex_resource_word1); radeon_ring_write(rdev, gpu_addr >> 8); radeon_ring_write(rdev, gpu_addr >> 8); radeon_ring_write(rdev, sq_tex_resource_word4); 
radeon_ring_write(rdev, 0); radeon_ring_write(rdev, SQ_TEX_VTX_VALID_TEXTURE << 30); } /* emits 12 */ static void set_scissors(struct radeon_device *rdev, int x1, int y1, int x2, int y2) { radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(rdev, (PA_SC_SCREEN_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, (x1 << 0) | (y1 << 16)); radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(rdev, (PA_SC_GENERIC_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31)); radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONTEXT_REG, 2)); radeon_ring_write(rdev, (PA_SC_WINDOW_SCISSOR_TL - PACKET3_SET_CONTEXT_REG_OFFSET) >> 2); radeon_ring_write(rdev, (x1 << 0) | (y1 << 16) | (1 << 31)); radeon_ring_write(rdev, (x2 << 0) | (y2 << 16)); } /* emits 10 */ static void draw_auto(struct radeon_device *rdev) { radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1)); radeon_ring_write(rdev, (VGT_PRIMITIVE_TYPE - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); radeon_ring_write(rdev, DI_PT_RECTLIST); radeon_ring_write(rdev, PACKET3(PACKET3_INDEX_TYPE, 0)); radeon_ring_write(rdev, DI_INDEX_SIZE_16_BIT); radeon_ring_write(rdev, PACKET3(PACKET3_NUM_INSTANCES, 0)); radeon_ring_write(rdev, 1); radeon_ring_write(rdev, PACKET3(PACKET3_DRAW_INDEX_AUTO, 1)); radeon_ring_write(rdev, 3); radeon_ring_write(rdev, DI_SRC_SEL_AUTO_INDEX); } /* emits 14 */ static void set_default_state(struct radeon_device *rdev) { u32 sq_config, sq_gpr_resource_mgmt_1, sq_gpr_resource_mgmt_2; u32 sq_thread_resource_mgmt, sq_stack_resource_mgmt_1, sq_stack_resource_mgmt_2; int num_ps_gprs, num_vs_gprs, num_temp_gprs, num_gs_gprs, num_es_gprs; int num_ps_threads, num_vs_threads, num_gs_threads, num_es_threads; int num_ps_stack_entries, num_vs_stack_entries, num_gs_stack_entries, 
num_es_stack_entries; u64 gpu_addr; int dwords; switch (rdev->family) { case CHIP_R600: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV630: case CHIP_RV635: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 40; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV610: case CHIP_RV620: case CHIP_RS780: case CHIP_RS880: default: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV670: num_ps_gprs = 144; num_vs_gprs = 40; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 136; num_vs_threads = 48; num_gs_threads = 4; num_es_threads = 4; num_ps_stack_entries = 40; num_vs_stack_entries = 40; num_gs_stack_entries = 32; num_es_stack_entries = 16; break; case CHIP_RV770: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 256; num_vs_stack_entries = 256; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case CHIP_RV730: case CHIP_RV740: num_ps_gprs = 84; num_vs_gprs = 36; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 188; num_vs_threads = 60; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; case 
CHIP_RV710: num_ps_gprs = 192; num_vs_gprs = 56; num_temp_gprs = 4; num_gs_gprs = 0; num_es_gprs = 0; num_ps_threads = 144; num_vs_threads = 48; num_gs_threads = 0; num_es_threads = 0; num_ps_stack_entries = 128; num_vs_stack_entries = 128; num_gs_stack_entries = 0; num_es_stack_entries = 0; break; } if ((rdev->family == CHIP_RV610) || (rdev->family == CHIP_RV620) || (rdev->family == CHIP_RS780) || (rdev->family == CHIP_RS880) || (rdev->family == CHIP_RV710)) sq_config = 0; else sq_config = VC_ENABLE; sq_config |= (DX9_CONSTS | ALU_INST_PREFER_VECTOR | PS_PRIO(0) | VS_PRIO(1) | GS_PRIO(2) | ES_PRIO(3)); sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(num_ps_gprs) | NUM_VS_GPRS(num_vs_gprs) | NUM_CLAUSE_TEMP_GPRS(num_temp_gprs)); sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(num_gs_gprs) | NUM_ES_GPRS(num_es_gprs)); sq_thread_resource_mgmt = (NUM_PS_THREADS(num_ps_threads) | NUM_VS_THREADS(num_vs_threads) | NUM_GS_THREADS(num_gs_threads) | NUM_ES_THREADS(num_es_threads)); sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(num_ps_stack_entries) | NUM_VS_STACK_ENTRIES(num_vs_stack_entries)); sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(num_gs_stack_entries) | NUM_ES_STACK_ENTRIES(num_es_stack_entries)); /* emit an IB pointing at default state */ dwords = ALIGN(rdev->r600_blit.state_len, 0x10); gpu_addr = rdev->r600_blit.shader_gpu_addr + rdev->r600_blit.state_offset; radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2)); radeon_ring_write(rdev, gpu_addr & 0xFFFFFFFC); radeon_ring_write(rdev, upper_32_bits(gpu_addr) & 0xFF); radeon_ring_write(rdev, dwords); /* SQ config */ radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 6)); radeon_ring_write(rdev, (SQ_CONFIG - PACKET3_SET_CONFIG_REG_OFFSET) >> 2); radeon_ring_write(rdev, sq_config); radeon_ring_write(rdev, sq_gpr_resource_mgmt_1); radeon_ring_write(rdev, sq_gpr_resource_mgmt_2); radeon_ring_write(rdev, sq_thread_resource_mgmt); radeon_ring_write(rdev, sq_stack_resource_mgmt_1); radeon_ring_write(rdev, 
sq_stack_resource_mgmt_2); } static inline uint32_t i2f(uint32_t input) { u32 result, i, exponent, fraction; if ((input & 0x3fff) == 0) result = 0; /* 0 is a special case */ else { exponent = 140; /* exponent biased by 127; */ fraction = (input & 0x3fff) << 10; /* cheat and only handle numbers below 2^^15 */ for (i = 0; i < 14; i++) { if (fraction & 0x800000) break; else { fraction = fraction << 1; /* keep shifting left until top bit = 1 */ exponent = exponent - 1; } } result = exponent << 23 | (fraction & 0x7fffff); /* mask off top bit; assumed 1 */ } return result; } int r600_blit_init(struct radeon_device *rdev) { u32 obj_size; int r, dwords; void *ptr; u32 packet2s[16]; int num_packet2s = 0; /* don't reinitialize blit */ if (rdev->r600_blit.shader_obj) return 0; mutex_init(&rdev->r600_blit.mutex); rdev->r600_blit.state_offset = 0; if (rdev->family >= CHIP_RV770) rdev->r600_blit.state_len = r7xx_default_size; else rdev->r600_blit.state_len = r6xx_default_size; dwords = rdev->r600_blit.state_len; while (dwords & 0xf) { packet2s[num_packet2s++] = PACKET2(0); dwords++; } obj_size = dwords * 4; obj_size = ALIGN(obj_size, 256); rdev->r600_blit.vs_offset = obj_size; obj_size += r6xx_vs_size * 4; obj_size = ALIGN(obj_size, 256); rdev->r600_blit.ps_offset = obj_size; obj_size += r6xx_ps_size * 4; obj_size = ALIGN(obj_size, 256); r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM, &rdev->r600_blit.shader_obj); if (r) { DRM_ERROR("r600 failed to allocate shader\n"); return r; } DRM_DEBUG("r6xx blit allocated bo %08x vs %08x ps %08x\n", obj_size, rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset); r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false); if (unlikely(r != 0)) return r; r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr); if (r) { DRM_ERROR("failed to map blit object %d\n", r); return r; } if (rdev->family >= CHIP_RV770) memcpy_toio(ptr + rdev->r600_blit.state_offset, r7xx_default_state, rdev->r600_blit.state_len * 4); else 
	/* r600_blit_init() continued: copy default state, NOP padding and the
	 * blit shaders into the mapped BO, then unmap/unreserve it. */
	memcpy_toio(ptr + rdev->r600_blit.state_offset, r6xx_default_state, rdev->r600_blit.state_len * 4);
	if (num_packet2s)
		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
			    packet2s, num_packet2s * 4);
	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	return 0;
}

/*
 * r600_blit_fini - release the blit shader buffer object.
 *
 * Safe to call when init never ran (shader_obj == NULL).
 */
void r600_blit_fini(struct radeon_device *rdev)
{
	int r;

	if (rdev->r600_blit.shader_obj == NULL)
		return;
	/* If we can't reserve the bo, unref should be enough to destroy
	 * it when it becomes idle.
	 */
	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
	if (!r) {
		radeon_bo_unpin(rdev->r600_blit.shader_obj);
		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
	}
	radeon_bo_unref(&rdev->r600_blit.shader_obj);
}

/*
 * r600_vb_ib_get - acquire a 64 KiB indirect buffer to use as the blit
 * vertex buffer and reset the usage counter.  Returns 0 or negative errno.
 */
int r600_vb_ib_get(struct radeon_device *rdev)
{
	int r;
	r = radeon_ib_get(rdev, &rdev->r600_blit.vb_ib);
	if (r) {
		DRM_ERROR("failed to get IB for vertex buffer\n");
		return r;
	}
	rdev->r600_blit.vb_total = 64*1024;
	rdev->r600_blit.vb_used = 0;
	return 0;
}

/*
 * r600_vb_ib_put - emit the fence for the vertex-buffer IB and free it.
 */
void r600_vb_ib_put(struct radeon_device *rdev)
{
	radeon_fence_emit(rdev, rdev->r600_blit.vb_ib->fence);
	radeon_ib_free(rdev, &rdev->r600_blit.vb_ib);
}

/*
 * r600_blit_prepare_copy - size and lock the CP ring for a blit of
 * @size_bytes, then emit the default state and shader setup.
 *
 * The dword counts below must match what the per-loop emit path in
 * r600_kms_blit_copy() actually writes; under-counting would overflow
 * the locked ring.  Returns 0 or negative errno.
 */
int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
{
	int r;
	int ring_size, line_size;
	int max_size;
	/* loops of emits 64 + fence emit possible */
	int dwords_per_loop = 76, num_loops;

	r = r600_vb_ib_get(rdev);
	if (r)
		return r;

	/* set_render_target emits 2 extra dwords on rv6xx */
	if (rdev->family > CHIP_R600 && rdev->family < CHIP_RV770)
		dwords_per_loop += 2;

	/* 8 bpp vs 32 bpp for xfer unit: unaligned copies go through the
	 * one-byte-per-texel path, aligned ones move 4 bytes per texel */
	if (size_bytes & 3)
		line_size = 8192;
	else
		line_size = 8192*4;

	max_size = 8192 * line_size;

	/* major loops cover the max size transfer */
	num_loops = ((size_bytes + max_size) / max_size);
	/* minor loops cover the extra non aligned bits */
	num_loops += ((size_bytes % line_size) ?
		      1 : 0); /* calculate number of loops correctly */
	ring_size = num_loops * dwords_per_loop;
	/* set default + shaders */
	ring_size += 40; /* shaders + def state */
	ring_size += 10; /* fence emit for VB IB */
	ring_size += 5; /* done copy */
	ring_size += 10; /* fence emit for done copy */
	r = radeon_ring_lock(rdev, ring_size);
	if (r)
		return r;

	set_default_state(rdev); /* 14 */
	set_shaders(rdev); /* 26 */
	return 0;
}

/*
 * r600_blit_done_copy - finish a blit: return the vertex-buffer IB,
 * optionally emit @fence, and commit/unlock the ring.
 */
void r600_blit_done_copy(struct radeon_device *rdev, struct radeon_fence *fence)
{
	int r;

	if (rdev->r600_blit.vb_ib)
		r600_vb_ib_put(rdev);

	if (fence)
		/* NOTE(review): return value is stored but never checked;
		 * a failed fence emit is silently ignored here. */
		r = radeon_fence_emit(rdev, fence);

	radeon_ring_unlock_commit(rdev);
}

/*
 * r600_kms_blit_copy - copy @size_bytes from @src_gpu_addr to
 * @dst_gpu_addr by drawing textured quads.
 *
 * Two paths: if size or either address is not 4-byte aligned, the copy
 * runs through an 8-bpp (one byte per texel) surface; otherwise through
 * a 32-bpp surface.  Each loop iteration writes 12 vertex dwords
 * (3 vertices x {dst_x, src_x or h} pairs, converted with i2f()) and
 * emits the texture/render-target/scissor/draw packets for one strip.
 */
void r600_kms_blit_copy(struct radeon_device *rdev,
			u64 src_gpu_addr, u64 dst_gpu_addr,
			int size_bytes)
{
	int max_bytes;
	u64 vb_gpu_addr;
	u32 *vb;

	DRM_DEBUG("emitting copy %16llx %16llx %d %d\n", src_gpu_addr, dst_gpu_addr,
		  size_bytes, rdev->r600_blit.vb_used);
	vb = (u32 *)(rdev->r600_blit.vb_ib->ptr + rdev->r600_blit.vb_used);
	if ((size_bytes & 3) || (src_gpu_addr & 3) || (dst_gpu_addr & 3)) {
		/* unaligned path: 8 bits per texel, 8192 texels per line */
		max_bytes = 8192;

		while (size_bytes) {
			int cur_size = size_bytes;
			int src_x = src_gpu_addr & 255;
			int dst_x = dst_gpu_addr & 255;
			int h = 1;
			/* surfaces must start on a 256-byte boundary; the
			 * sub-256 remainder becomes the x offset above */
			src_gpu_addr = src_gpu_addr & ~255ULL;
			dst_gpu_addr = dst_gpu_addr & ~255ULL;

			if (!src_x && !dst_x) {
				/* both aligned: copy many whole lines at once */
				h = (cur_size / max_bytes);
				if (h > 8192)
					h = 8192;
				if (h == 0)
					h = 1;
				else
					cur_size = max_bytes;
			} else {
				/* offset start: clamp to what fits in one line
				 * from both the src and dst offsets */
				if (cur_size > max_bytes)
					cur_size = max_bytes;
				if (cur_size > (max_bytes - dst_x))
					cur_size = (max_bytes - dst_x);
				if (cur_size > (max_bytes - src_x))
					cur_size = (max_bytes - src_x);
			}

			/* 12 dwords (48 bytes) of vertex data per iteration */
			if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) {
				WARN_ON(1);
#if 0
				r600_vb_ib_put(rdev);

				r600_nomm_put_vb(dev);
				r600_nomm_get_vb(dev);
				if (!dev_priv->blit_vb)
					return;
				set_shaders(dev);
				vb = r600_nomm_get_vb_ptr(dev);
#endif
			}

			vb[0] = i2f(dst_x);
			vb[1] = 0;
			vb[2] = i2f(src_x);
			vb[3] = 0;
			vb[4] = i2f(dst_x);
			vb[5] = i2f(h);
			vb[6] = i2f(src_x);
			vb[7] = i2f(h);
			vb[8] = i2f(dst_x + cur_size);
			vb[9] = i2f(h);
vb[10] = i2f(src_x + cur_size); vb[11] = i2f(h); /* src 9 */ set_tex_resource(rdev, FMT_8, src_x + cur_size, h, src_x + cur_size, src_gpu_addr); /* 5 */ cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); /* dst 23 */ set_render_target(rdev, COLOR_8, dst_x + cur_size, h, dst_gpu_addr); /* scissors 12 */ set_scissors(rdev, dst_x, 0, dst_x + cur_size, h); /* 14 */ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; set_vtx_resource(rdev, vb_gpu_addr); /* draw 10 */ draw_auto(rdev); /* 5 */ cp_set_surface_sync(rdev, PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, cur_size * h, dst_gpu_addr); vb += 12; rdev->r600_blit.vb_used += 12 * 4; src_gpu_addr += cur_size * h; dst_gpu_addr += cur_size * h; size_bytes -= cur_size * h; } } else { max_bytes = 8192 * 4; while (size_bytes) { int cur_size = size_bytes; int src_x = (src_gpu_addr & 255); int dst_x = (dst_gpu_addr & 255); int h = 1; src_gpu_addr = src_gpu_addr & ~255ULL; dst_gpu_addr = dst_gpu_addr & ~255ULL; if (!src_x && !dst_x) { h = (cur_size / max_bytes); if (h > 8192) h = 8192; if (h == 0) h = 1; else cur_size = max_bytes; } else { if (cur_size > max_bytes) cur_size = max_bytes; if (cur_size > (max_bytes - dst_x)) cur_size = (max_bytes - dst_x); if (cur_size > (max_bytes - src_x)) cur_size = (max_bytes - src_x); } if ((rdev->r600_blit.vb_used + 48) > rdev->r600_blit.vb_total) { WARN_ON(1); } #if 0 if ((rdev->blit_vb->used + 48) > rdev->blit_vb->total) { r600_nomm_put_vb(dev); r600_nomm_get_vb(dev); if (!rdev->blit_vb) return; set_shaders(dev); vb = r600_nomm_get_vb_ptr(dev); } #endif vb[0] = i2f(dst_x / 4); vb[1] = 0; vb[2] = i2f(src_x / 4); vb[3] = 0; vb[4] = i2f(dst_x / 4); vb[5] = i2f(h); vb[6] = i2f(src_x / 4); vb[7] = i2f(h); vb[8] = i2f((dst_x + cur_size) / 4); vb[9] = i2f(h); vb[10] = i2f((src_x + cur_size) / 4); vb[11] = i2f(h); /* src 9 */ set_tex_resource(rdev, FMT_8_8_8_8, (src_x + cur_size) / 4, h, (src_x + cur_size) / 4, src_gpu_addr); /* 5 */ 
cp_set_surface_sync(rdev, PACKET3_TC_ACTION_ENA, (src_x + cur_size * h), src_gpu_addr); /* dst 23 */ set_render_target(rdev, COLOR_8_8_8_8, (dst_x + cur_size) / 4, h, dst_gpu_addr); /* scissors 12 */ set_scissors(rdev, (dst_x / 4), 0, (dst_x + cur_size / 4), h); /* Vertex buffer setup 14 */ vb_gpu_addr = rdev->r600_blit.vb_ib->gpu_addr + rdev->r600_blit.vb_used; set_vtx_resource(rdev, vb_gpu_addr); /* draw 10 */ draw_auto(rdev); /* 5 */ cp_set_surface_sync(rdev, PACKET3_CB_ACTION_ENA | PACKET3_CB0_DEST_BASE_ENA, cur_size * h, dst_gpu_addr); /* 78 ring dwords per loop */ vb += 12; rdev->r600_blit.vb_used += 12 * 4; src_gpu_addr += cur_size * h; dst_gpu_addr += cur_size * h; size_bytes -= cur_size * h; } } }
gpl-2.0
tomdean1/linux
drivers/media/dvb-frontends/dib7000p.c
450
75517
/* * Linux-DVB Driver for DiBcom's second generation DiB7000P (PC). * * Copyright (C) 2005-7 DiBcom (http://www.dibcom.fr/) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <asm/div64.h> #include "dvb_math.h" #include "dvb_frontend.h" #include "dib7000p.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "turn on debugging (default: 0)"); static int buggy_sfn_workaround; module_param(buggy_sfn_workaround, int, 0644); MODULE_PARM_DESC(buggy_sfn_workaround, "Enable work-around for buggy SFNs (default: 0)"); #define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB7000P: "); printk(args); printk("\n"); } } while (0) struct i2c_device { struct i2c_adapter *i2c_adap; u8 i2c_addr; }; struct dib7000p_state { struct dvb_frontend demod; struct dib7000p_config cfg; u8 i2c_addr; struct i2c_adapter *i2c_adap; struct dibx000_i2c_master i2c_master; u16 wbd_ref; u8 current_band; u32 current_bandwidth; struct dibx000_agc_config *current_agc; u32 timf; u8 div_force_off:1; u8 div_state:1; u16 div_sync_wait; u8 agc_state; u16 gpio_dir; u16 gpio_val; u8 sfn_workaround_active:1; #define SOC7090 0x7090 u16 version; u16 tuner_enable; struct i2c_adapter dib7090_tuner_adap; /* for the I2C transfer */ struct i2c_msg msg[2]; u8 i2c_write_buffer[4]; u8 i2c_read_buffer[2]; struct mutex i2c_buffer_lock; u8 input_mode_mpeg; /* for DVBv5 stats */ s64 old_ucb; unsigned long per_jiffies_stats; unsigned long ber_jiffies_stats; unsigned long get_stats_time; }; enum dib7000p_power_mode { DIB7000P_POWER_ALL = 0, DIB7000P_POWER_ANALOG_ADC, DIB7000P_POWER_INTERFACE_ONLY, }; /* dib7090 specific fonctions */ static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode); static int dib7090_set_diversity_in(struct 
dvb_frontend *fe, int onoff); static void dib7090_setDibTxMux(struct dib7000p_state *state, int mode); static void dib7090_setHostBusMux(struct dib7000p_state *state, int mode); static u16 dib7000p_read_word(struct dib7000p_state *state, u16 reg) { u16 ret; if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return 0; } state->i2c_write_buffer[0] = reg >> 8; state->i2c_write_buffer[1] = reg & 0xff; memset(state->msg, 0, 2 * sizeof(struct i2c_msg)); state->msg[0].addr = state->i2c_addr >> 1; state->msg[0].flags = 0; state->msg[0].buf = state->i2c_write_buffer; state->msg[0].len = 2; state->msg[1].addr = state->i2c_addr >> 1; state->msg[1].flags = I2C_M_RD; state->msg[1].buf = state->i2c_read_buffer; state->msg[1].len = 2; if (i2c_transfer(state->i2c_adap, state->msg, 2) != 2) dprintk("i2c read error on %d", reg); ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1]; mutex_unlock(&state->i2c_buffer_lock); return ret; } static int dib7000p_write_word(struct dib7000p_state *state, u16 reg, u16 val) { int ret; if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return -EINVAL; } state->i2c_write_buffer[0] = (reg >> 8) & 0xff; state->i2c_write_buffer[1] = reg & 0xff; state->i2c_write_buffer[2] = (val >> 8) & 0xff; state->i2c_write_buffer[3] = val & 0xff; memset(&state->msg[0], 0, sizeof(struct i2c_msg)); state->msg[0].addr = state->i2c_addr >> 1; state->msg[0].flags = 0; state->msg[0].buf = state->i2c_write_buffer; state->msg[0].len = 4; ret = (i2c_transfer(state->i2c_adap, state->msg, 1) != 1 ? 
-EREMOTEIO : 0); mutex_unlock(&state->i2c_buffer_lock); return ret; } static void dib7000p_write_tab(struct dib7000p_state *state, u16 * buf) { u16 l = 0, r, *n; n = buf; l = *n++; while (l) { r = *n++; do { dib7000p_write_word(state, r, *n++); r++; } while (--l); l = *n++; } } static int dib7000p_set_output_mode(struct dib7000p_state *state, int mode) { int ret = 0; u16 outreg, fifo_threshold, smo_mode; outreg = 0; fifo_threshold = 1792; smo_mode = (dib7000p_read_word(state, 235) & 0x0050) | (1 << 1); dprintk("setting output mode for demod %p to %d", &state->demod, mode); switch (mode) { case OUTMODE_MPEG2_PAR_GATED_CLK: outreg = (1 << 10); /* 0x0400 */ break; case OUTMODE_MPEG2_PAR_CONT_CLK: outreg = (1 << 10) | (1 << 6); /* 0x0440 */ break; case OUTMODE_MPEG2_SERIAL: outreg = (1 << 10) | (2 << 6) | (0 << 1); /* 0x0480 */ break; case OUTMODE_DIVERSITY: if (state->cfg.hostbus_diversity) outreg = (1 << 10) | (4 << 6); /* 0x0500 */ else outreg = (1 << 11); break; case OUTMODE_MPEG2_FIFO: smo_mode |= (3 << 1); fifo_threshold = 512; outreg = (1 << 10) | (5 << 6); break; case OUTMODE_ANALOG_ADC: outreg = (1 << 10) | (3 << 6); break; case OUTMODE_HIGH_Z: outreg = 0; break; default: dprintk("Unhandled output_mode passed to be set for demod %p", &state->demod); break; } if (state->cfg.output_mpeg2_in_188_bytes) smo_mode |= (1 << 5); ret |= dib7000p_write_word(state, 235, smo_mode); ret |= dib7000p_write_word(state, 236, fifo_threshold); /* synchronous fread */ if (state->version != SOC7090) ret |= dib7000p_write_word(state, 1286, outreg); /* P_Div_active */ return ret; } static int dib7000p_set_diversity_in(struct dvb_frontend *demod, int onoff) { struct dib7000p_state *state = demod->demodulator_priv; if (state->div_force_off) { dprintk("diversity combination deactivated - forced by COFDM parameters"); onoff = 0; dib7000p_write_word(state, 207, 0); } else dib7000p_write_word(state, 207, (state->div_sync_wait << 4) | (1 << 2) | (2 << 0)); state->div_state = (u8) onoff; if 
(onoff) { dib7000p_write_word(state, 204, 6); dib7000p_write_word(state, 205, 16); /* P_dvsy_sync_mode = 0, P_dvsy_sync_enable=1, P_dvcb_comb_mode=2 */ } else { dib7000p_write_word(state, 204, 1); dib7000p_write_word(state, 205, 0); } return 0; } static int dib7000p_set_power_mode(struct dib7000p_state *state, enum dib7000p_power_mode mode) { /* by default everything is powered off */ u16 reg_774 = 0x3fff, reg_775 = 0xffff, reg_776 = 0x0007, reg_899 = 0x0003, reg_1280 = (0xfe00) | (dib7000p_read_word(state, 1280) & 0x01ff); /* now, depending on the requested mode, we power on */ switch (mode) { /* power up everything in the demod */ case DIB7000P_POWER_ALL: reg_774 = 0x0000; reg_775 = 0x0000; reg_776 = 0x0; reg_899 = 0x0; if (state->version == SOC7090) reg_1280 &= 0x001f; else reg_1280 &= 0x01ff; break; case DIB7000P_POWER_ANALOG_ADC: /* dem, cfg, iqc, sad, agc */ reg_774 &= ~((1 << 15) | (1 << 14) | (1 << 11) | (1 << 10) | (1 << 9)); /* nud */ reg_776 &= ~((1 << 0)); /* Dout */ if (state->version != SOC7090) reg_1280 &= ~((1 << 11)); reg_1280 &= ~(1 << 6); /* fall through wanted to enable the interfaces */ /* just leave power on the control-interfaces: GPIO and (I2C or SDIO) */ case DIB7000P_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C */ if (state->version == SOC7090) reg_1280 &= ~((1 << 7) | (1 << 5)); else reg_1280 &= ~((1 << 14) | (1 << 13) | (1 << 12) | (1 << 10)); break; /* TODO following stuff is just converted from the dib7000-driver - check when is used what */ } dib7000p_write_word(state, 774, reg_774); dib7000p_write_word(state, 775, reg_775); dib7000p_write_word(state, 776, reg_776); dib7000p_write_word(state, 1280, reg_1280); if (state->version != SOC7090) dib7000p_write_word(state, 899, reg_899); return 0; } static void dib7000p_set_adc_state(struct dib7000p_state *state, enum dibx000_adc_states no) { u16 reg_908 = 0, reg_909 = 0; u16 reg; if (state->version != SOC7090) { reg_908 = dib7000p_read_word(state, 908); reg_909 = 
dib7000p_read_word(state, 909); } switch (no) { case DIBX000_SLOW_ADC_ON: if (state->version == SOC7090) { reg = dib7000p_read_word(state, 1925); dib7000p_write_word(state, 1925, reg | (1 << 4) | (1 << 2)); /* en_slowAdc = 1 & reset_sladc = 1 */ reg = dib7000p_read_word(state, 1925); /* read acces to make it works... strange ... */ msleep(200); dib7000p_write_word(state, 1925, reg & ~(1 << 4)); /* en_slowAdc = 1 & reset_sladc = 0 */ reg = dib7000p_read_word(state, 72) & ~((0x3 << 14) | (0x3 << 12)); dib7000p_write_word(state, 72, reg | (1 << 14) | (3 << 12) | 524); /* ref = Vin1 => Vbg ; sel = Vin0 or Vin3 ; (Vin2 = Vcm) */ } else { reg_909 |= (1 << 1) | (1 << 0); dib7000p_write_word(state, 909, reg_909); reg_909 &= ~(1 << 1); } break; case DIBX000_SLOW_ADC_OFF: if (state->version == SOC7090) { reg = dib7000p_read_word(state, 1925); dib7000p_write_word(state, 1925, (reg & ~(1 << 2)) | (1 << 4)); /* reset_sladc = 1 en_slowAdc = 0 */ } else reg_909 |= (1 << 1) | (1 << 0); break; case DIBX000_ADC_ON: reg_908 &= 0x0fff; reg_909 &= 0x0003; break; case DIBX000_ADC_OFF: reg_908 |= (1 << 14) | (1 << 13) | (1 << 12); reg_909 |= (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2); break; case DIBX000_VBG_ENABLE: reg_908 &= ~(1 << 15); break; case DIBX000_VBG_DISABLE: reg_908 |= (1 << 15); break; default: break; } // dprintk( "908: %x, 909: %x\n", reg_908, reg_909); reg_909 |= (state->cfg.disable_sample_and_hold & 1) << 4; reg_908 |= (state->cfg.enable_current_mirror & 1) << 7; if (state->version != SOC7090) { dib7000p_write_word(state, 908, reg_908); dib7000p_write_word(state, 909, reg_909); } } static int dib7000p_set_bandwidth(struct dib7000p_state *state, u32 bw) { u32 timf; // store the current bandwidth for later use state->current_bandwidth = bw; if (state->timf == 0) { dprintk("using default timf"); timf = state->cfg.bw->timf; } else { dprintk("using updated timf"); timf = state->timf; } timf = timf * (bw / 50) / 160; dib7000p_write_word(state, 23, (u16) ((timf >> 16) & 
0xffff)); dib7000p_write_word(state, 24, (u16) ((timf) & 0xffff)); return 0; } static int dib7000p_sad_calib(struct dib7000p_state *state) { /* internal */ dib7000p_write_word(state, 73, (0 << 1) | (0 << 0)); if (state->version == SOC7090) dib7000p_write_word(state, 74, 2048); else dib7000p_write_word(state, 74, 776); /* do the calibration */ dib7000p_write_word(state, 73, (1 << 0)); dib7000p_write_word(state, 73, (0 << 0)); msleep(1); return 0; } static int dib7000p_set_wbd_ref(struct dvb_frontend *demod, u16 value) { struct dib7000p_state *state = demod->demodulator_priv; if (value > 4095) value = 4095; state->wbd_ref = value; return dib7000p_write_word(state, 105, (dib7000p_read_word(state, 105) & 0xf000) | value); } static int dib7000p_get_agc_values(struct dvb_frontend *fe, u16 *agc_global, u16 *agc1, u16 *agc2, u16 *wbd) { struct dib7000p_state *state = fe->demodulator_priv; if (agc_global != NULL) *agc_global = dib7000p_read_word(state, 394); if (agc1 != NULL) *agc1 = dib7000p_read_word(state, 392); if (agc2 != NULL) *agc2 = dib7000p_read_word(state, 393); if (wbd != NULL) *wbd = dib7000p_read_word(state, 397); return 0; } static int dib7000p_set_agc1_min(struct dvb_frontend *fe, u16 v) { struct dib7000p_state *state = fe->demodulator_priv; return dib7000p_write_word(state, 108, v); } static void dib7000p_reset_pll(struct dib7000p_state *state) { struct dibx000_bandwidth_config *bw = &state->cfg.bw[0]; u16 clk_cfg0; if (state->version == SOC7090) { dib7000p_write_word(state, 1856, (!bw->pll_reset << 13) | (bw->pll_range << 12) | (bw->pll_ratio << 6) | (bw->pll_prediv)); while (((dib7000p_read_word(state, 1856) >> 15) & 0x1) != 1) ; dib7000p_write_word(state, 1857, dib7000p_read_word(state, 1857) | (!bw->pll_bypass << 15)); } else { /* force PLL bypass */ clk_cfg0 = (1 << 15) | ((bw->pll_ratio & 0x3f) << 9) | (bw->modulo << 7) | (bw->ADClkSrc << 6) | (bw->IO_CLK_en_core << 5) | (bw->bypclk_div << 2) | (bw->enable_refdiv << 1) | (0 << 0); 
dib7000p_write_word(state, 900, clk_cfg0); /* P_pll_cfg */ dib7000p_write_word(state, 903, (bw->pll_prediv << 5) | (((bw->pll_ratio >> 6) & 0x3) << 3) | (bw->pll_range << 1) | bw->pll_reset); clk_cfg0 = (bw->pll_bypass << 15) | (clk_cfg0 & 0x7fff); dib7000p_write_word(state, 900, clk_cfg0); } dib7000p_write_word(state, 18, (u16) (((bw->internal * 1000) >> 16) & 0xffff)); dib7000p_write_word(state, 19, (u16) ((bw->internal * 1000) & 0xffff)); dib7000p_write_word(state, 21, (u16) ((bw->ifreq >> 16) & 0xffff)); dib7000p_write_word(state, 22, (u16) ((bw->ifreq) & 0xffff)); dib7000p_write_word(state, 72, bw->sad_cfg); } static u32 dib7000p_get_internal_freq(struct dib7000p_state *state) { u32 internal = (u32) dib7000p_read_word(state, 18) << 16; internal |= (u32) dib7000p_read_word(state, 19); internal /= 1000; return internal; } static int dib7000p_update_pll(struct dvb_frontend *fe, struct dibx000_bandwidth_config *bw) { struct dib7000p_state *state = fe->demodulator_priv; u16 reg_1857, reg_1856 = dib7000p_read_word(state, 1856); u8 loopdiv, prediv; u32 internal, xtal; /* get back old values */ prediv = reg_1856 & 0x3f; loopdiv = (reg_1856 >> 6) & 0x3f; if ((bw != NULL) && (bw->pll_prediv != prediv || bw->pll_ratio != loopdiv)) { dprintk("Updating pll (prediv: old = %d new = %d ; loopdiv : old = %d new = %d)", prediv, bw->pll_prediv, loopdiv, bw->pll_ratio); reg_1856 &= 0xf000; reg_1857 = dib7000p_read_word(state, 1857); dib7000p_write_word(state, 1857, reg_1857 & ~(1 << 15)); dib7000p_write_word(state, 1856, reg_1856 | ((bw->pll_ratio & 0x3f) << 6) | (bw->pll_prediv & 0x3f)); /* write new system clk into P_sec_len */ internal = dib7000p_get_internal_freq(state); xtal = (internal / loopdiv) * prediv; internal = 1000 * (xtal / bw->pll_prediv) * bw->pll_ratio; /* new internal */ dib7000p_write_word(state, 18, (u16) ((internal >> 16) & 0xffff)); dib7000p_write_word(state, 19, (u16) (internal & 0xffff)); dib7000p_write_word(state, 1857, reg_1857 | (1 << 15)); while 
(((dib7000p_read_word(state, 1856) >> 15) & 0x1) != 1) dprintk("Waiting for PLL to lock"); return 0; } return -EIO; } static int dib7000p_reset_gpio(struct dib7000p_state *st) { /* reset the GPIOs */ dprintk("gpio dir: %x: val: %x, pwm_pos: %x", st->gpio_dir, st->gpio_val, st->cfg.gpio_pwm_pos); dib7000p_write_word(st, 1029, st->gpio_dir); dib7000p_write_word(st, 1030, st->gpio_val); /* TODO 1031 is P_gpio_od */ dib7000p_write_word(st, 1032, st->cfg.gpio_pwm_pos); dib7000p_write_word(st, 1037, st->cfg.pwm_freq_div); return 0; } static int dib7000p_cfg_gpio(struct dib7000p_state *st, u8 num, u8 dir, u8 val) { st->gpio_dir = dib7000p_read_word(st, 1029); st->gpio_dir &= ~(1 << num); /* reset the direction bit */ st->gpio_dir |= (dir & 0x1) << num; /* set the new direction */ dib7000p_write_word(st, 1029, st->gpio_dir); st->gpio_val = dib7000p_read_word(st, 1030); st->gpio_val &= ~(1 << num); /* reset the direction bit */ st->gpio_val |= (val & 0x01) << num; /* set the new value */ dib7000p_write_word(st, 1030, st->gpio_val); return 0; } static int dib7000p_set_gpio(struct dvb_frontend *demod, u8 num, u8 dir, u8 val) { struct dib7000p_state *state = demod->demodulator_priv; return dib7000p_cfg_gpio(state, num, dir, val); } static u16 dib7000p_defaults[] = { // auto search configuration 3, 2, 0x0004, (1<<3)|(1<<11)|(1<<12)|(1<<13), 0x0814, /* Equal Lock */ 12, 6, 0x001b, 0x7740, 0x005b, 0x8d80, 0x01c9, 0xc380, 0x0000, 0x0080, 0x0000, 0x0090, 0x0001, 0xd4c0, 1, 26, 0x6680, /* set ADC level to -16 */ 11, 79, (1 << 13) - 825 - 117, (1 << 13) - 837 - 117, (1 << 13) - 811 - 117, (1 << 13) - 766 - 117, (1 << 13) - 737 - 117, (1 << 13) - 693 - 117, (1 << 13) - 648 - 117, (1 << 13) - 619 - 117, (1 << 13) - 575 - 117, (1 << 13) - 531 - 117, (1 << 13) - 501 - 117, 1, 142, 0x0410, /* disable power smoothing */ 8, 145, 0, 0, 0, 0, 0, 0, 0, 0, 1, 154, 1 << 13, 1, 168, 0x0ccd, 1, 183, 0x200f, 1, 212, 0x169, 5, 187, 0x023d, 0x00a4, 0x00a4, 0x7ff0, 0x3ccc, 1, 198, 0x800, 1, 222, 
0x0010, 1, 235, 0x0062, 0, }; static void dib7000p_reset_stats(struct dvb_frontend *fe); static int dib7000p_demod_reset(struct dib7000p_state *state) { dib7000p_set_power_mode(state, DIB7000P_POWER_ALL); if (state->version == SOC7090) dibx000_reset_i2c_master(&state->i2c_master); dib7000p_set_adc_state(state, DIBX000_VBG_ENABLE); /* restart all parts */ dib7000p_write_word(state, 770, 0xffff); dib7000p_write_word(state, 771, 0xffff); dib7000p_write_word(state, 772, 0x001f); dib7000p_write_word(state, 1280, 0x001f - ((1 << 4) | (1 << 3))); dib7000p_write_word(state, 770, 0); dib7000p_write_word(state, 771, 0); dib7000p_write_word(state, 772, 0); dib7000p_write_word(state, 1280, 0); if (state->version != SOC7090) { dib7000p_write_word(state, 898, 0x0003); dib7000p_write_word(state, 898, 0); } /* default */ dib7000p_reset_pll(state); if (dib7000p_reset_gpio(state) != 0) dprintk("GPIO reset was not successful."); if (state->version == SOC7090) { dib7000p_write_word(state, 899, 0); /* impulse noise */ dib7000p_write_word(state, 42, (1<<5) | 3); /* P_iqc_thsat_ipc = 1 ; P_iqc_win2 = 3 */ dib7000p_write_word(state, 43, 0x2d4); /*-300 fag P_iqc_dect_min = -280 */ dib7000p_write_word(state, 44, 300); /* 300 fag P_iqc_dect_min = +280 */ dib7000p_write_word(state, 273, (0<<6) | 30); } if (dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) != 0) dprintk("OUTPUT_MODE could not be reset."); dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_ON); dib7000p_sad_calib(state); dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_OFF); /* unforce divstr regardless whether i2c enumeration was done or not */ dib7000p_write_word(state, 1285, dib7000p_read_word(state, 1285) & ~(1 << 1)); dib7000p_set_bandwidth(state, 8000); if (state->version == SOC7090) { dib7000p_write_word(state, 36, 0x0755);/* P_iqc_impnc_on =1 & P_iqc_corr_inh = 1 for impulsive noise */ } else { if (state->cfg.tuner_is_baseband) dib7000p_write_word(state, 36, 0x0755); else dib7000p_write_word(state, 36, 0x1f55); } 
dib7000p_write_tab(state, dib7000p_defaults); if (state->version != SOC7090) { dib7000p_write_word(state, 901, 0x0006); dib7000p_write_word(state, 902, (3 << 10) | (1 << 6)); dib7000p_write_word(state, 905, 0x2c8e); } dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY); return 0; } static void dib7000p_pll_clk_cfg(struct dib7000p_state *state) { u16 tmp = 0; tmp = dib7000p_read_word(state, 903); dib7000p_write_word(state, 903, (tmp | 0x1)); tmp = dib7000p_read_word(state, 900); dib7000p_write_word(state, 900, (tmp & 0x7fff) | (1 << 6)); } static void dib7000p_restart_agc(struct dib7000p_state *state) { // P_restart_iqc & P_restart_agc dib7000p_write_word(state, 770, (1 << 11) | (1 << 9)); dib7000p_write_word(state, 770, 0x0000); } static int dib7000p_update_lna(struct dib7000p_state *state) { u16 dyn_gain; if (state->cfg.update_lna) { dyn_gain = dib7000p_read_word(state, 394); if (state->cfg.update_lna(&state->demod, dyn_gain)) { dib7000p_restart_agc(state); return 1; } } return 0; } static int dib7000p_set_agc_config(struct dib7000p_state *state, u8 band) { struct dibx000_agc_config *agc = NULL; int i; if (state->current_band == band && state->current_agc != NULL) return 0; state->current_band = band; for (i = 0; i < state->cfg.agc_config_count; i++) if (state->cfg.agc[i].band_caps & band) { agc = &state->cfg.agc[i]; break; } if (agc == NULL) { dprintk("no valid AGC configuration found for band 0x%02x", band); return -EINVAL; } state->current_agc = agc; /* AGC */ dib7000p_write_word(state, 75, agc->setup); dib7000p_write_word(state, 76, agc->inv_gain); dib7000p_write_word(state, 77, agc->time_stabiliz); dib7000p_write_word(state, 100, (agc->alpha_level << 12) | agc->thlock); // Demod AGC loop configuration dib7000p_write_word(state, 101, (agc->alpha_mant << 5) | agc->alpha_exp); dib7000p_write_word(state, 102, (agc->beta_mant << 6) | agc->beta_exp); /* AGC continued */ dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d", state->wbd_ref != 0 ? 
state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_sel); if (state->wbd_ref != 0) dib7000p_write_word(state, 105, (agc->wbd_inv << 12) | state->wbd_ref); else dib7000p_write_word(state, 105, (agc->wbd_inv << 12) | agc->wbd_ref); dib7000p_write_word(state, 106, (agc->wbd_sel << 13) | (agc->wbd_alpha << 9) | (agc->perform_agc_softsplit << 8)); dib7000p_write_word(state, 107, agc->agc1_max); dib7000p_write_word(state, 108, agc->agc1_min); dib7000p_write_word(state, 109, agc->agc2_max); dib7000p_write_word(state, 110, agc->agc2_min); dib7000p_write_word(state, 111, (agc->agc1_pt1 << 8) | agc->agc1_pt2); dib7000p_write_word(state, 112, agc->agc1_pt3); dib7000p_write_word(state, 113, (agc->agc1_slope1 << 8) | agc->agc1_slope2); dib7000p_write_word(state, 114, (agc->agc2_pt1 << 8) | agc->agc2_pt2); dib7000p_write_word(state, 115, (agc->agc2_slope1 << 8) | agc->agc2_slope2); return 0; } static void dib7000p_set_dds(struct dib7000p_state *state, s32 offset_khz) { u32 internal = dib7000p_get_internal_freq(state); s32 unit_khz_dds_val = 67108864 / (internal); /* 2**26 / Fsampling is the unit 1KHz offset */ u32 abs_offset_khz = ABS(offset_khz); u32 dds = state->cfg.bw->ifreq & 0x1ffffff; u8 invert = !!(state->cfg.bw->ifreq & (1 << 25)); dprintk("setting a frequency offset of %dkHz internal freq = %d invert = %d", offset_khz, internal, invert); if (offset_khz < 0) unit_khz_dds_val *= -1; /* IF tuner */ if (invert) dds -= (abs_offset_khz * unit_khz_dds_val); /* /100 because of /100 on the unit_khz_dds_val line calc for better accuracy */ else dds += (abs_offset_khz * unit_khz_dds_val); if (abs_offset_khz <= (internal / 2)) { /* Max dds offset is the half of the demod freq */ dib7000p_write_word(state, 21, (u16) (((dds >> 16) & 0x1ff) | (0 << 10) | (invert << 9))); dib7000p_write_word(state, 22, (u16) (dds & 0xffff)); } } static int dib7000p_agc_startup(struct dvb_frontend *demod) { struct dtv_frontend_properties *ch = &demod->dtv_property_cache; 
struct dib7000p_state *state = demod->demodulator_priv; int ret = -1; u8 *agc_state = &state->agc_state; u8 agc_split; u16 reg; u32 upd_demod_gain_period = 0x1000; s32 frequency_offset = 0; switch (state->agc_state) { case 0: dib7000p_set_power_mode(state, DIB7000P_POWER_ALL); if (state->version == SOC7090) { reg = dib7000p_read_word(state, 0x79b) & 0xff00; dib7000p_write_word(state, 0x79a, upd_demod_gain_period & 0xFFFF); /* lsb */ dib7000p_write_word(state, 0x79b, reg | (1 << 14) | ((upd_demod_gain_period >> 16) & 0xFF)); /* enable adc i & q */ reg = dib7000p_read_word(state, 0x780); dib7000p_write_word(state, 0x780, (reg | (0x3)) & (~(1 << 7))); } else { dib7000p_set_adc_state(state, DIBX000_ADC_ON); dib7000p_pll_clk_cfg(state); } if (dib7000p_set_agc_config(state, BAND_OF_FREQUENCY(ch->frequency / 1000)) != 0) return -1; if (demod->ops.tuner_ops.get_frequency) { u32 frequency_tuner; demod->ops.tuner_ops.get_frequency(demod, &frequency_tuner); frequency_offset = (s32)frequency_tuner / 1000 - ch->frequency / 1000; } dib7000p_set_dds(state, frequency_offset); ret = 7; (*agc_state)++; break; case 1: if (state->cfg.agc_control) state->cfg.agc_control(&state->demod, 1); dib7000p_write_word(state, 78, 32768); if (!state->current_agc->perform_agc_softsplit) { /* we are using the wbd - so slow AGC startup */ /* force 0 split on WBD and restart AGC */ dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (state->current_agc->wbd_alpha << 9) | (1 << 8)); (*agc_state)++; ret = 5; } else { /* default AGC startup */ (*agc_state) = 4; /* wait AGC rough lock time */ ret = 7; } dib7000p_restart_agc(state); break; case 2: /* fast split search path after 5sec */ dib7000p_write_word(state, 75, state->current_agc->setup | (1 << 4)); /* freeze AGC loop */ dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (2 << 9) | (0 << 8)); /* fast split search 0.25kHz */ (*agc_state)++; ret = 14; break; case 3: /* split search ended */ agc_split = (u8) 
dib7000p_read_word(state, 396); /* store the split value for the next time */ dib7000p_write_word(state, 78, dib7000p_read_word(state, 394)); /* set AGC gain start value */ dib7000p_write_word(state, 75, state->current_agc->setup); /* std AGC loop */ dib7000p_write_word(state, 106, (state->current_agc->wbd_sel << 13) | (state->current_agc->wbd_alpha << 9) | agc_split); /* standard split search */ dib7000p_restart_agc(state); dprintk("SPLIT %p: %hd", demod, agc_split); (*agc_state)++; ret = 5; break; case 4: /* LNA startup */ ret = 7; if (dib7000p_update_lna(state)) ret = 5; else (*agc_state)++; break; case 5: if (state->cfg.agc_control) state->cfg.agc_control(&state->demod, 0); (*agc_state)++; break; default: break; } return ret; } static void dib7000p_update_timf(struct dib7000p_state *state) { u32 timf = (dib7000p_read_word(state, 427) << 16) | dib7000p_read_word(state, 428); state->timf = timf * 160 / (state->current_bandwidth / 50); dib7000p_write_word(state, 23, (u16) (timf >> 16)); dib7000p_write_word(state, 24, (u16) (timf & 0xffff)); dprintk("updated timf_frequency: %d (default: %d)", state->timf, state->cfg.bw->timf); } static u32 dib7000p_ctrl_timf(struct dvb_frontend *fe, u8 op, u32 timf) { struct dib7000p_state *state = fe->demodulator_priv; switch (op) { case DEMOD_TIMF_SET: state->timf = timf; break; case DEMOD_TIMF_UPDATE: dib7000p_update_timf(state); break; case DEMOD_TIMF_GET: break; } dib7000p_set_bandwidth(state, state->current_bandwidth); return state->timf; } static void dib7000p_set_channel(struct dib7000p_state *state, struct dtv_frontend_properties *ch, u8 seq) { u16 value, est[4]; dib7000p_set_bandwidth(state, BANDWIDTH_TO_KHZ(ch->bandwidth_hz)); /* nfft, guard, qam, alpha */ value = 0; switch (ch->transmission_mode) { case TRANSMISSION_MODE_2K: value |= (0 << 7); break; case TRANSMISSION_MODE_4K: value |= (2 << 7); break; default: case TRANSMISSION_MODE_8K: value |= (1 << 7); break; } switch (ch->guard_interval) { case GUARD_INTERVAL_1_32: 
value |= (0 << 5); break; case GUARD_INTERVAL_1_16: value |= (1 << 5); break; case GUARD_INTERVAL_1_4: value |= (3 << 5); break; default: case GUARD_INTERVAL_1_8: value |= (2 << 5); break; } switch (ch->modulation) { case QPSK: value |= (0 << 3); break; case QAM_16: value |= (1 << 3); break; default: case QAM_64: value |= (2 << 3); break; } switch (HIERARCHY_1) { case HIERARCHY_2: value |= 2; break; case HIERARCHY_4: value |= 4; break; default: case HIERARCHY_1: value |= 1; break; } dib7000p_write_word(state, 0, value); dib7000p_write_word(state, 5, (seq << 4) | 1); /* do not force tps, search list 0 */ /* P_dintl_native, P_dintlv_inv, P_hrch, P_code_rate, P_select_hp */ value = 0; if (1 != 0) value |= (1 << 6); if (ch->hierarchy == 1) value |= (1 << 4); if (1 == 1) value |= 1; switch ((ch->hierarchy == 0 || 1 == 1) ? ch->code_rate_HP : ch->code_rate_LP) { case FEC_2_3: value |= (2 << 1); break; case FEC_3_4: value |= (3 << 1); break; case FEC_5_6: value |= (5 << 1); break; case FEC_7_8: value |= (7 << 1); break; default: case FEC_1_2: value |= (1 << 1); break; } dib7000p_write_word(state, 208, value); /* offset loop parameters */ dib7000p_write_word(state, 26, 0x6680); dib7000p_write_word(state, 32, 0x0003); dib7000p_write_word(state, 29, 0x1273); dib7000p_write_word(state, 33, 0x0005); /* P_dvsy_sync_wait */ switch (ch->transmission_mode) { case TRANSMISSION_MODE_8K: value = 256; break; case TRANSMISSION_MODE_4K: value = 128; break; case TRANSMISSION_MODE_2K: default: value = 64; break; } switch (ch->guard_interval) { case GUARD_INTERVAL_1_16: value *= 2; break; case GUARD_INTERVAL_1_8: value *= 4; break; case GUARD_INTERVAL_1_4: value *= 8; break; default: case GUARD_INTERVAL_1_32: value *= 1; break; } if (state->cfg.diversity_delay == 0) state->div_sync_wait = (value * 3) / 2 + 48; else state->div_sync_wait = (value * 3) / 2 + state->cfg.diversity_delay; /* deactive the possibility of diversity reception if extended interleaver */ state->div_force_off = !1 && 
ch->transmission_mode != TRANSMISSION_MODE_8K; dib7000p_set_diversity_in(&state->demod, state->div_state); /* channel estimation fine configuration */ switch (ch->modulation) { case QAM_64: est[0] = 0x0148; /* P_adp_regul_cnt 0.04 */ est[1] = 0xfff0; /* P_adp_noise_cnt -0.002 */ est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */ est[3] = 0xfff8; /* P_adp_noise_ext -0.001 */ break; case QAM_16: est[0] = 0x023d; /* P_adp_regul_cnt 0.07 */ est[1] = 0xffdf; /* P_adp_noise_cnt -0.004 */ est[2] = 0x00a4; /* P_adp_regul_ext 0.02 */ est[3] = 0xfff0; /* P_adp_noise_ext -0.002 */ break; default: est[0] = 0x099a; /* P_adp_regul_cnt 0.3 */ est[1] = 0xffae; /* P_adp_noise_cnt -0.01 */ est[2] = 0x0333; /* P_adp_regul_ext 0.1 */ est[3] = 0xfff8; /* P_adp_noise_ext -0.002 */ break; } for (value = 0; value < 4; value++) dib7000p_write_word(state, 187 + value, est[value]); } static int dib7000p_autosearch_start(struct dvb_frontend *demod) { struct dtv_frontend_properties *ch = &demod->dtv_property_cache; struct dib7000p_state *state = demod->demodulator_priv; struct dtv_frontend_properties schan; u32 value, factor; u32 internal = dib7000p_get_internal_freq(state); schan = *ch; schan.modulation = QAM_64; schan.guard_interval = GUARD_INTERVAL_1_32; schan.transmission_mode = TRANSMISSION_MODE_8K; schan.code_rate_HP = FEC_2_3; schan.code_rate_LP = FEC_3_4; schan.hierarchy = 0; dib7000p_set_channel(state, &schan, 7); factor = BANDWIDTH_TO_KHZ(ch->bandwidth_hz); if (factor >= 5000) { if (state->version == SOC7090) factor = 2; else factor = 1; } else factor = 6; value = 30 * internal * factor; dib7000p_write_word(state, 6, (u16) ((value >> 16) & 0xffff)); dib7000p_write_word(state, 7, (u16) (value & 0xffff)); value = 100 * internal * factor; dib7000p_write_word(state, 8, (u16) ((value >> 16) & 0xffff)); dib7000p_write_word(state, 9, (u16) (value & 0xffff)); value = 500 * internal * factor; dib7000p_write_word(state, 10, (u16) ((value >> 16) & 0xffff)); dib7000p_write_word(state, 11, (u16) (value 
& 0xffff)); value = dib7000p_read_word(state, 0); dib7000p_write_word(state, 0, (u16) ((1 << 9) | value)); dib7000p_read_word(state, 1284); dib7000p_write_word(state, 0, (u16) value); return 0; } static int dib7000p_autosearch_is_irq(struct dvb_frontend *demod) { struct dib7000p_state *state = demod->demodulator_priv; u16 irq_pending = dib7000p_read_word(state, 1284); if (irq_pending & 0x1) return 1; if (irq_pending & 0x2) return 2; return 0; } static void dib7000p_spur_protect(struct dib7000p_state *state, u32 rf_khz, u32 bw) { static s16 notch[] = { 16143, 14402, 12238, 9713, 6902, 3888, 759, -2392 }; static u8 sine[] = { 0, 2, 3, 5, 6, 8, 9, 11, 13, 14, 16, 17, 19, 20, 22, 24, 25, 27, 28, 30, 31, 33, 34, 36, 38, 39, 41, 42, 44, 45, 47, 48, 50, 51, 53, 55, 56, 58, 59, 61, 62, 64, 65, 67, 68, 70, 71, 73, 74, 76, 77, 79, 80, 82, 83, 85, 86, 88, 89, 91, 92, 94, 95, 97, 98, 99, 101, 102, 104, 105, 107, 108, 109, 111, 112, 114, 115, 117, 118, 119, 121, 122, 123, 125, 126, 128, 129, 130, 132, 133, 134, 136, 137, 138, 140, 141, 142, 144, 145, 146, 147, 149, 150, 151, 152, 154, 155, 156, 157, 159, 160, 161, 162, 164, 165, 166, 167, 168, 170, 171, 172, 173, 174, 175, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, 207, 208, 209, 210, 211, 212, 213, 214, 215, 215, 216, 217, 218, 219, 220, 220, 221, 222, 223, 224, 224, 225, 226, 227, 227, 228, 229, 229, 230, 231, 231, 232, 233, 233, 234, 235, 235, 236, 237, 237, 238, 238, 239, 239, 240, 241, 241, 242, 242, 243, 243, 244, 244, 245, 245, 245, 246, 246, 247, 247, 248, 248, 248, 249, 249, 249, 250, 250, 250, 251, 251, 251, 252, 252, 252, 252, 253, 253, 253, 253, 254, 254, 254, 254, 254, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255 }; u32 xtal = state->cfg.bw->xtal_hz / 1000; int f_rel = DIV_ROUND_CLOSEST(rf_khz, xtal) * xtal - rf_khz; int k; int coef_re[8], coef_im[8]; int bw_khz = bw; u32 
pha; dprintk("relative position of the Spur: %dk (RF: %dk, XTAL: %dk)", f_rel, rf_khz, xtal); if (f_rel < -bw_khz / 2 || f_rel > bw_khz / 2) return; bw_khz /= 100; dib7000p_write_word(state, 142, 0x0610); for (k = 0; k < 8; k++) { pha = ((f_rel * (k + 1) * 112 * 80 / bw_khz) / 1000) & 0x3ff; if (pha == 0) { coef_re[k] = 256; coef_im[k] = 0; } else if (pha < 256) { coef_re[k] = sine[256 - (pha & 0xff)]; coef_im[k] = sine[pha & 0xff]; } else if (pha == 256) { coef_re[k] = 0; coef_im[k] = 256; } else if (pha < 512) { coef_re[k] = -sine[pha & 0xff]; coef_im[k] = sine[256 - (pha & 0xff)]; } else if (pha == 512) { coef_re[k] = -256; coef_im[k] = 0; } else if (pha < 768) { coef_re[k] = -sine[256 - (pha & 0xff)]; coef_im[k] = -sine[pha & 0xff]; } else if (pha == 768) { coef_re[k] = 0; coef_im[k] = -256; } else { coef_re[k] = sine[pha & 0xff]; coef_im[k] = -sine[256 - (pha & 0xff)]; } coef_re[k] *= notch[k]; coef_re[k] += (1 << 14); if (coef_re[k] >= (1 << 24)) coef_re[k] = (1 << 24) - 1; coef_re[k] /= (1 << 15); coef_im[k] *= notch[k]; coef_im[k] += (1 << 14); if (coef_im[k] >= (1 << 24)) coef_im[k] = (1 << 24) - 1; coef_im[k] /= (1 << 15); dprintk("PALF COEF: %d re: %d im: %d", k, coef_re[k], coef_im[k]); dib7000p_write_word(state, 143, (0 << 14) | (k << 10) | (coef_re[k] & 0x3ff)); dib7000p_write_word(state, 144, coef_im[k] & 0x3ff); dib7000p_write_word(state, 143, (1 << 14) | (k << 10) | (coef_re[k] & 0x3ff)); } dib7000p_write_word(state, 143, 0); } static int dib7000p_tune(struct dvb_frontend *demod) { struct dtv_frontend_properties *ch = &demod->dtv_property_cache; struct dib7000p_state *state = demod->demodulator_priv; u16 tmp = 0; if (ch != NULL) dib7000p_set_channel(state, ch, 0); else return -EINVAL; // restart demod dib7000p_write_word(state, 770, 0x4000); dib7000p_write_word(state, 770, 0x0000); msleep(45); /* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=0, P_ctrl_alpha_isi=3, P_ctrl_inh_cor4=1, P_ctrl_alpha_cor4=3 */ tmp = (0 << 14) | (4 << 10) | (0 << 
9) | (3 << 5) | (1 << 4) | (0x3); if (state->sfn_workaround_active) { dprintk("SFN workaround is active"); tmp |= (1 << 9); dib7000p_write_word(state, 166, 0x4000); } else { dib7000p_write_word(state, 166, 0x0000); } dib7000p_write_word(state, 29, tmp); // never achieved a lock with that bandwidth so far - wait for osc-freq to update if (state->timf == 0) msleep(200); /* offset loop parameters */ /* P_timf_alpha, P_corm_alpha=6, P_corm_thres=0x80 */ tmp = (6 << 8) | 0x80; switch (ch->transmission_mode) { case TRANSMISSION_MODE_2K: tmp |= (2 << 12); break; case TRANSMISSION_MODE_4K: tmp |= (3 << 12); break; default: case TRANSMISSION_MODE_8K: tmp |= (4 << 12); break; } dib7000p_write_word(state, 26, tmp); /* timf_a(6xxx) */ /* P_ctrl_freeze_pha_shift=0, P_ctrl_pha_off_max */ tmp = (0 << 4); switch (ch->transmission_mode) { case TRANSMISSION_MODE_2K: tmp |= 0x6; break; case TRANSMISSION_MODE_4K: tmp |= 0x7; break; default: case TRANSMISSION_MODE_8K: tmp |= 0x8; break; } dib7000p_write_word(state, 32, tmp); /* P_ctrl_sfreq_inh=0, P_ctrl_sfreq_step */ tmp = (0 << 4); switch (ch->transmission_mode) { case TRANSMISSION_MODE_2K: tmp |= 0x6; break; case TRANSMISSION_MODE_4K: tmp |= 0x7; break; default: case TRANSMISSION_MODE_8K: tmp |= 0x8; break; } dib7000p_write_word(state, 33, tmp); tmp = dib7000p_read_word(state, 509); if (!((tmp >> 6) & 0x1)) { /* restart the fec */ tmp = dib7000p_read_word(state, 771); dib7000p_write_word(state, 771, tmp | (1 << 1)); dib7000p_write_word(state, 771, tmp); msleep(40); tmp = dib7000p_read_word(state, 509); } // we achieved a lock - it's time to update the osc freq if ((tmp >> 6) & 0x1) { dib7000p_update_timf(state); /* P_timf_alpha += 2 */ tmp = dib7000p_read_word(state, 26); dib7000p_write_word(state, 26, (tmp & ~(0xf << 12)) | ((((tmp >> 12) & 0xf) + 5) << 12)); } if (state->cfg.spur_protect) dib7000p_spur_protect(state, ch->frequency / 1000, BANDWIDTH_TO_KHZ(ch->bandwidth_hz)); dib7000p_set_bandwidth(state, 
BANDWIDTH_TO_KHZ(ch->bandwidth_hz)); dib7000p_reset_stats(demod); return 0; } static int dib7000p_wakeup(struct dvb_frontend *demod) { struct dib7000p_state *state = demod->demodulator_priv; dib7000p_set_power_mode(state, DIB7000P_POWER_ALL); dib7000p_set_adc_state(state, DIBX000_SLOW_ADC_ON); if (state->version == SOC7090) dib7000p_sad_calib(state); return 0; } static int dib7000p_sleep(struct dvb_frontend *demod) { struct dib7000p_state *state = demod->demodulator_priv; if (state->version == SOC7090) return dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY); return dib7000p_set_output_mode(state, OUTMODE_HIGH_Z) | dib7000p_set_power_mode(state, DIB7000P_POWER_INTERFACE_ONLY); } static int dib7000p_identify(struct dib7000p_state *st) { u16 value; dprintk("checking demod on I2C address: %d (%x)", st->i2c_addr, st->i2c_addr); if ((value = dib7000p_read_word(st, 768)) != 0x01b3) { dprintk("wrong Vendor ID (read=0x%x)", value); return -EREMOTEIO; } if ((value = dib7000p_read_word(st, 769)) != 0x4000) { dprintk("wrong Device ID (%x)", value); return -EREMOTEIO; } return 0; } static int dib7000p_get_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *fep = &fe->dtv_property_cache; struct dib7000p_state *state = fe->demodulator_priv; u16 tps = dib7000p_read_word(state, 463); fep->inversion = INVERSION_AUTO; fep->bandwidth_hz = BANDWIDTH_TO_HZ(state->current_bandwidth); switch ((tps >> 8) & 0x3) { case 0: fep->transmission_mode = TRANSMISSION_MODE_2K; break; case 1: fep->transmission_mode = TRANSMISSION_MODE_8K; break; /* case 2: fep->transmission_mode = TRANSMISSION_MODE_4K; break; */ } switch (tps & 0x3) { case 0: fep->guard_interval = GUARD_INTERVAL_1_32; break; case 1: fep->guard_interval = GUARD_INTERVAL_1_16; break; case 2: fep->guard_interval = GUARD_INTERVAL_1_8; break; case 3: fep->guard_interval = GUARD_INTERVAL_1_4; break; } switch ((tps >> 14) & 0x3) { case 0: fep->modulation = QPSK; break; case 1: fep->modulation = QAM_16; break; 
case 2: default: fep->modulation = QAM_64; break; } /* as long as the frontend_param structure is fixed for hierarchical transmission I refuse to use it */ /* (tps >> 13) & 0x1 == hrch is used, (tps >> 10) & 0x7 == alpha */ fep->hierarchy = HIERARCHY_NONE; switch ((tps >> 5) & 0x7) { case 1: fep->code_rate_HP = FEC_1_2; break; case 2: fep->code_rate_HP = FEC_2_3; break; case 3: fep->code_rate_HP = FEC_3_4; break; case 5: fep->code_rate_HP = FEC_5_6; break; case 7: default: fep->code_rate_HP = FEC_7_8; break; } switch ((tps >> 2) & 0x7) { case 1: fep->code_rate_LP = FEC_1_2; break; case 2: fep->code_rate_LP = FEC_2_3; break; case 3: fep->code_rate_LP = FEC_3_4; break; case 5: fep->code_rate_LP = FEC_5_6; break; case 7: default: fep->code_rate_LP = FEC_7_8; break; } /* native interleaver: (dib7000p_read_word(state, 464) >> 5) & 0x1 */ return 0; } static int dib7000p_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *fep = &fe->dtv_property_cache; struct dib7000p_state *state = fe->demodulator_priv; int time, ret; if (state->version == SOC7090) dib7090_set_diversity_in(fe, 0); else dib7000p_set_output_mode(state, OUTMODE_HIGH_Z); /* maybe the parameter has been changed */ state->sfn_workaround_active = buggy_sfn_workaround; if (fe->ops.tuner_ops.set_params) fe->ops.tuner_ops.set_params(fe); /* start up the AGC */ state->agc_state = 0; do { time = dib7000p_agc_startup(fe); if (time != -1) msleep(time); } while (time != -1); if (fep->transmission_mode == TRANSMISSION_MODE_AUTO || fep->guard_interval == GUARD_INTERVAL_AUTO || fep->modulation == QAM_AUTO || fep->code_rate_HP == FEC_AUTO) { int i = 800, found; dib7000p_autosearch_start(fe); do { msleep(1); found = dib7000p_autosearch_is_irq(fe); } while (found == 0 && i--); dprintk("autosearch returns: %d", found); if (found == 0 || found == 1) return 0; dib7000p_get_frontend(fe); } ret = dib7000p_tune(fe); /* make this a config parameter */ if (state->version == SOC7090) { dib7090_set_output_mode(fe, 
state->cfg.output_mode); if (state->cfg.enMpegOutput == 0) { dib7090_setDibTxMux(state, MPEG_ON_DIBTX); dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS); } } else dib7000p_set_output_mode(state, state->cfg.output_mode); return ret; } static int dib7000p_get_stats(struct dvb_frontend *fe, fe_status_t stat); static int dib7000p_read_status(struct dvb_frontend *fe, fe_status_t * stat) { struct dib7000p_state *state = fe->demodulator_priv; u16 lock = dib7000p_read_word(state, 509); *stat = 0; if (lock & 0x8000) *stat |= FE_HAS_SIGNAL; if (lock & 0x3000) *stat |= FE_HAS_CARRIER; if (lock & 0x0100) *stat |= FE_HAS_VITERBI; if (lock & 0x0010) *stat |= FE_HAS_SYNC; if ((lock & 0x0038) == 0x38) *stat |= FE_HAS_LOCK; dib7000p_get_stats(fe, *stat); return 0; } static int dib7000p_read_ber(struct dvb_frontend *fe, u32 * ber) { struct dib7000p_state *state = fe->demodulator_priv; *ber = (dib7000p_read_word(state, 500) << 16) | dib7000p_read_word(state, 501); return 0; } static int dib7000p_read_unc_blocks(struct dvb_frontend *fe, u32 * unc) { struct dib7000p_state *state = fe->demodulator_priv; *unc = dib7000p_read_word(state, 506); return 0; } static int dib7000p_read_signal_strength(struct dvb_frontend *fe, u16 * strength) { struct dib7000p_state *state = fe->demodulator_priv; u16 val = dib7000p_read_word(state, 394); *strength = 65535 - val; return 0; } static u32 dib7000p_get_snr(struct dvb_frontend *fe) { struct dib7000p_state *state = fe->demodulator_priv; u16 val; s32 signal_mant, signal_exp, noise_mant, noise_exp; u32 result = 0; val = dib7000p_read_word(state, 479); noise_mant = (val >> 4) & 0xff; noise_exp = ((val & 0xf) << 2); val = dib7000p_read_word(state, 480); noise_exp += ((val >> 14) & 0x3); if ((noise_exp & 0x20) != 0) noise_exp -= 0x40; signal_mant = (val >> 6) & 0xFF; signal_exp = (val & 0x3F); if ((signal_exp & 0x20) != 0) signal_exp -= 0x40; if (signal_mant != 0) result = intlog10(2) * 10 * signal_exp + 10 * intlog10(signal_mant); else result = intlog10(2) * 
10 * signal_exp - 100; if (noise_mant != 0) result -= intlog10(2) * 10 * noise_exp + 10 * intlog10(noise_mant); else result -= intlog10(2) * 10 * noise_exp - 100; return result; } static int dib7000p_read_snr(struct dvb_frontend *fe, u16 *snr) { u32 result; result = dib7000p_get_snr(fe); *snr = result / ((1 << 24) / 10); return 0; } static void dib7000p_reset_stats(struct dvb_frontend *demod) { struct dib7000p_state *state = demod->demodulator_priv; struct dtv_frontend_properties *c = &demod->dtv_property_cache; u32 ucb; memset(&c->strength, 0, sizeof(c->strength)); memset(&c->cnr, 0, sizeof(c->cnr)); memset(&c->post_bit_error, 0, sizeof(c->post_bit_error)); memset(&c->post_bit_count, 0, sizeof(c->post_bit_count)); memset(&c->block_error, 0, sizeof(c->block_error)); c->strength.len = 1; c->cnr.len = 1; c->block_error.len = 1; c->block_count.len = 1; c->post_bit_error.len = 1; c->post_bit_count.len = 1; c->strength.stat[0].scale = FE_SCALE_DECIBEL; c->strength.stat[0].uvalue = 0; c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; dib7000p_read_unc_blocks(demod, &ucb); state->old_ucb = ucb; state->ber_jiffies_stats = 0; state->per_jiffies_stats = 0; } struct linear_segments { unsigned x; signed y; }; /* * Table to estimate signal strength in dBm. * This table should be empirically determinated by measuring the signal * strength generated by a RF generator directly connected into * a device. * This table was determinated by measuring the signal strength generated * by a DTA-2111 RF generator directly connected into a dib7000p device * (a Hauppauge Nova-TD stick), using a good quality 3 meters length * RC6 cable and good RC6 connectors, connected directly to antenna 1. 
* As the minimum output power of DTA-2111 is -31dBm, a 16 dBm attenuator * were used, for the lower power values. * The real value can actually be on other devices, or even at the * second antena input, depending on several factors, like if LNA * is enabled or not, if diversity is enabled, type of connectors, etc. * Yet, it is better to use this measure in dB than a random non-linear * percentage value, especially for antenna adjustments. * On my tests, the precision of the measure using this table is about * 0.5 dB, with sounds reasonable enough to adjust antennas. */ #define DB_OFFSET 131000 static struct linear_segments strength_to_db_table[] = { { 63630, DB_OFFSET - 20500}, { 62273, DB_OFFSET - 21000}, { 60162, DB_OFFSET - 22000}, { 58730, DB_OFFSET - 23000}, { 58294, DB_OFFSET - 24000}, { 57778, DB_OFFSET - 25000}, { 57320, DB_OFFSET - 26000}, { 56779, DB_OFFSET - 27000}, { 56293, DB_OFFSET - 28000}, { 55724, DB_OFFSET - 29000}, { 55145, DB_OFFSET - 30000}, { 54680, DB_OFFSET - 31000}, { 54293, DB_OFFSET - 32000}, { 53813, DB_OFFSET - 33000}, { 53427, DB_OFFSET - 34000}, { 52981, DB_OFFSET - 35000}, { 52636, DB_OFFSET - 36000}, { 52014, DB_OFFSET - 37000}, { 51674, DB_OFFSET - 38000}, { 50692, DB_OFFSET - 39000}, { 49824, DB_OFFSET - 40000}, { 49052, DB_OFFSET - 41000}, { 48436, DB_OFFSET - 42000}, { 47836, DB_OFFSET - 43000}, { 47368, DB_OFFSET - 44000}, { 46468, DB_OFFSET - 45000}, { 45597, DB_OFFSET - 46000}, { 44586, DB_OFFSET - 47000}, { 43667, DB_OFFSET - 48000}, { 42673, DB_OFFSET - 49000}, { 41816, DB_OFFSET - 50000}, { 40876, DB_OFFSET - 51000}, { 0, 0}, }; static u32 interpolate_value(u32 value, struct linear_segments *segments, unsigned len) { u64 tmp64; u32 dx; s32 dy; int i, ret; if (value >= segments[0].x) return segments[0].y; if (value < segments[len-1].x) return segments[len-1].y; for (i = 1; i < len - 1; i++) { /* If value is identical, no need to interpolate */ if (value == segments[i].x) return segments[i].y; if (value > segments[i].x) 
break; } /* Linear interpolation between the two (x,y) points */ dy = segments[i - 1].y - segments[i].y; dx = segments[i - 1].x - segments[i].x; tmp64 = value - segments[i].x; tmp64 *= dy; do_div(tmp64, dx); ret = segments[i].y + tmp64; return ret; } /* FIXME: may require changes - this one was borrowed from dib8000 */ static u32 dib7000p_get_time_us(struct dvb_frontend *demod) { struct dtv_frontend_properties *c = &demod->dtv_property_cache; u64 time_us, tmp64; u32 tmp, denom; int guard, rate_num, rate_denum = 1, bits_per_symbol; int interleaving = 0, fft_div; switch (c->guard_interval) { case GUARD_INTERVAL_1_4: guard = 4; break; case GUARD_INTERVAL_1_8: guard = 8; break; case GUARD_INTERVAL_1_16: guard = 16; break; default: case GUARD_INTERVAL_1_32: guard = 32; break; } switch (c->transmission_mode) { case TRANSMISSION_MODE_2K: fft_div = 4; break; case TRANSMISSION_MODE_4K: fft_div = 2; break; default: case TRANSMISSION_MODE_8K: fft_div = 1; break; } switch (c->modulation) { case DQPSK: case QPSK: bits_per_symbol = 2; break; case QAM_16: bits_per_symbol = 4; break; default: case QAM_64: bits_per_symbol = 6; break; } switch ((c->hierarchy == 0 || 1 == 1) ? 
c->code_rate_HP : c->code_rate_LP) { case FEC_1_2: rate_num = 1; rate_denum = 2; break; case FEC_2_3: rate_num = 2; rate_denum = 3; break; case FEC_3_4: rate_num = 3; rate_denum = 4; break; case FEC_5_6: rate_num = 5; rate_denum = 6; break; default: case FEC_7_8: rate_num = 7; rate_denum = 8; break; } interleaving = interleaving; denom = bits_per_symbol * rate_num * fft_div * 384; /* If calculus gets wrong, wait for 1s for the next stats */ if (!denom) return 0; /* Estimate the period for the total bit rate */ time_us = rate_denum * (1008 * 1562500L); tmp64 = time_us; do_div(tmp64, guard); time_us = time_us + tmp64; time_us += denom / 2; do_div(time_us, denom); tmp = 1008 * 96 * interleaving; time_us += tmp + tmp / guard; return time_us; } static int dib7000p_get_stats(struct dvb_frontend *demod, fe_status_t stat) { struct dib7000p_state *state = demod->demodulator_priv; struct dtv_frontend_properties *c = &demod->dtv_property_cache; int show_per_stats = 0; u32 time_us = 0, val, snr; u64 blocks, ucb; s32 db; u16 strength; /* Get Signal strength */ dib7000p_read_signal_strength(demod, &strength); val = strength; db = interpolate_value(val, strength_to_db_table, ARRAY_SIZE(strength_to_db_table)) - DB_OFFSET; c->strength.stat[0].svalue = db; /* UCB/BER/CNR measures require lock */ if (!(stat & FE_HAS_LOCK)) { c->cnr.len = 1; c->block_count.len = 1; c->block_error.len = 1; c->post_bit_error.len = 1; c->post_bit_count.len = 1; c->cnr.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->post_bit_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->post_bit_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->block_error.stat[0].scale = FE_SCALE_NOT_AVAILABLE; c->block_count.stat[0].scale = FE_SCALE_NOT_AVAILABLE; return 0; } /* Check if time for stats was elapsed */ if (time_after(jiffies, state->per_jiffies_stats)) { state->per_jiffies_stats = jiffies + msecs_to_jiffies(1000); /* Get SNR */ snr = dib7000p_get_snr(demod); if (snr) snr = (1000L * snr) >> 24; else snr = 0; 
c->cnr.stat[0].svalue = snr; c->cnr.stat[0].scale = FE_SCALE_DECIBEL; /* Get UCB measures */ dib7000p_read_unc_blocks(demod, &val); ucb = val - state->old_ucb; if (val < state->old_ucb) ucb += 0x100000000LL; c->block_error.stat[0].scale = FE_SCALE_COUNTER; c->block_error.stat[0].uvalue = ucb; /* Estimate the number of packets based on bitrate */ if (!time_us) time_us = dib7000p_get_time_us(demod); if (time_us) { blocks = 1250000ULL * 1000000ULL; do_div(blocks, time_us * 8 * 204); c->block_count.stat[0].scale = FE_SCALE_COUNTER; c->block_count.stat[0].uvalue += blocks; } show_per_stats = 1; } /* Get post-BER measures */ if (time_after(jiffies, state->ber_jiffies_stats)) { time_us = dib7000p_get_time_us(demod); state->ber_jiffies_stats = jiffies + msecs_to_jiffies((time_us + 500) / 1000); dprintk("Next all layers stats available in %u us.", time_us); dib7000p_read_ber(demod, &val); c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER; c->post_bit_error.stat[0].uvalue += val; c->post_bit_count.stat[0].scale = FE_SCALE_COUNTER; c->post_bit_count.stat[0].uvalue += 100000000; } /* Get PER measures */ if (show_per_stats) { dib7000p_read_unc_blocks(demod, &val); c->block_error.stat[0].scale = FE_SCALE_COUNTER; c->block_error.stat[0].uvalue += val; time_us = dib7000p_get_time_us(demod); if (time_us) { blocks = 1250000ULL * 1000000ULL; do_div(blocks, time_us * 8 * 204); c->block_count.stat[0].scale = FE_SCALE_COUNTER; c->block_count.stat[0].uvalue += blocks; } } return 0; } static int dib7000p_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; return 0; } static void dib7000p_release(struct dvb_frontend *demod) { struct dib7000p_state *st = demod->demodulator_priv; dibx000_exit_i2c_master(&st->i2c_master); i2c_del_adapter(&st->dib7090_tuner_adap); kfree(st); } static int dib7000pc_detection(struct i2c_adapter *i2c_adap) { u8 *tx, *rx; struct i2c_msg msg[2] = { {.addr = 18 >> 1, .flags = 0, .len = 2}, {.addr = 18 
>> 1, .flags = I2C_M_RD, .len = 2}, }; int ret = 0; tx = kzalloc(2*sizeof(u8), GFP_KERNEL); if (!tx) return -ENOMEM; rx = kzalloc(2*sizeof(u8), GFP_KERNEL); if (!rx) { ret = -ENOMEM; goto rx_memory_error; } msg[0].buf = tx; msg[1].buf = rx; tx[0] = 0x03; tx[1] = 0x00; if (i2c_transfer(i2c_adap, msg, 2) == 2) if (rx[0] == 0x01 && rx[1] == 0xb3) { dprintk("-D- DiB7000PC detected"); return 1; } msg[0].addr = msg[1].addr = 0x40; if (i2c_transfer(i2c_adap, msg, 2) == 2) if (rx[0] == 0x01 && rx[1] == 0xb3) { dprintk("-D- DiB7000PC detected"); return 1; } dprintk("-D- DiB7000PC not detected"); kfree(rx); rx_memory_error: kfree(tx); return ret; } static struct i2c_adapter *dib7000p_get_i2c_master(struct dvb_frontend *demod, enum dibx000_i2c_interface intf, int gating) { struct dib7000p_state *st = demod->demodulator_priv; return dibx000_get_i2c_adapter(&st->i2c_master, intf, gating); } static int dib7000p_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff) { struct dib7000p_state *state = fe->demodulator_priv; u16 val = dib7000p_read_word(state, 235) & 0xffef; val |= (onoff & 0x1) << 4; dprintk("PID filter enabled %d", onoff); return dib7000p_write_word(state, 235, val); } static int dib7000p_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff) { struct dib7000p_state *state = fe->demodulator_priv; dprintk("PID filter: index %x, PID %d, OnOff %d", id, pid, onoff); return dib7000p_write_word(state, 241 + id, onoff ? 
(1 << 13) | pid : 0);
}

/*
 * Enumerate @no_of_demods chained DiB7000P demods on bus @i2c and move
 * each from the shared @default_addr to its own i2c address.
 *
 * Walks the chain back-to-front: probes each chip (at its designated
 * address first, then at @default_addr), forces diversity output to
 * pull div_str down, and programs the new address with divstart forced.
 * A second front-to-back pass releases divstart and parks the output in
 * high-Z. Uses a single throwaway state object for register access.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EIO when a
 * chip cannot be identified at either address.
 */
static int dib7000p_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, struct dib7000p_config cfg[])
{
	struct dib7000p_state *dpst;
	int k = 0;
	u8 new_addr = 0;

	dpst = kzalloc(sizeof(struct dib7000p_state), GFP_KERNEL);
	if (!dpst)
		return -ENOMEM;

	dpst->i2c_adap = i2c;
	mutex_init(&dpst->i2c_buffer_lock);

	for (k = no_of_demods - 1; k >= 0; k--) {
		dpst->cfg = cfg[k];

		/* designated i2c address */
		if (cfg[k].default_i2c_addr != 0)
			new_addr = cfg[k].default_i2c_addr + (k << 1);
		else
			new_addr = (0x40 + k) << 1;
		dpst->i2c_addr = new_addr;
		dib7000p_write_word(dpst, 1287, 0x0003);	/* sram lead in, rdy */
		if (dib7000p_identify(dpst) != 0) {
			/* not yet moved: retry at the power-on default address */
			dpst->i2c_addr = default_addr;
			dib7000p_write_word(dpst, 1287, 0x0003);	/* sram lead in, rdy */
			if (dib7000p_identify(dpst) != 0) {
				dprintk("DiB7000P #%d: not identified\n", k);
				kfree(dpst);
				return -EIO;
			}
		}

		/* start diversity to pull_down div_str - just for i2c-enumeration */
		dib7000p_set_output_mode(dpst, OUTMODE_DIVERSITY);

		/* set new i2c address and force divstart */
		dib7000p_write_word(dpst, 1285, (new_addr << 2) | 0x2);

		dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr);
	}

	for (k = 0; k < no_of_demods; k++) {
		dpst->cfg = cfg[k];
		/* NOTE(review): this formula shifts (addr + k) by 1, while the
		 * first loop used addr + (k << 1) — confirm against boards with
		 * a non-zero default_i2c_addr that both compute the same
		 * address; they differ whenever default_i2c_addr is odd or
		 * already pre-shifted. */
		if (cfg[k].default_i2c_addr != 0)
			dpst->i2c_addr = (cfg[k].default_i2c_addr + k) << 1;
		else
			dpst->i2c_addr = (0x40 + k) << 1;

		// unforce divstr
		dib7000p_write_word(dpst, 1285, dpst->i2c_addr << 2);

		/* deactivate div - it was just for i2c-enumeration */
		dib7000p_set_output_mode(dpst, OUTMODE_HIGH_Z);
	}

	kfree(dpst);
	return 0;
}

/* 1000*ln(mant) lookup, mant in [1.0, 2.0] in 0.05 steps (6908 = 1000*ln(10)) */
static const s32 lut_1000ln_mant[] = {
	6908, 6956, 7003, 7047, 7090, 7131, 7170, 7208, 7244, 7279, 7313,
	7346, 7377, 7408, 7438, 7467, 7495, 7523, 7549, 7575, 7600
};

/*
 * Read the 32-bit ADC power (registers 0x184/0x185) and convert it to a
 * log-domain value via the mantissa/exponent split and lut_1000ln_mant.
 */
static s32 dib7000p_get_adc_power(struct dvb_frontend *fe)
{
	struct dib7000p_state *state = fe->demodulator_priv;
	u32 tmp_val = 0, exp = 0, mant = 0;
	s32 pow_i;
	u16 buf[2];
	u8 ix = 0;

	buf[0] = dib7000p_read_word(state, 0x184);
	buf[1] = dib7000p_read_word(state,
0x185); pow_i = (buf[0] << 16) | buf[1]; dprintk("raw pow_i = %d", pow_i); tmp_val = pow_i; while (tmp_val >>= 1) exp++; mant = (pow_i * 1000 / (1 << exp)); dprintk(" mant = %d exp = %d", mant / 1000, exp); ix = (u8) ((mant - 1000) / 100); /* index of the LUT */ dprintk(" ix = %d", ix); pow_i = (lut_1000ln_mant[ix] + 693 * (exp - 20) - 6908); pow_i = (pow_i << 8) / 1000; dprintk(" pow_i = %d", pow_i); return pow_i; } static int map_addr_to_serpar_number(struct i2c_msg *msg) { if ((msg->buf[0] <= 15)) msg->buf[0] -= 1; else if (msg->buf[0] == 17) msg->buf[0] = 15; else if (msg->buf[0] == 16) msg->buf[0] = 17; else if (msg->buf[0] == 19) msg->buf[0] = 16; else if (msg->buf[0] >= 21 && msg->buf[0] <= 25) msg->buf[0] -= 3; else if (msg->buf[0] == 28) msg->buf[0] = 23; else return -EINVAL; return 0; } static int w7090p_tuner_write_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct dib7000p_state *state = i2c_get_adapdata(i2c_adap); u8 n_overflow = 1; u16 i = 1000; u16 serpar_num = msg[0].buf[0]; while (n_overflow == 1 && i) { n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1; i--; if (i == 0) dprintk("Tuner ITF: write busy (overflow)"); } dib7000p_write_word(state, 1985, (1 << 6) | (serpar_num & 0x3f)); dib7000p_write_word(state, 1986, (msg[0].buf[1] << 8) | msg[0].buf[2]); return num; } static int w7090p_tuner_read_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct dib7000p_state *state = i2c_get_adapdata(i2c_adap); u8 n_overflow = 1, n_empty = 1; u16 i = 1000; u16 serpar_num = msg[0].buf[0]; u16 read_word; while (n_overflow == 1 && i) { n_overflow = (dib7000p_read_word(state, 1984) >> 1) & 0x1; i--; if (i == 0) dprintk("TunerITF: read busy (overflow)"); } dib7000p_write_word(state, 1985, (0 << 6) | (serpar_num & 0x3f)); i = 1000; while (n_empty == 1 && i) { n_empty = dib7000p_read_word(state, 1984) & 0x1; i--; if (i == 0) dprintk("TunerITF: read busy (empty)"); } read_word = dib7000p_read_word(state, 1987); 
msg[1].buf[0] = (read_word >> 8) & 0xff; msg[1].buf[1] = (read_word) & 0xff; return num; } static int w7090p_tuner_rw_serpar(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { if (map_addr_to_serpar_number(&msg[0]) == 0) { /* else = Tuner regs to ignore : DIG_CFG, CTRL_RF_LT, PLL_CFG, PWM1_REG, ADCCLK, DIG_CFG_3; SLEEP_EN... */ if (num == 1) { /* write */ return w7090p_tuner_write_serpar(i2c_adap, msg, 1); } else { /* read */ return w7090p_tuner_read_serpar(i2c_adap, msg, 2); } } return num; } static int dib7090p_rw_on_apb(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num, u16 apb_address) { struct dib7000p_state *state = i2c_get_adapdata(i2c_adap); u16 word; if (num == 1) { /* write */ dib7000p_write_word(state, apb_address, ((msg[0].buf[1] << 8) | (msg[0].buf[2]))); } else { word = dib7000p_read_word(state, apb_address); msg[1].buf[0] = (word >> 8) & 0xff; msg[1].buf[1] = (word) & 0xff; } return num; } static int dib7090_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct dib7000p_state *state = i2c_get_adapdata(i2c_adap); u16 apb_address = 0, word; int i = 0; switch (msg[0].buf[0]) { case 0x12: apb_address = 1920; break; case 0x14: apb_address = 1921; break; case 0x24: apb_address = 1922; break; case 0x1a: apb_address = 1923; break; case 0x22: apb_address = 1924; break; case 0x33: apb_address = 1926; break; case 0x34: apb_address = 1927; break; case 0x35: apb_address = 1928; break; case 0x36: apb_address = 1929; break; case 0x37: apb_address = 1930; break; case 0x38: apb_address = 1931; break; case 0x39: apb_address = 1932; break; case 0x2a: apb_address = 1935; break; case 0x2b: apb_address = 1936; break; case 0x2c: apb_address = 1937; break; case 0x2d: apb_address = 1938; break; case 0x2e: apb_address = 1939; break; case 0x2f: apb_address = 1940; break; case 0x30: apb_address = 1941; break; case 0x31: apb_address = 1942; break; case 0x32: apb_address = 1943; break; case 0x3e: apb_address = 1944; break; case 
0x3f: apb_address = 1945; break; case 0x40: apb_address = 1948; break; case 0x25: apb_address = 914; break; case 0x26: apb_address = 915; break; case 0x27: apb_address = 917; break; case 0x28: apb_address = 916; break; case 0x1d: i = ((dib7000p_read_word(state, 72) >> 12) & 0x3); word = dib7000p_read_word(state, 384 + i); msg[1].buf[0] = (word >> 8) & 0xff; msg[1].buf[1] = (word) & 0xff; return num; case 0x1f: if (num == 1) { /* write */ word = (u16) ((msg[0].buf[1] << 8) | msg[0].buf[2]); word &= 0x3; word = (dib7000p_read_word(state, 72) & ~(3 << 12)) | (word << 12); dib7000p_write_word(state, 72, word); /* Set the proper input */ return num; } } if (apb_address != 0) /* R/W acces via APB */ return dib7090p_rw_on_apb(i2c_adap, msg, num, apb_address); else /* R/W access via SERPAR */ return w7090p_tuner_rw_serpar(i2c_adap, msg, num); return 0; } static u32 dib7000p_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm dib7090_tuner_xfer_algo = { .master_xfer = dib7090_tuner_xfer, .functionality = dib7000p_i2c_func, }; static struct i2c_adapter *dib7090_get_i2c_tuner(struct dvb_frontend *fe) { struct dib7000p_state *st = fe->demodulator_priv; return &st->dib7090_tuner_adap; } static int dib7090_host_bus_drive(struct dib7000p_state *state, u8 drive) { u16 reg; /* drive host bus 2, 3, 4 */ reg = dib7000p_read_word(state, 1798) & ~((0x7) | (0x7 << 6) | (0x7 << 12)); reg |= (drive << 12) | (drive << 6) | drive; dib7000p_write_word(state, 1798, reg); /* drive host bus 5,6 */ reg = dib7000p_read_word(state, 1799) & ~((0x7 << 2) | (0x7 << 8)); reg |= (drive << 8) | (drive << 2); dib7000p_write_word(state, 1799, reg); /* drive host bus 7, 8, 9 */ reg = dib7000p_read_word(state, 1800) & ~((0x7) | (0x7 << 6) | (0x7 << 12)); reg |= (drive << 12) | (drive << 6) | drive; dib7000p_write_word(state, 1800, reg); /* drive host bus 10, 11 */ reg = dib7000p_read_word(state, 1801) & ~((0x7 << 2) | (0x7 << 8)); reg |= (drive << 8) | (drive << 2); 
dib7000p_write_word(state, 1801, reg); /* drive host bus 12, 13, 14 */ reg = dib7000p_read_word(state, 1802) & ~((0x7) | (0x7 << 6) | (0x7 << 12)); reg |= (drive << 12) | (drive << 6) | drive; dib7000p_write_word(state, 1802, reg); return 0; } static u32 dib7090_calcSyncFreq(u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32 syncSize) { u32 quantif = 3; u32 nom = (insertExtSynchro * P_Kin + syncSize); u32 denom = P_Kout; u32 syncFreq = ((nom << quantif) / denom); if ((syncFreq & ((1 << quantif) - 1)) != 0) syncFreq = (syncFreq >> quantif) + 1; else syncFreq = (syncFreq >> quantif); if (syncFreq != 0) syncFreq = syncFreq - 1; return syncFreq; } static int dib7090_cfg_DibTx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout, u32 insertExtSynchro, u32 synchroMode, u32 syncWord, u32 syncSize) { dprintk("Configure DibStream Tx"); dib7000p_write_word(state, 1615, 1); dib7000p_write_word(state, 1603, P_Kin); dib7000p_write_word(state, 1605, P_Kout); dib7000p_write_word(state, 1606, insertExtSynchro); dib7000p_write_word(state, 1608, synchroMode); dib7000p_write_word(state, 1609, (syncWord >> 16) & 0xffff); dib7000p_write_word(state, 1610, syncWord & 0xffff); dib7000p_write_word(state, 1612, syncSize); dib7000p_write_word(state, 1615, 0); return 0; } static int dib7090_cfg_DibRx(struct dib7000p_state *state, u32 P_Kin, u32 P_Kout, u32 synchroMode, u32 insertExtSynchro, u32 syncWord, u32 syncSize, u32 dataOutRate) { u32 syncFreq; dprintk("Configure DibStream Rx"); if ((P_Kin != 0) && (P_Kout != 0)) { syncFreq = dib7090_calcSyncFreq(P_Kin, P_Kout, insertExtSynchro, syncSize); dib7000p_write_word(state, 1542, syncFreq); } dib7000p_write_word(state, 1554, 1); dib7000p_write_word(state, 1536, P_Kin); dib7000p_write_word(state, 1537, P_Kout); dib7000p_write_word(state, 1539, synchroMode); dib7000p_write_word(state, 1540, (syncWord >> 16) & 0xffff); dib7000p_write_word(state, 1541, syncWord & 0xffff); dib7000p_write_word(state, 1543, syncSize); dib7000p_write_word(state, 1544, 
dataOutRate); dib7000p_write_word(state, 1554, 0); return 0; } static void dib7090_enMpegMux(struct dib7000p_state *state, int onoff) { u16 reg_1287 = dib7000p_read_word(state, 1287); switch (onoff) { case 1: reg_1287 &= ~(1<<7); break; case 0: reg_1287 |= (1<<7); break; } dib7000p_write_word(state, 1287, reg_1287); } static void dib7090_configMpegMux(struct dib7000p_state *state, u16 pulseWidth, u16 enSerialMode, u16 enSerialClkDiv2) { dprintk("Enable Mpeg mux"); dib7090_enMpegMux(state, 0); /* If the input mode is MPEG do not divide the serial clock */ if ((enSerialMode == 1) && (state->input_mode_mpeg == 1)) enSerialClkDiv2 = 0; dib7000p_write_word(state, 1287, ((pulseWidth & 0x1f) << 2) | ((enSerialMode & 0x1) << 1) | (enSerialClkDiv2 & 0x1)); dib7090_enMpegMux(state, 1); } static void dib7090_setDibTxMux(struct dib7000p_state *state, int mode) { u16 reg_1288 = dib7000p_read_word(state, 1288) & ~(0x7 << 7); switch (mode) { case MPEG_ON_DIBTX: dprintk("SET MPEG ON DIBSTREAM TX"); dib7090_cfg_DibTx(state, 8, 5, 0, 0, 0, 0); reg_1288 |= (1<<9); break; case DIV_ON_DIBTX: dprintk("SET DIV_OUT ON DIBSTREAM TX"); dib7090_cfg_DibTx(state, 5, 5, 0, 0, 0, 0); reg_1288 |= (1<<8); break; case ADC_ON_DIBTX: dprintk("SET ADC_OUT ON DIBSTREAM TX"); dib7090_cfg_DibTx(state, 20, 5, 10, 0, 0, 0); reg_1288 |= (1<<7); break; default: break; } dib7000p_write_word(state, 1288, reg_1288); } static void dib7090_setHostBusMux(struct dib7000p_state *state, int mode) { u16 reg_1288 = dib7000p_read_word(state, 1288) & ~(0x7 << 4); switch (mode) { case DEMOUT_ON_HOSTBUS: dprintk("SET DEM OUT OLD INTERF ON HOST BUS"); dib7090_enMpegMux(state, 0); reg_1288 |= (1<<6); break; case DIBTX_ON_HOSTBUS: dprintk("SET DIBSTREAM TX ON HOST BUS"); dib7090_enMpegMux(state, 0); reg_1288 |= (1<<5); break; case MPEG_ON_HOSTBUS: dprintk("SET MPEG MUX ON HOST BUS"); reg_1288 |= (1<<4); break; default: break; } dib7000p_write_word(state, 1288, reg_1288); } static int dib7090_set_diversity_in(struct 
dvb_frontend *fe, int onoff) { struct dib7000p_state *state = fe->demodulator_priv; u16 reg_1287; switch (onoff) { case 0: /* only use the internal way - not the diversity input */ dprintk("%s mode OFF : by default Enable Mpeg INPUT", __func__); dib7090_cfg_DibRx(state, 8, 5, 0, 0, 0, 8, 0); /* Do not divide the serial clock of MPEG MUX */ /* in SERIAL MODE in case input mode MPEG is used */ reg_1287 = dib7000p_read_word(state, 1287); /* enSerialClkDiv2 == 1 ? */ if ((reg_1287 & 0x1) == 1) { /* force enSerialClkDiv2 = 0 */ reg_1287 &= ~0x1; dib7000p_write_word(state, 1287, reg_1287); } state->input_mode_mpeg = 1; break; case 1: /* both ways */ case 2: /* only the diversity input */ dprintk("%s ON : Enable diversity INPUT", __func__); dib7090_cfg_DibRx(state, 5, 5, 0, 0, 0, 0, 0); state->input_mode_mpeg = 0; break; } dib7000p_set_diversity_in(&state->demod, onoff); return 0; } static int dib7090_set_output_mode(struct dvb_frontend *fe, int mode) { struct dib7000p_state *state = fe->demodulator_priv; u16 outreg, smo_mode, fifo_threshold; u8 prefer_mpeg_mux_use = 1; int ret = 0; dib7090_host_bus_drive(state, 1); fifo_threshold = 1792; smo_mode = (dib7000p_read_word(state, 235) & 0x0050) | (1 << 1); outreg = dib7000p_read_word(state, 1286) & ~((1 << 10) | (0x7 << 6) | (1 << 1)); switch (mode) { case OUTMODE_HIGH_Z: outreg = 0; break; case OUTMODE_MPEG2_SERIAL: if (prefer_mpeg_mux_use) { dprintk("setting output mode TS_SERIAL using Mpeg Mux"); dib7090_configMpegMux(state, 3, 1, 1); dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS); } else {/* Use Smooth block */ dprintk("setting output mode TS_SERIAL using Smooth bloc"); dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS); outreg |= (2<<6) | (0 << 1); } break; case OUTMODE_MPEG2_PAR_GATED_CLK: if (prefer_mpeg_mux_use) { dprintk("setting output mode TS_PARALLEL_GATED using Mpeg Mux"); dib7090_configMpegMux(state, 2, 0, 0); dib7090_setHostBusMux(state, MPEG_ON_HOSTBUS); } else { /* Use Smooth block */ dprintk("setting output mode 
TS_PARALLEL_GATED using Smooth block"); dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS); outreg |= (0<<6); } break; case OUTMODE_MPEG2_PAR_CONT_CLK: /* Using Smooth block only */ dprintk("setting output mode TS_PARALLEL_CONT using Smooth block"); dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS); outreg |= (1<<6); break; case OUTMODE_MPEG2_FIFO: /* Using Smooth block because not supported by new Mpeg Mux bloc */ dprintk("setting output mode TS_FIFO using Smooth block"); dib7090_setHostBusMux(state, DEMOUT_ON_HOSTBUS); outreg |= (5<<6); smo_mode |= (3 << 1); fifo_threshold = 512; break; case OUTMODE_DIVERSITY: dprintk("setting output mode MODE_DIVERSITY"); dib7090_setDibTxMux(state, DIV_ON_DIBTX); dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS); break; case OUTMODE_ANALOG_ADC: dprintk("setting output mode MODE_ANALOG_ADC"); dib7090_setDibTxMux(state, ADC_ON_DIBTX); dib7090_setHostBusMux(state, DIBTX_ON_HOSTBUS); break; } if (mode != OUTMODE_HIGH_Z) outreg |= (1 << 10); if (state->cfg.output_mpeg2_in_188_bytes) smo_mode |= (1 << 5); ret |= dib7000p_write_word(state, 235, smo_mode); ret |= dib7000p_write_word(state, 236, fifo_threshold); /* synchronous fread */ ret |= dib7000p_write_word(state, 1286, outreg); return ret; } static int dib7090_tuner_sleep(struct dvb_frontend *fe, int onoff) { struct dib7000p_state *state = fe->demodulator_priv; u16 en_cur_state; dprintk("sleep dib7090: %d", onoff); en_cur_state = dib7000p_read_word(state, 1922); if (en_cur_state > 0xff) state->tuner_enable = en_cur_state; if (onoff) en_cur_state &= 0x00ff; else { if (state->tuner_enable != 0) en_cur_state = state->tuner_enable; } dib7000p_write_word(state, 1922, en_cur_state); return 0; } static int dib7090_get_adc_power(struct dvb_frontend *fe) { return dib7000p_get_adc_power(fe); } static int dib7090_slave_reset(struct dvb_frontend *fe) { struct dib7000p_state *state = fe->demodulator_priv; u16 reg; reg = dib7000p_read_word(state, 1794); dib7000p_write_word(state, 1794, reg | (4 << 12)); 
dib7000p_write_word(state, 1032, 0xffff); return 0; } static struct dvb_frontend_ops dib7000p_ops; static struct dvb_frontend *dib7000p_init(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib7000p_config *cfg) { struct dvb_frontend *demod; struct dib7000p_state *st; st = kzalloc(sizeof(struct dib7000p_state), GFP_KERNEL); if (st == NULL) return NULL; memcpy(&st->cfg, cfg, sizeof(struct dib7000p_config)); st->i2c_adap = i2c_adap; st->i2c_addr = i2c_addr; st->gpio_val = cfg->gpio_val; st->gpio_dir = cfg->gpio_dir; /* Ensure the output mode remains at the previous default if it's * not specifically set by the caller. */ if ((st->cfg.output_mode != OUTMODE_MPEG2_SERIAL) && (st->cfg.output_mode != OUTMODE_MPEG2_PAR_GATED_CLK)) st->cfg.output_mode = OUTMODE_MPEG2_FIFO; demod = &st->demod; demod->demodulator_priv = st; memcpy(&st->demod.ops, &dib7000p_ops, sizeof(struct dvb_frontend_ops)); mutex_init(&st->i2c_buffer_lock); dib7000p_write_word(st, 1287, 0x0003); /* sram lead in, rdy */ if (dib7000p_identify(st) != 0) goto error; st->version = dib7000p_read_word(st, 897); /* FIXME: make sure the dev.parent field is initialized, or else request_firmware() will hit an OOPS (this should be moved somewhere more common) */ st->i2c_master.gated_tuner_i2c_adap.dev.parent = i2c_adap->dev.parent; dibx000_init_i2c_master(&st->i2c_master, DIB7000P, st->i2c_adap, st->i2c_addr); /* init 7090 tuner adapter */ strncpy(st->dib7090_tuner_adap.name, "DiB7090 tuner interface", sizeof(st->dib7090_tuner_adap.name)); st->dib7090_tuner_adap.algo = &dib7090_tuner_xfer_algo; st->dib7090_tuner_adap.algo_data = NULL; st->dib7090_tuner_adap.dev.parent = st->i2c_adap->dev.parent; i2c_set_adapdata(&st->dib7090_tuner_adap, st); i2c_add_adapter(&st->dib7090_tuner_adap); dib7000p_demod_reset(st); dib7000p_reset_stats(demod); if (st->version == SOC7090) { dib7090_set_output_mode(demod, st->cfg.output_mode); dib7090_set_diversity_in(demod, 0); } return demod; error: kfree(st); return NULL; } void 
*dib7000p_attach(struct dib7000p_ops *ops) { if (!ops) return NULL; ops->slave_reset = dib7090_slave_reset; ops->get_adc_power = dib7090_get_adc_power; ops->dib7000pc_detection = dib7000pc_detection; ops->get_i2c_tuner = dib7090_get_i2c_tuner; ops->tuner_sleep = dib7090_tuner_sleep; ops->init = dib7000p_init; ops->set_agc1_min = dib7000p_set_agc1_min; ops->set_gpio = dib7000p_set_gpio; ops->i2c_enumeration = dib7000p_i2c_enumeration; ops->pid_filter = dib7000p_pid_filter; ops->pid_filter_ctrl = dib7000p_pid_filter_ctrl; ops->get_i2c_master = dib7000p_get_i2c_master; ops->update_pll = dib7000p_update_pll; ops->ctrl_timf = dib7000p_ctrl_timf; ops->get_agc_values = dib7000p_get_agc_values; ops->set_wbd_ref = dib7000p_set_wbd_ref; return ops; } EXPORT_SYMBOL(dib7000p_attach); static struct dvb_frontend_ops dib7000p_ops = { .delsys = { SYS_DVBT }, .info = { .name = "DiBcom 7000PC", .frequency_min = 44250000, .frequency_max = 867250000, .frequency_stepsize = 62500, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO, }, .release = dib7000p_release, .init = dib7000p_wakeup, .sleep = dib7000p_sleep, .set_frontend = dib7000p_set_frontend, .get_tune_settings = dib7000p_fe_get_tune_settings, .get_frontend = dib7000p_get_frontend, .read_status = dib7000p_read_status, .read_ber = dib7000p_read_ber, .read_signal_strength = dib7000p_read_signal_strength, .read_snr = dib7000p_read_snr, .read_ucblocks = dib7000p_read_unc_blocks, }; MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>"); MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); MODULE_DESCRIPTION("Driver for the DiBcom 7000PC COFDM demodulator"); MODULE_LICENSE("GPL");
gpl-2.0
psyke83/kernel_huawei_u8160
fs/affs/inode.c
706
10542
/* * linux/fs/affs/inode.c * * (c) 1996 Hans-Joachim Widmaier - Rewritten * * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem. * * (C) 1992 Eric Youngdale Modified for ISO9660 filesystem. * * (C) 1991 Linus Torvalds - minix filesystem */ #include <linux/sched.h> #include "affs.h" extern const struct inode_operations affs_symlink_inode_operations; extern struct timezone sys_tz; struct inode *affs_iget(struct super_block *sb, unsigned long ino) { struct affs_sb_info *sbi = AFFS_SB(sb); struct buffer_head *bh; struct affs_head *head; struct affs_tail *tail; struct inode *inode; u32 block; u32 size; u32 prot; u16 id; inode = iget_locked(sb, ino); if (!inode) return ERR_PTR(-ENOMEM); if (!(inode->i_state & I_NEW)) return inode; pr_debug("AFFS: affs_iget(%lu)\n", inode->i_ino); block = inode->i_ino; bh = affs_bread(sb, block); if (!bh) { affs_warning(sb, "read_inode", "Cannot read block %d", block); goto bad_inode; } if (affs_checksum_block(sb, bh) || be32_to_cpu(AFFS_HEAD(bh)->ptype) != T_SHORT) { affs_warning(sb,"read_inode", "Checksum or type (ptype=%d) error on inode %d", AFFS_HEAD(bh)->ptype, block); goto bad_inode; } head = AFFS_HEAD(bh); tail = AFFS_TAIL(sb, bh); prot = be32_to_cpu(tail->protect); inode->i_size = 0; inode->i_nlink = 1; inode->i_mode = 0; AFFS_I(inode)->i_extcnt = 1; AFFS_I(inode)->i_ext_last = ~1; AFFS_I(inode)->i_protect = prot; atomic_set(&AFFS_I(inode)->i_opencnt, 0); AFFS_I(inode)->i_blkcnt = 0; AFFS_I(inode)->i_lc = NULL; AFFS_I(inode)->i_lc_size = 0; AFFS_I(inode)->i_lc_shift = 0; AFFS_I(inode)->i_lc_mask = 0; AFFS_I(inode)->i_ac = NULL; AFFS_I(inode)->i_ext_bh = NULL; AFFS_I(inode)->mmu_private = 0; AFFS_I(inode)->i_lastalloc = 0; AFFS_I(inode)->i_pa_cnt = 0; if (sbi->s_flags & SF_SETMODE) inode->i_mode = sbi->s_mode; else inode->i_mode = prot_to_mode(prot); id = be16_to_cpu(tail->uid); if (id == 0 || sbi->s_flags & SF_SETUID) inode->i_uid = sbi->s_uid; else if (id == 0xFFFF && sbi->s_flags & SF_MUFS) inode->i_uid = 0; else 
inode->i_uid = id; id = be16_to_cpu(tail->gid); if (id == 0 || sbi->s_flags & SF_SETGID) inode->i_gid = sbi->s_gid; else if (id == 0xFFFF && sbi->s_flags & SF_MUFS) inode->i_gid = 0; else inode->i_gid = id; switch (be32_to_cpu(tail->stype)) { case ST_ROOT: inode->i_uid = sbi->s_uid; inode->i_gid = sbi->s_gid; /* fall through */ case ST_USERDIR: if (be32_to_cpu(tail->stype) == ST_USERDIR || sbi->s_flags & SF_SETMODE) { if (inode->i_mode & S_IRUSR) inode->i_mode |= S_IXUSR; if (inode->i_mode & S_IRGRP) inode->i_mode |= S_IXGRP; if (inode->i_mode & S_IROTH) inode->i_mode |= S_IXOTH; inode->i_mode |= S_IFDIR; } else inode->i_mode = S_IRUGO | S_IXUGO | S_IWUSR | S_IFDIR; /* Maybe it should be controlled by mount parameter? */ //inode->i_mode |= S_ISVTX; inode->i_op = &affs_dir_inode_operations; inode->i_fop = &affs_dir_operations; break; case ST_LINKDIR: #if 0 affs_warning(sb, "read_inode", "inode is LINKDIR"); goto bad_inode; #else inode->i_mode |= S_IFDIR; /* ... and leave ->i_op and ->i_fop pointing to empty */ break; #endif case ST_LINKFILE: affs_warning(sb, "read_inode", "inode is LINKFILE"); goto bad_inode; case ST_FILE: size = be32_to_cpu(tail->size); inode->i_mode |= S_IFREG; AFFS_I(inode)->mmu_private = inode->i_size = size; if (inode->i_size) { AFFS_I(inode)->i_blkcnt = (size - 1) / sbi->s_data_blksize + 1; AFFS_I(inode)->i_extcnt = (AFFS_I(inode)->i_blkcnt - 1) / sbi->s_hashsize + 1; } if (tail->link_chain) inode->i_nlink = 2; inode->i_mapping->a_ops = (sbi->s_flags & SF_OFS) ? 
&affs_aops_ofs : &affs_aops; inode->i_op = &affs_file_inode_operations; inode->i_fop = &affs_file_operations; break; case ST_SOFTLINK: inode->i_mode |= S_IFLNK; inode->i_op = &affs_symlink_inode_operations; inode->i_data.a_ops = &affs_symlink_aops; break; } inode->i_mtime.tv_sec = inode->i_atime.tv_sec = inode->i_ctime.tv_sec = (be32_to_cpu(tail->change.days) * (24 * 60 * 60) + be32_to_cpu(tail->change.mins) * 60 + be32_to_cpu(tail->change.ticks) / 50 + ((8 * 365 + 2) * 24 * 60 * 60)) + sys_tz.tz_minuteswest * 60; inode->i_mtime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_atime.tv_nsec = 0; affs_brelse(bh); unlock_new_inode(inode); return inode; bad_inode: affs_brelse(bh); iget_failed(inode); return ERR_PTR(-EIO); } int affs_write_inode(struct inode *inode, int unused) { struct super_block *sb = inode->i_sb; struct buffer_head *bh; struct affs_tail *tail; uid_t uid; gid_t gid; pr_debug("AFFS: write_inode(%lu)\n",inode->i_ino); if (!inode->i_nlink) // possibly free block return 0; bh = affs_bread(sb, inode->i_ino); if (!bh) { affs_error(sb,"write_inode","Cannot read block %lu",inode->i_ino); return -EIO; } tail = AFFS_TAIL(sb, bh); if (tail->stype == cpu_to_be32(ST_ROOT)) { secs_to_datestamp(inode->i_mtime.tv_sec,&AFFS_ROOT_TAIL(sb, bh)->root_change); } else { tail->protect = cpu_to_be32(AFFS_I(inode)->i_protect); tail->size = cpu_to_be32(inode->i_size); secs_to_datestamp(inode->i_mtime.tv_sec,&tail->change); if (!(inode->i_ino == AFFS_SB(sb)->s_root_block)) { uid = inode->i_uid; gid = inode->i_gid; if (AFFS_SB(sb)->s_flags & SF_MUFS) { if (inode->i_uid == 0 || inode->i_uid == 0xFFFF) uid = inode->i_uid ^ ~0; if (inode->i_gid == 0 || inode->i_gid == 0xFFFF) gid = inode->i_gid ^ ~0; } if (!(AFFS_SB(sb)->s_flags & SF_SETUID)) tail->uid = cpu_to_be16(uid); if (!(AFFS_SB(sb)->s_flags & SF_SETGID)) tail->gid = cpu_to_be16(gid); } } affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); affs_brelse(bh); affs_free_prealloc(inode); return 0; } int 
affs_notify_change(struct dentry *dentry, struct iattr *attr) { struct inode *inode = dentry->d_inode; int error; pr_debug("AFFS: notify_change(%lu,0x%x)\n",inode->i_ino,attr->ia_valid); error = inode_change_ok(inode,attr); if (error) goto out; if (((attr->ia_valid & ATTR_UID) && (AFFS_SB(inode->i_sb)->s_flags & SF_SETUID)) || ((attr->ia_valid & ATTR_GID) && (AFFS_SB(inode->i_sb)->s_flags & SF_SETGID)) || ((attr->ia_valid & ATTR_MODE) && (AFFS_SB(inode->i_sb)->s_flags & (SF_SETMODE | SF_IMMUTABLE)))) { if (!(AFFS_SB(inode->i_sb)->s_flags & SF_QUIET)) error = -EPERM; goto out; } error = inode_setattr(inode, attr); if (!error && (attr->ia_valid & ATTR_MODE)) mode_to_prot(inode); out: return error; } void affs_delete_inode(struct inode *inode) { pr_debug("AFFS: delete_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink); truncate_inode_pages(&inode->i_data, 0); inode->i_size = 0; affs_truncate(inode); clear_inode(inode); affs_free_block(inode->i_sb, inode->i_ino); } void affs_clear_inode(struct inode *inode) { unsigned long cache_page; pr_debug("AFFS: clear_inode(ino=%lu, nlink=%u)\n", inode->i_ino, inode->i_nlink); affs_free_prealloc(inode); cache_page = (unsigned long)AFFS_I(inode)->i_lc; if (cache_page) { pr_debug("AFFS: freeing ext cache\n"); AFFS_I(inode)->i_lc = NULL; AFFS_I(inode)->i_ac = NULL; free_page(cache_page); } affs_brelse(AFFS_I(inode)->i_ext_bh); AFFS_I(inode)->i_ext_last = ~1; AFFS_I(inode)->i_ext_bh = NULL; } struct inode * affs_new_inode(struct inode *dir) { struct super_block *sb = dir->i_sb; struct inode *inode; u32 block; struct buffer_head *bh; if (!(inode = new_inode(sb))) goto err_inode; if (!(block = affs_alloc_block(dir, dir->i_ino))) goto err_block; bh = affs_getzeroblk(sb, block); if (!bh) goto err_bh; mark_buffer_dirty_inode(bh, inode); affs_brelse(bh); inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); inode->i_ino = block; inode->i_nlink = 1; inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME_SEC; 
atomic_set(&AFFS_I(inode)->i_opencnt, 0); AFFS_I(inode)->i_blkcnt = 0; AFFS_I(inode)->i_lc = NULL; AFFS_I(inode)->i_lc_size = 0; AFFS_I(inode)->i_lc_shift = 0; AFFS_I(inode)->i_lc_mask = 0; AFFS_I(inode)->i_ac = NULL; AFFS_I(inode)->i_ext_bh = NULL; AFFS_I(inode)->mmu_private = 0; AFFS_I(inode)->i_protect = 0; AFFS_I(inode)->i_lastalloc = 0; AFFS_I(inode)->i_pa_cnt = 0; AFFS_I(inode)->i_extcnt = 1; AFFS_I(inode)->i_ext_last = ~1; insert_inode_hash(inode); return inode; err_bh: affs_free_block(sb, block); err_block: iput(inode); err_inode: return NULL; } /* * Add an entry to a directory. Create the header block * and insert it into the hash table. */ int affs_add_entry(struct inode *dir, struct inode *inode, struct dentry *dentry, s32 type) { struct super_block *sb = dir->i_sb; struct buffer_head *inode_bh = NULL; struct buffer_head *bh = NULL; u32 block = 0; int retval; pr_debug("AFFS: add_entry(dir=%u, inode=%u, \"%*s\", type=%d)\n", (u32)dir->i_ino, (u32)inode->i_ino, (int)dentry->d_name.len, dentry->d_name.name, type); retval = -EIO; bh = affs_bread(sb, inode->i_ino); if (!bh) goto done; affs_lock_link(inode); switch (type) { case ST_LINKFILE: case ST_LINKDIR: retval = -ENOSPC; block = affs_alloc_block(dir, dir->i_ino); if (!block) goto err; retval = -EIO; inode_bh = bh; bh = affs_getzeroblk(sb, block); if (!bh) goto err; break; default: break; } AFFS_HEAD(bh)->ptype = cpu_to_be32(T_SHORT); AFFS_HEAD(bh)->key = cpu_to_be32(bh->b_blocknr); affs_copy_name(AFFS_TAIL(sb, bh)->name, dentry); AFFS_TAIL(sb, bh)->stype = cpu_to_be32(type); AFFS_TAIL(sb, bh)->parent = cpu_to_be32(dir->i_ino); if (inode_bh) { __be32 chain; chain = AFFS_TAIL(sb, inode_bh)->link_chain; AFFS_TAIL(sb, bh)->original = cpu_to_be32(inode->i_ino); AFFS_TAIL(sb, bh)->link_chain = chain; AFFS_TAIL(sb, inode_bh)->link_chain = cpu_to_be32(block); affs_adjust_checksum(inode_bh, block - be32_to_cpu(chain)); mark_buffer_dirty_inode(inode_bh, inode); inode->i_nlink = 2; atomic_inc(&inode->i_count); } 
affs_fix_checksum(sb, bh); mark_buffer_dirty_inode(bh, inode); dentry->d_fsdata = (void *)(long)bh->b_blocknr; affs_lock_dir(dir); retval = affs_insert_hash(dir, bh); mark_buffer_dirty_inode(bh, inode); affs_unlock_dir(dir); affs_unlock_link(inode); d_instantiate(dentry, inode); done: affs_brelse(inode_bh); affs_brelse(bh); return retval; err: if (block) affs_free_block(sb, block); affs_unlock_link(inode); goto done; }
gpl-2.0
sricharanaz/iommu
drivers/net/wireless/rtlwifi/debug.c
1474
1810
/****************************************************************************** * * Copyright(c) 2009-2012 Realtek Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * * Contact Information: * wlanfae <wlanfae@realtek.com> * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park, * Hsinchu 300, Taiwan. * * Larry Finger <Larry.Finger@lwfinger.net> *****************************************************************************/ #include "wifi.h" #include <linux/moduleparam.h> void rtl_dbgp_flag_init(struct ieee80211_hw *hw) { struct rtl_priv *rtlpriv = rtl_priv(hw); u8 i; rtlpriv->dbg.global_debugcomponents = COMP_ERR | COMP_FW | COMP_INIT | COMP_RECV | COMP_SEND | COMP_MLME | COMP_SCAN | COMP_INTR | COMP_LED | COMP_SEC | COMP_BEACON | COMP_RATE | COMP_RXDESC | COMP_DIG | COMP_TXAGC | COMP_POWER | COMP_POWER_TRACKING | COMP_BB_POWERSAVING | COMP_SWAS | COMP_RF | COMP_TURBO | COMP_RATR | COMP_CMD | COMP_EFUSE | COMP_QOS | COMP_MAC80211 | COMP_REGD | COMP_CHAN | COMP_EASY_CONCURRENT | COMP_EFUSE | COMP_QOS | COMP_MAC80211 | COMP_REGD | COMP_CHAN | COMP_BT_COEXIST; for (i = 0; i < DBGP_TYPE_MAX; i++) rtlpriv->dbg.dbgp_type[i] = 0; /*Init Debug flag enable condition */ } EXPORT_SYMBOL_GPL(rtl_dbgp_flag_init);
gpl-2.0
yangoliver/linux
drivers/ata/pata_cs5530.c
1474
9458
/*
 * pata-cs5530.c - CS5530 PATA for new ATA layer
 * (C) 2005 Red Hat Inc
 *
 * based upon cs5530.c by Mark Lord.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Loosely based on the piix & svwks drivers.
 *
 * Documentation:
 *	Available from AMD web site.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <linux/dmi.h>

#define DRV_NAME	"pata_cs5530"
#define DRV_VERSION	"0.7.4"

/*
 * Map a port to the base of its CS5530 timing-register window.
 * The per-port timing registers live at (BMDMA base & ~0x0F) + 0x20,
 * with each port's window 0x10 bytes apart.
 */
static void __iomem *cs5530_port_base(struct ata_port *ap)
{
	unsigned long bmdma = (unsigned long)ap->ioaddr.bmdma_addr;

	return (void __iomem *)((bmdma & ~0x0F) + 0x20 + 0x10 * ap->port_no);
}

/**
 *	cs5530_set_piomode	-	PIO setup
 *	@ap: ATA interface
 *	@adev: device on the interface
 *
 *	Set our PIO requirements. This is fairly simple on the CS5530
 *	chips.
 */
static void cs5530_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	/* Two timing tables, selected by the "format" bit already
	 * programmed into the tuning register (bit 31). */
	static const unsigned int cs5530_pio_timings[2][5] = {
		{0x00009172, 0x00012171, 0x00020080, 0x00032010, 0x00040010},
		{0xd1329172, 0x71212171, 0x30200080, 0x20102010, 0x00100010}
	};
	void __iomem *base = cs5530_port_base(ap);
	u32 tuning;
	int format;

	/* Find out which table to use */
	tuning = ioread32(base + 0x04);
	format = (tuning & 0x80000000UL) ? 1 : 0;

	/* Now load the right timing register: slave timings sit 8 bytes
	 * above the master's */
	if (adev->devno)
		base += 0x08;

	iowrite32(cs5530_pio_timings[format][adev->pio_mode - XFER_PIO_0],
		  base);
}

/**
 *	cs5530_set_dmamode	-	DMA timing setup
 *	@ap: ATA interface
 *	@adev: Device being configured
 *
 *	We cannot mix MWDMA and UDMA without reloading timings each switch
 *	master to slave. We track the last DMA setup in order to minimise
 *	reloads.
 */
static void cs5530_set_dmamode(struct ata_port *ap, struct ata_device *adev)
{
	void __iomem *base = cs5530_port_base(ap);
	u32 tuning, timing = 0;
	u8 reg;

	/* Find out which table to use */
	tuning = ioread32(base + 0x04);

	switch (adev->dma_mode) {
	case XFER_UDMA_0:
		timing = 0x00921250;
		break;
	case XFER_UDMA_1:
		timing = 0x00911140;
		break;
	case XFER_UDMA_2:
		timing = 0x00911030;
		break;
	case XFER_MW_DMA_0:
		timing = 0x00077771;
		break;
	case XFER_MW_DMA_1:
		timing = 0x00012121;
		break;
	case XFER_MW_DMA_2:
		timing = 0x00002020;
		break;
	default:
		/* Mode was validated by the core; anything else is a
		 * driver/core bug */
		BUG();
	}
	/* Merge in the PIO format bit */
	timing |= (tuning & 0x80000000UL);
	if (adev->devno == 0)	/* Master */
		iowrite32(timing, base + 0x04);
	else {
		/* There is a single MWDMA/UDMA selector shared by both
		 * devices; force it to match the mode just programmed */
		if (timing & 0x00100000)
			tuning |= 0x00100000;	/* UDMA for both */
		else
			tuning &= ~0x00100000;	/* MWDMA for both */
		iowrite32(tuning, base + 0x04);
		iowrite32(timing, base + 0x0C);
	}
	/* Set the DMA capable bit in the BMDMA area */
	reg = ioread8(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
	reg |= (1 << (5 + adev->devno));
	iowrite8(reg, ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);

	/* Remember the last DMA setup we did */
	ap->private_data = adev;
}

/**
 *	cs5530_qc_issue		-	command issue
 *	@qc: command pending
 *
 *	Called when the libata layer is about to issue a command. We wrap
 *	this interface so that we can load the correct ATA timings if
 *	necessary. Specifically we have a problem that there is only
 *	one MWDMA/UDMA bit.
 */
static unsigned int cs5530_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *adev = qc->dev;
	struct ata_device *prev = ap->private_data;

	/* See if the DMA settings could be wrong */
	if (ata_dma_enabled(adev) && adev != prev && prev != NULL) {
		/* Maybe, but do the channels match MWDMA/UDMA ? */
		if ((ata_using_udma(adev) && !ata_using_udma(prev)) ||
		    (ata_using_udma(prev) && !ata_using_udma(adev)))
			/* Switch the mode bits */
			cs5530_set_dmamode(ap, adev);
	}

	return ata_bmdma_qc_issue(qc);
}

static struct scsi_host_template cs5530_sht = {
	ATA_BMDMA_SHT(DRV_NAME),
	/* chip cannot do full scatter/gather; use the dumb PRD limit */
	.sg_tablesize	= LIBATA_DUMB_MAX_PRD,
};

static struct ata_port_operations cs5530_port_ops = {
	.inherits	= &ata_bmdma_port_ops,

	.qc_prep	= ata_bmdma_dumb_qc_prep,
	.qc_issue	= cs5530_qc_issue,

	.cable_detect	= ata_cable_40wire,
	.set_piomode	= cs5530_set_piomode,
	.set_dmamode	= cs5530_set_dmamode,
};

/* DMI match for the Palmax PD1100, whose docking port cannot do DMA */
static const struct dmi_system_id palmax_dmi_table[] = {
	{
		.ident = "Palmax PD1100",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Cyrix"),
			DMI_MATCH(DMI_PRODUCT_NAME, "Caddis"),
		},
	},
	{ }
};

static int cs5530_is_palmax(void)
{
	if (dmi_check_system(palmax_dmi_table)) {
		printk(KERN_INFO "Palmax PD1100: Disabling DMA on docking port.\n");
		return 1;
	}
	return 0;
}

/**
 *	cs5530_init_chip	-	Chipset init
 *
 *	Perform the chip initialisation work that is shared between both
 *	setup and resume paths
 */
static int cs5530_init_chip(void)
{
	struct pci_dev *master_0 = NULL, *cs5530_0 = NULL, *dev = NULL;

	/* Locate the two companion functions we have to program.
	 * pci_get_device() drops the ref on the previous dev, so take an
	 * extra reference on the ones we keep. */
	while ((dev = pci_get_device(PCI_VENDOR_ID_CYRIX, PCI_ANY_ID, dev)) != NULL) {
		switch (dev->device) {
		case PCI_DEVICE_ID_CYRIX_PCI_MASTER:
			master_0 = pci_dev_get(dev);
			break;
		case PCI_DEVICE_ID_CYRIX_5530_LEGACY:
			cs5530_0 = pci_dev_get(dev);
			break;
		}
	}
	if (!master_0) {
		printk(KERN_ERR DRV_NAME ": unable to locate PCI MASTER function\n");
		goto fail_put;
	}
	if (!cs5530_0) {
		printk(KERN_ERR DRV_NAME ": unable to locate CS5530 LEGACY function\n");
		goto fail_put;
	}

	pci_set_master(cs5530_0);
	pci_try_set_mwi(cs5530_0);

	/*
	 * Set PCI CacheLineSize to 16-bytes:
	 * --> Write 0x04 into 8-bit PCI CACHELINESIZE reg of function 0 of the cs5530
	 *
	 * Note: This value is constant because the 5530 is only a Geode companion
	 */

	pci_write_config_byte(cs5530_0, PCI_CACHE_LINE_SIZE, 0x04);

	/*
	 * Disable trapping of UDMA register accesses (Win98 hack):
	 * --> Write 0x5006 into 16-bit reg at offset 0xd0 of function 0 of the cs5530
	 */

	pci_write_config_word(cs5530_0, 0xd0, 0x5006);

	/*
	 * Bit-1 at 0x40 enables MemoryWriteAndInvalidate on internal X-bus:
	 * The other settings are what is necessary to get the register
	 * into a sane state for IDE DMA operation.
	 */

	pci_write_config_byte(master_0, 0x40, 0x1e);

	/*
	 * Set max PCI burst size (16-bytes seems to work best):
	 *	   16bytes: set bit-1 at 0x41 (reg value of 0x16)
	 *	all others: clear bit-1 at 0x41, and do:
	 *	  128bytes: OR 0x00 at 0x41
	 *	  256bytes: OR 0x04 at 0x41
	 *	  512bytes: OR 0x08 at 0x41
	 *	 1024bytes: OR 0x0c at 0x41
	 */

	pci_write_config_byte(master_0, 0x41, 0x14);

	/*
	 * These settings are necessary to get the chip
	 * into a sane state for IDE DMA operation.
	 */

	pci_write_config_byte(master_0, 0x42, 0x00);
	pci_write_config_byte(master_0, 0x43, 0xc1);

	pci_dev_put(master_0);
	pci_dev_put(cs5530_0);
	return 0;
fail_put:
	/* pci_dev_put(NULL) is a no-op, so this is safe on either path */
	pci_dev_put(master_0);
	pci_dev_put(cs5530_0);
	return -ENODEV;
}

/**
 *	cs5530_init_one		-	Initialise a CS5530
 *	@pdev: PCI device
 *	@id: Entry in match table
 *
 *	Install a driver for the newly found CS5530 companion chip. Most of
 *	this is just housekeeping. We have to set the chip up correctly and
 *	turn off various bits of emulation magic.
 */
static int cs5530_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
{
	static const struct ata_port_info info = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.mwdma_mask = ATA_MWDMA2,
		.udma_mask = ATA_UDMA2,
		.port_ops = &cs5530_port_ops
	};
	/* The docking connector doesn't do UDMA, and it seems not MWDMA */
	static const struct ata_port_info info_palmax_secondary = {
		.flags = ATA_FLAG_SLAVE_POSS,
		.pio_mask = ATA_PIO4,
		.port_ops = &cs5530_port_ops
	};
	const struct ata_port_info *ppi[] = { &info, NULL };
	int rc;

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	/* Chip initialisation */
	if (cs5530_init_chip())
		return -ENODEV;

	if (cs5530_is_palmax())
		ppi[1] = &info_palmax_secondary;

	/* Now kick off ATA set up */
	return ata_pci_bmdma_init_one(pdev, ppi, &cs5530_sht, NULL, 0);
}

#ifdef CONFIG_PM_SLEEP
/* Resume: redo the shared chip init, then let libata restore the rest. */
static int cs5530_reinit_one(struct pci_dev *pdev)
{
	struct ata_host *host = pci_get_drvdata(pdev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc)
		return rc;

	/* If we fail on resume we are doomed */
	if (cs5530_init_chip())
		return -EIO;

	ata_host_resume(host);
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

static const struct pci_device_id cs5530[] = {
	{ PCI_VDEVICE(CYRIX, PCI_DEVICE_ID_CYRIX_5530_IDE), },

	{ },
};

static struct pci_driver cs5530_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= cs5530,
	.probe		= cs5530_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM_SLEEP
	.suspend	= ata_pci_device_suspend,
	.resume		= cs5530_reinit_one,
#endif
};

module_pci_driver(cs5530_pci_driver);

MODULE_AUTHOR("Alan Cox");
MODULE_DESCRIPTION("low-level driver for the Cyrix/NS/AMD 5530");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cs5530);
MODULE_VERSION(DRV_VERSION);
gpl-2.0
chonix/trinity
arch/powerpc/kernel/setup_64.c
1730
17594
/* * * Common boot and setup code. * * Copyright (C) 2001 PPC64 Team, IBM Corp * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #undef DEBUG #include <linux/module.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/reboot.h> #include <linux/delay.h> #include <linux/initrd.h> #include <linux/seq_file.h> #include <linux/ioport.h> #include <linux/console.h> #include <linux/utsname.h> #include <linux/tty.h> #include <linux/root_dev.h> #include <linux/notifier.h> #include <linux/cpu.h> #include <linux/unistd.h> #include <linux/serial.h> #include <linux/serial_8250.h> #include <linux/bootmem.h> #include <linux/pci.h> #include <linux/lockdep.h> #include <linux/memblock.h> #include <asm/io.h> #include <asm/kdump.h> #include <asm/prom.h> #include <asm/processor.h> #include <asm/pgtable.h> #include <asm/smp.h> #include <asm/elf.h> #include <asm/machdep.h> #include <asm/paca.h> #include <asm/time.h> #include <asm/cputable.h> #include <asm/sections.h> #include <asm/btext.h> #include <asm/nvram.h> #include <asm/setup.h> #include <asm/system.h> #include <asm/rtas.h> #include <asm/iommu.h> #include <asm/serial.h> #include <asm/cache.h> #include <asm/page.h> #include <asm/mmu.h> #include <asm/firmware.h> #include <asm/xmon.h> #include <asm/udbg.h> #include <asm/kexec.h> #include <asm/mmu_context.h> #include <asm/code-patching.h> #include "setup.h" #ifdef DEBUG #define DBG(fmt...) udbg_printf(fmt) #else #define DBG(fmt...) #endif int boot_cpuid = 0; int __initdata boot_cpu_count; u64 ppc64_pft_size; /* Pick defaults since we might want to patch instructions * before we've read this from the device tree. 
*/ struct ppc64_caches ppc64_caches = { .dline_size = 0x40, .log_dline_size = 6, .iline_size = 0x40, .log_iline_size = 6 }; EXPORT_SYMBOL_GPL(ppc64_caches); /* * These are used in binfmt_elf.c to put aux entries on the stack * for each elf executable being started. */ int dcache_bsize; int icache_bsize; int ucache_bsize; #ifdef CONFIG_SMP static char *smt_enabled_cmdline; /* Look for ibm,smt-enabled OF option */ static void check_smt_enabled(void) { struct device_node *dn; const char *smt_option; /* Default to enabling all threads */ smt_enabled_at_boot = threads_per_core; /* Allow the command line to overrule the OF option */ if (smt_enabled_cmdline) { if (!strcmp(smt_enabled_cmdline, "on")) smt_enabled_at_boot = threads_per_core; else if (!strcmp(smt_enabled_cmdline, "off")) smt_enabled_at_boot = 0; else { long smt; int rc; rc = strict_strtol(smt_enabled_cmdline, 10, &smt); if (!rc) smt_enabled_at_boot = min(threads_per_core, (int)smt); } } else { dn = of_find_node_by_path("/options"); if (dn) { smt_option = of_get_property(dn, "ibm,smt-enabled", NULL); if (smt_option) { if (!strcmp(smt_option, "on")) smt_enabled_at_boot = threads_per_core; else if (!strcmp(smt_option, "off")) smt_enabled_at_boot = 0; } of_node_put(dn); } } } /* Look for smt-enabled= cmdline option */ static int __init early_smt_enabled(char *p) { smt_enabled_cmdline = p; return 0; } early_param("smt-enabled", early_smt_enabled); #else #define check_smt_enabled() #endif /* CONFIG_SMP */ /* * Early initialization entry point. This is called by head.S * with MMU translation disabled. We rely on the "feature" of * the CPU that ignores the top 2 bits of the address in real * mode so we can access kernel globals normally provided we * only toy with things in the RMO region. From here, we do * some early parsing of the device-tree to setup out MEMBLOCK * data structures, and allocate & initialize the hash table * and segment tables so we can start running with translation * enabled. 
* * It is this function which will call the probe() callback of * the various platform types and copy the matching one to the * global ppc_md structure. Your platform can eventually do * some very early initializations from the probe() routine, but * this is not recommended, be very careful as, for example, the * device-tree is not accessible via normal means at this point. */ void __init early_setup(unsigned long dt_ptr) { /* -------- printk is _NOT_ safe to use here ! ------- */ /* Identify CPU type */ identify_cpu(0, mfspr(SPRN_PVR)); /* Assume we're on cpu 0 for now. Don't write to the paca yet! */ initialise_paca(&boot_paca, 0); setup_paca(&boot_paca); /* Initialize lockdep early or else spinlocks will blow */ lockdep_init(); /* -------- printk is now safe to use ------- */ /* Enable early debugging if any specified (see udbg.h) */ udbg_early_init(); DBG(" -> early_setup(), dt_ptr: 0x%lx\n", dt_ptr); /* * Do early initialization using the flattened device * tree, such as retrieving the physical memory map or * calculating/retrieving the hash table size. */ early_init_devtree(__va(dt_ptr)); /* Now we know the logical id of our boot cpu, setup the paca. 
*/ setup_paca(&paca[boot_cpuid]); /* Fix up paca fields required for the boot cpu */ get_paca()->cpu_start = 1; /* Probe the machine type */ probe_machine(); setup_kdump_trampoline(); DBG("Found, Initializing memory management...\n"); /* Initialize the hash table or TLB handling */ early_init_mmu(); DBG(" <- early_setup()\n"); } #ifdef CONFIG_SMP void early_setup_secondary(void) { /* Mark interrupts enabled in PACA */ get_paca()->soft_enabled = 0; /* Initialize the hash table or TLB handling */ early_init_mmu_secondary(); } #endif /* CONFIG_SMP */ #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC) void smp_release_cpus(void) { unsigned long *ptr; int i; DBG(" -> smp_release_cpus()\n"); /* All secondary cpus are spinning on a common spinloop, release them * all now so they can start to spin on their individual paca * spinloops. For non SMP kernels, the secondary cpus never get out * of the common spinloop. */ ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop - PHYSICAL_START); *ptr = __pa(generic_secondary_smp_init); /* And wait a bit for them to catch up */ for (i = 0; i < 100000; i++) { mb(); HMT_low(); if (boot_cpu_count == 0) break; udelay(1); } DBG("boot_cpu_count = %d\n", boot_cpu_count); DBG(" <- smp_release_cpus()\n"); } #endif /* CONFIG_SMP || CONFIG_KEXEC */ /* * Initialize some remaining members of the ppc64_caches and systemcfg * structures * (at least until we get rid of them completely). This is mostly some * cache informations about the CPU that will be used by cache flush * routines and/or provided to userland */ static void __init initialize_cache_info(void) { struct device_node *np; unsigned long num_cpus = 0; DBG(" -> initialize_cache_info()\n"); for (np = NULL; (np = of_find_node_by_type(np, "cpu"));) { num_cpus += 1; /* We're assuming *all* of the CPUs have the same * d-cache and i-cache sizes... 
-Peter */ if ( num_cpus == 1 ) { const u32 *sizep, *lsizep; u32 size, lsize; size = 0; lsize = cur_cpu_spec->dcache_bsize; sizep = of_get_property(np, "d-cache-size", NULL); if (sizep != NULL) size = *sizep; lsizep = of_get_property(np, "d-cache-block-size", NULL); /* fallback if block size missing */ if (lsizep == NULL) lsizep = of_get_property(np, "d-cache-line-size", NULL); if (lsizep != NULL) lsize = *lsizep; if (sizep == 0 || lsizep == 0) DBG("Argh, can't find dcache properties ! " "sizep: %p, lsizep: %p\n", sizep, lsizep); ppc64_caches.dsize = size; ppc64_caches.dline_size = lsize; ppc64_caches.log_dline_size = __ilog2(lsize); ppc64_caches.dlines_per_page = PAGE_SIZE / lsize; size = 0; lsize = cur_cpu_spec->icache_bsize; sizep = of_get_property(np, "i-cache-size", NULL); if (sizep != NULL) size = *sizep; lsizep = of_get_property(np, "i-cache-block-size", NULL); if (lsizep == NULL) lsizep = of_get_property(np, "i-cache-line-size", NULL); if (lsizep != NULL) lsize = *lsizep; if (sizep == 0 || lsizep == 0) DBG("Argh, can't find icache properties ! " "sizep: %p, lsizep: %p\n", sizep, lsizep); ppc64_caches.isize = size; ppc64_caches.iline_size = lsize; ppc64_caches.log_iline_size = __ilog2(lsize); ppc64_caches.ilines_per_page = PAGE_SIZE / lsize; } } DBG(" <- initialize_cache_info()\n"); } /* * Do some initial setup of the system. The parameters are those which * were passed in from the bootloader. 
*/ void __init setup_system(void) { DBG(" -> setup_system()\n"); /* Apply the CPUs-specific and firmware specific fixups to kernel * text (nop out sections not relevant to this CPU or this firmware) */ do_feature_fixups(cur_cpu_spec->cpu_features, &__start___ftr_fixup, &__stop___ftr_fixup); do_feature_fixups(cur_cpu_spec->mmu_features, &__start___mmu_ftr_fixup, &__stop___mmu_ftr_fixup); do_feature_fixups(powerpc_firmware_features, &__start___fw_ftr_fixup, &__stop___fw_ftr_fixup); do_lwsync_fixups(cur_cpu_spec->cpu_features, &__start___lwsync_fixup, &__stop___lwsync_fixup); do_final_fixups(); /* * Unflatten the device-tree passed by prom_init or kexec */ unflatten_device_tree(); /* * Fill the ppc64_caches & systemcfg structures with informations * retrieved from the device-tree. */ initialize_cache_info(); #ifdef CONFIG_PPC_RTAS /* * Initialize RTAS if available */ rtas_initialize(); #endif /* CONFIG_PPC_RTAS */ /* * Check if we have an initrd provided via the device-tree */ check_for_initrd(); /* * Do some platform specific early initializations, that includes * setting up the hash table pointers. It also sets up some interrupt-mapping * related options that will be used by finish_device_tree() */ if (ppc_md.init_early) ppc_md.init_early(); /* * We can discover serial ports now since the above did setup the * hash table management for us, thus ioremap works. 
We do that early * so that further code can be debugged */ find_legacy_serial_ports(); /* * Register early console */ register_early_udbg_console(); /* * Initialize xmon */ xmon_setup(); smp_setup_cpu_maps(); check_smt_enabled(); #ifdef CONFIG_SMP /* Release secondary cpus out of their spinloops at 0x60 now that * we can map physical -> logical CPU ids */ smp_release_cpus(); #endif printk("Starting Linux PPC64 %s\n", init_utsname()->version); printk("-----------------------------------------------------\n"); printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); printk("physicalMemorySize = 0x%llx\n", memblock_phys_mem_size()); if (ppc64_caches.dline_size != 0x80) printk("ppc64_caches.dcache_line_size = 0x%x\n", ppc64_caches.dline_size); if (ppc64_caches.iline_size != 0x80) printk("ppc64_caches.icache_line_size = 0x%x\n", ppc64_caches.iline_size); #ifdef CONFIG_PPC_STD_MMU_64 if (htab_address) printk("htab_address = 0x%p\n", htab_address); printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); #endif /* CONFIG_PPC_STD_MMU_64 */ if (PHYSICAL_START > 0) printk("physical_start = 0x%llx\n", (unsigned long long)PHYSICAL_START); printk("-----------------------------------------------------\n"); DBG(" <- setup_system()\n"); } /* This returns the limit below which memory accesses to the linear * mapping are guarnateed not to cause a TLB or SLB miss. This is * used to allocate interrupt or emergency stacks for which our * exception entry path doesn't deal with being interrupted. 
*/ static u64 safe_stack_limit(void) { #ifdef CONFIG_PPC_BOOK3E /* Freescale BookE bolts the entire linear mapping */ if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) return linear_map_top; /* Other BookE, we assume the first GB is bolted */ return 1ul << 30; #else /* BookS, the first segment is bolted */ if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) return 1UL << SID_SHIFT_1T; return 1UL << SID_SHIFT; #endif } static void __init irqstack_early_init(void) { u64 limit = safe_stack_limit(); unsigned int i; /* * Interrupt stacks must be in the first segment since we * cannot afford to take SLB misses on them. */ for_each_possible_cpu(i) { softirq_ctx[i] = (struct thread_info *) __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); hardirq_ctx[i] = (struct thread_info *) __va(memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit)); } } #ifdef CONFIG_PPC_BOOK3E static void __init exc_lvl_early_init(void) { extern unsigned int interrupt_base_book3e; extern unsigned int exc_debug_debug_book3e; unsigned int i; for_each_possible_cpu(i) { critirq_ctx[i] = (struct thread_info *) __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); dbgirq_ctx[i] = (struct thread_info *) __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); mcheckirq_ctx[i] = (struct thread_info *) __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); } if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1, (unsigned long)&exc_debug_debug_book3e, 0); } #else #define exc_lvl_early_init() #endif /* * Stack space used when we detect a bad kernel stack pointer, and * early in SMP boots before relocation is enabled. */ static void __init emergency_stack_init(void) { u64 limit; unsigned int i; /* * Emergency stacks must be under 256MB, we cannot afford to take * SLB misses on them. The ABI also requires them to be 128-byte * aligned. * * Since we use these as temporary stacks during secondary CPU * bringup, we need to get at them in real mode. This means they * must also be within the RMO region. 
*/ limit = min(safe_stack_limit(), ppc64_rma_size); for_each_possible_cpu(i) { unsigned long sp; sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit); sp += THREAD_SIZE; paca[i].emergency_sp = __va(sp); } } /* * Called into from start_kernel this initializes bootmem, which is used * to manage page allocation until mem_init is called. */ void __init setup_arch(char **cmdline_p) { ppc64_boot_msg(0x12, "Setup Arch"); *cmdline_p = cmd_line; /* * Set cache line size based on type of cpu as a default. * Systems with OF can look in the properties on the cpu node(s) * for a possibly more accurate value. */ dcache_bsize = ppc64_caches.dline_size; icache_bsize = ppc64_caches.iline_size; /* reboot on panic */ panic_timeout = 180; if (ppc_md.panic) setup_panic(); init_mm.start_code = (unsigned long)_stext; init_mm.end_code = (unsigned long) _etext; init_mm.end_data = (unsigned long) _edata; init_mm.brk = klimit; irqstack_early_init(); exc_lvl_early_init(); emergency_stack_init(); #ifdef CONFIG_PPC_STD_MMU_64 stabs_alloc(); #endif /* set up the bootmem stuff with available memory */ do_init_bootmem(); sparse_init(); #ifdef CONFIG_DUMMY_CONSOLE conswitchp = &dummy_con; #endif if (ppc_md.setup_arch) ppc_md.setup_arch(); paging_init(); /* Initialize the MMU context management stuff */ mmu_context_init(); ppc64_boot_msg(0x15, "Setup Done"); } /* ToDo: do something useful if ppc_md is not yet setup. */ #define PPC64_LINUX_FUNCTION 0x0f000000 #define PPC64_IPL_MESSAGE 0xc0000000 #define PPC64_TERM_MESSAGE 0xb0000000 static void ppc64_do_msg(unsigned int src, const char *msg) { if (ppc_md.progress) { char buf[128]; sprintf(buf, "%08X\n", src); ppc_md.progress(buf, 0); snprintf(buf, 128, "%s", msg); ppc_md.progress(buf, 0); } } /* Print a boot progress message. 
*/ void ppc64_boot_msg(unsigned int src, const char *msg) { ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg); printk("[boot]%04x %s\n", src, msg); } #ifdef CONFIG_SMP #define PCPU_DYN_SIZE () static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, size_t align) { return __alloc_bootmem_node(NODE_DATA(cpu_to_node(cpu)), size, align, __pa(MAX_DMA_ADDRESS)); } static void __init pcpu_fc_free(void *ptr, size_t size) { free_bootmem(__pa(ptr), size); } static int pcpu_cpu_distance(unsigned int from, unsigned int to) { if (cpu_to_node(from) == cpu_to_node(to)) return LOCAL_DISTANCE; else return REMOTE_DISTANCE; } unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; EXPORT_SYMBOL(__per_cpu_offset); void __init setup_per_cpu_areas(void) { const size_t dyn_size = PERCPU_MODULE_RESERVE + PERCPU_DYNAMIC_RESERVE; size_t atom_size; unsigned long delta; unsigned int cpu; int rc; /* * Linear mapping is one of 4K, 1M and 16M. For 4K, no need * to group units. For larger mappings, use 1M atom which * should be large enough to contain a number of units. */ if (mmu_linear_psize == MMU_PAGE_4K) atom_size = PAGE_SIZE; else atom_size = 1 << 20; rc = pcpu_embed_first_chunk(0, dyn_size, atom_size, pcpu_cpu_distance, pcpu_fc_alloc, pcpu_fc_free); if (rc < 0) panic("cannot initialize percpu area (err=%d)", rc); delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; for_each_possible_cpu(cpu) { __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; paca[cpu].data_offset = __per_cpu_offset[cpu]; } } #endif #ifdef CONFIG_PPC_INDIRECT_IO struct ppc_pci_io ppc_pci_io; EXPORT_SYMBOL(ppc_pci_io); #endif /* CONFIG_PPC_INDIRECT_IO */
gpl-2.0
arnoldthebat/linux-stable
scripts/kconfig/lxdialog/menubox.c
2498
11241
/*
 * menubox.c -- implements the menu box
 *
 * ORIGINAL AUTHOR: Savio Lam (lam836@cs.cuhk.hk)
 * MODIFIED FOR LINUX KERNEL CONFIG BY: William Roadcap (roadcapw@cfw.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Changes by Clifford Wolf (god@clifford.at)
 *
 * [ 1998-06-13 ]
 *
 * *) A bugfix for the Page-Down problem
 *
 * *) Formerly when I used Page Down and Page Up, the cursor would be set
 *    to the first position in the menu box. Now lxdialog is a bit
 *    smarter and works more like other menu systems (just have a look at
 *    it).
 *
 * *) Formerly if I selected something my scrolling would be broken because
 *    lxdialog is re-invoked by the Menuconfig shell script, can't
 *    remember the last scrolling position, and just sets it so that the
 *    cursor is at the bottom of the box. Now it writes the temporary file
 *    lxdialog.scrltmp which contains this information. The file is
 *    deleted by lxdialog if the user leaves a submenu or enters a new
 *    one, but it would be nice if Menuconfig could make another "rm -f"
 *    just to be sure. Just try it out - you will recognise a difference!
 *
 * [ 1998-06-14 ]
 *
 * *) Now lxdialog is crash-safe against broken "lxdialog.scrltmp" files
 *    and menus change their size on the fly.
 *
 * *) If for some reason the last scrolling position is not saved by
 *    lxdialog, it sets the scrolling so that the selected item is in the
 *    middle of the menu box, not at the bottom.
 *
 * 02 January 1999, Michael Elizabeth Chastain (mec@shout.net)
 * Reset 'scroll' to 0 if the value from lxdialog.scrltmp is bogus.
 * This fixes a bug in Menuconfig where using ' ' to descend into menus
 * would leave mis-synchronized lxdialog.scrltmp files lying around,
 * fscanf would read in 'scroll', and eventually that value would get used.
 */

#include "dialog.h"

/* Shared between dialog_menu() and the item painters: current menu
 * window width and the x offset at which item text starts. */
static int menu_width, item_x;

/*
 * Print menu item
 */
static void do_print_item(WINDOW * win, const char *item, int line_y,
			  int selected, int hotkey)
{
	int j;
	/* NOTE(review): malloc result is used unchecked — on OOM this
	 * crashes in strncpy; consider an xmalloc-style wrapper. */
	char *menu_item = malloc(menu_width + 1);

	/* Truncate the label to the space right of item_x */
	strncpy(menu_item, item, menu_width - item_x);
	menu_item[menu_width - item_x] = '\0';
	j = first_alpha(menu_item, "YyNnMmHh");

	/* Clear 'residue' of last item */
	wattrset(win, dlg.menubox.atr);
	wmove(win, line_y, 0);
#if OLD_NCURSES
	{
		int i;
		for (i = 0; i < menu_width; i++)
			waddch(win, ' ');
	}
#else
	wclrtoeol(win);
#endif
	wattrset(win, selected ? dlg.item_selected.atr : dlg.item.atr);
	mvwaddstr(win, line_y, item_x, menu_item);
	/* Highlight the hotkey character (first alphabetic char outside
	 * the YyNnMmHh set) */
	if (hotkey) {
		wattrset(win, selected ? dlg.tag_key_selected.atr
			 : dlg.tag_key.atr);
		mvwaddch(win, line_y, item_x + j, menu_item[j]);
	}
	if (selected) {
		wmove(win, line_y, item_x + 1);
	}
	free(menu_item);
	wrefresh(win);
}

#define print_item(index, choice, selected)				\
do {									\
	item_set(index);						\
	do_print_item(menu, item_str(), choice, selected, !item_is_tag(':')); \
} while (0)

/*
 * Print the scroll indicators.
 */
static void print_arrows(WINDOW * win, int item_no, int scroll, int y, int x,
			 int height)
{
	int cur_y, cur_x;

	getyx(win, cur_y, cur_x);

	/* Up indicator: shown only when items are scrolled off the top */
	wmove(win, y, x);

	if (scroll > 0) {
		wattrset(win, dlg.uarrow.atr);
		waddch(win, ACS_UARROW);
		waddstr(win, "(-)");
	} else {
		wattrset(win, dlg.menubox.atr);
		waddch(win, ACS_HLINE);
		waddch(win, ACS_HLINE);
		waddch(win, ACS_HLINE);
		waddch(win, ACS_HLINE);
	}

	y = y + height + 1;

	/* Down indicator: shown only when items extend below the window */
	wmove(win, y, x);
	wrefresh(win);

	if ((height < item_no) && (scroll + height < item_no)) {
		wattrset(win, dlg.darrow.atr);
		waddch(win, ACS_DARROW);
		waddstr(win, "(+)");
	} else {
		wattrset(win, dlg.menubox_border.atr);
		waddch(win, ACS_HLINE);
		waddch(win, ACS_HLINE);
		waddch(win, ACS_HLINE);
		waddch(win, ACS_HLINE);
	}

	/* restore the caller's cursor position */
	wmove(win, cur_y, cur_x);
	wrefresh(win);
}

/*
 * Display the termination buttons.
 */
static void print_buttons(WINDOW * win, int height, int width, int selected)
{
	int x = width / 2 - 28;
	int y = height - 2;

	print_button(win, gettext("Select"), y, x, selected == 0);
	print_button(win, gettext(" Exit "), y, x + 12, selected == 1);
	print_button(win, gettext(" Help "), y, x + 24, selected == 2);
	print_button(win, gettext(" Save "), y, x + 36, selected == 3);
	print_button(win, gettext(" Load "), y, x + 48, selected == 4);

	wmove(win, y, x + 1 + 12 * selected);
	wrefresh(win);
}

/* scroll up n lines (n may be negative) */
static void do_scroll(WINDOW *win, int *scroll, int n)
{
	/* Scroll menu up */
	scrollok(win, TRUE);
	wscrl(win, n);
	scrollok(win, FALSE);
	*scroll = *scroll + n;
	wrefresh(win);
}

/*
 * Display a menu for choosing among a number of options
 */
int dialog_menu(const char *title, const char *prompt,
		const void *selected, int *s_scroll)
{
	int i, j, x, y, box_x, box_y;
	int height, width, menu_height;
	int key = 0, button = 0, scroll = 0, choice = 0;
	int first_item = 0, max_choice;
	WINDOW *dialog, *menu;

do_resize:
	height = getmaxy(stdscr);
	width = getmaxx(stdscr);
	if (height < MENUBOX_HEIGTH_MIN || width < MENUBOX_WIDTH_MIN)
		return -ERRDISPLAYTOOSMALL;

	height -= 4;
	width -= 5;
	menu_height = height - 10;

	max_choice = MIN(menu_height, item_count());

	/* center dialog box on screen */
	x = (getmaxx(stdscr) - width) / 2;
	y = (getmaxy(stdscr) - height) / 2;

	draw_shadow(stdscr, y, x, height, width);

	dialog = newwin(height, width, y, x);
	keypad(dialog, TRUE);

	draw_box(dialog, 0, 0, height, width,
		 dlg.dialog.atr, dlg.border.atr);
	wattrset(dialog, dlg.border.atr);
	mvwaddch(dialog, height - 3, 0, ACS_LTEE);
	for (i = 0; i < width - 2; i++)
		waddch(dialog, ACS_HLINE);
	wattrset(dialog, dlg.dialog.atr);
	wbkgdset(dialog, dlg.dialog.atr & A_COLOR);
	waddch(dialog, ACS_RTEE);

	print_title(dialog, title, width);

	wattrset(dialog, dlg.dialog.atr);
	print_autowrap(dialog, prompt, width - 2, 1, 3);

	menu_width = width - 6;
	box_y = height - menu_height - 5;
	box_x = (width - menu_width) / 2 - 1;

	/* create new window for the menu */
	menu = subwin(dialog, menu_height, menu_width,
		      y + box_y + 1, x + box_x + 1);
	keypad(menu, TRUE);

	/* draw a box around the menu items */
	draw_box(dialog, box_y, box_x, menu_height + 2, menu_width + 2,
		 dlg.menubox_border.atr, dlg.menubox.atr);

	if (menu_width >= 80)
		item_x = (menu_width - 70) / 2;
	else
		item_x = 4;

	/* Set choice to default item */
	item_foreach()
		if (selected && (selected == item_data()))
			choice = item_n();
	/* get the saved scroll info */
	scroll = *s_scroll;
	if ((scroll <= choice) && (scroll + max_choice > choice) &&
	    (scroll >= 0) && (scroll + max_choice <= item_count())) {
		first_item = scroll;
		choice = choice - scroll;
	} else {
		/* saved scroll is stale/bogus — start from the top */
		scroll = 0;
	}
	/* default choice is off-screen: center it in the box */
	if ((choice >= max_choice)) {
		if (choice >= item_count() - max_choice / 2)
			scroll = first_item = item_count() - max_choice;
		else
			scroll = first_item = choice - max_choice / 2;
		choice = choice - scroll;
	}

	/* Print the menu */
	for (i = 0; i < max_choice; i++) {
		print_item(first_item + i, i, i == choice);
	}

	wnoutrefresh(menu);

	print_arrows(dialog, item_count(), scroll,
		     box_y, box_x + item_x + 1, menu_height);

	print_buttons(dialog, height, width, 0);
	wmove(menu, choice, item_x + 1);
	wrefresh(menu);

	while (key != KEY_ESC) {
		key = wgetch(menu);

		if (key < 256 && isalpha(key))
			key = tolower(key);

		/* Hotkey search: jump to the next item whose hotkey
		 * matches; i == max_choice means "no match / not a
		 * navigation key" */
		if (strchr("ynmh", key))
			i = max_choice;
		else {
			for (i = choice + 1; i < max_choice; i++) {
				item_set(scroll + i);
				j = first_alpha(item_str(), "YyNnMmHh");
				if (key == tolower(item_str()[j]))
					break;
			}
			if (i == max_choice)
				for (i = 0; i < max_choice; i++) {
					item_set(scroll + i);
					j = first_alpha(item_str(),
							"YyNnMmHh");
					if (key == tolower(item_str()[j]))
						break;
				}
		}

		if (item_count() != 0 &&
		    (i < max_choice ||
		     key == KEY_UP || key == KEY_DOWN ||
		     key == '-' || key == '+' ||
		     key == KEY_PPAGE || key == KEY_NPAGE)) {
			/* Remove highligt of current item */
			print_item(scroll + choice, choice, FALSE);

			if (key == KEY_UP || key == '-') {
				if (choice < 2 && scroll) {
					/* Scroll menu down */
					do_scroll(menu, &scroll, -1);

					print_item(scroll, 0, FALSE);
				} else
					choice = MAX(choice - 1, 0);

			} else if (key == KEY_DOWN || key == '+') {
				print_item(scroll+choice, choice, FALSE);

				if ((choice > max_choice - 3) &&
				    (scroll + max_choice < item_count())) {
					/* Scroll menu up */
					do_scroll(menu, &scroll, 1);

					print_item(scroll+max_choice - 1,
						   max_choice - 1, FALSE);
				} else
					choice = MIN(choice + 1,
						     max_choice - 1);

			} else if (key == KEY_PPAGE) {
				scrollok(menu, TRUE);
				for (i = 0; (i < max_choice); i++) {
					if (scroll > 0) {
						do_scroll(menu, &scroll, -1);
						print_item(scroll, 0, FALSE);
					} else {
						if (choice > 0)
							choice--;
					}
				}

			} else if (key == KEY_NPAGE) {
				for (i = 0; (i < max_choice); i++) {
					if (scroll + max_choice <
					    item_count()) {
						do_scroll(menu, &scroll, 1);
						print_item(scroll+max_choice-1,
							   max_choice - 1,
							   FALSE);
					} else {
						if (choice + 1 < max_choice)
							choice++;
					}
				}
			} else
				choice = i;	/* hotkey jump */

			print_item(scroll + choice, choice, TRUE);

			print_arrows(dialog, item_count(), scroll,
				     box_y, box_x + item_x + 1, menu_height);

			wnoutrefresh(dialog);
			wrefresh(menu);

			continue;	/* wait for another key press */
		}

		switch (key) {
		case KEY_LEFT:
		case TAB:
		case KEY_RIGHT:
			/* cycle through the 5 buttons, wrapping at the ends */
			button = ((key == KEY_LEFT ? --button : ++button) < 0) ?
			    4 : (button > 4 ? 0 : button);

			print_buttons(dialog, height, width, button);
			wrefresh(menu);
			break;
		case ' ':
		case 's':
		case 'y':
		case 'n':
		case 'm':
		case '/':
		case 'h':
		case '?':
		case 'z':
		case '\n':
			/* save scroll info */
			*s_scroll = scroll;
			delwin(menu);
			delwin(dialog);
			item_set(scroll + choice);
			item_set_selected(1);
			/* Map the terminating key to the caller's action
			 * code */
			switch (key) {
			case 'h':
			case '?':
				return 2;
			case 's':
			case 'y':
				return 5;
			case 'n':
				return 6;
			case 'm':
				return 7;
			case ' ':
				return 8;
			case '/':
				return 9;
			case 'z':
				return 10;
			case '\n':
				return button;
			}
			return 0;
		case 'e':
		case 'x':
			key = KEY_ESC;
			break;
		case KEY_ESC:
			key = on_key_esc(menu);
			break;
		case KEY_RESIZE:
			/* terminal resized: tear down and rebuild windows */
			on_key_resize();
			delwin(menu);
			delwin(dialog);
			goto do_resize;
		}
	}
	delwin(menu);
	delwin(dialog);
	return key;		/* ESC pressed */
}
gpl-2.0
TheWhisp/android_kernel_samsung_msm7x27a
arch/arm/mach-tegra/hotplug.c
4546
2562
/* * linux/arch/arm/mach-realview/hotplug.c * * Copyright (C) 2002 ARM Ltd. * All Rights Reserved * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/smp.h> #include <asm/cacheflush.h> #include <asm/cp15.h> static inline void cpu_enter_lowpower(void) { unsigned int v; flush_cache_all(); asm volatile( " mcr p15, 0, %1, c7, c5, 0\n" " mcr p15, 0, %1, c7, c10, 4\n" /* * Turn off coherency */ " mrc p15, 0, %0, c1, c0, 1\n" " bic %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" " mrc p15, 0, %0, c1, c0, 0\n" " bic %0, %0, %2\n" " mcr p15, 0, %0, c1, c0, 0\n" : "=&r" (v) : "r" (0), "Ir" (CR_C) : "cc"); } static inline void cpu_leave_lowpower(void) { unsigned int v; asm volatile( "mrc p15, 0, %0, c1, c0, 0\n" " orr %0, %0, %1\n" " mcr p15, 0, %0, c1, c0, 0\n" " mrc p15, 0, %0, c1, c0, 1\n" " orr %0, %0, #0x20\n" " mcr p15, 0, %0, c1, c0, 1\n" : "=&r" (v) : "Ir" (CR_C) : "cc"); } static inline void platform_do_lowpower(unsigned int cpu, int *spurious) { /* * there is no power-control hardware on this platform, so all * we can do is put the core into WFI; this is safe as the calling * code will have already disabled interrupts */ for (;;) { /* * here's the WFI */ asm(".word 0xe320f003\n" : : : "memory", "cc"); /*if (pen_release == cpu) {*/ /* * OK, proper wakeup, we're done */ break; /*}*/ /* * Getting here, means that we have come out of WFI without * having been woken up - this shouldn't happen * * Just note it happening - when we're woken, we can report * its occurrence. 
*/ (*spurious)++; } } int platform_cpu_kill(unsigned int cpu) { return 1; } /* * platform-specific code to shutdown a CPU * * Called with IRQs disabled */ void platform_cpu_die(unsigned int cpu) { int spurious = 0; /* * we're ready for shutdown now, so do it */ cpu_enter_lowpower(); platform_do_lowpower(cpu, &spurious); /* * bring this CPU back into the world of cache * coherency, and then restore interrupts */ cpu_leave_lowpower(); if (spurious) pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious); } int platform_cpu_disable(unsigned int cpu) { /* * we don't allow CPU 0 to be shutdown (it is still too special * e.g. clock tick interrupts) */ return cpu == 0 ? -EPERM : 0; }
gpl-2.0
CyberGrandChallenge/linux-source-3.13.2-cgc
arch/mips/pmcs-msp71xx/msp_irq_slp.c
4546
2701
/* * This file define the irq handler for MSP SLM subsystem interrupts. * * Copyright 2005-2006 PMC-Sierra, Inc, derived from irq_cpu.c * Author: Andrew Hughes, Andrew_Hughes@pmc-sierra.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/bitops.h> #include <asm/mipsregs.h> #include <msp_slp_int.h> #include <msp_regs.h> static inline void unmask_msp_slp_irq(struct irq_data *d) { unsigned int irq = d->irq; /* check for PER interrupt range */ if (irq < MSP_PER_INTBASE) *SLP_INT_MSK_REG |= (1 << (irq - MSP_SLP_INTBASE)); else *PER_INT_MSK_REG |= (1 << (irq - MSP_PER_INTBASE)); } static inline void mask_msp_slp_irq(struct irq_data *d) { unsigned int irq = d->irq; /* check for PER interrupt range */ if (irq < MSP_PER_INTBASE) *SLP_INT_MSK_REG &= ~(1 << (irq - MSP_SLP_INTBASE)); else *PER_INT_MSK_REG &= ~(1 << (irq - MSP_PER_INTBASE)); } /* * While we ack the interrupt interrupts are disabled and thus we don't need * to deal with concurrency issues. Same for msp_slp_irq_end. */ static inline void ack_msp_slp_irq(struct irq_data *d) { unsigned int irq = d->irq; /* check for PER interrupt range */ if (irq < MSP_PER_INTBASE) *SLP_INT_STS_REG = (1 << (irq - MSP_SLP_INTBASE)); else *PER_INT_STS_REG = (1 << (irq - MSP_PER_INTBASE)); } static struct irq_chip msp_slp_irq_controller = { .name = "MSP_SLP", .irq_ack = ack_msp_slp_irq, .irq_mask = mask_msp_slp_irq, .irq_unmask = unmask_msp_slp_irq, }; void __init msp_slp_irq_init(void) { int i; /* Mask/clear interrupts. 
*/ *SLP_INT_MSK_REG = 0x00000000; *PER_INT_MSK_REG = 0x00000000; *SLP_INT_STS_REG = 0xFFFFFFFF; *PER_INT_STS_REG = 0xFFFFFFFF; /* initialize all the IRQ descriptors */ for (i = MSP_SLP_INTBASE; i < MSP_PER_INTBASE + 32; i++) irq_set_chip_and_handler(i, &msp_slp_irq_controller, handle_level_irq); } void msp_slp_irq_dispatch(void) { u32 pending; int intbase; intbase = MSP_SLP_INTBASE; pending = *SLP_INT_STS_REG & *SLP_INT_MSK_REG; /* check for PER interrupt */ if (pending == (1 << (MSP_INT_PER - MSP_SLP_INTBASE))) { intbase = MSP_PER_INTBASE; pending = *PER_INT_STS_REG & *PER_INT_MSK_REG; } /* check for spurious interrupt */ if (pending == 0x00000000) { printk(KERN_ERR "Spurious %s interrupt?\n", (intbase == MSP_SLP_INTBASE) ? "SLP" : "PER"); return; } /* dispatch the irq */ do_IRQ(ffs(pending) + intbase - 1); }
gpl-2.0
Fuzion24/m7_vzw_kernel
drivers/net/usb/pegasus.c
4802
39300
/* * Copyright (c) 1999-2005 Petko Manolov (petkan@users.sourceforge.net) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * ChangeLog: * .... Most of the time spent on reading sources & docs. * v0.2.x First official release for the Linux kernel. * v0.3.0 Beutified and structured, some bugs fixed. * v0.3.x URBifying bulk requests and bugfixing. First relatively * stable release. Still can touch device's registers only * from top-halves. * v0.4.0 Control messages remained unurbified are now URBs. * Now we can touch the HW at any time. * v0.4.9 Control urbs again use process context to wait. Argh... * Some long standing bugs (enable_net_traffic) fixed. * Also nasty trick about resubmiting control urb from * interrupt context used. Please let me know how it * behaves. Pegasus II support added since this version. * TODO: suppressing HCD warnings spewage on disconnect. * v0.4.13 Ethernet address is now set at probe(), not at open() * time as this seems to break dhcpd. * v0.5.0 branch to 2.5.x kernels * v0.5.1 ethtool support added * v0.5.5 rx socket buffers are in a pool and the their allocation * is out of the interrupt routine. 
*/ #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mii.h> #include <linux/usb.h> #include <linux/module.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #include "pegasus.h" /* * Version Information */ #define DRIVER_VERSION "v0.6.14 (2006/09/27)" #define DRIVER_AUTHOR "Petko Manolov <petkan@users.sourceforge.net>" #define DRIVER_DESC "Pegasus/Pegasus II USB Ethernet driver" static const char driver_name[] = "pegasus"; #undef PEGASUS_WRITE_EEPROM #define BMSR_MEDIA (BMSR_10HALF | BMSR_10FULL | BMSR_100HALF | \ BMSR_100FULL | BMSR_ANEGCAPABLE) static bool loopback; static bool mii_mode; static char *devid; static struct usb_eth_dev usb_dev_id[] = { #define PEGASUS_DEV(pn, vid, pid, flags) \ {.name = pn, .vendor = vid, .device = pid, .private = flags}, #define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \ PEGASUS_DEV(pn, vid, pid, flags) #include "pegasus.h" #undef PEGASUS_DEV #undef PEGASUS_DEV_CLASS {NULL, 0, 0, 0}, {NULL, 0, 0, 0} }; static struct usb_device_id pegasus_ids[] = { #define PEGASUS_DEV(pn, vid, pid, flags) \ {.match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = vid, .idProduct = pid}, /* * The Belkin F8T012xx1 bluetooth adaptor has the same vendor and product * IDs as the Belkin F5D5050, so we need to teach the pegasus driver to * ignore adaptors belonging to the "Wireless" class 0xE0. For this one * case anyway, seeing as the pegasus is for "Wired" adaptors. 
*/ #define PEGASUS_DEV_CLASS(pn, vid, pid, dclass, flags) \ {.match_flags = (USB_DEVICE_ID_MATCH_DEVICE | USB_DEVICE_ID_MATCH_DEV_CLASS), \ .idVendor = vid, .idProduct = pid, .bDeviceClass = dclass}, #include "pegasus.h" #undef PEGASUS_DEV #undef PEGASUS_DEV_CLASS {}, {} }; MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); module_param(loopback, bool, 0); module_param(mii_mode, bool, 0); module_param(devid, charp, 0); MODULE_PARM_DESC(loopback, "Enable MAC loopback mode (bit 0)"); MODULE_PARM_DESC(mii_mode, "Enable HomePNA mode (bit 0),default=MII mode = 0"); MODULE_PARM_DESC(devid, "The format is: 'DEV_name:VendorID:DeviceID:Flags'"); /* use ethtool to change the level for any given device */ static int msg_level = -1; module_param(msg_level, int, 0); MODULE_PARM_DESC(msg_level, "Override default message level"); MODULE_DEVICE_TABLE(usb, pegasus_ids); static const struct net_device_ops pegasus_netdev_ops; static int update_eth_regs_async(pegasus_t *); /* Aargh!!! 
I _really_ hate such tweaks */ static void ctrl_callback(struct urb *urb) { pegasus_t *pegasus = urb->context; int status = urb->status; if (!pegasus) return; switch (status) { case 0: if (pegasus->flags & ETH_REGS_CHANGE) { pegasus->flags &= ~ETH_REGS_CHANGE; pegasus->flags |= ETH_REGS_CHANGED; update_eth_regs_async(pegasus); return; } break; case -EINPROGRESS: return; case -ENOENT: break; default: if (net_ratelimit()) netif_dbg(pegasus, drv, pegasus->net, "%s, status %d\n", __func__, status); break; } pegasus->flags &= ~ETH_REGS_CHANGED; wake_up(&pegasus->ctrl_wait); } static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) { int ret; char *buffer; DECLARE_WAITQUEUE(wait, current); buffer = kmalloc(size, GFP_KERNEL); if (!buffer) { netif_warn(pegasus, drv, pegasus->net, "out of memory in %s\n", __func__); return -ENOMEM; } add_wait_queue(&pegasus->ctrl_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); while (pegasus->flags & ETH_REGS_CHANGED) schedule(); remove_wait_queue(&pegasus->ctrl_wait, &wait); set_current_state(TASK_RUNNING); pegasus->dr.bRequestType = PEGASUS_REQT_READ; pegasus->dr.bRequest = PEGASUS_REQ_GET_REGS; pegasus->dr.wValue = cpu_to_le16(0); pegasus->dr.wIndex = cpu_to_le16(indx); pegasus->dr.wLength = cpu_to_le16(size); pegasus->ctrl_urb->transfer_buffer_length = size; usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0), (char *) &pegasus->dr, buffer, size, ctrl_callback, pegasus); add_wait_queue(&pegasus->ctrl_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); /* using ATOMIC, we'd never wake up if we slept */ if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) { set_current_state(TASK_RUNNING); if (ret == -ENODEV) netif_device_detach(pegasus->net); if (net_ratelimit()) netif_err(pegasus, drv, pegasus->net, "%s, status %d\n", __func__, ret); goto out; } schedule(); out: remove_wait_queue(&pegasus->ctrl_wait, &wait); memcpy(data, buffer, size); kfree(buffer); return ret; 
} static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) { int ret; char *buffer; DECLARE_WAITQUEUE(wait, current); buffer = kmemdup(data, size, GFP_KERNEL); if (!buffer) { netif_warn(pegasus, drv, pegasus->net, "out of memory in %s\n", __func__); return -ENOMEM; } add_wait_queue(&pegasus->ctrl_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); while (pegasus->flags & ETH_REGS_CHANGED) schedule(); remove_wait_queue(&pegasus->ctrl_wait, &wait); set_current_state(TASK_RUNNING); pegasus->dr.bRequestType = PEGASUS_REQT_WRITE; pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS; pegasus->dr.wValue = cpu_to_le16(0); pegasus->dr.wIndex = cpu_to_le16(indx); pegasus->dr.wLength = cpu_to_le16(size); pegasus->ctrl_urb->transfer_buffer_length = size; usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), (char *) &pegasus->dr, buffer, size, ctrl_callback, pegasus); add_wait_queue(&pegasus->ctrl_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) { if (ret == -ENODEV) netif_device_detach(pegasus->net); netif_err(pegasus, drv, pegasus->net, "%s, status %d\n", __func__, ret); goto out; } schedule(); out: remove_wait_queue(&pegasus->ctrl_wait, &wait); kfree(buffer); return ret; } static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) { int ret; char *tmp; DECLARE_WAITQUEUE(wait, current); tmp = kmemdup(&data, 1, GFP_KERNEL); if (!tmp) { netif_warn(pegasus, drv, pegasus->net, "out of memory in %s\n", __func__); return -ENOMEM; } add_wait_queue(&pegasus->ctrl_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); while (pegasus->flags & ETH_REGS_CHANGED) schedule(); remove_wait_queue(&pegasus->ctrl_wait, &wait); set_current_state(TASK_RUNNING); pegasus->dr.bRequestType = PEGASUS_REQT_WRITE; pegasus->dr.bRequest = PEGASUS_REQ_SET_REG; pegasus->dr.wValue = cpu_to_le16(data); pegasus->dr.wIndex = cpu_to_le16(indx); pegasus->dr.wLength = cpu_to_le16(1); 
pegasus->ctrl_urb->transfer_buffer_length = 1; usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), (char *) &pegasus->dr, tmp, 1, ctrl_callback, pegasus); add_wait_queue(&pegasus->ctrl_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) { if (ret == -ENODEV) netif_device_detach(pegasus->net); if (net_ratelimit()) netif_err(pegasus, drv, pegasus->net, "%s, status %d\n", __func__, ret); goto out; } schedule(); out: remove_wait_queue(&pegasus->ctrl_wait, &wait); kfree(tmp); return ret; } static int update_eth_regs_async(pegasus_t *pegasus) { int ret; pegasus->dr.bRequestType = PEGASUS_REQT_WRITE; pegasus->dr.bRequest = PEGASUS_REQ_SET_REGS; pegasus->dr.wValue = cpu_to_le16(0); pegasus->dr.wIndex = cpu_to_le16(EthCtrl0); pegasus->dr.wLength = cpu_to_le16(3); pegasus->ctrl_urb->transfer_buffer_length = 3; usb_fill_control_urb(pegasus->ctrl_urb, pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), (char *) &pegasus->dr, pegasus->eth_regs, 3, ctrl_callback, pegasus); if ((ret = usb_submit_urb(pegasus->ctrl_urb, GFP_ATOMIC))) { if (ret == -ENODEV) netif_device_detach(pegasus->net); netif_err(pegasus, drv, pegasus->net, "%s, status %d\n", __func__, ret); } return ret; } /* Returns 0 on success, error on failure */ static int read_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 *regd) { int i; __u8 data[4] = { phy, 0, 0, indx }; __le16 regdi; int ret; set_register(pegasus, PhyCtrl, 0); set_registers(pegasus, PhyAddr, sizeof(data), data); set_register(pegasus, PhyCtrl, (indx | PHY_READ)); for (i = 0; i < REG_TIMEOUT; i++) { ret = get_registers(pegasus, PhyCtrl, 1, data); if (ret == -ESHUTDOWN) goto fail; if (data[0] & PHY_DONE) break; } if (i >= REG_TIMEOUT) goto fail; ret = get_registers(pegasus, PhyData, 2, &regdi); *regd = le16_to_cpu(regdi); return ret; fail: netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__); return ret; } static int mdio_read(struct 
net_device *dev, int phy_id, int loc) { pegasus_t *pegasus = netdev_priv(dev); u16 res; read_mii_word(pegasus, phy_id, loc, &res); return (int)res; } static int write_mii_word(pegasus_t *pegasus, __u8 phy, __u8 indx, __u16 regd) { int i; __u8 data[4] = { phy, 0, 0, indx }; int ret; data[1] = (u8) regd; data[2] = (u8) (regd >> 8); set_register(pegasus, PhyCtrl, 0); set_registers(pegasus, PhyAddr, sizeof(data), data); set_register(pegasus, PhyCtrl, (indx | PHY_WRITE)); for (i = 0; i < REG_TIMEOUT; i++) { ret = get_registers(pegasus, PhyCtrl, 1, data); if (ret == -ESHUTDOWN) goto fail; if (data[0] & PHY_DONE) break; } if (i >= REG_TIMEOUT) goto fail; return ret; fail: netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__); return -ETIMEDOUT; } static void mdio_write(struct net_device *dev, int phy_id, int loc, int val) { pegasus_t *pegasus = netdev_priv(dev); write_mii_word(pegasus, phy_id, loc, val); } static int read_eprom_word(pegasus_t *pegasus, __u8 index, __u16 *retdata) { int i; __u8 tmp; __le16 retdatai; int ret; set_register(pegasus, EpromCtrl, 0); set_register(pegasus, EpromOffset, index); set_register(pegasus, EpromCtrl, EPROM_READ); for (i = 0; i < REG_TIMEOUT; i++) { ret = get_registers(pegasus, EpromCtrl, 1, &tmp); if (tmp & EPROM_DONE) break; if (ret == -ESHUTDOWN) goto fail; } if (i >= REG_TIMEOUT) goto fail; ret = get_registers(pegasus, EpromData, 2, &retdatai); *retdata = le16_to_cpu(retdatai); return ret; fail: netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__); return -ETIMEDOUT; } #ifdef PEGASUS_WRITE_EEPROM static inline void enable_eprom_write(pegasus_t *pegasus) { __u8 tmp; int ret; get_registers(pegasus, EthCtrl2, 1, &tmp); set_register(pegasus, EthCtrl2, tmp | EPROM_WR_ENABLE); } static inline void disable_eprom_write(pegasus_t *pegasus) { __u8 tmp; int ret; get_registers(pegasus, EthCtrl2, 1, &tmp); set_register(pegasus, EpromCtrl, 0); set_register(pegasus, EthCtrl2, tmp & ~EPROM_WR_ENABLE); } static int 
write_eprom_word(pegasus_t *pegasus, __u8 index, __u16 data) { int i; __u8 tmp, d[4] = { 0x3f, 0, 0, EPROM_WRITE }; int ret; __le16 le_data = cpu_to_le16(data); set_registers(pegasus, EpromOffset, 4, d); enable_eprom_write(pegasus); set_register(pegasus, EpromOffset, index); set_registers(pegasus, EpromData, 2, &le_data); set_register(pegasus, EpromCtrl, EPROM_WRITE); for (i = 0; i < REG_TIMEOUT; i++) { ret = get_registers(pegasus, EpromCtrl, 1, &tmp); if (ret == -ESHUTDOWN) goto fail; if (tmp & EPROM_DONE) break; } disable_eprom_write(pegasus); if (i >= REG_TIMEOUT) goto fail; return ret; fail: netif_warn(pegasus, drv, pegasus->net, "%s failed\n", __func__); return -ETIMEDOUT; } #endif /* PEGASUS_WRITE_EEPROM */ static inline void get_node_id(pegasus_t *pegasus, __u8 *id) { int i; __u16 w16; for (i = 0; i < 3; i++) { read_eprom_word(pegasus, i, &w16); ((__le16 *) id)[i] = cpu_to_le16(w16); } } static void set_ethernet_addr(pegasus_t *pegasus) { __u8 node_id[6]; if (pegasus->features & PEGASUS_II) { get_registers(pegasus, 0x10, sizeof(node_id), node_id); } else { get_node_id(pegasus, node_id); set_registers(pegasus, EthID, sizeof(node_id), node_id); } memcpy(pegasus->net->dev_addr, node_id, sizeof(node_id)); } static inline int reset_mac(pegasus_t *pegasus) { __u8 data = 0x8; int i; set_register(pegasus, EthCtrl1, data); for (i = 0; i < REG_TIMEOUT; i++) { get_registers(pegasus, EthCtrl1, 1, &data); if (~data & 0x08) { if (loopback) break; if (mii_mode && (pegasus->features & HAS_HOME_PNA)) set_register(pegasus, Gpio1, 0x34); else set_register(pegasus, Gpio1, 0x26); set_register(pegasus, Gpio0, pegasus->features); set_register(pegasus, Gpio0, DEFAULT_GPIO_SET); break; } } if (i == REG_TIMEOUT) return -ETIMEDOUT; if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS || usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) { set_register(pegasus, Gpio0, 0x24); set_register(pegasus, Gpio0, 0x26); } if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_ELCON) { 
__u16 auxmode; read_mii_word(pegasus, 3, 0x1b, &auxmode); write_mii_word(pegasus, 3, 0x1b, auxmode | 4); } return 0; } static int enable_net_traffic(struct net_device *dev, struct usb_device *usb) { __u16 linkpart; __u8 data[4]; pegasus_t *pegasus = netdev_priv(dev); int ret; read_mii_word(pegasus, pegasus->phy, MII_LPA, &linkpart); data[0] = 0xc9; data[1] = 0; if (linkpart & (ADVERTISE_100FULL | ADVERTISE_10FULL)) data[1] |= 0x20; /* set full duplex */ if (linkpart & (ADVERTISE_100FULL | ADVERTISE_100HALF)) data[1] |= 0x10; /* set 100 Mbps */ if (mii_mode) data[1] = 0; data[2] = loopback ? 0x09 : 0x01; memcpy(pegasus->eth_regs, data, sizeof(data)); ret = set_registers(pegasus, EthCtrl0, 3, data); if (usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS || usb_dev_id[pegasus->dev_index].vendor == VENDOR_LINKSYS2 || usb_dev_id[pegasus->dev_index].vendor == VENDOR_DLINK) { u16 auxmode; read_mii_word(pegasus, 0, 0x1b, &auxmode); write_mii_word(pegasus, 0, 0x1b, auxmode | 4); } return ret; } static void fill_skb_pool(pegasus_t *pegasus) { int i; for (i = 0; i < RX_SKBS; i++) { if (pegasus->rx_pool[i]) continue; pegasus->rx_pool[i] = dev_alloc_skb(PEGASUS_MTU + 2); /* ** we give up if the allocation fail. the tasklet will be ** rescheduled again anyway... 
*/ if (pegasus->rx_pool[i] == NULL) return; skb_reserve(pegasus->rx_pool[i], 2); } } static void free_skb_pool(pegasus_t *pegasus) { int i; for (i = 0; i < RX_SKBS; i++) { if (pegasus->rx_pool[i]) { dev_kfree_skb(pegasus->rx_pool[i]); pegasus->rx_pool[i] = NULL; } } } static inline struct sk_buff *pull_skb(pegasus_t * pegasus) { int i; struct sk_buff *skb; for (i = 0; i < RX_SKBS; i++) { if (likely(pegasus->rx_pool[i] != NULL)) { skb = pegasus->rx_pool[i]; pegasus->rx_pool[i] = NULL; return skb; } } return NULL; } static void read_bulk_callback(struct urb *urb) { pegasus_t *pegasus = urb->context; struct net_device *net; int rx_status, count = urb->actual_length; int status = urb->status; u8 *buf = urb->transfer_buffer; __u16 pkt_len; if (!pegasus) return; net = pegasus->net; if (!netif_device_present(net) || !netif_running(net)) return; switch (status) { case 0: break; case -ETIME: netif_dbg(pegasus, rx_err, net, "reset MAC\n"); pegasus->flags &= ~PEGASUS_RX_BUSY; break; case -EPIPE: /* stall, or disconnect from TT */ /* FIXME schedule work to clear the halt */ netif_warn(pegasus, rx_err, net, "no rx stall recovery\n"); return; case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: netif_dbg(pegasus, ifdown, net, "rx unlink, %d\n", status); return; default: netif_dbg(pegasus, rx_err, net, "RX status %d\n", status); goto goon; } if (!count || count < 4) goto goon; rx_status = buf[count - 2]; if (rx_status & 0x1e) { netif_dbg(pegasus, rx_err, net, "RX packet error %x\n", rx_status); pegasus->stats.rx_errors++; if (rx_status & 0x06) /* long or runt */ pegasus->stats.rx_length_errors++; if (rx_status & 0x08) pegasus->stats.rx_crc_errors++; if (rx_status & 0x10) /* extra bits */ pegasus->stats.rx_frame_errors++; goto goon; } if (pegasus->chip == 0x8513) { pkt_len = le32_to_cpu(*(__le32 *)urb->transfer_buffer); pkt_len &= 0x0fff; pegasus->rx_skb->data += 2; } else { pkt_len = buf[count - 3] << 8; pkt_len += buf[count - 4]; pkt_len &= 0xfff; pkt_len -= 8; } /* * If the packet 
is unreasonably long, quietly drop it rather than * kernel panicing by calling skb_put. */ if (pkt_len > PEGASUS_MTU) goto goon; /* * at this point we are sure pegasus->rx_skb != NULL * so we go ahead and pass up the packet. */ skb_put(pegasus->rx_skb, pkt_len); pegasus->rx_skb->protocol = eth_type_trans(pegasus->rx_skb, net); netif_rx(pegasus->rx_skb); pegasus->stats.rx_packets++; pegasus->stats.rx_bytes += pkt_len; if (pegasus->flags & PEGASUS_UNPLUG) return; spin_lock(&pegasus->rx_pool_lock); pegasus->rx_skb = pull_skb(pegasus); spin_unlock(&pegasus->rx_pool_lock); if (pegasus->rx_skb == NULL) goto tl_sched; goon: usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, usb_rcvbulkpipe(pegasus->usb, 1), pegasus->rx_skb->data, PEGASUS_MTU + 8, read_bulk_callback, pegasus); rx_status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); if (rx_status == -ENODEV) netif_device_detach(pegasus->net); else if (rx_status) { pegasus->flags |= PEGASUS_RX_URB_FAIL; goto tl_sched; } else { pegasus->flags &= ~PEGASUS_RX_URB_FAIL; } return; tl_sched: tasklet_schedule(&pegasus->rx_tl); } static void rx_fixup(unsigned long data) { pegasus_t *pegasus; unsigned long flags; int status; pegasus = (pegasus_t *) data; if (pegasus->flags & PEGASUS_UNPLUG) return; spin_lock_irqsave(&pegasus->rx_pool_lock, flags); fill_skb_pool(pegasus); if (pegasus->flags & PEGASUS_RX_URB_FAIL) if (pegasus->rx_skb) goto try_again; if (pegasus->rx_skb == NULL) pegasus->rx_skb = pull_skb(pegasus); if (pegasus->rx_skb == NULL) { netif_warn(pegasus, rx_err, pegasus->net, "low on memory\n"); tasklet_schedule(&pegasus->rx_tl); goto done; } usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, usb_rcvbulkpipe(pegasus->usb, 1), pegasus->rx_skb->data, PEGASUS_MTU + 8, read_bulk_callback, pegasus); try_again: status = usb_submit_urb(pegasus->rx_urb, GFP_ATOMIC); if (status == -ENODEV) netif_device_detach(pegasus->net); else if (status) { pegasus->flags |= PEGASUS_RX_URB_FAIL; tasklet_schedule(&pegasus->rx_tl); } else { 
pegasus->flags &= ~PEGASUS_RX_URB_FAIL; } done: spin_unlock_irqrestore(&pegasus->rx_pool_lock, flags); } static void write_bulk_callback(struct urb *urb) { pegasus_t *pegasus = urb->context; struct net_device *net; int status = urb->status; if (!pegasus) return; net = pegasus->net; if (!netif_device_present(net) || !netif_running(net)) return; switch (status) { case -EPIPE: /* FIXME schedule_work() to clear the tx halt */ netif_stop_queue(net); netif_warn(pegasus, tx_err, net, "no tx stall recovery\n"); return; case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: netif_dbg(pegasus, ifdown, net, "tx unlink, %d\n", status); return; default: netif_info(pegasus, tx_err, net, "TX status %d\n", status); /* FALL THROUGH */ case 0: break; } net->trans_start = jiffies; /* prevent tx timeout */ netif_wake_queue(net); } static void intr_callback(struct urb *urb) { pegasus_t *pegasus = urb->context; struct net_device *net; int res, status = urb->status; if (!pegasus) return; net = pegasus->net; switch (status) { case 0: break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; default: /* some Pegasus-I products report LOTS of data * toggle errors... avoid log spamming */ netif_dbg(pegasus, timer, net, "intr status %d\n", status); } if (urb->actual_length >= 6) { u8 *d = urb->transfer_buffer; /* byte 0 == tx_status1, reg 2B */ if (d[0] & (TX_UNDERRUN|EXCESSIVE_COL |LATE_COL|JABBER_TIMEOUT)) { pegasus->stats.tx_errors++; if (d[0] & TX_UNDERRUN) pegasus->stats.tx_fifo_errors++; if (d[0] & (EXCESSIVE_COL | JABBER_TIMEOUT)) pegasus->stats.tx_aborted_errors++; if (d[0] & LATE_COL) pegasus->stats.tx_window_errors++; } /* d[5].LINK_STATUS lies on some adapters. * d[0].NO_CARRIER kicks in only with failed TX. * ... so monitoring with MII may be safest. 
*/ /* bytes 3-4 == rx_lostpkt, reg 2E/2F */ pegasus->stats.rx_missed_errors += ((d[3] & 0x7f) << 8) | d[4]; } res = usb_submit_urb(urb, GFP_ATOMIC); if (res == -ENODEV) netif_device_detach(pegasus->net); if (res) netif_err(pegasus, timer, net, "can't resubmit interrupt urb, %d\n", res); } static void pegasus_tx_timeout(struct net_device *net) { pegasus_t *pegasus = netdev_priv(net); netif_warn(pegasus, timer, net, "tx timeout\n"); usb_unlink_urb(pegasus->tx_urb); pegasus->stats.tx_errors++; } static netdev_tx_t pegasus_start_xmit(struct sk_buff *skb, struct net_device *net) { pegasus_t *pegasus = netdev_priv(net); int count = ((skb->len + 2) & 0x3f) ? skb->len + 2 : skb->len + 3; int res; __u16 l16 = skb->len; netif_stop_queue(net); ((__le16 *) pegasus->tx_buff)[0] = cpu_to_le16(l16); skb_copy_from_linear_data(skb, pegasus->tx_buff + 2, skb->len); usb_fill_bulk_urb(pegasus->tx_urb, pegasus->usb, usb_sndbulkpipe(pegasus->usb, 2), pegasus->tx_buff, count, write_bulk_callback, pegasus); if ((res = usb_submit_urb(pegasus->tx_urb, GFP_ATOMIC))) { netif_warn(pegasus, tx_err, net, "fail tx, %d\n", res); switch (res) { case -EPIPE: /* stall, or disconnect from TT */ /* cleanup should already have been scheduled */ break; case -ENODEV: /* disconnect() upcoming */ case -EPERM: netif_device_detach(pegasus->net); break; default: pegasus->stats.tx_errors++; netif_start_queue(net); } } else { pegasus->stats.tx_packets++; pegasus->stats.tx_bytes += skb->len; } dev_kfree_skb(skb); return NETDEV_TX_OK; } static struct net_device_stats *pegasus_netdev_stats(struct net_device *dev) { return &((pegasus_t *) netdev_priv(dev))->stats; } static inline void disable_net_traffic(pegasus_t *pegasus) { __le16 tmp = cpu_to_le16(0); set_registers(pegasus, EthCtrl0, sizeof(tmp), &tmp); } static inline void get_interrupt_interval(pegasus_t *pegasus) { u16 data; u8 interval; read_eprom_word(pegasus, 4, &data); interval = data >> 8; if (pegasus->usb->speed != USB_SPEED_HIGH) { if (interval < 0x80) 
{ netif_info(pegasus, timer, pegasus->net, "intr interval changed from %ums to %ums\n", interval, 0x80); interval = 0x80; data = (data & 0x00FF) | ((u16)interval << 8); #ifdef PEGASUS_WRITE_EEPROM write_eprom_word(pegasus, 4, data); #endif } } pegasus->intr_interval = interval; } static void set_carrier(struct net_device *net) { pegasus_t *pegasus = netdev_priv(net); u16 tmp; if (read_mii_word(pegasus, pegasus->phy, MII_BMSR, &tmp)) return; if (tmp & BMSR_LSTATUS) netif_carrier_on(net); else netif_carrier_off(net); } static void free_all_urbs(pegasus_t *pegasus) { usb_free_urb(pegasus->intr_urb); usb_free_urb(pegasus->tx_urb); usb_free_urb(pegasus->rx_urb); usb_free_urb(pegasus->ctrl_urb); } static void unlink_all_urbs(pegasus_t *pegasus) { usb_kill_urb(pegasus->intr_urb); usb_kill_urb(pegasus->tx_urb); usb_kill_urb(pegasus->rx_urb); usb_kill_urb(pegasus->ctrl_urb); } static int alloc_urbs(pegasus_t *pegasus) { pegasus->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); if (!pegasus->ctrl_urb) return 0; pegasus->rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!pegasus->rx_urb) { usb_free_urb(pegasus->ctrl_urb); return 0; } pegasus->tx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!pegasus->tx_urb) { usb_free_urb(pegasus->rx_urb); usb_free_urb(pegasus->ctrl_urb); return 0; } pegasus->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!pegasus->intr_urb) { usb_free_urb(pegasus->tx_urb); usb_free_urb(pegasus->rx_urb); usb_free_urb(pegasus->ctrl_urb); return 0; } return 1; } static int pegasus_open(struct net_device *net) { pegasus_t *pegasus = netdev_priv(net); int res; if (pegasus->rx_skb == NULL) pegasus->rx_skb = pull_skb(pegasus); /* ** Note: no point to free the pool. 
it is empty :-) */ if (!pegasus->rx_skb) return -ENOMEM; res = set_registers(pegasus, EthID, 6, net->dev_addr); usb_fill_bulk_urb(pegasus->rx_urb, pegasus->usb, usb_rcvbulkpipe(pegasus->usb, 1), pegasus->rx_skb->data, PEGASUS_MTU + 8, read_bulk_callback, pegasus); if ((res = usb_submit_urb(pegasus->rx_urb, GFP_KERNEL))) { if (res == -ENODEV) netif_device_detach(pegasus->net); netif_dbg(pegasus, ifup, net, "failed rx_urb, %d\n", res); goto exit; } usb_fill_int_urb(pegasus->intr_urb, pegasus->usb, usb_rcvintpipe(pegasus->usb, 3), pegasus->intr_buff, sizeof(pegasus->intr_buff), intr_callback, pegasus, pegasus->intr_interval); if ((res = usb_submit_urb(pegasus->intr_urb, GFP_KERNEL))) { if (res == -ENODEV) netif_device_detach(pegasus->net); netif_dbg(pegasus, ifup, net, "failed intr_urb, %d\n", res); usb_kill_urb(pegasus->rx_urb); goto exit; } if ((res = enable_net_traffic(net, pegasus->usb))) { netif_dbg(pegasus, ifup, net, "can't enable_net_traffic() - %d\n", res); res = -EIO; usb_kill_urb(pegasus->rx_urb); usb_kill_urb(pegasus->intr_urb); free_skb_pool(pegasus); goto exit; } set_carrier(net); netif_start_queue(net); netif_dbg(pegasus, ifup, net, "open\n"); res = 0; exit: return res; } static int pegasus_close(struct net_device *net) { pegasus_t *pegasus = netdev_priv(net); netif_stop_queue(net); if (!(pegasus->flags & PEGASUS_UNPLUG)) disable_net_traffic(pegasus); tasklet_kill(&pegasus->rx_tl); unlink_all_urbs(pegasus); return 0; } static void pegasus_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { pegasus_t *pegasus = netdev_priv(dev); strncpy(info->driver, driver_name, sizeof(info->driver) - 1); strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1); usb_make_path(pegasus->usb, info->bus_info, sizeof(info->bus_info)); } /* also handles three patterns of some kind in hardware */ #define WOL_SUPPORTED (WAKE_MAGIC|WAKE_PHY) static void pegasus_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { pegasus_t *pegasus = 
netdev_priv(dev); wol->supported = WAKE_MAGIC | WAKE_PHY; wol->wolopts = pegasus->wolopts; } static int pegasus_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) { pegasus_t *pegasus = netdev_priv(dev); u8 reg78 = 0x04; if (wol->wolopts & ~WOL_SUPPORTED) return -EINVAL; if (wol->wolopts & WAKE_MAGIC) reg78 |= 0x80; if (wol->wolopts & WAKE_PHY) reg78 |= 0x40; /* FIXME this 0x10 bit still needs to get set in the chip... */ if (wol->wolopts) pegasus->eth_regs[0] |= 0x10; else pegasus->eth_regs[0] &= ~0x10; pegasus->wolopts = wol->wolopts; return set_register(pegasus, WakeupControl, reg78); } static inline void pegasus_reset_wol(struct net_device *dev) { struct ethtool_wolinfo wol; memset(&wol, 0, sizeof wol); (void) pegasus_set_wol(dev, &wol); } static int pegasus_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { pegasus_t *pegasus; pegasus = netdev_priv(dev); mii_ethtool_gset(&pegasus->mii, ecmd); return 0; } static int pegasus_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) { pegasus_t *pegasus = netdev_priv(dev); return mii_ethtool_sset(&pegasus->mii, ecmd); } static int pegasus_nway_reset(struct net_device *dev) { pegasus_t *pegasus = netdev_priv(dev); return mii_nway_restart(&pegasus->mii); } static u32 pegasus_get_link(struct net_device *dev) { pegasus_t *pegasus = netdev_priv(dev); return mii_link_ok(&pegasus->mii); } static u32 pegasus_get_msglevel(struct net_device *dev) { pegasus_t *pegasus = netdev_priv(dev); return pegasus->msg_enable; } static void pegasus_set_msglevel(struct net_device *dev, u32 v) { pegasus_t *pegasus = netdev_priv(dev); pegasus->msg_enable = v; } static const struct ethtool_ops ops = { .get_drvinfo = pegasus_get_drvinfo, .get_settings = pegasus_get_settings, .set_settings = pegasus_set_settings, .nway_reset = pegasus_nway_reset, .get_link = pegasus_get_link, .get_msglevel = pegasus_get_msglevel, .set_msglevel = pegasus_set_msglevel, .get_wol = pegasus_get_wol, .set_wol = pegasus_set_wol, }; 
static int pegasus_ioctl(struct net_device *net, struct ifreq *rq, int cmd) { __u16 *data = (__u16 *) &rq->ifr_ifru; pegasus_t *pegasus = netdev_priv(net); int res; switch (cmd) { case SIOCDEVPRIVATE: data[0] = pegasus->phy; case SIOCDEVPRIVATE + 1: read_mii_word(pegasus, data[0], data[1] & 0x1f, &data[3]); res = 0; break; case SIOCDEVPRIVATE + 2: if (!capable(CAP_NET_ADMIN)) return -EPERM; write_mii_word(pegasus, pegasus->phy, data[1] & 0x1f, data[2]); res = 0; break; default: res = -EOPNOTSUPP; } return res; } static void pegasus_set_multicast(struct net_device *net) { pegasus_t *pegasus = netdev_priv(net); if (net->flags & IFF_PROMISC) { pegasus->eth_regs[EthCtrl2] |= RX_PROMISCUOUS; netif_info(pegasus, link, net, "Promiscuous mode enabled\n"); } else if (!netdev_mc_empty(net) || (net->flags & IFF_ALLMULTI)) { pegasus->eth_regs[EthCtrl0] |= RX_MULTICAST; pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; netif_dbg(pegasus, link, net, "set allmulti\n"); } else { pegasus->eth_regs[EthCtrl0] &= ~RX_MULTICAST; pegasus->eth_regs[EthCtrl2] &= ~RX_PROMISCUOUS; } pegasus->ctrl_urb->status = 0; pegasus->flags |= ETH_REGS_CHANGE; ctrl_callback(pegasus->ctrl_urb); } static __u8 mii_phy_probe(pegasus_t *pegasus) { int i; __u16 tmp; for (i = 0; i < 32; i++) { read_mii_word(pegasus, i, MII_BMSR, &tmp); if (tmp == 0 || tmp == 0xffff || (tmp & BMSR_MEDIA) == 0) continue; else return i; } return 0xff; } static inline void setup_pegasus_II(pegasus_t *pegasus) { __u8 data = 0xa5; set_register(pegasus, Reg1d, 0); set_register(pegasus, Reg7b, 1); mdelay(100); if ((pegasus->features & HAS_HOME_PNA) && mii_mode) set_register(pegasus, Reg7b, 0); else set_register(pegasus, Reg7b, 2); set_register(pegasus, 0x83, data); get_registers(pegasus, 0x83, 1, &data); if (data == 0xa5) pegasus->chip = 0x8513; else pegasus->chip = 0; set_register(pegasus, 0x80, 0xc0); set_register(pegasus, 0x83, 0xff); set_register(pegasus, 0x84, 0x01); if (pegasus->features & HAS_HOME_PNA && mii_mode) 
set_register(pegasus, Reg81, 6); else set_register(pegasus, Reg81, 2); } static int pegasus_count; static struct workqueue_struct *pegasus_workqueue; #define CARRIER_CHECK_DELAY (2 * HZ) static void check_carrier(struct work_struct *work) { pegasus_t *pegasus = container_of(work, pegasus_t, carrier_check.work); set_carrier(pegasus->net); if (!(pegasus->flags & PEGASUS_UNPLUG)) { queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, CARRIER_CHECK_DELAY); } } static int pegasus_blacklisted(struct usb_device *udev) { struct usb_device_descriptor *udd = &udev->descriptor; /* Special quirk to keep the driver from handling the Belkin Bluetooth * dongle which happens to have the same ID. */ if ((udd->idVendor == cpu_to_le16(VENDOR_BELKIN)) && (udd->idProduct == cpu_to_le16(0x0121)) && (udd->bDeviceClass == USB_CLASS_WIRELESS_CONTROLLER) && (udd->bDeviceProtocol == 1)) return 1; return 0; } /* we rely on probe() and remove() being serialized so we * don't need extra locking on pegasus_count. 
*/ static void pegasus_dec_workqueue(void) { pegasus_count--; if (pegasus_count == 0) { destroy_workqueue(pegasus_workqueue); pegasus_workqueue = NULL; } } static int pegasus_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct net_device *net; pegasus_t *pegasus; int dev_index = id - pegasus_ids; int res = -ENOMEM; if (pegasus_blacklisted(dev)) return -ENODEV; if (pegasus_count == 0) { pegasus_workqueue = create_singlethread_workqueue("pegasus"); if (!pegasus_workqueue) return -ENOMEM; } pegasus_count++; usb_get_dev(dev); net = alloc_etherdev(sizeof(struct pegasus)); if (!net) goto out; pegasus = netdev_priv(net); pegasus->dev_index = dev_index; init_waitqueue_head(&pegasus->ctrl_wait); if (!alloc_urbs(pegasus)) { dev_err(&intf->dev, "can't allocate %s\n", "urbs"); goto out1; } tasklet_init(&pegasus->rx_tl, rx_fixup, (unsigned long) pegasus); INIT_DELAYED_WORK(&pegasus->carrier_check, check_carrier); pegasus->intf = intf; pegasus->usb = dev; pegasus->net = net; net->watchdog_timeo = PEGASUS_TX_TIMEOUT; net->netdev_ops = &pegasus_netdev_ops; SET_ETHTOOL_OPS(net, &ops); pegasus->mii.dev = net; pegasus->mii.mdio_read = mdio_read; pegasus->mii.mdio_write = mdio_write; pegasus->mii.phy_id_mask = 0x1f; pegasus->mii.reg_num_mask = 0x1f; spin_lock_init(&pegasus->rx_pool_lock); pegasus->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK); pegasus->features = usb_dev_id[dev_index].private; get_interrupt_interval(pegasus); if (reset_mac(pegasus)) { dev_err(&intf->dev, "can't reset MAC\n"); res = -EIO; goto out2; } set_ethernet_addr(pegasus); fill_skb_pool(pegasus); if (pegasus->features & PEGASUS_II) { dev_info(&intf->dev, "setup Pegasus II specific registers\n"); setup_pegasus_II(pegasus); } pegasus->phy = mii_phy_probe(pegasus); if (pegasus->phy == 0xff) { dev_warn(&intf->dev, "can't locate MII phy, using default\n"); pegasus->phy = 1; } pegasus->mii.phy_id = 
pegasus->phy; usb_set_intfdata(intf, pegasus); SET_NETDEV_DEV(net, &intf->dev); pegasus_reset_wol(net); res = register_netdev(net); if (res) goto out3; queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, CARRIER_CHECK_DELAY); dev_info(&intf->dev, "%s, %s, %pM\n", net->name, usb_dev_id[dev_index].name, net->dev_addr); return 0; out3: usb_set_intfdata(intf, NULL); free_skb_pool(pegasus); out2: free_all_urbs(pegasus); out1: free_netdev(net); out: usb_put_dev(dev); pegasus_dec_workqueue(); return res; } static void pegasus_disconnect(struct usb_interface *intf) { struct pegasus *pegasus = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!pegasus) { dev_dbg(&intf->dev, "unregistering non-bound device?\n"); return; } pegasus->flags |= PEGASUS_UNPLUG; cancel_delayed_work(&pegasus->carrier_check); unregister_netdev(pegasus->net); usb_put_dev(interface_to_usbdev(intf)); unlink_all_urbs(pegasus); free_all_urbs(pegasus); free_skb_pool(pegasus); if (pegasus->rx_skb != NULL) { dev_kfree_skb(pegasus->rx_skb); pegasus->rx_skb = NULL; } free_netdev(pegasus->net); pegasus_dec_workqueue(); } static int pegasus_suspend(struct usb_interface *intf, pm_message_t message) { struct pegasus *pegasus = usb_get_intfdata(intf); netif_device_detach(pegasus->net); cancel_delayed_work(&pegasus->carrier_check); if (netif_running(pegasus->net)) { usb_kill_urb(pegasus->rx_urb); usb_kill_urb(pegasus->intr_urb); } return 0; } static int pegasus_resume(struct usb_interface *intf) { struct pegasus *pegasus = usb_get_intfdata(intf); netif_device_attach(pegasus->net); if (netif_running(pegasus->net)) { pegasus->rx_urb->status = 0; pegasus->rx_urb->actual_length = 0; read_bulk_callback(pegasus->rx_urb); pegasus->intr_urb->status = 0; pegasus->intr_urb->actual_length = 0; intr_callback(pegasus->intr_urb); } queue_delayed_work(pegasus_workqueue, &pegasus->carrier_check, CARRIER_CHECK_DELAY); return 0; } static const struct net_device_ops pegasus_netdev_ops = { .ndo_open = pegasus_open, 
.ndo_stop = pegasus_close, .ndo_do_ioctl = pegasus_ioctl, .ndo_start_xmit = pegasus_start_xmit, .ndo_set_rx_mode = pegasus_set_multicast, .ndo_get_stats = pegasus_netdev_stats, .ndo_tx_timeout = pegasus_tx_timeout, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static struct usb_driver pegasus_driver = { .name = driver_name, .probe = pegasus_probe, .disconnect = pegasus_disconnect, .id_table = pegasus_ids, .suspend = pegasus_suspend, .resume = pegasus_resume, }; static void __init parse_id(char *id) { unsigned int vendor_id = 0, device_id = 0, flags = 0, i = 0; char *token, *name = NULL; if ((token = strsep(&id, ":")) != NULL) name = token; /* name now points to a null terminated string*/ if ((token = strsep(&id, ":")) != NULL) vendor_id = simple_strtoul(token, NULL, 16); if ((token = strsep(&id, ":")) != NULL) device_id = simple_strtoul(token, NULL, 16); flags = simple_strtoul(id, NULL, 16); pr_info("%s: new device %s, vendor ID 0x%04x, device ID 0x%04x, flags: 0x%x\n", driver_name, name, vendor_id, device_id, flags); if (vendor_id > 0x10000 || vendor_id == 0) return; if (device_id > 0x10000 || device_id == 0) return; for (i = 0; usb_dev_id[i].name; i++); usb_dev_id[i].name = name; usb_dev_id[i].vendor = vendor_id; usb_dev_id[i].device = device_id; usb_dev_id[i].private = flags; pegasus_ids[i].match_flags = USB_DEVICE_ID_MATCH_DEVICE; pegasus_ids[i].idVendor = vendor_id; pegasus_ids[i].idProduct = device_id; } static int __init pegasus_init(void) { pr_info("%s: %s, " DRIVER_DESC "\n", driver_name, DRIVER_VERSION); if (devid) parse_id(devid); return usb_register(&pegasus_driver); } static void __exit pegasus_exit(void) { usb_deregister(&pegasus_driver); } module_init(pegasus_init); module_exit(pegasus_exit);
gpl-2.0
cm-mirror/android_kernel_xiaomi_dior
drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
4802
81943
/* * Original code based Host AP (software wireless LAN access point) driver * for Intersil Prism2/2.5/3 - hostap.o module, common routines * * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen * <jkmaline@cc.hut.fi> * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright (c) 2004, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by Andrea Merello <andreamrl@tiscali.it> A special thanks goes to Realtek for their support ! ******************************************************************************/ #include <linux/compiler.h> //#include <linux/config.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <asm/uaccess.h> #include <linux/ctype.h> #include "ieee80211.h" #include "dot11d.h" static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats) { struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *)skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); skb->dev = ieee->dev; skb_reset_mac_header(skb); skb_pull(skb, ieee80211_get_hdrlen(fc)); skb->pkt_type = PACKET_OTHERHOST; skb->protocol = __constant_htons(ETH_P_80211_RAW); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } /* Called only as a tasklet (software IRQ) */ static struct ieee80211_frag_entry * 
ieee80211_frag_cache_find(struct ieee80211_device *ieee, unsigned int seq, unsigned int frag, u8 tid,u8 *src, u8 *dst) { struct ieee80211_frag_entry *entry; int i; for (i = 0; i < IEEE80211_FRAG_CACHE_LEN; i++) { entry = &ieee->frag_cache[tid][i]; if (entry->skb != NULL && time_after(jiffies, entry->first_frag_time + 2 * HZ)) { IEEE80211_DEBUG_FRAG( "expiring fragment cache entry " "seq=%u last_frag=%u\n", entry->seq, entry->last_frag); dev_kfree_skb_any(entry->skb); entry->skb = NULL; } if (entry->skb != NULL && entry->seq == seq && (entry->last_frag + 1 == frag || frag == -1) && memcmp(entry->src_addr, src, ETH_ALEN) == 0 && memcmp(entry->dst_addr, dst, ETH_ALEN) == 0) return entry; } return NULL; } /* Called only as a tasklet (software IRQ) */ static struct sk_buff * ieee80211_frag_cache_get(struct ieee80211_device *ieee, struct ieee80211_hdr_4addr *hdr) { struct sk_buff *skb = NULL; u16 fc = le16_to_cpu(hdr->frame_ctl); u16 sc = le16_to_cpu(hdr->seq_ctl); unsigned int frag = WLAN_GET_SEQ_FRAG(sc); unsigned int seq = WLAN_GET_SEQ_SEQ(sc); struct ieee80211_frag_entry *entry; struct ieee80211_hdr_3addrqos *hdr_3addrqos; struct ieee80211_hdr_4addrqos *hdr_4addrqos; u8 tid; if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)hdr; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else if (IEEE80211_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)hdr; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else { tid = 0; } if (frag == 0) { /* Reserve enough space to fit maximum frame length */ skb = dev_alloc_skb(ieee->dev->mtu + sizeof(struct ieee80211_hdr_4addr) + 8 /* LLC */ + 2 /* alignment */ + 8 /* WEP */ + ETH_ALEN /* WDS */ + (IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */); if (skb == NULL) return NULL; entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]]; 
ieee->frag_next_idx[tid]++; if (ieee->frag_next_idx[tid] >= IEEE80211_FRAG_CACHE_LEN) ieee->frag_next_idx[tid] = 0; if (entry->skb != NULL) dev_kfree_skb_any(entry->skb); entry->first_frag_time = jiffies; entry->seq = seq; entry->last_frag = frag; entry->skb = skb; memcpy(entry->src_addr, hdr->addr2, ETH_ALEN); memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN); } else { /* received a fragment of a frame for which the head fragment * should have already been received */ entry = ieee80211_frag_cache_find(ieee, seq, frag, tid,hdr->addr2, hdr->addr1); if (entry != NULL) { entry->last_frag = frag; skb = entry->skb; } } return skb; } /* Called only as a tasklet (software IRQ) */ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee, struct ieee80211_hdr_4addr *hdr) { u16 fc = le16_to_cpu(hdr->frame_ctl); u16 sc = le16_to_cpu(hdr->seq_ctl); unsigned int seq = WLAN_GET_SEQ_SEQ(sc); struct ieee80211_frag_entry *entry; struct ieee80211_hdr_3addrqos *hdr_3addrqos; struct ieee80211_hdr_4addrqos *hdr_4addrqos; u8 tid; if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)hdr; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else if (IEEE80211_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)hdr; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else { tid = 0; } entry = ieee80211_frag_cache_find(ieee, seq, -1, tid,hdr->addr2, hdr->addr1); if (entry == NULL) { IEEE80211_DEBUG_FRAG( "could not invalidate fragment cache " "entry (seq=%u)\n", seq); return -1; } entry->skb = NULL; return 0; } /* ieee80211_rx_frame_mgtmt * * Responsible for handling management control frames * * Called by ieee80211_rx */ static inline int ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats, u16 type, u16 stype) { /* On the struct stats definition 
there is written that * this is not mandatory.... but seems that the probe * response parser uses it */ struct ieee80211_hdr_3addr * hdr = (struct ieee80211_hdr_3addr *)skb->data; rx_stats->len = skb->len; ieee80211_rx_mgt(ieee,(struct ieee80211_hdr_4addr *)skb->data,rx_stats); //if ((ieee->state == IEEE80211_LINKED) && (memcmp(hdr->addr3, ieee->current_network.bssid, ETH_ALEN))) if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN)))//use ADDR1 to perform address matching for Management frames { dev_kfree_skb_any(skb); return 0; } ieee80211_rx_frame_softmac(ieee, skb, rx_stats, type, stype); dev_kfree_skb_any(skb); return 0; #ifdef NOT_YET if (ieee->iw_mode == IW_MODE_MASTER) { printk(KERN_DEBUG "%s: Master mode not yet supported.\n", ieee->dev->name); return 0; /* hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *) skb->data);*/ } if (ieee->hostapd && type == IEEE80211_TYPE_MGMT) { if (stype == WLAN_FC_STYPE_BEACON && ieee->iw_mode == IW_MODE_MASTER) { struct sk_buff *skb2; /* Process beacon frames also in kernel driver to * update STA(AP) table statistics */ skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) hostap_rx(skb2->dev, skb2, rx_stats); } /* send management frames to the user space daemon for * processing */ ieee->apdevstats.rx_packets++; ieee->apdevstats.rx_bytes += skb->len; prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT); return 0; } if (ieee->iw_mode == IW_MODE_MASTER) { if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) { printk(KERN_DEBUG "%s: unknown management frame " "(type=0x%02x, stype=0x%02x) dropped\n", skb->dev->name, type, stype); return -1; } hostap_rx(skb->dev, skb, rx_stats); return 0; } printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame " "received in non-Host AP mode\n", skb->dev->name); return -1; #endif } /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ static unsigned char rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 
0x00, 0x00 }; /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ static unsigned char bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; /* No encapsulation header if EtherType < 0x600 (=length) */ /* Called by ieee80211_rx_frame_decrypt */ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee, struct sk_buff *skb, size_t hdrlen) { struct net_device *dev = ieee->dev; u16 fc, ethertype; struct ieee80211_hdr_4addr *hdr; u8 *pos; if (skb->len < 24) return 0; hdr = (struct ieee80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); /* check that the frame is unicast frame to us */ if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_TODS && memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 && memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) { /* ToDS frame with own addr BSSID and DA */ } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS && memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) { /* FromDS frame with own addr as DA */ } else return 0; if (skb->len < 24 + 8) return 0; /* check for port access entity Ethernet type */ // pos = skb->data + 24; pos = skb->data + hdrlen; ethertype = (pos[6] << 8) | pos[7]; if (ethertype == ETH_P_PAE) return 1; return 0; } /* Called only as a tasklet (software IRQ), by ieee80211_rx */ static inline int ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb, struct ieee80211_crypt_data *crypt) { struct ieee80211_hdr_4addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) return 0; if (ieee->hwsec_active) { cb_desc *tcb_desc = (cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE); tcb_desc->bHwSec = 1; } hdr = (struct ieee80211_hdr_4addr *) skb->data; hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); #ifdef CONFIG_IEEE80211_CRYPT_TKIP if (ieee->tkip_countermeasures && strcmp(crypt->ops->name, "TKIP") == 0) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: TKIP countermeasures: 
dropped " "received packet from %pM\n", ieee->dev->name, hdr->addr2); } return -1; } #endif atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { IEEE80211_DEBUG_DROP( "decryption failed (SA=%pM" ") res=%d\n", hdr->addr2, res); if (res == -2) IEEE80211_DEBUG_DROP("Decryption failed ICV " "mismatch (key %d)\n", skb->data[hdrlen + 3] >> 6); ieee->ieee_stats.rx_discards_undecryptable++; return -1; } return res; } /* Called only as a tasklet (software IRQ), by ieee80211_rx */ static inline int ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device* ieee, struct sk_buff *skb, int keyidx, struct ieee80211_crypt_data *crypt) { struct ieee80211_hdr_4addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) return 0; if (ieee->hwsec_active) { cb_desc *tcb_desc = (cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE); tcb_desc->bHwSec = 1; } hdr = (struct ieee80211_hdr_4addr *) skb->data; hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed" " (SA=%pM keyidx=%d)\n", ieee->dev->name, hdr->addr2, keyidx); return -1; } return 0; } /* this function is stolen from ipw2200 driver*/ #define IEEE_PACKET_RETRY_TIME (5*HZ) static int is_duplicate_packet(struct ieee80211_device *ieee, struct ieee80211_hdr_4addr *header) { u16 fc = le16_to_cpu(header->frame_ctl); u16 sc = le16_to_cpu(header->seq_ctl); u16 seq = WLAN_GET_SEQ_SEQ(sc); u16 frag = WLAN_GET_SEQ_FRAG(sc); u16 *last_seq, *last_frag; unsigned long *last_time; struct ieee80211_hdr_3addrqos *hdr_3addrqos; struct ieee80211_hdr_4addrqos *hdr_4addrqos; u8 tid; //TO2DS and QoS if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)header; tid = 
le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else if(IEEE80211_QOS_HAS_SEQ(fc)) { //QoS hdr_3addrqos = (struct ieee80211_hdr_3addrqos*)header; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else { // no QoS tid = 0; } switch (ieee->iw_mode) { case IW_MODE_ADHOC: { struct list_head *p; struct ieee_ibss_seq *entry = NULL; u8 *mac = header->addr2; int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE; //for (pos = (head)->next; pos != (head); pos = pos->next) //__list_for_each(p, &ieee->ibss_mac_hash[index]) { list_for_each(p, &ieee->ibss_mac_hash[index]) { entry = list_entry(p, struct ieee_ibss_seq, list); if (!memcmp(entry->mac, mac, ETH_ALEN)) break; } // if (memcmp(entry->mac, mac, ETH_ALEN)){ if (p == &ieee->ibss_mac_hash[index]) { entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC); if (!entry) { printk(KERN_WARNING "Cannot malloc new mac entry\n"); return 0; } memcpy(entry->mac, mac, ETH_ALEN); entry->seq_num[tid] = seq; entry->frag_num[tid] = frag; entry->packet_time[tid] = jiffies; list_add(&entry->list, &ieee->ibss_mac_hash[index]); return 0; } last_seq = &entry->seq_num[tid]; last_frag = &entry->frag_num[tid]; last_time = &entry->packet_time[tid]; break; } case IW_MODE_INFRA: last_seq = &ieee->last_rxseq_num[tid]; last_frag = &ieee->last_rxfrag_num[tid]; last_time = &ieee->last_packet_time[tid]; break; default: return 0; } // if(tid != 0) { // printk(KERN_WARNING ":)))))))))))%x %x %x, fc(%x)\n", tid, *last_seq, seq, header->frame_ctl); // } if ((*last_seq == seq) && time_after(*last_time + IEEE_PACKET_RETRY_TIME, jiffies)) { if (*last_frag == frag){ //printk(KERN_WARNING "[1] go drop!\n"); goto drop; } if (*last_frag + 1 != frag) /* out-of-order fragment */ //printk(KERN_WARNING "[2] go drop!\n"); goto drop; } else *last_seq = seq; *last_frag = frag; *last_time = jiffies; return 0; drop: // BUG_ON(!(fc & IEEE80211_FCTL_RETRY)); // printk("DUP\n"); return 1; } bool 
AddReorderEntry( PRX_TS_RECORD pTS, PRX_REORDER_ENTRY pReorderEntry ) { struct list_head *pList = &pTS->RxPendingPktList; while(pList->next != &pTS->RxPendingPktList) { if( SN_LESS(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) ) { pList = pList->next; } else if( SN_EQUAL(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) ) { return false; } else { break; } } pReorderEntry->List.next = pList->next; pReorderEntry->List.next->prev = &pReorderEntry->List; pReorderEntry->List.prev = pList; pList->next = &pReorderEntry->List; return true; } void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb** prxbIndicateArray,u8 index) { u8 i = 0 , j=0; u16 ethertype; // if(index > 1) // IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): hahahahhhh, We indicate packet from reorder list, index is %u\n",__FUNCTION__,index); for(j = 0; j<index; j++) { //added by amy for reorder struct ieee80211_rxb* prxb = prxbIndicateArray[j]; for(i = 0; i<prxb->nr_subframes; i++) { struct sk_buff *sub_skb = prxb->subframes[i]; /* convert hdr + possible LLC headers into Ethernet header */ ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7]; if (sub_skb->len >= 8 && ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ skb_pull(sub_skb, SNAP_SIZE); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN); } else { u16 len; /* Leave Ethernet header part of hdr and full payload */ len = htons(sub_skb->len); memcpy(skb_push(sub_skb, 2), &len, 2); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN); } //stats->rx_packets++; //stats->rx_bytes += sub_skb->len; /* Indicat 
the packets to upper layer */ if (sub_skb) { //printk("0skb_len(%d)\n", skb->len); sub_skb->protocol = eth_type_trans(sub_skb, ieee->dev); memset(sub_skb->cb, 0, sizeof(sub_skb->cb)); sub_skb->dev = ieee->dev; sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ //skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */ ieee->last_rx_ps_time = jiffies; //printk("1skb_len(%d)\n", skb->len); netif_rx(sub_skb); } } kfree(prxb); prxb = NULL; } } void RxReorderIndicatePacket( struct ieee80211_device *ieee, struct ieee80211_rxb* prxb, PRX_TS_RECORD pTS, u16 SeqNum) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; PRX_REORDER_ENTRY pReorderEntry = NULL; struct ieee80211_rxb* prxbIndicateArray[REORDER_WIN_SIZE]; u8 WinSize = pHTInfo->RxReorderWinSize; u16 WinEnd = (pTS->RxIndicateSeq + WinSize -1)%4096; u8 index = 0; bool bMatchWinStart = false, bPktInBuf = false; IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->RxIndicateSeq is %d, WinSize is %d\n",__FUNCTION__,SeqNum,pTS->RxIndicateSeq,WinSize); /* Rx Reorder initialize condition.*/ if(pTS->RxIndicateSeq == 0xffff) { pTS->RxIndicateSeq = SeqNum; } /* Drop out the packet which SeqNum is smaller than WinStart */ if(SN_LESS(SeqNum, pTS->RxIndicateSeq)) { IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packet Drop! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum); pHTInfo->RxReorderDropCounter++; { int i; for(i =0; i < prxb->nr_subframes; i++) { dev_kfree_skb(prxb->subframes[i]); } kfree(prxb); prxb = NULL; } return; } /* * Sliding window manipulation. Conditions includes: * 1. Incoming SeqNum is equal to WinStart =>Window shift 1 * 2. 
Incoming SeqNum is larger than the WinEnd => Window shift N */ if(SN_EQUAL(SeqNum, pTS->RxIndicateSeq)) { pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096; bMatchWinStart = true; } else if(SN_LESS(WinEnd, SeqNum)) { if(SeqNum >= (WinSize - 1)) { pTS->RxIndicateSeq = SeqNum + 1 -WinSize; } else { pTS->RxIndicateSeq = 4095 - (WinSize - (SeqNum +1)) + 1; } IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Window Shift! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum); } /* * Indication process. * After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets * with the SeqNum smaller than latest WinStart and buffer other packets. */ /* For Rx Reorder condition: * 1. All packets with SeqNum smaller than WinStart => Indicate * 2. All packets with SeqNum larger than or equal to WinStart => Buffer it. */ if(bMatchWinStart) { /* Current packet is going to be indicated.*/ IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packets indication!! IndicateSeq: %d, NewSeq: %d\n",\ pTS->RxIndicateSeq, SeqNum); prxbIndicateArray[0] = prxb; // printk("========================>%s(): SeqNum is %d\n",__FUNCTION__,SeqNum); index = 1; } else { /* Current packet is going to be inserted into pending list.*/ //IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): We RX no ordered packed, insert to orderd list\n",__FUNCTION__); if(!list_empty(&ieee->RxReorder_Unused_List)) { pReorderEntry = (PRX_REORDER_ENTRY)list_entry(ieee->RxReorder_Unused_List.next,RX_REORDER_ENTRY,List); list_del_init(&pReorderEntry->List); /* Make a reorder entry and insert into a the packet list.*/ pReorderEntry->SeqNum = SeqNum; pReorderEntry->prxb = prxb; // IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pREorderEntry->SeqNum is %d\n",__FUNCTION__,pReorderEntry->SeqNum); if(!AddReorderEntry(pTS, pReorderEntry)) { IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): Duplicate packet is dropped!! 
IndicateSeq: %d, NewSeq: %d\n", __FUNCTION__, pTS->RxIndicateSeq, SeqNum); list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List); { int i; for(i =0; i < prxb->nr_subframes; i++) { dev_kfree_skb(prxb->subframes[i]); } kfree(prxb); prxb = NULL; } } else { IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum); } } else { /* * Packets are dropped if there is not enough reorder entries. * This part shall be modified!! We can just indicate all the * packets in buffer and get reorder entries. */ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): There is no reorder entry!! Packet is dropped!!\n"); { int i; for(i =0; i < prxb->nr_subframes; i++) { dev_kfree_skb(prxb->subframes[i]); } kfree(prxb); prxb = NULL; } } } /* Check if there is any packet need indicate.*/ while(!list_empty(&pTS->RxPendingPktList)) { IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): start RREORDER indicate\n",__FUNCTION__); pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List); if( SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) || SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq)) { /* This protect buffer from overflow. */ if(index >= REORDER_WIN_SIZE) { IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Buffer overflow!! \n"); bPktInBuf = true; break; } list_del_init(&pReorderEntry->List); if(SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq)) pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096; IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packets indication!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum); prxbIndicateArray[index] = pReorderEntry->prxb; // printk("========================>%s(): pReorderEntry->SeqNum is %d\n",__FUNCTION__,pReorderEntry->SeqNum); index++; list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List); } else { bPktInBuf = true; break; } } /* Handling pending timer. 
Set this timer to prevent from long time Rx buffering.*/ if(index>0) { // Cancel previous pending timer. // del_timer_sync(&pTS->RxPktPendingTimer); pTS->RxTimeoutIndicateSeq = 0xffff; // Indicate packets if(index>REORDER_WIN_SIZE){ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n"); return; } ieee80211_indicate_packets(ieee, prxbIndicateArray, index); } if(bPktInBuf && pTS->RxTimeoutIndicateSeq==0xffff) { // Set new pending timer. IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): SET rx timeout timer\n", __FUNCTION__); pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq; if(timer_pending(&pTS->RxPktPendingTimer)) del_timer_sync(&pTS->RxPktPendingTimer); pTS->RxPktPendingTimer.expires = jiffies + MSECS(pHTInfo->RxReorderPendingTime); add_timer(&pTS->RxPktPendingTimer); } } u8 parse_subframe(struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats, struct ieee80211_rxb *rxb,u8* src,u8* dst) { struct ieee80211_hdr_3addr *hdr = (struct ieee80211_hdr_3addr* )skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); u16 LLCOffset= sizeof(struct ieee80211_hdr_3addr); u16 ChkLength; bool bIsAggregateFrame = false; u16 nSubframe_Length; u8 nPadding_Length = 0; u16 SeqNum=0; struct sk_buff *sub_skb; u8 *data_ptr; /* just for debug purpose */ SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl)); if((IEEE80211_QOS_HAS_SEQ(fc))&&\ (((frameqos *)(skb->data + IEEE80211_3ADDR_LEN))->field.reserved)) { bIsAggregateFrame = true; } if(IEEE80211_QOS_HAS_SEQ(fc)) { LLCOffset += 2; } if(rx_stats->bContainHTC) { LLCOffset += sHTCLng; } //printk("ChkLength = %d\n", LLCOffset); // Null packet, don't indicate it to upper layer ChkLength = LLCOffset;/* + (Frame_WEP(frame)!=0 ?Adapter->MgntInfo.SecurityInfo.EncryptionHeadOverhead:0);*/ if( skb->len <= ChkLength ) { return 0; } skb_pull(skb, LLCOffset); if(!bIsAggregateFrame) { rxb->nr_subframes = 1; #ifdef JOHN_NOCPY rxb->subframes[0] = skb; #else rxb->subframes[0] = skb_copy(skb, GFP_ATOMIC); #endif 
memcpy(rxb->src,src,ETH_ALEN); memcpy(rxb->dst,dst,ETH_ALEN); //IEEE80211_DEBUG_DATA(IEEE80211_DL_RX,skb->data,skb->len); return 1; } else { rxb->nr_subframes = 0; memcpy(rxb->src,src,ETH_ALEN); memcpy(rxb->dst,dst,ETH_ALEN); while(skb->len > ETHERNET_HEADER_SIZE) { /* Offset 12 denote 2 mac address */ nSubframe_Length = *((u16*)(skb->data + 12)); //==m==>change the length order nSubframe_Length = (nSubframe_Length>>8) + (nSubframe_Length<<8); if(skb->len<(ETHERNET_HEADER_SIZE + nSubframe_Length)) { printk("%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",\ __FUNCTION__,rxb->nr_subframes); printk("%s: A-MSDU parse error!! Subframe Length: %d\n",__FUNCTION__, nSubframe_Length); printk("nRemain_Length is %d and nSubframe_Length is : %d\n",skb->len,nSubframe_Length); printk("The Packet SeqNum is %d\n",SeqNum); return 0; } /* move the data point to data content */ skb_pull(skb, ETHERNET_HEADER_SIZE); #ifdef JOHN_NOCPY sub_skb = skb_clone(skb, GFP_ATOMIC); sub_skb->len = nSubframe_Length; sub_skb->tail = sub_skb->data + nSubframe_Length; #else /* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(nSubframe_Length + 12); skb_reserve(sub_skb, 12); data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length); memcpy(data_ptr,skb->data,nSubframe_Length); #endif rxb->subframes[rxb->nr_subframes++] = sub_skb; if(rxb->nr_subframes >= MAX_SUBFRAME_COUNT) { IEEE80211_DEBUG_RX("ParseSubframe(): Too many Subframes! Packets dropped!\n"); break; } skb_pull(skb,nSubframe_Length); if(skb->len != 0) { nPadding_Length = 4 - ((nSubframe_Length + ETHERNET_HEADER_SIZE) % 4); if(nPadding_Length == 4) { nPadding_Length = 0; } if(skb->len < nPadding_Length) { return 0; } skb_pull(skb,nPadding_Length); } } #ifdef JOHN_NOCPY dev_kfree_skb(skb); #endif //{just for debug added by david //printk("AMSDU::rxb->nr_subframes = %d\n",rxb->nr_subframes); //} return rxb->nr_subframes; } } /* All received frames are sent to this function. 
 @skb contains the frame in
 * IEEE 802.11 format, i.e., in the format it was sent over air.
 * This function is called only as a tasklet (software IRQ). */
/*
 * ieee80211_rx - main RX entry point for 802.11 data/management frames.
 *
 * Returns 1 when the skb has been consumed (delivered, queued for
 * reordering, cached for reassembly, or dropped after being freed here)
 * and 0 when the caller still owns the skb and may reuse it as a DMA
 * target (see the comment at rx_dropped).
 *
 * Processing order: sanity/length checks -> HTC detection -> monitor-mode
 * short-circuit -> crypt key lookup -> duplicate / QoS-TS sequence check
 * -> mgmt dispatch -> address extraction -> MPDU decrypt -> fragment
 * reassembly -> MSDU decrypt -> EAPOL/encryption policy -> A-MSDU parse
 * -> direct indication or RxReorderIndicatePacket().
 */
int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
		 struct ieee80211_rx_stats *rx_stats)
{
	struct net_device *dev = ieee->dev;
	struct ieee80211_hdr_4addr *hdr;
	//struct ieee80211_hdr_3addrqos *hdr;
	size_t hdrlen;
	u16 fc, type, stype, sc;
	struct net_device_stats *stats;
	unsigned int frag;
	u8 *payload;
	u16 ethertype;
	//added by amy for reorder
	u8	TID = 0;
	u16	SeqNum = 0;
	PRX_TS_RECORD pTS = NULL;
	//bool bIsAggregateFrame = false;
	//added by amy for reorder
#ifdef NOT_YET
	struct net_device *wds = NULL;
	struct sk_buff *skb2 = NULL;
	/* NOTE(review): duplicate declaration of 'wds' -- would not compile
	 * if NOT_YET were ever defined. */
	struct net_device *wds = NULL;
	int frame_authorized = 0;
	int from_assoc_ap = 0;
	void *sta = NULL;
#endif
//	u16 qos_ctl = 0;
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	u8 bssid[ETH_ALEN];
	struct ieee80211_crypt_data *crypt = NULL;
	int keyidx = 0;

	int i;
	struct ieee80211_rxb* rxb = NULL;
	// cheat the the hdr type
	hdr = (struct ieee80211_hdr_4addr *)skb->data;
	stats = &ieee->stats;

	/* Too short to even hold frame control + duration + one address. */
	if (skb->len < 10) {
		printk(KERN_INFO "%s: SKB length < 10\n", dev->name);
		goto rx_dropped;
	}

	fc = le16_to_cpu(hdr->frame_ctl);
	type = WLAN_FC_GET_TYPE(fc);
	stype = WLAN_FC_GET_STYPE(fc);
	sc = le16_to_cpu(hdr->seq_ctl);

	frag = WLAN_GET_SEQ_FRAG(sc);
	hdrlen = ieee80211_get_hdrlen(fc);

	/* 802.11n HT Control field present: header is 4 bytes longer. */
	if(HTCCheck(ieee, skb->data)) {
		if(net_ratelimit())
			printk("find HTCControl\n");
		hdrlen += 4;
		rx_stats->bContainHTC = 1;
	}

	//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
#ifdef NOT_YET
#if WIRELESS_EXT > 15
	/* Put this code here so that we avoid duplicating it in all
	 * Rx paths.
 - Jean II */
#ifdef IW_WIRELESS_SPY		/* defined in iw_handler.h */
	/* If spy monitoring on */
	if (iface->spy_data.spy_number > 0) {
		struct iw_quality wstats;
		wstats.level = rx_stats->rssi;
		wstats.noise = rx_stats->noise;
		wstats.updated = 6;	/* No qual value */
		/* Update spy records */
		wireless_spy_update(dev, hdr->addr2, &wstats);
	}
#endif /* IW_WIRELESS_SPY */
#endif /* WIRELESS_EXT > 15 */
	hostap_update_rx_stats(local->ap, hdr, rx_stats);
#endif

#if WIRELESS_EXT > 15
	/* Monitor mode: deliver the raw 802.11 frame and stop here. */
	if (ieee->iw_mode == IW_MODE_MONITOR) {
		ieee80211_monitor_rx(ieee, skb, rx_stats);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		return 1;
	}
#endif

	if (ieee->host_decrypt) {
		int idx = 0;
		/* Key index lives in the top 2 bits of the 4th payload byte. */
		if (skb->len >= hdrlen + 3)
			idx = skb->data[hdrlen + 3] >> 6;
		crypt = ieee->crypt[idx];
#ifdef NOT_YET
		sta = NULL;

		/* Use station specific key to override default keys if the
		 * receiver address is a unicast address ("individual RA"). If
		 * bcrx_sta_key parameter is set, station specific key is used
		 * even with broad/multicast targets (this is against IEEE
		 * 802.11, but makes it easier to use different keys with
		 * stations that do not support WEP key mapping). */

		if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
			(void) hostap_handle_sta_crypto(local, hdr, &crypt,
							&sta);
#endif

		/* allow NULL decrypt to indicate an station specific override
		 * for default encryption */
		if (crypt && (crypt->ops == NULL ||
			      crypt->ops->decrypt_mpdu == NULL))
			crypt = NULL;

		if (!crypt && (fc & IEEE80211_FCTL_WEP)) {
			/* This seems to be triggered by some (multicast?)
			 * frames from other than current BSS, so just drop the
			 * frames silently instead of filling system log with
			 * these reports.
 */
			IEEE80211_DEBUG_DROP("Decryption failed (not set)"
					     " (SA=%pM)\n",
					     hdr->addr2);
			ieee->ieee_stats.rx_discards_undecryptable++;
			goto rx_dropped;
		}
	}

	if (skb->len < IEEE80211_DATA_HDR3_LEN)
		goto rx_dropped;

	// if QoS enabled, should check the sequence for each of the AC
	if( (ieee->pHTInfo->bCurRxReorderEnable == false) || !ieee->current_network.qos_data.active|| !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)){
		/* Legacy path: single retry/duplicate detector. */
		if (is_duplicate_packet(ieee, hdr))
			goto rx_dropped;

	}
	else
	{
		/* QoS path: duplicate detection is per traffic stream (TS),
		 * keyed by sender address + TID. */
		PRX_TS_RECORD pRxTS = NULL;
		//IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): QOS ENABLE AND RECEIVE QOS DATA , we will get Ts, tid:%d\n",__FUNCTION__, tid);
		if(GetTs(
				ieee,
				(PTS_COMMON_INFO*) &pRxTS,
				hdr->addr2,
				(u8)Frame_QoSTID((u8*)(skb->data)),
				RX_DIR,
				true))
		{

		//	IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->RxLastFragNum is %d,frag is %d,pRxTS->RxLastSeqNum is %d,seq is %d\n",__FUNCTION__,pRxTS->RxLastFragNum,frag,pRxTS->RxLastSeqNum,WLAN_GET_SEQ_SEQ(sc));
			/* Bit 11 of fc is the Retry flag: a retried frame with
			 * the same frag and sequence numbers is a duplicate. */
			if(	(fc & (1<<11))  &&
					(frag == pRxTS->RxLastFragNum) &&
					(WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum)	)
			{
				goto rx_dropped;
			}
			else
			{
				pRxTS->RxLastFragNum = frag;
				pRxTS->RxLastSeqNum = WLAN_GET_SEQ_SEQ(sc);
			}
		}
		else
		{
			IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s(): No TS!!
 Skip the check!!\n",__FUNCTION__);
			goto rx_dropped;
		}
	}
	/* Management frames have their own dispatcher. */
	if (type == IEEE80211_FTYPE_MGMT) {
		//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
		if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
			goto rx_dropped;
		else
			goto rx_exit;
	}

	/* Data frame - extract src/dst addresses */
	switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
	case IEEE80211_FCTL_FROMDS:
		memcpy(dst, hdr->addr1, ETH_ALEN);
		memcpy(src, hdr->addr3, ETH_ALEN);
		memcpy(bssid, hdr->addr2, ETH_ALEN);
		break;
	case IEEE80211_FCTL_TODS:
		memcpy(dst, hdr->addr3, ETH_ALEN);
		memcpy(src, hdr->addr2, ETH_ALEN);
		memcpy(bssid, hdr->addr1, ETH_ALEN);
		break;
	case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
		/* 4-address (WDS) frame needs the longer header. */
		if (skb->len < IEEE80211_DATA_HDR4_LEN)
			goto rx_dropped;
		memcpy(dst, hdr->addr3, ETH_ALEN);
		memcpy(src, hdr->addr4, ETH_ALEN);
		memcpy(bssid, ieee->current_network.bssid, ETH_ALEN);
		break;
	case 0:
		/* IBSS / ad-hoc frame. */
		memcpy(dst, hdr->addr1, ETH_ALEN);
		memcpy(src, hdr->addr2, ETH_ALEN);
		memcpy(bssid, hdr->addr3, ETH_ALEN);
		break;
	}

#ifdef NOT_YET
	if (hostap_rx_frame_wds(ieee, hdr, fc, &wds))
		goto rx_dropped;
	if (wds) {
		skb->dev = dev = wds;
		stats = hostap_get_stats(dev);
	}

	if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
	    (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
	    IEEE80211_FCTL_FROMDS && ieee->stadev &&
	    memcmp(hdr->addr2, ieee->assoc_ap_addr, ETH_ALEN) == 0) {
		/* Frame from BSSID of the AP for which we are a client */
		skb->dev = dev = ieee->stadev;
		stats = hostap_get_stats(dev);
		from_assoc_ap = 1;
	}
#endif

	dev->last_rx = jiffies;

#ifdef NOT_YET
	if ((ieee->iw_mode == IW_MODE_MASTER ||
	     ieee->iw_mode == IW_MODE_REPEAT) &&
	    !from_assoc_ap) {
		switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
					     wds != NULL)) {
		case AP_RX_CONTINUE_NOT_AUTHORIZED:
			frame_authorized = 0;
			break;
		case AP_RX_CONTINUE:
			frame_authorized = 1;
			break;
		case AP_RX_DROP:
			goto rx_dropped;
		case AP_RX_EXIT:
			goto rx_exit;
		}
	}
#endif
	//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
	/* Nullfunc frames may have PS-bit
 set, so they must be passed to
	 * hostap_handle_sta_rx() before being dropped here. */
	if (stype != IEEE80211_STYPE_DATA &&
	    stype != IEEE80211_STYPE_DATA_CFACK &&
	    stype != IEEE80211_STYPE_DATA_CFPOLL &&
	    stype != IEEE80211_STYPE_DATA_CFACKPOLL&&
	    stype != IEEE80211_STYPE_QOS_DATA//add by David,2006.8.4
	    ) {
		if (stype != IEEE80211_STYPE_NULLFUNC)
			IEEE80211_DEBUG_DROP(
				"RX: dropped data frame "
				"with no data (type=0x%02x, "
				"subtype=0x%02x, len=%d)\n",
				type, stype, skb->len);
		goto rx_dropped;
	}
	/* Only accept frames from the BSS we are associated with. */
	if (memcmp(bssid, ieee->current_network.bssid, ETH_ALEN))
		goto rx_dropped;

	/* skb: hdr + (possibly fragmented, possibly encrypted) payload */

	if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
	    (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
	{
		printk("decrypt frame error\n");
		goto rx_dropped;
	}


	/* Decrypt may have moved/trimmed the buffer; re-read the header. */
	hdr = (struct ieee80211_hdr_4addr *) skb->data;

	/* skb: hdr + (possibly fragmented) plaintext payload */
	// PR: FIXME: hostap has additional conditions in the "if" below:
	// ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
	if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
		int flen;
		struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr);
		IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);

		if (!frag_skb) {
			IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG,
					"Rx cannot get skb from fragment "
					"cache (morefrag=%d seq=%u frag=%u)\n",
					(fc & IEEE80211_FCTL_MOREFRAGS) != 0,
					WLAN_GET_SEQ_SEQ(sc), frag);
			goto rx_dropped;
		}

		/* Non-first fragments contribute payload only (header is
		 * already in the cache skb from fragment 0). */
		flen = skb->len;
		if (frag != 0)
			flen -= hdrlen;

		if (frag_skb->tail + flen > frag_skb->end) {
			printk(KERN_WARNING "%s: host decrypted and "
			       "reassembled frame did not fit skb\n",
			       dev->name);
			ieee80211_frag_cache_invalidate(ieee, hdr);
			goto rx_dropped;
		}

		if (frag == 0) {
			/* copy first fragment (including full headers) into
			 * beginning of the fragment cache skb */
			memcpy(skb_put(frag_skb, flen), skb->data, flen);
		} else {
			/* append frame payload to the end of the fragment
			 * cache skb */
			memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
			       flen);
		}
		/* Original fragment skb is consumed here; from now on we
		 * operate on the cache skb. */
		dev_kfree_skb_any(skb);
		skb = NULL;

		if (fc & IEEE80211_FCTL_MOREFRAGS) {
			/* more fragments expected - leave the skb in fragment
			 * cache for now; it will be delivered to upper layers
			 * after all fragments have been received */
			goto rx_exit;
		}

		/* this was the last fragment and the frame will be
		 * delivered, so remove skb from fragment cache */
		skb = frag_skb;
		hdr = (struct ieee80211_hdr_4addr *) skb->data;
		ieee80211_frag_cache_invalidate(ieee, hdr);
	}

	/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
	 * encrypted/authenticated */
	if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
	    ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
	{
		printk("==>decrypt msdu error\n");
		goto rx_dropped;
	}

	//added by amy for AP roaming
	ieee->LinkDetectInfo.NumRecvDataInPeriod++;
	ieee->LinkDetectInfo.NumRxOkInPeriod++;

	hdr = (struct ieee80211_hdr_4addr *) skb->data;
	/* Encryption is configured but this frame arrived in the clear:
	 * only EAPOL (802.1X handshake) frames are allowed through. */
	if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep) {
		if (/*ieee->ieee802_1x &&*/
		    ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
#ifdef CONFIG_IEEE80211_DEBUG
			/* pass unencrypted EAPOL frames even if encryption is
			 * configured */
			struct eapol *eap = (struct eapol *)(skb->data +
				24);
			IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
						eap_get_type(eap->type));
#endif
		} else {
			IEEE80211_DEBUG_DROP(
				"encryption configured, but RX "
				"frame not encrypted (SA=%pM)\n",
				hdr->addr2);
			goto rx_dropped;
		}
	}

#ifdef CONFIG_IEEE80211_DEBUG
	if (crypt && !(fc & IEEE80211_FCTL_WEP) &&
	    ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
		struct eapol *eap = (struct eapol *)(skb->data +
			24);
		IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
					eap_get_type(eap->type));
	}
#endif

	if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep &&
	    !ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
		IEEE80211_DEBUG_DROP(
			"dropped unencrypted RX data "
			"frame from %pM"
			" (drop_unencrypted=1)\n",
			hdr->addr2);
		goto rx_dropped;
	}
/*
	if(ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
		printk(KERN_WARNING "RX: IEEE802.1X EPAOL
 frame!\n");
	}
*/
//added by amy for reorder
	/* Look up the RX traffic stream for unicast QoS data so the frame
	 * can be funneled through the reorder buffer below; TIDs other than
	 * 0/3 flag the presence of non-best-effort traffic. */
	if(ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
		&& !is_multicast_ether_addr(hdr->addr1) && !is_broadcast_ether_addr(hdr->addr1))
	{
		TID = Frame_QoSTID(skb->data);
		SeqNum = WLAN_GET_SEQ_SEQ(sc);
		GetTs(ieee,(PTS_COMMON_INFO*) &pTS,hdr->addr2,TID,RX_DIR,true);
		if(TID !=0 && TID !=3)
		{
			ieee->bis_any_nonbepkts = true;
		}
	}
//added by amy for reorder
	/* skb: hdr + (possible reassembled) full plaintext payload */
	payload = skb->data + hdrlen;
	//ethertype = (payload[6] << 8) | payload[7];
	rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC);
	if(rxb == NULL)
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR,"%s(): kmalloc rxb error\n",__FUNCTION__);
		goto rx_dropped;
	}
	/* to parse amsdu packets */
	/* qos data packets & reserved bit is 1 */
	if(parse_subframe(skb,rx_stats,rxb,src,dst) == 0) {
		/* only to free rxb, and not submit the packets to upper layer */
		for(i =0; i < rxb->nr_subframes; i++) {
			dev_kfree_skb(rxb->subframes[i]);
		}
		kfree(rxb);
		rxb = NULL;
		goto rx_dropped;
	}

//added by amy for reorder
	if(ieee->pHTInfo->bCurRxReorderEnable == false ||pTS == NULL){
//added by amy for reorder
		/* Reordering disabled (or no TS): indicate each subframe to
		 * the network stack immediately. */
		for(i = 0; i<rxb->nr_subframes; i++) {
			struct sk_buff *sub_skb = rxb->subframes[i];

			if (sub_skb) {
				/* convert hdr + possible LLC headers into Ethernet header */
				ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
				if (sub_skb->len >= 8 &&
						((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
						  ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
						 memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
					/* remove RFC1042 or Bridge-Tunnel encapsulation and
					 * replace EtherType */
					skb_pull(sub_skb, SNAP_SIZE);
					memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
					memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
				} else {
					u16 len;
					/* Leave Ethernet header part of hdr and full payload */
					len = htons(sub_skb->len);
					memcpy(skb_push(sub_skb, 2), &len, 2);
					memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
					memcpy(skb_push(sub_skb,
 ETH_ALEN), dst, ETH_ALEN);
				}

				stats->rx_packets++;
				stats->rx_bytes += sub_skb->len;
				if(is_multicast_ether_addr(dst)) {
					stats->multicast++;
				}

				/* Indicat the packets to upper layer */
				//printk("0skb_len(%d)\n", skb->len);
				sub_skb->protocol = eth_type_trans(sub_skb, dev);
				memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
				sub_skb->dev = dev;
				sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
				//skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */
				ieee->last_rx_ps_time = jiffies;
				//printk("1skb_len(%d)\n", skb->len);
				netif_rx(sub_skb);
			}
		}
		kfree(rxb);
		rxb = NULL;

	}
	else
	{
		/* Reorder path: ownership of rxb transfers to
		 * RxReorderIndicatePacket(). */
		IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): REORDER ENABLE AND PTS not NULL, and we will enter RxReorderIndicatePacket()\n",__FUNCTION__);
		RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);
	}
#ifndef JOHN_NOCPY
	dev_kfree_skb(skb);
#endif

 rx_exit:
#ifdef NOT_YET
	if (sta)
		hostap_handle_sta_release(sta);
#endif
	return 1;

 rx_dropped:
	/* rxb is either NULL or empty at every jump to this label. */
	kfree(rxb);
	rxb = NULL;
	stats->rx_dropped++;

	/* Returning 0 indicates to caller that we have not handled the SKB--
	 * so it is still allocated and can be used again by underlying
	 * hardware as a DMA target */
	return 0;
}
#define MGMT_FRAME_FIXED_PART_LENGTH		0x24

/* WMM (Wi-Fi Multimedia) OUI used to recognize QoS info elements. */
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };

/*
* Make the structure we read from the beacon packet to have
* the right values
*/
/* Returns 0 when the element carries the expected WMM OUI, type,
 * version and subtype; -1 otherwise. */
static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element
				     *info_element, int sub_type)
{

	if (info_element->qui_subtype != sub_type)
		return -1;
	if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
		return -1;
	if (info_element->qui_type != QOS_OUI_TYPE)
		return -1;
	if (info_element->version != QOS_VERSION_1)
		return -1;

	return 0;
}

/*
 * Parse a QoS parameter element
 */
/* Copies a QoS parameter info element into @element_param and validates
 * it; returns 0 on success, -1 on NULL input, size mismatch, or failed
 * OUI/subtype verification. */
static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info
					    *element_param, struct ieee80211_info_element
					    *info_element)
{
	int ret = 0;
	u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2;

	if ((info_element == NULL) || (element_param == NULL))
		return
 -1;

	/* Element must match the expected fixed size exactly. */
	if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
		memcpy(element_param->info_element.qui, info_element->data,
		       info_element->len);
		element_param->info_element.elementID = info_element->id;
		element_param->info_element.length = info_element->len;
	} else
		ret = -1;
	if (ret == 0)
		ret = ieee80211_verify_qos_info(&element_param->info_element,
						QOS_OUI_PARAM_SUB_TYPE);
	return ret;
}

/*
 * Parse a QoS information element
 */
/* Same shape as ieee80211_read_qos_param_element() but for the shorter
 * QoS *information* element; returns 0 on success, -1 otherwise. */
static int ieee80211_read_qos_info_element(struct
					   ieee80211_qos_information_element
					   *element_info, struct ieee80211_info_element
					   *info_element)
{
	int ret = 0;
	u16 size = sizeof(struct ieee80211_qos_information_element) - 2;

	if (element_info == NULL)
		return -1;
	if (info_element == NULL)
		return -1;

	if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
		memcpy(element_info->qui, info_element->data,
		       info_element->len);
		element_info->elementID = info_element->id;
		element_info->length = info_element->len;
	} else
		ret = -1;

	if (ret == 0)
		ret = ieee80211_verify_qos_info(element_info,
						QOS_OUI_INFO_SUB_TYPE);
	return ret;
}

/*
 * Write QoS parameters from the ac parameters.
 */
/* Expands the per-AC records of a parsed WMM parameter element into the
 * driver's per-queue AIFS/CWmin/CWmax/TXOP arrays, indexed by ACI.
 * Always returns 0 (rc is never modified). */
static int ieee80211_qos_convert_ac_to_parameters(struct
						  ieee80211_qos_parameter_info
						  *param_elm, struct
						  ieee80211_qos_parameters
						  *qos_param)
{
	int rc = 0;
	int i;
	struct ieee80211_qos_ac_parameter *ac_params;
	u8 aci;
	//u8 cw_min;
	//u8 cw_max;

	for (i = 0; i < QOS_QUEUE_NUM; i++) {
		ac_params = &(param_elm->ac_params_record[i]);

		/* ACI (access category index) is bits 5-6 of aci_aifsn. */
		aci = (ac_params->aci_aifsn & 0x60) >> 5;

		if(aci >= QOS_QUEUE_NUM)
			continue;
		qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f;

		/* WMM spec P.11: The minimum value for AIFSN shall be 2 */
		qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2:qos_param->aifs[aci];

		/* ECWmin/ECWmax packed as low/high nibbles. */
		qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;

		qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;

		/* Bit 4 of aci_aifsn is the ACM (admission control) flag. */
		qos_param->flag[aci] = (ac_params->aci_aifsn & 0x10) ?
 0x01 : 0x00;
		qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
	}
	return rc;
}

/*
 * we have a generic data element which it may contain QoS information or
 * parameters element. check the information element length to decide
 * which type to read
 */
/* Tries the short QoS information element first, then the full parameter
 * element; on success marks the network as QoS-capable and records the
 * parameter count. Returns 0 when either form parsed. */
static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element
					     *info_element,
					     struct ieee80211_network *network)
{
	int rc = 0;
	struct ieee80211_qos_parameters *qos_param = NULL;
	struct ieee80211_qos_information_element qos_info_element;

	rc = ieee80211_read_qos_info_element(&qos_info_element, info_element);
	if (rc == 0) {
		network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
		network->flags |= NETWORK_HAS_QOS_INFORMATION;
	} else {
		struct ieee80211_qos_parameter_info param_element;

		rc = ieee80211_read_qos_param_element(&param_element,
						      info_element);
		if (rc == 0) {
			qos_param = &(network->qos_data.parameters);
			ieee80211_qos_convert_ac_to_parameters(&param_element,
							       qos_param);
			network->flags |= NETWORK_HAS_QOS_PARAMETERS;
			network->qos_data.param_count =
			    param_element.info_element.ac_info & 0x0F;
		}
	}

	if (rc == 0) {
		IEEE80211_DEBUG_QOS("QoS is supported\n");
		network->qos_data.supported = 1;
	}
	return rc;
}

#ifdef CONFIG_IEEE80211_DEBUG
/* Expands to a switch case mapping an MFIE_TYPE_x id to its name. */
#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x

/* Debug helper: human-readable name for an information element id. */
static const char *get_info_element_string(u16 id)
{
	switch (id) {
		MFIE_STRING(SSID);
		MFIE_STRING(RATES);
		MFIE_STRING(FH_SET);
		MFIE_STRING(DS_SET);
		MFIE_STRING(CF_SET);
		MFIE_STRING(TIM);
		MFIE_STRING(IBSS_SET);
		MFIE_STRING(COUNTRY);
		MFIE_STRING(HOP_PARAMS);
		MFIE_STRING(HOP_TABLE);
		MFIE_STRING(REQUEST);
		MFIE_STRING(CHALLENGE);
		MFIE_STRING(POWER_CONSTRAINT);
		MFIE_STRING(POWER_CAPABILITY);
		MFIE_STRING(TPC_REQUEST);
		MFIE_STRING(TPC_REPORT);
		MFIE_STRING(SUPP_CHANNELS);
		MFIE_STRING(CSA);
		MFIE_STRING(MEASURE_REQUEST);
		MFIE_STRING(MEASURE_REPORT);
		MFIE_STRING(QUIET);
		MFIE_STRING(IBSS_DFS);
	       // MFIE_STRING(ERP_INFO);
		MFIE_STRING(RSN);
		MFIE_STRING(RATES_EX);
		MFIE_STRING(GENERIC);
		MFIE_STRING(QOS_PARAMETER);
	default:
		return "UNKNOWN";
	}
}
#endif

/*
 * ieee80211_extract_country_ie - cache and apply an 802.11d country IE.
 *
 * When 802.11d is enabled, copies the country IE payload into the
 * network record and, if no valid country IE has been seen yet, feeds
 * it to Dot11d_UpdateCountryIe(). The CIE watchdog is refreshed for
 * frames from the source we are tracking, even when the IE is empty.
 */
static inline void ieee80211_extract_country_ie(
	struct ieee80211_device *ieee,
	struct ieee80211_info_element *info_element,
	struct ieee80211_network *network,
	u8 * addr2
)
{
	if(IS_DOT11D_ENABLE(ieee))
	{
		if(info_element->len!= 0)
		{
			memcpy(network->CountryIeBuf, info_element->data, info_element->len);
			network->CountryIeLen = info_element->len;

			if(!IS_COUNTRY_IE_VALID(ieee))
			{
				Dot11d_UpdateCountryIe(ieee, addr2, info_element->len, info_element->data);
			}
		}

		//
		// 070305, rcnjko: I update country IE watch dog here because
		// some AP (e.g. Cisco 1242) don't include country IE in their
		// probe response frame.
		//
		if(IS_EQUAL_CIE_SRC(ieee, addr2) )
		{
			UPDATE_CIE_WATCHDOG(ieee);
		}
	}

}

/*
 * ieee80211_parse_info_param - walk the TLV information elements of a
 * beacon/probe response and populate @network (SSID, rates, channel,
 * TIM, QoS, HT capabilities, vendor quirks, ...).
 * @length is the number of bytes remaining in the IE area.
 */
int ieee80211_parse_info_param(struct ieee80211_device *ieee,
		struct ieee80211_info_element *info_element,
		u16 length,
		struct ieee80211_network *network,
		struct ieee80211_rx_stats *stats)
{
	u8 i;
	short offset;
	u16	tmp_htcap_len=0;
	u16	tmp_htinfo_len=0;
	u16 ht_realtek_agg_len=0;
	u8  ht_realtek_agg_buf[MAX_IE_LEN];
//	u16 broadcom_len = 0;
#ifdef CONFIG_IEEE80211_DEBUG
	char rates_str[64];
	char *p;
#endif

	while (length >= sizeof(*info_element)) {
		/* Guard against an element whose declared length runs past
		 * the buffer. */
		if (sizeof(*info_element) + info_element->len > length) {
			IEEE80211_DEBUG_MGMT("Info elem: parse failed: "
					     "info_element->len + 2 > left : "
					     "info_element->len+2=%zd left=%d, id=%d.\n",
					     info_element->len +
					     sizeof(*info_element),
					     length, info_element->id);
			/* We stop processing but don't return an error here
			 * because some misbehaviour APs break this rule. ie.
			 * Orinoco AP1000.
*/ break; } switch (info_element->id) { case MFIE_TYPE_SSID: if (ieee80211_is_empty_essid(info_element->data, info_element->len)) { network->flags |= NETWORK_EMPTY_ESSID; break; } network->ssid_len = min(info_element->len, (u8) IW_ESSID_MAX_SIZE); memcpy(network->ssid, info_element->data, network->ssid_len); if (network->ssid_len < IW_ESSID_MAX_SIZE) memset(network->ssid + network->ssid_len, 0, IW_ESSID_MAX_SIZE - network->ssid_len); IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n", network->ssid, network->ssid_len); break; case MFIE_TYPE_RATES: #ifdef CONFIG_IEEE80211_DEBUG p = rates_str; #endif network->rates_len = min(info_element->len, MAX_RATES_LENGTH); for (i = 0; i < network->rates_len; i++) { network->rates[i] = info_element->data[i]; #ifdef CONFIG_IEEE80211_DEBUG p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); #endif if (ieee80211_is_ofdm_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_OFDM; if (info_element->data[i] & IEEE80211_BASIC_RATE_MASK) network->flags &= ~NETWORK_HAS_CCK; } } IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n", rates_str, network->rates_len); break; case MFIE_TYPE_RATES_EX: #ifdef CONFIG_IEEE80211_DEBUG p = rates_str; #endif network->rates_ex_len = min(info_element->len, MAX_RATES_EX_LENGTH); for (i = 0; i < network->rates_ex_len; i++) { network->rates_ex[i] = info_element->data[i]; #ifdef CONFIG_IEEE80211_DEBUG p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); #endif if (ieee80211_is_ofdm_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_OFDM; if (info_element->data[i] & IEEE80211_BASIC_RATE_MASK) network->flags &= ~NETWORK_HAS_CCK; } } IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n", rates_str, network->rates_ex_len); break; case MFIE_TYPE_DS_SET: IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n", info_element->data[0]); network->channel = info_element->data[0]; break; case MFIE_TYPE_FH_SET: 
IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n"); break; case MFIE_TYPE_CF_SET: IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n"); break; case MFIE_TYPE_TIM: if(info_element->len < 4) break; network->tim.tim_count = info_element->data[0]; network->tim.tim_period = info_element->data[1]; network->dtim_period = info_element->data[1]; if(ieee->state != IEEE80211_LINKED) break; network->last_dtim_sta_time[0] = stats->mac_time[0]; network->last_dtim_sta_time[1] = stats->mac_time[1]; network->dtim_data = IEEE80211_DTIM_VALID; if(info_element->data[0] != 0) break; if(info_element->data[2] & 1) network->dtim_data |= IEEE80211_DTIM_MBCAST; offset = (info_element->data[2] >> 1)*2; //printk("offset1:%x aid:%x\n",offset, ieee->assoc_id); if(ieee->assoc_id < 8*offset || ieee->assoc_id > 8*(offset + info_element->len -3)) break; offset = (ieee->assoc_id / 8) - offset;// + ((aid % 8)? 0 : 1) ; if(info_element->data[3+offset] & (1<<(ieee->assoc_id%8))) network->dtim_data |= IEEE80211_DTIM_UCAST; //IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: partially ignored\n"); break; case MFIE_TYPE_ERP: network->erp_value = info_element->data[0]; network->flags |= NETWORK_HAS_ERP_VALUE; IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n", network->erp_value); break; case MFIE_TYPE_IBSS_SET: network->atim_window = info_element->data[0]; IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n", network->atim_window); break; case MFIE_TYPE_CHALLENGE: IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n"); break; case MFIE_TYPE_GENERIC: IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len); if (!ieee80211_parse_qos_info_param_IE(info_element, network)) break; if (info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x50 && info_element->data[2] == 0xf2 && info_element->data[3] == 0x01) { network->wpa_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); memcpy(network->wpa_ie, info_element, network->wpa_ie_len); break; } #ifdef THOMAS_TURBO if (info_element->len == 
7 && info_element->data[0] == 0x00 && info_element->data[1] == 0xe0 && info_element->data[2] == 0x4c && info_element->data[3] == 0x01 && info_element->data[4] == 0x02) { network->Turbo_Enable = 1; } #endif //for HTcap and HTinfo parameters if(tmp_htcap_len == 0){ if(info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x90 && info_element->data[2] == 0x4c && info_element->data[3] == 0x033){ tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htcap_len != 0){ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\ sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len; memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen); } } if(tmp_htcap_len != 0) network->bssht.bdSupportHT = true; else network->bssht.bdSupportHT = false; } if(tmp_htinfo_len == 0){ if(info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x90 && info_element->data[2] == 0x4c && info_element->data[3] == 0x034){ tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htinfo_len != 0){ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; if(tmp_htinfo_len){ network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\ sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len; memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen); } } } } if(ieee->aggregation){ if(network->bssht.bdSupportHT){ if(info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0xe0 && info_element->data[2] == 0x4c && info_element->data[3] == 0x02){ ht_realtek_agg_len = min(info_element->len,(u8)MAX_IE_LEN); memcpy(ht_realtek_agg_buf,info_element->data,info_element->len); } if(ht_realtek_agg_len >= 5){ network->bssht.bdRT2RTAggregation = true; if((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & 0x02)) network->bssht.bdRT2RTLongSlotTime = true; } } } //if(tmp_htcap_len !=0 || tmp_htinfo_len != 0) { 
if((info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x05 && info_element->data[2] == 0xb5) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x0a && info_element->data[2] == 0xf7) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x10 && info_element->data[2] == 0x18)){ network->broadcom_cap_exist = true; } } if(info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x0c && info_element->data[2] == 0x43) { network->ralink_cap_exist = true; } else network->ralink_cap_exist = false; //added by amy for atheros AP if((info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x03 && info_element->data[2] == 0x7f) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x13 && info_element->data[2] == 0x74)) { printk("========>%s(): athros AP is exist\n",__FUNCTION__); network->atheros_cap_exist = true; } else network->atheros_cap_exist = false; if(info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96) { network->cisco_cap_exist = true; } else network->cisco_cap_exist = false; //added by amy for LEAP of cisco if(info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96 && info_element->data[3] == 0x01) { if(info_element->len == 6) { memcpy(network->CcxRmState, &info_element[4], 2); if(network->CcxRmState[0] != 0) { network->bCcxRmEnable = true; } else network->bCcxRmEnable = false; // // CCXv4 Table 59-1 MBSSID Masks. 
// network->MBssidMask = network->CcxRmState[1] & 0x07; if(network->MBssidMask != 0) { network->bMBssidValid = true; network->MBssidMask = 0xff << (network->MBssidMask); cpMacAddr(network->MBssid, network->bssid); network->MBssid[5] &= network->MBssidMask; } else { network->bMBssidValid = false; } } else { network->bCcxRmEnable = false; } } if(info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96 && info_element->data[3] == 0x03) { if(info_element->len == 5) { network->bWithCcxVerNum = true; network->BssCcxVerNumber = info_element->data[4]; } else { network->bWithCcxVerNum = false; network->BssCcxVerNumber = 0; } } break; case MFIE_TYPE_RSN: IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n", info_element->len); network->rsn_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); memcpy(network->rsn_ie, info_element, network->rsn_ie_len); break; //HT related element. case MFIE_TYPE_HT_CAP: IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_CAP: %d bytes\n", info_element->len); tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htcap_len != 0){ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\ sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len; memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen); //If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT() // windows driver will update WMM parameters each beacon received once connected // Linux driver is a bit different. 
network->bssht.bdSupportHT = true; } else network->bssht.bdSupportHT = false; break; case MFIE_TYPE_HT_INFO: IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_INFO: %d bytes\n", info_element->len); tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htinfo_len){ network->bssht.bdHTSpecVer = HT_SPEC_VER_IEEE; network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\ sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len; memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen); } break; case MFIE_TYPE_AIRONET: IEEE80211_DEBUG_SCAN("MFIE_TYPE_AIRONET: %d bytes\n", info_element->len); if(info_element->len >IE_CISCO_FLAG_POSITION) { network->bWithAironetIE = true; // CCX 1 spec v1.13, A01.1 CKIP Negotiation (page23): // "A Cisco access point advertises support for CKIP in beacon and probe response packets, // by adding an Aironet element and setting one or both of the CKIP negotiation bits." if( (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_MIC) || (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_PK) ) { network->bCkipSupported = true; } else { network->bCkipSupported = false; } } else { network->bWithAironetIE = false; network->bCkipSupported = false; } break; case MFIE_TYPE_QOS_PARAMETER: printk(KERN_ERR "QoS Error need to parse QOS_PARAMETER IE\n"); break; case MFIE_TYPE_COUNTRY: IEEE80211_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n", info_element->len); //printk("=====>Receive <%s> Country IE\n",network->ssid); ieee80211_extract_country_ie(ieee, info_element, network, network->bssid);//addr2 is same as addr3 when from an AP break; /* TODO */ default: IEEE80211_DEBUG_MGMT ("Unsupported info element: %s (%d)\n", get_info_element_string(info_element->id), info_element->id); break; } length -= sizeof(*info_element) + info_element->len; info_element = (struct ieee80211_info_element *)&info_element-> data[info_element->len]; } if(!network->atheros_cap_exist && !network->broadcom_cap_exist && 
!network->cisco_cap_exist && !network->ralink_cap_exist &&
	!network->bssht.bdRT2RTAggregation) {
	/* No known vendor IE matched above: flag the AP as unknown vendor. */
	network->unknown_cap_exist = true;
} else {
	network->unknown_cap_exist = false;
}
return 0;
}

/*
 * Map a raw 0-100 signal-strength reading onto a piecewise-linear
 * display percentage.  NOTE(review): the breakpoints and slopes are
 * vendor-tuned magic values; the ranges below are taken from the code
 * itself, not from hardware documentation.
 */
static inline u8 ieee80211_SignalStrengthTranslate(
	u8 CurrSS
	)
{
	u8 RetSS;

	// Step 1. Scale mapping.
	if(CurrSS >= 71 && CurrSS <= 100)
	{
		RetSS = 90 + ((CurrSS - 70) / 3);
	}
	else if(CurrSS >= 41 && CurrSS <= 70)
	{
		RetSS = 78 + ((CurrSS - 40) / 3);
	}
	else if(CurrSS >= 31 && CurrSS <= 40)
	{
		RetSS = 66 + (CurrSS - 30);
	}
	else if(CurrSS >= 21 && CurrSS <= 30)
	{
		RetSS = 54 + (CurrSS - 20);
	}
	else if(CurrSS >= 5 && CurrSS <= 20)
	{
		RetSS = 42 + (((CurrSS - 5) * 2) / 3);
	}
	else if(CurrSS == 4)
	{
		RetSS = 36;
	}
	else if(CurrSS == 3)
	{
		RetSS = 27;
	}
	else if(CurrSS == 2)
	{
		RetSS = 18;
	}
	else if(CurrSS == 1)
	{
		RetSS = 9;
	}
	else
	{
		/* Out-of-range input (0 or >100) is passed through unchanged. */
		RetSS = CurrSS;
	}
	//RT_TRACE(COMP_DBG, DBG_LOUD, ("##### After Mapping: LastSS: %d, CurrSS: %d, RetSS: %d\n", LastSS, CurrSS, RetSS));

	// Step 2. Smoothing.
	//RT_TRACE(COMP_DBG, DBG_LOUD, ("$$$$$ After Smoothing: LastSS: %d, CurrSS: %d, RetSS: %d\n", LastSS, CurrSS, RetSS));

	return RetSS;
}

/* Convert a 0-100 signal-strength index to dBm using x = 0.5*y - 95. */
long ieee80211_translate_todbm(u8 signal_strength_index )// 0-100 index.
{
	long signal_power; // in dBm.

	// Translate to dBm (x=0.5y-95).
signal_power = (long)((signal_strength_index + 1) >> 1); signal_power -= 95; return signal_power; } static inline int ieee80211_network_init( struct ieee80211_device *ieee, struct ieee80211_probe_response *beacon, struct ieee80211_network *network, struct ieee80211_rx_stats *stats) { #ifdef CONFIG_IEEE80211_DEBUG //char rates_str[64]; //char *p; #endif network->qos_data.active = 0; network->qos_data.supported = 0; network->qos_data.param_count = 0; network->qos_data.old_param_count = 0; /* Pull out fixed field data */ memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); network->capability = le16_to_cpu(beacon->capability); network->last_scanned = jiffies; network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]); network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]); network->beacon_interval = le32_to_cpu(beacon->beacon_interval); /* Where to pull this? beacon->listen_interval;*/ network->listen_interval = 0x0A; network->rates_len = network->rates_ex_len = 0; network->last_associate = 0; network->ssid_len = 0; network->flags = 0; network->atim_window = 0; network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? 
0x3 : 0x0; network->berp_info_valid = false; network->broadcom_cap_exist = false; network->ralink_cap_exist = false; network->atheros_cap_exist = false; network->cisco_cap_exist = false; network->unknown_cap_exist = false; #ifdef THOMAS_TURBO network->Turbo_Enable = 0; #endif network->CountryIeLen = 0; memset(network->CountryIeBuf, 0, MAX_IE_LEN); //Initialize HT parameters //ieee80211_ht_initialize(&network->bssht); HTInitializeBssDesc(&network->bssht); if (stats->freq == IEEE80211_52GHZ_BAND) { /* for A band (No DS info) */ network->channel = stats->received_channel; } else network->flags |= NETWORK_HAS_CCK; network->wpa_ie_len = 0; network->rsn_ie_len = 0; if (ieee80211_parse_info_param (ieee,beacon->info_element, stats->len - sizeof(*beacon), network, stats)) return 1; network->mode = 0; if (stats->freq == IEEE80211_52GHZ_BAND) network->mode = IEEE_A; else { if (network->flags & NETWORK_HAS_OFDM) network->mode |= IEEE_G; if (network->flags & NETWORK_HAS_CCK) network->mode |= IEEE_B; } if (network->mode == 0) { IEEE80211_DEBUG_SCAN("Filtered out '%s (%pM)' " "network.\n", escape_essid(network->ssid, network->ssid_len), network->bssid); return 1; } if(network->bssht.bdSupportHT){ if(network->mode == IEEE_A) network->mode = IEEE_N_5G; else if(network->mode & (IEEE_G | IEEE_B)) network->mode = IEEE_N_24G; } if (ieee80211_is_empty_essid(network->ssid, network->ssid_len)) network->flags |= NETWORK_EMPTY_ESSID; stats->signal = 30 + (stats->SignalStrength * 70) / 100; //stats->signal = ieee80211_SignalStrengthTranslate(stats->signal); stats->noise = ieee80211_translate_todbm((u8)(100-stats->signal)) -25; memcpy(&network->stats, stats, sizeof(network->stats)); return 0; } static inline int is_same_network(struct ieee80211_network *src, struct ieee80211_network *dst, struct ieee80211_device* ieee) { /* A network is only a duplicate if the channel, BSSID, ESSID * and the capability field (in particular IBSS and BSS) all match. 
* We treat all <hidden> with the same BSSID and channel * as one network */ return //((src->ssid_len == dst->ssid_len) && (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && (src->channel == dst->channel) && !memcmp(src->bssid, dst->bssid, ETH_ALEN) && //!memcmp(src->ssid, dst->ssid, src->ssid_len) && (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && ((src->capability & WLAN_CAPABILITY_IBSS) == (dst->capability & WLAN_CAPABILITY_IBSS)) && ((src->capability & WLAN_CAPABILITY_BSS) == (dst->capability & WLAN_CAPABILITY_BSS))); } static inline void update_network(struct ieee80211_network *dst, struct ieee80211_network *src) { int qos_active; u8 old_param; memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats)); dst->capability = src->capability; memcpy(dst->rates, src->rates, src->rates_len); dst->rates_len = src->rates_len; memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len); dst->rates_ex_len = src->rates_ex_len; if(src->ssid_len > 0) { memset(dst->ssid, 0, dst->ssid_len); dst->ssid_len = src->ssid_len; memcpy(dst->ssid, src->ssid, src->ssid_len); } dst->mode = src->mode; dst->flags = src->flags; dst->time_stamp[0] = src->time_stamp[0]; dst->time_stamp[1] = src->time_stamp[1]; if (src->flags & NETWORK_HAS_ERP_VALUE) { dst->erp_value = src->erp_value; dst->berp_info_valid = src->berp_info_valid = true; } dst->beacon_interval = src->beacon_interval; dst->listen_interval = src->listen_interval; dst->atim_window = src->atim_window; dst->dtim_period = src->dtim_period; dst->dtim_data = src->dtim_data; dst->last_dtim_sta_time[0] = src->last_dtim_sta_time[0]; dst->last_dtim_sta_time[1] = src->last_dtim_sta_time[1]; memcpy(&dst->tim, &src->tim, sizeof(struct ieee80211_tim_parameters)); dst->bssht.bdSupportHT = src->bssht.bdSupportHT; dst->bssht.bdRT2RTAggregation = src->bssht.bdRT2RTAggregation; dst->bssht.bdHTCapLen= src->bssht.bdHTCapLen; 
memcpy(dst->bssht.bdHTCapBuf,src->bssht.bdHTCapBuf,src->bssht.bdHTCapLen); dst->bssht.bdHTInfoLen= src->bssht.bdHTInfoLen; memcpy(dst->bssht.bdHTInfoBuf,src->bssht.bdHTInfoBuf,src->bssht.bdHTInfoLen); dst->bssht.bdHTSpecVer = src->bssht.bdHTSpecVer; dst->bssht.bdRT2RTLongSlotTime = src->bssht.bdRT2RTLongSlotTime; dst->broadcom_cap_exist = src->broadcom_cap_exist; dst->ralink_cap_exist = src->ralink_cap_exist; dst->atheros_cap_exist = src->atheros_cap_exist; dst->cisco_cap_exist = src->cisco_cap_exist; dst->unknown_cap_exist = src->unknown_cap_exist; memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); dst->wpa_ie_len = src->wpa_ie_len; memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len); dst->rsn_ie_len = src->rsn_ie_len; dst->last_scanned = jiffies; /* qos related parameters */ //qos_active = src->qos_data.active; qos_active = dst->qos_data.active; //old_param = dst->qos_data.old_param_count; old_param = dst->qos_data.param_count; if(dst->flags & NETWORK_HAS_QOS_MASK) memcpy(&dst->qos_data, &src->qos_data, sizeof(struct ieee80211_qos_data)); else { dst->qos_data.supported = src->qos_data.supported; dst->qos_data.param_count = src->qos_data.param_count; } if(dst->qos_data.supported == 1) { dst->QoS_Enable = 1; if(dst->ssid_len) IEEE80211_DEBUG_QOS ("QoS the network %s is QoS supported\n", dst->ssid); else IEEE80211_DEBUG_QOS ("QoS the network is QoS supported\n"); } dst->qos_data.active = qos_active; dst->qos_data.old_param_count = old_param; /* dst->last_associate is not overwritten */ dst->wmm_info = src->wmm_info; //sure to exist in beacon or probe response frame. 
if(src->wmm_param[0].ac_aci_acm_aifsn|| \ src->wmm_param[1].ac_aci_acm_aifsn|| \ src->wmm_param[2].ac_aci_acm_aifsn|| \ src->wmm_param[3].ac_aci_acm_aifsn) { memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN); } //dst->QoS_Enable = src->QoS_Enable; #ifdef THOMAS_TURBO dst->Turbo_Enable = src->Turbo_Enable; #endif dst->CountryIeLen = src->CountryIeLen; memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen); //added by amy for LEAP dst->bWithAironetIE = src->bWithAironetIE; dst->bCkipSupported = src->bCkipSupported; memcpy(dst->CcxRmState,src->CcxRmState,2); dst->bCcxRmEnable = src->bCcxRmEnable; dst->MBssidMask = src->MBssidMask; dst->bMBssidValid = src->bMBssidValid; memcpy(dst->MBssid,src->MBssid,6); dst->bWithCcxVerNum = src->bWithCcxVerNum; dst->BssCcxVerNumber = src->BssCcxVerNumber; } static inline int is_beacon(__le16 fc) { return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON); } static inline void ieee80211_process_probe_response( struct ieee80211_device *ieee, struct ieee80211_probe_response *beacon, struct ieee80211_rx_stats *stats) { struct ieee80211_network network; struct ieee80211_network *target; struct ieee80211_network *oldest = NULL; #ifdef CONFIG_IEEE80211_DEBUG struct ieee80211_info_element *info_element = &beacon->info_element[0]; #endif unsigned long flags; short renew; //u8 wmm_info; memset(&network, 0, sizeof(struct ieee80211_network)); IEEE80211_DEBUG_SCAN( "'%s' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n", escape_essid(info_element->data, info_element->len), beacon->header.addr3, (beacon->capability & (1<<0xf)) ? '1' : '0', (beacon->capability & (1<<0xe)) ? '1' : '0', (beacon->capability & (1<<0xd)) ? '1' : '0', (beacon->capability & (1<<0xc)) ? '1' : '0', (beacon->capability & (1<<0xb)) ? '1' : '0', (beacon->capability & (1<<0xa)) ? '1' : '0', (beacon->capability & (1<<0x9)) ? '1' : '0', (beacon->capability & (1<<0x8)) ? '1' : '0', (beacon->capability & (1<<0x7)) ? 
'1' : '0', (beacon->capability & (1<<0x6)) ? '1' : '0', (beacon->capability & (1<<0x5)) ? '1' : '0', (beacon->capability & (1<<0x4)) ? '1' : '0', (beacon->capability & (1<<0x3)) ? '1' : '0', (beacon->capability & (1<<0x2)) ? '1' : '0', (beacon->capability & (1<<0x1)) ? '1' : '0', (beacon->capability & (1<<0x0)) ? '1' : '0'); if (ieee80211_network_init(ieee, beacon, &network, stats)) { IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n", escape_essid(info_element->data, info_element->len), beacon->header.addr3, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); return; } // For Asus EeePc request, // (1) if wireless adapter receive get any 802.11d country code in AP beacon, // wireless adapter should follow the country code. // (2) If there is no any country code in beacon, // then wireless adapter should do active scan from ch1~11 and // passive scan from ch12~14 if( !IsLegalChannel(ieee, network.channel) ) return; if(ieee->bGlobalDomain) { if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP) { // Case 1: Country code if(IS_COUNTRY_IE_VALID(ieee) ) { if( !IsLegalChannel(ieee, network.channel) ) { printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network.channel); return; } } // Case 2: No any country code. else { // Filter over channel ch12~14 if(network.channel > 11) { printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network.channel); return; } } } else { // Case 1: Country code if(IS_COUNTRY_IE_VALID(ieee) ) { if( !IsLegalChannel(ieee, network.channel) ) { printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network.channel); return; } } // Case 2: No any country code. 
else { // Filter over channel ch12~14 if(network.channel > 14) { printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n",network.channel); return; } } } } /* The network parsed correctly -- so now we scan our known networks * to see if we can find it in our list. * * NOTE: This search is definitely not optimized. Once its doing * the "right thing" we'll optimize it for efficiency if * necessary */ /* Search for this entry in the list and update it if it is * already there. */ spin_lock_irqsave(&ieee->lock, flags); if(is_same_network(&ieee->current_network, &network, ieee)) { update_network(&ieee->current_network, &network); if((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G) && ieee->current_network.berp_info_valid){ if(ieee->current_network.erp_value& ERP_UseProtection) ieee->current_network.buseprotection = true; else ieee->current_network.buseprotection = false; } if(is_beacon(beacon->header.frame_ctl)) { if(ieee->state == IEEE80211_LINKED) ieee->LinkDetectInfo.NumRecvBcnInPeriod++; } else //hidden AP network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & ieee->current_network.flags); } list_for_each_entry(target, &ieee->network_list, list) { if (is_same_network(target, &network, ieee)) break; if ((oldest == NULL) || (target->last_scanned < oldest->last_scanned)) oldest = target; } /* If we didn't find a match, then get a new network slot to initialize * with this beacon's information */ if (&target->list == &ieee->network_list) { if (list_empty(&ieee->network_free_list)) { /* If there are no more slots, expire the oldest */ list_del(&oldest->list); target = oldest; IEEE80211_DEBUG_SCAN("Expired '%s' (%pM) from " "network list.\n", escape_essid(target->ssid, target->ssid_len), target->bssid); } else { /* Otherwise just pull from the free list */ target = list_entry(ieee->network_free_list.next, struct ieee80211_network, list); list_del(ieee->network_free_list.next); } #ifdef 
CONFIG_IEEE80211_DEBUG IEEE80211_DEBUG_SCAN("Adding '%s' (%pM) via %s.\n", escape_essid(network.ssid, network.ssid_len), network.bssid, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); #endif memcpy(target, &network, sizeof(*target)); list_add_tail(&target->list, &ieee->network_list); if(ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) ieee80211_softmac_new_net(ieee,&network); } else { IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n", escape_essid(target->ssid, target->ssid_len), target->bssid, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); /* we have an entry and we are going to update it. But this entry may * be already expired. In this case we do the same as we found a new * net and call the new_net handler */ renew = !time_after(target->last_scanned + ieee->scan_age, jiffies); //YJ,add,080819,for hidden ap if(is_beacon(beacon->header.frame_ctl) == 0) network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & target->flags); //if(strncmp(network.ssid, "linksys-c",9) == 0) // printk("====>2 network.ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network.ssid, network.flags, target->ssid, target->flags); if(((network.flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \ && (((network.ssid_len > 0) && (strncmp(target->ssid, network.ssid, network.ssid_len)))\ ||((ieee->current_network.ssid_len == network.ssid_len)&&(strncmp(ieee->current_network.ssid, network.ssid, network.ssid_len) == 0)&&(ieee->state == IEEE80211_NOLINK)))) renew = 1; //YJ,add,080819,for hidden ap,end update_network(target, &network); if(renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)) ieee80211_softmac_new_net(ieee,&network); } spin_unlock_irqrestore(&ieee->lock, flags); if (is_beacon(beacon->header.frame_ctl)&&is_same_network(&ieee->current_network, &network, ieee)&&\ (ieee->state == IEEE80211_LINKED)) { if(ieee->handle_beacon != NULL) { 
ieee->handle_beacon(ieee->dev,beacon,&ieee->current_network);
		}
	}
}

/*
 * Entry point for received 802.11 management frames.
 * Only beacons and probe responses are handled; both share the
 * probe-response frame layout, so a single parser
 * (ieee80211_process_probe_response) serves both and updates the scan
 * list / current-network state.  All other management subtypes are
 * silently ignored here.
 */
void ieee80211_rx_mgt(struct ieee80211_device *ieee,
	struct ieee80211_hdr_4addr *header,
	struct ieee80211_rx_stats *stats)
{
	switch (WLAN_FC_GET_STYPE(header->frame_ctl)) {

	case IEEE80211_STYPE_BEACON:
		IEEE80211_DEBUG_MGMT("received BEACON (%d)\n",
			WLAN_FC_GET_STYPE(header->frame_ctl));
		IEEE80211_DEBUG_SCAN("Beacon\n");
		ieee80211_process_probe_response(
			ieee, (struct ieee80211_probe_response *)header, stats);
		break;

	case IEEE80211_STYPE_PROBE_RESP:
		IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n",
			WLAN_FC_GET_STYPE(header->frame_ctl));
		IEEE80211_DEBUG_SCAN("Probe response\n");
		ieee80211_process_probe_response(
			ieee, (struct ieee80211_probe_response *)header, stats);
		break;
	}
}
EXPORT_SYMBOL(ieee80211_rx_mgt);
EXPORT_SYMBOL(ieee80211_rx);
gpl-2.0
CM-Tab-S/android_kernel_samsung_klimtwifi
drivers/media/dvb/dvb-usb/lmedm04.c
4802
32424
/* DVB USB compliant linux driver for * * DM04/QQBOX DVB-S USB BOX LME2510C + SHARP:BS2F7HZ7395 * LME2510C + LG TDQY-P001F * LME2510C + BS2F7HZ0194 * LME2510 + LG TDQY-P001F * LME2510 + BS2F7HZ0194 * * MVB7395 (LME2510C+SHARP:BS2F7HZ7395) * SHARP:BS2F7HZ7395 = (STV0288+Sharp IX2505V) * * MV001F (LME2510+LGTDQY-P001F) * LG TDQY - P001F =(TDA8263 + TDA10086H) * * MVB0001F (LME2510C+LGTDQT-P001F) * * MV0194 (LME2510+SHARP:BS2F7HZ0194) * SHARP:BS2F7HZ0194 = (STV0299+IX2410) * * MVB0194 (LME2510C+SHARP0194) * * For firmware see Documentation/dvb/lmedm04.txt * * I2C addresses: * 0xd0 - STV0288 - Demodulator * 0xc0 - Sharp IX2505V - Tuner * -- * 0x1c - TDA10086 - Demodulator * 0xc0 - TDA8263 - Tuner * -- * 0xd0 - STV0299 - Demodulator * 0xc0 - IX2410 - Tuner * * * VID = 3344 PID LME2510=1122 LME2510C=1120 * * Copyright (C) 2010 Malcolm Priestley (tvboxspy@gmail.com) * LME2510(C)(C) Leaguerme (Shenzhen) MicroElectronics Co., Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License Version 2, as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * * see Documentation/dvb/README.dvb-usb for more information * * Known Issues : * LME2510: Non Intel USB chipsets fail to maintain High Speed on * Boot or Hot Plug. * * QQbox suffers from noise on LNB voltage. * * LME2510: SHARP:BS2F7HZ0194(MV0194) cannot cold reset and share system * with other tuners. After a cold reset streaming will not start. 
* */ #define DVB_USB_LOG_PREFIX "LME2510(C)" #include <linux/usb.h> #include <linux/usb/input.h> #include <media/rc-core.h> #include "dvb-usb.h" #include "lmedm04.h" #include "tda826x.h" #include "tda10086.h" #include "stv0288.h" #include "ix2505v.h" #include "stv0299.h" #include "dvb-pll.h" #include "z0194a.h" #include "m88rs2000.h" /* debug */ static int dvb_usb_lme2510_debug; #define l_dprintk(var, level, args...) do { \ if ((var >= level)) \ printk(KERN_DEBUG DVB_USB_LOG_PREFIX ": " args); \ } while (0) #define deb_info(level, args...) l_dprintk(dvb_usb_lme2510_debug, level, args) #define debug_data_snipet(level, name, p) \ deb_info(level, name" (%02x%02x%02x%02x%02x%02x%02x%02x)", \ *p, *(p+1), *(p+2), *(p+3), *(p+4), \ *(p+5), *(p+6), *(p+7)); module_param_named(debug, dvb_usb_lme2510_debug, int, 0644); MODULE_PARM_DESC(debug, "set debugging level (1=info (or-able))." DVB_USB_DEBUG_STATUS); static int dvb_usb_lme2510_firmware; module_param_named(firmware, dvb_usb_lme2510_firmware, int, 0644); MODULE_PARM_DESC(firmware, "set default firmware 0=Sharp7395 1=LG"); static int pid_filter; module_param_named(pid, pid_filter, int, 0644); MODULE_PARM_DESC(pid, "set default 0=default 1=off 2=on"); DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); #define TUNER_DEFAULT 0x0 #define TUNER_LG 0x1 #define TUNER_S7395 0x2 #define TUNER_S0194 0x3 #define TUNER_RS2000 0x4 struct lme2510_state { u8 id; u8 tuner_config; u8 signal_lock; u8 signal_level; u8 signal_sn; u8 time_key; u8 last_key; u8 key_timeout; u8 i2c_talk_onoff; u8 i2c_gate; u8 i2c_tuner_gate_w; u8 i2c_tuner_gate_r; u8 i2c_tuner_addr; u8 stream_on; u8 pid_size; u8 pid_off; void *buffer; struct urb *lme_urb; void *usb_buffer; }; static int lme2510_bulk_write(struct usb_device *dev, u8 *snd, int len, u8 pipe) { int ret, actual_l; ret = usb_bulk_msg(dev, usb_sndbulkpipe(dev, pipe), snd, len , &actual_l, 100); return ret; } static int lme2510_bulk_read(struct usb_device *dev, u8 *rev, int len, u8 pipe) { int ret, actual_l; 
ret = usb_bulk_msg(dev, usb_rcvbulkpipe(dev, pipe), rev, len , &actual_l, 200); return ret; } static int lme2510_usb_talk(struct dvb_usb_device *d, u8 *wbuf, int wlen, u8 *rbuf, int rlen) { struct lme2510_state *st = d->priv; u8 *buff; int ret = 0; if (st->usb_buffer == NULL) { st->usb_buffer = kmalloc(64, GFP_KERNEL); if (st->usb_buffer == NULL) { info("MEM Error no memory"); return -ENOMEM; } } buff = st->usb_buffer; ret = mutex_lock_interruptible(&d->usb_mutex); if (ret < 0) return -EAGAIN; /* the read/write capped at 64 */ memcpy(buff, wbuf, (wlen < 64) ? wlen : 64); ret |= lme2510_bulk_write(d->udev, buff, wlen , 0x01); ret |= lme2510_bulk_read(d->udev, buff, (rlen < 64) ? rlen : 64 , 0x01); if (rlen > 0) memcpy(rbuf, buff, rlen); mutex_unlock(&d->usb_mutex); return (ret < 0) ? -ENODEV : 0; } static int lme2510_stream_restart(struct dvb_usb_device *d) { struct lme2510_state *st = d->priv; u8 all_pids[] = LME_ALL_PIDS; u8 stream_on[] = LME_ST_ON_W; int ret; u8 rbuff[1]; if (st->pid_off) ret = lme2510_usb_talk(d, all_pids, sizeof(all_pids), rbuff, sizeof(rbuff)); /*Restart Stream Command*/ ret = lme2510_usb_talk(d, stream_on, sizeof(stream_on), rbuff, sizeof(rbuff)); return ret; } static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out) { struct lme2510_state *st = d->priv; static u8 pid_buff[] = LME_ZERO_PID; static u8 rbuf[1]; u8 pid_no = index * 2; u8 pid_len = pid_no + 2; int ret = 0; deb_info(1, "PID Setting Pid %04x", pid_out); if (st->pid_size == 0) ret |= lme2510_stream_restart(d); pid_buff[2] = pid_no; pid_buff[3] = (u8)pid_out & 0xff; pid_buff[4] = pid_no + 1; pid_buff[5] = (u8)(pid_out >> 8); if (pid_len > st->pid_size) st->pid_size = pid_len; pid_buff[7] = 0x80 + st->pid_size; ret |= lme2510_usb_talk(d, pid_buff , sizeof(pid_buff) , rbuf, sizeof(rbuf)); if (st->stream_on) ret |= lme2510_stream_restart(d); return ret; } static void lme2510_int_response(struct urb *lme_urb) { struct dvb_usb_adapter *adap = lme_urb->context; struct 
lme2510_state *st = adap->dev->priv; static u8 *ibuf, *rbuf; int i = 0, offset; u32 key; switch (lme_urb->status) { case 0: case -ETIMEDOUT: break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: return; default: info("Error %x", lme_urb->status); break; } rbuf = (u8 *) lme_urb->transfer_buffer; offset = ((lme_urb->actual_length/8) > 4) ? 4 : (lme_urb->actual_length/8) ; for (i = 0; i < offset; ++i) { ibuf = (u8 *)&rbuf[i*8]; deb_info(5, "INT O/S C =%02x C/O=%02x Type =%02x%02x", offset, i, ibuf[0], ibuf[1]); switch (ibuf[0]) { case 0xaa: debug_data_snipet(1, "INT Remote data snipet", ibuf); if ((ibuf[4] + ibuf[5]) == 0xff) { key = ibuf[5]; key += (ibuf[3] > 0) ? (ibuf[3] ^ 0xff) << 8 : 0; key += (ibuf[2] ^ 0xff) << 16; deb_info(1, "INT Key =%08x", key); if (adap->dev->rc_dev != NULL) rc_keydown(adap->dev->rc_dev, key, 0); } break; case 0xbb: switch (st->tuner_config) { case TUNER_LG: if (ibuf[2] > 0) st->signal_lock = ibuf[2]; st->signal_level = ibuf[4]; st->signal_sn = ibuf[3]; st->time_key = ibuf[7]; break; case TUNER_S7395: case TUNER_S0194: /* Tweak for earlier firmware*/ if (ibuf[1] == 0x03) { if (ibuf[2] > 1) st->signal_lock = ibuf[2]; st->signal_level = ibuf[3]; st->signal_sn = ibuf[4]; } else { st->signal_level = ibuf[4]; st->signal_sn = ibuf[5]; st->signal_lock = (st->signal_lock & 0xf7) + ((ibuf[2] & 0x01) << 0x03); } break; case TUNER_RS2000: if (ibuf[2] > 0) st->signal_lock = 0xff; else st->signal_lock = 0xf0; st->signal_level = ibuf[4]; st->signal_sn = ibuf[5]; st->time_key = ibuf[7]; default: break; } debug_data_snipet(5, "INT Remote data snipet in", ibuf); break; case 0xcc: debug_data_snipet(1, "INT Control data snipet", ibuf); break; default: debug_data_snipet(1, "INT Unknown data snipet", ibuf); break; } } usb_submit_urb(lme_urb, GFP_ATOMIC); } static int lme2510_int_read(struct dvb_usb_adapter *adap) { struct lme2510_state *lme_int = adap->dev->priv; lme_int->lme_urb = usb_alloc_urb(0, GFP_ATOMIC); if (lme_int->lme_urb == NULL) return -ENOMEM; 
lme_int->buffer = usb_alloc_coherent(adap->dev->udev, 128, GFP_ATOMIC, &lme_int->lme_urb->transfer_dma); if (lme_int->buffer == NULL) return -ENOMEM; usb_fill_int_urb(lme_int->lme_urb, adap->dev->udev, usb_rcvintpipe(adap->dev->udev, 0xa), lme_int->buffer, 128, lme2510_int_response, adap, 8); lme_int->lme_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_submit_urb(lme_int->lme_urb, GFP_ATOMIC); info("INT Interrupt Service Started"); return 0; } static int lme2510_pid_filter_ctrl(struct dvb_usb_adapter *adap, int onoff) { struct lme2510_state *st = adap->dev->priv; static u8 clear_pid_reg[] = LME_ALL_PIDS; static u8 rbuf[1]; int ret; deb_info(1, "PID Clearing Filter"); mutex_lock(&adap->dev->i2c_mutex); if (!onoff) { ret |= lme2510_usb_talk(adap->dev, clear_pid_reg, sizeof(clear_pid_reg), rbuf, sizeof(rbuf)); st->pid_off = true; } else st->pid_off = false; st->pid_size = 0; mutex_unlock(&adap->dev->i2c_mutex); return 0; } static int lme2510_pid_filter(struct dvb_usb_adapter *adap, int index, u16 pid, int onoff) { int ret = 0; deb_info(3, "%s PID=%04x Index=%04x onoff=%02x", __func__, pid, index, onoff); if (onoff) { mutex_lock(&adap->dev->i2c_mutex); ret |= lme2510_enable_pid(adap->dev, index, pid); mutex_unlock(&adap->dev->i2c_mutex); } return ret; } static int lme2510_return_status(struct usb_device *dev) { int ret = 0; u8 *data; data = kzalloc(10, GFP_KERNEL); if (!data) return -ENOMEM; ret |= usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x06, 0x80, 0x0302, 0x00, data, 0x0006, 200); info("Firmware Status: %x (%x)", ret , data[2]); ret = (ret < 0) ? 
-ENODEV : data[2]; kfree(data); return ret; } static int lme2510_msg(struct dvb_usb_device *d, u8 *wbuf, int wlen, u8 *rbuf, int rlen) { int ret = 0; struct lme2510_state *st = d->priv; if (st->i2c_talk_onoff == 1) { ret = lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen); switch (st->tuner_config) { case TUNER_LG: if (wbuf[2] == 0x1c) { if (wbuf[3] == 0x0e) { st->signal_lock = rbuf[1]; if ((st->stream_on & 1) && (st->signal_lock & 0x10)) { lme2510_stream_restart(d); st->i2c_talk_onoff = 0; } msleep(80); } } break; case TUNER_S7395: if (wbuf[2] == 0xd0) { if (wbuf[3] == 0x24) { st->signal_lock = rbuf[1]; if ((st->stream_on & 1) && (st->signal_lock & 0x8)) { lme2510_stream_restart(d); st->i2c_talk_onoff = 0; } } } break; case TUNER_S0194: if (wbuf[2] == 0xd0) { if (wbuf[3] == 0x1b) { st->signal_lock = rbuf[1]; if ((st->stream_on & 1) && (st->signal_lock & 0x8)) { lme2510_stream_restart(d); st->i2c_talk_onoff = 0; } } } break; case TUNER_RS2000: default: break; } } else { /* TODO rewrite this section */ switch (st->tuner_config) { case TUNER_LG: switch (wbuf[3]) { case 0x0e: rbuf[0] = 0x55; rbuf[1] = st->signal_lock; break; case 0x43: rbuf[0] = 0x55; rbuf[1] = st->signal_level; break; case 0x1c: rbuf[0] = 0x55; rbuf[1] = st->signal_sn; break; case 0x15: case 0x16: case 0x17: case 0x18: rbuf[0] = 0x55; rbuf[1] = 0x00; break; default: lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen); st->i2c_talk_onoff = 1; break; } break; case TUNER_S7395: switch (wbuf[3]) { case 0x10: rbuf[0] = 0x55; rbuf[1] = (st->signal_level & 0x80) ? 0 : (st->signal_level * 2); break; case 0x2d: rbuf[0] = 0x55; rbuf[1] = st->signal_sn; break; case 0x24: rbuf[0] = 0x55; rbuf[1] = st->signal_lock; break; case 0x2e: case 0x26: case 0x27: rbuf[0] = 0x55; rbuf[1] = 0x00; break; default: lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen); st->i2c_talk_onoff = 1; break; } break; case TUNER_S0194: switch (wbuf[3]) { case 0x18: rbuf[0] = 0x55; rbuf[1] = (st->signal_level & 0x80) ? 
0 : (st->signal_level * 2); break; case 0x24: rbuf[0] = 0x55; rbuf[1] = st->signal_sn; break; case 0x1b: rbuf[0] = 0x55; rbuf[1] = st->signal_lock; break; case 0x19: case 0x25: case 0x1e: case 0x1d: rbuf[0] = 0x55; rbuf[1] = 0x00; break; default: lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen); st->i2c_talk_onoff = 1; break; } break; case TUNER_RS2000: switch (wbuf[3]) { case 0x8c: rbuf[0] = 0x55; rbuf[1] = 0xff; if (st->last_key == st->time_key) { st->key_timeout++; if (st->key_timeout > 5) rbuf[1] = 0; } else st->key_timeout = 0; st->last_key = st->time_key; break; default: lme2510_usb_talk(d, wbuf, wlen, rbuf, rlen); st->i2c_talk_onoff = 1; break; } default: break; } deb_info(4, "I2C From Interrupt Message out(%02x) in(%02x)", wbuf[3], rbuf[1]); } return ret; } static int lme2510_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msg[], int num) { struct dvb_usb_device *d = i2c_get_adapdata(adap); struct lme2510_state *st = d->priv; static u8 obuf[64], ibuf[64]; int i, read, read_o; u16 len; u8 gate = st->i2c_gate; mutex_lock(&d->i2c_mutex); if (gate == 0) gate = 5; if (num > 2) warn("more than 2 i2c messages" "at a time is not handled yet. TODO."); for (i = 0; i < num; i++) { read_o = 1 & (msg[i].flags & I2C_M_RD); read = i+1 < num && (msg[i+1].flags & I2C_M_RD); read |= read_o; gate = (msg[i].addr == st->i2c_tuner_addr) ? (read) ? st->i2c_tuner_gate_r : st->i2c_tuner_gate_w : st->i2c_gate; obuf[0] = gate | (read << 7); if (gate == 5) obuf[1] = (read) ? 
2 : msg[i].len + 1; else obuf[1] = msg[i].len + read + 1; obuf[2] = msg[i].addr; if (read) { if (read_o) len = 3; else { memcpy(&obuf[3], msg[i].buf, msg[i].len); obuf[msg[i].len+3] = msg[i+1].len; len = msg[i].len+4; } } else { memcpy(&obuf[3], msg[i].buf, msg[i].len); len = msg[i].len+3; } if (lme2510_msg(d, obuf, len, ibuf, 64) < 0) { deb_info(1, "i2c transfer failed."); mutex_unlock(&d->i2c_mutex); return -EAGAIN; } if (read) { if (read_o) memcpy(msg[i].buf, &ibuf[1], msg[i].len); else { memcpy(msg[i+1].buf, &ibuf[1], msg[i+1].len); i++; } } } mutex_unlock(&d->i2c_mutex); return i; } static u32 lme2510_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm lme2510_i2c_algo = { .master_xfer = lme2510_i2c_xfer, .functionality = lme2510_i2c_func, }; /* Callbacks for DVB USB */ static int lme2510_identify_state(struct usb_device *udev, struct dvb_usb_device_properties *props, struct dvb_usb_device_description **desc, int *cold) { if (pid_filter != 2) props->adapter[0].fe[0].caps &= ~DVB_USB_ADAP_NEED_PID_FILTERING; *cold = 0; return 0; } static int lme2510_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) { struct lme2510_state *st = adap->dev->priv; static u8 clear_reg_3[] = LME_ALL_PIDS; static u8 rbuf[1]; int ret = 0, rlen = sizeof(rbuf); deb_info(1, "STM (%02x)", onoff); /* Streaming is started by FE_HAS_LOCK */ if (onoff == 1) st->stream_on = 1; else { deb_info(1, "STM Steam Off"); /* mutex is here only to avoid collision with I2C */ mutex_lock(&adap->dev->i2c_mutex); ret = lme2510_usb_talk(adap->dev, clear_reg_3, sizeof(clear_reg_3), rbuf, rlen); st->stream_on = 0; st->i2c_talk_onoff = 1; mutex_unlock(&adap->dev->i2c_mutex); } return (ret < 0) ? 
-ENODEV : 0; } static u8 check_sum(u8 *p, u8 len) { u8 sum = 0; while (len--) sum += *p++; return sum; } static int lme2510_download_firmware(struct usb_device *dev, const struct firmware *fw) { int ret = 0; u8 *data; u16 j, wlen, len_in, start, end; u8 packet_size, dlen, i; u8 *fw_data; packet_size = 0x31; len_in = 1; data = kzalloc(512, GFP_KERNEL); if (!data) { info("FRM Could not start Firmware Download (Buffer allocation failed)"); return -ENOMEM; } info("FRM Starting Firmware Download"); for (i = 1; i < 3; i++) { start = (i == 1) ? 0 : 512; end = (i == 1) ? 512 : fw->size; for (j = start; j < end; j += (packet_size+1)) { fw_data = (u8 *)(fw->data + j); if ((end - j) > packet_size) { data[0] = i; dlen = packet_size; } else { data[0] = i | 0x80; dlen = (u8)(end - j)-1; } data[1] = dlen; memcpy(&data[2], fw_data, dlen+1); wlen = (u8) dlen + 4; data[wlen-1] = check_sum(fw_data, dlen+1); deb_info(1, "Data S=%02x:E=%02x CS= %02x", data[3], data[dlen+2], data[dlen+3]); ret |= lme2510_bulk_write(dev, data, wlen, 1); ret |= lme2510_bulk_read(dev, data, len_in , 1); ret |= (data[0] == 0x88) ? 0 : -1; } } usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x06, 0x80, 0x0200, 0x00, data, 0x0109, 1000); data[0] = 0x8a; len_in = 1; msleep(2000); ret |= lme2510_bulk_write(dev, data , len_in, 1); /*Resetting*/ ret |= lme2510_bulk_read(dev, data, len_in, 1); msleep(400); if (ret < 0) info("FRM Firmware Download Failed (%04x)" , ret); else info("FRM Firmware Download Completed - Resetting Device"); kfree(data); return (ret < 0) ? 
-ENODEV : 0; } static void lme_coldreset(struct usb_device *dev) { int ret = 0, len_in; u8 data[512] = {0}; data[0] = 0x0a; len_in = 1; info("FRM Firmware Cold Reset"); ret |= lme2510_bulk_write(dev, data , len_in, 1); /*Cold Resetting*/ ret |= lme2510_bulk_read(dev, data, len_in, 1); return; } static int lme_firmware_switch(struct usb_device *udev, int cold) { const struct firmware *fw = NULL; const char fw_c_s7395[] = "dvb-usb-lme2510c-s7395.fw"; const char fw_c_lg[] = "dvb-usb-lme2510c-lg.fw"; const char fw_c_s0194[] = "dvb-usb-lme2510c-s0194.fw"; const char fw_c_rs2000[] = "dvb-usb-lme2510c-rs2000.fw"; const char fw_lg[] = "dvb-usb-lme2510-lg.fw"; const char fw_s0194[] = "dvb-usb-lme2510-s0194.fw"; const char *fw_lme; int ret = 0, cold_fw; cold = (cold > 0) ? (cold & 1) : 0; cold_fw = !cold; switch (le16_to_cpu(udev->descriptor.idProduct)) { case 0x1122: switch (dvb_usb_lme2510_firmware) { default: dvb_usb_lme2510_firmware = TUNER_S0194; case TUNER_S0194: fw_lme = fw_s0194; ret = request_firmware(&fw, fw_lme, &udev->dev); if (ret == 0) { cold = 0; break; } dvb_usb_lme2510_firmware = TUNER_LG; case TUNER_LG: fw_lme = fw_lg; ret = request_firmware(&fw, fw_lme, &udev->dev); if (ret == 0) break; info("FRM No Firmware Found - please install"); dvb_usb_lme2510_firmware = TUNER_DEFAULT; cold = 0; cold_fw = 0; break; } break; case 0x1120: switch (dvb_usb_lme2510_firmware) { default: dvb_usb_lme2510_firmware = TUNER_S7395; case TUNER_S7395: fw_lme = fw_c_s7395; ret = request_firmware(&fw, fw_lme, &udev->dev); if (ret == 0) { cold = 0; break; } dvb_usb_lme2510_firmware = TUNER_LG; case TUNER_LG: fw_lme = fw_c_lg; ret = request_firmware(&fw, fw_lme, &udev->dev); if (ret == 0) break; dvb_usb_lme2510_firmware = TUNER_S0194; case TUNER_S0194: fw_lme = fw_c_s0194; ret = request_firmware(&fw, fw_lme, &udev->dev); if (ret == 0) break; info("FRM No Firmware Found - please install"); dvb_usb_lme2510_firmware = TUNER_DEFAULT; cold = 0; cold_fw = 0; break; } break; case 0x22f0: 
fw_lme = fw_c_rs2000; ret = request_firmware(&fw, fw_lme, &udev->dev); dvb_usb_lme2510_firmware = TUNER_RS2000; break; default: fw_lme = fw_c_s7395; } if (cold_fw) { info("FRM Loading %s file", fw_lme); ret = lme2510_download_firmware(udev, fw); } release_firmware(fw); if (cold) { info("FRM Changing to %s firmware", fw_lme); lme_coldreset(udev); return -ENODEV; } return ret; } static int lme2510_kill_urb(struct usb_data_stream *stream) { int i; for (i = 0; i < stream->urbs_submitted; i++) { deb_info(3, "killing URB no. %d.", i); /* stop the URB */ usb_kill_urb(stream->urb_list[i]); } stream->urbs_submitted = 0; return 0; } static struct tda10086_config tda10086_config = { .demod_address = 0x1c, .invert = 0, .diseqc_tone = 1, .xtal_freq = TDA10086_XTAL_16M, }; static struct stv0288_config lme_config = { .demod_address = 0xd0, .min_delay_ms = 15, .inittab = s7395_inittab, }; static struct ix2505v_config lme_tuner = { .tuner_address = 0xc0, .min_delay_ms = 100, .tuner_gain = 0x0, .tuner_chargepump = 0x3, }; static struct stv0299_config sharp_z0194_config = { .demod_address = 0xd0, .inittab = sharp_z0194a_inittab, .mclk = 88000000UL, .invert = 0, .skip_reinit = 0, .lock_output = STV0299_LOCKOUTPUT_1, .volt13_op0_op1 = STV0299_VOLT13_OP1, .min_delay_ms = 100, .set_symbol_rate = sharp_z0194a_set_symbol_rate, }; static int dm04_rs2000_set_ts_param(struct dvb_frontend *fe, int caller) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct dvb_usb_device *d = adap->dev; struct lme2510_state *st = d->priv; mutex_lock(&d->i2c_mutex); if ((st->i2c_talk_onoff == 1) && (st->stream_on & 1)) { st->i2c_talk_onoff = 0; lme2510_stream_restart(d); } mutex_unlock(&d->i2c_mutex); return 0; } static struct m88rs2000_config m88rs2000_config = { .demod_addr = 0xd0, .tuner_addr = 0xc0, .set_ts_params = dm04_rs2000_set_ts_param, }; static int dm04_lme2510_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) { struct dvb_usb_adapter *adap = fe->dvb->priv; static u8 voltage_low[] = 
LME_VOLTAGE_L; static u8 voltage_high[] = LME_VOLTAGE_H; static u8 rbuf[1]; int ret = 0, len = 3, rlen = 1; mutex_lock(&adap->dev->i2c_mutex); switch (voltage) { case SEC_VOLTAGE_18: ret |= lme2510_usb_talk(adap->dev, voltage_high, len, rbuf, rlen); break; case SEC_VOLTAGE_OFF: case SEC_VOLTAGE_13: default: ret |= lme2510_usb_talk(adap->dev, voltage_low, len, rbuf, rlen); break; } mutex_unlock(&adap->dev->i2c_mutex); return (ret < 0) ? -ENODEV : 0; } static int dm04_rs2000_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct lme2510_state *st = adap->dev->priv; *strength = (u16)((u32)st->signal_level * 0xffff / 0x7f); return 0; } static int dm04_rs2000_read_snr(struct dvb_frontend *fe, u16 *snr) { struct dvb_usb_adapter *adap = fe->dvb->priv; struct lme2510_state *st = adap->dev->priv; *snr = (u16)((u32)st->signal_sn * 0xffff / 0xff); return 0; } static int lme_name(struct dvb_usb_adapter *adap) { struct lme2510_state *st = adap->dev->priv; const char *desc = adap->dev->desc->name; char *fe_name[] = {"", " LG TDQY-P001F", " SHARP:BS2F7HZ7395", " SHARP:BS2F7HZ0194", " RS2000"}; char *name = adap->fe_adap[0].fe->ops.info.name; strlcpy(name, desc, 128); strlcat(name, fe_name[st->tuner_config], 128); return 0; } static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap) { struct lme2510_state *st = adap->dev->priv; int ret = 0; st->i2c_talk_onoff = 1; switch (le16_to_cpu(adap->dev->udev->descriptor.idProduct)) { case 0x1122: case 0x1120: st->i2c_gate = 4; adap->fe_adap[0].fe = dvb_attach(tda10086_attach, &tda10086_config, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) { info("TUN Found Frontend TDA10086"); st->i2c_tuner_gate_w = 4; st->i2c_tuner_gate_r = 4; st->i2c_tuner_addr = 0xc0; st->tuner_config = TUNER_LG; if (dvb_usb_lme2510_firmware != TUNER_LG) { dvb_usb_lme2510_firmware = TUNER_LG; ret = lme_firmware_switch(adap->dev->udev, 1); } break; } st->i2c_gate = 4; adap->fe_adap[0].fe = 
dvb_attach(stv0299_attach, &sharp_z0194_config, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) { info("FE Found Stv0299"); st->i2c_tuner_gate_w = 4; st->i2c_tuner_gate_r = 5; st->i2c_tuner_addr = 0xc0; st->tuner_config = TUNER_S0194; if (dvb_usb_lme2510_firmware != TUNER_S0194) { dvb_usb_lme2510_firmware = TUNER_S0194; ret = lme_firmware_switch(adap->dev->udev, 1); } break; } st->i2c_gate = 5; adap->fe_adap[0].fe = dvb_attach(stv0288_attach, &lme_config, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) { info("FE Found Stv0288"); st->i2c_tuner_gate_w = 4; st->i2c_tuner_gate_r = 5; st->i2c_tuner_addr = 0xc0; st->tuner_config = TUNER_S7395; if (dvb_usb_lme2510_firmware != TUNER_S7395) { dvb_usb_lme2510_firmware = TUNER_S7395; ret = lme_firmware_switch(adap->dev->udev, 1); } break; } case 0x22f0: st->i2c_gate = 5; adap->fe_adap[0].fe = dvb_attach(m88rs2000_attach, &m88rs2000_config, &adap->dev->i2c_adap); if (adap->fe_adap[0].fe) { info("FE Found M88RS2000"); st->i2c_tuner_gate_w = 5; st->i2c_tuner_gate_r = 5; st->i2c_tuner_addr = 0xc0; st->tuner_config = TUNER_RS2000; adap->fe_adap[0].fe->ops.read_signal_strength = dm04_rs2000_read_signal_strength; adap->fe_adap[0].fe->ops.read_snr = dm04_rs2000_read_snr; } break; } if (adap->fe_adap[0].fe == NULL) { info("DM04/QQBOX Not Powered up or not Supported"); return -ENODEV; } if (ret) { if (adap->fe_adap[0].fe) { dvb_frontend_detach(adap->fe_adap[0].fe); adap->fe_adap[0].fe = NULL; } adap->dev->props.rc.core.rc_codes = NULL; return -ENODEV; } adap->fe_adap[0].fe->ops.set_voltage = dm04_lme2510_set_voltage; ret = lme_name(adap); return ret; } static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap) { struct lme2510_state *st = adap->dev->priv; char *tun_msg[] = {"", "TDA8263", "IX2505V", "DVB_PLL_OPERA", "RS2000"}; int ret = 0; switch (st->tuner_config) { case TUNER_LG: if (dvb_attach(tda826x_attach, adap->fe_adap[0].fe, 0xc0, &adap->dev->i2c_adap, 1)) ret = st->tuner_config; break; case TUNER_S7395: if 
(dvb_attach(ix2505v_attach , adap->fe_adap[0].fe, &lme_tuner, &adap->dev->i2c_adap)) ret = st->tuner_config; break; case TUNER_S0194: if (dvb_attach(dvb_pll_attach , adap->fe_adap[0].fe, 0xc0, &adap->dev->i2c_adap, DVB_PLL_OPERA1)) ret = st->tuner_config; break; case TUNER_RS2000: ret = st->tuner_config; break; default: break; } if (ret) info("TUN Found %s tuner", tun_msg[ret]); else { info("TUN No tuner found --- resetting device"); lme_coldreset(adap->dev->udev); return -ENODEV; } /* Start the Interrupt*/ ret = lme2510_int_read(adap); if (ret < 0) { info("INT Unable to start Interrupt Service"); return -ENODEV; } return ret; } static int lme2510_powerup(struct dvb_usb_device *d, int onoff) { struct lme2510_state *st = d->priv; static u8 lnb_on[] = LNB_ON; static u8 lnb_off[] = LNB_OFF; static u8 rbuf[1]; int ret = 0, len = 3, rlen = 1; mutex_lock(&d->i2c_mutex); if (onoff) ret = lme2510_usb_talk(d, lnb_on, len, rbuf, rlen); else ret = lme2510_usb_talk(d, lnb_off, len, rbuf, rlen); st->i2c_talk_onoff = 1; mutex_unlock(&d->i2c_mutex); return ret; } /* DVB USB Driver stuff */ static struct dvb_usb_device_properties lme2510_properties; static struct dvb_usb_device_properties lme2510c_properties; static int lme2510_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); int ret = 0; usb_reset_configuration(udev); usb_set_interface(udev, intf->cur_altsetting->desc.bInterfaceNumber, 1); if (udev->speed != USB_SPEED_HIGH) { ret = usb_reset_device(udev); info("DEV Failed to connect in HIGH SPEED mode"); return -ENODEV; } if (lme2510_return_status(udev) == 0x44) { lme_firmware_switch(udev, 0); return -ENODEV; } if (0 == dvb_usb_device_init(intf, &lme2510_properties, THIS_MODULE, NULL, adapter_nr)) { info("DEV registering device driver"); return 0; } if (0 == dvb_usb_device_init(intf, &lme2510c_properties, THIS_MODULE, NULL, adapter_nr)) { info("DEV registering device driver"); return 0; } info("DEV lme2510 
Error"); return -ENODEV; } static struct usb_device_id lme2510_table[] = { { USB_DEVICE(0x3344, 0x1122) }, /* LME2510 */ { USB_DEVICE(0x3344, 0x1120) }, /* LME2510C */ { USB_DEVICE(0x3344, 0x22f0) }, /* LME2510C RS2000 */ {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, lme2510_table); static struct dvb_usb_device_properties lme2510_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .size_of_priv = sizeof(struct lme2510_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER| DVB_USB_ADAP_NEED_PID_FILTERING| DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .streaming_ctrl = lme2510_streaming_ctrl, .pid_filter_count = 32, .pid_filter = lme2510_pid_filter, .pid_filter_ctrl = lme2510_pid_filter_ctrl, .frontend_attach = dm04_lme2510_frontend_attach, .tuner_attach = dm04_lme2510_tuner, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, .count = 10, .endpoint = 0x06, .u = { .bulk = { .buffersize = 4096, } } } }}, } }, .rc.core = { .protocol = RC_TYPE_NEC, .module_name = "LME2510 Remote Control", .allowed_protos = RC_TYPE_NEC, .rc_codes = RC_MAP_LME2510, }, .power_ctrl = lme2510_powerup, .identify_state = lme2510_identify_state, .i2c_algo = &lme2510_i2c_algo, .generic_bulk_ctrl_endpoint = 0, .num_device_descs = 1, .devices = { { "DM04_LME2510_DVB-S", { &lme2510_table[0], NULL }, }, } }; static struct dvb_usb_device_properties lme2510c_properties = { .caps = DVB_USB_IS_AN_I2C_ADAPTER, .size_of_priv = sizeof(struct lme2510_state), .num_adapters = 1, .adapter = { { .num_frontends = 1, .fe = {{ .caps = DVB_USB_ADAP_HAS_PID_FILTER| DVB_USB_ADAP_NEED_PID_FILTERING| DVB_USB_ADAP_PID_FILTER_CAN_BE_TURNED_OFF, .streaming_ctrl = lme2510_streaming_ctrl, .pid_filter_count = 32, .pid_filter = lme2510_pid_filter, .pid_filter_ctrl = lme2510_pid_filter_ctrl, .frontend_attach = dm04_lme2510_frontend_attach, .tuner_attach = dm04_lme2510_tuner, /* parameter for the MPEG2-data transfer */ .stream = { .type = USB_BULK, 
.count = 10, .endpoint = 0x8, .u = { .bulk = { .buffersize = 4096, } } } }}, } }, .rc.core = { .protocol = RC_TYPE_NEC, .module_name = "LME2510 Remote Control", .allowed_protos = RC_TYPE_NEC, .rc_codes = RC_MAP_LME2510, }, .power_ctrl = lme2510_powerup, .identify_state = lme2510_identify_state, .i2c_algo = &lme2510_i2c_algo, .generic_bulk_ctrl_endpoint = 0, .num_device_descs = 2, .devices = { { "DM04_LME2510C_DVB-S", { &lme2510_table[1], NULL }, }, { "DM04_LME2510C_DVB-S RS2000", { &lme2510_table[2], NULL }, }, } }; static void *lme2510_exit_int(struct dvb_usb_device *d) { struct lme2510_state *st = d->priv; struct dvb_usb_adapter *adap = &d->adapter[0]; void *buffer = NULL; if (adap != NULL) { lme2510_kill_urb(&adap->fe_adap[0].stream); adap->feedcount = 0; } if (st->usb_buffer != NULL) { st->i2c_talk_onoff = 1; st->signal_lock = 0; st->signal_level = 0; st->signal_sn = 0; buffer = st->usb_buffer; } if (st->lme_urb != NULL) { usb_kill_urb(st->lme_urb); usb_free_coherent(d->udev, 128, st->buffer, st->lme_urb->transfer_dma); info("Interrupt Service Stopped"); } return buffer; } static void lme2510_exit(struct usb_interface *intf) { struct dvb_usb_device *d = usb_get_intfdata(intf); void *usb_buffer; if (d != NULL) { usb_buffer = lme2510_exit_int(d); dvb_usb_device_exit(intf); if (usb_buffer != NULL) kfree(usb_buffer); } } static struct usb_driver lme2510_driver = { .name = "LME2510C_DVB-S", .probe = lme2510_probe, .disconnect = lme2510_exit, .id_table = lme2510_table, }; module_usb_driver(lme2510_driver); MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); MODULE_VERSION("1.99"); MODULE_LICENSE("GPL");
gpl-2.0
khanfrd/android_kernel_xiaomi_kenzo
drivers/firewire/init_ohci1394_dma.c
4802
9838
/*
 * init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers
 *
 * Copyright (C) 2006-2007 Bernhard Kaindl <bk@suse.de>
 *
 * Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c
 * this file has functions to:
 * - scan the PCI very early on boot for all OHCI 1394-compliant controllers
 * - reset and initialize them and make them join the IEEE1394 bus and
 * - enable physical DMA on them to allow remote debugging
 *
 * All code and data is marked as __init and __initdata, respective as
 * during boot, all OHCI1394 controllers may be claimed by the firewire
 * stack and at this point, this code should not touch them anymore.
 *
 * To use physical DMA after the initialization of the firewire stack,
 * be sure that the stack enables it and (re-)attach after the bus reset
 * which may be caused by the firewire stack initialization.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/pci.h>		/* for PCI defines */
#include <linux/string.h>

#include <asm/pci-direct.h>	/* for direct PCI config space access */
#include <asm/fixmap.h>

#include <linux/init_ohci1394_dma.h>
#include "ohci.h"

/* Set by the "ohci1394_dma=early" boot parameter (see setup_ohci1394_dma). */
int __initdata init_ohci1394_dma_early;

/* Minimal per-controller state: just the mapped MMIO register window. */
struct ohci {
	void __iomem *registers;
};

/* Write a 32-bit value to an OHCI-1394 MMIO register. */
static inline void reg_write(const struct ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

/* Read a 32-bit value from an OHCI-1394 MMIO register. */
static inline u32 reg_read(const struct ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

#define OHCI_LOOP_COUNT 100	/* Number of loops for reg read waits */

/*
 * Reads a PHY register of an OHCI-1394 controller.
 * Polls for completion (we have no interrupts this early in boot).
 */
static inline u8 __init get_phy_reg(struct ohci *ohci, u8 addr)
{
	int i;
	u32 r;

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		/* bit 31 of PhyControl signals the read result is valid */
		if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000)
			break;
		mdelay(1);
	}
	r = reg_read(ohci, OHCI1394_PhyControl);

	return (r & 0x00ff0000) >> 16;
}

/* Writes to a PHY register of an OHCI-1394 controller */
static inline void __init set_phy_reg(struct ohci *ohci, u8 addr, u8 data)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		/* the write-pending bit clears when the PHY write completed */
		if (!(reg_read(ohci, OHCI1394_PhyControl) & 0x00004000))
			break;
		mdelay(1);
	}
}

/* Resets an OHCI-1394 controller (for sane state before initialization) */
static inline void __init init_ohci1394_soft_reset(struct ohci *ohci)
{
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);

	for (i = 0; i < OHCI_LOOP_COUNT; i++) {
		/* softReset self-clears once the reset has completed */
		if (!(reg_read(ohci, OHCI1394_HCControlSet)
				& OHCI1394_HCControl_softReset))
			break;
		mdelay(1);
	}
}

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

/* Basic OHCI-1394 register and port initialization */
static inline void __init init_ohci1394_initialize(struct ohci *ohci)
{
	u32 bus_options;
	int num_ports, i;

	/* Put some defaults to these undefined bus options */
	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	bus_options |= 0x60000000;	/* Enable CMC and ISC */
	bus_options &= ~0x00ff0000;	/* XXX: Set cyc_clk_acc to zero for now */
	bus_options &= ~0x18000000;	/* Disable PMC and BMC */
	reg_write(ohci, OHCI1394_BusOptions, bus_options);

	/* Set the bus number */
	reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0);

	/* Enable posted writes */
	reg_write(ohci, OHCI1394_HCControlSet,
			OHCI1394_HCControl_postedWriteEnable);

	/* Clear link control register */
	reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff);

	/* enable phys */
	reg_write(ohci, OHCI1394_LinkControlSet,
			OHCI1394_LinkControl_rcvPhyPkt);

	/* Don't accept phy packets into AR request context */
	reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400);

	/* Clear the isochronous interrupt masks */
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff);

	/* Accept asynchronous transfer requests from all nodes for now */
	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	/* Specify asynchronous transfer retries */
	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES<<4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES<<8));

	/* We don't want hardware swapping */
	reg_write(ohci, OHCI1394_HCControlClear,
			OHCI1394_HCControl_noByteSwapData);

	/* Enable link */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable);

	/* If anything is connected to a port, make sure it is enabled */
	num_ports = get_phy_reg(ohci, 2) & 0xf;
	for (i = 0; i < num_ports; i++) {
		unsigned int status;

		/* PHY reg 7 selects the port; reg 8 holds its status */
		set_phy_reg(ohci, 7, i);
		status = get_phy_reg(ohci, 8);

		if (status & 0x20)
			set_phy_reg(ohci, 8, status & ~1);
	}
}

/**
 * init_ohci1394_wait_for_busresets - wait until bus resets are completed
 *
 * OHCI1394 initialization itself and any device going on- or offline
 * and any cable issue cause an IEEE1394 bus reset. The OHCI1394 spec
 * specifies that physical DMA is disabled on each bus reset and it
 * has to be enabled after each bus reset when needed. We resort
 * to polling here because on early boot, we have no interrupts.
 */
static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci)
{
	int i, events;

	/* fixed 9 x 200ms polling window; each pending busReset is acked */
	for (i = 0; i < 9; i++) {
		mdelay(200);
		events = reg_read(ohci, OHCI1394_IntEventSet);
		if (events & OHCI1394_busReset)
			reg_write(ohci, OHCI1394_IntEventClear,
					OHCI1394_busReset);
	}
}

/**
 * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging
 * This enables remote DMA access over IEEE1394 from every host for the low
 * 4GB of address space. DMA accesses above 4GB are not available currently.
 */
static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci)
{
	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 0xffffffff);
	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 0xffffffff);
	reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000);
}

/**
 * init_ohci1394_reset_and_init_dma - init controller and enable DMA
 * This initializes the given controller and enables physical DMA engine in it.
 */
static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci)
{
	/* Start off with a soft reset, clears everything to a sane state. */
	init_ohci1394_soft_reset(ohci);

	/* Accessing some registers without LPS enabled may cause lock up */
	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS);

	/* Disable and clear interrupts */
	reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff);
	reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff);

	mdelay(50); /* Wait 50msec to make sure we have full link enabled */

	init_ohci1394_initialize(ohci);

	/*
	 * The initialization causes at least one IEEE1394 bus reset. Enabling
	 * physical DMA only works *after* *all* bus resets have calmed down:
	 */
	init_ohci1394_wait_for_busresets(ohci);

	/* We had to wait and do this now if we want to debug early problems */
	init_ohci1394_enable_physical_dma(ohci);
}

/**
 * init_ohci1394_controller - Map the registers of the controller and init DMA
 * This maps the registers of the specified controller and initializes it
 */
static inline void __init init_ohci1394_controller(int num, int slot, int func)
{
	unsigned long ohci_base;
	struct ohci ohci;

	printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394"
			 " at %02x:%02x.%x\n", num, slot, func);

	/* BAR0 holds the controller's MMIO base (memory-space address) */
	ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2))
					      & PCI_BASE_ADDRESS_MEM_MASK;

	/* map via a fixmap slot — no ioremap this early in boot */
	set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base);

	ohci.registers = (void __iomem *)fix_to_virt(FIX_OHCI1394_BASE);

	init_ohci1394_reset_and_init_dma(&ohci);
}

/**
 * init_ohci1394_dma_on_all_controllers - scan for OHCI1394 controllers and init DMA on them
 * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them
 */
void __init init_ohci1394_dma_on_all_controllers(void)
{
	int num, slot, func;
	u32 class;

	if (!early_pci_allowed())
		return;

	/* Poor man's PCI discovery, the only thing we can do at early boot */
	for (num = 0; num < 32; num++) {
		for (slot = 0; slot < 32; slot++) {
			for (func = 0; func < 8; func++) {
				class = read_pci_config(num, slot, func,
							PCI_CLASS_REVISION);
				if (class == 0xffffffff)
					continue; /* No device at this func */

				if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
					continue; /* Not an OHCI-1394 device */

				init_ohci1394_controller(num, slot, func);
				break; /* Assume one controller per device */
			}
		}
	}
	printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n");
}

/**
 * setup_ohci1394_dma - enables early OHCI1394 DMA initialization
 */
static int __init setup_ohci1394_dma(char *opt)
{
	if (!strcmp(opt, "early"))
		init_ohci1394_dma_early = 1;
	return 0;
}

/* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization */
early_param("ohci1394_dma", setup_ohci1394_dma);
gpl-2.0
Hadramos/android_sony_xperiaz_kernel_sources
drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
4802
81943
/* * Original code based Host AP (software wireless LAN access point) driver * for Intersil Prism2/2.5/3 - hostap.o module, common routines * * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen * <jkmaline@cc.hut.fi> * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright (c) 2004, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by Andrea Merello <andreamrl@tiscali.it> A special thanks goes to Realtek for their support ! ******************************************************************************/ #include <linux/compiler.h> //#include <linux/config.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <asm/uaccess.h> #include <linux/ctype.h> #include "ieee80211.h" #include "dot11d.h" static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats) { struct ieee80211_hdr_4addr *hdr = (struct ieee80211_hdr_4addr *)skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); skb->dev = ieee->dev; skb_reset_mac_header(skb); skb_pull(skb, ieee80211_get_hdrlen(fc)); skb->pkt_type = PACKET_OTHERHOST; skb->protocol = __constant_htons(ETH_P_80211_RAW); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } /* Called only as a tasklet (software IRQ) */ static struct ieee80211_frag_entry * 
ieee80211_frag_cache_find(struct ieee80211_device *ieee, unsigned int seq, unsigned int frag, u8 tid,u8 *src, u8 *dst) { struct ieee80211_frag_entry *entry; int i; for (i = 0; i < IEEE80211_FRAG_CACHE_LEN; i++) { entry = &ieee->frag_cache[tid][i]; if (entry->skb != NULL && time_after(jiffies, entry->first_frag_time + 2 * HZ)) { IEEE80211_DEBUG_FRAG( "expiring fragment cache entry " "seq=%u last_frag=%u\n", entry->seq, entry->last_frag); dev_kfree_skb_any(entry->skb); entry->skb = NULL; } if (entry->skb != NULL && entry->seq == seq && (entry->last_frag + 1 == frag || frag == -1) && memcmp(entry->src_addr, src, ETH_ALEN) == 0 && memcmp(entry->dst_addr, dst, ETH_ALEN) == 0) return entry; } return NULL; } /* Called only as a tasklet (software IRQ) */ static struct sk_buff * ieee80211_frag_cache_get(struct ieee80211_device *ieee, struct ieee80211_hdr_4addr *hdr) { struct sk_buff *skb = NULL; u16 fc = le16_to_cpu(hdr->frame_ctl); u16 sc = le16_to_cpu(hdr->seq_ctl); unsigned int frag = WLAN_GET_SEQ_FRAG(sc); unsigned int seq = WLAN_GET_SEQ_SEQ(sc); struct ieee80211_frag_entry *entry; struct ieee80211_hdr_3addrqos *hdr_3addrqos; struct ieee80211_hdr_4addrqos *hdr_4addrqos; u8 tid; if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)hdr; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else if (IEEE80211_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)hdr; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else { tid = 0; } if (frag == 0) { /* Reserve enough space to fit maximum frame length */ skb = dev_alloc_skb(ieee->dev->mtu + sizeof(struct ieee80211_hdr_4addr) + 8 /* LLC */ + 2 /* alignment */ + 8 /* WEP */ + ETH_ALEN /* WDS */ + (IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */); if (skb == NULL) return NULL; entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]]; 
ieee->frag_next_idx[tid]++; if (ieee->frag_next_idx[tid] >= IEEE80211_FRAG_CACHE_LEN) ieee->frag_next_idx[tid] = 0; if (entry->skb != NULL) dev_kfree_skb_any(entry->skb); entry->first_frag_time = jiffies; entry->seq = seq; entry->last_frag = frag; entry->skb = skb; memcpy(entry->src_addr, hdr->addr2, ETH_ALEN); memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN); } else { /* received a fragment of a frame for which the head fragment * should have already been received */ entry = ieee80211_frag_cache_find(ieee, seq, frag, tid,hdr->addr2, hdr->addr1); if (entry != NULL) { entry->last_frag = frag; skb = entry->skb; } } return skb; } /* Called only as a tasklet (software IRQ) */ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee, struct ieee80211_hdr_4addr *hdr) { u16 fc = le16_to_cpu(hdr->frame_ctl); u16 sc = le16_to_cpu(hdr->seq_ctl); unsigned int seq = WLAN_GET_SEQ_SEQ(sc); struct ieee80211_frag_entry *entry; struct ieee80211_hdr_3addrqos *hdr_3addrqos; struct ieee80211_hdr_4addrqos *hdr_4addrqos; u8 tid; if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)hdr; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else if (IEEE80211_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct ieee80211_hdr_3addrqos *)hdr; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else { tid = 0; } entry = ieee80211_frag_cache_find(ieee, seq, -1, tid,hdr->addr2, hdr->addr1); if (entry == NULL) { IEEE80211_DEBUG_FRAG( "could not invalidate fragment cache " "entry (seq=%u)\n", seq); return -1; } entry->skb = NULL; return 0; } /* ieee80211_rx_frame_mgtmt * * Responsible for handling management control frames * * Called by ieee80211_rx */ static inline int ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats, u16 type, u16 stype) { /* On the struct stats definition 
	   there is written that
	 * this is not mandatory.... but seems that the probe
	 * response parser uses it */
	struct ieee80211_hdr_3addr * hdr = (struct ieee80211_hdr_3addr *)skb->data;

	rx_stats->len = skb->len;
	ieee80211_rx_mgt(ieee,(struct ieee80211_hdr_4addr *)skb->data,rx_stats);
	//if ((ieee->state == IEEE80211_LINKED) && (memcmp(hdr->addr3, ieee->current_network.bssid, ETH_ALEN)))
	if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN)))//use ADDR1 to perform address matching for Management frames
	{
		/* Not addressed to this interface: discard silently. */
		dev_kfree_skb_any(skb);
		return 0;
	}

	ieee80211_rx_frame_softmac(ieee, skb, rx_stats, type, stype);

	/* skb is always consumed here; return 0 maps to "handled". */
	dev_kfree_skb_any(skb);
	return 0;

	/* NOTE(review): everything below is unreachable - the unconditional
	 * return above is always taken, and the block is additionally fenced
	 * by NOT_YET.  Kept verbatim (hostap AP-mode code). */
#ifdef NOT_YET
	if (ieee->iw_mode == IW_MODE_MASTER) {
		printk(KERN_DEBUG "%s: Master mode not yet supported.\n",
		       ieee->dev->name);
		return 0;
/*
		hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *)
				     skb->data);*/
	}

	if (ieee->hostapd && type == IEEE80211_TYPE_MGMT) {
		if (stype == WLAN_FC_STYPE_BEACON &&
		    ieee->iw_mode == IW_MODE_MASTER) {
			struct sk_buff *skb2;
			/* Process beacon frames also in kernel driver to
			 * update STA(AP) table statistics */
			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (skb2)
				hostap_rx(skb2->dev, skb2, rx_stats);
		}

		/* send management frames to the user space daemon for
		 * processing */
		ieee->apdevstats.rx_packets++;
		ieee->apdevstats.rx_bytes += skb->len;
		prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT);
		return 0;
	}

	if (ieee->iw_mode == IW_MODE_MASTER) {
		if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) {
			printk(KERN_DEBUG "%s: unknown management frame "
			       "(type=0x%02x, stype=0x%02x) dropped\n",
			       skb->dev->name, type, stype);
			return -1;
		}

		hostap_rx(skb->dev, skb, rx_stats);
		return 0;
	}

	printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame "
	       "received in non-Host AP mode\n", skb->dev->name);
	return -1;
#endif
}

/* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */
/* Ethernet-II snap header (RFC1042 for most EtherTypes) */
static unsigned char rfc1042_header[] =
{ 0xaa, 0xaa, 0x03, 0x00,
  0x00, 0x00 };
/* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */
static unsigned char bridge_tunnel_header[] =
{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };
/* No encapsulation header if EtherType < 0x600 (=length) */

/* Called by ieee80211_rx_frame_decrypt */
/*
 * Return 1 when @skb is a unicast EAPOL (802.1X, ETH_P_PAE) frame
 * addressed to this interface, 0 otherwise.  Such frames are passed
 * through even when encryption is required so key handshakes can finish.
 */
static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee,
				    struct sk_buff *skb, size_t hdrlen)
{
	struct net_device *dev = ieee->dev;
	u16 fc, ethertype;
	struct ieee80211_hdr_4addr *hdr;
	u8 *pos;

	if (skb->len < 24)
		return 0;

	hdr = (struct ieee80211_hdr_4addr *) skb->data;
	fc = le16_to_cpu(hdr->frame_ctl);

	/* check that the frame is unicast frame to us */
	if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
	    IEEE80211_FCTL_TODS &&
	    memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 &&
	    memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) {
		/* ToDS frame with own addr BSSID and DA */
	} else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
		   IEEE80211_FCTL_FROMDS &&
		   memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) {
		/* FromDS frame with own addr as DA */
	} else
		return 0;

	if (skb->len < 24 + 8)
		return 0;

	/* check for port access entity Ethernet type */
//	pos = skb->data + 24;
	pos = skb->data + hdrlen;
	/* EtherType follows the 6-byte LLC/SNAP header, big-endian. */
	ethertype = (pos[6] << 8) | pos[7];
	if (ethertype == ETH_P_PAE)
		return 1;

	return 0;
}

/* Called only as a tasklet (software IRQ), by ieee80211_rx */
/*
 * Per-MPDU (per-fragment) decryption.  Returns the crypt ops' result
 * (>= 0) on success, 0 when no decryption is configured, -1 on failure.
 * NOTE(review): when hwsec_active is set only the bHwSec flag is marked
 * here and decrypt_mpdu is still invoked - presumably the crypt module
 * short-circuits for hardware-decrypted frames; confirm in the crypt ops.
 */
static inline int
ieee80211_rx_frame_decrypt(struct ieee80211_device* ieee, struct sk_buff *skb,
			   struct ieee80211_crypt_data *crypt)
{
	struct ieee80211_hdr_4addr *hdr;
	int res, hdrlen;

	if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL)
		return 0;
	if (ieee->hwsec_active)
	{
		cb_desc *tcb_desc = (cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE);
		tcb_desc->bHwSec = 1;
	}
	hdr = (struct ieee80211_hdr_4addr *) skb->data;
	hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));

#ifdef CONFIG_IEEE80211_CRYPT_TKIP
	/* During TKIP countermeasures all TKIP traffic is rejected. */
	if (ieee->tkip_countermeasures &&
	    strcmp(crypt->ops->name, "TKIP") == 0) {
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
			       "received packet from %pM\n",
			       ieee->dev->name, hdr->addr2);
		}
		return -1;
	}
#endif

	/* Pin the crypt context across the callback. */
	atomic_inc(&crypt->refcnt);
	res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		IEEE80211_DEBUG_DROP(
			"decryption failed (SA=%pM"
			") res=%d\n", hdr->addr2, res);
		/* -2 is the crypt modules' ICV-mismatch code. */
		if (res == -2)
			IEEE80211_DEBUG_DROP("Decryption failed ICV "
					     "mismatch (key %d)\n",
					     skb->data[hdrlen + 3] >> 6);
		ieee->ieee_stats.rx_discards_undecryptable++;
		return -1;
	}

	return res;
}

/* Called only as a tasklet (software IRQ), by ieee80211_rx */
/*
 * Per-MSDU (post-reassembly) decryption / MIC verification (e.g. the
 * TKIP Michael MIC).  Returns 0 on success, -1 on failure.
 */
static inline int
ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device* ieee, struct sk_buff *skb,
			     int keyidx, struct ieee80211_crypt_data *crypt)
{
	struct ieee80211_hdr_4addr *hdr;
	int res, hdrlen;

	if (crypt == NULL || crypt->ops->decrypt_msdu == NULL)
		return 0;
	if (ieee->hwsec_active)
	{
		cb_desc *tcb_desc = (cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE);
		tcb_desc->bHwSec = 1;
	}

	hdr = (struct ieee80211_hdr_4addr *) skb->data;
	hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl));

	atomic_inc(&crypt->refcnt);
	res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed"
		       " (SA=%pM keyidx=%d)\n",
		       ieee->dev->name, hdr->addr2, keyidx);
		return -1;
	}

	return 0;
}

/* this function is stolen from ipw2200 driver*/
#define IEEE_PACKET_RETRY_TIME (5*HZ)
/*
 * Retransmission-duplicate detection keyed on the last seen seq/frag
 * number per TID (infrastructure mode) or per source MAC hash bucket
 * (ad-hoc mode).  Returns 1 when the frame is a duplicate to be dropped,
 * 0 otherwise (including on allocation failure - fail open).
 */
static int is_duplicate_packet(struct ieee80211_device *ieee,
			       struct ieee80211_hdr_4addr *header)
{
	u16 fc = le16_to_cpu(header->frame_ctl);
	u16 sc = le16_to_cpu(header->seq_ctl);
	u16 seq = WLAN_GET_SEQ_SEQ(sc);
	u16 frag = WLAN_GET_SEQ_FRAG(sc);
	u16 *last_seq, *last_frag;
	unsigned long *last_time;
	struct ieee80211_hdr_3addrqos *hdr_3addrqos;
	struct ieee80211_hdr_4addrqos *hdr_4addrqos;
	u8 tid;

	//TO2DS and QoS
	if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) {
		hdr_4addrqos = (struct ieee80211_hdr_4addrqos *)header;
		tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID;
		tid = UP2AC(tid);
		tid ++;
	} else if(IEEE80211_QOS_HAS_SEQ(fc)) { //QoS
		hdr_3addrqos = (struct ieee80211_hdr_3addrqos*)header;
		tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID;
		tid = UP2AC(tid);
		tid ++;
	} else { // no QoS
		tid = 0;
	}

	switch (ieee->iw_mode) {
	case IW_MODE_ADHOC:
	{
		struct list_head *p;
		struct ieee_ibss_seq *entry = NULL;
		u8 *mac = header->addr2;
		/* Hash on the low byte of the source MAC. */
		int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE;
		//for (pos = (head)->next; pos != (head); pos = pos->next)
		//__list_for_each(p, &ieee->ibss_mac_hash[index]) {
		list_for_each(p, &ieee->ibss_mac_hash[index]) {
			entry = list_entry(p, struct ieee_ibss_seq, list);
			if (!memcmp(entry->mac, mac, ETH_ALEN))
				break;
		}
	//	if (memcmp(entry->mac, mac, ETH_ALEN)){
		if (p == &ieee->ibss_mac_hash[index]) {
			/* First frame from this peer: record it and accept. */
			entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC);
			if (!entry) {
				printk(KERN_WARNING "Cannot malloc new mac entry\n");
				return 0;
			}
			memcpy(entry->mac, mac, ETH_ALEN);
			entry->seq_num[tid] = seq;
			entry->frag_num[tid] = frag;
			entry->packet_time[tid] = jiffies;
			list_add(&entry->list, &ieee->ibss_mac_hash[index]);
			return 0;
		}
		last_seq = &entry->seq_num[tid];
		last_frag = &entry->frag_num[tid];
		last_time = &entry->packet_time[tid];
		break;
	}

	case IW_MODE_INFRA:
		last_seq = &ieee->last_rxseq_num[tid];
		last_frag = &ieee->last_rxfrag_num[tid];
		last_time = &ieee->last_packet_time[tid];
		break;
	default:
		return 0;
	}

//	if(tid != 0) {
//		printk(KERN_WARNING ":)))))))))))%x %x %x, fc(%x)\n", tid, *last_seq, seq, header->frame_ctl);
//	}
	/* Same sequence number seen within the retry window? */
	if ((*last_seq == seq) &&
	    time_after(*last_time + IEEE_PACKET_RETRY_TIME, jiffies)) {
		if (*last_frag == frag){
			//printk(KERN_WARNING "[1] go drop!\n");
			goto drop;
		}
		if (*last_frag + 1 != frag)
			/* out-of-order fragment */
			//printk(KERN_WARNING "[2] go drop!\n");
			goto drop;
	} else
		*last_seq = seq;

	*last_frag = frag;
	*last_time = jiffies;
	return 0;

drop:
//	BUG_ON(!(fc & IEEE80211_FCTL_RETRY));
//	printk("DUP\n");

	return 1;
}
bool
AddReorderEntry(
	PRX_TS_RECORD			pTS,
	PRX_REORDER_ENTRY		pReorderEntry
	)
{
	struct list_head *pList = &pTS->RxPendingPktList;

	/* Walk the pending list (kept ordered by SeqNum) to find the
	 * insertion point; a duplicate SeqNum is rejected with false. */
	while(pList->next != &pTS->RxPendingPktList)
	{
		if( SN_LESS(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) )
		{
			pList = pList->next;
		}
		else if( SN_EQUAL(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) )
		{
			return false;
		}
		else
		{
			break;
		}
	}

	/* Open-coded list insertion of the entry right after pList. */
	pReorderEntry->List.next = pList->next;
	pReorderEntry->List.next->prev = &pReorderEntry->List;
	pReorderEntry->List.prev = pList;
	pList->next = &pReorderEntry->List;

	return true;
}

/*
 * Hand an array of reassembled rx buffers (and their subframes) up to
 * the network stack, converting 802.11/LLC-SNAP framing into Ethernet
 * headers first.  Frees each ieee80211_rxb container; the subframe skbs
 * are consumed by netif_rx().
 */
void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb** prxbIndicateArray,u8  index)
{
	u8 i = 0 , j=0;
	u16 ethertype;
//	if(index > 1)
//		IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): hahahahhhh, We indicate packet from reorder list, index is %u\n",__FUNCTION__,index);
	for(j = 0; j<index; j++)
	{
//added by amy for reorder
		struct ieee80211_rxb* prxb = prxbIndicateArray[j];
		for(i = 0; i<prxb->nr_subframes; i++) {
			struct sk_buff *sub_skb = prxb->subframes[i];

		/* convert hdr + possible LLC headers into Ethernet header */
			ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
			if (sub_skb->len >= 8 &&
				((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
				  ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
				 memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
			/* remove RFC1042 or Bridge-Tunnel encapsulation and
			 * replace EtherType */
				skb_pull(sub_skb, SNAP_SIZE);
				memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
				memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
			} else {
				u16 len;
			/* Leave Ethernet header part of hdr and full payload */
				len = htons(sub_skb->len);
				memcpy(skb_push(sub_skb, 2), &len, 2);
				memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN);
				memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN);
			}
			//stats->rx_packets++;
			//stats->rx_bytes += sub_skb->len;

			/* Indicate
	   the packets to upper layer */
			if (sub_skb) {
				//printk("0skb_len(%d)\n", skb->len);
				sub_skb->protocol = eth_type_trans(sub_skb, ieee->dev);
				memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
				sub_skb->dev = ieee->dev;
				sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
				//skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */
				ieee->last_rx_ps_time = jiffies;
				//printk("1skb_len(%d)\n", skb->len);
				netif_rx(sub_skb);
			}
		}
		kfree(prxb);
		prxb = NULL;
	}
}

/*
 * 802.11n rx-reorder entry point for one reassembled buffer @prxb with
 * sequence number @SeqNum on traffic stream @pTS.  Either indicates the
 * buffer immediately (SeqNum == window start), queues it on the TS
 * pending list, or drops it (SeqNum behind the window).  Afterwards any
 * now-in-order pending buffers are flushed, and a timeout timer is armed
 * while holes remain so buffered packets cannot be delayed forever.
 */
void RxReorderIndicatePacket( struct ieee80211_device *ieee,
		struct ieee80211_rxb* prxb,
		PRX_TS_RECORD		pTS,
		u16			SeqNum)
{
	PRT_HIGH_THROUGHPUT	pHTInfo = ieee->pHTInfo;
	PRX_REORDER_ENTRY	pReorderEntry = NULL;
	struct ieee80211_rxb* prxbIndicateArray[REORDER_WIN_SIZE];
	u8			WinSize = pHTInfo->RxReorderWinSize;
	u16			WinEnd = (pTS->RxIndicateSeq + WinSize -1)%4096;
	u8			index = 0;
	bool			bMatchWinStart = false, bPktInBuf = false;
	IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->RxIndicateSeq is %d, WinSize is %d\n",__FUNCTION__,SeqNum,pTS->RxIndicateSeq,WinSize);
	/* Rx Reorder initialize condition.*/
	if(pTS->RxIndicateSeq == 0xffff) {
		pTS->RxIndicateSeq = SeqNum;
	}

	/* Drop out the packet which SeqNum is smaller than WinStart */
	if(SN_LESS(SeqNum, pTS->RxIndicateSeq)) {
		IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packet Drop! IndicateSeq: %d, NewSeq: %d\n",
				 pTS->RxIndicateSeq, SeqNum);
		pHTInfo->RxReorderDropCounter++;
		{
			int i;
			for(i =0; i < prxb->nr_subframes; i++) {
				dev_kfree_skb(prxb->subframes[i]);
			}
			kfree(prxb);
			prxb = NULL;
		}
		return;
	}

	/*
	 * Sliding window manipulation. Conditions includes:
	 * 1. Incoming SeqNum is equal to WinStart =>Window shift 1
	 * 2. Incoming SeqNum is larger than the WinEnd => Window shift N
	 */
	if(SN_EQUAL(SeqNum, pTS->RxIndicateSeq)) {
		pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096;
		bMatchWinStart = true;
	} else if(SN_LESS(WinEnd, SeqNum)) {
		/* Jump the window so SeqNum becomes the new WinEnd; the
		 * else-branch handles the modulo-4096 wraparound case. */
		if(SeqNum >= (WinSize - 1)) {
			pTS->RxIndicateSeq = SeqNum + 1 -WinSize;
		} else {
			pTS->RxIndicateSeq = 4095 - (WinSize - (SeqNum +1)) + 1;
		}
		IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Window Shift! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
	}

	/*
	 * Indication process.
	 * After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets
	 * with the SeqNum smaller than latest WinStart and buffer other packets.
	 */
	/* For Rx Reorder condition:
	 * 1. All packets with SeqNum smaller than WinStart => Indicate
	 * 2. All packets with SeqNum larger than or equal to WinStart => Buffer it.
	 */
	if(bMatchWinStart) {
		/* Current packet is going to be indicated.*/
		IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packets indication!! IndicateSeq: %d, NewSeq: %d\n",\
				pTS->RxIndicateSeq, SeqNum);
		prxbIndicateArray[0] = prxb;
//		printk("========================>%s(): SeqNum is %d\n",__FUNCTION__,SeqNum);
		index = 1;
	} else {
		/* Current packet is going to be inserted into pending list.*/
		//IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): We RX no ordered packed, insert to orderd list\n",__FUNCTION__);
		if(!list_empty(&ieee->RxReorder_Unused_List)) {
			pReorderEntry = (PRX_REORDER_ENTRY)list_entry(ieee->RxReorder_Unused_List.next,RX_REORDER_ENTRY,List);
			list_del_init(&pReorderEntry->List);

			/* Make a reorder entry and insert into a the packet list.*/
			pReorderEntry->SeqNum = SeqNum;
			pReorderEntry->prxb = prxb;
	//		IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pREorderEntry->SeqNum is %d\n",__FUNCTION__,pReorderEntry->SeqNum);

			if(!AddReorderEntry(pTS, pReorderEntry)) {
				/* Duplicate SeqNum already pending: recycle
				 * the entry and free the duplicate buffers. */
				IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): Duplicate packet is dropped!! IndicateSeq: %d, NewSeq: %d\n",
					__FUNCTION__, pTS->RxIndicateSeq, SeqNum);
				list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List);
				{
					int i;
					for(i =0; i < prxb->nr_subframes; i++) {
						dev_kfree_skb(prxb->subframes[i]);
					}
					kfree(prxb);
					prxb = NULL;
				}
			}
			else
			{
				IEEE80211_DEBUG(IEEE80211_DL_REORDER,
					 "Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
			}
		}
		else
		{
			/*
			 * Packets are dropped if there is not enough reorder entries.
			 * This part shall be modified!! We can just indicate all the
			 * packets in buffer and get reorder entries.
			 */
			IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): There is no reorder entry!! Packet is dropped!!\n");
			{
				int i;
				for(i =0; i < prxb->nr_subframes; i++) {
					dev_kfree_skb(prxb->subframes[i]);
				}
				kfree(prxb);
				prxb = NULL;
			}
		}
	}

	/* Check if there is any packet need indicate.*/
	while(!list_empty(&pTS->RxPendingPktList)) {
		IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): start RREORDER indicate\n",__FUNCTION__);
		pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List);
		if( SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) ||
				SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
		{
			/* This protect buffer from overflow. */
			if(index >= REORDER_WIN_SIZE) {
				IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Buffer overflow!! \n");
				bPktInBuf = true;
				break;
			}

			list_del_init(&pReorderEntry->List);

			if(SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq))
				pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096;

			IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packets indication!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum);
			prxbIndicateArray[index] = pReorderEntry->prxb;
		//	printk("========================>%s(): pReorderEntry->SeqNum is %d\n",__FUNCTION__,pReorderEntry->SeqNum);
			index++;

			list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List);
		} else {
			/* Head of pending list is still beyond the window:
			 * stop flushing; the timeout timer will cover it. */
			bPktInBuf = true;
			break;
		}
	}

	/* Handling pending timer. Set this timer to prevent from long time Rx buffering.*/
	if(index>0) {
		// Cancel previous pending timer.
	//	del_timer_sync(&pTS->RxPktPendingTimer);
		pTS->RxTimeoutIndicateSeq = 0xffff;

		// Indicate packets
		if(index>REORDER_WIN_SIZE){
			IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n");
			return;
		}
		ieee80211_indicate_packets(ieee, prxbIndicateArray, index);
	}

	if(bPktInBuf && pTS->RxTimeoutIndicateSeq==0xffff) {
		// Set new pending timer.
		IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): SET rx timeout timer\n", __FUNCTION__);
		pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq;
		if(timer_pending(&pTS->RxPktPendingTimer))
			del_timer_sync(&pTS->RxPktPendingTimer);
		pTS->RxPktPendingTimer.expires = jiffies + MSECS(pHTInfo->RxReorderPendingTime);
		add_timer(&pTS->RxPktPendingTimer);
	}
}

/*
 * Split a (possibly A-MSDU aggregated) frame into one or more subframe
 * skbs stored in @rxb.  Returns the number of subframes extracted, or 0
 * on parse error / empty payload (caller frees @rxb and any subframes
 * already queued).
 */
u8 parse_subframe(struct sk_buff *skb,
		  struct ieee80211_rx_stats *rx_stats,
		  struct ieee80211_rxb *rxb,u8* src,u8* dst)
{
	struct ieee80211_hdr_3addr  *hdr = (struct ieee80211_hdr_3addr* )skb->data;
	u16		fc = le16_to_cpu(hdr->frame_ctl);

	u16		LLCOffset= sizeof(struct ieee80211_hdr_3addr);
	u16		ChkLength;
	bool		bIsAggregateFrame = false;
	u16		nSubframe_Length;
	u8		nPadding_Length = 0;
	u16		SeqNum=0;

	struct sk_buff *sub_skb;
	u8             *data_ptr;
	/* just for debug purpose */
	SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl));

	/* The QoS-control "reserved" bit doubles as the A-MSDU flag here. */
	if((IEEE80211_QOS_HAS_SEQ(fc))&&\
	   (((frameqos *)(skb->data + IEEE80211_3ADDR_LEN))->field.reserved)) {
		bIsAggregateFrame = true;
	}

	if(IEEE80211_QOS_HAS_SEQ(fc)) {
		LLCOffset += 2;
	}

	if(rx_stats->bContainHTC) {
		LLCOffset += sHTCLng;
	}
	//printk("ChkLength = %d\n", LLCOffset);
	// Null packet, don't indicate it to upper layer
	ChkLength = LLCOffset;/* + (Frame_WEP(frame)!=0 ?Adapter->MgntInfo.SecurityInfo.EncryptionHeadOverhead:0);*/

	if( skb->len <= ChkLength ) {
		return 0;
	}

	skb_pull(skb, LLCOffset);

	if(!bIsAggregateFrame) {
		rxb->nr_subframes = 1;
#ifdef JOHN_NOCPY
		rxb->subframes[0] = skb;
#else
		rxb->subframes[0] = skb_copy(skb, GFP_ATOMIC);
#endif
memcpy(rxb->src,src,ETH_ALEN); memcpy(rxb->dst,dst,ETH_ALEN); //IEEE80211_DEBUG_DATA(IEEE80211_DL_RX,skb->data,skb->len); return 1; } else { rxb->nr_subframes = 0; memcpy(rxb->src,src,ETH_ALEN); memcpy(rxb->dst,dst,ETH_ALEN); while(skb->len > ETHERNET_HEADER_SIZE) { /* Offset 12 denote 2 mac address */ nSubframe_Length = *((u16*)(skb->data + 12)); //==m==>change the length order nSubframe_Length = (nSubframe_Length>>8) + (nSubframe_Length<<8); if(skb->len<(ETHERNET_HEADER_SIZE + nSubframe_Length)) { printk("%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",\ __FUNCTION__,rxb->nr_subframes); printk("%s: A-MSDU parse error!! Subframe Length: %d\n",__FUNCTION__, nSubframe_Length); printk("nRemain_Length is %d and nSubframe_Length is : %d\n",skb->len,nSubframe_Length); printk("The Packet SeqNum is %d\n",SeqNum); return 0; } /* move the data point to data content */ skb_pull(skb, ETHERNET_HEADER_SIZE); #ifdef JOHN_NOCPY sub_skb = skb_clone(skb, GFP_ATOMIC); sub_skb->len = nSubframe_Length; sub_skb->tail = sub_skb->data + nSubframe_Length; #else /* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(nSubframe_Length + 12); skb_reserve(sub_skb, 12); data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length); memcpy(data_ptr,skb->data,nSubframe_Length); #endif rxb->subframes[rxb->nr_subframes++] = sub_skb; if(rxb->nr_subframes >= MAX_SUBFRAME_COUNT) { IEEE80211_DEBUG_RX("ParseSubframe(): Too many Subframes! Packets dropped!\n"); break; } skb_pull(skb,nSubframe_Length); if(skb->len != 0) { nPadding_Length = 4 - ((nSubframe_Length + ETHERNET_HEADER_SIZE) % 4); if(nPadding_Length == 4) { nPadding_Length = 0; } if(skb->len < nPadding_Length) { return 0; } skb_pull(skb,nPadding_Length); } } #ifdef JOHN_NOCPY dev_kfree_skb(skb); #endif //{just for debug added by david //printk("AMSDU::rxb->nr_subframes = %d\n",rxb->nr_subframes); //} return rxb->nr_subframes; } } /* All received frames are sent to this function. 
   @skb contains the frame in
 * IEEE 802.11 format, i.e., in the format it was sent over air.
 * This function is called only as a tasklet (software IRQ). */
/*
 * Main receive path: validates the header, filters duplicates, performs
 * (software) decryption and defragmentation, splits A-MSDUs and finally
 * hands the payload(s) to the network stack - directly, or through the
 * 802.11n rx-reorder machinery when enabled.
 * Returns 1 when the skb was consumed, 0 when the caller still owns it.
 */
int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb,
		 struct ieee80211_rx_stats *rx_stats)
{
	struct net_device *dev = ieee->dev;
	struct ieee80211_hdr_4addr *hdr;
	//struct ieee80211_hdr_3addrqos *hdr;
	size_t hdrlen;
	u16 fc, type, stype, sc;
	struct net_device_stats *stats;
	unsigned int frag;
	u8 *payload;
	u16 ethertype;
	//added by amy for reorder
	u8	TID = 0;
	u16	SeqNum = 0;
	PRX_TS_RECORD pTS = NULL;
	//bool bIsAggregateFrame = false;
	//added by amy for reorder
#ifdef NOT_YET
	struct net_device *wds = NULL;
	struct sk_buff *skb2 = NULL;
	struct net_device *wds = NULL;
	int frame_authorized = 0;
	int from_assoc_ap = 0;
	void *sta = NULL;
#endif
//	u16 qos_ctl = 0;
	u8 dst[ETH_ALEN];
	u8 src[ETH_ALEN];
	u8 bssid[ETH_ALEN];
	struct ieee80211_crypt_data *crypt = NULL;
	int keyidx = 0;

	int i;
	struct ieee80211_rxb* rxb = NULL;
	// cheat the hdr type
	hdr = (struct ieee80211_hdr_4addr *)skb->data;
	stats = &ieee->stats;

	if (skb->len < 10) {
		printk(KERN_INFO "%s: SKB length < 10\n", dev->name);
		goto rx_dropped;
	}

	fc = le16_to_cpu(hdr->frame_ctl);
	type = WLAN_FC_GET_TYPE(fc);
	stype = WLAN_FC_GET_STYPE(fc);
	sc = le16_to_cpu(hdr->seq_ctl);

	frag = WLAN_GET_SEQ_FRAG(sc);
	hdrlen = ieee80211_get_hdrlen(fc);

	/* An HT Control (+HTC) field lengthens the MAC header by 4 bytes. */
	if(HTCCheck(ieee, skb->data))
	{
		if(net_ratelimit())
			printk("find HTCControl\n");
		hdrlen += 4;
		rx_stats->bContainHTC = 1;
	}

	//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
#ifdef NOT_YET
#if WIRELESS_EXT > 15
	/* Put this code here so that we avoid duplicating it in all
	 * Rx paths. - Jean II */
#ifdef IW_WIRELESS_SPY		/* defined in iw_handler.h */
	/* If spy monitoring on */
	if (iface->spy_data.spy_number > 0) {
		struct iw_quality wstats;
		wstats.level = rx_stats->rssi;
		wstats.noise = rx_stats->noise;
		wstats.updated = 6;	/* No qual value */
		/* Update spy records */
		wireless_spy_update(dev, hdr->addr2, &wstats);
	}
#endif /* IW_WIRELESS_SPY */
#endif /* WIRELESS_EXT > 15 */
	hostap_update_rx_stats(local->ap, hdr, rx_stats);
#endif

#if WIRELESS_EXT > 15
	/* Monitor mode: deliver the raw 802.11 frame and stop. */
	if (ieee->iw_mode == IW_MODE_MONITOR) {
		ieee80211_monitor_rx(ieee, skb, rx_stats);
		stats->rx_packets++;
		stats->rx_bytes += skb->len;
		return 1;
	}
#endif

	if (ieee->host_decrypt) {
		int idx = 0;
		/* Key index lives in bits 7:6 of the byte after the IV. */
		if (skb->len >= hdrlen + 3)
			idx = skb->data[hdrlen + 3] >> 6;
		crypt = ieee->crypt[idx];
#ifdef NOT_YET
		sta = NULL;

		/* Use station specific key to override default keys if the
		 * receiver address is a unicast address ("individual RA"). If
		 * bcrx_sta_key parameter is set, station specific key is used
		 * even with broad/multicast targets (this is against IEEE
		 * 802.11, but makes it easier to use different keys with
		 * stations that do not support WEP key mapping). */

		if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key)
			(void) hostap_handle_sta_crypto(local, hdr, &crypt,
							&sta);
#endif

		/* allow NULL decrypt to indicate an station specific override
		 * for default encryption */
		if (crypt && (crypt->ops == NULL ||
			      crypt->ops->decrypt_mpdu == NULL))
			crypt = NULL;

		if (!crypt && (fc & IEEE80211_FCTL_WEP)) {
			/* This seems to be triggered by some (multicast?)
			 * frames from other than current BSS, so just drop the
			 * frames silently instead of filling system log with
			 * these reports. */
			IEEE80211_DEBUG_DROP("Decryption failed (not set)"
					     " (SA=%pM)\n",
					     hdr->addr2);
			ieee->ieee_stats.rx_discards_undecryptable++;
			goto rx_dropped;
		}
	}

	if (skb->len < IEEE80211_DATA_HDR3_LEN)
		goto rx_dropped;

	// if QoS enabled, should check the sequence for each of the AC
	if( (ieee->pHTInfo->bCurRxReorderEnable == false) || !ieee->current_network.qos_data.active|| !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)){
		if (is_duplicate_packet(ieee, hdr))
		goto rx_dropped;

	}
	else
	{
		/* QoS path: duplicate detection is per traffic stream. */
		PRX_TS_RECORD pRxTS = NULL;
			//IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): QOS ENABLE AND RECEIVE QOS DATA , we will get Ts, tid:%d\n",__FUNCTION__, tid);
		if(GetTs(
				ieee,
				(PTS_COMMON_INFO*) &pRxTS,
				hdr->addr2,
				(u8)Frame_QoSTID((u8*)(skb->data)),
				RX_DIR,
				true))
		{

		//	IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->RxLastFragNum is %d,frag is %d,pRxTS->RxLastSeqNum is %d,seq is %d\n",__FUNCTION__,pRxTS->RxLastFragNum,frag,pRxTS->RxLastSeqNum,WLAN_GET_SEQ_SEQ(sc));
			/* Retry bit set and same seq/frag as last MPDU on
			 * this TS => retransmitted duplicate, drop it. */
			if( 	(fc & (1<<11))  &&
				(frag == pRxTS->RxLastFragNum) &&
				(WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum)	)
			{
				goto rx_dropped;
			}
			else
			{
				pRxTS->RxLastFragNum = frag;
				pRxTS->RxLastSeqNum = WLAN_GET_SEQ_SEQ(sc);
			}
		}
		else
		{
			IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s(): No TS!! Skip the check!!\n",__FUNCTION__);
			goto rx_dropped;
		}
	}

	if (type == IEEE80211_FTYPE_MGMT) {
		//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
		if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
			goto rx_dropped;
		else
			goto rx_exit;
	}

	/* Data frame - extract src/dst addresses */
	switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) {
	case IEEE80211_FCTL_FROMDS:
		memcpy(dst, hdr->addr1, ETH_ALEN);
		memcpy(src, hdr->addr3, ETH_ALEN);
		memcpy(bssid, hdr->addr2, ETH_ALEN);
		break;
	case IEEE80211_FCTL_TODS:
		memcpy(dst, hdr->addr3, ETH_ALEN);
		memcpy(src, hdr->addr2, ETH_ALEN);
		memcpy(bssid, hdr->addr1, ETH_ALEN);
		break;
	case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS:
		/* 4-address (WDS) frames carry addr4; re-check the length. */
		if (skb->len < IEEE80211_DATA_HDR4_LEN)
			goto rx_dropped;
		memcpy(dst, hdr->addr3, ETH_ALEN);
		memcpy(src, hdr->addr4, ETH_ALEN);
		memcpy(bssid, ieee->current_network.bssid, ETH_ALEN);
		break;
	case 0:
		memcpy(dst, hdr->addr1, ETH_ALEN);
		memcpy(src, hdr->addr2, ETH_ALEN);
		memcpy(bssid, hdr->addr3, ETH_ALEN);
		break;
	}

#ifdef NOT_YET
	if (hostap_rx_frame_wds(ieee, hdr, fc, &wds))
		goto rx_dropped;
	if (wds) {
		skb->dev = dev = wds;
		stats = hostap_get_stats(dev);
	}

	if (ieee->iw_mode == IW_MODE_MASTER && !wds &&
	    (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) ==
	    IEEE80211_FCTL_FROMDS &&
	    ieee->stadev &&
	    memcmp(hdr->addr2, ieee->assoc_ap_addr, ETH_ALEN) == 0) {
		/* Frame from BSSID of the AP for which we are a client */
		skb->dev = dev = ieee->stadev;
		stats = hostap_get_stats(dev);
		from_assoc_ap = 1;
	}
#endif

	dev->last_rx = jiffies;

#ifdef NOT_YET
	if ((ieee->iw_mode == IW_MODE_MASTER ||
	     ieee->iw_mode == IW_MODE_REPEAT) &&
	    !from_assoc_ap) {
		switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats,
					     wds != NULL)) {
		case AP_RX_CONTINUE_NOT_AUTHORIZED:
			frame_authorized = 0;
			break;
		case AP_RX_CONTINUE:
			frame_authorized = 1;
			break;
		case AP_RX_DROP:
			goto rx_dropped;
		case AP_RX_EXIT:
			goto rx_exit;
		}
	}
#endif
	//IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len);
	/* Nullfunc frames may have PS-bit set, so they must be passed to
	 * hostap_handle_sta_rx() before being dropped here. */
	if (stype != IEEE80211_STYPE_DATA &&
	    stype != IEEE80211_STYPE_DATA_CFACK &&
	    stype != IEEE80211_STYPE_DATA_CFPOLL &&
	    stype != IEEE80211_STYPE_DATA_CFACKPOLL&&
	    stype != IEEE80211_STYPE_QOS_DATA//add by David,2006.8.4
	    ) {
		if (stype != IEEE80211_STYPE_NULLFUNC)
			IEEE80211_DEBUG_DROP(
				"RX: dropped data frame "
				"with no data (type=0x%02x, "
				"subtype=0x%02x, len=%d)\n",
				type, stype, skb->len);
		goto rx_dropped;
	}
	/* Frames from a foreign BSS are discarded. */
	if (memcmp(bssid, ieee->current_network.bssid, ETH_ALEN))
		goto rx_dropped;

	/* skb: hdr + (possibly fragmented, possibly encrypted) payload */

	if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
	    (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0)
	{
		printk("decrypt frame error\n");
		goto rx_dropped;
	}


	hdr = (struct ieee80211_hdr_4addr *) skb->data;

	/* skb: hdr + (possibly fragmented) plaintext payload */
	// PR: FIXME: hostap has additional conditions in the "if" below:
	// ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
	if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) {
		int flen;
		struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr);
		IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);

		if (!frag_skb) {
			IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG,
					"Rx cannot get skb from fragment "
					"cache (morefrag=%d seq=%u frag=%u)\n",
					(fc & IEEE80211_FCTL_MOREFRAGS) != 0,
					WLAN_GET_SEQ_SEQ(sc), frag);
			goto rx_dropped;
		}
		flen = skb->len;
		/* Non-first fragments contribute payload only, no header. */
		if (frag != 0)
			flen -= hdrlen;

		if (frag_skb->tail + flen > frag_skb->end) {
			printk(KERN_WARNING "%s: host decrypted and "
			       "reassembled frame did not fit skb\n",
			       dev->name);
			ieee80211_frag_cache_invalidate(ieee, hdr);
			goto rx_dropped;
		}

		if (frag == 0) {
			/* copy first fragment (including full headers) into
			 * beginning of the fragment cache skb */
			memcpy(skb_put(frag_skb, flen), skb->data, flen);
		} else {
			/* append frame payload to the end of the fragment
			 * cache skb */
			memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
			       flen);
		}
		dev_kfree_skb_any(skb);
		skb = NULL;

		if (fc & IEEE80211_FCTL_MOREFRAGS) {
			/* more fragments expected - leave the skb in fragment
			 * cache for now; it will be delivered to upper layers
			 * after all fragments have been received */
			goto rx_exit;
		}

		/* this was the last fragment and the frame will be
		 * delivered, so remove skb from fragment cache */
		skb = frag_skb;
		hdr = (struct ieee80211_hdr_4addr *) skb->data;
		ieee80211_frag_cache_invalidate(ieee, hdr);
	}

	/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
	 * encrypted/authenticated */
	if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) &&
	    ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt))
	{
		printk("==>decrypt msdu error\n");
		goto rx_dropped;
	}

	//added by amy for AP roaming
	ieee->LinkDetectInfo.NumRecvDataInPeriod++;
	ieee->LinkDetectInfo.NumRxOkInPeriod++;

	hdr = (struct ieee80211_hdr_4addr *) skb->data;
	if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep) {
		if (/*ieee->ieee802_1x &&*/
		    ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
#ifdef CONFIG_IEEE80211_DEBUG
			/* pass unencrypted EAPOL frames even if encryption is
			 * configured */
			struct eapol *eap = (struct eapol *)(skb->data +
				24);
			IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
						eap_get_type(eap->type));
#endif
		} else {
			IEEE80211_DEBUG_DROP(
				"encryption configured, but RX "
				"frame not encrypted (SA=%pM)\n",
				hdr->addr2);
			goto rx_dropped;
		}
	}

#ifdef CONFIG_IEEE80211_DEBUG
	if (crypt && !(fc & IEEE80211_FCTL_WEP) &&
	    ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
		struct eapol *eap = (struct eapol *)(skb->data +
			24);
		IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
					eap_get_type(eap->type));
	}
#endif

	if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep &&
	    !ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
		IEEE80211_DEBUG_DROP(
			"dropped unencrypted RX data "
			"frame from %pM"
			" (drop_unencrypted=1)\n",
			hdr->addr2);
		goto rx_dropped;
	}
/*
	if(ieee80211_is_eapol_frame(ieee, skb, hdrlen)) {
		printk(KERN_WARNING "RX: IEEE802.1X EPAOL frame!\n");
	}
*/
//added by amy for reorder
	/* Fetch the TS record for unicast QoS data so the reorder engine
	 * below can be used; pTS stays NULL otherwise. */
	if(ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
		&& !is_multicast_ether_addr(hdr->addr1) && !is_broadcast_ether_addr(hdr->addr1))
	{
		TID = Frame_QoSTID(skb->data);
		SeqNum = WLAN_GET_SEQ_SEQ(sc);
		GetTs(ieee,(PTS_COMMON_INFO*) &pTS,hdr->addr2,TID,RX_DIR,true);
		if(TID !=0 && TID !=3)
		{
			ieee->bis_any_nonbepkts = true;
		}
	}
//added by amy for reorder
	/* skb: hdr + (possible reassembled) full plaintext payload */
	payload = skb->data + hdrlen;
	//ethertype = (payload[6] << 8) | payload[7];
	rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC);
	if(rxb == NULL)
	{
		IEEE80211_DEBUG(IEEE80211_DL_ERR,"%s(): kmalloc rxb error\n",__FUNCTION__);
		goto rx_dropped;
	}
	/* to parse amsdu packets */
	/* qos data packets & reserved bit is 1 */
	if(parse_subframe(skb,rx_stats,rxb,src,dst) == 0) {
		/* only to free rxb, and not submit the packets to upper layer */
		for(i =0; i < rxb->nr_subframes; i++) {
			dev_kfree_skb(rxb->subframes[i]);
		}
		kfree(rxb);
		rxb = NULL;
		goto rx_dropped;
	}

//added by amy for reorder
	if(ieee->pHTInfo->bCurRxReorderEnable == false ||pTS == NULL){
//added by amy for reorder
		/* No reordering: indicate every subframe right away. */
		for(i = 0; i<rxb->nr_subframes; i++) {
			struct sk_buff *sub_skb = rxb->subframes[i];

			if (sub_skb) {
				/* convert hdr + possible LLC headers into Ethernet header */
				ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
				if (sub_skb->len >= 8 &&
						((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 &&
						  ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) ||
						 memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) {
					/* remove RFC1042 or Bridge-Tunnel encapsulation and
					 * replace EtherType */
					skb_pull(sub_skb, SNAP_SIZE);
					memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
					memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
				} else {
					u16 len;
					/* Leave Ethernet header part of hdr and full payload */
					len = htons(sub_skb->len);
					memcpy(skb_push(sub_skb, 2), &len, 2);
					memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN);
					memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN);
				}

				stats->rx_packets++;
				stats->rx_bytes += sub_skb->len;
				if(is_multicast_ether_addr(dst)) {
					stats->multicast++;
				}

				/* Indicate the packets to upper layer */
				//printk("0skb_len(%d)\n", skb->len);
				sub_skb->protocol = eth_type_trans(sub_skb, dev);
				memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
				sub_skb->dev = dev;
				sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */
				//skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */
				ieee->last_rx_ps_time = jiffies;
				//printk("1skb_len(%d)\n", skb->len);
				netif_rx(sub_skb);
			}
		}
		kfree(rxb);
		rxb = NULL;

	}
	else
	{
		IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): REORDER ENABLE AND PTS not NULL, and we will enter RxReorderIndicatePacket()\n",__FUNCTION__);
		RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);
	}
#ifndef JOHN_NOCPY
	dev_kfree_skb(skb);
#endif

 rx_exit:
#ifdef NOT_YET
	if (sta)
		hostap_handle_sta_release(sta);
#endif
	return 1;

 rx_dropped:
	kfree(rxb);
	rxb = NULL;
	stats->rx_dropped++;

	/* Returning 0 indicates to caller that we have not handled the SKB--
	 * so it is still allocated and can be used again by underlying
	 * hardware as a DMA target */
	return 0;
}

#define MGMT_FRAME_FIXED_PART_LENGTH		0x24

/* WMM (Wi-Fi Alliance) OUI used to recognize QoS elements. */
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };

/*
 * Make the structure we read from the beacon packet to have
 * the right values
 */
static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element
				     *info_element, int sub_type)
{

	if (info_element->qui_subtype != sub_type)
		return -1;
	if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
		return -1;
	if (info_element->qui_type != QOS_OUI_TYPE)
		return -1;
	if (info_element->version != QOS_VERSION_1)
		return -1;

	return 0;
}

/*
 * Parse a QoS parameter element
 */
static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info
					    *element_param, struct ieee80211_info_element
					    *info_element)
{
	int ret = 0;
	u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2;

	if ((info_element == NULL) || (element_param == NULL))
		return
-1;

	/* Only accept an element whose id and length match exactly. */
	if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
		memcpy(element_param->info_element.qui, info_element->data,
		       info_element->len);
		element_param->info_element.elementID = info_element->id;
		element_param->info_element.length = info_element->len;
	} else
		ret = -1;
	if (ret == 0)
		ret = ieee80211_verify_qos_info(&element_param->info_element,
						QOS_OUI_PARAM_SUB_TYPE);
	return ret;
}

/*
 * Parse a QoS information element.
 * Same shape as the parameter-element reader above, but validates the
 * INFO subtype.  Returns 0 on success, -1 on NULL input or mismatch.
 */
static int ieee80211_read_qos_info_element(struct ieee80211_qos_information_element *element_info, struct ieee80211_info_element *info_element)
{
	int ret = 0;
	u16 size = sizeof(struct ieee80211_qos_information_element) - 2;

	if (element_info == NULL)
		return -1;
	if (info_element == NULL)
		return -1;

	if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) {
		memcpy(element_info->qui, info_element->data,
		       info_element->len);
		element_info->elementID = info_element->id;
		element_info->length = info_element->len;
	} else
		ret = -1;

	if (ret == 0)
		ret = ieee80211_verify_qos_info(element_info,
						QOS_OUI_INFO_SUB_TYPE);
	return ret;
}

/*
 * Write QoS parameters from the ac parameters.
 * Decodes each AC record (AIFSN, ECWmin/ECWmax, ACM flag, TXOP limit)
 * into the per-AC arrays of *qos_param.  rc is never modified, so the
 * function always returns 0.
 */
static int ieee80211_qos_convert_ac_to_parameters(struct ieee80211_qos_parameter_info *param_elm, struct ieee80211_qos_parameters *qos_param)
{
	int rc = 0;
	int i;
	struct ieee80211_qos_ac_parameter *ac_params;
	u8 aci;
	//u8 cw_min;
	//u8 cw_max;

	for (i = 0; i < QOS_QUEUE_NUM; i++) {
		ac_params = &(param_elm->ac_params_record[i]);
		/* ACI is bits 5-6 of the ACI/AIFSN byte; skip out-of-range
		 * indices rather than writing past the per-AC arrays. */
		aci = (ac_params->aci_aifsn & 0x60) >> 5;
		if(aci >= QOS_QUEUE_NUM)
			continue;
		qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f;

		/* WMM spec P.11: The minimum value for AIFSN shall be 2 */
		qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2:qos_param->aifs[aci];

		qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;
		qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;
		/* Bit 4 of ACI/AIFSN is the ACM (admission control) flag. */
		qos_param->flag[aci] = (ac_params->aci_aifsn & 0x10) ?
0x01 : 0x00;
		qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
	}
	return rc;
}

/*
 * we have a generic data element which it may contain QoS information or
 * parameters element. check the information element length to decide
 * which type to read
 * On success sets NETWORK_HAS_QOS_INFORMATION or NETWORK_HAS_QOS_PARAMETERS
 * in network->flags, fills network->qos_data and marks QoS as supported.
 */
static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element *info_element, struct ieee80211_network *network)
{
	int rc = 0;
	struct ieee80211_qos_parameters *qos_param = NULL;
	struct ieee80211_qos_information_element qos_info_element;

	/* Try the (shorter) information element first ... */
	rc = ieee80211_read_qos_info_element(&qos_info_element, info_element);
	if (rc == 0) {
		network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
		network->flags |= NETWORK_HAS_QOS_INFORMATION;
	} else {
		/* ... otherwise fall back to the full parameter element. */
		struct ieee80211_qos_parameter_info param_element;

		rc = ieee80211_read_qos_param_element(&param_element,
						      info_element);
		if (rc == 0) {
			qos_param = &(network->qos_data.parameters);
			ieee80211_qos_convert_ac_to_parameters(&param_element,
							       qos_param);
			network->flags |= NETWORK_HAS_QOS_PARAMETERS;
			network->qos_data.param_count =
			    param_element.info_element.ac_info & 0x0F;
		}
	}

	if (rc == 0) {
		IEEE80211_DEBUG_QOS("QoS is supported\n");
		network->qos_data.supported = 1;
	}
	return rc;
}

#ifdef CONFIG_IEEE80211_DEBUG

/* Expands to a switch case returning the element name as a string. */
#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x

/* Map an information-element id to its printable name (debug only). */
static const char *get_info_element_string(u16 id)
{
	switch (id) {
		MFIE_STRING(SSID);
		MFIE_STRING(RATES);
		MFIE_STRING(FH_SET);
		MFIE_STRING(DS_SET);
		MFIE_STRING(CF_SET);
		MFIE_STRING(TIM);
		MFIE_STRING(IBSS_SET);
		MFIE_STRING(COUNTRY);
		MFIE_STRING(HOP_PARAMS);
		MFIE_STRING(HOP_TABLE);
		MFIE_STRING(REQUEST);
		MFIE_STRING(CHALLENGE);
		MFIE_STRING(POWER_CONSTRAINT);
		MFIE_STRING(POWER_CAPABILITY);
		MFIE_STRING(TPC_REQUEST);
		MFIE_STRING(TPC_REPORT);
		MFIE_STRING(SUPP_CHANNELS);
		MFIE_STRING(CSA);
		MFIE_STRING(MEASURE_REQUEST);
		MFIE_STRING(MEASURE_REPORT);
		MFIE_STRING(QUIET);
		MFIE_STRING(IBSS_DFS);
		// MFIE_STRING(ERP_INFO);
		MFIE_STRING(RSN);
		MFIE_STRING(RATES_EX);
		MFIE_STRING(GENERIC);
		MFIE_STRING(QOS_PARAMETER);
	default:
return "UNKNOWN";
	}
}
#endif

/*
 * Capture the 802.11d country IE from a received frame.
 * Copies the raw IE into network->CountryIeBuf, feeds it to the dot11d
 * code when no valid country IE has been latched yet, and pets the
 * country-IE watchdog when the frame comes from the latched source.
 * No-op unless dot11d support is enabled on this device.
 */
static inline void ieee80211_extract_country_ie(
	struct ieee80211_device *ieee,
	struct ieee80211_info_element *info_element,
	struct ieee80211_network *network,
	u8 * addr2
)
{
	if(IS_DOT11D_ENABLE(ieee))
	{
		if(info_element->len!= 0)
		{
			memcpy(network->CountryIeBuf, info_element->data, info_element->len);
			network->CountryIeLen = info_element->len;

			if(!IS_COUNTRY_IE_VALID(ieee))
			{
				Dot11d_UpdateCountryIe(ieee, addr2, info_element->len, info_element->data);
			}
		}

		//
		// 070305, rcnjko: I update country IE watch dog here because
		// some AP (e.g. Cisco 1242) don't include country IE in their
		// probe response frame.
		//
		if(IS_EQUAL_CIE_SRC(ieee, addr2) )
		{
			UPDATE_CIE_WATCHDOG(ieee);
		}
	}
}

/*
 * Walk the information elements of a management frame and populate
 * *network (SSID, rates, channel, TIM, QoS, HT, vendor quirks, ...).
 * 'length' is the number of IE bytes available after the fixed part.
 * Returns 0; malformed trailing elements stop the walk without error.
 */
int ieee80211_parse_info_param(struct ieee80211_device *ieee,
		struct ieee80211_info_element *info_element,
		u16 length,
		struct ieee80211_network *network,
		struct ieee80211_rx_stats *stats)
{
	u8 i;
	short offset;
	u16	tmp_htcap_len=0;
	u16	tmp_htinfo_len=0;
	u16 ht_realtek_agg_len=0;
	u8  ht_realtek_agg_buf[MAX_IE_LEN];
//	u16 broadcom_len = 0;
#ifdef CONFIG_IEEE80211_DEBUG
	char rates_str[64];
	char *p;
#endif

	while (length >= sizeof(*info_element)) {
		/* Bail out if the declared element length overruns the
		 * remaining buffer. */
		if (sizeof(*info_element) + info_element->len > length) {
			IEEE80211_DEBUG_MGMT("Info elem: parse failed: "
					     "info_element->len + 2 > left : "
					     "info_element->len+2=%zd left=%d, id=%d.\n",
					     info_element->len +
					     sizeof(*info_element),
					     length, info_element->id);
			/* We stop processing but don't return an error here
			 * because some misbehaviour APs break this rule. ie.
			 * Orinoco AP1000.
*/ break; } switch (info_element->id) { case MFIE_TYPE_SSID: if (ieee80211_is_empty_essid(info_element->data, info_element->len)) { network->flags |= NETWORK_EMPTY_ESSID; break; } network->ssid_len = min(info_element->len, (u8) IW_ESSID_MAX_SIZE); memcpy(network->ssid, info_element->data, network->ssid_len); if (network->ssid_len < IW_ESSID_MAX_SIZE) memset(network->ssid + network->ssid_len, 0, IW_ESSID_MAX_SIZE - network->ssid_len); IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n", network->ssid, network->ssid_len); break; case MFIE_TYPE_RATES: #ifdef CONFIG_IEEE80211_DEBUG p = rates_str; #endif network->rates_len = min(info_element->len, MAX_RATES_LENGTH); for (i = 0; i < network->rates_len; i++) { network->rates[i] = info_element->data[i]; #ifdef CONFIG_IEEE80211_DEBUG p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); #endif if (ieee80211_is_ofdm_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_OFDM; if (info_element->data[i] & IEEE80211_BASIC_RATE_MASK) network->flags &= ~NETWORK_HAS_CCK; } } IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n", rates_str, network->rates_len); break; case MFIE_TYPE_RATES_EX: #ifdef CONFIG_IEEE80211_DEBUG p = rates_str; #endif network->rates_ex_len = min(info_element->len, MAX_RATES_EX_LENGTH); for (i = 0; i < network->rates_ex_len; i++) { network->rates_ex[i] = info_element->data[i]; #ifdef CONFIG_IEEE80211_DEBUG p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); #endif if (ieee80211_is_ofdm_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_OFDM; if (info_element->data[i] & IEEE80211_BASIC_RATE_MASK) network->flags &= ~NETWORK_HAS_CCK; } } IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n", rates_str, network->rates_ex_len); break; case MFIE_TYPE_DS_SET: IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n", info_element->data[0]); network->channel = info_element->data[0]; break; case MFIE_TYPE_FH_SET: 
IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n"); break; case MFIE_TYPE_CF_SET: IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n"); break; case MFIE_TYPE_TIM: if(info_element->len < 4) break; network->tim.tim_count = info_element->data[0]; network->tim.tim_period = info_element->data[1]; network->dtim_period = info_element->data[1]; if(ieee->state != IEEE80211_LINKED) break; network->last_dtim_sta_time[0] = stats->mac_time[0]; network->last_dtim_sta_time[1] = stats->mac_time[1]; network->dtim_data = IEEE80211_DTIM_VALID; if(info_element->data[0] != 0) break; if(info_element->data[2] & 1) network->dtim_data |= IEEE80211_DTIM_MBCAST; offset = (info_element->data[2] >> 1)*2; //printk("offset1:%x aid:%x\n",offset, ieee->assoc_id); if(ieee->assoc_id < 8*offset || ieee->assoc_id > 8*(offset + info_element->len -3)) break; offset = (ieee->assoc_id / 8) - offset;// + ((aid % 8)? 0 : 1) ; if(info_element->data[3+offset] & (1<<(ieee->assoc_id%8))) network->dtim_data |= IEEE80211_DTIM_UCAST; //IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: partially ignored\n"); break; case MFIE_TYPE_ERP: network->erp_value = info_element->data[0]; network->flags |= NETWORK_HAS_ERP_VALUE; IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n", network->erp_value); break; case MFIE_TYPE_IBSS_SET: network->atim_window = info_element->data[0]; IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n", network->atim_window); break; case MFIE_TYPE_CHALLENGE: IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n"); break; case MFIE_TYPE_GENERIC: IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len); if (!ieee80211_parse_qos_info_param_IE(info_element, network)) break; if (info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x50 && info_element->data[2] == 0xf2 && info_element->data[3] == 0x01) { network->wpa_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); memcpy(network->wpa_ie, info_element, network->wpa_ie_len); break; } #ifdef THOMAS_TURBO if (info_element->len == 
7 && info_element->data[0] == 0x00 && info_element->data[1] == 0xe0 && info_element->data[2] == 0x4c && info_element->data[3] == 0x01 && info_element->data[4] == 0x02) { network->Turbo_Enable = 1; } #endif //for HTcap and HTinfo parameters if(tmp_htcap_len == 0){ if(info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x90 && info_element->data[2] == 0x4c && info_element->data[3] == 0x033){ tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htcap_len != 0){ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\ sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len; memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen); } } if(tmp_htcap_len != 0) network->bssht.bdSupportHT = true; else network->bssht.bdSupportHT = false; } if(tmp_htinfo_len == 0){ if(info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x90 && info_element->data[2] == 0x4c && info_element->data[3] == 0x034){ tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htinfo_len != 0){ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; if(tmp_htinfo_len){ network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\ sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len; memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen); } } } } if(ieee->aggregation){ if(network->bssht.bdSupportHT){ if(info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0xe0 && info_element->data[2] == 0x4c && info_element->data[3] == 0x02){ ht_realtek_agg_len = min(info_element->len,(u8)MAX_IE_LEN); memcpy(ht_realtek_agg_buf,info_element->data,info_element->len); } if(ht_realtek_agg_len >= 5){ network->bssht.bdRT2RTAggregation = true; if((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & 0x02)) network->bssht.bdRT2RTLongSlotTime = true; } } } //if(tmp_htcap_len !=0 || tmp_htinfo_len != 0) { 
if((info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x05 && info_element->data[2] == 0xb5) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x0a && info_element->data[2] == 0xf7) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x10 && info_element->data[2] == 0x18)){ network->broadcom_cap_exist = true; } } if(info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x0c && info_element->data[2] == 0x43) { network->ralink_cap_exist = true; } else network->ralink_cap_exist = false; //added by amy for atheros AP if((info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x03 && info_element->data[2] == 0x7f) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x13 && info_element->data[2] == 0x74)) { printk("========>%s(): athros AP is exist\n",__FUNCTION__); network->atheros_cap_exist = true; } else network->atheros_cap_exist = false; if(info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96) { network->cisco_cap_exist = true; } else network->cisco_cap_exist = false; //added by amy for LEAP of cisco if(info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96 && info_element->data[3] == 0x01) { if(info_element->len == 6) { memcpy(network->CcxRmState, &info_element[4], 2); if(network->CcxRmState[0] != 0) { network->bCcxRmEnable = true; } else network->bCcxRmEnable = false; // // CCXv4 Table 59-1 MBSSID Masks. 
// network->MBssidMask = network->CcxRmState[1] & 0x07; if(network->MBssidMask != 0) { network->bMBssidValid = true; network->MBssidMask = 0xff << (network->MBssidMask); cpMacAddr(network->MBssid, network->bssid); network->MBssid[5] &= network->MBssidMask; } else { network->bMBssidValid = false; } } else { network->bCcxRmEnable = false; } } if(info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96 && info_element->data[3] == 0x03) { if(info_element->len == 5) { network->bWithCcxVerNum = true; network->BssCcxVerNumber = info_element->data[4]; } else { network->bWithCcxVerNum = false; network->BssCcxVerNumber = 0; } } break; case MFIE_TYPE_RSN: IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n", info_element->len); network->rsn_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); memcpy(network->rsn_ie, info_element, network->rsn_ie_len); break; //HT related element. case MFIE_TYPE_HT_CAP: IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_CAP: %d bytes\n", info_element->len); tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htcap_len != 0){ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\ sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len; memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen); //If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT() // windows driver will update WMM parameters each beacon received once connected // Linux driver is a bit different. 
network->bssht.bdSupportHT = true; } else network->bssht.bdSupportHT = false; break; case MFIE_TYPE_HT_INFO: IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_INFO: %d bytes\n", info_element->len); tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htinfo_len){ network->bssht.bdHTSpecVer = HT_SPEC_VER_IEEE; network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\ sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len; memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen); } break; case MFIE_TYPE_AIRONET: IEEE80211_DEBUG_SCAN("MFIE_TYPE_AIRONET: %d bytes\n", info_element->len); if(info_element->len >IE_CISCO_FLAG_POSITION) { network->bWithAironetIE = true; // CCX 1 spec v1.13, A01.1 CKIP Negotiation (page23): // "A Cisco access point advertises support for CKIP in beacon and probe response packets, // by adding an Aironet element and setting one or both of the CKIP negotiation bits." if( (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_MIC) || (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_PK) ) { network->bCkipSupported = true; } else { network->bCkipSupported = false; } } else { network->bWithAironetIE = false; network->bCkipSupported = false; } break; case MFIE_TYPE_QOS_PARAMETER: printk(KERN_ERR "QoS Error need to parse QOS_PARAMETER IE\n"); break; case MFIE_TYPE_COUNTRY: IEEE80211_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n", info_element->len); //printk("=====>Receive <%s> Country IE\n",network->ssid); ieee80211_extract_country_ie(ieee, info_element, network, network->bssid);//addr2 is same as addr3 when from an AP break; /* TODO */ default: IEEE80211_DEBUG_MGMT ("Unsupported info element: %s (%d)\n", get_info_element_string(info_element->id), info_element->id); break; } length -= sizeof(*info_element) + info_element->len; info_element = (struct ieee80211_info_element *)&info_element-> data[info_element->len]; } if(!network->atheros_cap_exist && !network->broadcom_cap_exist && 
!network->cisco_cap_exist &&
	    !network->ralink_cap_exist &&
	    !network->bssht.bdRT2RTAggregation)
	{
		/* No known vendor fingerprint matched above. */
		network->unknown_cap_exist = true;
	}
	else
	{
		network->unknown_cap_exist = false;
	}
	return 0;
}

/*
 * Piece-wise linear remap of a 0-100 signal-strength index onto a
 * compressed 0-100 scale (low readings are boosted).  Values outside
 * the handled ranges are returned unchanged.
 */
static inline u8 ieee80211_SignalStrengthTranslate(
	u8  CurrSS
	)
{
	u8 RetSS;

	// Step 1. Scale mapping.
	if(CurrSS >= 71 && CurrSS <= 100)
	{
		RetSS = 90 + ((CurrSS - 70) / 3);
	}
	else if(CurrSS >= 41 && CurrSS <= 70)
	{
		RetSS = 78 + ((CurrSS - 40) / 3);
	}
	else if(CurrSS >= 31 && CurrSS <= 40)
	{
		RetSS = 66 + (CurrSS - 30);
	}
	else if(CurrSS >= 21 && CurrSS <= 30)
	{
		RetSS = 54 + (CurrSS - 20);
	}
	else if(CurrSS >= 5 && CurrSS <= 20)
	{
		RetSS = 42 + (((CurrSS - 5) * 2) / 3);
	}
	else if(CurrSS == 4)
	{
		RetSS = 36;
	}
	else if(CurrSS == 3)
	{
		RetSS = 27;
	}
	else if(CurrSS == 2)
	{
		RetSS = 18;
	}
	else if(CurrSS == 1)
	{
		RetSS = 9;
	}
	else
	{
		RetSS = CurrSS;
	}
	//RT_TRACE(COMP_DBG, DBG_LOUD, ("##### After Mapping:  LastSS: %d, CurrSS: %d, RetSS: %d\n", LastSS, CurrSS, RetSS));

	// Step 2. Smoothing.
	//RT_TRACE(COMP_DBG, DBG_LOUD, ("$$$$$ After Smoothing:  LastSS: %d, CurrSS: %d, RetSS: %d\n", LastSS, CurrSS, RetSS));

	return RetSS;
}

/* Convert a 0-100 signal index to dBm via x = 0.5y - 95. */
long ieee80211_translate_todbm(u8 signal_strength_index )// 0-100 index.
{
	long	signal_power; // in dBm.

	// Translate to dBm (x=0.5y-95).
signal_power = (long)((signal_strength_index + 1) >> 1); signal_power -= 95; return signal_power; } static inline int ieee80211_network_init( struct ieee80211_device *ieee, struct ieee80211_probe_response *beacon, struct ieee80211_network *network, struct ieee80211_rx_stats *stats) { #ifdef CONFIG_IEEE80211_DEBUG //char rates_str[64]; //char *p; #endif network->qos_data.active = 0; network->qos_data.supported = 0; network->qos_data.param_count = 0; network->qos_data.old_param_count = 0; /* Pull out fixed field data */ memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); network->capability = le16_to_cpu(beacon->capability); network->last_scanned = jiffies; network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]); network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]); network->beacon_interval = le32_to_cpu(beacon->beacon_interval); /* Where to pull this? beacon->listen_interval;*/ network->listen_interval = 0x0A; network->rates_len = network->rates_ex_len = 0; network->last_associate = 0; network->ssid_len = 0; network->flags = 0; network->atim_window = 0; network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? 
0x3 : 0x0; network->berp_info_valid = false; network->broadcom_cap_exist = false; network->ralink_cap_exist = false; network->atheros_cap_exist = false; network->cisco_cap_exist = false; network->unknown_cap_exist = false; #ifdef THOMAS_TURBO network->Turbo_Enable = 0; #endif network->CountryIeLen = 0; memset(network->CountryIeBuf, 0, MAX_IE_LEN); //Initialize HT parameters //ieee80211_ht_initialize(&network->bssht); HTInitializeBssDesc(&network->bssht); if (stats->freq == IEEE80211_52GHZ_BAND) { /* for A band (No DS info) */ network->channel = stats->received_channel; } else network->flags |= NETWORK_HAS_CCK; network->wpa_ie_len = 0; network->rsn_ie_len = 0; if (ieee80211_parse_info_param (ieee,beacon->info_element, stats->len - sizeof(*beacon), network, stats)) return 1; network->mode = 0; if (stats->freq == IEEE80211_52GHZ_BAND) network->mode = IEEE_A; else { if (network->flags & NETWORK_HAS_OFDM) network->mode |= IEEE_G; if (network->flags & NETWORK_HAS_CCK) network->mode |= IEEE_B; } if (network->mode == 0) { IEEE80211_DEBUG_SCAN("Filtered out '%s (%pM)' " "network.\n", escape_essid(network->ssid, network->ssid_len), network->bssid); return 1; } if(network->bssht.bdSupportHT){ if(network->mode == IEEE_A) network->mode = IEEE_N_5G; else if(network->mode & (IEEE_G | IEEE_B)) network->mode = IEEE_N_24G; } if (ieee80211_is_empty_essid(network->ssid, network->ssid_len)) network->flags |= NETWORK_EMPTY_ESSID; stats->signal = 30 + (stats->SignalStrength * 70) / 100; //stats->signal = ieee80211_SignalStrengthTranslate(stats->signal); stats->noise = ieee80211_translate_todbm((u8)(100-stats->signal)) -25; memcpy(&network->stats, stats, sizeof(network->stats)); return 0; } static inline int is_same_network(struct ieee80211_network *src, struct ieee80211_network *dst, struct ieee80211_device* ieee) { /* A network is only a duplicate if the channel, BSSID, ESSID * and the capability field (in particular IBSS and BSS) all match. 
* We treat all <hidden> with the same BSSID and channel * as one network */ return //((src->ssid_len == dst->ssid_len) && (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && (src->channel == dst->channel) && !memcmp(src->bssid, dst->bssid, ETH_ALEN) && //!memcmp(src->ssid, dst->ssid, src->ssid_len) && (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && ((src->capability & WLAN_CAPABILITY_IBSS) == (dst->capability & WLAN_CAPABILITY_IBSS)) && ((src->capability & WLAN_CAPABILITY_BSS) == (dst->capability & WLAN_CAPABILITY_BSS))); } static inline void update_network(struct ieee80211_network *dst, struct ieee80211_network *src) { int qos_active; u8 old_param; memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats)); dst->capability = src->capability; memcpy(dst->rates, src->rates, src->rates_len); dst->rates_len = src->rates_len; memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len); dst->rates_ex_len = src->rates_ex_len; if(src->ssid_len > 0) { memset(dst->ssid, 0, dst->ssid_len); dst->ssid_len = src->ssid_len; memcpy(dst->ssid, src->ssid, src->ssid_len); } dst->mode = src->mode; dst->flags = src->flags; dst->time_stamp[0] = src->time_stamp[0]; dst->time_stamp[1] = src->time_stamp[1]; if (src->flags & NETWORK_HAS_ERP_VALUE) { dst->erp_value = src->erp_value; dst->berp_info_valid = src->berp_info_valid = true; } dst->beacon_interval = src->beacon_interval; dst->listen_interval = src->listen_interval; dst->atim_window = src->atim_window; dst->dtim_period = src->dtim_period; dst->dtim_data = src->dtim_data; dst->last_dtim_sta_time[0] = src->last_dtim_sta_time[0]; dst->last_dtim_sta_time[1] = src->last_dtim_sta_time[1]; memcpy(&dst->tim, &src->tim, sizeof(struct ieee80211_tim_parameters)); dst->bssht.bdSupportHT = src->bssht.bdSupportHT; dst->bssht.bdRT2RTAggregation = src->bssht.bdRT2RTAggregation; dst->bssht.bdHTCapLen= src->bssht.bdHTCapLen; 
memcpy(dst->bssht.bdHTCapBuf,src->bssht.bdHTCapBuf,src->bssht.bdHTCapLen); dst->bssht.bdHTInfoLen= src->bssht.bdHTInfoLen; memcpy(dst->bssht.bdHTInfoBuf,src->bssht.bdHTInfoBuf,src->bssht.bdHTInfoLen); dst->bssht.bdHTSpecVer = src->bssht.bdHTSpecVer; dst->bssht.bdRT2RTLongSlotTime = src->bssht.bdRT2RTLongSlotTime; dst->broadcom_cap_exist = src->broadcom_cap_exist; dst->ralink_cap_exist = src->ralink_cap_exist; dst->atheros_cap_exist = src->atheros_cap_exist; dst->cisco_cap_exist = src->cisco_cap_exist; dst->unknown_cap_exist = src->unknown_cap_exist; memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); dst->wpa_ie_len = src->wpa_ie_len; memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len); dst->rsn_ie_len = src->rsn_ie_len; dst->last_scanned = jiffies; /* qos related parameters */ //qos_active = src->qos_data.active; qos_active = dst->qos_data.active; //old_param = dst->qos_data.old_param_count; old_param = dst->qos_data.param_count; if(dst->flags & NETWORK_HAS_QOS_MASK) memcpy(&dst->qos_data, &src->qos_data, sizeof(struct ieee80211_qos_data)); else { dst->qos_data.supported = src->qos_data.supported; dst->qos_data.param_count = src->qos_data.param_count; } if(dst->qos_data.supported == 1) { dst->QoS_Enable = 1; if(dst->ssid_len) IEEE80211_DEBUG_QOS ("QoS the network %s is QoS supported\n", dst->ssid); else IEEE80211_DEBUG_QOS ("QoS the network is QoS supported\n"); } dst->qos_data.active = qos_active; dst->qos_data.old_param_count = old_param; /* dst->last_associate is not overwritten */ dst->wmm_info = src->wmm_info; //sure to exist in beacon or probe response frame. 
if(src->wmm_param[0].ac_aci_acm_aifsn|| \ src->wmm_param[1].ac_aci_acm_aifsn|| \ src->wmm_param[2].ac_aci_acm_aifsn|| \ src->wmm_param[3].ac_aci_acm_aifsn) { memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN); } //dst->QoS_Enable = src->QoS_Enable; #ifdef THOMAS_TURBO dst->Turbo_Enable = src->Turbo_Enable; #endif dst->CountryIeLen = src->CountryIeLen; memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen); //added by amy for LEAP dst->bWithAironetIE = src->bWithAironetIE; dst->bCkipSupported = src->bCkipSupported; memcpy(dst->CcxRmState,src->CcxRmState,2); dst->bCcxRmEnable = src->bCcxRmEnable; dst->MBssidMask = src->MBssidMask; dst->bMBssidValid = src->bMBssidValid; memcpy(dst->MBssid,src->MBssid,6); dst->bWithCcxVerNum = src->bWithCcxVerNum; dst->BssCcxVerNumber = src->BssCcxVerNumber; } static inline int is_beacon(__le16 fc) { return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON); } static inline void ieee80211_process_probe_response( struct ieee80211_device *ieee, struct ieee80211_probe_response *beacon, struct ieee80211_rx_stats *stats) { struct ieee80211_network network; struct ieee80211_network *target; struct ieee80211_network *oldest = NULL; #ifdef CONFIG_IEEE80211_DEBUG struct ieee80211_info_element *info_element = &beacon->info_element[0]; #endif unsigned long flags; short renew; //u8 wmm_info; memset(&network, 0, sizeof(struct ieee80211_network)); IEEE80211_DEBUG_SCAN( "'%s' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n", escape_essid(info_element->data, info_element->len), beacon->header.addr3, (beacon->capability & (1<<0xf)) ? '1' : '0', (beacon->capability & (1<<0xe)) ? '1' : '0', (beacon->capability & (1<<0xd)) ? '1' : '0', (beacon->capability & (1<<0xc)) ? '1' : '0', (beacon->capability & (1<<0xb)) ? '1' : '0', (beacon->capability & (1<<0xa)) ? '1' : '0', (beacon->capability & (1<<0x9)) ? '1' : '0', (beacon->capability & (1<<0x8)) ? '1' : '0', (beacon->capability & (1<<0x7)) ? 
'1' : '0', (beacon->capability & (1<<0x6)) ? '1' : '0', (beacon->capability & (1<<0x5)) ? '1' : '0', (beacon->capability & (1<<0x4)) ? '1' : '0', (beacon->capability & (1<<0x3)) ? '1' : '0', (beacon->capability & (1<<0x2)) ? '1' : '0', (beacon->capability & (1<<0x1)) ? '1' : '0', (beacon->capability & (1<<0x0)) ? '1' : '0'); if (ieee80211_network_init(ieee, beacon, &network, stats)) { IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n", escape_essid(info_element->data, info_element->len), beacon->header.addr3, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); return; } // For Asus EeePc request, // (1) if wireless adapter receive get any 802.11d country code in AP beacon, // wireless adapter should follow the country code. // (2) If there is no any country code in beacon, // then wireless adapter should do active scan from ch1~11 and // passive scan from ch12~14 if( !IsLegalChannel(ieee, network.channel) ) return; if(ieee->bGlobalDomain) { if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP) { // Case 1: Country code if(IS_COUNTRY_IE_VALID(ieee) ) { if( !IsLegalChannel(ieee, network.channel) ) { printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network.channel); return; } } // Case 2: No any country code. else { // Filter over channel ch12~14 if(network.channel > 11) { printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network.channel); return; } } } else { // Case 1: Country code if(IS_COUNTRY_IE_VALID(ieee) ) { if( !IsLegalChannel(ieee, network.channel) ) { printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network.channel); return; } } // Case 2: No any country code. 
else { // Filter over channel ch12~14 if(network.channel > 14) { printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n",network.channel); return; } } } } /* The network parsed correctly -- so now we scan our known networks * to see if we can find it in our list. * * NOTE: This search is definitely not optimized. Once its doing * the "right thing" we'll optimize it for efficiency if * necessary */ /* Search for this entry in the list and update it if it is * already there. */ spin_lock_irqsave(&ieee->lock, flags); if(is_same_network(&ieee->current_network, &network, ieee)) { update_network(&ieee->current_network, &network); if((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G) && ieee->current_network.berp_info_valid){ if(ieee->current_network.erp_value& ERP_UseProtection) ieee->current_network.buseprotection = true; else ieee->current_network.buseprotection = false; } if(is_beacon(beacon->header.frame_ctl)) { if(ieee->state == IEEE80211_LINKED) ieee->LinkDetectInfo.NumRecvBcnInPeriod++; } else //hidden AP network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & ieee->current_network.flags); } list_for_each_entry(target, &ieee->network_list, list) { if (is_same_network(target, &network, ieee)) break; if ((oldest == NULL) || (target->last_scanned < oldest->last_scanned)) oldest = target; } /* If we didn't find a match, then get a new network slot to initialize * with this beacon's information */ if (&target->list == &ieee->network_list) { if (list_empty(&ieee->network_free_list)) { /* If there are no more slots, expire the oldest */ list_del(&oldest->list); target = oldest; IEEE80211_DEBUG_SCAN("Expired '%s' (%pM) from " "network list.\n", escape_essid(target->ssid, target->ssid_len), target->bssid); } else { /* Otherwise just pull from the free list */ target = list_entry(ieee->network_free_list.next, struct ieee80211_network, list); list_del(ieee->network_free_list.next); } #ifdef 
CONFIG_IEEE80211_DEBUG IEEE80211_DEBUG_SCAN("Adding '%s' (%pM) via %s.\n", escape_essid(network.ssid, network.ssid_len), network.bssid, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); #endif memcpy(target, &network, sizeof(*target)); list_add_tail(&target->list, &ieee->network_list); if(ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) ieee80211_softmac_new_net(ieee,&network); } else { IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n", escape_essid(target->ssid, target->ssid_len), target->bssid, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); /* we have an entry and we are going to update it. But this entry may * be already expired. In this case we do the same as we found a new * net and call the new_net handler */ renew = !time_after(target->last_scanned + ieee->scan_age, jiffies); //YJ,add,080819,for hidden ap if(is_beacon(beacon->header.frame_ctl) == 0) network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & target->flags); //if(strncmp(network.ssid, "linksys-c",9) == 0) // printk("====>2 network.ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network.ssid, network.flags, target->ssid, target->flags); if(((network.flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \ && (((network.ssid_len > 0) && (strncmp(target->ssid, network.ssid, network.ssid_len)))\ ||((ieee->current_network.ssid_len == network.ssid_len)&&(strncmp(ieee->current_network.ssid, network.ssid, network.ssid_len) == 0)&&(ieee->state == IEEE80211_NOLINK)))) renew = 1; //YJ,add,080819,for hidden ap,end update_network(target, &network); if(renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)) ieee80211_softmac_new_net(ieee,&network); } spin_unlock_irqrestore(&ieee->lock, flags); if (is_beacon(beacon->header.frame_ctl)&&is_same_network(&ieee->current_network, &network, ieee)&&\ (ieee->state == IEEE80211_LINKED)) { if(ieee->handle_beacon != NULL) { 
ieee->handle_beacon(ieee->dev,beacon,&ieee->current_network); } } } void ieee80211_rx_mgt(struct ieee80211_device *ieee, struct ieee80211_hdr_4addr *header, struct ieee80211_rx_stats *stats) { switch (WLAN_FC_GET_STYPE(header->frame_ctl)) { case IEEE80211_STYPE_BEACON: IEEE80211_DEBUG_MGMT("received BEACON (%d)\n", WLAN_FC_GET_STYPE(header->frame_ctl)); IEEE80211_DEBUG_SCAN("Beacon\n"); ieee80211_process_probe_response( ieee, (struct ieee80211_probe_response *)header, stats); break; case IEEE80211_STYPE_PROBE_RESP: IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n", WLAN_FC_GET_STYPE(header->frame_ctl)); IEEE80211_DEBUG_SCAN("Probe response\n"); ieee80211_process_probe_response( ieee, (struct ieee80211_probe_response *)header, stats); break; } } EXPORT_SYMBOL(ieee80211_rx_mgt); EXPORT_SYMBOL(ieee80211_rx);
gpl-2.0
revjunkie/kernel
drivers/firewire/init_ohci1394_dma.c
4802
9838
/* * init_ohci1394_dma.c - Initializes physical DMA on all OHCI 1394 controllers * * Copyright (C) 2006-2007 Bernhard Kaindl <bk@suse.de> * * Derived from drivers/ieee1394/ohci1394.c and arch/x86/kernel/early-quirks.c * this file has functions to: * - scan the PCI very early on boot for all OHCI 1394-compliant controllers * - reset and initialize them and make them join the IEEE1394 bus and * - enable physical DMA on them to allow remote debugging * * All code and data is marked as __init and __initdata, respective as * during boot, all OHCI1394 controllers may be claimed by the firewire * stack and at this point, this code should not touch them anymore. * * To use physical DMA after the initialization of the firewire stack, * be sure that the stack enables it and (re-)attach after the bus reset * which may be caused by the firewire stack initialization. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. 
*/ #include <linux/delay.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/pci.h> /* for PCI defines */ #include <linux/string.h> #include <asm/pci-direct.h> /* for direct PCI config space access */ #include <asm/fixmap.h> #include <linux/init_ohci1394_dma.h> #include "ohci.h" int __initdata init_ohci1394_dma_early; struct ohci { void __iomem *registers; }; static inline void reg_write(const struct ohci *ohci, int offset, u32 data) { writel(data, ohci->registers + offset); } static inline u32 reg_read(const struct ohci *ohci, int offset) { return readl(ohci->registers + offset); } #define OHCI_LOOP_COUNT 100 /* Number of loops for reg read waits */ /* Reads a PHY register of an OHCI-1394 controller */ static inline u8 __init get_phy_reg(struct ohci *ohci, u8 addr) { int i; u32 r; reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | 0x00008000); for (i = 0; i < OHCI_LOOP_COUNT; i++) { if (reg_read(ohci, OHCI1394_PhyControl) & 0x80000000) break; mdelay(1); } r = reg_read(ohci, OHCI1394_PhyControl); return (r & 0x00ff0000) >> 16; } /* Writes to a PHY register of an OHCI-1394 controller */ static inline void __init set_phy_reg(struct ohci *ohci, u8 addr, u8 data) { int i; reg_write(ohci, OHCI1394_PhyControl, (addr << 8) | data | 0x00004000); for (i = 0; i < OHCI_LOOP_COUNT; i++) { if (!(reg_read(ohci, OHCI1394_PhyControl) & 0x00004000)) break; mdelay(1); } } /* Resets an OHCI-1394 controller (for sane state before initialization) */ static inline void __init init_ohci1394_soft_reset(struct ohci *ohci) { int i; reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset); for (i = 0; i < OHCI_LOOP_COUNT; i++) { if (!(reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_softReset)) break; mdelay(1); } } #define OHCI1394_MAX_AT_REQ_RETRIES 0xf #define OHCI1394_MAX_AT_RESP_RETRIES 0x2 #define OHCI1394_MAX_PHYS_RESP_RETRIES 0x8 /* Basic OHCI-1394 register and port inititalization */ static inline void __init init_ohci1394_initialize(struct ohci 
*ohci) { u32 bus_options; int num_ports, i; /* Put some defaults to these undefined bus options */ bus_options = reg_read(ohci, OHCI1394_BusOptions); bus_options |= 0x60000000; /* Enable CMC and ISC */ bus_options &= ~0x00ff0000; /* XXX: Set cyc_clk_acc to zero for now */ bus_options &= ~0x18000000; /* Disable PMC and BMC */ reg_write(ohci, OHCI1394_BusOptions, bus_options); /* Set the bus number */ reg_write(ohci, OHCI1394_NodeID, 0x0000ffc0); /* Enable posted writes */ reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_postedWriteEnable); /* Clear link control register */ reg_write(ohci, OHCI1394_LinkControlClear, 0xffffffff); /* enable phys */ reg_write(ohci, OHCI1394_LinkControlSet, OHCI1394_LinkControl_rcvPhyPkt); /* Don't accept phy packets into AR request context */ reg_write(ohci, OHCI1394_LinkControlClear, 0x00000400); /* Clear the Isochonouys interrupt masks */ reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 0xffffffff); reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 0xffffffff); reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 0xffffffff); reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 0xffffffff); /* Accept asynchronous transfer requests from all nodes for now */ reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000); /* Specify asynchronous transfer retries */ reg_write(ohci, OHCI1394_ATRetries, OHCI1394_MAX_AT_REQ_RETRIES | (OHCI1394_MAX_AT_RESP_RETRIES<<4) | (OHCI1394_MAX_PHYS_RESP_RETRIES<<8)); /* We don't want hardware swapping */ reg_write(ohci, OHCI1394_HCControlClear, OHCI1394_HCControl_noByteSwapData); /* Enable link */ reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_linkEnable); /* If anything is connected to a port, make sure it is enabled */ num_ports = get_phy_reg(ohci, 2) & 0xf; for (i = 0; i < num_ports; i++) { unsigned int status; set_phy_reg(ohci, 7, i); status = get_phy_reg(ohci, 8); if (status & 0x20) set_phy_reg(ohci, 8, status & ~1); } } /** * init_ohci1394_wait_for_busresets - wait until bus resets are completed * * 
OHCI1394 initialization itself and any device going on- or offline * and any cable issue cause a IEEE1394 bus reset. The OHCI1394 spec * specifies that physical DMA is disabled on each bus reset and it * has to be enabled after each bus reset when needed. We resort * to polling here because on early boot, we have no interrupts. */ static inline void __init init_ohci1394_wait_for_busresets(struct ohci *ohci) { int i, events; for (i = 0; i < 9; i++) { mdelay(200); events = reg_read(ohci, OHCI1394_IntEventSet); if (events & OHCI1394_busReset) reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset); } } /** * init_ohci1394_enable_physical_dma - Enable physical DMA for remote debugging * This enables remote DMA access over IEEE1394 from every host for the low * 4GB of address space. DMA accesses above 4GB are not available currently. */ static inline void __init init_ohci1394_enable_physical_dma(struct ohci *ohci) { reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 0xffffffff); reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 0xffffffff); reg_write(ohci, OHCI1394_PhyUpperBound, 0xffff0000); } /** * init_ohci1394_reset_and_init_dma - init controller and enable DMA * This initializes the given controller and enables physical DMA engine in it. */ static inline void __init init_ohci1394_reset_and_init_dma(struct ohci *ohci) { /* Start off with a soft reset, clears everything to a sane state. */ init_ohci1394_soft_reset(ohci); /* Accessing some registers without LPS enabled may cause lock up */ reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_LPS); /* Disable and clear interrupts */ reg_write(ohci, OHCI1394_IntEventClear, 0xffffffff); reg_write(ohci, OHCI1394_IntMaskClear, 0xffffffff); mdelay(50); /* Wait 50msec to make sure we have full link enabled */ init_ohci1394_initialize(ohci); /* * The initialization causes at least one IEEE1394 bus reset. 
Enabling * physical DMA only works *after* *all* bus resets have calmed down: */ init_ohci1394_wait_for_busresets(ohci); /* We had to wait and do this now if we want to debug early problems */ init_ohci1394_enable_physical_dma(ohci); } /** * init_ohci1394_controller - Map the registers of the controller and init DMA * This maps the registers of the specified controller and initializes it */ static inline void __init init_ohci1394_controller(int num, int slot, int func) { unsigned long ohci_base; struct ohci ohci; printk(KERN_INFO "init_ohci1394_dma: initializing OHCI-1394" " at %02x:%02x.%x\n", num, slot, func); ohci_base = read_pci_config(num, slot, func, PCI_BASE_ADDRESS_0+(0<<2)) & PCI_BASE_ADDRESS_MEM_MASK; set_fixmap_nocache(FIX_OHCI1394_BASE, ohci_base); ohci.registers = (void __iomem *)fix_to_virt(FIX_OHCI1394_BASE); init_ohci1394_reset_and_init_dma(&ohci); } /** * debug_init_ohci1394_dma - scan for OHCI1394 controllers and init DMA on them * Scans the whole PCI space for OHCI1394 controllers and inits DMA on them */ void __init init_ohci1394_dma_on_all_controllers(void) { int num, slot, func; u32 class; if (!early_pci_allowed()) return; /* Poor man's PCI discovery, the only thing we can do at early boot */ for (num = 0; num < 32; num++) { for (slot = 0; slot < 32; slot++) { for (func = 0; func < 8; func++) { class = read_pci_config(num, slot, func, PCI_CLASS_REVISION); if (class == 0xffffffff) continue; /* No device at this func */ if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI) continue; /* Not an OHCI-1394 device */ init_ohci1394_controller(num, slot, func); break; /* Assume one controller per device */ } } } printk(KERN_INFO "init_ohci1394_dma: finished initializing OHCI DMA\n"); } /** * setup_init_ohci1394_early - enables early OHCI1394 DMA initialization */ static int __init setup_ohci1394_dma(char *opt) { if (!strcmp(opt, "early")) init_ohci1394_dma_early = 1; return 0; } /* passing ohci1394_dma=early on boot causes early OHCI1394 DMA initialization 
*/ early_param("ohci1394_dma", setup_ohci1394_dma);
gpl-2.0
gearslam/JB_LS970ZVC
arch/arm/mach-s3c24xx/s3c2440.c
5058
1795
/* linux/arch/arm/mach-s3c2440/s3c2440.c * * Copyright (c) 2004-2006 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * Samsung S3C2440 Mobile CPU support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/serial_core.h> #include <linux/device.h> #include <linux/syscore_ops.h> #include <linux/gpio.h> #include <linux/clk.h> #include <linux/io.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <asm/irq.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/s3c244x.h> #include <plat/pm.h> #include <plat/gpio-core.h> #include <plat/gpio-cfg.h> #include <plat/gpio-cfg-helpers.h> static struct device s3c2440_dev = { .bus = &s3c2440_subsys, }; int __init s3c2440_init(void) { printk("S3C2440: Initialising architecture\n"); /* change irq for watchdog */ s3c_device_wdt.resource[1].start = IRQ_S3C2440_WDT; s3c_device_wdt.resource[1].end = IRQ_S3C2440_WDT; /* register suspend/resume handlers */ #ifdef CONFIG_PM register_syscore_ops(&s3c2410_pm_syscore_ops); #endif register_syscore_ops(&s3c244x_pm_syscore_ops); register_syscore_ops(&s3c24xx_irq_syscore_ops); /* register our system device for everything else */ return device_register(&s3c2440_dev); } void __init s3c2440_map_io(void) { s3c244x_map_io(); s3c24xx_gpiocfg_default.set_pull = s3c24xx_gpio_setpull_1up; s3c24xx_gpiocfg_default.get_pull = s3c24xx_gpio_getpull_1up; }
gpl-2.0
Kurre/kernel_samsung_manta
arch/arm/mach-s5p64x0/pm.c
5058
5436
/* linux/arch/arm/mach-s5p64x0/pm.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com * * S5P64X0 Power Management Support * * Based on arch/arm/mach-s3c64xx/pm.c by Ben Dooks * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/suspend.h> #include <linux/syscore_ops.h> #include <linux/io.h> #include <plat/cpu.h> #include <plat/pm.h> #include <plat/regs-timer.h> #include <plat/wakeup-mask.h> #include <mach/regs-clock.h> #include <mach/regs-gpio.h> static struct sleep_save s5p64x0_core_save[] = { SAVE_ITEM(S5P64X0_APLL_CON), SAVE_ITEM(S5P64X0_MPLL_CON), SAVE_ITEM(S5P64X0_EPLL_CON), SAVE_ITEM(S5P64X0_EPLL_CON_K), SAVE_ITEM(S5P64X0_CLK_SRC0), SAVE_ITEM(S5P64X0_CLK_SRC1), SAVE_ITEM(S5P64X0_CLK_DIV0), SAVE_ITEM(S5P64X0_CLK_DIV1), SAVE_ITEM(S5P64X0_CLK_DIV2), SAVE_ITEM(S5P64X0_CLK_DIV3), SAVE_ITEM(S5P64X0_CLK_GATE_MEM0), SAVE_ITEM(S5P64X0_CLK_GATE_HCLK1), SAVE_ITEM(S5P64X0_CLK_GATE_SCLK1), }; static struct sleep_save s5p64x0_misc_save[] = { SAVE_ITEM(S5P64X0_AHB_CON0), SAVE_ITEM(S5P64X0_SPCON0), SAVE_ITEM(S5P64X0_SPCON1), SAVE_ITEM(S5P64X0_MEM0CONSLP0), SAVE_ITEM(S5P64X0_MEM0CONSLP1), SAVE_ITEM(S5P64X0_MEM0DRVCON), SAVE_ITEM(S5P64X0_MEM1DRVCON), SAVE_ITEM(S3C64XX_TINT_CSTAT), }; /* DPLL is present only in S5P6450 */ static struct sleep_save s5p6450_core_save[] = { SAVE_ITEM(S5P6450_DPLL_CON), SAVE_ITEM(S5P6450_DPLL_CON_K), }; void s3c_pm_configure_extint(void) { __raw_writel(s3c_irqwake_eintmask, S5P64X0_EINT_WAKEUP_MASK); } void s3c_pm_restore_core(void) { __raw_writel(0, S5P64X0_EINT_WAKEUP_MASK); s3c_pm_do_restore_core(s5p64x0_core_save, ARRAY_SIZE(s5p64x0_core_save)); if (soc_is_s5p6450()) s3c_pm_do_restore_core(s5p6450_core_save, ARRAY_SIZE(s5p6450_core_save)); s3c_pm_do_restore(s5p64x0_misc_save, ARRAY_SIZE(s5p64x0_misc_save)); } void s3c_pm_save_core(void) { 
s3c_pm_do_save(s5p64x0_misc_save, ARRAY_SIZE(s5p64x0_misc_save)); if (soc_is_s5p6450()) s3c_pm_do_save(s5p6450_core_save, ARRAY_SIZE(s5p6450_core_save)); s3c_pm_do_save(s5p64x0_core_save, ARRAY_SIZE(s5p64x0_core_save)); } static int s5p64x0_cpu_suspend(unsigned long arg) { unsigned long tmp = 0; /* * Issue the standby signal into the pm unit. Note, we * issue a write-buffer drain just in case. */ asm("b 1f\n\t" ".align 5\n\t" "1:\n\t" "mcr p15, 0, %0, c7, c10, 5\n\t" "mcr p15, 0, %0, c7, c10, 4\n\t" "mcr p15, 0, %0, c7, c0, 4" : : "r" (tmp)); /* we should never get past here */ panic("sleep resumed to originator?"); } /* mapping of interrupts to parts of the wakeup mask */ static struct samsung_wakeup_mask s5p64x0_wake_irqs[] = { { .irq = IRQ_RTC_ALARM, .bit = S5P64X0_PWR_CFG_RTC_ALRM_DISABLE, }, { .irq = IRQ_RTC_TIC, .bit = S5P64X0_PWR_CFG_RTC_TICK_DISABLE, }, { .irq = IRQ_HSMMC0, .bit = S5P64X0_PWR_CFG_MMC0_DISABLE, }, { .irq = IRQ_HSMMC1, .bit = S5P64X0_PWR_CFG_MMC1_DISABLE, }, }; static void s5p64x0_pm_prepare(void) { u32 tmp; samsung_sync_wakemask(S5P64X0_PWR_CFG, s5p64x0_wake_irqs, ARRAY_SIZE(s5p64x0_wake_irqs)); /* store the resume address in INFORM0 register */ __raw_writel(virt_to_phys(s3c_cpu_resume), S5P64X0_INFORM0); /* setup clock gating for FIMGVG block */ __raw_writel((__raw_readl(S5P64X0_CLK_GATE_HCLK1) | \ (S5P64X0_CLK_GATE_HCLK1_FIMGVG)), S5P64X0_CLK_GATE_HCLK1); __raw_writel((__raw_readl(S5P64X0_CLK_GATE_SCLK1) | \ (S5P64X0_CLK_GATE_SCLK1_FIMGVG)), S5P64X0_CLK_GATE_SCLK1); /* Configure the stabilization counter with wait time required */ __raw_writel(S5P64X0_PWR_STABLE_PWR_CNT_VAL4, S5P64X0_PWR_STABLE); /* set WFI to SLEEP mode configuration */ tmp = __raw_readl(S5P64X0_SLEEP_CFG); tmp &= ~(S5P64X0_SLEEP_CFG_OSC_EN); __raw_writel(tmp, S5P64X0_SLEEP_CFG); tmp = __raw_readl(S5P64X0_PWR_CFG); tmp &= ~(S5P64X0_PWR_CFG_WFI_MASK); tmp |= S5P64X0_PWR_CFG_WFI_SLEEP; __raw_writel(tmp, S5P64X0_PWR_CFG); /* * set OTHERS register to disable interrupt before 
going to * sleep. This bit is present only in S5P6450, it is reserved * in S5P6440. */ if (soc_is_s5p6450()) { tmp = __raw_readl(S5P64X0_OTHERS); tmp |= S5P6450_OTHERS_DISABLE_INT; __raw_writel(tmp, S5P64X0_OTHERS); } /* ensure previous wakeup state is cleared before sleeping */ __raw_writel(__raw_readl(S5P64X0_WAKEUP_STAT), S5P64X0_WAKEUP_STAT); } static int s5p64x0_pm_add(struct device *dev, struct subsys_interface *sif) { pm_cpu_prep = s5p64x0_pm_prepare; pm_cpu_sleep = s5p64x0_cpu_suspend; pm_uart_udivslot = 1; return 0; } static struct subsys_interface s5p64x0_pm_interface = { .name = "s5p64x0_pm", .subsys = &s5p64x0_subsys, .add_dev = s5p64x0_pm_add, }; static __init int s5p64x0_pm_drvinit(void) { s3c_pm_init(); return subsys_interface_register(&s5p64x0_pm_interface); } arch_initcall(s5p64x0_pm_drvinit); static void s5p64x0_pm_resume(void) { u32 tmp; tmp = __raw_readl(S5P64X0_OTHERS); tmp |= (S5P64X0_OTHERS_RET_MMC0 | S5P64X0_OTHERS_RET_MMC1 | \ S5P64X0_OTHERS_RET_UART); __raw_writel(tmp , S5P64X0_OTHERS); } static struct syscore_ops s5p64x0_pm_syscore_ops = { .resume = s5p64x0_pm_resume, }; static __init int s5p64x0_pm_syscore_init(void) { register_syscore_ops(&s5p64x0_pm_syscore_ops); return 0; } arch_initcall(s5p64x0_pm_syscore_init);
gpl-2.0
juston-li/mako
drivers/infiniband/hw/ehca/ehca_sqp.c
12994
7028
/* * IBM eServer eHCA Infiniband device driver for Linux on POWER * * SQP functions * * Authors: Khadija Souissi <souissi@de.ibm.com> * Heiko J Schick <schickhj@de.ibm.com> * * Copyright (c) 2005 IBM Corporation * * All rights reserved. * * This source code is distributed under a dual license of GPL v2.0 and OpenIB * BSD. * * OpenIB BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials * provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include <rdma/ib_mad.h> #include "ehca_classes.h" #include "ehca_tools.h" #include "ehca_iverbs.h" #include "hcp_if.h" #define IB_MAD_STATUS_REDIRECT cpu_to_be16(0x0002) #define IB_MAD_STATUS_UNSUP_VERSION cpu_to_be16(0x0004) #define IB_MAD_STATUS_UNSUP_METHOD cpu_to_be16(0x0008) #define IB_PMA_CLASS_PORT_INFO cpu_to_be16(0x0001) /** * ehca_define_sqp - Defines special queue pair 1 (GSI QP). When special queue * pair is created successfully, the corresponding port gets active. * * Define Special Queue pair 0 (SMI QP) is still not supported. * * @qp_init_attr: Queue pair init attributes with port and queue pair type */ u64 ehca_define_sqp(struct ehca_shca *shca, struct ehca_qp *ehca_qp, struct ib_qp_init_attr *qp_init_attr) { u32 pma_qp_nr, bma_qp_nr; u64 ret; u8 port = qp_init_attr->port_num; int counter; shca->sport[port - 1].port_state = IB_PORT_DOWN; switch (qp_init_attr->qp_type) { case IB_QPT_SMI: /* function not supported yet */ break; case IB_QPT_GSI: ret = hipz_h_define_aqp1(shca->ipz_hca_handle, ehca_qp->ipz_qp_handle, ehca_qp->galpas.kernel, (u32) qp_init_attr->port_num, &pma_qp_nr, &bma_qp_nr); if (ret != H_SUCCESS) { ehca_err(&shca->ib_device, "Can't define AQP1 for port %x. h_ret=%lli", port, ret); return ret; } shca->sport[port - 1].pma_qp_nr = pma_qp_nr; ehca_dbg(&shca->ib_device, "port=%x pma_qp_nr=%x", port, pma_qp_nr); break; default: ehca_err(&shca->ib_device, "invalid qp_type=%x", qp_init_attr->qp_type); return H_PARAMETER; } if (ehca_nr_ports < 0) /* autodetect mode */ return H_SUCCESS; for (counter = 0; shca->sport[port - 1].port_state != IB_PORT_ACTIVE && counter < ehca_port_act_time; counter++) { ehca_dbg(&shca->ib_device, "... 
wait until port %x is active", port); msleep_interruptible(1000); } if (counter == ehca_port_act_time) { ehca_err(&shca->ib_device, "Port %x is not active.", port); return H_HARDWARE; } return H_SUCCESS; } struct ib_perf { struct ib_mad_hdr mad_hdr; u8 reserved[40]; u8 data[192]; } __attribute__ ((packed)); /* TC/SL/FL packed into 32 bits, as in ClassPortInfo */ struct tcslfl { u32 tc:8; u32 sl:4; u32 fl:20; } __attribute__ ((packed)); /* IP Version/TC/FL packed into 32 bits, as in GRH */ struct vertcfl { u32 ver:4; u32 tc:8; u32 fl:20; } __attribute__ ((packed)); static int ehca_process_perf(struct ib_device *ibdev, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_perf *in_perf = (struct ib_perf *)in_mad; struct ib_perf *out_perf = (struct ib_perf *)out_mad; struct ib_class_port_info *poi = (struct ib_class_port_info *)out_perf->data; struct tcslfl *tcslfl = (struct tcslfl *)&poi->redirect_tcslfl; struct ehca_shca *shca = container_of(ibdev, struct ehca_shca, ib_device); struct ehca_sport *sport = &shca->sport[port_num - 1]; ehca_dbg(ibdev, "method=%x", in_perf->mad_hdr.method); *out_mad = *in_mad; if (in_perf->mad_hdr.class_version != 1) { ehca_warn(ibdev, "Unsupported class_version=%x", in_perf->mad_hdr.class_version); out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_VERSION; goto perf_reply; } switch (in_perf->mad_hdr.method) { case IB_MGMT_METHOD_GET: case IB_MGMT_METHOD_SET: /* set class port info for redirection */ out_perf->mad_hdr.attr_id = IB_PMA_CLASS_PORT_INFO; out_perf->mad_hdr.status = IB_MAD_STATUS_REDIRECT; memset(poi, 0, sizeof(*poi)); poi->base_version = 1; poi->class_version = 1; poi->resp_time_value = 18; /* copy local routing information from WC where applicable */ tcslfl->sl = in_wc->sl; poi->redirect_lid = sport->saved_attr.lid | in_wc->dlid_path_bits; poi->redirect_qp = sport->pma_qp_nr; poi->redirect_qkey = IB_QP1_QKEY; ehca_query_pkey(ibdev, port_num, in_wc->pkey_index, 
&poi->redirect_pkey); /* if request was globally routed, copy route info */ if (in_grh) { struct vertcfl *vertcfl = (struct vertcfl *)&in_grh->version_tclass_flow; memcpy(poi->redirect_gid, in_grh->dgid.raw, sizeof(poi->redirect_gid)); tcslfl->tc = vertcfl->tc; tcslfl->fl = vertcfl->fl; } else /* else only fill in default GID */ ehca_query_gid(ibdev, port_num, 0, (union ib_gid *)&poi->redirect_gid); ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x", sport->saved_attr.lid, sport->pma_qp_nr); break; case IB_MGMT_METHOD_GET_RESP: return IB_MAD_RESULT_FAILURE; default: out_perf->mad_hdr.status = IB_MAD_STATUS_UNSUP_METHOD; break; } perf_reply: out_perf->mad_hdr.method = IB_MGMT_METHOD_GET_RESP; return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; } int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num, struct ib_wc *in_wc, struct ib_grh *in_grh, struct ib_mad *in_mad, struct ib_mad *out_mad) { int ret; if (!port_num || port_num > ibdev->phys_port_cnt || !in_wc) return IB_MAD_RESULT_FAILURE; /* accept only pma request */ if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT) return IB_MAD_RESULT_SUCCESS; ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp); ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh, in_mad, out_mad); return ret; }
gpl-2.0
Tof37/Caf-msm-3.4
drivers/block/paride/epia.c
15554
8049
/* epia.c (c) 1997-8 Grant R. Guenther <grant@torque.net> Under the terms of the GNU General Public License. epia.c is a low-level protocol driver for Shuttle Technologies EPIA parallel to IDE adapter chip. This device is now obsolete and has been replaced with the EPAT chip, which is supported by epat.c, however, some devices based on EPIA are still available. */ /* Changes: 1.01 GRG 1998.05.06 init_proto, release_proto 1.02 GRG 1998.06.17 support older versions of EPIA */ #define EPIA_VERSION "1.02" #include <linux/module.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/wait.h> #include <asm/io.h> #include "paride.h" /* mode codes: 0 nybble reads on port 1, 8-bit writes 1 5/3 reads on ports 1 & 2, 8-bit writes 2 8-bit reads and writes 3 8-bit EPP mode 4 16-bit EPP 5 32-bit EPP */ #define j44(a,b) (((a>>4)&0x0f)+(b&0xf0)) #define j53(a,b) (((a>>3)&0x1f)+((b<<4)&0xe0)) /* cont = 0 IDE register file cont = 1 IDE control registers */ static int cont_map[2] = { 0, 0x80 }; static int epia_read_regr( PIA *pi, int cont, int regr ) { int a, b, r; regr += cont_map[cont]; switch (pi->mode) { case 0: r = regr^0x39; w0(r); w2(1); w2(3); w0(r); a = r1(); w2(1); b = r1(); w2(4); return j44(a,b); case 1: r = regr^0x31; w0(r); w2(1); w0(r&0x37); w2(3); w2(5); w0(r|0xf0); a = r1(); b = r2(); w2(4); return j53(a,b); case 2: r = regr^0x29; w0(r); w2(1); w2(0X21); w2(0x23); a = r0(); w2(4); return a; case 3: case 4: case 5: w3(regr); w2(0x24); a = r4(); w2(4); return a; } return -1; } static void epia_write_regr( PIA *pi, int cont, int regr, int val) { int r; regr += cont_map[cont]; switch (pi->mode) { case 0: case 1: case 2: r = regr^0x19; w0(r); w2(1); w0(val); w2(3); w2(4); break; case 3: case 4: case 5: r = regr^0x40; w3(r); w4(val); w2(4); break; } } #define WR(r,v) epia_write_regr(pi,0,r,v) #define RR(r) (epia_read_regr(pi,0,r)) /* The use of register 0x84 is entirely unclear - it seems to control some EPP 
counters ... currently we know about 3 different block sizes: the standard 512 byte reads and writes, 12 byte writes and 2048 byte reads (the last two being used in the CDrom drivers. */ static void epia_connect ( PIA *pi ) { pi->saved_r0 = r0(); pi->saved_r2 = r2(); w2(4); w0(0xa0); w0(0x50); w0(0xc0); w0(0x30); w0(0xa0); w0(0); w2(1); w2(4); if (pi->mode >= 3) { w0(0xa); w2(1); w2(4); w0(0x82); w2(4); w2(0xc); w2(4); w2(0x24); w2(0x26); w2(4); } WR(0x86,8); } static void epia_disconnect ( PIA *pi ) { /* WR(0x84,0x10); */ w0(pi->saved_r0); w2(1); w2(4); w0(pi->saved_r0); w2(pi->saved_r2); } static void epia_read_block( PIA *pi, char * buf, int count ) { int k, ph, a, b; switch (pi->mode) { case 0: w0(0x81); w2(1); w2(3); w0(0xc1); ph = 1; for (k=0;k<count;k++) { w2(2+ph); a = r1(); w2(4+ph); b = r1(); buf[k] = j44(a,b); ph = 1 - ph; } w0(0); w2(4); break; case 1: w0(0x91); w2(1); w0(0x10); w2(3); w0(0x51); w2(5); w0(0xd1); ph = 1; for (k=0;k<count;k++) { w2(4+ph); a = r1(); b = r2(); buf[k] = j53(a,b); ph = 1 - ph; } w0(0); w2(4); break; case 2: w0(0x89); w2(1); w2(0x23); w2(0x21); ph = 1; for (k=0;k<count;k++) { w2(0x24+ph); buf[k] = r0(); ph = 1 - ph; } w2(6); w2(4); break; case 3: if (count > 512) WR(0x84,3); w3(0); w2(0x24); for (k=0;k<count;k++) buf[k] = r4(); w2(4); WR(0x84,0); break; case 4: if (count > 512) WR(0x84,3); w3(0); w2(0x24); for (k=0;k<count/2;k++) ((u16 *)buf)[k] = r4w(); w2(4); WR(0x84,0); break; case 5: if (count > 512) WR(0x84,3); w3(0); w2(0x24); for (k=0;k<count/4;k++) ((u32 *)buf)[k] = r4l(); w2(4); WR(0x84,0); break; } } static void epia_write_block( PIA *pi, char * buf, int count ) { int ph, k, last, d; switch (pi->mode) { case 0: case 1: case 2: w0(0xa1); w2(1); w2(3); w2(1); w2(5); ph = 0; last = 0x8000; for (k=0;k<count;k++) { d = buf[k]; if (d != last) { last = d; w0(d); } w2(4+ph); ph = 1 - ph; } w2(7); w2(4); break; case 3: if (count < 512) WR(0x84,1); w3(0x40); for (k=0;k<count;k++) w4(buf[k]); if (count < 512) WR(0x84,0); break; 
case 4: if (count < 512) WR(0x84,1); w3(0x40); for (k=0;k<count/2;k++) w4w(((u16 *)buf)[k]); if (count < 512) WR(0x84,0); break; case 5: if (count < 512) WR(0x84,1); w3(0x40); for (k=0;k<count/4;k++) w4l(((u32 *)buf)[k]); if (count < 512) WR(0x84,0); break; } } static int epia_test_proto( PIA *pi, char * scratch, int verbose ) { int j, k, f; int e[2] = {0,0}; epia_connect(pi); for (j=0;j<2;j++) { WR(6,0xa0+j*0x10); for (k=0;k<256;k++) { WR(2,k^0xaa); WR(3,k^0x55); if (RR(2) != (k^0xaa)) e[j]++; } WR(2,1); WR(3,1); } epia_disconnect(pi); f = 0; epia_connect(pi); WR(0x84,8); epia_read_block(pi,scratch,512); for (k=0;k<256;k++) { if ((scratch[2*k] & 0xff) != ((k+1) & 0xff)) f++; if ((scratch[2*k+1] & 0xff) != ((-2-k) & 0xff)) f++; } WR(0x84,0); epia_disconnect(pi); if (verbose) { printk("%s: epia: port 0x%x, mode %d, test=(%d,%d,%d)\n", pi->device,pi->port,pi->mode,e[0],e[1],f); } return (e[0] && e[1]) || f; } static void epia_log_adapter( PIA *pi, char * scratch, int verbose ) { char *mode_string[6] = {"4-bit","5/3","8-bit", "EPP-8","EPP-16","EPP-32"}; printk("%s: epia %s, Shuttle EPIA at 0x%x, ", pi->device,EPIA_VERSION,pi->port); printk("mode %d (%s), delay %d\n",pi->mode, mode_string[pi->mode],pi->delay); } static struct pi_protocol epia = { .owner = THIS_MODULE, .name = "epia", .max_mode = 6, .epp_first = 3, .default_delay = 1, .max_units = 1, .write_regr = epia_write_regr, .read_regr = epia_read_regr, .write_block = epia_write_block, .read_block = epia_read_block, .connect = epia_connect, .disconnect = epia_disconnect, .test_proto = epia_test_proto, .log_adapter = epia_log_adapter, }; static int __init epia_init(void) { return paride_register(&epia); } static void __exit epia_exit(void) { paride_unregister(&epia); } MODULE_LICENSE("GPL"); module_init(epia_init) module_exit(epia_exit)
gpl-2.0
fopina/ZTE-Joe-Kernel-2.6.29-AOSP
drivers/sbus/char/bbc_envctrl.c
195
15825
/* bbc_envctrl.c: UltraSPARC-III environment control driver.
 *
 * Copyright (C) 2001, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/kmod.h>
#include <linux/reboot.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <asm/oplib.h>

#include "bbc_i2c.h"
#include "max1617.h"

#undef ENVCTRL_TRACE

/* WARNING: Making changes to this driver is very dangerous.
 *          If you misprogram the sensor chips they can
 *          cut the power on you instantly.
 */

/* Two temperature sensors exist in the SunBLADE-1000 enclosure.
 * Both are implemented using max1617 i2c devices.  Each max1617
 * monitors 2 temperatures, one for one of the cpu dies and the other
 * for the ambient temperature.
 *
 * The max1617 is capable of being programmed with power-off
 * temperature values, one low limit and one high limit.  These
 * can be controlled independently for the cpu or ambient temperature.
 * If a limit is violated, the power is simply shut off.  The frequency
 * with which the max1617 does temperature sampling can be controlled
 * as well.
 *
 * Three fans exist inside the machine, all three are controlled with
 * an i2c digital to analog converter.  There is a fan directed at the
 * two processor slots, another for the rest of the enclosure, and the
 * third is for the power supply.  The first two fans may be speed
 * controlled by changing the voltage fed to them.  The third fan may
 * only be completely off or on.  The third fan is meant to only be
 * disabled/enabled when entering/exiting the lowest power-saving
 * mode of the machine.
 *
 * An environmental control kernel thread periodically monitors all
 * temperature sensors.  Based upon the samples it will adjust the
 * fan speeds to try and keep the system within a certain temperature
 * range (the goal being to make the fans as quiet as possible without
 * allowing the system to get too hot).
 *
 * If the temperature begins to rise/fall outside of the acceptable
 * operating range, a periodic warning will be sent to the kernel log.
 * The fans will be put on full blast to attempt to deal with this
 * situation.  After exceeding the acceptable operating range by a
 * certain threshold, the kernel thread will shut down the system.
 * Here, the thread is attempting to shut the machine down cleanly
 * before the hardware based power-off event is triggered.
 */

/* These settings are in Celsius.  We use these defaults only
 * if we cannot interrogate the cpu-fru SEEPROM.
 *
 * The three "high"/"low" pairs form nested thresholds, hot to cold:
 * the *_pwroff values are programmed into the max1617 itself (hardware
 * power cut, see attach_one_temp()), the *_shutdown values trigger a
 * software orderly shutdown (do_envctrl_shutdown()), and the *_warn
 * values only cause rate-limited kernel log warnings.
 */
struct temp_limits {
	s8 high_pwroff, high_shutdown, high_warn;
	s8 low_warn, low_shutdown, low_pwroff;
};

static struct temp_limits cpu_temp_limits[2] = {
	{ 100, 85, 80, 5, -5, -10 },
	{ 100, 85, 80, 5, -5, -10 },
};

static struct temp_limits amb_temp_limits[2] = {
	{ 65, 55, 40, 5, -5, -10 },
	{ 65, 55, 40, 5, -5, -10 },
};

/* Global lists of every temperature sensor and fan controller found,
 * walked by the kenvctrld thread; entries are also linked onto the
 * owning bbc_i2c_bus (bp->temps / bp->fans) for per-bus teardown.
 */
static LIST_HEAD(all_temps);
static LIST_HEAD(all_fans);

/* Register offsets in the fan-control DAC. */
#define CPU_FAN_REG	0xf0
#define SYS_FAN_REG	0xf2
#define PSUPPLY_FAN_REG	0xf4

#define FAN_SPEED_MIN	0x0c
#define FAN_SPEED_MAX	0x3f

#define	PSUPPLY_FAN_ON	0x1f
#define	PSUPPLY_FAN_OFF	0x00

/* Clamp the cached fan-speed state into the legal DAC range, then
 * write all three fan registers from that state.  The DAC is
 * write-only, so fp's fields are the only record of current speeds.
 */
static void set_fan_speeds(struct bbc_fan_control *fp)
{
	/* Put temperatures into range so we don't mis-program
	 * the hardware.
	 */
	if (fp->cpu_fan_speed < FAN_SPEED_MIN)
		fp->cpu_fan_speed = FAN_SPEED_MIN;
	if (fp->cpu_fan_speed > FAN_SPEED_MAX)
		fp->cpu_fan_speed = FAN_SPEED_MAX;
	if (fp->system_fan_speed < FAN_SPEED_MIN)
		fp->system_fan_speed = FAN_SPEED_MIN;
	if (fp->system_fan_speed > FAN_SPEED_MAX)
		fp->system_fan_speed = FAN_SPEED_MAX;
#ifdef ENVCTRL_TRACE
	printk("fan%d: Changed fan speed to cpu(%02x) sys(%02x)\n",
	       fp->index,
	       fp->cpu_fan_speed, fp->system_fan_speed);
#endif

	bbc_i2c_writeb(fp->client, fp->cpu_fan_speed, CPU_FAN_REG);
	bbc_i2c_writeb(fp->client, fp->system_fan_speed, SYS_FAN_REG);
	/* Power-supply fan is on/off only, no speed control. */
	bbc_i2c_writeb(fp->client,
		       (fp->psupply_fan_on ?
			PSUPPLY_FAN_ON : PSUPPLY_FAN_OFF),
		       PSUPPLY_FAN_REG);
}

/* Sample both temperatures from the max1617, rotating the current
 * readings into the prev_* fields first.
 */
static void get_current_temps(struct bbc_cpu_temperature *tp)
{
	tp->prev_amb_temp = tp->curr_amb_temp;
	bbc_i2c_readb(tp->client,
		      (unsigned char *) &tp->curr_amb_temp,
		      MAX1617_AMB_TEMP);
	tp->prev_cpu_temp = tp->curr_cpu_temp;
	bbc_i2c_readb(tp->client,
		      (unsigned char *) &tp->curr_cpu_temp,
		      MAX1617_CPU_TEMP);
#ifdef ENVCTRL_TRACE
	printk("temp%d: cpu(%d C) amb(%d C)\n",
	       tp->index,
	       (int) tp->curr_cpu_temp, (int) tp->curr_amb_temp);
#endif
}

/* Log which limit was violated and request an orderly power-off.
 * The static 'shutting_down' latch makes this a one-shot: repeated
 * calls while the shutdown is in flight do nothing.
 */
static void do_envctrl_shutdown(struct bbc_cpu_temperature *tp)
{
	static int shutting_down = 0;
	char *type = "???";
	s8 val = -1;

	if (shutting_down != 0)
		return;

	if (tp->curr_amb_temp >= amb_temp_limits[tp->index].high_shutdown ||
	    tp->curr_amb_temp < amb_temp_limits[tp->index].low_shutdown) {
		type = "ambient";
		val = tp->curr_amb_temp;
	} else if (tp->curr_cpu_temp >= cpu_temp_limits[tp->index].high_shutdown ||
		   tp->curr_cpu_temp < cpu_temp_limits[tp->index].low_shutdown) {
		type = "CPU";
		val = tp->curr_cpu_temp;
	}

	printk(KERN_CRIT "temp%d: Outside of safe %s "
	       "operating temperature, %d C.\n",
	       tp->index, type, val);

	printk(KERN_CRIT "kenvctrld: Shutting down the system now.\n");

	shutting_down = 1;
	if (orderly_poweroff(true) < 0)
		printk(KERN_CRIT "envctrl: shutdown execution failed\n");
}

#define WARN_INTERVAL	(30 * HZ)

/* Evaluate the ambient reading against the warn and shutdown limits,
 * then record a fan recommendation in tp->fan_todo[FAN_AMBIENT]:
 * FULLBLAST while any limit is violated (warnings are rate-limited to
 * one per WARN_INTERVAL, but the fan decision is not), otherwise a
 * SLOWER/SAME/FASTER adjustment re-evaluated only every 8th tick.
 */
static void analyze_ambient_temp(struct bbc_cpu_temperature *tp,
				 unsigned long *last_warn, int tick)
{
	int ret = 0;

	if (time_after(jiffies, (*last_warn + WARN_INTERVAL))) {
		if (tp->curr_amb_temp >=
		    amb_temp_limits[tp->index].high_warn) {
			printk(KERN_WARNING "temp%d: "
			       "Above safe ambient operating temperature, %d C.\n",
			       tp->index, (int) tp->curr_amb_temp);
			ret = 1;
		} else if (tp->curr_amb_temp <
			   amb_temp_limits[tp->index].low_warn) {
			printk(KERN_WARNING "temp%d: "
			       "Below safe ambient operating temperature, %d C.\n",
			       tp->index, (int) tp->curr_amb_temp);
			ret = 1;
		}
		if (ret)
			*last_warn = jiffies;
	} else if (tp->curr_amb_temp >= amb_temp_limits[tp->index].high_warn ||
		   tp->curr_amb_temp < amb_temp_limits[tp->index].low_warn)
		ret = 1;

	/* Now check the shutdown limits. */
	if (tp->curr_amb_temp >= amb_temp_limits[tp->index].high_shutdown ||
	    tp->curr_amb_temp < amb_temp_limits[tp->index].low_shutdown) {
		do_envctrl_shutdown(tp);
		ret = 1;
	}

	if (ret) {
		tp->fan_todo[FAN_AMBIENT] = FAN_FULLBLAST;
	} else if ((tick & (8 - 1)) == 0) {
		/* Aim for a band 10..13 C below the high-warn limit. */
		s8 amb_goal_hi = amb_temp_limits[tp->index].high_warn - 10;
		s8 amb_goal_lo;

		amb_goal_lo = amb_goal_hi - 3;

		/* We do not try to avoid 'too cold' events.  Basically we
		 * only try to deal with over-heating and fan noise reduction.
		 */
		if (tp->avg_amb_temp < amb_goal_hi) {
			if (tp->avg_amb_temp >= amb_goal_lo)
				tp->fan_todo[FAN_AMBIENT] = FAN_SAME;
			else
				tp->fan_todo[FAN_AMBIENT] = FAN_SLOWER;
		} else {
			tp->fan_todo[FAN_AMBIENT] = FAN_FASTER;
		}
	} else {
		tp->fan_todo[FAN_AMBIENT] = FAN_SAME;
	}
}

/* CPU-die counterpart of analyze_ambient_temp(): same warn/shutdown/
 * fan-recommendation logic, using the cpu limits and FAN_CPU slot.
 */
static void analyze_cpu_temp(struct bbc_cpu_temperature *tp,
			     unsigned long *last_warn, int tick)
{
	int ret = 0;

	if (time_after(jiffies, (*last_warn + WARN_INTERVAL))) {
		if (tp->curr_cpu_temp >=
		    cpu_temp_limits[tp->index].high_warn) {
			printk(KERN_WARNING "temp%d: "
			       "Above safe CPU operating temperature, %d C.\n",
			       tp->index, (int) tp->curr_cpu_temp);
			ret = 1;
		} else if (tp->curr_cpu_temp <
			   cpu_temp_limits[tp->index].low_warn) {
			printk(KERN_WARNING "temp%d: "
			       "Below safe CPU operating temperature, %d C.\n",
			       tp->index, (int) tp->curr_cpu_temp);
			ret = 1;
		}
		if (ret)
			*last_warn = jiffies;
	} else if (tp->curr_cpu_temp >= cpu_temp_limits[tp->index].high_warn ||
		   tp->curr_cpu_temp < cpu_temp_limits[tp->index].low_warn)
		ret = 1;

	/* Now check the shutdown limits. */
	if (tp->curr_cpu_temp >= cpu_temp_limits[tp->index].high_shutdown ||
	    tp->curr_cpu_temp < cpu_temp_limits[tp->index].low_shutdown) {
		do_envctrl_shutdown(tp);
		ret = 1;
	}

	if (ret) {
		tp->fan_todo[FAN_CPU] = FAN_FULLBLAST;
	} else if ((tick & (8 - 1)) == 0) {
		/* Aim for a band 10..13 C below the high-warn limit. */
		s8 cpu_goal_hi = cpu_temp_limits[tp->index].high_warn - 10;
		s8 cpu_goal_lo;

		cpu_goal_lo = cpu_goal_hi - 3;

		/* We do not try to avoid 'too cold' events.  Basically we
		 * only try to deal with over-heating and fan noise reduction.
		 */
		if (tp->avg_cpu_temp < cpu_goal_hi) {
			if (tp->avg_cpu_temp >= cpu_goal_lo)
				tp->fan_todo[FAN_CPU] = FAN_SAME;
			else
				tp->fan_todo[FAN_CPU] = FAN_SLOWER;
		} else {
			tp->fan_todo[FAN_CPU] = FAN_FASTER;
		}
	} else {
		tp->fan_todo[FAN_CPU] = FAN_SAME;
	}
}

/* Fold the newest samples into the running averages (simple
 * average of old average and new sample), run both analyzers,
 * and advance the per-sensor sample counter.
 */
static void analyze_temps(struct bbc_cpu_temperature *tp, unsigned long *last_warn)
{
	tp->avg_amb_temp = (s8)((int)((int)tp->avg_amb_temp +
				      (int)tp->curr_amb_temp) / 2);
	tp->avg_cpu_temp = (s8)((int)((int)tp->avg_cpu_temp +
				      (int)tp->curr_cpu_temp) / 2);

	analyze_ambient_temp(tp, last_warn, tp->sample_tick);
	analyze_cpu_temp(tp, last_warn, tp->sample_tick);

	tp->sample_tick++;
}

/* Combine the fan_todo recommendations from every temperature sensor
 * into a single action for the given fan.  Priority, strongest first:
 * FULLBLAST > FASTER > SAME > SLOWER; no recommendation means SAME.
 */
static enum fan_action prioritize_fan_action(int which_fan)
{
	struct bbc_cpu_temperature *tp;
	enum fan_action decision = FAN_STATE_MAX;

	/* Basically, prioritize what the temperature sensors
	 * recommend we do, and perform that action on all the
	 * fans.
	 */
	list_for_each_entry(tp, &all_temps, glob_list) {
		if (tp->fan_todo[which_fan] == FAN_FULLBLAST) {
			decision = FAN_FULLBLAST;
			break;
		}
		if (tp->fan_todo[which_fan] == FAN_SAME &&
		    decision != FAN_FASTER)
			decision = FAN_SAME;
		else if (tp->fan_todo[which_fan] == FAN_FASTER)
			decision = FAN_FASTER;
		else if (decision != FAN_FASTER &&
			 decision != FAN_SAME &&
			 tp->fan_todo[which_fan] == FAN_SLOWER)
			decision = FAN_SLOWER;
	}

	if (decision == FAN_STATE_MAX)
		decision = FAN_SAME;

	return decision;
}

/* Apply the aggregated FAN_AMBIENT decision to the system fan's
 * cached speed.  Returns 1 if the cached state changed (caller must
 * then write the hardware), 0 if no change was needed or possible.
 * Slowing down is refused once the system fan would drop more than
 * 3 steps below the cpu fan.
 */
static int maybe_new_ambient_fan_speed(struct bbc_fan_control *fp)
{
	enum fan_action decision = prioritize_fan_action(FAN_AMBIENT);
	int ret;

	if (decision == FAN_SAME)
		return 0;

	ret = 1;
	if (decision == FAN_FULLBLAST) {
		if (fp->system_fan_speed >= FAN_SPEED_MAX)
			ret = 0;
		else
			fp->system_fan_speed = FAN_SPEED_MAX;
	} else {
		if (decision == FAN_FASTER) {
			if (fp->system_fan_speed >= FAN_SPEED_MAX)
				ret = 0;
			else
				fp->system_fan_speed += 2;
		} else {
			int orig_speed = fp->system_fan_speed;

			if (orig_speed <= FAN_SPEED_MIN ||
			    orig_speed <= (fp->cpu_fan_speed - 3))
				ret = 0;
			else
				fp->system_fan_speed -= 1;
		}
	}

	return ret;
}

/* Apply the aggregated FAN_CPU decision to the cpu fan's cached
 * speed; same return convention as maybe_new_ambient_fan_speed().
 * When speeding up, the system fan is dragged along so it never
 * lags more than 3 steps behind the cpu fan.
 */
static int maybe_new_cpu_fan_speed(struct bbc_fan_control *fp)
{
	enum fan_action decision = prioritize_fan_action(FAN_CPU);
	int ret;

	if (decision == FAN_SAME)
		return 0;

	ret = 1;
	if (decision == FAN_FULLBLAST) {
		if (fp->cpu_fan_speed >= FAN_SPEED_MAX)
			ret = 0;
		else
			fp->cpu_fan_speed = FAN_SPEED_MAX;
	} else {
		if (decision == FAN_FASTER) {
			if (fp->cpu_fan_speed >= FAN_SPEED_MAX)
				ret = 0;
			else {
				fp->cpu_fan_speed += 2;
				if (fp->system_fan_speed <
				    (fp->cpu_fan_speed - 3))
					fp->system_fan_speed =
						fp->cpu_fan_speed - 3;
			}
		} else {
			if (fp->cpu_fan_speed <= FAN_SPEED_MIN)
				ret = 0;
			else
				fp->cpu_fan_speed -= 1;
		}
	}

	return ret;
}

/* Recompute both fan speeds and write the hardware only if at
 * least one of them changed.
 */
static void maybe_new_fan_speeds(struct bbc_fan_control *fp)
{
	int new;

	new = maybe_new_ambient_fan_speed(fp);
	new |= maybe_new_cpu_fan_speed(fp);

	if (new)
		set_fan_speeds(fp);
}

/* Fail-safe used when the monitoring thread exits: max out every
 * speed-controlled fan and force the power-supply fan on.
 */
static void fans_full_blast(void)
{
	struct bbc_fan_control *fp;

	/* Since we will not be monitoring things anymore, put
	 * the fans on full blast.
	 */
	list_for_each_entry(fp, &all_fans, glob_list) {
		fp->cpu_fan_speed = FAN_SPEED_MAX;
		fp->system_fan_speed = FAN_SPEED_MAX;
		fp->psupply_fan_on = 1;
		set_fan_speeds(fp);
	}
}

#define POLL_INTERVAL (5 * 1000)	/* milliseconds between samples */
static unsigned long last_warning_jiffies;
static struct task_struct *kenvctrld_task;

/* Monitoring thread: every POLL_INTERVAL ms, sample and analyze all
 * temperature sensors, then adjust all fans accordingly.  On stop,
 * fans are left at full blast since nobody will regulate them.
 */
static int kenvctrld(void *__unused)
{
	printk(KERN_INFO "bbc_envctrl: kenvctrld starting...\n");
	last_warning_jiffies = jiffies - WARN_INTERVAL;
	for (;;) {
		struct bbc_cpu_temperature *tp;
		struct bbc_fan_control *fp;

		msleep_interruptible(POLL_INTERVAL);

		if (kthread_should_stop())
			break;

		list_for_each_entry(tp, &all_temps, glob_list) {
			get_current_temps(tp);
			analyze_temps(tp, &last_warning_jiffies);
		}
		list_for_each_entry(fp, &all_fans, glob_list)
			maybe_new_fan_speeds(fp);
	}
	printk(KERN_INFO "bbc_envctrl: kenvctrld exiting...\n");

	fans_full_blast();

	return 0;
}

/* Allocate and register one max1617 temperature sensor: link it onto
 * the global and per-bus lists, program its conversion rate and the
 * hardware power-off limits, and seed the software sample state with
 * an initial reading.  Allocation/attach failures are silently
 * ignored (the sensor is simply not monitored).
 */
static void attach_one_temp(struct bbc_i2c_bus *bp, struct of_device *op,
			    int temp_idx)
{
	struct bbc_cpu_temperature *tp;

	tp = kzalloc(sizeof(*tp), GFP_KERNEL);
	if (!tp)
		return;

	tp->client = bbc_i2c_attach(bp, op);
	if (!tp->client) {
		kfree(tp);
		return;
	}

	tp->index = temp_idx;

	list_add(&tp->glob_list, &all_temps);
	list_add(&tp->bp_list, &bp->temps);

	/* Tell it to convert once every 5 seconds, clear all cfg
	 * bits.
	 */
	bbc_i2c_writeb(tp->client, 0x00, MAX1617_WR_CFG_BYTE);
	bbc_i2c_writeb(tp->client, 0x02, MAX1617_WR_CVRATE_BYTE);

	/* Program the hard temperature limits into the chip. */
	bbc_i2c_writeb(tp->client, amb_temp_limits[tp->index].high_pwroff,
		       MAX1617_WR_AMB_HIGHLIM);
	bbc_i2c_writeb(tp->client, amb_temp_limits[tp->index].low_pwroff,
		       MAX1617_WR_AMB_LOWLIM);
	bbc_i2c_writeb(tp->client, cpu_temp_limits[tp->index].high_pwroff,
		       MAX1617_WR_CPU_HIGHLIM);
	bbc_i2c_writeb(tp->client, cpu_temp_limits[tp->index].low_pwroff,
		       MAX1617_WR_CPU_LOWLIM);

	get_current_temps(tp);
	tp->prev_cpu_temp = tp->avg_cpu_temp = tp->curr_cpu_temp;
	tp->prev_amb_temp = tp->avg_amb_temp = tp->curr_amb_temp;

	tp->fan_todo[FAN_AMBIENT] = FAN_SAME;
	tp->fan_todo[FAN_CPU] = FAN_SAME;
}

/* Allocate and register one fan-control DAC, then drive it to a
 * known initial state.  Allocation/attach failures are silently
 * ignored, as in attach_one_temp().
 */
static void attach_one_fan(struct bbc_i2c_bus *bp, struct of_device *op,
			   int fan_idx)
{
	struct bbc_fan_control *fp;

	fp = kzalloc(sizeof(*fp), GFP_KERNEL);
	if (!fp)
		return;

	fp->client = bbc_i2c_attach(bp, op);
	if (!fp->client) {
		kfree(fp);
		return;
	}

	fp->index = fan_idx;

	list_add(&fp->glob_list, &all_fans);
	list_add(&fp->bp_list, &bp->fans);

	/* The i2c device controlling the fans is write-only.
	 * So the only way to keep track of the current power
	 * level fed to the fans is via software.  Choose half
	 * power for cpu/system and 'on' for the powersupply fan
	 * and set it now.
	 */
	fp->psupply_fan_on = 1;
	fp->cpu_fan_speed = (FAN_SPEED_MAX - FAN_SPEED_MIN) / 2;
	fp->cpu_fan_speed += FAN_SPEED_MIN;
	fp->system_fan_speed = (FAN_SPEED_MAX - FAN_SPEED_MIN) / 2;
	fp->system_fan_speed += FAN_SPEED_MIN;

	set_fan_speeds(fp);
}

/* Probe the given i2c bus for "temperature" and "fan-control" nodes,
 * attach each one, and - if at least one sensor AND one fan were
 * found - start the kenvctrld monitoring thread.
 *
 * Returns 0 on success or the PTR_ERR from kthread_run() on failure.
 */
int bbc_envctrl_init(struct bbc_i2c_bus *bp)
{
	struct of_device *op;
	int temp_index = 0;
	int fan_index = 0;
	int devidx = 0;

	while ((op = bbc_i2c_getdev(bp, devidx++)) != NULL) {
		if (!strcmp(op->node->name, "temperature"))
			attach_one_temp(bp, op, temp_index++);
		if (!strcmp(op->node->name, "fan-control"))
			attach_one_fan(bp, op, fan_index++);
	}
	if (temp_index != 0 && fan_index != 0) {
		kenvctrld_task = kthread_run(kenvctrld, NULL, "kenvctrld");
		if (IS_ERR(kenvctrld_task))
			return PTR_ERR(kenvctrld_task);
	}

	return 0;
}

/* Detach and free a single temperature sensor (caller unlinks it). */
static void destroy_one_temp(struct bbc_cpu_temperature *tp)
{
	bbc_i2c_detach(tp->client);
	kfree(tp);
}

/* Detach and free a single fan controller (caller unlinks it). */
static void destroy_one_fan(struct bbc_fan_control *fp)
{
	bbc_i2c_detach(fp->client);
	kfree(fp);
}

/* Stop the monitoring thread (which puts fans on full blast on its
 * way out), then unlink and destroy every sensor and fan registered
 * on this bus.
 */
void bbc_envctrl_cleanup(struct bbc_i2c_bus *bp)
{
	struct bbc_cpu_temperature *tp, *tpos;
	struct bbc_fan_control *fp, *fpos;

	kthread_stop(kenvctrld_task);

	list_for_each_entry_safe(tp, tpos, &bp->temps, bp_list) {
		list_del(&tp->bp_list);
		list_del(&tp->glob_list);
		destroy_one_temp(tp);
	}

	list_for_each_entry_safe(fp, fpos, &bp->fans, bp_list) {
		list_del(&fp->bp_list);
		list_del(&fp->glob_list);
		destroy_one_fan(fp);
	}
}
gpl-2.0